Code example #1
 def _lookup(self, name, *args, **kwargs):
     instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
     if instance is not None:
         from ansible.utils.listify import listify_lookup_plugin_terms
         loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
         # safely catch run failures per #5059
         try:
             ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
         except (AnsibleUndefinedVariable, UndefinedError) as e:
             raise AnsibleUndefinedVariable(e)
         return ran
     else:
         raise AnsibleError("lookup plugin (%s) not found" % name)
Code example #2
 def env_prefix(self, **kwargs):
     env = self.env.copy()
     env.update(kwargs)
     return ' '.join([
         'set -lx %s %s;' % (k, shlex_quote(text_type(v)))
         for k, v in env.items()
     ])
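
For context, the prefix built above is just a chain of fish-shell "set -lx NAME VALUE;" clauses. The following stand-alone sketch is not Ansible code: fish_env_prefix is a made-up name and the standard library's shlex.quote stands in for shlex_quote.

from shlex import quote

def fish_env_prefix(**kwargs):
    # one "set -lx NAME VALUE;" clause per environment variable
    return ' '.join('set -lx %s %s;' % (k, quote(str(v))) for k, v in kwargs.items())

print(fish_env_prefix(LANG='C', ANSIBLE_REMOTE_TMP='/tmp/.ansible'))
# set -lx LANG C; set -lx ANSIBLE_REMOTE_TMP /tmp/.ansible;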
Code example #3
File: conditional.py Project: 2ndQuadrant/ansible
    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            conditional = templar.template(presented)
            val = conditional.strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened to be
            # looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
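
The heart of this method is the wrapping trick: the conditional is embedded in an if/else template and the rendered text is compared against "True". Below is a rough stand-alone sketch of that idea using plain Jinja2; evaluate_conditional is a hypothetical helper, not Ansible's Templar.

from jinja2 import Environment

def evaluate_conditional(conditional, variables):
    # wrap the expression exactly like the 'presented' string above
    presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
    rendered = Environment().from_string(presented).render(**variables)
    return rendered.strip() == "True"

print(evaluate_conditional("x > 3", {"x": 5}))                 # True
print(evaluate_conditional("name == 'web'", {"name": "db"}))   # False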
Code example #4
File: dataloader.py Project: saran410/Devops
    def load(self, data, file_name='<string>', show_content=True):
        '''
        Creates a python datastructure from the given data, which can be either
        a JSON or YAML string.
        '''
        new_data = None
        try:
            # we first try to load this data as JSON
            new_data = json.loads(data)
        except:
            # must not be JSON, let the rest try
            if isinstance(data, AnsibleUnicode):
                # The PyYAML's libyaml bindings use PyUnicode_CheckExact so
                # they are unable to cope with our subclass.
                # Unwrap and re-wrap the unicode so we can keep track of line
                # numbers
                in_data = text_type(data)
            else:
                in_data = data
            try:
                new_data = self._safe_load(in_data, file_name=file_name)
            except YAMLError as yaml_exc:
                self._handle_error(yaml_exc, file_name, show_content)

            if isinstance(data, AnsibleUnicode):
                new_data = AnsibleUnicode(new_data)
                new_data.ansible_pos = data.ansible_pos

        return new_data
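
The control flow above is simply "try JSON first, fall back to the YAML parser". A minimal stand-alone sketch of that pattern, assuming PyYAML is installed; load_data is an invented name, not the DataLoader API.

import json
import yaml

def load_data(data):
    try:
        return json.loads(data)        # fast path: the string is valid JSON
    except ValueError:
        return yaml.safe_load(data)    # otherwise let the YAML parser try

print(load_data('{"a": 1}'))           # {'a': 1}
print(load_data('a: 1\nb: [2, 3]'))    # {'a': 1, 'b': [2, 3]}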
Code example #5
 def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True, static_vars = ['']):
     '''
     Templates (possibly recursively) any given data as input. If convert_bare is
     set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
     before being sent through the template engine.
     '''
     # Don't template unsafe variables, instead drop them back down to
     # their constituent type.
     if hasattr(variable, '__UNSAFE__'):
         if isinstance(variable, text_type):
             return self._clean_data(text_type(variable))
         elif isinstance(variable, binary_type):
             return self._clean_data(bytes(variable))
         else:
             return self._clean_data(variable._obj)
     try:
         if convert_bare:
             variable = self._convert_bare_variable(variable)
         if isinstance(variable, string_types):
             result = variable
             if self._contains_vars(variable):
                 # Using a cache in order to prevent template calls with already templated variables
                 variable_hash = sha1(text_type(variable).encode('utf-8'))
                 options_hash = sha1((text_type(preserve_trailing_newlines) + text_type(escape_backslashes) + text_type(fail_on_undefined) + text_type(overrides)).encode('utf-8'))
                 sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
                 if sha1_hash in self._cached_result:
                     result = self._cached_result[sha1_hash]
                 else:
                     result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides)
                     self._cached_result[sha1_hash] = result
             return result
         elif isinstance(variable, (list, tuple)):
             return [self.template(v, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
         elif isinstance(variable, dict):
             d = {}
             # we don't use iteritems() here to avoid problems if the underlying dict
             # changes sizes due to the templating, which can happen with hostvars
             for k in variable.keys():
                 if k not in static_vars:
                     d[k] = self.template(variable[k], preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
                 else:
                     d[k] = variable[k]
             return d
         else:
             return variable
     except AnsibleFilterError:
         if self._fail_on_filter_errors:
             raise
         else:
             return variable
Code example #6
    def load(self, data, file_name='<string>', show_content=True):
        '''
        Creates a python datastructure from the given data, which can be either
        a JSON or YAML string.
        '''
        new_data = None
        try:
            # we first try to load this data as JSON
            new_data = json.loads(data)
        except:
            # must not be JSON, let the rest try
            if isinstance(data, AnsibleUnicode):
                # The PyYAML's libyaml bindings use PyUnicode_CheckExact so
                # they are unable to cope with our subclass.
                # Unwrap and re-wrap the unicode so we can keep track of line
                # numbers
                in_data = text_type(data)
            else:
                in_data = data
            try:
                new_data = self._safe_load(in_data, file_name=file_name)
            except YAMLError as yaml_exc:
                self._handle_error(yaml_exc, file_name, show_content)

            if isinstance(data, AnsibleUnicode):
                new_data = AnsibleUnicode(new_data)
                new_data.ansible_pos = data.ansible_pos

        return new_data
Code example #7
File: __init__.py Project: sreekanthpulagam/ansible
    def _get_magic_variables(self, loader, play, host, task, include_hostvars,
                             include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()

        if host:
            variables['group_names'] = [
                group.name for group in host.get_groups()
                if group.name != 'all'
            ]

            if self._inventory is not None:
                variables['groups'] = dict()
                for (group_name, group) in iteritems(self._inventory.groups):
                    variables['groups'][group_name] = [
                        h.name for h in group.get_hosts()
                    ]

                #if include_hostvars:
                #    hostvars_cache_entry = self._get_cache_entry(play=play)
                #    if hostvars_cache_entry in HOSTVARS_CACHE:
                #        hostvars = HOSTVARS_CACHE[hostvars_cache_entry]
                #    else:
                #        hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self)
                #        HOSTVARS_CACHE[hostvars_cache_entry] = hostvars
                #    variables['hostvars'] = hostvars
                #    variables['vars'] = hostvars[host.get_name()]

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
                #             however this would take work in the templating engine, so for now
                #             we'll add both so we can give users something transitional to use
                host_list = [x.name for x in self._inventory.get_hosts()]
                variables['play_hosts'] = host_list
                variables['ansible_play_hosts'] = host_list

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)

        return variables
Code example #8
    def _get_magic_variables(self, loader, play, host, task, include_hostvars,
                             include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()
        variables['ansible_playbook_python'] = sys.executable

        if host:
            # host already provides some magic vars via host.get_vars()
            if self._inventory:
                variables['groups'] = self._inventory.get_group_dict()

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                variables['ansible_play_hosts_all'] = [
                    x.name for x in self._inventory.get_hosts(
                        pattern=play.hosts or 'all', ignore_restrictions=True)
                ]
                variables['ansible_play_hosts'] = [
                    x for x in variables['ansible_play_hosts_all']
                    if x not in play._removed_hosts
                ]
                variables['ansible_play_batch'] = [
                    x.name for x in self._inventory.get_hosts()
                    if x.name not in play._removed_hosts
                ]

                #DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
                #  however this would take work in the templating engine, so for now we'll add both
                variables['play_hosts'] = variables['ansible_play_batch']

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables
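
To make the host lists above easier to compare, here is a data-only illustration with invented sample values; it mirrors the list comprehensions in the snippet.

groups = {'all': ['web1', 'web2', 'db1'], 'web': ['web1', 'web2']}   # shape of get_group_dict()
removed_hosts = ['web2']                                             # hosts dropped earlier in the play

ansible_play_hosts_all = groups['all']
ansible_play_hosts = [h for h in ansible_play_hosts_all if h not in removed_hosts]

print(ansible_play_hosts)   # ['web1', 'db1']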
Code example #9
 def env_prefix(self, **kwargs):
     '''Build command prefix with environment variables.'''
     env = dict(
         LANG        = C.DEFAULT_MODULE_LANG,
         LC_ALL      = C.DEFAULT_MODULE_LANG,
         LC_MESSAGES = C.DEFAULT_MODULE_LANG,
     )
     env.update(kwargs)
     return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
Code example #10
File: sh.py Project: RajeevNambiar/temp
 def env_prefix(self, **kwargs):
     '''Build command prefix with environment variables.'''
     env = dict(
         LANG        = C.DEFAULT_MODULE_LANG,
         LC_ALL      = C.DEFAULT_MODULE_LANG,
         LC_MESSAGES = C.DEFAULT_MODULE_LANG,
     )
     env.update(kwargs)
     return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
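
The Bourne-shell variant produces space-separated NAME=value assignments; pipes.quote is the old Python 2 spelling of what is now shlex.quote. A rough illustration, where sh_env_prefix is a made-up name rather than the plugin method:

from shlex import quote

def sh_env_prefix(**kwargs):
    return ' '.join('%s=%s' % (k, quote(str(v))) for k, v in kwargs.items())

print(sh_env_prefix(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
# LANG=C LC_ALL=C LC_MESSAGES=C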
Code example #11
File: __init__.py Project: xyuan/ansible
    def _get_magic_variables(self, loader, play, host, task, include_hostvars,
                             include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()

        if host:
            variables['group_names'] = sorted([
                group.name for group in host.get_groups()
                if group.name != 'all'
            ])

            if self._inventory:
                variables['groups'] = self._inventory.get_group_dict()

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
                #             however this would take work in the templating engine, so for now
                #             we'll add both so we can give users something transitional to use
                host_list = [x.name for x in self._inventory.get_hosts()]
                variables['play_hosts'] = host_list
                variables['ansible_play_hosts'] = host_list

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables
Code example #12
File: __init__.py Project: 2ndQuadrant/ansible
    def _get_magic_variables(self, loader, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()
        variables['ansible_playbook_python'] = sys.executable

        if host:
            variables['group_names'] = sorted([group.name for group in host.get_groups() if group.name != 'all'])

            if self._inventory:
                variables['groups']  = self._inventory.get_group_dict()

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=play.hosts or 'all', ignore_restrictions=True)]
                variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
                variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts]

                #DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
                #  however this would take work in the templating engine, so for now we'll add both
                variables['play_hosts'] = variables['ansible_play_batch']

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables
Code example #13
    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and '-' not in text_type(
                all_vars[conditional]):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            conditional = templar.template(presented)
            val = conditional.strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" %
                                   original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a
            # variable was undefined. If we happened to be
            # looking for an undefined variable, return True,
            # otherwise fail
            if "is undefined" in original or "is not defined" in original or "not is defined" in original:
                return True
            elif "is defined" in original or "is not undefined" in original or "not is undefined" in original:
                return False
            else:
                raise AnsibleUndefinedVariable(
                    "error while evaluating conditional (%s): %s" %
                    (original, e))
Code example #14
File: conditional.py Project: RajeevNambiar/temp
    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified to this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            conditional = templar.template(presented)
            val = conditional.strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a
            # variable was undefined. If we happened to be
            # looking for an undefined variable, return True,
            # otherwise fail
            if "is undefined" in original:
                return True
            elif "is defined" in original:
                return False
            else:
                raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e))
Code example #15
    def _get_magic_variables(self, loader, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()

        if host:
            variables['group_names'] = sorted([group.name for group in host.get_groups() if group.name != 'all'])

            if self._inventory is not None:
                variables['groups']  = dict()
                for (group_name, group) in iteritems(self._inventory.groups):
                    variables['groups'][group_name] = [h.name for h in group.get_hosts()]
        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
                #             however this would take work in the templating engine, so for now
                #             we'll add both so we can give users something transitional to use
                host_list = [x.name for x in self._inventory.get_hosts()]
                variables['play_hosts'] = host_list
                variables['ansible_play_hosts'] = host_list

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)

        return variables
Code example #16
File: dataloader.py Project: udesilva/ansible
    def load(self, data, file_name='<string>', show_content=True):
        '''
        Creates a python datastructure from the given data, which can be either
        a JSON or YAML string.
        '''

        # YAML parser will take JSON as it is a subset.
        if isinstance(data, AnsibleUnicode):
            # The PyYAML's libyaml bindings use PyUnicode_CheckExact so
            # they are unable to cope with our subclass.
            # Unwrap and re-wrap the unicode so we can keep track of line
            # numbers
            in_data = text_type(data)
        else:
            in_data = data
        try:
            new_data = self._safe_load(in_data, file_name=file_name)
        except YAMLError as yaml_exc:
            self._handle_error(yaml_exc, file_name, show_content)

        if isinstance(data, AnsibleUnicode):
            new_data = AnsibleUnicode(new_data)
            new_data.ansible_pos = data.ansible_pos
        return new_data
Code example #17
File: __init__.py Project: shcho1105/ansible
    def template(self,
                 variable,
                 convert_bare=False,
                 preserve_trailing_newlines=True,
                 escape_backslashes=True,
                 fail_on_undefined=None,
                 overrides=None,
                 convert_data=True):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine. 
        '''

        # Don't template unsafe variables, instead drop them back down to
        # their constituent type.
        if hasattr(variable, '__UNSAFE__'):
            if isinstance(variable, text_type):
                return self._clean_data(text_type(variable))
            elif isinstance(variable, binary_type):
                return self._clean_data(bytes(variable))
            else:
                return self._clean_data(variable._obj)

        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable)

            if isinstance(variable, string_types):
                result = variable
                if self._contains_vars(variable):

                    # Check to see if the string we are trying to render is just referencing a single
                    # var.  In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION

                    result = self._do_template(
                        variable,
                        preserve_trailing_newlines=preserve_trailing_newlines,
                        escape_backslashes=escape_backslashes,
                        fail_on_undefined=fail_on_undefined,
                        overrides=overrides)

                    if convert_data:
                        # if this looks like a dictionary or list, convert it to such using the safe_eval method
                        if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                           result.startswith("[") or result in ("True", "False"):
                            eval_results = safe_eval(
                                result,
                                locals=self._available_variables,
                                include_exceptions=True)
                            if eval_results[1] is None:
                                result = eval_results[0]
                            else:
                                # FIXME: if the safe_eval raised an error, should we do something with it?
                                pass

                #return self._clean_data(result)
                return result

            elif isinstance(variable, (list, tuple)):
                return [
                    self.template(
                        v,
                        preserve_trailing_newlines=preserve_trailing_newlines,
                        fail_on_undefined=fail_on_undefined,
                        overrides=overrides) for v in variable
                ]
            elif isinstance(variable, dict):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    d[k] = self.template(
                        variable[k],
                        preserve_trailing_newlines=preserve_trailing_newlines,
                        fail_on_undefined=fail_on_undefined,
                        overrides=overrides)
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable
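
The SINGLE_VAR shortcut near the top of this method exists because rendering through Jinja2 always returns text: a lone "{{ count }}" would silently turn an int into a str unless the value is taken straight from the variables dict. A much-simplified sketch of that idea follows; the regex is a toy stand-in, not Ansible's actual SINGLE_VAR pattern.

import re
from jinja2 import Environment

SINGLE_VAR = re.compile(r"^\{\{\s*(\w+)\s*\}\}$")

def render(value, variables):
    match = SINGLE_VAR.match(value)
    if match and match.group(1) in variables:
        return variables[match.group(1)]          # preserve the original type
    return Environment().from_string(value).render(**variables)

print(repr(render("{{ count }}", {"count": 3})))     # 3  (stays an int)
print(repr(render("n={{ count }}", {"count": 3})))   # 'n=3'  (rendered to text)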
Code example #18
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True, wrap_async=False):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        remote_module_path = None
        args_file_path = None
        remote_files = []

        # if a module name was not specified for this execution, use the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        self._update_module_args(module_name, module_args, task_vars)

        # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
        (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        if not self._is_pipelining_enabled(module_style, wrap_async):

            # we might need remote tmp dir
            if not tmp or not 'tmp' in tmp:
                tmp = self._make_tmp_path()

            remote_module_filename = self._connection._shell.get_remote_filename(module_path)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)

        if module_style in ('old', 'non_native_want_json', 'binary'):
            # we'll also need a temp file to hold our module arguments
            args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" % remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k,v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and remote_module_path:
            remote_files = [tmp, remote_module_path]

        if args_file_path:
            remote_files.append(args_file_path)

        sudoable = True
        in_data = None
        cmd = ""

        if wrap_async:
            # configure, upload, and chmod the async_wrapper module
            (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars)
            async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
            remote_async_module_path = self._connection._shell.join_path(tmp, async_module_remote_filename)
            self._transfer_data(remote_async_module_path, async_module_data)
            remote_files.append(remote_async_module_path)

            async_limit = self._task.async
Code example #19
File: result.py Project: jinjin123/ansible-api
 def _send_result(self, result):
     display.debug(u"sending result: %s" % ([text_type(x) for x in result],))
     self._final_q.put(result)
     display.debug("done sending result")
Code example #20
 def env_prefix(self, **kwargs):
     env = self.env.copy()
     env.update(kwargs)
     return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
Code example #21
 def env_prefix(**args):
     return ' '.join([
         '%s=%s' % (k, pipes.quote(text_type(v)))
         for k, v in args.items()
     ])
Code example #22
    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and '-' not in text_type(
                all_vars[conditional]):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            conditional = templar.template(presented)
            val = conditional.strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" %
                                   original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened to be
            # looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(
                    r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(
                        str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable(
                    "error while evaluating conditional (%s): %s" %
                    (original, e))
Code example #23
File: fish.py Project: 2ndQuadrant/ansible
 def env_prefix(self, **kwargs):
     env = self.env.copy()
     env.update(kwargs)
     return ' '.join(['set -lx %s %s;' % (k, shlex_quote(text_type(v))) for k,v in env.items()])
Code example #24
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode and not self._task.always_run:
            if not self._supports_check_mode:
                raise AnsibleError(
                    "check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True

        # set no log in the module arguments, if required
        if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
            module_args['_ansible_no_log'] = True

        # set debug in the module arguments, if required
        if C.DEFAULT_DEBUG:
            module_args['_ansible_debug'] = True

        (module_style, shebang,
         module_data) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path()

        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(
                module_name)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)
            if module_style in ['old', 'non_native_want_json']:
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style == 'non_native_want_json':
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
            # deal with possible umask issues once sudo'ed to other user
            self._remote_chmod('a+r', remote_module_path)
            if args_file_path is not None:
                self._remote_chmod('a+r', args_file_path)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(
            environment_string,
            shebang,
            cmd,
            arg_path=args_file_path,
            rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd,
                                              sudoable=sudoable,
                                              in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = self._connection._shell.remove(tmp, recurse=True)
                self._low_level_execute_command(cmd2, sudoable=False)

        try:
            data = json.loads(
                self._filter_leading_non_json_lines(res.get('stdout', u'')))
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, parsed=False)
            if 'stderr' in res and res['stderr'].startswith(u'Traceback'):
                data['exception'] = res['stderr']
            else:
                data['msg'] = "MODULE FAILURE"
                data['module_stdout'] = res.get('stdout', u'')
                if 'stderr' in res:
                    data['module_stderr'] = res['stderr']

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" %
                      (module_name, module_args))
        return data
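
For 'old' style modules the arguments are flattened into a single key=value line that the module re-parses on the remote host. A hedged illustration of that format follows; the sample arguments are invented and shlex.quote stands in for pipes.quote.

from shlex import quote

module_args = {'path': '/tmp/motd', 'state': 'present'}
args_data = ''
for k, v in module_args.items():
    args_data += '%s=%s ' % (k, quote(str(v)))

print(args_data.strip())   # path=/tmp/motd state=present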
Code example #25
 def _send_result(self, result):
     debug(u"sending result: %s" % ([text_type(x) for x in result],))
     self._final_q.put(result)
     debug("done sending result")
Code example #26
File: unicode.py Project: shreddy0362/vagrant-
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
    '''Convert an object into a :class:`unicode` string

    :arg obj: Object to convert to a :class:`unicode` string.  This should
        normally be a byte :class:`str`
    :kwarg encoding: What encoding to try converting the byte :class:`str` as.
        Defaults to :term:`utf-8`
    :kwarg errors: If errors are found while decoding, perform this action.
        Defaults to ``replace`` which replaces the invalid bytes with
        a character that means the bytes were unable to be decoded.  Other
        values are the same as the error handling schemes in the `codec base
        classes
        <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
        For instance ``strict`` which raises an exception and ``ignore`` which
        simply omits the non-decodable characters.
    :kwarg nonstring: How to treat nonstring values.  Possible values are:

        :simplerepr: Attempt to call the object's "simple representation"
            method and return that value.  Python-2.3+ has two methods that
            try to return a simple representation: :meth:`object.__unicode__`
            and :meth:`object.__str__`.  We first try to get a usable value
            from :meth:`object.__unicode__`.  If that fails we try the same
            with :meth:`object.__str__`.
        :empty: Return an empty :class:`unicode` string
        :strict: Raise a :exc:`TypeError`
        :passthru: Return the object unchanged
        :repr: Attempt to return a :class:`unicode` string of the repr of the
            object

        Default is ``simplerepr``

    :raises TypeError: if :attr:`nonstring` is ``strict`` and
        a non-:class:`basestring` object is passed in or if :attr:`nonstring`
        is set to an unknown value
    :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and
        :attr:`obj` is not decodable using the given encoding
    :returns: :class:`unicode` string or the original object depending on the
        value of :attr:`nonstring`.

    Usually this should be used on a byte :class:`str` but it can take both
    byte :class:`str` and :class:`unicode` strings intelligently.  Nonstring
    objects are handled in different ways depending on the setting of the
    :attr:`nonstring` parameter.

    The default values of this function are set so as to always return
    a :class:`unicode` string and never raise an error when converting from
    a byte :class:`str` to a :class:`unicode` string.  However, when you do
    not pass validly encoded text (or a nonstring object), you may end up with
    output that you don't expect.  Be sure you understand the requirements of
    your data, not just ignore errors by passing it through this function.
    '''
    # Could use isbasestring/isunicode here but we want this code to be as
    # fast as possible
    if isinstance(obj, basestring):
        if isinstance(obj, text_type):
            return obj
        if encoding in _UTF8_ALIASES:
            return text_type(obj, 'utf-8', errors)
        if encoding in _LATIN1_ALIASES:
            return text_type(obj, 'latin-1', errors)
        return obj.decode(encoding, errors)

    if not nonstring:
        nonstring = 'simplerepr'
    if nonstring == 'empty':
        return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'simplerepr':
        try:
            simple = obj.__unicode__()
        except (AttributeError, UnicodeError):
            simple = None
        if not simple:
            try:
                simple = text_type(obj)
            except UnicodeError:
                try:
                    simple = obj.__str__()
                except (UnicodeError, AttributeError):
                    simple = u''
        if isinstance(simple, binary_type):
            return text_type(simple, encoding, errors)
        return simple
    elif nonstring in ('repr', 'strict'):
        obj_repr = repr(obj)
        if isinstance(obj_repr, binary_type):
            obj_repr = text_type(obj_repr, encoding, errors)
        if nonstring == 'repr':
            return obj_repr
        raise TypeError('to_unicode was given "%(obj)s" which is neither'
                        ' a byte string (str) or a unicode string' %
                        {'obj': obj_repr.encode(encoding, 'replace')})

    raise TypeError('nonstring value, %(param)s, is not set to a valid'
                    ' action' % {'param': nonstring})
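
On Python 3 the core of this helper boils down to bytes.decode with a configurable error handler plus a repr-style fallback for non-strings. A greatly reduced sketch; to_text here is a stand-in name, not the real Ansible helper.

def to_text(obj, encoding='utf-8', errors='replace'):
    if isinstance(obj, bytes):
        return obj.decode(encoding, errors)
    if isinstance(obj, str):
        return obj
    return str(obj)   # crude equivalent of the 'simplerepr' fallback

print(to_text(b'caf\xc3\xa9'))   # café
print(to_text(b'\xff'))          # the U+FFFD replacement character
print(to_text(42))               # 42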
Code example #27
File: __init__.py Project: matthewbga/blargotron
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = self._display.verbosity

        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path()

        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(module_name)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ['old', 'non_native_want_json']:
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k,v in iteritems(module_args):
                    args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style == 'non_native_want_json':
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
            # deal with possible umask issues once sudo'ed to other user
            self._remote_chmod('a+r', remote_module_path)
            if args_file_path is not None:
                self._remote_chmod('a+r', args_file_path)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = self._connection._shell.remove(tmp, recurse=True)
                self._low_level_execute_command(cmd2, sudoable=False)

        try:
            data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))
        except ValueError:
            # not valid json, let's try to capture the error
            data = dict(failed=True, parsed=False)
            data['msg'] = "MODULE FAILURE"
            data['module_stdout'] = res.get('stdout', u'')
            if 'stderr' in res:
                data['module_stderr'] = res['stderr']
                if res['stderr'].startswith(u'Traceback'):
                    data['exception'] = res['stderr']

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
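
The 'old' module style above is fed its arguments through a temporary file containing a single k=v string. A minimal, standalone sketch of that serialization, using shlex.quote (the Python 3 spelling of pipes.quote) and hypothetical argument names, might look like this:

import shlex

def build_kv_args(module_args):
    # quote each value so the module can later re-parse the line with shlex
    return ' '.join('%s=%s' % (k, shlex.quote(str(v))) for k, v in module_args.items())

# values containing spaces survive the round trip
print(build_kv_args({'state': 'present', 'name': 'my package'}))
# state=present name='my package'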
Code example #28
0
File: base.py  Project: wj844908240/ansible
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables, so this runs the basic type checks (from isa) as
        well as any _post_validate_<foo> functions.
        '''

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._get_base_attributes()):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = text_type(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa == 'list':
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            value = [ value ]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError("the field '%s' should be a list of %s,"
                                            " but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        else:
                            if not isinstance(value, (list, set)):
                                value = [ value ]
                            if not isinstance(value, set):
                                value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)

            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
                        " Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
            except UndefinedError as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined."
                            " The error was: %s" % (name,e), obj=self.get_ds())
Code example #29
0
    def _process_pending_results(self, iterator, one_pass=False):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

                # helper method, used to find the original host from the one
                # returned in the result/message, which has been serialized and
                # thus had some information stripped from it to speed up the
                # serialization process
                def get_original_host(host):
                    if host.name in self._inventory._hosts_cache:
                        return self._inventory._hosts_cache[host.name]
                    else:
                        return self._inventory.get_host(host.name)

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = get_original_host(task_result._host)
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
                            else:
                                iterator.mark_host_failed(host)

                            # only add the host to the failed list officially if it has
                            # been failed by the iterator
                            if iterator.is_failed(host):
                                self._tqm._failed_hosts[host.name] = True
                                self._tqm._stats.increment('failures', host.name)
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped', task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment('changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok', task_result)

                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff', task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)

                elif result[0] == 'add_group':
                    host = get_original_host(result[1])
                    result_item = result[2]
                    self._add_group(host, result_item)

                elif result[0] == 'notify_handler':
                    task_result  = result[1]
                    handler_name = result[2]

                    original_host = get_original_host(task_result._host)
                    original_task = iterator.get_original_task(original_host, task_result._task)
                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []

                    if original_host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(original_host)
                        display.vv("NOTIFIED HANDLER %s" % (handler_name,))

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host      = get_original_host(result[1])
                    task      = result[2]
                    var_value = wrap_var(result[3])
                    var_name  = task.register

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = get_original_host(result[1])
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        if item is not None:
                            task_vars['item'] = item
                        templar = Templar(loader=self._loader, variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [actual_host]

                    if result[0] == 'set_host_var':
                        var_name  = result[4]
                        var_value = result[5]
                        for target_host in host_list:
                            self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        for target_host in host_list:
                            if task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, facts)
                            else:
                                self._variable_manager.set_host_facts(target_host, facts)
                elif result[0].startswith('v2_playbook_item') or result[0] == 'v2_playbook_retry':
                    self._tqm.send_callback(result[0], result[1])
                else:
                    raise AnsibleError("unknown result message received: %s" % result[0])

            except Queue.Empty:
                time.sleep(0.0001)

            if one_pass:
                break

        return ret_results
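
The control flow above is essentially a non-blocking drain of the results queue followed by a dispatch on the message tag. A toy model of that pattern (the queue contents and handlers here are made up, not the real TaskQueueManager messages) might be:

import queue

def drain(final_q, handlers):
    # pull everything currently on the queue and dispatch on the message tag
    results = []
    while not final_q.empty():
        try:
            msg = final_q.get(block=False)
        except queue.Empty:
            break
        tag, payload = msg[0], msg[1:]
        if tag not in handlers:
            raise RuntimeError("unknown result message received: %s" % tag)
        handlers[tag](*payload)
        results.append(msg)
    return results

stats = {'ok': 0}
q = queue.Queue()
q.put(('host_task_ok', {'host': 'web1', 'changed': False}))
drain(q, {'host_task_ok': lambda result: stats.update(ok=stats['ok'] + 1)})
print(stats)  # {'ok': 1}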
Code example #30
0
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # give the module information about its name
        module_args['_ansible_module_name'] = module_name

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp and \
         (module_style != 'new' or \
         not self._connection.has_pipelining or \
         not self._play_context.pipelining or \
         C.DEFAULT_KEEP_REMOTE_FILES or \
         self._play_context.become_method == 'su'):
            remote_module_filename = self._connection._shell.get_remote_filename(module_path)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" % remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd, sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning('Error deleting remote temporary files (rc: {0}, stderr: {1})'.format(tmp_rm_res.get('rc'),
                        tmp_rm_res.get('stderr', 'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
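
The in_data branch near the end decides whether the module is streamed over stdin (pipelining) or executed from the remote tmp path. A tiny helper that mirrors just that condition, with placeholder parameters standing in for the connection and play-context flags, could look like:

def use_pipelining(module_style, connection_has_pipelining, pipelining_enabled, keep_remote_files):
    # only new-style modules can be streamed over stdin, and only when
    # nothing (config or debugging) forces an on-disk copy of the module
    return (connection_has_pipelining and pipelining_enabled
            and not keep_remote_files and module_style == 'new')

print(use_pipelining('new', True, True, False))     # True  -> in_data = module_data
print(use_pipelining('binary', True, True, False))  # False -> run from remote_module_path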
Code example #31
0
File: linear.py  Project: RajeevNambiar/temp
    def run(self, iterator, play_context):
        """
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        """

        # iterate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                display.debug("getting the remaining hosts for this loop")
                hosts_left = [
                    host
                    for host in self._inventory.get_hosts(iterator._play.hosts)
                    if host.name not in self._tqm._unreachable_hosts
                ]
                display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest = False
                choose_step = True

                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, "BYPASS_HOST_LOOP", False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if (
                            task._role._metadata is None
                            or task._role._metadata
                            and not task._role._metadata.allow_duplicates
                        ):
                            display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == "meta":
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader, play=iterator._play, host=host, task=task
                        )
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        if not callback_sent:
                            display.debug(
                                "sending task start callback, copying the task so we can template it temporarily"
                            )
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = text_type(templar.template(task.name, fail_on_undefined=False))
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback("v2_playbook_on_task_start", task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator, one_pass=True)

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug("done queuing things up, now waiting for results queue to drain")
                results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    display.debug("out of hosts to run on")
                    self._tqm.send_callback("v2_playbook_on_no_hosts_remaining")
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager,
                    )
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    display.debug("we have included files to process")
                    noop_task = Task()
                    noop_task.action = "meta"
                    noop_task.args["_raw_params"] = "noop"
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" % included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                            display.debug("iterating over new_blocks loaded from include file")
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    loader=self._loader, play=iterator._play, task=included_file._task
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                display.debug("done filtering new block on tags")

                                noop_block = Block(parent_block=task._block)
                                noop_block.block = [noop_task for t in new_block.block]
                                noop_block.always = [noop_task for t in new_block.always]
                                noop_block.rescue = [noop_task for t in new_block.rescue]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug("done iterating over new_blocks loaded from include file")

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(e, wrap_text=False)
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug("extending task lists for all hosts with included blocks")

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")
            except (IOError, EOFError) as e:
                display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
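
The lockstep behaviour the docstring describes can be reduced to a few lines: every host gets the current task queued before any host advances to the next one. The following toy sketch uses placeholder callables rather than the real _queue_task/_wait_on_pending_results methods:

def run_lockstep(hosts, tasks, queue_task, wait_for_results):
    for task in tasks:            # one task at a time ...
        for host in hosts:        # ... queued for every host
            queue_task(host, task)
        wait_for_results()        # drain the queue before the next task

log = []
run_lockstep(['web1', 'web2'], ['ping', 'setup'],
             lambda host, task: log.append((task, host)),
             lambda: log.append('--- drained ---'))
print(log)
# [('ping', 'web1'), ('ping', 'web2'), '--- drained ---',
#  ('setup', 'web1'), ('setup', 'web2'), '--- drained ---']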
Code example #32
0
    def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True, static_vars=[''], cache=True, bare_deprecated=True):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine.
        '''

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        # Don't template unsafe variables, instead drop them back down to their constituent type.
        if hasattr(variable, '__UNSAFE__'):
            if isinstance(variable, text_type):
                return self._clean_data(variable)
            else:
                # Do we need to convert these into text_type as well?
                # return self._clean_data(to_text(variable._obj, nonstring='passthru'))
                return self._clean_data(variable._obj)

        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable, bare_deprecated=bare_deprecated)

            if isinstance(variable, string_types):
                result = variable

                if self._contains_vars(variable):
                    # Check to see if the string we are trying to render is just referencing a single
                    # var.  In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION

                    # Using a cache in order to prevent template calls with already templated variables
                    sha1_hash = None
                    if cache:
                        variable_hash = sha1(text_type(variable).encode('utf-8'))
                        options_hash  = sha1((text_type(preserve_trailing_newlines) + text_type(escape_backslashes) + text_type(fail_on_undefined) + text_type(overrides)).encode('utf-8'))
                        sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
                    if cache and sha1_hash in self._cached_result:
                        result = self._cached_result[sha1_hash]
                    else:
                        result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides)
                        if convert_data and not self._no_type_regex.match(variable):
                            # if this looks like a dictionary or list, convert it to such using the safe_eval method
                            if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                                    result.startswith("[") or result in ("True", "False"):
                                eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
                                if eval_results[1] is None:
                                    result = eval_results[0]
                                else:
                                    # FIXME: if the safe_eval raised an error, should we do something with it?
                                    pass

                        # we only cache in the case where we have a single variable
                        # name, to make sure we're not putting things which may otherwise
                        # be dynamic in the cache (filters, lookups, etc.)
                        if cache:
                            self._cached_result[sha1_hash] = result

                return result

            elif isinstance(variable, (list, tuple)):
                return [self.template(v, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
            elif isinstance(variable, dict):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    if k not in static_vars:
                        d[k] = self.template(variable[k], preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
                    else:
                        d[k] = variable[k]
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable
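
The cache used above keys the rendered result on both the template text and the rendering options, so the same string rendered with different options never collides. A standalone sketch of that key construction (a hypothetical helper, not the real Templar method) is:

from hashlib import sha1

def cache_key(variable, preserve_trailing_newlines=True, escape_backslashes=True,
              fail_on_undefined=True, overrides=None):
    variable_hash = sha1(str(variable).encode('utf-8'))
    options_hash = sha1((str(preserve_trailing_newlines) + str(escape_backslashes) +
                         str(fail_on_undefined) + str(overrides)).encode('utf-8'))
    return variable_hash.hexdigest() + options_hash.hexdigest()

# the same template text rendered with different options gets a different key
assert cache_key('{{ foo }}') != cache_key('{{ foo }}', fail_on_undefined=False)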
Code example #33
0
File: __init__.py  Project: Davidshid/ansible
    def _process_pending_results(self, iterator):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" %
                              ([text_type(x) for x in result], ))

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed',
                                 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = task_result._host
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                [
                                    iterator.mark_host_failed(h)
                                    for h in self._inventory.get_hosts(
                                        iterator._play.hosts) if h.name not in
                                    self._tqm._unreachable_hosts
                                ]
                            else:
                                iterator.mark_host_failed(host)
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback(
                            'v2_runner_on_failed',
                            task_result,
                            ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable',
                                                task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped',
                                                task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment(
                                    'changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok',
                                                    task_result)

                        if self._diff and 'diff' in task_result._result:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)
                    self._tqm._hostvars_manager.hostvars().set_inventory(
                        self._inventory)

                elif result[0] == 'add_group':
                    host = result[1]
                    result_item = result[2]
                    self._add_group(host, result_item)
                    self._tqm._hostvars_manager.hostvars().set_inventory(
                        self._inventory)

                elif result[0] == 'notify_handler':
                    task_result = result[1]
                    handler_name = result[2]

                    original_task = iterator.get_original_task(
                        task_result._host, task_result._task)
                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []

                    if task_result._host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(task_result._host)
                        display.vv("NOTIFIED HANDLER %s" % (handler_name, ))

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host = result[1]
                    task = result[2]
                    var_value = wrap_var(result[3])
                    var_name = task.register

                    if task.run_once:
                        host_list = [
                            host for host in self._inventory.get_hosts(
                                iterator._play.hosts)
                            if host.name not in self._tqm._unreachable_hosts
                        ]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(
                            target_host, {var_name: var_value})
                        self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = result[1]
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        if item is not None:
                            task_vars['item'] = item
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if result[0] == 'set_host_var':
                        var_name = result[4]
                        var_value = result[5]

                        if task.run_once:
                            host_list = [
                                host for host in self._inventory.get_hosts(
                                    iterator._play.hosts) if host.name not in
                                self._tqm._unreachable_hosts
                            ]
                        else:
                            host_list = [actual_host]

                        for target_host in host_list:
                            self._variable_manager.set_host_variable(
                                target_host, var_name, var_value)
                            self._tqm._hostvars_manager.hostvars().set_host_variable(target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(
                                actual_host, facts)
                            self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(actual_host, facts)
                        else:
                            self._variable_manager.set_host_facts(
                                actual_host, facts)
                            self._tqm._hostvars_manager.hostvars().set_host_facts(actual_host, facts)

                else:
                    raise AnsibleError("unknown result message received: %s" %
                                       result[0])
            except Queue.Empty:
                time.sleep(0.0001)

        return ret_results
Code example #34
0
File: free.py  Project: KMK-ONLINE/ansible
    def run(self, iterator, play_context):
        '''
        The "free" strategy is a bit more complex, in that it allows tasks to
        be sent to hosts as quickly as they can be processed. This means that
        some hosts may finish very quickly if the tasks they run result in
        little or no work compared to other systems.

        The algorithm used here also tries to be more "fair" when iterating
        through hosts by remembering the last host in the list to be given a task
        and starting the search from there as opposed to the top of the hosts
        list again, which would end up favoring hosts near the beginning of the
        list.
        '''

        # the last host to be given a task
        last_host = 0

        result = True

        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
            if len(hosts_left) == 0:
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            work_to_do = False        # assume we have no more work to do
            starting_host = last_host # save current position so we know when we've
                                      # looped back around and need to break

            # try and find an unblocked host with a task to run
            host_results = []
            while True:
                host = hosts_left[last_host]
                display.debug("next free host: %s" % host)
                host_name = host.get_name()

                # peek at the next task for the host, to see if there's
                # anything to do for this host
                (state, task) = iterator.get_next_task_for_host(host, peek=True)
                display.debug("free host state: %s" % state)
                display.debug("free host task: %s" % task)
                if host_name not in self._tqm._unreachable_hosts and task:

                    # set the flag so the outer loop knows we've still found
                    # some work which needs to be done
                    work_to_do = True

                    display.debug("this host has work to do")

                    # check to see if this host is blocked (still executing a previous task)
                    if host_name not in self._blocked_hosts or not self._blocked_hosts[host_name]:
                        # pop the task, mark the host blocked, and queue it
                        self._blocked_hosts[host_name] = True
                        (state, task) = iterator.get_next_task_for_host(host)

                        try:
                            action = action_loader.get(task.action, class_only=True)
                        except KeyError:
                            # we don't care here, because the action may simply not have a
                            # corresponding action plugin
                            action = None

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        try:
                            task.name = text_type(templar.template(task.name, fail_on_undefined=False))
                            display.debug("done templating")
                        except:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                            pass

                        run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
                        if run_once:
                            if action and getattr(action, 'BYPASS_HOST_LOOP', False):
                                raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy " \
                                                   "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
                            else:
                                display.warning("Using run_once with the free strategy is not currently supported. This task will still be " \
                                                "executed for every host in the inventory list.")

                        # check to see if this task should be skipped, due to it being a member of a
                        # role which has already run (and whether that role allows duplicate execution)
                        if task._role and task._role.has_run(host):
                            # If there is no metadata, the default behavior is to not allow duplicates,
                            # if there is metadata, check to see if the allow_duplicates flag was set to true
                            if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                                display.debug("'%s' skipped because role has already run" % task)
                                del self._blocked_hosts[host_name]
                                continue

                        if task.action == 'meta':
                            self._execute_meta(task, play_context, iterator)
                            self._blocked_hosts[host_name] = False
                        else:
                            # handle step if needed, skip meta actions as they are used internally
                            if not self._step or self._take_step(task, host_name):
                                if task.any_errors_fatal:
                                    display.warning("Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host")
                                self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                                self._queue_task(host, task, task_vars, play_context)
                                del task_vars
                    else:
                        display.debug("%s is blocked, skipping for now" % host_name)

                # move on to the next host and make sure we
                # haven't gone past the end of our hosts list
                last_host += 1
                if last_host > len(hosts_left) - 1:
                    last_host = 0

                # if we've looped around back to the start, break out
                if last_host == starting_host:
                    break

            results = self._process_pending_results(iterator)
            host_results.extend(results)

            try:
                included_files = IncludedFile.process_include_results(
                    host_results,
                    self._tqm,
                    iterator=iterator,
                    inventory=self._inventory,
                    loader=self._loader,
                    variable_manager=self._variable_manager
                )
            except AnsibleError as e:
                return False

            if len(included_files) > 0:
                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    display.debug("collecting new blocks for %s" % included_file)
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        display.warning(str(e))
                        continue

                    for new_block in new_blocks:
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=included_file._task)
                        final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                        for host in hosts_left:
                            if host in included_file._hosts:
                                all_blocks[host].append(final_block)
                    display.debug("done collecting new blocks for %s" % included_file)

                display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])
                display.debug("done adding collected blocks to iterator")

            # pause briefly so we don't spin lock
            time.sleep(0.001)

        # collect all the final results
        results = self._wait_on_pending_results(iterator)

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
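
A brief aside on the duplicate-role check above: a task belonging to a role that has already run on the host is skipped unless the role's metadata explicitly allows duplicates. A minimal standalone sketch of that decision, using simplified stand-in classes (RoleMeta, Role) rather than Ansible's real objects:

    class RoleMeta:
        def __init__(self, allow_duplicates=False):
            self.allow_duplicates = allow_duplicates

    class Role:
        def __init__(self, metadata=None, hosts_run=()):
            self._metadata = metadata
            self._hosts_run = set(hosts_run)

        def has_run(self, host):
            return host in self._hosts_run

    def should_skip(task_role, host):
        # Skip when the role already ran on this host and duplicates are not
        # explicitly allowed via metadata (no metadata means "do not repeat").
        if task_role is None or not task_role.has_run(host):
            return False
        meta = task_role._metadata
        return meta is None or not meta.allow_duplicates

    assert should_skip(Role(hosts_run=['web1']), 'web1') is True
    assert should_skip(Role(RoleMeta(allow_duplicates=True), ['web1']), 'web1') is False
    assert should_skip(Role(hosts_run=['web1']), 'db1') is False
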
Code Example #35
File: free.py  Project: vkvns/ansible
    def run(self, iterator, play_context):
        '''
        The "free" strategy is a bit more complex, in that it allows tasks to
        be sent to hosts as quickly as they can be processed. This means that
        some hosts may finish very quickly if run tasks result in little or no
        work being done versus other systems.

        The algorithm used here also tries to be more "fair" when iterating
        through hosts by remembering the last host in the list to be given a task
        and starting the search from there as opposed to the top of the hosts
        list again, which would end up favoring hosts near the beginning of the
        list.
        '''

        # the last host to be given a task
        last_host = 0

        result = True

        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            hosts_left = [
                host
                for host in self._inventory.get_hosts(iterator._play.hosts)
                if host.name not in self._tqm._unreachable_hosts
            ]
            if len(hosts_left) == 0:
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            work_to_do = False  # assume we have no more work to do
            starting_host = last_host  # save current position so we know when we've
            # looped back around and need to break

            # try and find an unblocked host with a task to run
            host_results = []
            while True:
                host = hosts_left[last_host]
                display.debug("next free host: %s" % host)
                host_name = host.get_name()

                # peek at the next task for the host, to see if there's
                # anything to do for this host
                (state, task) = iterator.get_next_task_for_host(host,
                                                                peek=True)
                display.debug("free host state: %s" % state)
                display.debug("free host task: %s" % task)
                if host_name not in self._tqm._unreachable_hosts and task:

                    # set the flag so the outer loop knows we've still found
                    # some work which needs to be done
                    work_to_do = True

                    display.debug("this host has work to do")

                    # check to see if this host is blocked (still executing a previous task)
                    if host_name not in self._blocked_hosts or not self._blocked_hosts[
                            host_name]:
                        # pop the task, mark the host blocked, and queue it
                        self._blocked_hosts[host_name] = True
                        (state, task) = iterator.get_next_task_for_host(host)

                        try:
                            action = action_loader.get(task.action,
                                                       class_only=True)
                        except KeyError:
                            # we don't care here, because the action may simply not have a
                            # corresponding action plugin
                            action = None

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        display.debug("done getting variables")

                        try:
                            task.name = text_type(
                                templar.template(task.name,
                                                 fail_on_undefined=False))
                            display.debug("done templating")
                        except:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                            pass

                        run_once = templar.template(
                            task.run_once) or action and getattr(
                                action, 'BYPASS_HOST_LOOP', False)
                        if run_once:
                            if action and getattr(action, 'BYPASS_HOST_LOOP',
                                                  False):
                                raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy " \
                                                   "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
                            else:
                                display.warning("Using run_once with the free strategy is not currently supported. This task will still be " \
                                                "executed for every host in the inventory list.")

                        # check to see if this task should be skipped, due to it being a member of a
                        # role which has already run (and whether that role allows duplicate execution)
                        if task._role and task._role.has_run(host):
                            # If there is no metadata, the default behavior is to not allow duplicates,
                            # if there is metadata, check to see if the allow_duplicates flag was set to true
                            if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                                display.debug(
                                    "'%s' skipped because role has already run"
                                    % task)
                                del self._blocked_hosts[host_name]
                                continue

                        if task.action == 'meta':
                            self._execute_meta(task, play_context, iterator)
                            self._blocked_hosts[host_name] = False
                        else:
                            # handle step if needed, skip meta actions as they are used internally
                            if not self._step or self._take_step(
                                    task, host_name):
                                if task.any_errors_fatal:
                                    display.warning(
                                        "Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host"
                                    )
                                self._tqm.send_callback(
                                    'v2_playbook_on_task_start',
                                    task,
                                    is_conditional=False)
                                self._queue_task(host, task, task_vars,
                                                 play_context)
                                del task_vars
                    else:
                        display.debug("%s is blocked, skipping for now" %
                                      host_name)

                # move on to the next host and make sure we
                # haven't gone past the end of our hosts list
                last_host += 1
                if last_host > len(hosts_left) - 1:
                    last_host = 0

                # if we've looped around back to the start, break out
                if last_host == starting_host:
                    break

            results = self._process_pending_results(iterator)
            host_results.extend(results)

            try:
                included_files = IncludedFile.process_include_results(
                    host_results,
                    self._tqm,
                    iterator=iterator,
                    inventory=self._inventory,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AnsibleError as e:
                return False

            if len(included_files) > 0:
                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    display.debug("collecting new blocks for %s" %
                                  included_file)
                    try:
                        new_blocks = self._load_included_file(
                            included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        display.warning(str(e))
                        continue

                    for new_block in new_blocks:
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=included_file._task)
                        final_block = new_block.filter_tagged_tasks(
                            play_context, task_vars)
                        for host in hosts_left:
                            if host in included_file._hosts:
                                all_blocks[host].append(final_block)
                    display.debug("done collecting new blocks for %s" %
                                  included_file)

                display.debug(
                    "adding all collected blocks from %d included file(s) to iterator"
                    % len(included_files))
                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])
                display.debug("done adding collected blocks to iterator")

            # pause briefly so we don't spin lock
            time.sleep(0.001)

        # collect all the final results
        results = self._wait_on_pending_results(iterator)

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
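
The docstring's fairness claim comes down to the last_host/starting_host bookkeeping: each scan for an unblocked host resumes where the previous one left off and wraps around at most once. A simplified, standalone sketch of that wrap-around index arithmetic (the real loop keeps advancing even after queuing work, but the wrap and termination conditions are the same):

    def next_unblocked_host(hosts, blocked, last_host):
        # Scan from last_host, wrapping around once; return (index, host) for
        # the first unblocked host, or (index, None) if everything is blocked.
        starting_host = last_host
        while True:
            host = hosts[last_host]
            if not blocked.get(host, False):
                return last_host, host
            last_host += 1
            if last_host > len(hosts) - 1:
                last_host = 0
            if last_host == starting_host:
                return last_host, None

    hosts = ['a', 'b', 'c']
    assert next_unblocked_host(hosts, {'a': True}, 0) == (1, 'b')
    assert next_unblocked_host(hosts, {'a': True, 'b': True, 'c': True}, 1)[1] is None
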
Code Example #36
File: __init__.py  Project: wenlien/ansible
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # Get the connection user for permission checks
        remote_user = task_vars.get(
            'ansible_ssh_user') or self._play_context.remote_user

        self._update_module_args(module_args, task_vars)

        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp and \
         (module_style != 'new' or \
         not self._connection.has_pipelining or \
         not self._play_context.pipelining or \
         C.DEFAULT_KEEP_REMOTE_FILES or \
         self._play_context.become_method == 'su'):
            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(
            environment_string,
            shebang,
            cmd,
            arg_path=args_file_path,
            rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd,
                                              sudoable=sudoable,
                                              in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd,
                                                             sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning(
                        'Error deleting remote temporary files (rc: {0}, stderr: {1})'
                        .format(
                            tmp_rm_res.get('rc'),
                            tmp_rm_res.get('stderr',
                                           'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" %
                      (module_name, module_args))
        return data
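
Two related checks in this function decide whether the module source has to land on the remote filesystem at all: the temp-file condition near the top and the pipelining condition that sets in_data. A condensed sketch that folds both into a single decision; the argument names are stand-ins for the connection and play-context attributes and the KEEP_REMOTE_FILES constant:

    def choose_transport(module_style, connection_pipelining, play_pipelining,
                         keep_remote_files, become_method):
        # 'pipeline': feed the module source over stdin, nothing written remotely.
        # 'tmpfile' : write the module (and possibly an args file) to the remote
        #             temp directory and execute it from there.
        can_pipeline = (module_style == 'new'
                        and connection_pipelining
                        and play_pipelining
                        and not keep_remote_files
                        and become_method != 'su')
        return 'pipeline' if can_pipeline else 'tmpfile'

    assert choose_transport('new', True, True, False, 'sudo') == 'pipeline'
    assert choose_transport('new', True, True, False, 'su') == 'tmpfile'    # su cannot pipeline
    assert choose_transport('old', True, True, False, 'sudo') == 'tmpfile'  # old-style needs a file
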
Code Example #37
 def env_prefix(**args):
     return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in args.items()])
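
This variant emits Bourne-style K=V prefixes rather than the fish-shell set -lx form shown earlier. A runnable Python 3 sketch of the same idea, with shlex.quote standing in for pipes.quote and plain str for text_type:

    from shlex import quote

    def env_prefix(**kwargs):
        # Build a "K=V K2=V2" prefix suitable for prepending to a shell command.
        return ' '.join('%s=%s' % (k, quote(str(v))) for k, v in kwargs.items())

    print(env_prefix(LANG='C', HTTP_PROXY='http://proxy:3128') + ' /usr/bin/python /tmp/module.py')
    # LANG=C HTTP_PROXY=http://proxy:3128 /usr/bin/python /tmp/module.py
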
Code Example #38
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                self._display.debug(
                    "getting the remaining hosts for this loop")
                hosts_left = [
                    host
                    for host in self._inventory.get_hosts(iterator._play.hosts)
                    if host.name not in self._tqm._unreachable_hosts
                ]
                self._display.debug(
                    "done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest = False
                choose_step = True

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action,
                                                   class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP',
                                                    False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            self._display.debug(
                                "'%s' skipped because role has already run" %
                                task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        self._display.debug("done getting variables")

                        if not callback_sent:
                            display.debug(
                                "sending task start callback, copying the task so we can template it temporarily"
                            )
                            saved_name = task.name
                            display.debug(
                                "done copying, going to template now")
                            try:
                                task.name = text_type(
                                    templar.template(task.name,
                                                     fail_on_undefined=False))
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug(
                                    "templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback(
                                'v2_playbook_on_task_start',
                                task,
                                is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                # go to next host/task group
                if skip_rest:
                    continue

                self._display.debug(
                    "done queuing things up, now waiting for results queue to drain"
                )
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    self._display.debug("out of hosts to run on")
                    self._tqm.send_callback(
                        'v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        loader=self._loader,
                        variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(
                                included_file, iterator=iterator)

                            for new_block in new_blocks:
                                noop_block = Block(parent_block=task._block)
                                noop_block.block = [
                                    noop_task for t in new_block.block
                                ]
                                noop_block.always = [
                                    noop_task for t in new_block.always
                                ]
                                noop_block.rescue = [
                                    noop_task for t in new_block.rescue
                                ]
                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        task_vars = self._variable_manager.get_vars(
                                            loader=self._loader,
                                            play=iterator._play,
                                            host=host,
                                            task=included_file._task)
                                        final_block = new_block.filter_tagged_tasks(
                                            play_context, task_vars)
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            self._display.error(e, wrap_text=False)
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                self._display.debug("results queue empty")
            except (IOError, EOFError) as e:
                self._display.debug("got IOError/EOFError in task loop: %s" %
                                    e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
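
The equal-length noop padding mentioned in the comments is what keeps excluded hosts in lockstep with hosts that ran the include. A small standalone sketch of that padding step, with plain strings standing in for Task and Block objects:

    def pad_included_blocks(hosts, included_hosts, real_tasks, noop_task='noop'):
        # Hosts that ran the include get the real task list; everyone else gets
        # an equal-length list of noop tasks, so all hosts advance in lockstep.
        return {
            host: list(real_tasks) if host in included_hosts else [noop_task] * len(real_tasks)
            for host in hosts
        }

    plan = pad_included_blocks(['web1', 'web2', 'db1'],
                               included_hosts={'web1', 'web2'},
                               real_tasks=['install pkg', 'write config'])
    assert plan['db1'] == ['noop', 'noop']
    assert all(len(tasks) == 2 for tasks in plan.values())
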
Code Example #39
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = self._tqm.RUN_OK
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                display.debug("getting the remaining hosts for this loop")
                hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest   = False
                choose_step = True

                # flag set if task is set to any_errors_fatal
                any_errors_fatal = False

                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                        if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                            any_errors_fatal = True

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = text_type(templar.template(task.name, fail_on_undefined=False))
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)
                        del task_vars

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator, one_pass=True)

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug("done queuing things up, now waiting for results queue to drain")
                results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager
                    )
                except AnsibleError as e:
                    # this is a fatal error, so we abort here regardless of block state
                    return self._tqm.RUN_ERROR

                include_failure = False
                if len(included_files) > 0:
                    display.debug("we have included files to process")
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" % included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                            display.debug("iterating over new_blocks loaded from include file")
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    loader=self._loader,
                                    play=iterator._play,
                                    task=included_file._task,
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                display.debug("done filtering new block on tags")

                                noop_block = Block(parent_block=task._parent)
                                noop_block.block  = [noop_task for t in new_block.block]
                                noop_block.always = [noop_task for t in new_block.always]
                                noop_block.rescue = [noop_task for t in new_block.rescue]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug("done iterating over new_blocks loaded from include file")

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(to_unicode(e), wrap_text=False)
                            include_failure = True
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug("extending task lists for all hosts with included blocks")

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")

                display.debug("checking for any_errors_fatal")
                failed_hosts = []
                unreachable_hosts = []
                for res in results:
                    if res.is_failed():
                        failed_hosts.append(res._host.name)
                    elif res.is_unreachable():
                        unreachable_hosts.append(res._host.name)

                # if any_errors_fatal and we had an error, mark all hosts as failed
                if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                    for host in hosts_left:
                        (s, _) = iterator.get_next_task_for_host(host, peek=True)
                        if s.run_state != iterator.ITERATING_RESCUE:
                            self._tqm._failed_hosts[host.name] = True
                            result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for any_errors_fatal")

                display.debug("checking for max_fail_percentage")
                if iterator._play.max_fail_percentage is not None and len(results) > 0:
                    percentage = iterator._play.max_fail_percentage / 100.0

                    if (len(self._tqm._failed_hosts) / len(results)) > percentage:
                        for host in hosts_left:
                            # don't double-mark hosts, or the iterator will potentially
                            # fail them out of the rescue/always states
                            if host.name not in failed_hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for max_fail_percentage")

            except (IOError, EOFError) as e:
                display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return self._tqm.RUN_UNKNOWN_ERROR

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
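
The max_fail_percentage block near the end compares the share of failed hosts against the play's threshold and only aborts when that share is strictly greater. A standalone sketch of just that arithmetic; failed_hosts and results mirror self._tqm._failed_hosts and the batch of results used in the original comparison:

    def exceeds_max_fail_percentage(failed_hosts, results, max_fail_percentage):
        # Strict '>' comparison: exactly hitting the threshold does not abort.
        if max_fail_percentage is None or not results:
            return False
        return (len(failed_hosts) / len(results)) > (max_fail_percentage / 100.0)

    assert exceeds_max_fail_percentage({'a', 'b'}, ['r1', 'r2', 'r3', 'r4'], 50) is False      # 50% == 50%
    assert exceeds_max_fail_percentage({'a', 'b', 'c'}, ['r1', 'r2', 'r3', 'r4'], 50) is True  # 75% > 50%
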
Code Example #40
File: unicode.py  Project: RajeevNambiar/temp
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
    '''Convert an object into a :class:`unicode` string

    :arg obj: Object to convert to a :class:`unicode` string.  This should
        normally be a byte :class:`str`
    :kwarg encoding: What encoding to try converting the byte :class:`str` as.
        Defaults to :term:`utf-8`
    :kwarg errors: If errors are found while decoding, perform this action.
        Defaults to ``replace`` which replaces the invalid bytes with
        a character that means the bytes were unable to be decoded.  Other
        values are the same as the error handling schemes in the `codec base
        classes
        <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
        For instance ``strict`` which raises an exception and ``ignore`` which
        simply omits the non-decodable characters.
    :kwarg nonstring: How to treat nonstring values.  Possible values are:

        :simplerepr: Attempt to call the object's "simple representation"
            method and return that value.  Python-2.3+ has two methods that
            try to return a simple representation: :meth:`object.__unicode__`
            and :meth:`object.__str__`.  We first try to get a usable value
            from :meth:`object.__unicode__`.  If that fails we try the same
            with :meth:`object.__str__`.
        :empty: Return an empty :class:`unicode` string
        :strict: Raise a :exc:`TypeError`
        :passthru: Return the object unchanged
        :repr: Attempt to return a :class:`unicode` string of the repr of the
            object

        Default is ``simplerepr``

    :raises TypeError: if :attr:`nonstring` is ``strict`` and
        a non-:class:`basestring` object is passed in or if :attr:`nonstring`
        is set to an unknown value
    :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and
        :attr:`obj` is not decodable using the given encoding
    :returns: :class:`unicode` string or the original object depending on the
        value of :attr:`nonstring`.

    Usually this should be used on a byte :class:`str` but it can take both
    byte :class:`str` and :class:`unicode` strings intelligently.  Nonstring
    objects are handled in different ways depending on the setting of the
    :attr:`nonstring` parameter.

    The default values of this function are set so as to always return
    a :class:`unicode` string and never raise an error when converting from
    a byte :class:`str` to a :class:`unicode` string.  However, when you do
    not pass validly encoded text (or a nonstring object), you may end up with
    output that you don't expect.  Be sure you understand the requirements of
    your data, not just ignore errors by passing it through this function.
    '''
    # Could use isbasestring/isunicode here but we want this code to be as
    # fast as possible
    if isinstance(obj, basestring):
        if isinstance(obj, text_type):
            return obj
        if encoding in _UTF8_ALIASES:
            return text_type(obj, 'utf-8', errors)
        if encoding in _LATIN1_ALIASES:
            return text_type(obj, 'latin-1', errors)
        return obj.decode(encoding, errors)

    if not nonstring:
        nonstring = 'simplerepr'
    if nonstring == 'empty':
        return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'simplerepr':
        try:
            simple = obj.__unicode__()
        except (AttributeError, UnicodeError):
            simple = None
        if not simple:
            try:
                simple = text_type(obj)
            except UnicodeError:
                try:
                    simple = obj.__str__()
                except (UnicodeError, AttributeError):
                    simple = u''
        if isinstance(simple, binary_type):
            return text_type(simple, encoding, errors)
        return simple
    elif nonstring in ('repr', 'strict'):
        obj_repr = repr(obj)
        if isinstance(obj_repr, binary_type):
            obj_repr = text_type(obj_repr, encoding, errors)
        if nonstring == 'repr':
            return obj_repr
        raise TypeError('to_unicode was given "%(obj)s" which is neither'
            ' a byte string (str) nor a unicode string' %
            {'obj': obj_repr.encode(encoding, 'replace')})

    raise TypeError('nonstring value, %(param)s, is not set to a valid'
        ' action' % {'param': nonstring})
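
Because the helper above leans on Python 2 names (basestring, unicode, the six-style text_type/binary_type), a Python 3-flavored sketch of the same decision tree can make the control flow easier to follow. This is an illustration only, not the function Ansible ships:

    def to_text(obj, encoding='utf-8', errors='replace', nonstring='simplerepr'):
        # String inputs: decode bytes, pass str through untouched.
        if isinstance(obj, str):
            return obj
        if isinstance(obj, bytes):
            return obj.decode(encoding, errors)

        # Non-string inputs: behaviour is selected by the nonstring keyword.
        if nonstring == 'empty':
            return ''
        if nonstring == 'passthru':
            return obj
        if nonstring == 'simplerepr':
            try:
                return str(obj)
            except Exception:
                return ''
        if nonstring in ('repr', 'strict'):
            obj_repr = repr(obj)
            if nonstring == 'repr':
                return obj_repr
            raise TypeError('to_text was given %r, which is neither bytes nor str' % obj_repr)
        raise TypeError('nonstring value %r is not a valid action' % nonstring)

    assert to_text(b'caf\xc3\xa9') == 'café'
    assert to_text(None, nonstring='empty') == ''
    assert to_text(3.14) == '3.14'
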
Code Example #41
    def _process_pending_results(self, iterator, one_pass=False):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

                # helper method, used to find the original host from the one
                # returned in the result/message, which has been serialized and
                # thus had some information stripped from it to speed up the
                # serialization process
                def get_original_host(host):
                    if host.name in self._inventory._hosts_cache:
                        return self._inventory._hosts_cache[host.name]
                    else:
                        return self._inventory.get_host(host.name)

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = get_original_host(task_result._host)
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
                            else:
                                iterator.mark_host_failed(host)

                            # only add the host to the failed list officially if it has
                            # been failed by the iterator
                            if iterator.is_failed(host):
                                self._tqm._failed_hosts[host.name] = True
                                self._tqm._stats.increment('failures', host.name)
                            else:
                                # otherwise, we grab the current state and if we're iterating on
                                # the rescue portion of a block then we save the failed task in a
                                # special var for use within the rescue/always
                                state, _ = iterator.get_next_task_for_host(host, peek=True)
                                if state.run_state == iterator.ITERATING_RESCUE:
                                    original_task = iterator.get_original_task(host, task)
                                    self._variable_manager.set_nonpersistent_facts(
                                        host,
                                        dict(
                                            ansible_failed_task=original_task.serialize(),
                                            ansible_failed_result=task_result._result,
                                        ),
                                    )
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped', task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment('changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok', task_result)

                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff', task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)

                elif result[0] == 'add_group':
                    host = get_original_host(result[1])
                    result_item = result[2]
                    self._add_group(host, result_item)

                elif result[0] == 'notify_handler':
                    task_result  = result[1]
                    handler_name = result[2]

                    original_host = get_original_host(task_result._host)
                    original_task = iterator.get_original_task(original_host, task_result._task)

                    def search_handler_blocks(handler_name, handler_blocks):
                        for handler_block in handler_blocks:
                            for handler_task in handler_block.block:
                                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                                templar = Templar(loader=self._loader, variables=handler_vars)
                                try:
                                    # first we check with the full result of get_name(), which may
                                    # include the role name (if the handler is from a role). If that
                                    # is not found, we resort to the simple name field, which doesn't
                                    # have anything extra added to it.
                                    target_handler_name = templar.template(handler_task.name)
                                    if target_handler_name == handler_name:
                                        return handler_task
                                    else:
                                        target_handler_name = templar.template(handler_task.get_name())
                                        if target_handler_name == handler_name:
                                            return handler_task
                                except (UndefinedError, AnsibleUndefinedVariable):
                                    # We skip this handler due to the fact that it may be using
                                    # a variable in the name that was conditionally included via
                                    # set_fact or some other method, and we don't want to error
                                    # out unnecessarily
                                    continue
                        return None

                    # Find the handler using the above helper.  First we look up the
                    # dependency chain of the current task (if it's from a role), otherwise
                    # we just look through the list of handlers in the current play/all
                    # roles and use the first one that matches the notify name
                    if handler_name in self._listening_handlers:
                        for listening_handler_name in self._listening_handlers[handler_name]:
                            listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                            if listening_handler is None:
                                raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)

                            if original_host not in self._notified_handlers[listening_handler]:
                                self._notified_handlers[listening_handler].append(original_host)
                                display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                    else:
                        target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                        if target_handler is None:
                            raise AnsibleError("The requested handler '%s' was not found in any of the known handlers" % handler_name)

                        if target_handler in self._notified_handlers:
                            if original_host not in self._notified_handlers[target_handler]:
                                self._notified_handlers[target_handler].append(original_host)
                                # FIXME: should this be a callback?
                                display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                        else:
                            raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host      = get_original_host(result[1])
                    task      = result[2]
                    var_value = wrap_var(result[3])
                    var_name  = task.register

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = get_original_host(result[1])
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        loop_var = 'item'
                        if task.loop_control:
                            loop_var = task.loop_control.loop_var or 'item'
                        if item is not None:
                            task_vars[loop_var] = item
                        templar = Templar(loader=self._loader, variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [actual_host]

                    if result[0] == 'set_host_var':
                        var_name  = result[4]
                        var_value = result[5]
                        for target_host in host_list:
                            self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        for target_host in host_list:
                            if task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, facts.copy())
                            else:
                                self._variable_manager.set_host_facts(target_host, facts.copy())
                elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
                    self._tqm.send_callback(result[0], result[1])
                elif result[0] == 'v2_on_file_diff':
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', result[1])
                else:
                    raise AnsibleError("unknown result message received: %s" % result[0])

            except Queue.Empty:
                time.sleep(0.005)

            if one_pass:
                break

        return ret_results
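
The branch above resolves a notification either through self._listening_handlers (handlers subscribed to a topic via `listen:`) or by a direct name lookup, and records each notified host at most once per handler. A minimal standalone sketch of that bookkeeping, using hypothetical names that are not the Ansible API:

# Simplified sketch of the notify/listen bookkeeping shown above; 'handlers',
# 'listening' and 'notified' stand in for iterator._play.handlers,
# self._listening_handlers and self._notified_handlers respectively.
def notify(handler_name, host, handlers, listening, notified):
    # handlers:  {handler_name: handler_task}
    # listening: {listen_topic: [handler_name, ...]}
    # notified:  {handler_name: [host, ...]}
    targets = listening.get(handler_name, [handler_name])
    for target_name in targets:
        if target_name not in handlers:
            raise KeyError("handler '%s' was not found" % target_name)
        hosts = notified.setdefault(target_name, [])
        if host not in hosts:
            hosts.append(host)
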
Code example #42
    def _process_pending_results(self, iterator, one_pass=False):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

                # helper method, used to find the original host from the one
                # returned in the result/message, which has been serialized and
                # thus had some information stripped from it to speed up the
                # serialization process
                def get_original_host(host):
                    if host.name in self._inventory._hosts_cache:
                        return self._inventory._hosts_cache[host.name]
                    else:
                        return self._inventory.get_host(host.name)

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = get_original_host(task_result._host)
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                for h in self._inventory.get_hosts(iterator._play.hosts):
                                    if h.name not in self._tqm._unreachable_hosts:
                                        iterator.mark_host_failed(h)
                            else:
                                iterator.mark_host_failed(host)

                            # only add the host to the failed list officially if it has
                            # been failed by the iterator
                            if iterator.is_failed(host):
                                self._tqm._failed_hosts[host.name] = True
                                self._tqm._stats.increment('failures', host.name)
                            else:
                                # otherwise, we grab the current state and if we're iterating on
                                # the rescue portion of a block then we save the failed task in a
                                # special var for use within the rescue/always
                                state, _ = iterator.get_next_task_for_host(host, peek=True)
                                if state.run_state == iterator.ITERATING_RESCUE:
                                    original_task = iterator.get_original_task(host, task)
                                    self._variable_manager.set_nonpersistent_facts(
                                        host,
                                        dict(
                                            ansible_failed_task=original_task.serialize(),
                                            ansible_failed_result=task_result._result,
                                        ),
                                    )
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped', task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment('changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok', task_result)

                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff', task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)

                elif result[0] == 'add_group':
                    host = get_original_host(result[1])
                    result_item = result[2]
                    self._add_group(host, result_item)

                elif result[0] == 'notify_handler':
                    task_result  = result[1]
                    handler_name = result[2]

                    original_host = get_original_host(task_result._host)
                    original_task = iterator.get_original_task(original_host, task_result._task)
                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []

                    if original_host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(original_host)
                        display.vv("NOTIFIED HANDLER %s" % (handler_name,))

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host      = get_original_host(result[1])
                    task      = result[2]
                    var_value = wrap_var(result[3])
                    var_name  = task.register

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = get_original_host(result[1])
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        loop_var = 'item'
                        if task.loop_control:
                            loop_var = task.loop_control.loop_var or 'item'
                        if item is not None:
                            task_vars[loop_var] = item
                        templar = Templar(loader=self._loader, variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [actual_host]

                    if result[0] == 'set_host_var':
                        var_name  = result[4]
                        var_value = result[5]
                        for target_host in host_list:
                            self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        for target_host in host_list:
                            if task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, facts)
                            else:
                                self._variable_manager.set_host_facts(target_host, facts)
                elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
                    self._tqm.send_callback(result[0], result[1])
                elif result[0] == 'v2_on_file_diff':
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', result[1])
                else:
                    raise AnsibleError("unknown result message received: %s" % result[0])

            except Queue.Empty:
                time.sleep(0.0001)

            if one_pass:
                break

        return ret_results
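
Both variants of _process_pending_results above follow the same drain-and-dispatch pattern: pull tuples off a shared result queue without blocking, branch on the message type in slot 0, and back off briefly when the queue turns out to be empty. A minimal standalone sketch of that pattern (the message names and handling are hypothetical, not the Ansible result protocol):

import time
try:
    from Queue import Empty   # Python 2, as used in the examples above
except ImportError:
    from queue import Empty   # Python 3

def drain_results(final_q, one_pass=False):
    ret_results = []
    while not final_q.empty():
        try:
            result = final_q.get(block=False)
            kind = result[0]
            if kind in ('task_ok', 'task_failed'):   # hypothetical message types
                ret_results.append(result[1])
            else:
                raise ValueError("unknown result message received: %s" % kind)
        except Empty:
            # another consumer drained the queue between empty() and get()
            time.sleep(0.001)
        if one_pass:
            break
    return ret_results
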
Code example #43
File: __init__.py  Project: amey-git/py-net
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
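        # At this point module_args has been augmented with the internal
        # '_ansible_*' flags set above; for example (values illustrative only):
        #   {'_ansible_check_mode': False, '_ansible_no_log': False,
        #    '_ansible_debug': False, '_ansible_diff': False,
        #    '_ansible_verbosity': 0, '_ansible_version': '2.x',
        #    '_ansible_syslog_facility': 'LOG_USER',
        #    '_ansible_selinux_special_fs': [...], ...}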

        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(module_name)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ['old', 'non_native_want_json']:
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
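                # e.g. for module_args {'path': '/tmp/foo', 'state': 'touch'} the
                # resulting args_data would be "path=/tmp/foo state=touch " (each
                # value shell-quoted by pipes.quote; the example is illustrative)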
                self._transfer_data(args_file_path, args_data)
            elif module_style == 'non_native_want_json':
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
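            # with pipelining the generated module source is fed to the remote
            # interpreter on stdin, so no module file has to be written to the
            # remote filesystem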
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing, or sudoing to root, so we can clean up the files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so we may not be able to delete the files as
                # that other user; clean up the temp files as the original user in a
                # second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd, sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning('Error deleting remote temporary files (rc: {0}, stderr: {1})'.format(tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
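
For context, _execute_module is normally invoked from an action plugin's run() method rather than called directly. A minimal sketch of such a caller, assuming the ActionBase API of this generation of Ansible (the 'ping' module is only an illustrative choice):

from ansible.plugins.action import ActionBase

class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        # gather the base result, then let _execute_module handle transfer,
        # permission fixup, execution and temp-dir cleanup as shown above
        result = super(ActionModule, self).run(tmp, task_vars)
        result.update(self._execute_module(module_name='ping',
                                            module_args=dict(),
                                            task_vars=task_vars,
                                            tmp=tmp))
        return result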