def search_handler_blocks(handler_name, handler_blocks):
    # NOTE: in the original source this is a nested function, so self and
    # iterator come from the enclosing scope
    for handler_block in handler_blocks:
        for handler_task in handler_block.block:
            if handler_task.name:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable):
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
            else:
                # if the handler name is not set, we check via the handlers uuid.
                # this is mainly used by listening handlers only
                if handler_name == handler_task._uuid:
                    return handler_task
    return None
def listify_lookup_plugin_terms(terms, variables, loader):

    if isinstance(terms, basestring):
        # someone did:
        #    with_items: alist
        # OR
        #    with_items: {{ alist }}

        stripped = terms.strip()
        templar = Templar(loader=loader, variables=variables)
        if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/") and not stripped.startswith('set([') and not LOOKUP_REGEX.search(terms):
            # if not already a list, get ready to evaluate with Jinja2
            # not sure why the "/" is in above code :)
            try:
                new_terms = templar.template("{{ %s }}" % terms)
                if isinstance(new_terms, basestring) and "{{" in new_terms:
                    pass
                else:
                    terms = new_terms
            except:
                pass
        else:
            terms = templar.template(terms)

        if '{' in terms or '[' in terms:
            # Jinja2 already evaluated a variable to a list.
            # Jinja2-ified list needs to be converted back to a real type
            return safe_eval(terms)

    if isinstance(terms, basestring):
        terms = [ terms ]

    return terms
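The early listify above predates native-type templating: Jinja2 rendering returns strings, which is why the result is round-tripped through safe_eval. A self-contained sketch of the same bare-variable handling using stock Jinja2, where DebugUndefined and ast.literal_eval stand in for Templar's undefined handling and safe_eval (the function name is mine, not Ansible's):

import ast
from jinja2 import Environment, DebugUndefined

def listify_bare_terms(terms, variables):
    # Wrap the bare name so "alist" becomes "{{ alist }}", then render it.
    rendered = Environment(undefined=DebugUndefined).from_string("{{ %s }}" % terms).render(**variables)
    if "{{" in rendered:
        # the variable stayed unresolved; keep the raw string as a single term
        return [terms]
    if rendered.startswith("["):
        # plain Jinja2 renders a list to its repr, so evaluate the literal
        # back into a real list (the role safe_eval plays above)
        return ast.literal_eval(rendered)
    return [rendered]

print(listify_bare_terms("alist", {"alist": ["a", "b"]}))  # ['a', 'b']
print(listify_bare_terms("missing", {}))                   # ['missing']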
def run_handlers(self, iterator, play_context):
    '''
    Runs handlers on those hosts which have been notified.
    '''

    result = True

    for handler_block in iterator._play.handlers:
        # FIXME: handlers need to support the rescue/always portions of blocks too,
        #        but this may take some work in the iterator and gets tricky when
        #        we consider the ability of meta tasks to flush handlers
        for handler in handler_block.block:
            handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler)
            templar = Templar(loader=self._loader, variables=handler_vars)
            try:
                # first we check with the full result of get_name(), which may
                # include the role name (if the handler is from a role). If that
                # is not found, we resort to the simple name field, which doesn't
                # have anything extra added to it.
                handler_name = templar.template(handler.name)
                if handler_name not in self._notified_handlers:
                    handler_name = templar.template(handler.get_name())
            except (UndefinedError, AnsibleUndefinedVariable):
                # We skip this handler due to the fact that it may be using
                # a variable in the name that was conditionally included via
                # set_fact or some other method, and we don't want to error
                # out unnecessarily
                continue

            if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
                result = self._do_handler_run(handler, handler_name, iterator=iterator, play_context=play_context)
                if not result:
                    break
    return result
def _squash_items(self, items, variables):
    '''
    Squash items down to a comma-separated list for certain modules which support it
    (typically package management modules).
    '''
    # _task.action could contain templatable strings (via action: and
    # local_action:)  Template it before comparing.  If we don't end up
    # optimizing it here, the templatable string might use template vars
    # that aren't available until later (it could even use vars from the
    # with_items loop) so don't make the templated string permanent yet.
    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
    task_action = self._task.action
    if templar._contains_vars(task_action):
        task_action = templar.template(task_action, fail_on_undefined=False)

    if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
        if all(isinstance(o, string_types) for o in items):
            final_items = []

            name = None
            for allowed in ['name', 'pkg', 'package']:
                name = self._task.args.pop(allowed, None)
                if name is not None:
                    break

            # This gets the information to check whether the name field
            # contains a template that we can squash for
            template_no_item = template_with_item = None
            if name:
                if templar._contains_vars(name):
                    variables['item'] = '\0$'
                    template_no_item = templar.template(name, variables, cache=False)
                    variables['item'] = '\0@'
                    template_with_item = templar.template(name, variables, cache=False)
                    del variables['item']

                # Check if the user is doing some operation that doesn't take
                # name/pkg or the name/pkg field doesn't have any variables
                # and thus the items can't be squashed
                if template_no_item != template_with_item:
                    for item in items:
                        variables['item'] = item
                        if self._task.evaluate_conditional(templar, variables):
                            new_item = templar.template(name, cache=False)
                            final_items.append(new_item)
                    self._task.args['name'] = final_items
                    # Wrap this in a list so that the calling function loop
                    # executes exactly once
                    return [final_items]
                else:
                    # Restore the name parameter
                    self._task.args['name'] = name
        #elif:
            # Right now we only optimize single entries.  In the future we
            # could optimize more types:
            # * lists can be squashed together
            # * dicts could squash entries that match in all cases except the
            #   name or pkg field.
    return items
def load_data(self, ds, variable_manager=None, loader=None, ablefile=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    AbleStage() object rather than an AbleStageInclude object
    '''

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # command objects
    new_obj = super(AbleStageInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)

    try:
        forward_conditional = False
        if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
            return None
    except AnsibleError:
        # conditional evaluation raised an error, so we set a flag to indicate
        # we need to forward the conditionals on to the included play(s)
        forward_conditional = True

    extend = False
    if new_obj.extend:
        file_name = templar.template(new_obj.extend)
        del ds['extend']
        extend = True
    elif new_obj.include:
        file_name = templar.template(new_obj.include)
        del ds['include']
    else:
        raise AnsibleParserError("no include or extend value specified")

    if not os.path.isabs(file_name):
        file_name = os.path.join(ablefile.get_basedir(), file_name)

    new_ds = loader.load_from_file(file_name)
    if not isinstance(new_ds, dict):
        raise AnsibleParserError("stage include should be in dictionary format")

    stage = AbleStage.load(new_ds, variable_manager=variable_manager, loader=loader, ablefile=ablefile)
    self.merge_data(stage, extend=extend)
    return stage
def _load_role_path(self, role_name):
    '''
    the 'role', as specified in the ds (or as a bare string), can either
    be a simple name or a full path. If it is a full path, we use the
    basename as the role name, otherwise we take the name as-given and
    append it to the default role path
    '''

    role_path = unfrackpath(role_name)
    if self._loader.path_exists(role_path):
        role_name = os.path.basename(role_name)
        return (role_name, role_path)
    else:
        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
            u'./roles',
            self._loader.get_basedir(),
            u'./',
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
            role_search_paths.extend(configured_paths)

        # finally, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
        else:
            all_vars = dict()
        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

    # FIXME: make the parser smart about list/string entries in
    #        the yaml so the error line/file can be reported here
    raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)))
def load_data(self, ds, variable_manager=None, loader=None, file_name=None, basedir=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    AbleFile() object rather than an AbleFileInclude object
    '''

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # command objects
    new_obj = super(AbleFileInclude, self).load_data(ds, variable_manager=variable_manager, loader=loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)

    extend = False
    if new_obj.extend:
        file_name = templar.template(new_obj.extend)
        del ds['extend']
        extend = True
    elif new_obj.include:
        file_name = templar.template(new_obj.include)
        del ds['include']
    else:
        raise AnsibleParserError("no include or extend value specified")

    new_ds = loader.load_from_file(file_name)
    if not isinstance(new_ds, dict):
        raise AnsibleParserError("command include should be in dictionary format")

    af = AbleFile.load(new_ds, file_name=file_name, basedir=basedir, variable_manager=variable_manager, loader=loader)
    self.merge_data(af, extend=extend)

    # Make sure ablefile is set as parent.
    for stage in af.stages:
        stage.ablefile = af
    for cmd in af.commands:
        cmd.ablefile = af
    return af
def _load_role_path(self, role_name):
    '''
    the 'role', as specified in the ds (or as a bare string), can either
    be a simple name or a full path. If it is a full path, we use the
    basename as the role name, otherwise we take the name as-given and
    append it to the default role path
    '''

    # we always start the search for roles in the base directory of the playbook
    role_search_paths = [
        os.path.join(self._loader.get_basedir(), u'roles'),
    ]

    # also search in the configured roles path
    if C.DEFAULT_ROLES_PATH:
        role_search_paths.extend(C.DEFAULT_ROLES_PATH)

    # next, append the roles basedir, if it was set, so we can
    # search relative to that directory for dependent roles
    if self._role_basedir:
        role_search_paths.append(self._role_basedir)

    # finally as a last resort we look in the current basedir as set
    # in the loader (which should be the playbook dir itself) but without
    # the roles/ dir appended
    role_search_paths.append(self._loader.get_basedir())

    # create a templar class to template the dependency names, in
    # case they contain variables
    if self._variable_manager is not None:
        all_vars = self._variable_manager.get_vars(play=self._play)
    else:
        all_vars = dict()

    templar = Templar(loader=self._loader, variables=all_vars)
    role_name = templar.template(role_name)

    # now iterate through the possible paths and return the first one we find
    for path in role_search_paths:
        path = templar.template(path)
        role_path = unfrackpath(os.path.join(path, role_name))
        if self._loader.path_exists(role_path):
            return (role_name, role_path)

    # if not found elsewhere try to extract path from name
    role_path = unfrackpath(role_name)
    if self._loader.path_exists(role_path):
        role_name = os.path.basename(role_name)
        return (role_name, role_path)

    raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
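Stripped of the loader and templating machinery, the search in both variants above is a first-match walk over candidate directories. A minimal standalone sketch, assuming plain filesystem checks in place of loader.path_exists (find_role is my name, not Ansible's):

import os

def find_role(role_name, search_paths):
    # templating of the path entries is omitted; os.path.isdir stands in
    # for loader.path_exists
    for path in search_paths:
        candidate = os.path.join(os.path.expanduser(path), role_name)
        if os.path.isdir(candidate):
            return (role_name, candidate)
    raise LookupError("the role '%s' was not found in %s" % (role_name, ":".join(search_paths)))

# e.g. find_role('common', ['./roles', '~/.ansible/roles', '.'])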
def load_inventory_files(self, inventory_files, host_list=None):
    temp_vars = self._variable_manager.get_vars(loader=self._loader)
    templar = Templar(loader=self._loader, variables=temp_vars)

    all_inventory = AbleMapping()
    try:
        for inventory_file in inventory_files:
            inventory_file = templar.template(inventory_file)
            try:
                data = InventoryYaml.prepare_data(self._loader.load_from_file(inventory_file))
                if data is not None:
                    if self._directory is None:
                        self._directory = os.path.dirname(inventory_file)
                    all_inventory = combine_vars(all_inventory, data)
                break
            except AbleFileNotFound as e:
                # we continue on loader failures
                continue
            except AbleParserError as e:
                raise
        else:
            raise AbleFileNotFound("vars file %s was not found" % inventory_files)
    except (UndefinedError, AbleUndefinedVariable):
        # we do not have a full context here, and the missing variable could be
        # because of that, so just show a warning and continue
        display.vvv("skipping inventory_file '%s' due to an undefined variable" % inventory_files)

    return all_inventory
def __getitem__(self, host_name):
    data = self.raw_get(host_name)
    sha1_hash = sha1(str(data).encode('utf-8')).hexdigest()
    if sha1_hash not in self._cached_result:
        templar = Templar(variables=data, loader=self._loader)
        self._cached_result[sha1_hash] = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS)
    return self._cached_result[sha1_hash]
def load_data(self, ds, basedir, variable_manager=None, loader=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    Playbook() object rather than a PlaybookInclude object
    '''

    # import here to avoid a dependency loop
    from ansible.playbook import Playbook

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # playbook objects
    new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)

    try:
        forward_conditional = False
        if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
            return None
    except AnsibleError:
        # conditional evaluation raised an error, so we set a flag to indicate
        # we need to forward the conditionals on to the included play(s)
        forward_conditional = True

    # then we use the object to load a Playbook
    pb = Playbook(loader=loader)

    file_name = templar.template(new_obj.include)
    if not os.path.isabs(file_name):
        file_name = os.path.join(basedir, file_name)

    pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

    # finally, update each loaded playbook entry with any variables specified
    # on the included playbook and/or any tags which may have been set
    for entry in pb._entries:
        temp_vars = entry.vars.copy()
        temp_vars.update(new_obj.vars)
        param_tags = temp_vars.pop('tags', None)
        if param_tags is not None:
            entry.tags.extend(param_tags.split(','))
        entry.vars = temp_vars
        entry.tags = list(set(entry.tags).union(new_obj.tags))
        if entry._included_path is None:
            entry._included_path = os.path.dirname(file_name)

        # Check to see if we need to forward the conditionals on to the included
        # plays. If so, we can take a shortcut here and simply prepend them to
        # those attached to each block (if any)
        if forward_conditional:
            for task_block in entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks:
                task_block.when = self.when[:] + task_block.when

    return pb
def __getitem__(self, host_name):
    if host_name not in self._lookup:
        host = self._inventory.get_host(host_name)
        result = self._vars_manager.get_vars(loader=self._loader, play=self._play, host=host)
        templar = Templar(variables=result, loader=self._loader)
        self._lookup[host_name] = templar.template(result)
    return self._lookup[host_name]
def __getitem__(self, host_name):
    if host_name not in self._lookup:
        return j2undefined
    data = self._lookup.get(host_name)
    templar = Templar(variables=data, loader=self._loader)
    return templar.template(data, fail_on_undefined=False)
def run(self, tmp=None, task_vars=dict()):
    ''' handler for template operations '''

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)

    if (source is None and 'first_available_file' not in task_vars) or dest is None:
        return dict(failed=True, msg="src and dest are required")

    if tmp is None:
        tmp = self._make_tmp_path()

    ##############################################################################################
    # FIXME: this all needs to be sorted out
    ##############################################################################################
    # if we have first_available_file in our vars
    # look up the files and use the first one we find as src
    #if 'first_available_file' in task_vars:
    #    found = False
    #    for fn in task_vars.get('first_available_file'):
    #        fn_orig = fn
    #        fnt = template.template(self.runner.basedir, fn, task_vars)
    #        fnd = utils.path_dwim(self.runner.basedir, fnt)
    #        if not os.path.exists(fnd) and '_original_file' in task_vars:
    #            fnd = utils.path_dwim_relative(task_vars['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
    #        if os.path.exists(fnd):
    #            source = fnd
    #            found = True
    #            break
    #    if not found:
    #        result = dict(failed=True, msg="could not find src in first_available_file list")
    #        return ReturnData(conn=conn, comm_ok=False, result=result)
    #else:
    if 1:
        if self._task._role is not None:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
        else:
            source = self._loader.path_dwim(source)
    ##############################################################################################
    # END FIXME
    ##############################################################################################

    # Expand any user home dir specification
    dest = self._remote_expand_user(dest, tmp)

    if dest.endswith("/"):
        # CCTODO: Fix path for Windows hosts.
        base = os.path.basename(source)
        dest = os.path.join(dest, base)

    # template the source data locally & get ready to transfer
    templar = Templar(loader=self._loader, variables=task_vars)
    try:
        with open(source, 'r') as f:
            template_data = f.read()
        resultant = templar.template(template_data, preserve_trailing_newlines=True)
    except Exception as e:
        return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
def post_validate(self, variables, loader):
    '''
    Finalizes templated values which may be set on this object's fields.
    '''
    templar = Templar(loader=loader, variables=variables)
    for field in self._get_fields():
        value = templar.template(getattr(self, field))
        setattr(self, field, value)
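The pattern above, walking an object's declared fields and overwriting each with its templated value, can be shown without Ansible's Templar. A toy sketch using plain Jinja2 (the Thing class and its field tuple are hypothetical):

from jinja2 import Environment

class Thing(object):
    fields = ('src', 'dest')

    def __init__(self, src, dest):
        self.src, self.dest = src, dest

    def post_validate(self, variables):
        # render every declared field and write the result back
        env = Environment()
        for field in self.fields:
            value = env.from_string(getattr(self, field)).render(**variables)
            setattr(self, field, value)

t = Thing(src='{{ name }}.j2', dest='/etc/{{ name }}')
t.post_validate({'name': 'motd'})
print(t.src, t.dest)  # motd.j2 /etc/motd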
def parent_handler_match(target_handler, handler_name):
    # NOTE: in the original source this is a nested function, so self and
    # iterator come from the enclosing scope
    if target_handler:
        if isinstance(target_handler, TaskInclude):
            try:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                templar = Templar(loader=self._loader, variables=handler_vars)
                target_handler_name = templar.template(target_handler.name)
                if target_handler_name == handler_name:
                    return True
                else:
                    target_handler_name = templar.template(target_handler.get_name())
                    if target_handler_name == handler_name:
                        return True
            except (UndefinedError, AnsibleUndefinedVariable) as e:
                pass
        return parent_handler_match(target_handler._parent, handler_name)
    else:
        return False
def __getitem__(self, host_name):
    if host_name not in self._lookup:
        return j2undefined
    host = self._lookup.get(host_name)
    data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False)
    templar = Templar(variables=data, loader=self._loader)
    return templar.template(data, fail_on_undefined=False)
def load_data(self, ds, basedir, variable_manager=None, loader=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    Playbook() object rather than a PlaybookInclude object
    '''

    # import here to avoid a dependency loop
    from ansible.playbook import Playbook
    from ansible.playbook.play import Play

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # playbook objects
    new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars())

    templar = Templar(loader=loader, variables=all_vars)

    # then we use the object to load a Playbook
    pb = Playbook(loader=loader)

    file_name = templar.template(new_obj.import_playbook)
    if not os.path.isabs(file_name):
        file_name = os.path.join(basedir, file_name)

    pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

    # finally, update each loaded playbook entry with any variables specified
    # on the included playbook and/or any tags which may have been set
    for entry in pb._entries:

        # conditional includes on a playbook need a marker to skip gathering
        if new_obj.when and isinstance(entry, Play):
            entry._included_conditional = new_obj.when[:]

        temp_vars = entry.vars.copy()
        temp_vars.update(new_obj.vars)
        param_tags = temp_vars.pop('tags', None)
        if param_tags is not None:
            entry.tags.extend(param_tags.split(','))
        entry.vars = temp_vars
        entry.tags = list(set(entry.tags).union(new_obj.tags))
        if entry._included_path is None:
            entry._included_path = os.path.dirname(file_name)

        # Check to see if we need to forward the conditionals on to the included
        # plays. If so, we can take a shortcut here and simply prepend them to
        # those attached to each block (if any)
        if new_obj.when:
            for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
                task_block._attributes['when'] = new_obj.when[:] + task_block.when[:]

    return pb
def _squash_items(self, items, variables):
    '''
    Squash items down to a comma-separated list for certain modules which support it
    (typically package management modules).
    '''
    # _task.action could contain templatable strings (via action: and
    # local_action:)  Template it before comparing.  If we don't end up
    # optimizing it here, the templatable string might use template vars
    # that aren't available until later (it could even use vars from the
    # with_items loop) so don't make the templated string permanent yet.
    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
    task_action = self._task.action
    if templar._contains_vars(task_action):
        task_action = templar.template(task_action, fail_on_undefined=False)

    if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
        if all(isinstance(o, string_types) for o in items):
            final_items = []
            name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
            # The user is doing an upgrade or some other operation
            # that doesn't take name or pkg.
            if name:
                for item in items:
                    variables['item'] = item
                    if self._task.evaluate_conditional(templar, variables):
                        if templar._contains_vars(name):
                            new_item = templar.template(name, cache=False)
                            final_items.append(new_item)
                        else:
                            final_items.append(item)
                self._task.args['name'] = final_items
                return [final_items]
        #elif:
            # Right now we only optimize single entries.  In the future we
            # could optimize more types:
            # * lists can be squashed together
            # * dicts could squash entries that match in all cases except the
            #   name or pkg field.
            # Note: we really should be checking that the name or pkg field
            # contains a template that expands with our with_items values.
            # If it doesn't then we may break things
    return items
def post_validate(self, all_vars=dict(), fail_on_undefined=True):
    '''
    we can't tell that everything is of the right type until we have all
    the variables.  Run basic types (from isa) as well as
    any _post_validate_<foo> functions.
    '''

    basedir = None
    if self._loader is not None:
        basedir = self._loader.get_basedir()

    templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=fail_on_undefined)
    for (name, attribute) in iteritems(self._get_base_attributes()):

        if getattr(self, name) is None:
            if not attribute.required:
                continue
            else:
                raise AnsibleParserError("the field '%s' is required but was not set" % name)

        try:
            # if the attribute contains a variable, template it now
            value = templar.template(getattr(self, name))

            # run the post-validator if present
            method = getattr(self, '_post_validate_%s' % name, None)
            if method:
                value = method(attribute, value, all_vars, fail_on_undefined)
            else:
                # otherwise, just make sure the attribute is of the type it should be
                if attribute.isa == 'string':
                    value = unicode(value)
                elif attribute.isa == 'int':
                    value = int(value)
                elif attribute.isa == 'bool':
                    value = boolean(value)
                elif attribute.isa == 'list':
                    if not isinstance(value, list):
                        value = [ value ]
                elif attribute.isa == 'dict' and not isinstance(value, dict):
                    raise TypeError()

            # and assign the massaged value back to the attribute field
            setattr(self, name, value)

        except (TypeError, ValueError) as e:
            raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
        except UndefinedError as e:
            if fail_on_undefined:
                raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name, e), obj=self.get_ds())
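The isa-driven coercion branch above is the heart of post-validation: template first, then coerce to the declared type, and raise a parser-style error on failure. A condensed, runnable sketch of just that branch (the boolean rules here are a simplified assumption, not Ansible's boolean()):

def coerce(value, isa):
    # mirror the isa dispatch from post_validate above
    if isa == 'string':
        return str(value)
    elif isa == 'int':
        return int(value)
    elif isa == 'bool':
        return str(value).lower() in ('yes', 'true', '1')
    elif isa == 'list':
        return value if isinstance(value, list) else [value]
    elif isa == 'dict':
        if not isinstance(value, dict):
            raise TypeError("expected a dict, got %r" % value)
        return value
    return value

print(coerce('42', 'int'))    # 42
print(coerce('yes', 'bool'))  # True
print(coerce('a', 'list'))    # ['a']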
def __getitem__(self, host_name):
    host = self._find_host(host_name)
    if host is None:
        raise j2undefined

    data = self._variable_manager.get_vars(loader=self._loader, host=host, include_hostvars=False)

    sha1_hash = sha1(str(data).encode('utf-8')).hexdigest()
    if sha1_hash in self._cached_result:
        result = self._cached_result[sha1_hash]
    else:
        templar = Templar(variables=data, loader=self._loader)
        result = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS)
        self._cached_result[sha1_hash] = result
    return result
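Both cached __getitem__ variants above key their cache on a sha1 of the stringified vars, so identical inputs skip re-templating. The pattern in isolation (render is any callable; the identity lambda below is only for illustration):

from hashlib import sha1

class TemplateCache(object):
    def __init__(self, render):
        self._render = render          # e.g. templar.template
        self._cached_result = {}

    def get(self, data):
        # identical stringified inputs hash to the same key
        key = sha1(str(data).encode('utf-8')).hexdigest()
        if key not in self._cached_result:
            self._cached_result[key] = self._render(data)
        return self._cached_result[key]

cache = TemplateCache(render=lambda d: d)
print(cache.get({'x': 1}))  # computed once, then served from the cache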
def listify_lookup_plugin_terms(terms, variables, loader):

    if isinstance(terms, basestring):
        stripped = terms.strip()
        templar = Templar(loader=loader, variables=variables)

        # FIXME: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override
        terms = templar.template(terms, convert_bare=True, fail_on_undefined=False)

        # TODO: check if this is needed as template should also return correct type already
        terms = safe_eval(terms)

    if isinstance(terms, basestring) or not isinstance(terms, Iterable):
        terms = [terms]

    return terms
def load_data(self, ds, basedir, variable_manager=None, loader=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    Playbook() object rather than a PlaybookInclude object
    '''

    # import here to avoid a dependency loop
    from ansible.playbook import Playbook

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # playbook objects
    new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)
    if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
        return None

    # then we use the object to load a Playbook
    pb = Playbook(loader=loader)

    file_name = templar.template(new_obj.include)
    if not os.path.isabs(file_name):
        file_name = os.path.join(basedir, file_name)

    pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

    # finally, update each loaded playbook entry with any variables specified
    # on the included playbook and/or any tags which may have been set
    for entry in pb._entries:
        temp_vars = entry.vars.copy()
        temp_vars.update(new_obj.vars)
        param_tags = temp_vars.pop('tags', None)
        if param_tags is not None:
            entry.tags.extend(param_tags.split(','))
        entry.vars = temp_vars
        entry.tags = list(set(entry.tags).union(new_obj.tags))
        if entry._included_path is None:
            entry._included_path = os.path.dirname(file_name)

    return pb
def run(self, terms, variables, **kwargs):

    if not isinstance(terms, list):
        terms = [terms]

    templar = Templar(loader=self._loader, variables=variables)

    ret = []
    for term in terms:
        path = self._loader.path_dwim(term)
        if os.path.exists(path):
            with open(path, "r") as f:
                template_data = f.read()
            res = templar.template(template_data, preserve_trailing_newlines=True)
            ret.append(res)
        else:
            raise AnsibleError("the template file %s could not be found for the lookup" % term)
    return ret
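An approximation of this lookup with plain Jinja2, where keep_trailing_newline plays the role of preserve_trailing_newlines; the function name and the file name in the usage comment are mine:

from jinja2 import Environment

def render_template_file(path, variables):
    # read the file and render it with the supplied vars, keeping the
    # final newline as the lookup above does
    env = Environment(keep_trailing_newline=True)
    with open(path, "r") as f:
        return env.from_string(f.read()).render(**variables)

# usage: render_template_file("motd.j2", {"hostname": "web01"})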
def evaluate_tags(self, only_tags, skip_tags, all_vars):
    templar = Templar(loader=self._loader, variables=all_vars)
    tags = templar.template(self.tags)
    if not isinstance(tags, list):
        tags = set([tags])
    else:
        tags = set(tags)

    #print("%s tags are: %s, only_tags=%s, skip_tags=%s" % (self, my_tags, only_tags, skip_tags))
    if skip_tags:
        skipped_tags = tags.intersection(skip_tags)
        if len(skipped_tags) > 0:
            return False

    matched_tags = tags.intersection(only_tags)
    #print("matched tags are: %s" % matched_tags)
    if len(matched_tags) > 0 or 'all' in only_tags:
        return True
    else:
        return False
def evaluate_tags(self, only_tags, skip_tags, all_vars):
    ''' this checks if the current item should be executed depending on tag options '''

    should_run = True

    if self.tags:
        templar = Templar(loader=self._loader, variables=all_vars)
        tags = templar.template(self.tags)

        if not isinstance(tags, list):
            if tags.find(',') != -1:
                tags = set(tags.split(','))
            else:
                tags = set([tags])
        else:
            tags = set([i for i, _ in itertools.groupby(tags)])
    else:
        # this makes isdisjoint work for untagged
        tags = self.untagged

    if only_tags:
        should_run = False

        if 'always' in tags or 'all' in only_tags:
            should_run = True
        elif not tags.isdisjoint(only_tags):
            should_run = True
        elif 'tagged' in only_tags and tags != self.untagged:
            should_run = True

    if should_run and skip_tags:
        # Check for tags that we need to skip
        if 'all' in skip_tags:
            if 'always' not in tags or 'always' in skip_tags:
                should_run = False
        elif not tags.isdisjoint(skip_tags):
            should_run = False
        elif 'tagged' in skip_tags and tags != self.untagged:
            should_run = False

    return should_run
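The branch structure above reduces to set operations plus the 'always'/'all'/'tagged' special cases. A condensed standalone sketch with the same shape (a simplification, not the exact Ansible semantics; templating of tag values is omitted):

def should_run(task_tags, only_tags, skip_tags):
    tags = set(task_tags) or {'untagged'}
    run = not only_tags or bool(
        'always' in tags or 'all' in only_tags or tags & set(only_tags)
        or ('tagged' in only_tags and tags != {'untagged'}))
    if run and skip_tags:
        if 'all' in skip_tags and ('always' not in tags or 'always' in skip_tags):
            run = False
        elif tags & set(skip_tags):
            run = False
        elif 'tagged' in skip_tags and tags != {'untagged'}:
            run = False
    return run

print(should_run(['deploy'], only_tags=['deploy'], skip_tags=[]))  # True
print(should_run([], only_tags=[], skip_tags=['all']))             # False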
def run(self, tmp=None, task_vars=dict()):

    if 'msg' in self._task.args:
        if 'fail' in self._task.args and boolean(self._task.args['fail']):
            result = dict(failed=True, msg=self._task.args['msg'])
        else:
            result = dict(msg=self._task.args['msg'])
    # FIXME: move the LOOKUP_REGEX somewhere else
    elif 'var' in self._task.args:  # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=task_vars)
        results = templar.template(self._task.args['var'], convert_bare=True)
        result = dict()
        result[self._task.args['var']] = results
    else:
        result = dict(msg='here we are')

    # force flag to make debug output module always verbose
    result['verbose_always'] = True

    return result
def _add_group(self, task, iterator):
    '''
    Helper function to add a group (if it does not exist), and to assign the
    specified host to that group.
    '''

    # the host here is from the executor side, which means it was a
    # serialized/cloned copy and we'll need to look up the proper
    # host object from the master inventory
    groups = {}
    changed = False

    for host in self._inventory.get_hosts():
        original_task = iterator.get_original_task(host, task)
        all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=original_task)
        templar = Templar(loader=self._loader, variables=all_vars)
        group_name = templar.template(original_task.args.get('key'))
        if task.evaluate_conditional(templar=templar, all_vars=all_vars):
            if group_name not in groups:
                groups[group_name] = []
            groups[group_name].append(host)

    for group_name, hosts in iteritems(groups):
        new_group = self._inventory.get_group(group_name)
        if not new_group:
            # create the new group and add it to inventory
            new_group = Group(name=group_name)
            self._inventory.add_group(new_group)
            new_group.vars = self._inventory.get_group_vars(new_group)

            # and add the group to the proper hierarchy
            allgroup = self._inventory.get_group('all')
            allgroup.add_child_group(new_group)
            changed = True

        for host in hosts:
            if group_name not in host.get_groups():
                new_group.add_host(host)
                changed = True

    return changed
def _squash_items(self, items, variables):
    '''
    Squash items down to a comma-separated list for certain modules which support it
    (typically package management modules).
    '''
    if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS:
        final_items = []
        name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
        for item in items:
            variables['item'] = item
            templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
            if self._task.evaluate_conditional(templar, variables):
                if templar._contains_vars(name):
                    new_item = templar.template(name)
                    final_items.append(new_item)
                else:
                    final_items.append(item)
        joined_items = ",".join(final_items)
        self._task.args['name'] = joined_items
        return [joined_items]
    else:
        return items
def _load_role_name(self, ds):
    '''
    Returns the role name (either the role: or name: field) from the role
    definition, or (when the role definition is a simple string), just that string
    '''

    if isinstance(ds, string_types):
        return ds

    role_name = ds.get('role', ds.get('name'))
    if not role_name or not isinstance(role_name, string_types):
        raise AnsibleError('role definitions must contain a role name', obj=ds)

    # if we have the required datastructures, and if the role_name
    # contains a variable, try and template it now
    if self._variable_manager:
        all_vars = self._variable_manager.get_vars(play=self._play)
        templar = Templar(loader=self._loader, variables=all_vars)
        if templar._contains_vars(role_name):
            role_name = templar.template(role_name)

    return role_name
def _squash_items(self, items, loop_var, variables):
    '''
    Squash items down to a comma-separated list for certain modules which support it
    (typically package management modules).
    '''
    name = None
    try:
        # _task.action could contain templatable strings (via action: and
        # local_action:)  Template it before comparing.  If we don't end up
        # optimizing it here, the templatable string might use template vars
        # that aren't available until later (it could even use vars from the
        # with_items loop) so don't make the templated string permanent yet.
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
        task_action = self._task.action
        if templar._contains_vars(task_action):
            task_action = templar.template(task_action, fail_on_undefined=False)

        if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
            if all(isinstance(o, string_types) for o in items):
                final_items = []

                found = None
                for allowed in ['name', 'pkg', 'package']:
                    name = self._task.args.pop(allowed, None)
                    if name is not None:
                        found = allowed
                        break

                # This gets the information to check whether the name field
                # contains a template that we can squash for
                template_no_item = template_with_item = None
                if name:
                    if templar._contains_vars(name):
                        variables[loop_var] = '\0$'
                        template_no_item = templar.template(name, variables, cache=False)
                        variables[loop_var] = '\0@'
                        template_with_item = templar.template(name, variables, cache=False)
                        del variables[loop_var]

                    # Check if the user is doing some operation that doesn't take
                    # name/pkg or the name/pkg field doesn't have any variables
                    # and thus the items can't be squashed
                    if template_no_item != template_with_item:
                        display.deprecated(
                            'Invoking "%s" only once while using a loop via squash_actions is deprecated. '
                            'Instead of using a loop to supply multiple items and specifying `%s: %s`, '
                            'please use `%s: %r` and remove the loop' % (self._task.action, found, name, found, self._task.loop),
                            version='2.11'
                        )
                        for item in items:
                            variables[loop_var] = item
                            if self._task.evaluate_conditional(templar, variables):
                                new_item = templar.template(name, cache=False)
                                final_items.append(new_item)
                        self._task.args['name'] = final_items
                        # Wrap this in a list so that the calling function loop
                        # executes exactly once
                        return [final_items]
                    else:
                        # Restore the name parameter
                        self._task.args['name'] = name
            # elif:
                # Right now we only optimize single entries.  In the future we
                # could optimize more types:
                # * lists can be squashed together
                # * dicts could squash entries that match in all cases except the
                #   name or pkg field.
    except Exception:
        # Squashing is an optimization.  If it fails for any reason,
        # simply use the unoptimized list of items.

        # Restore the name parameter
        if name is not None:
            self._task.args['name'] = name

    return items
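The '\0$'/'\0@' probe above is the interesting trick: render the name template twice with two different impossible loop values, and squash only if the outputs differ, which means the template really depends on the loop variable. The probe in isolation, with plain Jinja2 standing in for Templar (a sketch, not the Ansible implementation):

from jinja2 import Environment

def depends_on_item(name_template, variables):
    # two sentinel values that cannot appear in real data; if the rendered
    # outputs differ, the template consumes the loop variable
    tmpl = Environment().from_string(name_template)
    no_item = tmpl.render(dict(variables, item='\0$'))
    with_item = tmpl.render(dict(variables, item='\0@'))
    return no_item != with_item

print(depends_on_item("{{ item }}", {}))                        # True
print(depends_on_item("{{ pkg_name }}", {"pkg_name": "vim"}))   # False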
def _get_delegated_vars(self, loader, play, task, existing_variables):
    # we unfortunately need to template the delegate_to field here,
    # as we're fetching vars before post_validate has been called on
    # the task that has been passed in
    vars_copy = existing_variables.copy()
    templar = Templar(loader=loader, variables=vars_copy)

    items = []
    if task.loop is not None:
        if task.loop in lookup_loader:
            try:
                loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar,
                                                         loader=loader, fail_on_undefined=True, convert_bare=False)
                items = lookup_loader.get(task.loop, loader=loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
            except AnsibleUndefinedVariable:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
    else:
        items = [None]

    delegated_host_vars = dict()
    for item in items:
        # update the variables with the item value for templating, in case we need it
        if item is not None:
            vars_copy['item'] = item

        templar.set_available_variables(vars_copy)
        delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
        if delegated_host_name is None:
            raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
        if delegated_host_name in delegated_host_vars:
            # no need to repeat ourselves, as the delegate_to value
            # does not appear to be tied to the loop item variable
            continue

        # a dictionary of variables to use if we have to create a new host below
        # we set the default port based on the default transport here, to make sure
        # we use the proper default for windows
        new_port = C.DEFAULT_REMOTE_PORT
        if C.DEFAULT_TRANSPORT == 'winrm':
            new_port = 5986

        new_delegated_host_vars = dict(
            ansible_delegated_host=delegated_host_name,
            ansible_host=delegated_host_name,
            ansible_port=new_port,
            ansible_user=C.DEFAULT_REMOTE_USER,
            ansible_connection=C.DEFAULT_TRANSPORT,
        )

        # now try to find the delegated-to host in inventory, or failing that,
        # create a new host on the fly so we can fetch variables for it
        delegated_host = None
        if self._inventory is not None:
            delegated_host = self._inventory.get_host(delegated_host_name)
            # try looking it up based on the address field, and finally
            # fall back to creating a host on the fly to use for the var lookup
            if delegated_host is None:
                if delegated_host_name in C.LOCALHOST:
                    delegated_host = self._inventory.localhost
                else:
                    for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
                        delegated_host.vars.update(new_delegated_host_vars)
        else:
            delegated_host = Host(name=delegated_host_name)
            delegated_host.vars.update(new_delegated_host_vars)

        # now we go fetch the vars for the delegated-to host and save them in our
        # master dictionary of variables to be used later in the TaskExecutor/PlayContext
        delegated_host_vars[delegated_host_name] = self.get_vars(
            loader=loader,
            play=play,
            host=delegated_host,
            task=task,
            include_delegate_to=False,
            include_hostvars=False,
        )
    return delegated_host_vars
def _load_role_path(self, role_name):
    '''
    the 'role', as specified in the ds (or as a bare string), can either
    be a simple name or a full path. If it is a full path, we use the
    basename as the role name, otherwise we take the name as-given and
    append it to the default role path
    '''

    # create a templar class to template the dependency names, in
    # case they contain variables
    if self._variable_manager is not None:
        all_vars = self._variable_manager.get_vars(play=self._play)
    else:
        all_vars = dict()

    templar = Templar(loader=self._loader, variables=all_vars)
    role_name = templar.template(role_name)

    role_tuple = None

    # try to load as a collection-based role first
    if self._collection_list or AnsibleCollectionRef.is_valid_fqcr(role_name):
        role_tuple = get_collection_role_path(role_name, self._collection_list)

    if role_tuple:
        # we found it, stash collection data and return the name/path tuple
        self._role_collection = role_tuple[2]
        return role_tuple[0:2]

    # FUTURE: refactor this to be callable from internal so we can properly order
    # ansible.legacy searches with the collections keyword
    if self._collection_list and 'ansible.legacy' not in self._collection_list:
        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(self._collection_list)), obj=self._ds)

    # we always start the search for roles in the base directory of the playbook
    role_search_paths = [
        os.path.join(self._loader.get_basedir(), u'roles'),
    ]

    # also search in the configured roles path
    if C.DEFAULT_ROLES_PATH:
        role_search_paths.extend(C.DEFAULT_ROLES_PATH)

    # next, append the roles basedir, if it was set, so we can
    # search relative to that directory for dependent roles
    if self._role_basedir:
        role_search_paths.append(self._role_basedir)

    # finally as a last resort we look in the current basedir as set
    # in the loader (which should be the playbook dir itself) but without
    # the roles/ dir appended
    role_search_paths.append(self._loader.get_basedir())

    # now iterate through the possible paths and return the first one we find
    for path in role_search_paths:
        path = templar.template(path)
        role_path = unfrackpath(os.path.join(path, role_name))
        if self._loader.path_exists(role_path):
            return (role_name, role_path)

    # if not found elsewhere try to extract path from name
    role_path = unfrackpath(role_name)
    if self._loader.path_exists(role_path):
        role_name = os.path.basename(role_name)
        return (role_name, role_path)

    raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
def _execute(self, variables=None):
    '''
    The primary workhorse of the executor system, this runs the task
    on the specified host (which may be the delegated_to host) and handles
    the retry/until and block rescue/always execution
    '''

    if variables is None:
        variables = self._job_vars

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

    context_validation_error = None
    try:
        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._play_context.post_validate(templar=templar)

        # now that the play context is finalized, if the remote_addr is not set
        # default to using the host's address field as the remote address
        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.address

        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)

        # FIXME: update connection/shell plugin options
    except AnsibleError as e:
        # save the error, which we'll raise later if we don't end up
        # skipping this task during the conditional evaluation step
        context_validation_error = e

    # Evaluate the conditional (if any) for this task, which we do before running
    # the final task post-validation. We do this before the post validation due to
    # the fact that the conditional may specify that the task be skipped due to a
    # variable not being present which would otherwise cause validation to fail
    try:
        if not self._task.evaluate_conditional(templar, variables):
            display.debug("when evaluation is False, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
    except AnsibleError:
        # loop error takes precedence
        if self._loop_eval_error is not None:
            raise self._loop_eval_error  # pylint: disable=raising-bad-type
        raise

    # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
    if self._loop_eval_error is not None:
        raise self._loop_eval_error  # pylint: disable=raising-bad-type

    # if we ran into an error while setting up the PlayContext, raise it now
    if context_validation_error is not None:
        raise context_validation_error  # pylint: disable=raising-bad-type

    # if this task is a TaskInclude, we just return now with a success code so the
    # main thread can expand the task list for the given host
    if self._task.action in ('include', 'include_tasks'):
        include_variables = self._task.args.copy()
        include_file = include_variables.pop('_raw_params', None)
        if not include_file:
            return dict(failed=True, msg="No include file was specified to the include")

        include_file = templar.template(include_file)
        return dict(include=include_file, include_variables=include_variables)

    # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
    elif self._task.action == 'include_role':
        include_variables = self._task.args.copy()
        return dict(include_variables=include_variables)

    # Now we do final validation on the task, which sets all fields to their final values.
    self._task.post_validate(templar=templar)
    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            if C.INJECT_FACTS_AS_VARS:
                display.warning("Using a variable for a task's 'args' is unsafe in some situations "
                                "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    # get the connection and the handler for this execution
    if (not self._connection or
            not getattr(self._connection, 'connected', False) or
            self._play_context.remote_addr != self._connection._play_context.remote_addr):
        self._connection = self._get_connection(variables=variables, templar=templar)
    else:
        # if connection is reused, its _play_context is no longer valid and needs
        # to be replaced with the one templated above, in case other data changed
        self._connection._play_context = self._play_context

    self._set_connection_options(variables, templar)
    self._set_shell_options(variables, templar)

    # get handler
    self._handler = self._get_action_handler(connection=self._connection, templar=templar)

    # Apply default params for action/module, if present
    # These are collected as a list of dicts, so we need to merge them
    module_defaults = {}
    for default in self._task.module_defaults:
        module_defaults.update(default)
    if module_defaults:
        module_defaults = templar.template(module_defaults)
    if self._task.action in module_defaults:
        tmp_args = module_defaults[self._task.action].copy()
        tmp_args.update(self._task.args)
        self._task.args = tmp_args
    if self._task.action in C.config.module_defaults_groups:
        for group in C.config.module_defaults_groups.get(self._task.action, []):
            tmp_args = (module_defaults.get('group/{0}'.format(group)) or {}).copy()
            tmp_args.update(self._task.args)
            self._task.args = tmp_args

    # And filter out any fields which were set to default(omit), and got the omit token value
    omit_token = variables.get('omit')
    if omit_token is not None:
        self._task.args = remove_omit(self._task.args, omit_token)

    # Read some values from the task, so that we can modify them if need be
    if self._task.until:
        retries = self._task.retries
        if retries is None:
            retries = 3
        elif retries <= 0:
            retries = 1
        else:
            retries += 1
    else:
        retries = 1

    delay = self._task.delay
    if delay < 0:
        delay = 1

    # make a copy of the job vars here, in case we need to update them
    # with the registered variable value later on when testing conditions
    vars_copy = variables.copy()

    display.debug("starting attempt loop")
    result = None
    for attempt in range(1, retries + 1):
        display.debug("running the handler")
        try:
            result = self._handler.run(task_vars=variables)
        except AnsibleActionSkip as e:
            return dict(skipped=True, msg=to_text(e))
        except AnsibleActionFail as e:
            return dict(failed=True, msg=to_text(e))
        except AnsibleConnectionFailure as e:
            return dict(unreachable=True, msg=to_text(e))
        display.debug("handler run complete")

        # preserve no log
        result["_ansible_no_log"] = self._play_context.no_log

        # update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        if self._task.register:
            vars_copy[self._task.register] = wrap_var(result)

        if self._task.async_val > 0:
            if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                # FIXME callback 'v2_runner_on_async_poll' here

            # ensure no log is preserved
            result["_ansible_no_log"] = self._play_context.no_log

        # helper methods for use below in evaluating changed/failed_when
        def _evaluate_changed_when_result(result):
            if self._task.changed_when is not None and self._task.changed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.changed_when
                result['changed'] = cond.evaluate_conditional(templar, vars_copy)

        def _evaluate_failed_when_result(result):
            if self._task.failed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.failed_when
                failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                result['failed_when_result'] = result['failed'] = failed_when_result
            else:
                failed_when_result = False
            return failed_when_result

        if 'ansible_facts' in result:
            if self._task.action in ('set_fact', 'include_vars'):
                vars_copy.update(result['ansible_facts'])
            else:
                # TODO: cleaning of facts should eventually become part of taskresults instead of vars
                vars_copy.update(namespace_facts(result['ansible_facts']))
                if C.INJECT_FACTS_AS_VARS:
                    vars_copy.update(clean_facts(result['ansible_facts']))

        # set the failed property if it was missing.
        if 'failed' not in result:
            # rc is here for backwards compatibility and modules that use it instead of 'failed'
            if 'rc' in result and result['rc'] not in [0, "0"]:
                result['failed'] = True
            else:
                result['failed'] = False

        # Make attempts and retries available early to allow their use in changed/failed_when
        if self._task.until:
            result['attempts'] = attempt

        # set the changed property if it was missing.
        if 'changed' not in result:
            result['changed'] = False

        # re-update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        # This gives changed/failed_when access to additional recently modified
        # attributes of result
        if self._task.register:
            vars_copy[self._task.register] = wrap_var(result)

        # if we didn't skip this task, use the helpers to evaluate the changed/
        # failed_when properties
        if 'skipped' not in result:
            _evaluate_changed_when_result(result)
            _evaluate_failed_when_result(result)

        if retries > 1:
            cond = Conditional(loader=self._loader)
            cond.when = self._task.until
            if cond.evaluate_conditional(templar, vars_copy):
                break
            else:
                # no conditional check, or it failed, so sleep for the specified time
                if attempt < retries:
                    result['_ansible_retry'] = True
                    result['retries'] = retries
                    display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                    self._final_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                    time.sleep(delay)
    else:
        if retries > 1:
            # we ran out of attempts, so mark the result as failed
            result['attempts'] = retries - 1
            result['failed'] = True

    # do the final update of the local variables here, for both registered
    # values and any facts which may have been created
    if self._task.register:
        variables[self._task.register] = wrap_var(result)

    if 'ansible_facts' in result:
        if self._task.action in ('set_fact', 'include_vars'):
            variables.update(result['ansible_facts'])
        else:
            # TODO: cleaning of facts should eventually become part of taskresults instead of vars
            variables.update(namespace_facts(result['ansible_facts']))
            if C.INJECT_FACTS_AS_VARS:
                variables.update(clean_facts(result['ansible_facts']))

    # save the notification target in the result, if it was specified, as
    # this task may be running in a loop in which case the notification
    # may be item-specific, ie. "notify: service {{item}}"
    if self._task.notify is not None:
        result['_ansible_notify'] = self._task.notify

    # add the delegated vars to the result, so we can reference them
    # on the results side without having to do any further templating
    # FIXME: we only want a limited set of variables here, so this is currently
    #        hardcoded but should be possibly fixed if we want more or if
    #        there is another source of truth we can use
    delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
    if len(delegated_vars) > 0:
        result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
        for k in ('ansible_host', ):
            result["_ansible_delegated_vars"][k] = delegated_vars.get(k)

    # and return
    display.debug("attempt loop complete, returning result")
    return result
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
    '''
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars
    '''

    debug("in VariableManager get_vars()")
    cache_entry = self._get_cache_entry(play=play, host=host, task=task)
    if cache_entry in VARIABLE_CACHE and use_cache:
        debug("vars are cached, returning them now")
        return VARIABLE_CACHE[cache_entry]

    all_vars = defaultdict(dict)
    magic_variables = self._get_magic_variables(
        loader=loader,
        play=play,
        host=host,
        task=task,
        include_hostvars=include_hostvars,
        include_delegate_to=include_delegate_to,
    )

    if play:
        # first we compile any vars specified in defaults/main.yml
        # for all roles within the specified play
        for role in play.get_roles():
            all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None:
            all_vars = combine_vars(all_vars, task._role.get_default_vars())

    if host:
        # next, if a host is specified, we load any vars from group_vars
        # files and then any vars from host_vars files which may apply to
        # this host or the groups it belongs to

        # we merge in vars from groups specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_group_vars())

        # then we merge in the special 'all' group_vars first, if they exist
        if 'all' in self._group_vars_files:
            data = preprocess_vars(self._group_vars_files['all'])
            for item in data:
                all_vars = combine_vars(all_vars, item)

        for group in host.get_groups():
            if group.name in self._group_vars_files and group.name != 'all':
                for data in self._group_vars_files[group.name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

        # then we merge in vars from the host specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_vars())

        # then we merge in the host_vars/<hostname> file, if it exists
        host_name = host.get_name()
        if host_name in self._host_vars_files:
            for data in self._host_vars_files[host_name]:
                data = preprocess_vars(data)
                for item in data:
                    all_vars = combine_vars(all_vars, item)

        # finally, the facts cache for this host, if it exists
        try:
            host_facts = self._fact_cache.get(host.name, dict())
            for k in host_facts.keys():
                if host_facts[k] is not None and not isinstance(host_facts[k], UnsafeProxy):
                    host_facts[k] = UnsafeProxy(host_facts[k])
            all_vars = combine_vars(all_vars, host_facts)
        except KeyError:
            pass

    if play:
        all_vars = combine_vars(all_vars, play.get_vars())

        for vars_file_item in play.get_vars_files():
            # create a set of temporary vars here, which incorporate the extra
            # and magic vars so we can properly template the vars_files entries
            temp_vars = combine_vars(all_vars, self._extra_vars)
            temp_vars = combine_vars(temp_vars, magic_variables)
            templar = Templar(loader=loader, variables=temp_vars)

            # we assume each item in the list is itself a list, as we
            # support "conditional includes" for vars_files, which mimics
            # the with_first_found mechanism.
            #vars_file_list = templar.template(vars_file_item)
            vars_file_list = vars_file_item
            if not isinstance(vars_file_list, list):
                vars_file_list = [vars_file_list]

            # now we iterate through the (potential) files, and break out
            # as soon as we read one from the list. If none are found, we
            # raise an error, which is silently ignored at this point.
            try:
                for vars_file in vars_file_list:
                    vars_file = templar.template(vars_file)
                    try:
                        data = preprocess_vars(loader.load_from_file(vars_file))
                        if data is not None:
                            for item in data:
                                all_vars = combine_vars(all_vars, item)
                        break
                    except AnsibleFileNotFound as e:
                        # we continue on loader failures
                        continue
                    except AnsibleParserError as e:
                        raise
                else:
                    raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
            except (UndefinedError, AnsibleUndefinedVariable):
                if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                    raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
                else:
                    # we do not have a full context here, and the missing variable could be
                    # because of that, so just show a warning and continue
                    display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                    continue

        if not C.DEFAULT_PRIVATE_ROLE_VARS:
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_vars())
        all_vars = combine_vars(all_vars, task.get_vars())

    if host:
        all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
        all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

    all_vars = combine_vars(all_vars, self._extra_vars)
    all_vars = combine_vars(all_vars, magic_variables)

    # if we have a task and we're delegating to another host, figure out the
    # variables for that host now so we don't have to rely on hostvars later
    if task and task.delegate_to is not None and include_delegate_to:
        all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)

    #VARIABLE_CACHE[cache_entry] = all_vars

    debug("done with get_vars()")
    return all_vars
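# A minimal, self-contained sketch of the precedence scheme in get_vars()
# above: each combine_vars call layers a higher-precedence source over the
# accumulated result. The toy _combine_vars below mimics the default
# 'replace' merge behavior and is illustrative, not Ansible's implementation.
def _combine_vars(a, b):
    result = a.copy()
    result.update(b)  # keys from the later (higher-precedence) source win
    return result

role_defaults = {'http_port': 80, 'max_clients': 200}
host_vars = {'http_port': 8080}
extra_vars = {'max_clients': 500}

merged = {}
for layer in (role_defaults, host_vars, extra_vars):
    merged = _combine_vars(merged, layer)
# merged == {'http_port': 8080, 'max_clients': 500}: extra vars beat host
# vars, which beat role defaults, matching the docstring's ordering.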
def process_include_results(results, iterator, loader, variable_manager): included_files = [] task_vars_cache = {} for res in results: original_host = res._host original_task = res._task if original_task.action in ('include', 'include_tasks', 'include_role'): if original_task.loop: if 'results' not in res._result: continue include_results = res._result['results'] else: include_results = [res._result] for include_result in include_results: # if the task result was skipped or failed, continue if 'skipped' in include_result and include_result[ 'skipped'] or 'failed' in include_result and include_result[ 'failed']: continue cache_key = (iterator._play, original_host, original_task) try: task_vars = task_vars_cache[cache_key] except KeyError: task_vars = task_vars_cache[ cache_key] = variable_manager.get_vars( play=iterator._play, host=original_host, task=original_task) include_args = include_result.get('include_args', dict()) special_vars = {} loop_var = include_result.get('ansible_loop_var', 'item') index_var = include_result.get('ansible_index_var') if loop_var in include_result: task_vars[loop_var] = special_vars[ loop_var] = include_result[loop_var] if index_var and index_var in include_result: task_vars[index_var] = special_vars[ index_var] = include_result[index_var] if '_ansible_item_label' in include_result: task_vars['_ansible_item_label'] = special_vars[ '_ansible_item_label'] = include_result[ '_ansible_item_label'] if original_task.no_log and '_ansible_no_log' not in include_args: task_vars['_ansible_no_log'] = special_vars[ '_ansible_no_log'] = original_task.no_log # get search path for this task to pass to lookup plugins that may be used in pathing to # the included file task_vars[ 'ansible_search_path'] = original_task.get_search_path( ) # ensure basedir is always in (dwim already searches here but we need to display it) if loader.get_basedir( ) not in task_vars['ansible_search_path']: task_vars['ansible_search_path'].append( loader.get_basedir()) templar = Templar(loader=loader, variables=task_vars) if original_task.action in ('include', 'include_tasks'): include_file = None if original_task: if original_task.static: continue if original_task._parent: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._parent cumulative_path = None while parent_include is not None: if not isinstance(parent_include, TaskInclude): parent_include = parent_include._parent continue if isinstance(parent_include, IncludeRole): parent_include_dir = parent_include._role_path else: try: parent_include_dir = os.path.dirname( templar.template( parent_include.args.get( '_raw_params'))) except AnsibleError as e: parent_include_dir = '' display.warning( 'Templating the path of the parent %s failed. The path to the ' 'included file may not be found. ' 'The error was: %s.' 
% (original_task.action, to_text(e))) if cumulative_path is not None and not os.path.isabs( cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) else: cumulative_path = parent_include_dir include_target = templar.template( include_result['include']) if original_task._role: new_basedir = os.path.join( original_task._role._role_path, 'tasks', cumulative_path) candidates = [ loader.path_dwim_relative( original_task._role._role_path, 'tasks', include_target), loader.path_dwim_relative( new_basedir, 'tasks', include_target) ] for include_file in candidates: try: # may throw OSError os.stat(include_file) # or select the task file if it exists break except OSError: pass else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break else: parent_include = parent_include._parent if include_file is None: if original_task._role: include_target = templar.template( include_result['include']) include_file = loader.path_dwim_relative( original_task._role._role_path, 'tasks', include_target) else: include_file = loader.path_dwim( include_result['include']) include_file = templar.template(include_file) inc_file = IncludedFile(include_file, include_args, special_vars, original_task) else: # template the included role's name here role_name = include_args.pop( 'name', include_args.pop('role', None)) if role_name is not None: role_name = templar.template(role_name) new_task = original_task.copy() new_task._role_name = role_name for from_arg in new_task.FROM_ARGS: if from_arg in include_args: from_key = from_arg.replace('_from', '') new_task._from_files[ from_key] = templar.template( include_args.pop(from_arg)) inc_file = IncludedFile(role_name, include_args, special_vars, new_task, is_role=True) idx = 0 orig_inc_file = inc_file while 1: try: pos = included_files[idx:].index(orig_inc_file) # pos is relative to idx since we are slicing # use idx + pos due to relative indexing inc_file = included_files[idx + pos] except ValueError: included_files.append(orig_inc_file) inc_file = orig_inc_file try: inc_file.add_host(original_host) except ValueError: # The host already exists for this include, advance forward, this is a new include idx += pos + 1 else: break return included_files
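# Simplified model of the de-duplication loop at the end of
# process_include_results above: equal includes share one entry, each host
# is recorded once per include, and a second notification for the same host
# starts a new entry. The Include class is a stand-in for IncludedFile.
class Include(object):
    def __init__(self, name):
        self.name = name
        self.hosts = []

    def __eq__(self, other):
        return self.name == other.name

    def add_host(self, host):
        if host in self.hosts:
            raise ValueError("host already notified for this include")
        self.hosts.append(host)

included = []
for name, host in [('a.yml', 'web1'), ('a.yml', 'web2'), ('a.yml', 'web1')]:
    idx = 0
    orig = Include(name)
    while 1:
        try:
            pos = included[idx:].index(orig)
            entry = included[idx + pos]
        except ValueError:
            included.append(orig)
            entry = orig
        try:
            entry.add_host(host)
        except ValueError:
            # host already present; advance and look for (or create) a later entry
            idx += pos + 1
        else:
            break
# included now holds two 'a.yml' entries: hosts ['web1', 'web2'] and ['web1'].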
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' Given a list of task datastructures (parsed from YAML), return a list of Task() or TaskInclude() objects. ''' # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block from ansible.playbook.handler import Handler from ansible.playbook.task import Task from ansible.playbook.task_include import TaskInclude from ansible.template import Templar assert isinstance(ds, list) task_list = [] for task_ds in ds: assert isinstance(task_ds, dict) if 'block' in task_ds: t = Block.load( task_ds, play=play, parent_block=block, role=role, task_include=task_include, use_handlers=use_handlers, variable_manager=variable_manager, loader=loader, ) task_list.append(t) else: if 'include' in task_ds: t = TaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) all_vars = variable_manager.get_vars(loader=loader, play=play, task=t) templar = Templar(loader=loader, variables=all_vars) # check to see if this include is dynamic or static: # 1. the user has set the 'static' option to false or true # 2. one of the appropriate config options was set if t.static is not None: is_static = t.static else: is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \ (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) if is_static: if t.loop is not None: raise AnsibleParserError( "You cannot use 'static' on an include with a loop", obj=task_ds) # FIXME: all of this code is very similar (if not identical) to that in # plugins/strategy/__init__.py, and should be unified to avoid # patches only being applied to one or the other location if task_include: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = task_include cumulative_path = None while parent_include is not None: parent_include_dir = templar.template( os.path.dirname( parent_include.args.get('_raw_params'))) if cumulative_path is None: cumulative_path = parent_include_dir elif not os.path.isabs(cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) include_target = templar.template( t.args['_raw_params']) if t._role: new_basedir = os.path.join( t._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative( new_basedir, 'tasks', include_target) else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break else: parent_include = parent_include._task_include else: try: include_target = templar.template( t.args['_raw_params']) except AnsibleUndefinedVariable as e: raise AnsibleParserError( "Error when evaluating variable in include name: %s.\n\n" \ "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \ "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \ "sources like group or host vars." 
% t.args['_raw_params'], obj=task_ds, suppress_extended_error=True, ) if t._role: if use_handlers: include_file = loader.path_dwim_relative( t._role._role_path, 'handlers', include_target) else: include_file = loader.path_dwim_relative( t._role._role_path, 'tasks', include_target) else: include_file = loader.path_dwim(include_target) try: data = loader.load_from_file(include_file) if data is None: return [] elif not isinstance(data, list): raise AnsibleError( "included task files must contain a list of tasks", obj=data) except AnsibleFileNotFound as e: if t.static or \ C.DEFAULT_TASK_INCLUDES_STATIC or \ C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers: raise display.deprecated( "Included file '%s' not found, however since this include is not " \ "explicitly marked as 'static: yes', we will try and include it dynamically " \ "later. In the future, this will be an error unless 'static: no' is used " \ "on the include task. If you do not want missing includes to be considered " \ "dynamic, use 'static: yes' on the include or set the global ansible.cfg " \ "options to make all includes static for tasks and/or handlers" % include_file, ) task_list.append(t) continue included_blocks = load_list_of_blocks( data, play=play, parent_block=block, task_include=t, role=role, use_handlers=use_handlers, loader=loader, variable_manager=variable_manager, ) # pop tags out of the include args, if they were specified there, and assign # them to the include. If the include already had tags specified, we raise an # error so that users know not to specify them both ways tags = t.vars.pop('tags', []) if isinstance(tags, string_types): tags = tags.split(',') if len(tags) > 0: if len(t.tags) > 0: raise AnsibleParserError( "Include tasks should not specify tags in more than one way (both via args and directly on the task)." \ " Mixing styles in which tags are specified is prohibited for the whole import hierarchy, not only for a single import statement", obj=task_ds, suppress_extended_error=True, ) display.deprecated( "You should not specify tags in the include parameters. All tags should be specified using the task-level option" ) else: tags = t.tags[:] # now we extend the tags on each of the included blocks for b in included_blocks: b.tags = list(set(b.tags).union(tags)) # END FIXME # FIXME: send callback here somehow... # FIXME: handlers shouldn't need this special handling, but do # right now because they don't iterate blocks correctly if use_handlers: for b in included_blocks: task_list.extend(b.block) else: task_list.extend(included_blocks) else: task_list.append(t) elif use_handlers: t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) task_list.append(t) else: t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) task_list.append(t) return task_list
def _execute(self, variables=None):
    '''
    The primary workhorse of the executor system, this runs the task
    on the specified host (which may be the delegated_to host) and handles
    the retry/until and block rescue/always execution
    '''

    if variables is None:
        variables = self._job_vars

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

    context_validation_error = None
    try:
        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._play_context.post_validate(templar=templar)

        # now that the play context is finalized, if the remote_addr is not set
        # default to using the host's address field as the remote address
        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.address

        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)
    except AnsibleError as e:
        # save the error, which we'll raise later if we don't end up
        # skipping this task during the conditional evaluation step
        context_validation_error = e

    # Evaluate the conditional (if any) for this task, which we do before running
    # the final task post-validation. We do this before the post validation due to
    # the fact that the conditional may specify that the task be skipped due to a
    # variable not being present which would otherwise cause validation to fail
    try:
        if not self._task.evaluate_conditional(templar, variables):
            display.debug("when evaluation failed, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
    except AnsibleError:
        # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
        if self._task.action != 'include':
            raise

    # if we ran into an error while setting up the PlayContext, raise it now
    if context_validation_error is not None:
        raise context_validation_error

    # if this task is a TaskInclude, we just return now with a success code so the
    # main thread can expand the task list for the given host
    if self._task.action == 'include':
        include_variables = self._task.args.copy()
        include_file = include_variables.pop('_raw_params', None)
        if not include_file:
            return dict(failed=True, msg="No include file was specified to the include")
        include_file = templar.template(include_file)
        return dict(include=include_file, include_variables=include_variables)

    #TODO: not needed?
    # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
    elif self._task.action == 'include_role':
        include_variables = self._task.args.copy()
        role = include_variables.pop('name')
        if not role:
            return dict(failed=True, msg="No role was specified to include")
        return dict(name=role, include_variables=include_variables)

    # Now we do final validation on the task, which sets all fields to their final values.
    self._task.post_validate(templar=templar)
    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    # get the connection and the handler for this execution
    if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
        self._connection = self._get_connection(variables=variables, templar=templar)
        self._connection.set_host_overrides(host=self._host, hostvars=variables.get('hostvars', {}).get(self._host.name, {}))
    else:
        # if connection is reused, its _play_context is no longer valid and needs
        # to be replaced with the one templated above, in case other data changed
        self._connection._play_context = self._play_context

    self._handler = self._get_action_handler(connection=self._connection, templar=templar)

    # And filter out any fields which were set to default(omit), and got the omit token value
    omit_token = variables.get('omit')
    if omit_token is not None:
        self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

    # Read some values from the task, so that we can modify them if need be
    if self._task.until:
        retries = self._task.retries
        if retries is None:
            retries = 3
        elif retries <= 0:
            retries = 1
        else:
            retries += 1
    else:
        retries = 1

    delay = self._task.delay
    if delay < 0:
        delay = 1

    # make a copy of the job vars here, in case we need to update them
    # with the registered variable value later on when testing conditions
    vars_copy = variables.copy()

    display.debug("starting attempt loop")
    result = None
    for attempt in range(1, retries + 1):
        display.debug("running the handler")
        try:
            result = self._handler.run(task_vars=variables)
        except AnsibleConnectionFailure as e:
            return dict(unreachable=True, msg=to_unicode(e))
        display.debug("handler run complete")

        # preserve no log
        result["_ansible_no_log"] = self._play_context.no_log

        # update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        if self._task.register:
            vars_copy[self._task.register] = wrap_var(result.copy())

        if self._task.async > 0:
            if self._task.poll > 0:
                result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)

            # ensure no log is preserved
            result["_ansible_no_log"] = self._play_context.no_log

        # helper methods for use below in evaluating changed/failed_when
        def _evaluate_changed_when_result(result):
            if self._task.changed_when is not None and self._task.changed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.changed_when
                result['changed'] = cond.evaluate_conditional(templar, vars_copy)

        def _evaluate_failed_when_result(result):
            if self._task.failed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.failed_when
                failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                result['failed_when_result'] = result['failed'] = failed_when_result
            else:
                failed_when_result = False
            return failed_when_result

        if 'ansible_facts' in result:
            vars_copy.update(result['ansible_facts'])

        # set the failed property if the result has a non-zero rc. This will be
        # overridden below if the failed_when property is set
        if result.get('rc', 0) != 0:
            result['failed'] = True

        # if we didn't skip this task, use the helpers to evaluate the changed/
        # failed_when properties
        if 'skipped' not in result:
            _evaluate_changed_when_result(result)
            _evaluate_failed_when_result(result)

        if retries > 1:
            cond = Conditional(loader=self._loader)
            cond.when = self._task.until
            if cond.evaluate_conditional(templar, vars_copy):
                break
            else:
                # no conditional check, or it failed, so sleep for the specified time
                if attempt < retries:
                    result['attempts'] = attempt
                    result['_ansible_retry'] = True
                    result['retries'] = retries
                    display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                    self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result), block=False)
                    time.sleep(delay)
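# Condensed model of the retry loop above, with the Ansible objects stripped
# away: run up to `retries` attempts, stop early once the `until` condition
# holds, and sleep `delay` seconds between attempts. Names and defaults here
# are illustrative, not Ansible's API.
import time

def run_with_retries(run, until, retries=3, delay=1):
    result = None
    for attempt in range(1, retries + 1):
        result = run()
        if until(result):
            break
        if attempt < retries:
            time.sleep(delay)
    return result

result = run_with_retries(lambda: {'rc': 0}, until=lambda r: r.get('rc') == 0)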
def _run_loop(self, items): ''' Runs the task with the loop items specified and collates the result into an array named 'results' which is inserted into the final result along with the item for which the loop ran. ''' results = [] # make copies of the job vars and task so we can add the item to # the variables and re-validate the task with the item variable #task_vars = self._job_vars.copy() task_vars = self._job_vars loop_var = 'item' label = None if self._task.loop_control: # the value may be 'None', so we still need to default it back to 'item' loop_var = self._task.loop_control.loop_var or 'item' label = self._task.loop_control.label or ('{{' + loop_var + '}}') if loop_var in task_vars: display.warning( "The loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior." % loop_var) items = self._squash_items(items, loop_var, task_vars) for item in items: task_vars[loop_var] = item try: tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True) tmp_task._parent = self._task._parent tmp_play_context = self._play_context.copy() except AnsibleParserError as e: results.append(dict(failed=True, msg=to_unicode(e))) continue # now we swap the internal task and play context with their copies, # execute, and swap them back so we can do the next iteration cleanly (self._task, tmp_task) = (tmp_task, self._task) (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context) res = self._execute(variables=task_vars) (self._task, tmp_task) = (tmp_task, self._task) (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context) # now update the result with the item info, and append the result # to the list of results res[loop_var] = item res['_ansible_item_result'] = True if not label is None: templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) res['_ansible_item_label'] = templar.template( label, fail_on_undefined=False) self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, res), block=False) results.append(res) del task_vars[loop_var] return results
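# The swap/execute/swap pattern used by _run_loop above, in isolation:
# install a per-item copy of some state, run with it, then restore the
# original so the next iteration starts clean. Purely illustrative; the
# try/finally guard is an addition, the original swaps back unconditionally.
class Runner(object):
    def __init__(self, task):
        self.task = task

    def execute_with(self, tmp_task):
        (self.task, tmp_task) = (tmp_task, self.task)
        try:
            return "ran %s" % self.task
        finally:
            # swap back even if execution raises
            (self.task, tmp_task) = (tmp_task, self.task)

r = Runner("original")
assert r.execute_with("copy for item 1") == "ran copy for item 1"
assert r.task == "original"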
def _process_pending_results(self, iterator): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). ''' ret_results = [] while not self._final_q.empty() and not self._tqm._terminated: try: result = self._final_q.get(block=False) self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result], )) # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] host = task_result._host task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed( ): if not task.ignore_errors: self._display.debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback( 'v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) elif result[0] == 'host_unreachable': self._tqm._unreachable_hosts[host.name] = True self._tqm._stats.increment('dark', host.name) self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif result[0] == 'host_task_skipped': self._tqm._stats.increment('skipped', host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) elif result[0] == 'host_task_ok': self._tqm._stats.increment('ok', host.name) if 'changed' in task_result._result and task_result._result[ 'changed']: self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) if self._diff and 'diff' in task_result._result: self._tqm.send_callback('v2_on_file_diff', task_result) self._pending_results -= 1 if host.name in self._blocked_hosts: del self._blocked_hosts[host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if task_result._task._role is not None and result[0] in ( 'host_task_ok', 'host_task_failed'): # lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in iterator._play.ROLE_CACHE[ task_result._task._role._role_name].iteritems( ): if role_obj._uuid == task_result._task._role._uuid: role_obj._had_task_run[host.name] = True ret_results.append(task_result) elif result[0] == 'add_host': task_result = result[1] new_host_info = task_result.get('add_host', dict()) self._add_host(new_host_info) elif result[0] == 'add_group': task = result[1] self._add_group(task, iterator) elif result[0] == 'notify_handler': task_result = result[1] handler_name = result[2] original_task = iterator.get_original_task( task_result._host, task_result._task) if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] if task_result._host not in self._notified_handlers[ handler_name]: self._notified_handlers[handler_name].append( task_result._host) elif result[0] == 'register_host_var': # essentially the same as 'set_host_var' below, however we # never follow the delegate_to value for registered vars host = result[1] var_name = result[2] var_value = result[3] self._variable_manager.set_host_variable( host, var_name, var_value) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] task = result[2] item = result[3] if task.delegate_to is not None: task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, host=host, task=task) 
task_vars = self.add_tqm_variables(task_vars, play=iterator._play) if item is not None: task_vars['item'] = item templar = Templar(loader=self._loader, variables=task_vars) host_name = templar.template(task.delegate_to) target_host = self._inventory.get_host(host_name) if target_host is None: target_host = Host(name=host_name) else: target_host = host if result[0] == 'set_host_var': var_name = result[4] var_value = result[5] self._variable_manager.set_host_variable( target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] self._variable_manager.set_host_facts( target_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) except Queue.Empty: pass return ret_results
def post_validate(self, all_vars=dict(), fail_on_undefined=True): ''' we can't tell that everything is of the right type until we have all the variables. Run basic types (from isa) as well as any _post_validate_<foo> functions. ''' basedir = None if self._loader is not None: basedir = self._loader.get_basedir() templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=fail_on_undefined) for (name, attribute) in iteritems(self._get_base_attributes()): if getattr(self, name) is None: if not attribute.required: continue else: raise AnsibleParserError( "the field '%s' is required but was not set" % name) try: # if the attribute contains a variable, template it now value = templar.template(getattr(self, name)) # run the post-validator if present method = getattr(self, '_post_validate_%s' % name, None) if method: value = method(attribute, value, all_vars, fail_on_undefined) else: # otherwise, just make sure the attribute is of the type it should be if attribute.isa == 'string': value = unicode(value) elif attribute.isa == 'int': value = int(value) elif attribute.isa == 'bool': value = boolean(value) elif attribute.isa == 'list': if not isinstance(value, list): value = [value] elif attribute.isa == 'dict' and not isinstance( value, dict): raise TypeError() # and assign the massaged value back to the attribute field setattr(self, name, value) except (TypeError, ValueError) as e: raise AnsibleParserError( "the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds()) except UndefinedError as e: if fail_on_undefined: raise AnsibleParserError( "the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name, e), obj=self.get_ds())
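# Sketch of the getattr-based dispatch post_validate relies on: a field named
# 'port' is handled by _post_validate_port if such a method exists, otherwise
# it falls back to plain type coercion. The class below is hypothetical.
class FieldHolder(object):
    port = "8080"

    def _post_validate_port(self, value):
        port = int(value)
        if not 0 < port < 65536:
            raise ValueError("port out of range: %s" % port)
        return port

    def post_validate_field(self, name):
        value = getattr(self, name)
        method = getattr(self, '_post_validate_%s' % name, None)
        if method:
            value = method(value)
        else:
            value = str(value)  # fallback coercion
        setattr(self, name, value)

f = FieldHolder()
f.post_validate_field('port')
assert f.port == 8080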
def _get_loop_items(self): ''' Loads a lookup plugin to handle the with_* portion of a task (if specified), and returns the items result. ''' # save the play context variables to a temporary dictionary, # so that we can modify the job vars without doing a full copy # and later restore them to avoid modifying things too early play_context_vars = dict() self._play_context.update_vars(play_context_vars) old_vars = dict() for k in play_context_vars: if k in self._job_vars: old_vars[k] = self._job_vars[k] self._job_vars[k] = play_context_vars[k] # get search path for this task to pass to lookup plugins self._job_vars['ansible_search_path'] = self._task.get_search_path() # ensure basedir is always in (dwim already searches here but we need to display it) if self._loader.get_basedir() not in self._job_vars['ansible_search_path']: self._job_vars['ansible_search_path'].append(self._loader.get_basedir()) templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) items = None loop_cache = self._job_vars.get('_ansible_loop_cache') if loop_cache is not None: # _ansible_loop_cache may be set in `get_vars` when calculating `delegate_to` # to avoid reprocessing the loop items = loop_cache elif self._task.loop_with: if self._task.loop_with in self._shared_loader_obj.lookup_loader: fail = True if self._task.loop_with == 'first_found': # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing. fail = False loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, loader=self._loader, fail_on_undefined=fail, convert_bare=False) if not fail: loop_terms = [t for t in loop_terms if not templar._contains_vars(t)] # get lookup mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar) # give lookup task 'context' for subdir (mostly needed for first_found) for subdir in ['template', 'var', 'file']: # TODO: move this to constants? if subdir in self._task.action: break setattr(mylookup, '_subdir', subdir + 's') # run lookup items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with) elif self._task.loop: items = templar.template(self._task.loop) if not isinstance(items, list): raise AnsibleError( "Invalid data passed to 'loop', it requires a list, got this instead: %s." " Hint: If you passed a list/dict of just one element," " try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items ) # now we restore any old job variables that may have been modified, # and delete them if they were in the play context vars but not in # the old variables dictionary for k in play_context_vars: if k in old_vars: self._job_vars[k] = old_vars[k] else: del self._job_vars[k] if items: for idx, item in enumerate(items): if item is not None and not isinstance(item, UnsafeProxy): items[idx] = UnsafeProxy(item) return items
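# The save/patch/restore dance at the top and bottom of _get_loop_items,
# extracted: overlay temporary keys onto a dict, then undo the overlay,
# deleting keys that did not exist before. Helper names are illustrative.
def overlay_vars(base, temp):
    saved = dict((k, base[k]) for k in temp if k in base)
    base.update(temp)
    return saved

def restore_vars(base, temp, saved):
    for k in temp:
        if k in saved:
            base[k] = saved[k]
        else:
            del base[k]

job_vars = {'ansible_user': 'deploy'}
temp = {'ansible_user': 'root', 'ansible_port': 22}
saved = overlay_vars(job_vars, temp)
restore_vars(job_vars, temp, saved)
assert job_vars == {'ansible_user': 'deploy'}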
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' Given a list of task datastructures (parsed from YAML), return a list of Task() or TaskInclude() objects. ''' # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block from ansible.playbook.handler import Handler from ansible.playbook.task import Task from ansible.playbook.task_include import TaskInclude from ansible.playbook.role_include import IncludeRole from ansible.playbook.handler_task_include import HandlerTaskInclude from ansible.template import Templar assert isinstance(ds, list) task_list = [] for task_ds in ds: assert isinstance(task_ds, dict) if 'block' in task_ds: t = Block.load( task_ds, play=play, parent_block=block, role=role, task_include=task_include, use_handlers=use_handlers, variable_manager=variable_manager, loader=loader, ) task_list.append(t) else: if 'include' in task_ds: if use_handlers: include_class = HandlerTaskInclude else: include_class = TaskInclude t = include_class.load(task_ds, block=block, role=role, task_include=None, variable_manager=variable_manager, loader=loader) all_vars = variable_manager.get_vars(loader=loader, play=play, task=t) templar = Templar(loader=loader, variables=all_vars) # check to see if this include is dynamic or static: # 1. the user has set the 'static' option to false or true # 2. one of the appropriate config options was set if t.static is not None: is_static = t.static else: is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \ (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \ (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop) if is_static: if t.loop is not None: raise AnsibleParserError( "You cannot use 'static' on an include with a loop", obj=task_ds) # we set a flag to indicate this include was static t.statically_loaded = True # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = block cumulative_path = None found = False subdir = 'tasks' if use_handlers: subdir = 'handlers' while parent_include is not None: if not isinstance(parent_include, TaskInclude): parent_include = parent_include._parent continue parent_include_dir = templar.template( os.path.dirname( parent_include.args.get('_raw_params'))) if cumulative_path is None: cumulative_path = parent_include_dir elif not os.path.isabs(cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) include_target = templar.template( t.args['_raw_params']) if t._role: new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path) include_file = loader.path_dwim_relative( new_basedir, subdir, include_target) else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): found = True break else: parent_include = parent_include._parent if not found: try: include_target = templar.template( t.args['_raw_params']) except AnsibleUndefinedVariable: raise AnsibleParserError( "Error when evaluating variable in include name: %s.\n\n" \ "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \ "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \ "sources like group or host vars." 
% t.args['_raw_params'], obj=task_ds, suppress_extended_error=True, ) if t._role: include_file = loader.path_dwim_relative( t._role._role_path, subdir, include_target) else: include_file = loader.path_dwim(include_target) try: data = loader.load_from_file(include_file) if data is None: return [] elif not isinstance(data, list): raise AnsibleParserError( "included task files must contain a list of tasks", obj=data) # since we can't send callbacks here, we display a message directly in # the same fashion used by the on_include callback. We also do it here, # because the recursive nature of helper methods means we may be loading # nested includes, and we want the include order printed correctly display.vv("statically included: %s" % include_file) except AnsibleFileNotFound: if t.static or \ C.DEFAULT_TASK_INCLUDES_STATIC or \ C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers: raise display.deprecated( "Included file '%s' not found, however since this include is not " \ "explicitly marked as 'static: yes', we will try and include it dynamically " \ "later. In the future, this will be an error unless 'static: no' is used " \ "on the include task. If you do not want missing includes to be considered " \ "dynamic, use 'static: yes' on the include or set the global ansible.cfg " \ "options to make all includes static for tasks and/or handlers" % include_file, ) task_list.append(t) continue included_blocks = load_list_of_blocks( data, play=play, parent_block=None, task_include=t.copy(), role=role, use_handlers=use_handlers, loader=loader, variable_manager=variable_manager, ) # pop tags out of the include args, if they were specified there, and assign # them to the include. If the include already had tags specified, we raise an # error so that users know not to specify them both ways tags = t.vars.pop('tags', []) if isinstance(tags, string_types): tags = tags.split(',') if len(tags) > 0: if len(t.tags) > 0: raise AnsibleParserError( "Include tasks should not specify tags in more than one way (both via args and directly on the task). " \ "Mixing styles in which tags are specified is prohibited for the whole import hierarchy, not only for a single import statement", obj=task_ds, suppress_extended_error=True, ) display.deprecated( "You should not specify tags in the include parameters. All tags should be specified using the task-level option" ) else: tags = t.tags[:] # now we extend the tags on each of the included blocks for b in included_blocks: b.tags = list(set(b.tags).union(tags)) # END FIXME # FIXME: handlers shouldn't need this special handling, but do # right now because they don't iterate blocks correctly if use_handlers: for b in included_blocks: task_list.extend(b.block) else: task_list.extend(included_blocks) else: task_list.append(t) elif 'include_role' in task_ds: ir = IncludeRole.load(task_ds, block=block, role=role, task_include=None, variable_manager=variable_manager, loader=loader) # 1. the user has set the 'static' option to false or true # 2.
one of the appropriate config options was set if ir.static is not None: is_static = ir.static else: display.debug('Determine if include_role is static') # Check to see if this include is dynamic or static: all_vars = variable_manager.get_vars(loader=loader, play=play, task=ir) templar = Templar(loader=loader, variables=all_vars) needs_templating = False for param in ir.args: if templar._contains_vars(ir.args[param]): if not templar.templatable(ir.args[param]): needs_templating = True break is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \ (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \ (not needs_templating and ir.all_parents_static() and not ir.loop) display.debug( 'Determined that if include_role static is %s' % str(is_static)) if is_static: # uses compiled list from object t = task_list.extend( ir.get_block_list(variable_manager=variable_manager, loader=loader)) else: # passes task object itself for latter generation of list t = task_list.append(ir) else: if use_handlers: t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) else: t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) task_list.append(t) return task_list
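# The static/dynamic decision above, boiled down: an include_role is treated
# as static when the user forces it, when the relevant config default is on,
# or when nothing about it requires runtime information (no templating, all
# parents static, no loop). Parameter names are illustrative.
def include_is_static(user_static, config_static, needs_templating, parents_static, has_loop):
    if user_static is not None:
        return user_static
    return config_static or (not needs_templating and parents_static and not has_loop)

assert include_is_static(None, False, False, True, False) is True
assert include_is_static(None, False, True, True, False) is False
assert include_is_static(True, False, True, False, True) is True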
class BaseInventoryPlugin(object):
    """ Parses an Inventory Source """

    TYPE = 'generator'

    def __init__(self, cache=None):
        self.inventory = None
        self.display = display
        self.cache = cache or {}

    def parse(self, inventory, loader, path, cache=True):
        ''' Populates self.groups from the given data. Raises an error on any parse failure. '''
        self.loader = loader
        self.inventory = inventory
        self.templar = Templar(loader=loader)

    def verify_file(self, path):
        ''' Verify if the file is usable by this plugin; the base class does a minimal accessibility check '''
        b_path = to_bytes(path, errors='surrogate_or_strict')
        return (os.path.exists(b_path) and os.access(b_path, os.R_OK))

    def get_cache_prefix(self, path):
        ''' create predictable unique prefix for plugin/inventory '''
        m = hashlib.sha1()
        m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
        d1 = m.hexdigest()

        n = hashlib.sha1()
        n.update(to_bytes(path, errors='surrogate_or_strict'))
        d2 = n.hexdigest()

        return 's_'.join([d1[:5], d2[:5]])

    def clear_cache(self):
        pass

    def populate_host_vars(self, hosts, variables, group=None, port=None):
        if not isinstance(variables, MutableMapping):
            raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))

        for host in hosts:
            self.inventory.add_host(host, group=group, port=port)
            for k in variables:
                self.inventory.set_variable(host, k, variables[k])

    def _compose(self, template, variables):
        ''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars '''
        t = self.templar
        t.set_available_variables(variables)
        return t.do_template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)

    def _set_composite_vars(self, compose, variables, host, strict=False):
        ''' loops over compose entries to create vars for hosts '''
        if compose and isinstance(compose, dict):
            for varname in compose:
                try:
                    composite = self._compose(compose[varname], variables)
                except Exception as e:
                    if strict:
                        raise AnsibleOptionsError("Could not set %s: %s" % (varname, to_native(e)))
                    continue
                self.inventory.set_variable(host, varname, composite)

    def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
        ''' helper to create complex groups for plugins based on jinja2 conditionals; hosts that meet the conditional are added to the group '''
        # process each 'group entry'
        if groups and isinstance(groups, dict):
            self.templar.set_available_variables(variables)
            for group_name in groups:
                conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
                try:
                    result = boolean(self.templar.template(conditional))
                except Exception as e:
                    if strict:
                        raise AnsibleOptionsError("Could not add to group %s: %s" % (group_name, to_native(e)))
                    continue

                if result:
                    # ensure group exists
                    self.inventory.add_group(group_name)
                    # add host to group
                    self.inventory.add_child(group_name, host)

    def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
        ''' helper to create groups for plugins based on variable values and add the corresponding hosts to it '''
        if keys and isinstance(keys, list):
            for keyed in keys:
                if keyed and isinstance(keyed, dict):
                    prefix = keyed.get('prefix', '')
                    key = keyed.get('key')
                    if key is not None:
                        try:
                            groups = to_safe_group_name('%s_%s' % (prefix, self._compose(key, variables)))
                        except Exception as e:
                            if strict:
                                raise AnsibleOptionsError("Could not generate group on %s: %s" % (key, to_native(e)))
                            continue
                        if isinstance(groups, string_types):
                            groups = [groups]
                        if isinstance(groups, list):
                            for group_name in groups:
                                if group_name not in self.inventory.groups:
                                    self.inventory.add_group(group_name)
                                self.inventory.add_child(group_name, host)
                        else:
                            raise AnsibleOptionsError("Invalid group name format, expected string or list of strings, got: %s" % type(groups))
                    else:
                        raise AnsibleOptionsError("No key supplied, invalid entry")
                else:
                    raise AnsibleOptionsError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
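# Worked example of _add_host_to_keyed_groups above: with inventory
# variables {'ansible_distribution': 'Ubuntu'} and the keyed entry below,
# the composed key evaluates to 'Ubuntu' and the host lands in group
# 'os_Ubuntu' (modulo to_safe_group_name sanitization). Values illustrative.
keyed = {'prefix': 'os', 'key': 'ansible_distribution'}
composed_key = 'Ubuntu'  # result of self._compose(keyed['key'], variables)
group_name = '%s_%s' % (keyed['prefix'], composed_key)
assert group_name == 'os_Ubuntu'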
def ansible_template(basedir, varname, templatevars, **kwargs): dl = DataLoader() dl.set_basedir(basedir) templar = Templar(dl, variables=templatevars) return templar.template(varname, **kwargs)
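# Example use of the helper above; the template string and variables are
# illustrative. With a basedir of '.' the DataLoader resolves relative
# paths from the current directory.
rendered = ansible_template(
    basedir='.',
    varname='{{ greeting }}, {{ name }}!',
    templatevars=dict(greeting='Hello', name='world'),
)
# rendered == 'Hello, world!'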
def run(self, iterator, play_context): ''' The "free" strategy is a bit more complex, in that it allows tasks to be sent to hosts as quickly as they can be processed. This means that some hosts may finish very quickly if run tasks result in little or no work being done versus other systems. The algorithm used here also tries to be more "fair" when iterating through hosts by remembering the last host in the list to be given a task and starting the search from there as opposed to the top of the hosts list again, which would end up favoring hosts near the beginning of the list. ''' # the last host to be given a task last_host = 0 result = self._tqm.RUN_OK work_to_do = True while work_to_do and not self._tqm._terminated: hosts_left = self.get_hosts_left(iterator) if len(hosts_left) == 0: self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') result = False break work_to_do = False # assume we have no more work to do starting_host = last_host # save current position so we know when we've looped back around and need to break # try and find an unblocked host with a task to run host_results = [] while True: host = hosts_left[last_host] display.debug("next free host: %s" % host) host_name = host.get_name() # peek at the next task for the host, to see if there's # anything to do for this host (state, task) = iterator.get_next_task_for_host(host, peek=True) display.debug("free host state: %s" % state) display.debug("free host task: %s" % task) if host_name not in self._tqm._unreachable_hosts and task: # set the flag so the outer loop knows we've still found # some work which needs to be done work_to_do = True display.debug("this host has work to do") # check to see if this host is blocked (still executing a previous task) if host_name not in self._blocked_hosts or not self._blocked_hosts[ host_name]: # pop the task, mark the host blocked, and queue it self._blocked_hosts[host_name] = True (state, task) = iterator.get_next_task_for_host(host) try: action = action_loader.get(task.action, class_only=True) except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin action = None display.debug("getting variables") task_vars = self._variable_manager.get_vars( play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") try: task.name = to_text(templar.template( task.name, fail_on_undefined=False), nonstring='empty') display.debug("done templating") except: # just ignore any errors during task name templating, # we don't care if it just shows the raw name display.debug("templating failed for some reason") run_once = templar.template( task.run_once) or action and getattr( action, 'BYPASS_HOST_LOOP', False) if run_once: if action and getattr(action, 'BYPASS_HOST_LOOP', False): raise AnsibleError( "The '%s' module bypasses the host loop, which is currently not supported in the free strategy " "and would instead execute for every host in the inventory list." % task.action, obj=task._ds) else: display.warning( "Using run_once with the free strategy is not currently supported. This task will still be " "executed for every host in the inventory list."
) # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(host): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: display.debug( "'%s' skipped because role has already run" % task) del self._blocked_hosts[host_name] continue if task.action == 'meta': self._execute_meta(task, play_context, iterator, target_host=host) self._blocked_hosts[host_name] = False else: # handle step if needed, skip meta actions as they are used internally if not self._step or self._take_step( task, host_name): if task.any_errors_fatal: display.warning( "Using any_errors_fatal with the free strategy is not supported, " "as tasks are executed independently on each host" ) self._tqm.send_callback( 'v2_playbook_on_task_start', task, is_conditional=False) self._queue_task(host, task, task_vars, play_context) del task_vars else: display.debug("%s is blocked, skipping for now" % host_name) # move on to the next host and make sure we # haven't gone past the end of our hosts list last_host += 1 if last_host > len(hosts_left) - 1: last_host = 0 # if we've looped around back to the start, break out if last_host == starting_host: break results = self._process_pending_results(iterator) host_results.extend(results) self.update_active_connections(results) try: included_files = IncludedFile.process_include_results( host_results, self._tqm, iterator=iterator, inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError as e: return self._tqm.RUN_ERROR if len(included_files) > 0: all_blocks = dict((host, []) for host in hosts_left) for included_file in included_files: display.debug("collecting new blocks for %s" % included_file) try: if included_file._is_role: new_ir = included_file._task.copy() new_ir.vars.update(included_file._args) new_blocks, handler_blocks = new_ir.get_block_list( play=iterator._play, variable_manager=self._variable_manager, loader=self._loader, ) self._tqm.update_handler_list([ handler for handler_block in handler_blocks for handler in handler_block.block ]) else: new_blocks = self._load_included_file( included_file, iterator=iterator) except AnsibleError as e: for host in included_file._hosts: iterator.mark_host_failed(host) display.warning(str(e)) continue for new_block in new_blocks: task_vars = self._variable_manager.get_vars( play=iterator._play, task=included_file._task) final_block = new_block.filter_tagged_tasks( play_context, task_vars) for host in hosts_left: if host in included_file._hosts: all_blocks[host].append(final_block) display.debug("done collecting new blocks for %s" % included_file) display.debug( "adding all collected blocks from %d included file(s) to iterator" % len(included_files)) for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) display.debug("done adding collected blocks to iterator") # pause briefly so we don't spin lock time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL) # collect all the final results results = self._wait_on_pending_results(iterator) # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered return super(StrategyModule, self).run(iterator, play_context, result)
def _get_delegated_vars(self, loader, play, task, existing_variables): # we unfortunately need to template the delegate_to field here, # as we're fetching vars before post_validate has been called on # the task that has been passed in vars_copy = existing_variables.copy() templar = Templar(loader=loader, variables=vars_copy) items = [] if task.loop is not None: if task.loop in lookup_loader: #TODO: remove convert_bare true and deprecate this in with_ try: loop_terms = listify_lookup_plugin_terms( terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: if 'has no attribute' in str(e): loop_terms = [] self._display.deprecated( "Skipping task due to undefined attribute, in the future this will be a fatal error." ) else: raise items = lookup_loader.get(task.loop, loader=loader, templar=templar).run( terms=loop_terms, variables=vars_copy) else: raise AnsibleError( "Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop) else: items = [None] delegated_host_vars = dict() for item in items: # update the variables with the item value for templating, in case we need it if item is not None: vars_copy['item'] = item templar.set_available_variables(vars_copy) delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False) if delegated_host_name in delegated_host_vars: # no need to repeat ourselves, as the delegate_to value # does not appear to be tied to the loop item variable continue # a dictionary of variables to use if we have to create a new host below new_delegated_host_vars = dict( ansible_host=delegated_host_name, ansible_user=C.DEFAULT_REMOTE_USER, ansible_connection=C.DEFAULT_TRANSPORT, ) # now try to find the delegated-to host in inventory, or failing that, # create a new host on the fly so we can fetch variables for it delegated_host = None if self._inventory is not None: delegated_host = self._inventory.get_host(delegated_host_name) # try looking it up based on the address field, and finally # fall back to creating a host on the fly to use for the var lookup if delegated_host is None: for h in self._inventory.get_hosts( ignore_limits_and_restrictions=True): # check if the address matches, or if both the delegated_to host # and the current host are in the list of localhost aliases if h.address == delegated_host_name or h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST: delegated_host = h break else: delegated_host = Host(name=delegated_host_name) delegated_host.vars.update(new_delegated_host_vars) else: delegated_host = Host(name=delegated_host_name) delegated_host.vars.update(new_delegated_host_vars) # now we go fetch the vars for the delegated-to host and save them in our # master dictionary of variables to be used later in the TaskExecutor/PlayContext delegated_host_vars[delegated_host_name] = self.get_vars( loader=loader, play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=False, ) return delegated_host_vars
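# The per-name cache in _get_delegated_vars above, reduced to its core: once
# the templated delegate_to resolves to a name we have already processed,
# the remaining loop items are skipped. Illustrative stand-ins throughout.
def resolve_delegated(names_per_item, fetch_vars):
    delegated = {}
    for name in names_per_item:
        if name in delegated:
            continue  # delegate_to does not depend on this loop item
        delegated[name] = fetch_vars(name)
    return delegated

result = resolve_delegated(['db1', 'db1', 'db2'], lambda n: {'ansible_host': n})
assert sorted(result) == ['db1', 'db2']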
class TestTemplar(unittest.TestCase): def setUp(self): fake_loader = DictDataLoader({ "/path/to/my_file.txt": "foo\n", }) shared_loader = SharedPluginLoaderObj() variables = dict( foo="bar", bam="{{foo}}", num=1, var_true=True, var_false=False, var_dict=dict(a="b"), bad_dict="{a='b'", var_list=[1], recursive="{{recursive}}", ) self.templar = Templar(loader=fake_loader, variables=variables) def tearDown(self): pass def test_templar_simple(self): templar = self.templar # test some basic templating self.assertEqual(templar.template("{{foo}}"), "bar") self.assertEqual(templar.template("{{foo}}\n"), "bar\n") self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n") self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar") self.assertEqual(templar.template("foo", convert_bare=True), "bar") self.assertEqual(templar.template("{{bam}}"), "bar") self.assertEqual(templar.template("{{num}}"), 1) self.assertEqual(templar.template("{{var_true}}"), True) self.assertEqual(templar.template("{{var_false}}"), False) self.assertEqual(templar.template("{{var_dict}}"), dict(a="b")) self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'") self.assertEqual(templar.template("{{var_list}}"), [1]) self.assertEqual(templar.template(1, convert_bare=True), 1) # force errors self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}") self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}") self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}") self.assertRaises(AnsibleError, templar.template, "{{recursive}}") self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}") # test with fail_on_undefined=False self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}") # test set_available_variables() templar.set_available_variables(variables=dict(foo="bam")) self.assertEqual(templar.template("{{foo}}"), "bam") # variables must be a dict() for set_available_variables() self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam") def test_templar_escape_backslashes(self): # Rule of thumb: If escape backslashes is True you should end up with # the same number of backslashes as when you started. self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar") self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar") self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar") self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar") self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t") self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t") self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t") self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t") self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t") self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t") def test_template_jinja2_extensions(self): fake_loader = DictDataLoader({}) templar = Templar(loader=fake_loader) old_exts = C.DEFAULT_JINJA2_EXTENSIONS try: C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar" self.assertEqual(templar._get_extensions(), ['foo', 'bar']) finally: C.DEFAULT_JINJA2_EXTENSIONS = old_exts
def _execute(self, variables=None):
    '''
    The primary workhorse of the executor system, this runs the task
    on the specified host (which may be the delegated_to host) and handles
    the retry/until and block rescue/always execution
    '''
    if variables is None:
        variables = self._job_vars

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

    # apply the given task's information to the connection info,
    # which may override some fields already set by the play or
    # the options specified on the command line
    self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

    # fields set from the play/task may be based on variables, so we have to
    # do the same kind of post validation step on it here before we use it.
    # We also add "magic" variables back into the variables dict to make sure
    # a certain subset of variables exist.
    self._play_context.update_vars(variables)
    self._play_context.post_validate(templar=templar)

    # Evaluate the conditional (if any) for this task, which we do before running
    # the final task post-validation. We do this before the post validation due to
    # the fact that the conditional may specify that the task be skipped due to a
    # variable not being present which would otherwise cause validation to fail
    try:
        if not self._task.evaluate_conditional(templar, variables):
            self._display.debug("when evaluation failed, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
    except AnsibleError:
        # skip conditional exception in the case of includes as the vars needed
        # might not be available except in the included tasks or due to tags
        if self._task.action != 'include':
            raise

    # if this task is a TaskInclude, we just return now with a success code so the
    # main thread can expand the task list for the given host
    if self._task.action == 'include':
        include_variables = self._task.args.copy()
        include_file = include_variables.pop('_raw_params', None)
        if not include_file:
            return dict(failed=True, msg="No include file was specified to the include")
        include_file = templar.template(include_file)
        return dict(include=include_file, include_variables=include_variables)

    # Now we do final validation on the task, which sets all fields to their final values.
    # In the case of debug tasks, we save any 'var' params and restore them after validating
    # so that variables are not replaced too early.
    prev_var = None
    if self._task.action == 'debug' and 'var' in self._task.args:
        prev_var = self._task.args.pop('var')

    self._task.post_validate(templar=templar)
    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    if prev_var is not None:
        self._task.args['var'] = prev_var

    # get the connection and the handler for this execution
    self._connection = self._get_connection(variables=variables, templar=templar)
    self._connection.set_host_overrides(host=self._host)

    self._handler = self._get_action_handler(connection=self._connection, templar=templar)

    # And filter out any fields which were set to default(omit), and got the omit token value
    omit_token = variables.get('omit')
    if omit_token is not None:
        self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

    # Read some values from the task, so that we can modify them if need be
    retries = self._task.retries
    if retries <= 0:
        retries = 1

    delay = self._task.delay
    if delay < 0:
        delay = 1

    # make a copy of the job vars here, in case we need to update them
    # with the registered variable value later on when testing conditions
    vars_copy = variables.copy()

    self._display.debug("starting attempt loop")
    result = None
    for attempt in range(retries):
        if attempt > 0:
            self._display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries - attempt, result), color="red")
            result['attempts'] = attempt + 1

        self._display.debug("running the handler")
        try:
            result = self._handler.run(task_vars=variables)
        except AnsibleConnectionFailure as e:
            return dict(unreachable=True, msg=str(e))
        self._display.debug("handler run complete")

        if self._task.async > 0:
            # the async_wrapper module returns dumped JSON via its stdout
            # response, so we parse it here and replace the result
            try:
                result = json.loads(result.get('stdout'))
            except (TypeError, ValueError) as e:
                return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))

            if self._task.poll > 0:
                result = self._poll_async_result(result=result, templar=templar)

        # update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        if self._task.register:
            vars_copy[self._task.register] = result

        if 'ansible_facts' in result:
            vars_copy.update(result['ansible_facts'])

        # create a conditional object to evaluate task conditions
        cond = Conditional(loader=self._loader)

        def _evaluate_changed_when_result(result):
            if self._task.changed_when is not None:
                cond.when = [self._task.changed_when]
                result['changed'] = cond.evaluate_conditional(templar, vars_copy)

        def _evaluate_failed_when_result(result):
            if self._task.failed_when is not None:
                cond.when = [self._task.failed_when]
                failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                result['failed_when_result'] = result['failed'] = failed_when_result
                return failed_when_result
            return False

        if self._task.until:
            cond.when = self._task.until
            if cond.evaluate_conditional(templar, vars_copy):
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)
                break
        elif (self._task.changed_when is not None or self._task.failed_when is not None) and 'skipped' not in result:
            _evaluate_changed_when_result(result)
            if _evaluate_failed_when_result(result):
                break
        elif 'failed' not in result:
            if result.get('rc', 0) != 0:
                result['failed'] = True
            else:
                # if the result is not failed, stop trying
                break

        if attempt < retries - 1:
            time.sleep(delay)
        else:
            _evaluate_changed_when_result(result)
            _evaluate_failed_when_result(result)
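A standalone sketch of the retry loop's control flow, assuming hypothetical run() and until() callables rather than the real handler and Conditional objects: up to `retries` attempts, early exit once the condition holds, and a sleep between attempts but not after the last one.

import time

def run_with_retries(run, until, retries=3, delay=0):
    # mirrors the control flow only; the real code also evaluates
    # changed_when/failed_when and handles async results per attempt
    result = None
    for attempt in range(retries):
        result = run()
        if until(result):
            break
        if attempt < retries - 1:
            time.sleep(delay)
    result["attempts"] = attempt + 1
    return result

calls = iter([{"rc": 1}, {"rc": 0}])
out = run_with_retries(lambda: next(calls), lambda r: r["rc"] == 0)
assert out == {"rc": 0, "attempts": 2}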
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). The order of precedence is: - play->roles->get_default_vars (if there is a play context) - group_vars_files[host] (if there is a host context) - host_vars_files[host] (if there is a host context) - host->get_vars (if there is a host context) - fact_cache[host] (if there is a host context) - play vars (if there is a play context) - play vars_files (if there's no host context, ignore file names that cannot be templated) - task->get_vars (if there is a task context) - vars_cache[host] (if there is a host context) - extra vars ''' display.debug("in VariableManager get_vars()") cache_entry = self._get_cache_entry(play=play, host=host, task=task) if cache_entry in VARIABLE_CACHE and use_cache: display.debug("vars are cached, returning them now") return VARIABLE_CACHE[cache_entry] all_vars = dict() magic_variables = self._get_magic_variables( loader=loader, play=play, host=host, task=task, include_hostvars=include_hostvars, include_delegate_to=include_delegate_to, ) if play: # first we compile any vars specified in defaults/main.yml # for all roles within the specified play for role in play.get_roles(): all_vars = combine_vars(all_vars, role.get_default_vars()) # if we have a task in this context, and that task has a role, make # sure it sees its defaults above any other roles, as we previously # (v1) made sure each task had a copy of its roles default vars if task and task._role is not None and (play or task.action == 'include_role'): all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain())) if host: # first we merge in vars from groups specified in the inventory (INI or script) all_vars = combine_vars(all_vars, host.get_group_vars()) # next, we load any vars from group_vars files and then any vars from host_vars # files which may apply to this host or the groups it belongs to. 
We merge in the # special 'all' group_vars first, if they exist if 'all' in self._group_vars_files: data = preprocess_vars(self._group_vars_files['all']) for item in data: all_vars = combine_vars(all_vars, item) for group in sorted(host.get_groups(), key=lambda g: (g.depth, g.name)): if group.name in self._group_vars_files and group.name != 'all': for data in self._group_vars_files[group.name]: data = preprocess_vars(data) for item in data: all_vars = combine_vars(all_vars, item) # then we merge in vars from the host specified in the inventory (INI or script) all_vars = combine_vars(all_vars, host.get_vars()) # then we merge in the host_vars/<hostname> file, if it exists host_name = host.get_name() if host_name in self._host_vars_files: for data in self._host_vars_files[host_name]: data = preprocess_vars(data) for item in data: all_vars = combine_vars(all_vars, item) # finally, the facts caches for this host, if it exists try: host_facts = wrap_var(self._fact_cache.get(host.name, dict())) all_vars = combine_vars(all_vars, host_facts) except KeyError: pass if play: all_vars = combine_vars(all_vars, play.get_vars()) for vars_file_item in play.get_vars_files(): # create a set of temporary vars here, which incorporate the extra # and magic vars so we can properly template the vars_files entries temp_vars = combine_vars(all_vars, self._extra_vars) temp_vars = combine_vars(temp_vars, magic_variables) templar = Templar(loader=loader, variables=temp_vars) # we assume each item in the list is itself a list, as we # support "conditional includes" for vars_files, which mimics # the with_first_found mechanism. vars_file_list = vars_file_item if not isinstance(vars_file_list, list): vars_file_list = [ vars_file_list ] # now we iterate through the (potential) files, and break out # as soon as we read one from the list. If none are found, we # raise an error, which is silently ignored at this point. 
try: for vars_file in vars_file_list: vars_file = templar.template(vars_file) try: data = preprocess_vars(loader.load_from_file(vars_file)) if data is not None: for item in data: all_vars = combine_vars(all_vars, item) break except AnsibleFileNotFound: # we continue on loader failures continue except AnsibleParserError: raise else: # if include_delegate_to is set to False, we ignore the missing # vars file here because we're working on a delegated host if include_delegate_to: raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item) except (UndefinedError, AnsibleUndefinedVariable): if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None: raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item) else: # we do not have a full context here, and the missing variable could be # because of that, so just show a warning and continue display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item) continue # By default, we now merge in all vars from all roles in the play, # unless the user has disabled this via a config option if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): all_vars = combine_vars(all_vars, role.get_vars(include_params=False)) # next, we merge in the vars from the role, which will specifically # follow the role dependency chain, and then we merge in the tasks # vars (which will look at parent blocks/task includes) if task: if task._role: all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False)) all_vars = combine_vars(all_vars, task.get_vars()) # next, we merge in the vars cache (include vars) and nonpersistent # facts cache (set_fact/register), in that order if host: all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict())) all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict())) # next, we merge in role params and task include params if task: if task._role: all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain())) # special case for include tasks, where the include params # may be specified in the vars field for the task, which should # have higher precedence than the vars/np facts above all_vars = combine_vars(all_vars, task.get_include_params()) # finally, we merge in extra vars and the magic variables all_vars = combine_vars(all_vars, self._extra_vars) all_vars = combine_vars(all_vars, magic_variables) # special case for the 'environment' magic variable, as someone # may have set it as a variable and we don't want to stomp on it if task: if 'environment' not in all_vars: all_vars['environment'] = task.environment else: display.warning("The variable 'environment' appears to be used already, which is also used internally for environment variables set on the task/block/play. You should use a different variable name to avoid conflicts with this internal variable") # if we have a task and we're delegating to another host, figure out the # variables for that host now so we don't have to rely on hostvars later if task and task.delegate_to is not None and include_delegate_to: all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars) #VARIABLE_CACHE[cache_entry] = all_vars if task or play: all_vars['vars'] = all_vars.copy() display.debug("done with get_vars()") return all_vars
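The repeated combine_vars calls implement the lowest-to-highest precedence order from the docstring; a stand-in combine_vars (a plain right-biased dict merge, ignoring Ansible's hash_behaviour=merge mode) is enough to show why extra vars always win.

def combine_vars(a, b):
    # right-biased merge: keys in b override keys in a
    merged = a.copy()
    merged.update(b)
    return merged

layers = [
    {"x": "role_default", "y": 1},  # role defaults (lowest precedence)
    {"x": "group_vars"},            # inventory group vars
    {"x": "host_vars"},             # inventory host vars
    {"x": "extra_vars"},            # -e extra vars (highest precedence)
]
all_vars = {}
for layer in layers:
    all_vars = combine_vars(all_vars, layer)
assert all_vars == {"x": "extra_vars", "y": 1}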
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''
    ret_results = []

    while not self._final_q.empty() and not self._tqm._terminated:
        try:
            result = self._final_q.get()
            display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

            # helper method, used to find the original host from the one
            # returned in the result/message, which has been serialized and
            # thus had some information stripped from it to speed up the
            # serialization process
            def get_original_host(host):
                if host.name in self._inventory._hosts_cache:
                    return self._inventory._hosts_cache[host.name]
                else:
                    return self._inventory.get_host(host.name)

            # all host status messages contain 2 entries: (msg, task_result)
            if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                task_result = result[1]
                host = get_original_host(task_result._host)
                task = task_result._task
                if result[0] == 'host_task_failed' or task_result.is_failed():
                    if not task.ignore_errors:
                        display.debug("marking %s as failed" % host.name)
                        if task.run_once:
                            # if we're using run_once, we have to fail every host here
                            [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
                        else:
                            iterator.mark_host_failed(host)

                        # only add the host to the failed list officially if it has
                        # been failed by the iterator
                        if iterator.is_failed(host):
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        else:
                            # otherwise, we grab the current state and if we're iterating on
                            # the rescue portion of a block then we save the failed task in a
                            # special var for use within the rescue/always
                            state, _ = iterator.get_next_task_for_host(host, peek=True)
                            if state.run_state == iterator.ITERATING_RESCUE:
                                original_task = iterator.get_original_task(host, task)
                                self._variable_manager.set_nonpersistent_facts(
                                    host,
                                    dict(
                                        ansible_failed_task=original_task.serialize(),
                                        ansible_failed_result=task_result._result,
                                    ),
                                )
                    else:
                        self._tqm._stats.increment('ok', host.name)
                    self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                elif result[0] == 'host_unreachable':
                    self._tqm._unreachable_hosts[host.name] = True
                    self._tqm._stats.increment('dark', host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                elif result[0] == 'host_task_skipped':
                    self._tqm._stats.increment('skipped', host.name)
                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                elif result[0] == 'host_task_ok':
                    if task.action != 'include':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                self._pending_results -= 1
                if host.name in self._blocked_hosts:
                    del self._blocked_hosts[host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                        if role_obj._uuid == task_result._task._role._uuid:
                            role_obj._had_task_run[host.name] = True

                ret_results.append(task_result)

            elif result[0] == 'add_host':
                result_item = result[1]
                new_host_info = result_item.get('add_host', dict())
                self._add_host(new_host_info, iterator)

            elif result[0] == 'add_group':
                host = get_original_host(result[1])
                result_item = result[2]
                self._add_group(host, result_item)

            elif result[0] == 'notify_handler':
                task_result = result[1]
                handler_name = result[2]
                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(original_host, task_result._task)
                if handler_name not in self._notified_handlers:
                    self._notified_handlers[handler_name] = []
                if original_host not in self._notified_handlers[handler_name]:
                    self._notified_handlers[handler_name].append(original_host)
                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))

            elif result[0] == 'register_host_var':
                # essentially the same as 'set_host_var' below, however we
                # never follow the delegate_to value for registered vars and
                # the variable goes in the fact_cache
                host = get_original_host(result[1])
                task = result[2]
                var_value = wrap_var(result[3])
                var_name = task.register

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [host]

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

            elif result[0] in ('set_host_var', 'set_host_facts'):
                host = get_original_host(result[1])
                task = result[2]
                item = result[3]

                # find the host we're actually referring to here, which may
                # be a host that is not really in inventory at all
                if task.delegate_to is not None and task.delegate_facts:
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    loop_var = 'item'
                    if task.loop_control:
                        loop_var = task.loop_control.loop_var or 'item'
                    if item is not None:
                        task_vars[loop_var] = item
                    templar = Templar(loader=self._loader, variables=task_vars)
                    host_name = templar.template(task.delegate_to)
                    actual_host = self._inventory.get_host(host_name)
                    if actual_host is None:
                        actual_host = Host(name=host_name)
                else:
                    actual_host = host

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [actual_host]

                if result[0] == 'set_host_var':
                    var_name = result[4]
                    var_value = result[5]
                    for target_host in host_list:
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                elif result[0] == 'set_host_facts':
                    facts = result[4]
                    for target_host in host_list:
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(target_host, facts.copy())
                        else:
                            self._variable_manager.set_host_facts(target_host, facts.copy())

            elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
                self._tqm.send_callback(result[0], result[1])
            elif result[0] == 'v2_on_file_diff':
                if self._diff:
                    self._tqm.send_callback('v2_on_file_diff', result[1])
            else:
                raise AnsibleError("unknown result message received: %s" % result[0])

        except Queue.Empty:
            time.sleep(0.005)

        if one_pass:
            break

    return ret_results
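A sketch of the run_once fan-out used by the 'register_host_var' and 'set_host_var'/'set_host_facts' branches above, with plain strings standing in for Host objects: a value produced by a run_once task is written to every host that is still reachable, not just the host that ran it.

def fan_out_hosts(all_hosts, unreachable, actual_host, run_once):
    if run_once:
        # every reachable host in the play gets the value
        return [h for h in all_hosts if h not in unreachable]
    return [actual_host]

assert fan_out_hosts(["a", "b", "c"], {"c"}, "a", run_once=True) == ["a", "b"]
assert fan_out_hosts(["a", "b", "c"], {"c"}, "a", run_once=False) == ["a"]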
def _get_delegated_vars(self, play, task, existing_variables): if not hasattr(task, 'loop'): # This "task" is not a Task, so we need to skip it return {}, None # we unfortunately need to template the delegate_to field here, # as we're fetching vars before post_validate has been called on # the task that has been passed in vars_copy = existing_variables.copy() templar = Templar(loader=self._loader, variables=vars_copy) items = [] has_loop = True if task.loop_with is not None: if task.loop_with in lookup_loader: try: loop_terms = listify_lookup_plugin_terms( terms=task.loop, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=False) items = wrap_var( lookup_loader.get(task.loop_with, loader=self._loader, templar=templar).run( terms=loop_terms, variables=vars_copy)) except AnsibleTemplateError: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: raise AnsibleError( "Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with) elif task.loop is not None: try: items = templar.template(task.loop) except AnsibleTemplateError: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: has_loop = False items = [None] delegated_host_vars = dict() item_var = getattr(task.loop_control, 'loop_var', 'item') cache_items = False for item in items: # update the variables with the item value for templating, in case we need it if item is not None: vars_copy[item_var] = item templar.available_variables = vars_copy delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False) if delegated_host_name != task.delegate_to: cache_items = True if delegated_host_name is None: raise AnsibleError( message="Undefined delegate_to host for task:", obj=task._ds) if not isinstance(delegated_host_name, string_types): raise AnsibleError( message= "the field 'delegate_to' has an invalid type (%s), and could not be" " converted to a string type." 
% type(delegated_host_name), obj=task._ds) if delegated_host_name in delegated_host_vars: # no need to repeat ourselves, as the delegate_to value # does not appear to be tied to the loop item variable continue # a dictionary of variables to use if we have to create a new host below # we set the default port based on the default transport here, to make sure # we use the proper default for windows new_port = C.DEFAULT_REMOTE_PORT if C.DEFAULT_TRANSPORT == 'winrm': new_port = 5986 new_delegated_host_vars = dict( ansible_delegated_host=delegated_host_name, ansible_host= delegated_host_name, # not redundant as other sources can change ansible_host ansible_port=new_port, ansible_user=C.DEFAULT_REMOTE_USER, ansible_connection=C.DEFAULT_TRANSPORT, ) # now try to find the delegated-to host in inventory, or failing that, # create a new host on the fly so we can fetch variables for it delegated_host = None if self._inventory is not None: delegated_host = self._inventory.get_host(delegated_host_name) # try looking it up based on the address field, and finally # fall back to creating a host on the fly to use for the var lookup if delegated_host is None: if delegated_host_name in C.LOCALHOST: delegated_host = self._inventory.localhost else: for h in self._inventory.get_hosts( ignore_limits=True, ignore_restrictions=True): # check if the address matches, or if both the delegated_to host # and the current host are in the list of localhost aliases if h.address == delegated_host_name: delegated_host = h break else: delegated_host = Host(name=delegated_host_name) delegated_host.vars = combine_vars( delegated_host.vars, new_delegated_host_vars) else: delegated_host = Host(name=delegated_host_name) delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars) # now we go fetch the vars for the delegated-to host and save them in our # master dictionary of variables to be used later in the TaskExecutor/PlayContext delegated_host_vars[delegated_host_name] = self.get_vars( play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=False, ) _ansible_loop_cache = None if has_loop and cache_items: # delegate_to templating produced a change, so we will cache the templated items # in a special private hostvar # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor # which may reprocess the loop _ansible_loop_cache = items return delegated_host_vars, _ansible_loop_cache
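A sketch of the cache_items decision above: the loop items are stashed in the private _ansible_loop_cache only when templating delegate_to actually consumed the loop variable, i.e. some rendered name differs from the raw field.

def needs_loop_cache(delegate_to, rendered_names):
    # if rendering never changed the value, delegate_to ignored the loop
    return any(name != delegate_to for name in rendered_names)

assert needs_loop_cache("{{ item }}", ["web1", "web2"])
assert not needs_loop_cache("web1", ["web1", "web1"])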
def _execute(self, variables=None): if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) context_validation_error = None try: # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. self._play_context.post_validate(templar=templar) # now that the play context is finalized, if the remote_addr is not set # default to using the host's address field as the remote address if not self._play_context.remote_addr: self._play_context.remote_addr = self._host.address # We also add "magic" variables back into the variables dict to make sure # a certain subset of variables exist. self._play_context.update_vars(variables) # FIXME: update connection/shell plugin options except AnsibleError as e: # save the error, which we'll raise later if we don't end up # skipping this task during the conditional evaluation step context_validation_error = e # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail try: if not self._task.evaluate_conditional(templar, variables): display.debug("when evaluation is False, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log) except AnsibleError: # loop error takes precedence if self._loop_eval_error is not None: raise self._loop_eval_error # pylint: disable=raising-bad-type # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags if self._task.action not in ['include', 'include_tasks', 'include_role']: raise # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task if self._loop_eval_error is not None: raise self._loop_eval_error # pylint: disable=raising-bad-type # if we ran into an error while setting up the PlayContext, raise it now if context_validation_error is not None: raise context_validation_error # pylint: disable=raising-bad-type # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action in ('include', 'include_tasks'): include_variables = self._task.args.copy() include_file = include_variables.pop('_raw_params', None) if not include_file: return dict(failed=True, msg="No include file was specified to the include") include_file = templar.template(include_file) return dict(include=include_file, include_variables=include_variables) # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host elif self._task.action == 'include_role': include_variables = self._task.args.copy() return dict(include_variables=include_variables) # Now we do final validation on the task, which sets all fields to their final values. 
    self._task.post_validate(templar=templar)
    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts", version="2.6")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    if taskPolicy.is_deny(self._task):
        self._task = copy.deepcopy(self._task)
        self._task.action = "fail"
        self._task.args = {"msg": "Found forbidden tasks"}

    return ansibleTaskExecutorExecute(self, variables)
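A sketch of the deferred-error pattern in the chunk above, with a generic Exception standing in for AnsibleError: a failure while building the play context is held until after the when conditional is evaluated, so a task that would be skipped anyway never surfaces the context error.

def execute(build_context, evaluate_when, run):
    context_error = None
    ctx = None
    try:
        ctx = build_context()
    except Exception as err:  # the real code catches AnsibleError only
        context_error = err
    if not evaluate_when():
        return {"skipped": True}
    if context_error is not None:
        raise context_error
    return run(ctx)

# a broken context never raises when the task is skipped anyway
assert execute(lambda: 1 / 0, lambda: False, lambda ctx: {"ok": True}) == {"skipped": True}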
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True, _hosts=None, _hosts_all=None, stage='task'): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). The order of precedence is: - play->roles->get_default_vars (if there is a play context) - group_vars_files[host] (if there is a host context) - host_vars_files[host] (if there is a host context) - host->get_vars (if there is a host context) - fact_cache[host] (if there is a host context) - play vars (if there is a play context) - play vars_files (if there's no host context, ignore file names that cannot be templated) - task->get_vars (if there is a task context) - vars_cache[host] (if there is a host context) - extra vars ``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying on the functionality they provide. These arguments may be removed at a later date without a deprecation period and without warning. ''' display.debug("in VariableManager get_vars()") all_vars = dict() magic_variables = self._get_magic_variables( play=play, host=host, task=task, include_hostvars=include_hostvars, include_delegate_to=include_delegate_to, _hosts=_hosts, _hosts_all=_hosts_all, ) # default for all cases basedirs = [] if self.safe_basedir: # avoid adhoc/console loading cwd basedirs = [self._loader.get_basedir()] if play: # first we compile any vars specified in defaults/main.yml # for all roles within the specified play for role in play.get_roles(): all_vars = combine_vars(all_vars, role.get_default_vars()) if task: # set basedirs if C.PLAYBOOK_VARS_ROOT == 'all': # should be default basedirs = task.get_search_path() elif C.PLAYBOOK_VARS_ROOT in ( 'bottom', 'playbook_dir'): # only option in 2.4.0 basedirs = [task.get_search_path()[0]] elif C.PLAYBOOK_VARS_ROOT != 'top': # preserves default basedirs, only option pre 2.3 raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT) # if we have a task in this context, and that task has a role, make # sure it sees its defaults above any other roles, as we previously # (v1) made sure each task had a copy of its roles default vars if task._role is not None and (play or task.action == 'include_role'): all_vars = combine_vars( all_vars, task._role.get_default_vars( dep_chain=task.get_dep_chain())) if host: # THE 'all' group and the rest of groups for a host, used below all_group = self._inventory.groups.get('all') host_groups = sort_groups( [g for g in host.get_groups() if g.name not in ['all']]) def _get_plugin_vars(plugin, path, entities): data = {} try: data = plugin.get_vars(self._loader, path, entities) except AttributeError: try: for entity in entities: if isinstance(entity, Host): data.update(plugin.get_host_vars(entity.name)) else: data.update(plugin.get_group_vars(entity.name)) except AttributeError: if hasattr(plugin, 'run'): raise AnsibleError( "Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path)) else: raise AnsibleError( "Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path)) return data # internal fuctions that actually do the work def _plugins_inventory(entities): ''' merges all entities by inventory source ''' return get_vars_from_inventory_sources( self._loader, self._inventory._sources, entities, stage) def _plugins_play(entities): ''' merges all 
entities adjacent to play ''' data = {} for path in basedirs: data = combine_vars( data, get_vars_from_path(self._loader, path, entities, stage)) return data # configurable functions that are sortable via config, rememer to add to _ALLOWED if expanding this list def all_inventory(): return all_group.get_vars() def all_plugins_inventory(): return _plugins_inventory([all_group]) def all_plugins_play(): return _plugins_play([all_group]) def groups_inventory(): ''' gets group vars from inventory ''' return get_group_vars(host_groups) def groups_plugins_inventory(): ''' gets plugin sources from inventory for groups ''' return _plugins_inventory(host_groups) def groups_plugins_play(): ''' gets plugin sources from play for groups ''' return _plugins_play(host_groups) def plugins_by_groups(): ''' merges all plugin sources by group, This should be used instead, NOT in combination with the other groups_plugins* functions ''' data = {} for group in host_groups: data[group] = combine_vars(data[group], _plugins_inventory(group)) data[group] = combine_vars(data[group], _plugins_play(group)) return data # Merge groups as per precedence config # only allow to call the functions we want exposed for entry in C.VARIABLE_PRECEDENCE: if entry in self._ALLOWED: display.debug('Calling %s to load vars for %s' % (entry, host.name)) all_vars = combine_vars(all_vars, locals()[entry]()) else: display.warning( 'Ignoring unknown variable precedence entry: %s' % (entry)) # host vars, from inventory, inventory adjacent and play adjacent via plugins all_vars = combine_vars(all_vars, host.get_vars()) all_vars = combine_vars(all_vars, _plugins_inventory([host])) all_vars = combine_vars(all_vars, _plugins_play([host])) # finally, the facts caches for this host, if it exists # TODO: cleaning of facts should eventually become part of taskresults instead of vars try: facts = wrap_var(self._fact_cache.get(host.name, {})) all_vars.update(namespace_facts(facts)) # push facts to main namespace if C.INJECT_FACTS_AS_VARS: all_vars = combine_vars(all_vars, wrap_var(clean_facts(facts))) else: # always 'promote' ansible_local all_vars = combine_vars( all_vars, wrap_var( {'ansible_local': facts.get('ansible_local', {})})) except KeyError: pass if play: all_vars = combine_vars(all_vars, play.get_vars()) vars_files = play.get_vars_files() try: for vars_file_item in vars_files: # create a set of temporary vars here, which incorporate the extra # and magic vars so we can properly template the vars_files entries temp_vars = combine_vars(all_vars, self._extra_vars) temp_vars = combine_vars(temp_vars, magic_variables) templar = Templar(loader=self._loader, variables=temp_vars) # we assume each item in the list is itself a list, as we # support "conditional includes" for vars_files, which mimics # the with_first_found mechanism. vars_file_list = vars_file_item if not isinstance(vars_file_list, list): vars_file_list = [vars_file_list] # now we iterate through the (potential) files, and break out # as soon as we read one from the list. If none are found, we # raise an error, which is silently ignored at this point. 
try: for vars_file in vars_file_list: vars_file = templar.template(vars_file) if not (isinstance(vars_file, Sequence)): raise AnsibleError( "Invalid vars_files entry found: %r\n" "vars_files entries should be either a string type or " "a list of string types after template expansion" % vars_file) try: data = preprocess_vars( self._loader.load_from_file(vars_file, unsafe=True)) if data is not None: for item in data: all_vars = combine_vars(all_vars, item) break except AnsibleFileNotFound: # we continue on loader failures continue except AnsibleParserError: raise else: # if include_delegate_to is set to False, we ignore the missing # vars file here because we're working on a delegated host if include_delegate_to: raise AnsibleFileNotFound( "vars file %s was not found" % vars_file_item) except (UndefinedError, AnsibleUndefinedVariable): if host is not None and self._fact_cache.get( host.name, dict()).get( 'module_setup') and task is not None: raise AnsibleUndefinedVariable( "an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item) else: # we do not have a full context here, and the missing variable could be because of that # so just show a warning and continue display.vvv( "skipping vars_file '%s' due to an undefined variable" % vars_file_item) continue display.vvv("Read vars_file '%s'" % vars_file_item) except TypeError: raise AnsibleParserError( "Error while reading vars files - please supply a list of file names. " "Got '%s' of type %s" % (vars_files, type(vars_files))) # By default, we now merge in all vars from all roles in the play, # unless the user has disabled this via a config option if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): all_vars = combine_vars( all_vars, role.get_vars(include_params=False)) # next, we merge in the vars from the role, which will specifically # follow the role dependency chain, and then we merge in the tasks # vars (which will look at parent blocks/task includes) if task: if task._role: all_vars = combine_vars( all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False)) all_vars = combine_vars(all_vars, task.get_vars()) # next, we merge in the vars cache (include vars) and nonpersistent # facts cache (set_fact/register), in that order if host: # include_vars non-persistent cache all_vars = combine_vars( all_vars, self._vars_cache.get(host.get_name(), dict())) # fact non-persistent cache all_vars = combine_vars( all_vars, self._nonpersistent_fact_cache.get(host.name, dict())) # next, we merge in role params and task include params if task: if task._role: all_vars = combine_vars( all_vars, task._role.get_role_params(task.get_dep_chain())) # special case for include tasks, where the include params # may be specified in the vars field for the task, which should # have higher precedence than the vars/np facts above all_vars = combine_vars(all_vars, task.get_include_params()) # extra vars all_vars = combine_vars(all_vars, self._extra_vars) # magic variables all_vars = combine_vars(all_vars, magic_variables) # special case for the 'environment' magic variable, as someone # may have set it as a variable and we don't want to stomp on it if task: all_vars['environment'] = task.environment # if we have a task and we're delegating to another host, figure out the # variables for that host now so we don't have to rely on hostvars later if task and task.delegate_to is not None and include_delegate_to: all_vars['ansible_delegated_vars'], all_vars[ '_ansible_loop_cache'] = 
self._get_delegated_vars( play, task, all_vars) # 'vars' magic var if task or play: # has to be copy, otherwise recursive ref all_vars['vars'] = all_vars.copy() display.debug("done with get_vars()") return all_vars
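The copy in the final 'vars' step is load-bearing: assigning the dict to itself would create a self-referencing mapping, while the snapshot gives templates a stable 'vars' view.

all_vars = {"foo": "bar"}
all_vars["vars"] = all_vars.copy()  # snapshot, not a self-reference
assert all_vars["vars"]["foo"] == "bar"
assert "vars" not in all_vars["vars"]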
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = True
    work_to_do = True
    while work_to_do and not self._tqm._terminated:
        try:
            self._display.debug("getting the remaining hosts for this loop")
            hosts_left = self._inventory.get_hosts(iterator._play.hosts)
            self._display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False

            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            for (host, task) in host_tasks:
                if not task:
                    continue

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                    if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                        run_once = True
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    pass

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        self._display.debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    self._execute_meta(task, play_context, iterator)
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    self._display.debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    self._display.debug("done getting variables")

                    if not callback_sent:
                        temp_task = task.copy()
                        try:
                            temp_task.name = unicode(templar.template(temp_task.name, fail_on_undefined=False))
                        except:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            pass
                        self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
                        callback_sent = True

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)

                results = self._process_pending_results(iterator)
                host_results.extend(results)

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

            # go to next host/task group
            if skip_rest:
                continue

            self._display.debug("done queuing things up, now waiting for results queue to drain")
            results = self._wait_on_pending_results(iterator)
            host_results.extend(results)

            if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                self._display.debug("out of hosts to run on")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            try:
                included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
            except AnsibleError as e:
                return False

            if len(included_files) > 0:
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        self._display.warning(str(e))
                        continue

                    for new_block in new_blocks:
                        noop_block = Block(parent_block=task._block)
                        noop_block.block = [noop_task for t in new_block.block]
                        noop_block.always = [noop_task for t in new_block.always]
                        noop_block.rescue = [noop_task for t in new_block.rescue]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                all_blocks[host].append(final_block)
                            else:
                                all_blocks[host].append(noop_block)

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

            self._display.debug("results queue empty")
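A sketch of the lock-step padding above, with strings standing in for Task and Block objects: hosts that did not trigger the include receive a same-length list of noop tasks so every host advances through the same number of steps.

def pad_blocks(included_hosts, all_hosts, real_block, noop):
    plan = {}
    for host in all_hosts:
        if host in included_hosts:
            plan[host] = real_block
        else:
            # same shape, no effect: keeps hosts in lock-step
            plan[host] = [noop] * len(real_block)
    return plan

plan = pad_blocks({"a"}, ["a", "b"], ["t1", "t2"], "noop")
assert plan == {"a": ["t1", "t2"], "b": ["noop", "noop"]}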
def run(self): ''' Run the given playbook, based on the settings in the play which may limit the runs to serialized groups, etc. ''' result = 0 entrylist = [] entry = {} try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path))) if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] else: # make sure the tqm has callbacks loaded self._tqm.load_callbacks() self._tqm.send_callback('v2_playbook_on_start', pb) i = 1 plays = pb.get_plays() display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path))) for play in plays: if play._included_path is not None: self._loader.set_basedir(play._included_path) else: self._loader.set_basedir(pb._basedir) # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() # Allow variables to be used in vars_prompt fields. all_vars = self._variable_manager.get_vars(play=play) templar = Templar(loader=self._loader, variables=all_vars) setattr(play, 'vars_prompt', templar.template(play.vars_prompt)) if play.vars_prompt: for var in play.vars_prompt: vname = var['name'] prompt = var.get("prompt", vname) default = var.get("default", None) private = boolean(var.get("private", True)) confirm = boolean(var.get("confirm", False)) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in self._variable_manager.extra_vars: if self._tqm: self._tqm.send_callback( 'v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) play.vars[vname] = display.do_var_prompt( vname, private, prompt, encrypt, confirm, salt_size, salt, default) else: # we are either in --list-<option> or syntax check play.vars[vname] = default # Post validate so any play level variables are templated all_vars = self._variable_manager.get_vars(play=play) templar = Templar(loader=self._loader, variables=all_vars) play.post_validate(templar) if self._options.syntax: continue if self._tqm is None: # we are just doing a listing entry['plays'].append(play) else: self._tqm._unreachable_hosts.update( self._unreachable_hosts) previously_failed = len(self._tqm._failed_hosts) previously_unreachable = len( self._tqm._unreachable_hosts) break_play = False # we are actually running plays batches = self._get_serialized_batches(play) if len(batches) == 0: self._tqm.send_callback( 'v2_playbook_on_play_start', play) self._tqm.send_callback( 'v2_playbook_on_no_hosts_matched') for batch in batches: # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) # and run it... result = self._tqm.run(play=play) # break the play if the result equals the special return code if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0: result = self._tqm.RUN_FAILED_HOSTS break_play = True # check the number of failures here, to see if they're above the maximum # failure percentage allowed, or if any errors are fatal. 
If either of those # conditions are met, we break out, otherwise we only break out if the entire # batch failed failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \ (previously_failed + previously_unreachable) if len(batch) == failed_hosts_count: break_play = True break # update the previous counts so they don't accumulate incorrectly # over multiple serial batches previously_failed += len( self._tqm._failed_hosts) - previously_failed previously_unreachable += len( self._tqm._unreachable_hosts ) - previously_unreachable # save the unreachable hosts from this batch self._unreachable_hosts.update( self._tqm._unreachable_hosts) if break_play: break i = i + 1 # per play if entry: entrylist.append(entry) # per playbook # send the stats callback for this playbook if self._tqm is not None: if C.RETRY_FILES_ENABLED: retries = set(self._tqm._failed_hosts.keys()) retries.update(self._tqm._unreachable_hosts.keys()) retries = sorted(retries) if len(retries) > 0: if C.RETRY_FILES_SAVE_PATH: basedir = C.RETRY_FILES_SAVE_PATH elif playbook_path: basedir = os.path.dirname( os.path.abspath(playbook_path)) else: basedir = '~/' (retry_name, _) = os.path.splitext( os.path.basename(playbook_path)) filename = os.path.join(basedir, "%s.retry" % retry_name) if self._generate_retry_inventory( filename, retries): display.display( "\tto retry, use: --limit @%s\n" % filename) self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) # if the last result wasn't zero, break out of the playbook file name loop if result != 0: break if entrylist: return entrylist finally: if self._tqm is not None: self._tqm.cleanup() if self._loader: self._loader.cleanup_all_tmp_files() if self._options.syntax: display.display("No issues encountered") return result if self._options.start_at_task and not self._tqm._start_at_done: display.error( "No matching task \"%s\" found. " "Note: --start-at-task can only follow static includes." % self._options.start_at_task) return result
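A sketch of the serial-batch check above: the play is broken out of when the number of hosts that newly failed or became unreachable during the batch equals the batch size. (The surrounding comment mentions a maximum failure percentage, but the check shown compares counts only, so this sketch does too.)

def batch_failed(batch, failed, unreachable, prev_failed, prev_unreachable):
    # hosts that newly failed or went unreachable during this batch
    new_failures = len(failed) + len(unreachable) - (prev_failed + prev_unreachable)
    return len(batch) == new_failures

assert batch_failed(["a", "b"], failed={"a"}, unreachable={"b"}, prev_failed=0, prev_unreachable=0)
assert not batch_failed(["a", "b"], failed={"a"}, unreachable=set(), prev_failed=0, prev_unreachable=0)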
def _get_delegated_vars(self, play, task, existing_variables): # This method has a lot of code copied from ``TaskExecutor._get_loop_items`` # if this is failing, and ``TaskExecutor._get_loop_items`` is not # then more will have to be copied here. # TODO: dedupe code here and with ``TaskExecutor._get_loop_items`` # this may be possible once we move pre-processing pre fork if not hasattr(task, 'loop'): # This "task" is not a Task, so we need to skip it return {}, None # we unfortunately need to template the delegate_to field here, # as we're fetching vars before post_validate has been called on # the task that has been passed in vars_copy = existing_variables.copy() # get search path for this task to pass to lookup plugins vars_copy['ansible_search_path'] = task.get_search_path() # ensure basedir is always in (dwim already searches here but we need to display it) if self._loader.get_basedir() not in vars_copy['ansible_search_path']: vars_copy['ansible_search_path'].append(self._loader.get_basedir()) templar = Templar(loader=self._loader, variables=vars_copy) items = [] has_loop = True if task.loop_with is not None: if task.loop_with in lookup_loader: fail = True if task.loop_with == 'first_found': # first_found loops are special. If the item is undefined then we want to fall through to the next fail = False try: loop_terms = listify_lookup_plugin_terms( terms=task.loop, templar=templar, loader=self._loader, fail_on_undefined=fail, convert_bare=False) if not fail: loop_terms = [ t for t in loop_terms if not templar.is_template(t) ] mylookup = lookup_loader.get(task.loop_with, loader=self._loader, templar=templar) # give lookup task 'context' for subdir (mostly needed for first_found) for subdir in ['template', 'var', 'file']: # TODO: move this to constants? if subdir in task.action: break setattr(mylookup, '_subdir', subdir + 's') items = wrap_var( mylookup.run(terms=loop_terms, variables=vars_copy)) except AnsibleTemplateError: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: raise AnsibleError( "Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with) elif task.loop is not None: try: items = templar.template(task.loop) except AnsibleTemplateError: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: has_loop = False items = [None] # since host can change per loop, we keep dict per host name resolved delegated_host_vars = dict() item_var = getattr(task.loop_control, 'loop_var', 'item') cache_items = False for item in items: # update the variables with the item value for templating, in case we need it if item is not None: vars_copy[item_var] = item templar.available_variables = vars_copy delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False) if delegated_host_name != task.delegate_to: cache_items = True if delegated_host_name is None: raise AnsibleError( message="Undefined delegate_to host for task:", obj=task._ds) if not isinstance(delegated_host_name, string_types): raise AnsibleError( message= "the field 'delegate_to' has an invalid type (%s), and could not be" " converted to a string type." 
% type(delegated_host_name), obj=task._ds) if delegated_host_name in delegated_host_vars: # no need to repeat ourselves, as the delegate_to value # does not appear to be tied to the loop item variable continue # now try to find the delegated-to host in inventory, or failing that, # create a new host on the fly so we can fetch variables for it delegated_host = None if self._inventory is not None: delegated_host = self._inventory.get_host(delegated_host_name) # try looking it up based on the address field, and finally # fall back to creating a host on the fly to use for the var lookup if delegated_host is None: for h in self._inventory.get_hosts( ignore_limits=True, ignore_restrictions=True): # check if the address matches, or if both the delegated_to host # and the current host are in the list of localhost aliases if h.address == delegated_host_name: delegated_host = h break else: delegated_host = Host(name=delegated_host_name) else: delegated_host = Host(name=delegated_host_name) # now we go fetch the vars for the delegated-to host and save them in our # master dictionary of variables to be used later in the TaskExecutor/PlayContext delegated_host_vars[delegated_host_name] = self.get_vars( play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=True, ) delegated_host_vars[delegated_host_name][ 'inventory_hostname'] = vars_copy.get('inventory_hostname') _ansible_loop_cache = None if has_loop and cache_items: # delegate_to templating produced a change, so we will cache the templated items # in a special private hostvar # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor # which may reprocess the loop _ansible_loop_cache = items return delegated_host_vars, _ansible_loop_cache
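A sketch of the first_found special case above, using a hypothetical is_template predicate in place of Templar.is_template: where other lookups fail hard on undefined terms, first_found tolerates them and simply drops any term that is still a template after rendering.

def filter_first_found_terms(terms, is_template):
    # terms still containing template syntax had undefined variables;
    # first_found skips them instead of failing the task
    return [t for t in terms if not is_template(t)]

terms = ["files/real.yml", "{{ maybe_undefined }}.yml"]
assert filter_first_found_terms(terms, lambda t: "{{" in t) == ["files/real.yml"]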
class VariableManager: _ALLOWED = frozenset([ 'plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory', 'all_plugins_play', 'all_plugins_inventory', 'all_inventory' ]) def __init__(self, loader=None, inventory=None): self._nonpersistent_fact_cache = defaultdict(dict) self._vars_cache = defaultdict(dict) self._extra_vars = defaultdict(dict) self._host_vars_files = defaultdict(dict) self._group_vars_files = defaultdict(dict) self._inventory = inventory self._loader = loader self._hostvars = None self._omit_token = '__omit_place_holder__%s' % sha1( os.urandom(64)).hexdigest() self._options_vars = defaultdict(dict) self.safe_basedir = False self._templar = Templar(loader=self._loader) # bad cache plugin is not fatal error try: self._fact_cache = FactCache() except AnsibleError as e: display.warning(to_native(e)) # fallback to a dict as in memory cache self._fact_cache = {} def __getstate__(self): data = dict( fact_cache=self._fact_cache, np_fact_cache=self._nonpersistent_fact_cache, vars_cache=self._vars_cache, extra_vars=self._extra_vars, host_vars_files=self._host_vars_files, group_vars_files=self._group_vars_files, omit_token=self._omit_token, options_vars=self._options_vars, inventory=self._inventory, safe_basedir=self.safe_basedir, ) return data def __setstate__(self, data): self._fact_cache = data.get('fact_cache', defaultdict(dict)) self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict)) self._vars_cache = data.get('vars_cache', defaultdict(dict)) self._extra_vars = data.get('extra_vars', dict()) self._host_vars_files = data.get('host_vars_files', defaultdict(dict)) self._group_vars_files = data.get('group_vars_files', defaultdict(dict)) self._omit_token = data.get( 'omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()) self._inventory = data.get('inventory', None) self._options_vars = data.get('options_vars', dict()) self.safe_basedir = data.get('safe_basedir', False) @property def extra_vars(self): ''' ensures a clean copy of the extra_vars are made ''' return self._extra_vars.copy() @extra_vars.setter def extra_vars(self, value): ''' ensures a clean copy of the extra_vars are used to set the value ''' if not isinstance(value, MutableMapping): raise AnsibleAssertionError( "the type of 'value' for extra_vars should be a MutableMapping, but is a %s" % type(value)) self._extra_vars = value.copy() def set_inventory(self, inventory): self._inventory = inventory @property def options_vars(self): ''' ensures a clean copy of the options_vars are made ''' return self._options_vars.copy() @options_vars.setter def options_vars(self, value): ''' ensures a clean copy of the options_vars are used to set the value ''' if not isinstance(value, dict): raise AnsibleAssertionError( "the type of 'value' for options_vars should be a dict, but is a %s" % type(value)) self._options_vars = value.copy() def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). 
        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''
        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        # default for all cases
        basedirs = []
        if self.safe_basedir:  # avoid adhoc/console loading cwd
            basedirs = [self._loader.get_basedir()]

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        if task:
            # set basedirs
            if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
                basedirs = task.get_search_path()
            elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'):  # only option in 2.4.0
                basedirs = [task.get_search_path()[0]]
            elif C.PLAYBOOK_VARS_ROOT != 'top':  # preserves default basedirs, only option pre 2.3
                raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its role's default vars
            if task._role is not None and (play or task.action == 'include_role'):
                all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # THE 'all' group and the rest of groups for a host, used below
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data.update(plugin.get_host_vars(entity.name))
                            else:
                                data.update(plugin.get_group_vars(entity.name))
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir and not os.path.exists(inventory_dir):  # skip host lists
                        continue
                    elif not os.path.isdir(inventory_dir):  # always pass the 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():
                        data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():
                    for path in basedirs:
                        data = combine_vars(data, _get_plugin_vars(plugin, path, entities))
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                    merges all plugin sources by group;
                    this should be used on its own, NOT in combination with the other groups_plugins* functions
                '''
                data = {}
                for group in host_groups:
                    data[group] = combine_vars(data.get(group, {}), _plugins_inventory(group))
                    data[group] = combine_vars(data[group], _plugins_play(group))
                return data

            # Merge groups as per precedence config,
            # only allowing calls to the functions we want exposed
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' % (entry, host.name))
                    all_vars = combine_vars(all_vars, locals()[entry]())
                else:
                    display.warning('Ignoring unknown variable precedence entry: %s' % (entry))

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = combine_vars(all_vars, host.get_vars())
            all_vars = combine_vars(all_vars, _plugins_inventory([host]))
            all_vars = combine_vars(all_vars, _plugins_play([host]))

            # finally, the facts cache for this host, if it exists
            # TODO: cleaning of facts should eventually become part of taskresults instead of vars
            try:
                facts = wrap_var(self._fact_cache.get(host.name, {}))
                all_vars.update(namespace_facts(facts))

                # push facts to main namespace
                if C.INJECT_FACTS_AS_VARS:
                    all_vars = combine_vars(all_vars, wrap_var(clean_facts(facts)))
                else:
                    # always 'promote' ansible_local
                    all_vars = combine_vars(all_vars, wrap_var({'ansible_local': facts.get('ansible_local', {})}))
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            vars_files = play.get_vars_files()
            try:
                for vars_file_item in vars_files:
                    # create a set of temporary vars here, which incorporate the extra
                    # and magic vars so we can properly template the vars_files entries
                    temp_vars = combine_vars(all_vars, self._extra_vars)
                    temp_vars = combine_vars(temp_vars, magic_variables)
                    self._templar.set_available_variables(temp_vars)

                    # we assume each item in the list is itself a list, as we
                    # support "conditional includes" for vars_files, which mimics
                    # the with_first_found mechanism.
                    vars_file_list = vars_file_item
                    if not isinstance(vars_file_list, list):
                        vars_file_list = [vars_file_list]

                    # now we iterate through the (potential) files, and break out
                    # as soon as we read one from the list. If none are found, we
                    # raise an error, which is silently ignored at this point.
                    try:
                        for vars_file in vars_file_list:
                            vars_file = self._templar.template(vars_file)
                            if not (isinstance(vars_file, Sequence)):
                                raise AnsibleError(
                                    "Invalid vars_files entry found: %r\n"
                                    "vars_files entries should be either a string type or "
                                    "a list of string types after template expansion" % vars_file
                                )
                            try:
                                data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
                                if data is not None:
                                    for item in data:
                                        all_vars = combine_vars(all_vars, item)
                                break
                            except AnsibleFileNotFound:
                                # we continue on loader failures
                                continue
                            except AnsibleParserError:
                                raise
                        else:
                            # if include_delegate_to is set to False, we ignore the missing
                            # vars file here because we're working on a delegated host
                            if include_delegate_to:
                                raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                    except (UndefinedError, AnsibleUndefinedVariable):
                        if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                            raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
                                                           % vars_file_item, obj=vars_file_item)
                        else:
                            # we do not have a full context here, and the missing variable could be
                            # because of that, so just show a warning and continue
                            display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                            continue

                    display.vvv("Read vars_file '%s'" % vars_file_item)
            except TypeError:
                raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
                                         "Got '%s' of type %s" % (vars_files, type(vars_files)))

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            # include_vars non-persistent cache
            all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
            # fact non-persistent cache
            all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # extra vars
        all_vars = combine_vars(all_vars, self._extra_vars)

        # magic variables
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)
        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars

    def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = {}
        variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
        variables['ansible_playbook_python'] = sys.executable

        if play:
            # This is a list of all role names of all dependencies for all roles for this play
            dependency_role_names = list(set([d._role_name for r in play.roles for d in r.get_all_dependencies()]))
            # This is a list of all role names of all roles for this play
            play_role_names = [r._role_name for r in play.roles]

            # ansible_role_names includes all role names, dependent or directly referenced by the play
            variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
            # ansible_play_role_names includes the names of all roles directly referenced by this play;
            # roles that are implicitly referenced via dependencies are not listed.
            variables['ansible_play_role_names'] = play_role_names
            # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies;
            # dependencies that are also explicitly named as roles are included in this list
            variables['ansible_dependent_role_names'] = dependency_role_names

            # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
            variables['role_names'] = variables['ansible_play_role_names']

            variables['ansible_play_name'] = play.get_name()

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['groups'] = self._inventory.get_groups_dict()
            if play:
                if self._templar.is_template(play.hosts):
                    pattern = 'all'
                else:
                    pattern = play.hosts or 'all'
                # add the list of hosts in the play, as adjusted for limit/filters
                variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
                variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
                variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts]

                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
                # however this would take work in the templating engine, so for now we'll add both
                variables['play_hosts'] = variables['ansible_play_batch']

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables

    def _get_delegated_vars(self, play, task, existing_variables):
        if not hasattr(task, 'loop'):
            # This "task" is not a Task, so we need to skip it
            return {}, None

        # we unfortunately need to template the delegate_to field here,
        # as we're fetching vars before post_validate has been called on
        # the task that has been passed in
        vars_copy = existing_variables.copy()
        self._templar.set_available_variables(vars_copy)

        items = []
        has_loop = True
        if task.loop_with is not None:
            if task.loop_with in lookup_loader:
                try:
                    loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=self._templar, loader=self._loader,
                                                             fail_on_undefined=True, convert_bare=False)
                    items = lookup_loader.get(task.loop_with, loader=self._loader, templar=self._templar).run(terms=loop_terms, variables=vars_copy)
                except AnsibleUndefinedVariable:
                    # This task will be skipped later due to this, so we just set up
                    # a dummy array for the later code so it doesn't fail
                    items = [None]
            else:
                raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
        elif task.loop is not None:
            try:
                items = self._templar.template(task.loop)
            except AnsibleUndefinedVariable:
                # This task will be skipped later due to this, so we just set up
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            has_loop = False
            items = [None]

        delegated_host_vars = dict()
        item_var = getattr(task.loop_control, 'loop_var', 'item')
        cache_items = False
        for item in items:
            # update the variables with the item value for templating, in case we need it
            if item is not None:
                vars_copy[item_var] = item
                self._templar.set_available_variables(vars_copy)

            delegated_host_name = self._templar.template(task.delegate_to, fail_on_undefined=False)
            if delegated_host_name != task.delegate_to:
                cache_items = True
            if delegated_host_name is None:
                raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
            if not isinstance(delegated_host_name, string_types):
                raise AnsibleError(message="the field 'delegate_to' has an invalid type (%s), and could not be"
                                           " converted to a string type." % type(delegated_host_name), obj=task._ds)

            if delegated_host_name in delegated_host_vars:
                # no need to repeat ourselves, as the delegate_to value
                # does not appear to be tied to the loop item variable
                continue

            # a dictionary of variables to use if we have to create a new host below;
            # we set the default port based on the default transport here, to make sure
            # we use the proper default for windows
            new_port = C.DEFAULT_REMOTE_PORT
            if C.DEFAULT_TRANSPORT == 'winrm':
                new_port = 5986

            new_delegated_host_vars = dict(
                ansible_delegated_host=delegated_host_name,
                ansible_host=delegated_host_name,  # not redundant as other sources can change ansible_host
                ansible_port=new_port,
                ansible_user=C.DEFAULT_REMOTE_USER,
                ansible_connection=C.DEFAULT_TRANSPORT,
            )

            # now try to find the delegated-to host in inventory, or failing that,
            # create a new host on the fly so we can fetch variables for it
            delegated_host = None
            if self._inventory is not None:
                delegated_host = self._inventory.get_host(delegated_host_name)
                # try looking it up based on the address field, and finally
                # fall back to creating a host on the fly to use for the var lookup
                if delegated_host is None:
                    if delegated_host_name in C.LOCALHOST:
                        delegated_host = self._inventory.localhost
                    else:
                        for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                            # check if the address matches, or if both the delegated_to host
                            # and the current host are in the list of localhost aliases
                            if h.address == delegated_host_name:
                                delegated_host = h
                                break
                        else:
                            delegated_host = Host(name=delegated_host_name)
                            delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
            else:
                delegated_host = Host(name=delegated_host_name)
                delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)

            # now we go fetch the vars for the delegated-to host and save them in our
            # master dictionary of variables to be used later in the TaskExecutor/PlayContext
            delegated_host_vars[delegated_host_name] = self.get_vars(
                play=play,
                host=delegated_host,
                task=task,
                include_delegate_to=False,
                include_hostvars=False,
            )

        _ansible_loop_cache = None
        if has_loop and cache_items:
            # delegate_to templating produced a change, so we will cache the templated items
            # in a special private hostvar.
            # this ensures that delegate_to+loop doesn't produce different results than the
            # TaskExecutor, which may reprocess the loop
            _ansible_loop_cache = items

        return delegated_host_vars, _ansible_loop_cache

    def clear_facts(self, hostname):
        '''
        Clears the facts for a host
        '''
        self._fact_cache.pop(hostname, None)

    def set_host_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the fact cache.
        '''

        if not isinstance(facts, dict):
            raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a dict but is a %s" % type(facts))

        try:
            try:
                # this is a cache plugin, not a dictionary
                self._fact_cache.update({host.name: facts})
            except TypeError:
                # this is here for backwards compatibility with the time when cache plugins were not 'dict compatible'
                self._fact_cache.update(host.name, facts)
                display.deprecated("Your configured fact cache plugin is using a deprecated form of the 'update' method", version="2.12")
        except KeyError:
            self._fact_cache[host.name] = facts

    def set_nonpersistent_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the nonpersistent fact cache.
        '''

        if not isinstance(facts, dict):
            raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a dict but is a %s" % type(facts))

        try:
            self._nonpersistent_fact_cache[host.name].update(facts)
        except KeyError:
            self._nonpersistent_fact_cache[host.name] = facts

    def set_host_variable(self, host, varname, value):
        '''
        Sets a value in the vars_cache for a host.
        '''
        host_name = host.get_name()
        if host_name not in self._vars_cache:
            self._vars_cache[host_name] = dict()
        if varname in self._vars_cache[host_name] and isinstance(self._vars_cache[host_name][varname], MutableMapping) \
                and isinstance(value, MutableMapping):
            self._vars_cache[host_name] = combine_vars(self._vars_cache[host_name], {varname: value})
        else:
            self._vars_cache[host_name][varname] = value
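# A minimal, self-contained sketch (NOT Ansible's actual implementation) of the
# precedence model get_vars() builds on: every variable source is merged over
# the result so far, so later sources win. The toy merge below mirrors only the
# default 'replace' behaviour of ansible.utils.vars.combine_vars; the real
# helper can also hash-merge depending on configuration. All names below are
# invented for illustration.

def _toy_combine(a, b):
    # keys in b override keys in a, just like a plain dict update
    merged = a.copy()
    merged.update(b)
    return merged

def _toy_precedence_demo():
    role_defaults = {'http_port': 80, 'max_clients': 200}
    group_vars = {'http_port': 8080}
    extra_vars = {'max_clients': 500}

    merged = {}
    # sources are merged lowest-precedence first, as in get_vars() above
    for source in (role_defaults, group_vars, extra_vars):
        merged = _toy_combine(merged, source)

    # extra vars sit at the top of the precedence order, so they win
    assert merged == {'http_port': 8080, 'max_clients': 500}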
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = self._tqm.RUN_OK
    work_to_do = True
    while work_to_do and not self._tqm._terminated:

        try:
            display.debug("getting the remaining hosts for this loop")
            hosts_left = self.get_hosts_left(iterator)
            display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False

            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            # flag set if task is set to any_errors_fatal
            any_errors_fatal = False

            results = []
            for (host, task) in host_tasks:
                if not task:
                    continue

                if self._tqm._terminated:
                    break

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    action = None

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates;
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        display.debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    # for the linear strategy, we run meta tasks just once and for
                    # all hosts currently being iterated over rather than one host
                    results.extend(self._execute_meta(task, play_context, iterator, host))
                    if task.args.get('_raw_params', None) != 'noop':
                        run_once = True
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    display.debug("getting variables")
                    task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    display.debug("done getting variables")

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True

                    if not callback_sent:
                        display.debug("sending task start callback, copying the task so we can template it temporarily")
                        saved_name = task.name
                        display.debug("done copying, going to template now")
                        try:
                            task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                            display.debug("done templating")
                        except Exception:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
                        callback_sent = True
                        display.debug("sending task start callback")
                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)
                    del task_vars

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

                results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

            # go to next host/task group
            if skip_rest:
                continue

            display.debug("done queuing things up, now waiting for results queue to drain")
            if self._pending_results > 0:
                results += self._wait_on_pending_results(iterator)

            host_results.extend(results)

            self.update_active_connections(results)

            try:
                included_files = IncludedFile.process_include_results(
                    host_results,
                    iterator=iterator,
                    loader=self._loader,
                    variable_manager=self._variable_manager
                )
            except AnsibleError as e:
                # this is a fatal error, so we abort here regardless of block state
                return self._tqm.RUN_ERROR

            include_failure = False
            if len(included_files) > 0:
                display.debug("we have included files to process")

                # A noop task for use in padding dynamic includes
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                display.debug("generating all_blocks data")
                all_blocks = dict((host, []) for host in hosts_left)
                display.debug("done generating all_blocks data")
                for included_file in included_files:
                    display.debug("processing included file: %s" % included_file._filename)
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        if included_file._is_role:
                            new_ir = self._copy_included_file(included_file)

                            new_blocks, handler_blocks = new_ir.get_block_list(
                                play=iterator._play,
                                variable_manager=self._variable_manager,
                                loader=self._loader,
                            )
                            self._tqm.update_handler_list([handler for handler_block in handler_blocks for handler in handler_block.block])
                        else:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                        display.debug("iterating over new_blocks loaded from include file")
                        for new_block in new_blocks:
                            task_vars = self._variable_manager.get_vars(
                                play=iterator._play,
                                task=included_file._task,
                            )
                            display.debug("filtering new block on tags")
                            final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                            display.debug("done filtering new block on tags")

                            noop_block = Block(parent_block=task._parent)
                            noop_block.block = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]

                            for host in hosts_left:
                                if host in included_file._hosts:
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)
                        display.debug("done iterating over new_blocks loaded from include file")

                    except AnsibleError as e:
                        for host in included_file._hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                        display.error(to_text(e), wrap_text=False)
                        include_failure = True
                        continue

                # finally go through all of the hosts and append the
                # accumulated blocks to their list of tasks
                display.debug("extending task lists for all hosts with included blocks")

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

                display.debug("done extending task lists")
                display.debug("done processing included files")

            display.debug("results queue empty")

            display.debug("checking for any_errors_fatal")
            failed_hosts = []
            unreachable_hosts = []
            for res in results:
                if res.is_failed() and iterator.is_failed(res._host):
                    failed_hosts.append(res._host.name)
                elif res.is_unreachable():
                    unreachable_hosts.append(res._host.name)

            # if any_errors_fatal and we had an error, mark all hosts as failed
            if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                dont_fail_states = frozenset([iterator.ITERATING_RESCUE, iterator.ITERATING_ALWAYS])
                for host in hosts_left:
                    (s, _) = iterator.get_next_task_for_host(host, peek=True)
                    if s.run_state not in dont_fail_states or \
                       s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0:
                        self._tqm._failed_hosts[host.name] = True
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for any_errors_fatal")

            display.debug("checking for max_fail_percentage")
            if iterator._play.max_fail_percentage is not None and len(results) > 0:
                percentage = iterator._play.max_fail_percentage / 100.0

                if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
                    for host in hosts_left:
                        # don't double-mark hosts, or the iterator will potentially
                        # fail them out of the rescue/always states
                        if host.name not in failed_hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
            display.debug("done checking for max_fail_percentage")

            display.debug("checking to see if all hosts have failed and the running result is not ok")
            if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
                display.debug("^ not ok, so returning result now")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                return result
            display.debug("done checking to see if all hosts have failed")

        except (IOError, EOFError) as e:
            display.debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return self._tqm.RUN_UNKNOWN_ERROR

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
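# A toy illustration (assumed names, not the real strategy API) of the lockstep
# model run() implements above: the outer loop advances one task at a time, and
# every host is queued for that task before anyone moves on. Hosts excluded
# from a dynamic include would receive a 'noop' placeholder instead, which is
# how the equal-length noop blocks above keep everyone in step.

def _toy_lockstep(hosts, tasks):
    for task in tasks:                    # one task at a time, for all hosts
        queued = []
        for host in hosts:                # stand-in for _queue_task()
            queued.append((host, task))
        # ... a real strategy would now drain the results queue here ...
        yield queued

for _batch in _toy_lockstep(['web1', 'web2'], ['ping', 'copy', 'restart']):
    print(_batch)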
def _run_loop(self, items):
    '''
    Runs the task with the loop items specified and collates the result
    into an array named 'results' which is inserted into the final result
    along with the item for which the loop ran.
    '''

    results = []

    # make copies of the job vars and task so we can add the item to
    # the variables and re-validate the task with the item variable
    # task_vars = self._job_vars.copy()
    task_vars = self._job_vars

    loop_var = 'item'
    index_var = None
    label = None
    loop_pause = 0
    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)

    # FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
    if self._task.loop_control:
        loop_var = templar.template(self._task.loop_control.loop_var)
        index_var = templar.template(self._task.loop_control.index_var)
        loop_pause = templar.template(self._task.loop_control.pause)
        # This may be 'None', so it is templated below after we ensure a value and an item is assigned
        label = self._task.loop_control.label

    # ensure we always have a label
    if label is None:
        label = '{{' + loop_var + '}}'

    if loop_var in task_vars:
        display.warning(u"The loop variable '%s' is already in use. "
                        u"You should set the `loop_var` value in the `loop_control` option for the task"
                        u" to something else to avoid variable collisions and unexpected behavior." % loop_var)

    ran_once = False
    if self._task.loop_with:
        # Only squash with 'with_:', not with 'loop:'; 'magic' squashing can be removed once with_ loops are deprecated
        items = self._squash_items(items, loop_var, task_vars)

    no_log = False
    for item_index, item in enumerate(items):
        task_vars[loop_var] = item
        if index_var:
            task_vars[index_var] = item_index

        # Update template vars to reflect current loop iteration
        templar.set_available_variables(task_vars)

        # pause between loop iterations
        if loop_pause and ran_once:
            try:
                time.sleep(float(loop_pause))
            except ValueError as e:
                raise AnsibleError('Invalid pause value: %s, produced error: %s' % (loop_pause, to_native(e)))
        else:
            ran_once = True

        try:
            tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
            tmp_task._parent = self._task._parent
            tmp_play_context = self._play_context.copy()
        except AnsibleParserError as e:
            results.append(dict(failed=True, msg=to_text(e)))
            continue

        # now we swap the internal task and play context with their copies,
        # execute, and swap them back so we can do the next iteration cleanly
        (self._task, tmp_task) = (tmp_task, self._task)
        (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
        res = self._execute(variables=task_vars)
        task_fields = self._task.dump_attrs()
        (self._task, tmp_task) = (tmp_task, self._task)
        (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

        # update 'general no_log' based on specific no_log
        no_log = no_log or tmp_task.no_log

        # now update the result with the item info, and append the result
        # to the list of results
        res[loop_var] = item
        if index_var:
            res[index_var] = item_index
        res['_ansible_item_result'] = True
        res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

        # gets templated here unlike the rest of the loop_control fields, as it depends on loop_var above
        res['_ansible_item_label'] = templar.template(label, cache=False)

        self._final_q.put(
            TaskResult(
                self._host.name,
                self._task._uuid,
                res,
                task_fields=task_fields,
            ),
            block=False,
        )
        results.append(res)
        del task_vars[loop_var]

    self._task.no_log = no_log

    return results
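# A stripped-down sketch of the per-item bookkeeping _run_loop() performs:
# inject the item (and optional index) into the task vars, execute, then stamp
# the item back onto the result. The function and parameter names here are
# illustrative only; the real executor also swaps task/play-context copies,
# honours loop_control.pause, and templates the item label.

def _toy_run_loop(items, execute, loop_var='item', index_var=None):
    results = []
    task_vars = {}
    for index, item in enumerate(items):
        task_vars[loop_var] = item            # like task_vars[loop_var] = item above
        if index_var:
            task_vars[index_var] = index
        res = execute(dict(task_vars))        # stand-in for self._execute(variables=task_vars)
        res[loop_var] = item                  # annotate the result with its loop item
        if index_var:
            res[index_var] = index
        results.append(res)
    return results

print(_toy_run_loop([1, 2, 3], lambda v: {'changed': True}))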