def load_inventory_files(self, inventory_files, host_list=None):
    temp_vars = self._variable_manager.get_vars(loader=self._loader)
    templar = Templar(loader=self._loader, variables=temp_vars)

    all_inventory = AbleMapping()
    try:
        for inventory_file in inventory_files:
            inventory_file = templar.template(inventory_file)
            try:
                data = InventoryYaml.prepare_data(self._loader.load_from_file(inventory_file))
                if data is not None:
                    if self._directory is None:
                        self._directory = os.path.dirname(inventory_file)
                    all_inventory = combine_vars(all_inventory, data)
                break
            except AbleFileNotFound:
                # we continue on loader failures
                continue
            except AbleParserError:
                raise
        else:
            raise AbleFileNotFound("inventory file %s was not found" % inventory_files)
    except (UndefinedError, AbleUndefinedVariable):
        # we do not have a full context here, and the missing variable could be
        # because of that, so just show a warning and continue
        display.vvv("skipping inventory_file '%s' due to an undefined variable" % inventory_files)

    return all_inventory
def _get_loop_items(self):
    '''
    Loads a lookup plugin to handle the with_* portion of a task (if specified),
    and returns the items result.
    '''

    # save the play context variables to a temporary dictionary,
    # so that we can modify the job vars without doing a full copy
    # and later restore them to avoid modifying things too early
    play_context_vars = dict()
    self._play_context.update_vars(play_context_vars)

    old_vars = dict()
    for k in play_context_vars.keys():
        if k in self._job_vars:
            old_vars[k] = self._job_vars[k]
        self._job_vars[k] = play_context_vars[k]

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
    items = None
    if self._task.loop:
        if self._task.loop in self._shared_loader_obj.lookup_loader:
            # TODO: remove convert_bare true and deprecate this in with_
            if self._task.loop == 'first_found':
                # first_found loops are special. If the item is undefined
                # then we want to fall through to the next value rather
                # than failing.
                loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
                                                         loader=self._loader, fail_on_undefined=False, convert_bare=True)
                loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
            else:
                try:
                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
                                                             loader=self._loader, fail_on_undefined=True, convert_bare=True)
                except AnsibleUndefinedVariable as e:
                    if u'has no attribute' in to_unicode(e):
                        loop_terms = []
                        display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
                    else:
                        raise
            items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader,
                                                              templar=templar).run(terms=loop_terms, variables=self._job_vars)
        else:
            raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)

    # now we restore any old job variables that may have been modified,
    # and delete them if they were in the play context vars but not in
    # the old variables dictionary
    for k in play_context_vars.keys():
        if k in old_vars:
            self._job_vars[k] = old_vars[k]
        else:
            del self._job_vars[k]

    if items:
        from ansible.vars.unsafe_proxy import UnsafeProxy
        for idx, item in enumerate(items):
            if item is not None and not isinstance(item, UnsafeProxy):
                items[idx] = UnsafeProxy(item)

    return items
def search_handler_blocks(handler_name, handler_blocks):
    for handler_block in handler_blocks:
        for handler_task in handler_block.block:
            if handler_task.name:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable):
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
            else:
                # if the handler name is not set, we check via the handlers uuid.
                # this is mainly used by listening handlers only
                if handler_name == handler_task._uuid:
                    return handler_task
    return None
def test_tests_as_filters_warning(mocker):
    fake_loader = DictDataLoader({
        "/path/to/my_file.txt": "foo\n",
    })
    templar = Templar(loader=fake_loader, variables={})
    filters = templar._get_filters(templar.environment.filters)

    mocker.patch.object(display, 'deprecated')

    # Call successful test, ensure the message is correct
    filters['successful']({})
    display.deprecated.assert_called_once_with(
        'Using tests as filters is deprecated. Instead of using `result|successful` use `result is successful`', version='2.9'
    )

    # Call success test, ensure the message is correct
    display.deprecated.reset_mock()
    filters['success']({})
    display.deprecated.assert_called_once_with(
        'Using tests as filters is deprecated. Instead of using `result|success` use `result is success`', version='2.9'
    )

    # Call bool filter, ensure no deprecation message was displayed
    display.deprecated.reset_mock()
    filters['bool'](True)
    assert display.deprecated.call_count == 0

    # Ensure custom test does not override builtin filter
    assert filters.get('abs') != isabs
def __getitem__(self, host_name):
    data = self.raw_get(host_name)
    sha1_hash = sha1(str(data).encode('utf-8')).hexdigest()
    if sha1_hash not in self._cached_result:
        templar = Templar(variables=data, loader=self._loader)
        self._cached_result[sha1_hash] = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS)
    return self._cached_result[sha1_hash]
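# --- illustration (not from the original source) ---
# A minimal, self-contained sketch of the hash-keyed memoization pattern used
# in __getitem__ above: the templated result is cached under a digest of the
# input data, so repeated lookups with identical vars skip re-templating.
# The names _CACHE, expensive_render, and render_cached are illustrative only.
from hashlib import sha1

_CACHE = {}

def expensive_render(data):
    # stand-in for templar.template(data, fail_on_undefined=False, ...)
    return {k: str(v).upper() for k, v in data.items()}

def render_cached(data):
    key = sha1(str(data).encode('utf-8')).hexdigest()
    if key not in _CACHE:
        _CACHE[key] = expensive_render(data)
    return _CACHE[key]

assert render_cached({'a': 'x'}) is render_cached({'a': 'x'})  # second call hits the cache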
def run_handlers(self, iterator, play_context):
    '''
    Runs handlers on those hosts which have been notified.
    '''

    result = True

    for handler_block in iterator._play.handlers:
        # FIXME: handlers need to support the rescue/always portions of blocks too,
        #        but this may take some work in the iterator and gets tricky when
        #        we consider the ability of meta tasks to flush handlers
        for handler in handler_block.block:
            handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler)
            templar = Templar(loader=self._loader, variables=handler_vars)
            try:
                # first we check with the full result of get_name(), which may
                # include the role name (if the handler is from a role). If that
                # is not found, we resort to the simple name field, which doesn't
                # have anything extra added to it.
                handler_name = templar.template(handler.name)
                if handler_name not in self._notified_handlers:
                    handler_name = templar.template(handler.get_name())
            except (UndefinedError, AnsibleUndefinedVariable):
                # We skip this handler due to the fact that it may be using
                # a variable in the name that was conditionally included via
                # set_fact or some other method, and we don't want to error
                # out unnecessarily
                continue

            if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
                result = self._do_handler_run(handler, handler_name, iterator=iterator, play_context=play_context)
                if not result:
                    break
    return result
def listify_lookup_plugin_terms(terms, variables, loader):

    if isinstance(terms, basestring):
        # someone did:
        #    with_items: alist
        # OR
        #    with_items: {{ alist }}

        stripped = terms.strip()
        templar = Templar(loader=loader, variables=variables)
        if not (stripped.startswith('{') or stripped.startswith('[')) and \
           not stripped.startswith("/") and \
           not stripped.startswith('set([') and \
           not LOOKUP_REGEX.search(terms):
            # if not already a list, get ready to evaluate with Jinja2
            # not sure why the "/" is in above code :)
            try:
                new_terms = templar.template("{{ %s }}" % terms)
                if isinstance(new_terms, basestring) and "{{" in new_terms:
                    pass
                else:
                    terms = new_terms
            except:
                pass
        else:
            terms = templar.template(terms)

        if '{' in terms or '[' in terms:
            # Jinja2 already evaluated a variable to a list.
            # Jinja2-ified list needs to be converted back to a real type
            return safe_eval(terms)

        if isinstance(terms, basestring):
            terms = [terms]

    return terms
def load_data(self, ds, basedir, variable_manager=None, loader=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    Playbook() object rather than a PlaybookInclude object
    '''

    # import here to avoid a dependency loop
    from ansible.playbook import Playbook

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # playbook objects
    new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)

    try:
        forward_conditional = False
        if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
            return None
    except AnsibleError:
        # conditional evaluation raised an error, so we set a flag to indicate
        # we need to forward the conditionals on to the included play(s)
        forward_conditional = True

    # then we use the object to load a Playbook
    pb = Playbook(loader=loader)

    file_name = templar.template(new_obj.include)
    if not os.path.isabs(file_name):
        file_name = os.path.join(basedir, file_name)

    pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

    # finally, update each loaded playbook entry with any variables specified
    # on the included playbook and/or any tags which may have been set
    for entry in pb._entries:
        temp_vars = entry.vars.copy()
        temp_vars.update(new_obj.vars)
        param_tags = temp_vars.pop('tags', None)
        if param_tags is not None:
            entry.tags.extend(param_tags.split(','))
        entry.vars = temp_vars
        entry.tags = list(set(entry.tags).union(new_obj.tags))
        if entry._included_path is None:
            entry._included_path = os.path.dirname(file_name)

        # Check to see if we need to forward the conditionals on to the included
        # plays. If so, we can take a shortcut here and simply prepend them to
        # those attached to each block (if any)
        if forward_conditional:
            for task_block in entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks:
                task_block.when = self.when[:] + task_block.when

    return pb
def __getitem__(self, host_name):
    if host_name not in self._lookup:
        return j2undefined
    data = self._lookup.get(host_name)
    templar = Templar(variables=data, loader=self._loader)
    return templar.template(data, fail_on_undefined=False)
def __getitem__(self, host_name):
    if host_name not in self._lookup:
        host = self._inventory.get_host(host_name)
        result = self._vars_manager.get_vars(loader=self._loader, play=self._play, host=host)
        templar = Templar(variables=result, loader=self._loader)
        self._lookup[host_name] = templar.template(result)
    return self._lookup[host_name]
def post_validate(self, variables, loader):
    '''
    Finalizes templated values which may be set on this object's fields.
    '''
    templar = Templar(loader=loader, variables=variables)
    for field in self._get_fields():
        value = templar.template(getattr(self, field))
        setattr(self, field, value)
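# --- illustration (not from the original source) ---
# A minimal sketch of the templating call post_validate() relies on, assuming
# an Ansible 2.x environment; the exact Templar constructor arguments have
# shifted between releases, so treat this as illustrative rather than exact.
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar

templar = Templar(loader=DataLoader(), variables={'greeting': 'hello'})
print(templar.template('{{ greeting }} world'))  # -> hello world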
def run(self, tmp=None, task_vars=dict()):
    ''' handler for template operations '''

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)

    if (source is None and 'first_available_file' not in task_vars) or dest is None:
        return dict(failed=True, msg="src and dest are required")

    if tmp is None:
        tmp = self._make_tmp_path()

    ##############################################################################################
    # FIXME: this all needs to be sorted out
    ##############################################################################################
    # if we have first_available_file in our vars
    # look up the files and use the first one we find as src
    #if 'first_available_file' in task_vars:
    #    found = False
    #    for fn in task_vars.get('first_available_file'):
    #        fn_orig = fn
    #        fnt = template.template(self.runner.basedir, fn, task_vars)
    #        fnd = utils.path_dwim(self.runner.basedir, fnt)
    #        if not os.path.exists(fnd) and '_original_file' in task_vars:
    #            fnd = utils.path_dwim_relative(task_vars['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
    #        if os.path.exists(fnd):
    #            source = fnd
    #            found = True
    #            break
    #    if not found:
    #        result = dict(failed=True, msg="could not find src in first_available_file list")
    #        return ReturnData(conn=conn, comm_ok=False, result=result)
    #else:
    if 1:
        if self._task._role is not None:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
        else:
            source = self._loader.path_dwim(source)
    ##############################################################################################
    # END FIXME
    ##############################################################################################

    # Expand any user home dir specification
    dest = self._remote_expand_user(dest, tmp)

    if dest.endswith("/"):
        # CCTODO: Fix path for Windows hosts.
        base = os.path.basename(source)
        dest = os.path.join(dest, base)

    # template the source data locally & get ready to transfer
    templar = Templar(loader=self._loader, variables=task_vars)
    try:
        with open(source, 'r') as f:
            template_data = f.read()
        resultant = templar.template(template_data, preserve_trailing_newlines=True)
    except Exception as e:
        return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
def __getitem__(self, host_name):
    if host_name not in self._lookup:
        return j2undefined
    host = self._lookup.get(host_name)
    data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False)
    templar = Templar(variables=data, loader=self._loader)
    return templar.template(data, fail_on_undefined=False)
def load_data(self, ds, variable_manager=None, loader=None, ablefile=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    AbleStage() object rather than an AbleStageInclude object
    '''

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # command objects
    new_obj = super(AbleStageInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)

    try:
        forward_conditional = False
        if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
            return None
    except AnsibleError:
        # conditional evaluation raised an error, so we set a flag to indicate
        # we need to forward the conditionals on to the included play(s)
        forward_conditional = True

    extend = False
    if new_obj.extend:
        file_name = templar.template(new_obj.extend)
        del ds['extend']
        extend = True
    elif new_obj.include:
        file_name = templar.template(new_obj.include)
        del ds['include']
    else:
        raise AnsibleParserError("no include or extend value specified")

    if not os.path.isabs(file_name):
        file_name = os.path.join(ablefile.get_basedir(), file_name)

    new_ds = loader.load_from_file(file_name)
    if not isinstance(new_ds, dict):
        raise AnsibleParserError("stage include should be in dictionary format")

    stage = AbleStage.load(
        new_ds,
        variable_manager=variable_manager,
        loader=loader,
        ablefile=ablefile
    )
    self.merge_data(stage, extend=extend)
    return stage
def load_data(self, ds, basedir, variable_manager=None, loader=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    Playbook() object rather than a PlaybookInclude object
    '''

    # import here to avoid a dependency loop
    from ansible.playbook import Playbook
    from ansible.playbook.play import Play

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # playbook objects
    new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars())

    templar = Templar(loader=loader, variables=all_vars)

    # then we use the object to load a Playbook
    pb = Playbook(loader=loader)

    file_name = templar.template(new_obj.import_playbook)
    if not os.path.isabs(file_name):
        file_name = os.path.join(basedir, file_name)

    pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

    # finally, update each loaded playbook entry with any variables specified
    # on the included playbook and/or any tags which may have been set
    for entry in pb._entries:

        # conditional includes on a playbook need a marker to skip gathering
        if new_obj.when and isinstance(entry, Play):
            entry._included_conditional = new_obj.when[:]

        temp_vars = entry.vars.copy()
        temp_vars.update(new_obj.vars)
        param_tags = temp_vars.pop('tags', None)
        if param_tags is not None:
            entry.tags.extend(param_tags.split(','))
        entry.vars = temp_vars
        entry.tags = list(set(entry.tags).union(new_obj.tags))
        if entry._included_path is None:
            entry._included_path = os.path.dirname(file_name)

        # Check to see if we need to forward the conditionals on to the included
        # plays. If so, we can take a shortcut here and simply prepend them to
        # those attached to each block (if any)
        if new_obj.when:
            for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
                task_block._attributes['when'] = new_obj.when[:] + task_block.when[:]

    return pb
def test_template_jinja2_extensions(self):
    fake_loader = DictDataLoader({})
    templar = Templar(loader=fake_loader)

    old_exts = C.DEFAULT_JINJA2_EXTENSIONS
    try:
        C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
        self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
    finally:
        C.DEFAULT_JINJA2_EXTENSIONS = old_exts
def _load_role_path(self, role_name):
    '''
    the 'role', as specified in the ds (or as a bare string), can either
    be a simple name or a full path. If it is a full path, we use the
    basename as the role name, otherwise we take the name as-given and
    append it to the default role path
    '''

    role_path = unfrackpath(role_name)
    if self._loader.path_exists(role_path):
        role_name = os.path.basename(role_name)
        return (role_name, role_path)
    else:
        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
            u'./roles',
            self._loader.get_basedir(),
            u'./',
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
            role_search_paths.extend(configured_paths)

        # finally, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
        else:
            all_vars = dict()
        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

    # FIXME: make the parser smart about list/string entries in
    #        the yaml so the error line/file can be reported here
    raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)))
def load_data(self, ds, variable_manager=None, loader=None, file_name=None, basedir=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    AbleFile() object rather than an AbleFileInclude object
    '''

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # command objects
    new_obj = super(AbleFileInclude, self).load_data(ds, variable_manager=variable_manager, loader=loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)

    extend = False
    if new_obj.extend:
        file_name = templar.template(new_obj.extend)
        del ds['extend']
        extend = True
    elif new_obj.include:
        file_name = templar.template(new_obj.include)
        del ds['include']
    else:
        raise AnsibleParserError("no include or extend value specified")

    new_ds = loader.load_from_file(file_name)
    if not isinstance(new_ds, dict):
        raise AnsibleParserError("command include should be in dictionary format")

    af = AbleFile.load(
        new_ds,
        file_name=file_name,
        basedir=basedir,
        variable_manager=variable_manager,
        loader=loader
    )
    self.merge_data(af, extend=extend)

    # Make sure ablefile is set as parent.
    for stage in af.stages:
        stage.ablefile = af
    for cmd in af.commands:
        cmd.ablefile = af

    return af
def _load_role_path(self, role_name):
    '''
    the 'role', as specified in the ds (or as a bare string), can either
    be a simple name or a full path. If it is a full path, we use the
    basename as the role name, otherwise we take the name as-given and
    append it to the default role path
    '''

    # we always start the search for roles in the base directory of the playbook
    role_search_paths = [
        os.path.join(self._loader.get_basedir(), u'roles'),
    ]

    # also search in the configured roles path
    if C.DEFAULT_ROLES_PATH:
        role_search_paths.extend(C.DEFAULT_ROLES_PATH)

    # next, append the roles basedir, if it was set, so we can
    # search relative to that directory for dependent roles
    if self._role_basedir:
        role_search_paths.append(self._role_basedir)

    # finally as a last resort we look in the current basedir as set
    # in the loader (which should be the playbook dir itself) but without
    # the roles/ dir appended
    role_search_paths.append(self._loader.get_basedir())

    # create a templar class to template the dependency names, in
    # case they contain variables
    if self._variable_manager is not None:
        all_vars = self._variable_manager.get_vars(play=self._play)
    else:
        all_vars = dict()
    templar = Templar(loader=self._loader, variables=all_vars)
    role_name = templar.template(role_name)

    # now iterate through the possible paths and return the first one we find
    for path in role_search_paths:
        path = templar.template(path)
        role_path = unfrackpath(os.path.join(path, role_name))
        if self._loader.path_exists(role_path):
            return (role_name, role_path)

    # if not found elsewhere try to extract path from name
    role_path = unfrackpath(role_name)
    if self._loader.path_exists(role_path):
        role_name = os.path.basename(role_name)
        return (role_name, role_path)

    raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
def post_validate(self, all_vars=dict(), fail_on_undefined=True):
    '''
    we can't tell that everything is of the right type until we have all
    the variables. Run basic types (from isa) as well as
    any _post_validate_<foo> functions.
    '''

    basedir = None
    if self._loader is not None:
        basedir = self._loader.get_basedir()

    templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=fail_on_undefined)
    for (name, attribute) in iteritems(self._get_base_attributes()):

        if getattr(self, name) is None:
            if not attribute.required:
                continue
            else:
                raise AnsibleParserError("the field '%s' is required but was not set" % name)

        try:
            # if the attribute contains a variable, template it now
            value = templar.template(getattr(self, name))

            # run the post-validator if present
            method = getattr(self, '_post_validate_%s' % name, None)
            if method:
                value = method(attribute, value, all_vars, fail_on_undefined)
            else:
                # otherwise, just make sure the attribute is of the type it should be
                if attribute.isa == 'string':
                    value = unicode(value)
                elif attribute.isa == 'int':
                    value = int(value)
                elif attribute.isa == 'bool':
                    value = boolean(value)
                elif attribute.isa == 'list':
                    if not isinstance(value, list):
                        value = [value]
                elif attribute.isa == 'dict' and not isinstance(value, dict):
                    raise TypeError()

            # and assign the massaged value back to the attribute field
            setattr(self, name, value)

        except (TypeError, ValueError) as e:
            raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
        except UndefinedError as e:
            if fail_on_undefined:
                raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name, e), obj=self.get_ds())
def _normalize_parameters(self, thing, action=None, additional_args=dict()):
    '''
    arguments can be fuzzy. Deal with all the forms.
    '''

    # final args are the ones we'll eventually return, so first update
    # them with any additional args specified, which have lower priority
    # than those which may be parsed/normalized next
    final_args = dict()
    if additional_args:
        if isinstance(additional_args, string_types):
            templar = Templar(loader=None)
            if templar._contains_vars(additional_args):
                final_args['_variable_params'] = additional_args
            else:
                raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')")
        elif isinstance(additional_args, dict):
            final_args.update(additional_args)
        else:
            raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')

    # how we normalize depends if we figured out what the module name is
    # yet. If we have already figured it out, it's an 'old style' invocation.
    # otherwise, it's not

    if action is not None:
        args = self._normalize_old_style_args(thing, action)
    else:
        (action, args) = self._normalize_new_style_args(thing)

    # this can occasionally happen, simplify
    if args and 'args' in args:
        tmp_args = args.pop('args')
        if isinstance(tmp_args, string_types):
            tmp_args = parse_kv(tmp_args)
        args.update(tmp_args)

    # only internal variables can start with an underscore, so
    # we don't allow users to set them directly in arguments
    if args and action not in ('command', 'win_command', 'shell', 'win_shell', 'script', 'raw'):
        for arg in args:
            arg = to_text(arg)
            if arg.startswith('_ansible_'):
                raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))

    # finally, update the args we're going to return with the ones
    # which were normalized above
    if args:
        final_args.update(args)

    return (action, final_args)
def test_action_base__compute_environment_string(self):
    fake_loader = DictDataLoader({})

    # create our fake task
    mock_task = MagicMock()
    mock_task.action = "copy"
    mock_task.args = dict(a=1)

    # create a mock connection, so we don't actually try and connect to things
    def env_prefix(**args):
        return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in args.items()])
    mock_connection = MagicMock()
    mock_connection._shell.env_prefix.side_effect = env_prefix

    # we're using a real play context here
    play_context = PlayContext()

    # and we're using a real templar here too
    templar = Templar(loader=fake_loader)

    # our test class
    action_base = DerivedActionBase(
        task=mock_task,
        connection=mock_connection,
        play_context=play_context,
        loader=fake_loader,
        templar=templar,
        shared_loader_obj=None,
    )

    # test standard environment setup
    mock_task.environment = [dict(FOO='foo'), None]
    env_string = action_base._compute_environment_string()
    self.assertEqual(env_string, "FOO=foo")

    # test where environment is not a list
    mock_task.environment = dict(FOO='foo')
    env_string = action_base._compute_environment_string()
    self.assertEqual(env_string, "FOO=foo")

    # test environment with a variable in it
    templar.set_available_variables(variables=dict(the_var='bar'))
    mock_task.environment = [dict(FOO='{{the_var}}')]
    env_string = action_base._compute_environment_string()
    self.assertEqual(env_string, "FOO=bar")

    # test with a bad environment set
    mock_task.environment = dict(FOO='foo')
    mock_task.environment = ['hi there']
    self.assertRaises(AnsibleError, action_base._compute_environment_string)
def __getitem__(self, host_name):
    host = self._find_host(host_name)
    if host is None:
        raise j2undefined

    data = self._variable_manager.get_vars(loader=self._loader, host=host, include_hostvars=False)

    sha1_hash = sha1(str(data).encode('utf-8')).hexdigest()
    if sha1_hash in self._cached_result:
        result = self._cached_result[sha1_hash]
    else:
        templar = Templar(variables=data, loader=self._loader)
        result = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS)
        self._cached_result[sha1_hash] = result
    return result
def listify_lookup_plugin_terms(terms, variables, loader):

    if isinstance(terms, basestring):
        stripped = terms.strip()
        templar = Templar(loader=loader, variables=variables)

        # FIXME: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override
        terms = templar.template(terms, convert_bare=True, fail_on_undefined=False)

        # TODO: check if this is needed as template should also return correct type already
        terms = safe_eval(terms)

    if isinstance(terms, basestring) or not isinstance(terms, Iterable):
        terms = [terms]

    return terms
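# --- illustration (not from the original source) ---
# What the safe_eval() step above is for: when Jinja2 stringifies a list
# ("['a', 'b']"), it must be coerced back into a real Python list.
# ast.literal_eval is used here as a rough stand-in for Ansible's safe_eval,
# which performs a similarly restricted evaluation; coerce_terms is illustrative.
import ast

def coerce_terms(terms):
    if isinstance(terms, str):
        try:
            terms = ast.literal_eval(terms)
        except (ValueError, SyntaxError):
            pass  # plain strings stay as-is
    if isinstance(terms, str) or not isinstance(terms, (list, tuple)):
        terms = [terms]
    return list(terms)

assert coerce_terms("['a', 'b']") == ['a', 'b']
assert coerce_terms("alist") == ['alist']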
def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to):
    '''
    Returns a dictionary of so-called "magic" variables in Ansible,
    which are special variables we set internally for use.
    '''

    variables = {}
    variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
    variables['ansible_playbook_python'] = sys.executable

    if play:
        variables['role_names'] = [r._role_name for r in play.roles]

    if task:
        if task._role:
            variables['role_name'] = task._role.get_name()
            variables['role_path'] = task._role._role_path
            variables['role_uuid'] = text_type(task._role._uuid)

    if self._inventory is not None:
        variables['groups'] = self._inventory.get_groups_dict()
        if play:
            templar = Templar(loader=self._loader)
            if templar.is_template(play.hosts):
                pattern = 'all'
            else:
                pattern = play.hosts or 'all'
            # add the list of hosts in the play, as adjusted for limit/filters
            variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
            variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
            variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts]

            # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
            #             however this would take work in the templating engine, so for now
            #             we'll add both
            variables['play_hosts'] = variables['ansible_play_batch']

    # the 'omit' value allows params to be left out if the variable they are based on is undefined
    variables['omit'] = self._omit_token
    # Set options vars
    for option, option_value in iteritems(self._options_vars):
        variables[option] = option_value

    if self._hostvars is not None and include_hostvars:
        variables['hostvars'] = self._hostvars

    return variables
def load_data(self, ds, basedir, variable_manager=None, loader=None):
    '''
    Overrides the base load_data(), as we're actually going to return a new
    Playbook() object rather than a PlaybookInclude object
    '''

    # import here to avoid a dependency loop
    from ansible.playbook import Playbook

    # first, we use the original parent method to correctly load the object
    # via the load_data/preprocess_data system we normally use for other
    # playbook objects
    new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

    all_vars = self.vars.copy()
    if variable_manager:
        all_vars.update(variable_manager.get_vars(loader=loader))

    templar = Templar(loader=loader, variables=all_vars)
    if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
        return None

    # then we use the object to load a Playbook
    pb = Playbook(loader=loader)

    file_name = templar.template(new_obj.include)
    if not os.path.isabs(file_name):
        file_name = os.path.join(basedir, file_name)

    pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

    # finally, update each loaded playbook entry with any variables specified
    # on the included playbook and/or any tags which may have been set
    for entry in pb._entries:
        temp_vars = entry.vars.copy()
        temp_vars.update(new_obj.vars)
        param_tags = temp_vars.pop('tags', None)
        if param_tags is not None:
            entry.tags.extend(param_tags.split(','))
        entry.vars = temp_vars
        entry.tags = list(set(entry.tags).union(new_obj.tags))
        if entry._included_path is None:
            entry._included_path = os.path.dirname(file_name)

    return pb
def run(self, terms, variables, **kwargs):
    if not isinstance(terms, list):
        terms = [terms]

    templar = Templar(loader=self._loader, variables=variables)

    ret = []
    for term in terms:
        path = self._loader.path_dwim(term)
        if os.path.exists(path):
            with open(path, "r") as f:
                template_data = f.read()
            res = templar.template(template_data, preserve_trailing_newlines=True)
            ret.append(res)
        else:
            raise AnsibleError("the template file %s could not be found for the lookup" % term)
    return ret
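# --- illustration (not from the original source) ---
# The same read-and-render flow with plain jinja2, for contrast:
# keep_trailing_newline is the jinja2 analogue of the preserve_trailing_newlines
# flag passed to templar.template() above.
from jinja2 import Environment

env = Environment(keep_trailing_newline=True)
template_data = "Welcome to {{ hostname }}\n"
rendered = env.from_string(template_data).render(hostname='web01')
assert rendered == "Welcome to web01\n"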
def parent_handler_match(target_handler, handler_name):
    if target_handler:
        if isinstance(target_handler, TaskInclude):
            try:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                templar = Templar(loader=self._loader, variables=handler_vars)
                target_handler_name = templar.template(target_handler.name)
                if target_handler_name == handler_name:
                    return True
                else:
                    target_handler_name = templar.template(target_handler.get_name())
                    if target_handler_name == handler_name:
                        return True
            except (UndefinedError, AnsibleUndefinedVariable):
                pass
        return parent_handler_match(target_handler._parent, handler_name)
    else:
        return False
def _squash_items(self, items, variables):
    '''
    Squash items down to a comma-separated list for certain modules which
    support it (typically package management modules).
    '''
    # _task.action could contain templatable strings (via action: and
    # local_action:)  Template it before comparing.  If we don't end up
    # optimizing it here, the templatable string might use template vars
    # that aren't available until later (it could even use vars from the
    # with_items loop) so don't make the templated string permanent yet.
    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
    task_action = self._task.action
    if templar._contains_vars(task_action):
        task_action = templar.template(task_action, fail_on_undefined=False)

    if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
        if all(isinstance(o, string_types) for o in items):
            final_items = []

            name = None
            for allowed in ['name', 'pkg', 'package']:
                name = self._task.args.pop(allowed, None)
                if name is not None:
                    break

            # This gets the information to check whether the name field
            # contains a template that we can squash for
            template_no_item = template_with_item = None
            if name:
                if templar._contains_vars(name):
                    variables['item'] = '\0$'
                    template_no_item = templar.template(name, variables, cache=False)
                    variables['item'] = '\0@'
                    template_with_item = templar.template(name, variables, cache=False)
                    del variables['item']

                # Check if the user is doing some operation that doesn't take
                # name/pkg or the name/pkg field doesn't have any variables
                # and thus the items can't be squashed
                if template_no_item != template_with_item:
                    for item in items:
                        variables['item'] = item
                        if self._task.evaluate_conditional(templar, variables):
                            new_item = templar.template(name, cache=False)
                            final_items.append(new_item)
                    self._task.args['name'] = final_items
                    # Wrap this in a list so that the calling function loop
                    # executes exactly once
                    return [final_items]
                else:
                    # Restore the name parameter
                    self._task.args['name'] = name
        #elif:
            # Right now we only optimize single entries.  In the future we
            # could optimize more types:
            # * lists can be squashed together
            # * dicts could squash entries that match in all cases except the
            #   name or pkg field.
    return items
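# --- illustration (not from the original source) ---
# An isolated sketch of the sentinel probe _squash_items() uses to decide
# whether the name template really depends on the loop variable: render it
# with two different impossible item values and compare the outputs. Plain
# jinja2 stands in for Ansible's Templar here; depends_on_item is illustrative.
from jinja2 import Environment

def depends_on_item(template_source, **other_vars):
    env = Environment()
    tmpl = env.from_string(template_source)
    no_item = tmpl.render(item='\0$', **other_vars)
    with_item = tmpl.render(item='\0@', **other_vars)
    return no_item != with_item

assert depends_on_item('{{ item }}') is True
assert depends_on_item('{{ pkg }}', pkg='nginx') is False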
def evaluate_tags(self, only_tags, skip_tags, all_vars):
    ''' this checks if the current item should be executed depending on tag options '''

    should_run = True

    if self.tags:
        templar = Templar(loader=self._loader, variables=all_vars)
        tags = templar.template(self.tags)

        if not isinstance(tags, list):
            if tags.find(',') != -1:
                tags = set(tags.split(','))
            else:
                tags = set([tags])
        else:
            tags = set([i for i, _ in itertools.groupby(tags)])
    else:
        # this makes isdisjoint work for untagged
        tags = self.untagged

    if only_tags:
        should_run = False

        if 'always' in tags or 'all' in only_tags:
            should_run = True
        elif not tags.isdisjoint(only_tags):
            should_run = True
        elif 'tagged' in only_tags and tags != self.untagged:
            should_run = True

    if should_run and skip_tags:
        # Check for tags that we need to skip
        if 'all' in skip_tags:
            if 'always' not in tags or 'always' in skip_tags:
                should_run = False
        elif not tags.isdisjoint(skip_tags):
            should_run = False
        elif 'tagged' in skip_tags and tags != self.untagged:
            should_run = False

    return should_run
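# --- illustration (not from the original source) ---
# The core of the tag matching above is plain set intersection via
# isdisjoint(); the 'always'/'all'/'tagged' special cases are omitted here.
task_tags = {'deploy', 'web'}
only_tags = {'web'}
skip_tags = {'debug'}

should_run = not task_tags.isdisjoint(only_tags) and task_tags.isdisjoint(skip_tags)
assert should_run is True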
def run(self):
    '''
    Run the given playbook, based on the settings in the play which
    may limit the runs to serialized groups, etc.
    Override Playbook class to Backup Playbook
    '''

    result = 0
    entrylist = []
    entry = {}
    try:
        for playbook_path in self._playbooks:
            pb = BackupPlaybook.load(
                playbook_path,
                variable_manager=self._variable_manager,
                loader=self._loader)
            # FIXME: move out of inventory
            self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

            if self._tqm is None:  # we are doing a listing
                entry = {'playbook': playbook_path}
                entry['plays'] = []
            else:
                # make sure the tqm has callbacks loaded
                self._tqm.load_callbacks()
                self._tqm.send_callback('v2_playbook_on_start', pb)

            i = 1
            plays = pb.get_plays()
            display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

            for play in plays:
                if play._included_path is not None:
                    self._loader.set_basedir(play._included_path)
                else:
                    self._loader.set_basedir(pb._basedir)

                # clear any filters which may have been applied to the inventory
                self._inventory.remove_restriction()

                # Allow variables to be used in vars_prompt fields.
                all_vars = self._variable_manager.get_vars(play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                setattr(play, 'vars_prompt', templar.template(play.vars_prompt))

                if play.vars_prompt:
                    for var in play.vars_prompt:
                        vname = var['name']
                        prompt = var.get("prompt", vname)
                        default = var.get("default", None)
                        private = boolean(var.get("private", True))
                        confirm = boolean(var.get("confirm", False))
                        encrypt = var.get("encrypt", None)
                        salt_size = var.get("salt_size", None)
                        salt = var.get("salt", None)

                        if vname not in self._variable_manager.extra_vars:
                            if self._tqm:
                                self._tqm.send_callback(
                                    'v2_playbook_on_vars_prompt', vname,
                                    private, prompt, encrypt, confirm,
                                    salt_size, salt, default)
                                play.vars[vname] = display.do_var_prompt(
                                    vname, private, prompt, encrypt, confirm,
                                    salt_size, salt, default)
                            else:  # we are either in --list-<option> or syntax check
                                play.vars[vname] = default

                # Post validate so any play level variables are templated
                all_vars = self._variable_manager.get_vars(play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                play.post_validate(templar)

                if self._options.syntax:
                    continue

                if self._tqm is None:
                    # we are just doing a listing
                    entry['plays'].append(play)
                else:
                    self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                    previously_failed = len(self._tqm._failed_hosts)
                    previously_unreachable = len(self._tqm._unreachable_hosts)

                    break_play = False
                    # we are actually running plays
                    batches = self._get_serialized_batches(play)
                    if len(batches) == 0:
                        self._tqm.send_callback('v2_playbook_on_play_start', play)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                    for batch in batches:
                        # restrict the inventory to the hosts in the serialized batch
                        self._inventory.restrict_to_hosts(batch)
                        # and run it...
                        result = self._tqm.run(play=play)

                        # break the play if the result equals the special return code
                        if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
                            result = self._tqm.RUN_FAILED_HOSTS
                            break_play = True

                        # check the number of failures here, to see if they're above the maximum
                        # failure percentage allowed, or if any errors are fatal. If either of those
                        # conditions are met, we break out, otherwise we only break out if the entire
                        # batch failed
                        failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                            (previously_failed + previously_unreachable)
                        if len(batch) == failed_hosts_count:
                            break_play = True
                            break

                        # update the previous counts so they don't accumulate incorrectly
                        # over multiple serial batches
                        previously_failed += len(self._tqm._failed_hosts) - previously_failed
                        previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable

                        # save the unreachable hosts from this batch
                        self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                    if break_play:
                        break

                i = i + 1  # per play

            if entry:
                entrylist.append(entry)  # per playbook

            # send the stats callback for this playbook
            if self._tqm is not None:
                if C.RETRY_FILES_ENABLED:
                    retries = set(self._tqm._failed_hosts.keys())
                    retries.update(self._tqm._unreachable_hosts.keys())
                    retries = sorted(retries)
                    if len(retries) > 0:
                        if C.RETRY_FILES_SAVE_PATH:
                            basedir = C.RETRY_FILES_SAVE_PATH
                        elif playbook_path:
                            basedir = os.path.dirname(os.path.abspath(playbook_path))
                        else:
                            basedir = '~/'

                        (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                        filename = os.path.join(basedir, "%s.retry" % retry_name)
                        if self._generate_retry_inventory(filename, retries):
                            display.display("\tto retry, use: --limit @%s\n" % filename)

                self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

            # if the last result wasn't zero, break out of the playbook file name loop
            if result != 0:
                break

        if entrylist:
            return entrylist

    finally:
        if self._tqm is not None:
            self._tqm.cleanup()
        if self._loader:
            self._loader.cleanup_all_tmp_files()

    if self._options.syntax:
        display.display("No issues encountered")
        return result

    return result
def boilerplate_module(modfile, args, interpreters, check, destfile):
    """ simulate what ansible does with new style modules """

    # module_fh = open(modfile)
    # module_data = module_fh.read()
    # module_fh.close()

    # replacer = module_common.ModuleReplacer()

    loader = DataLoader()

    # included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1

    complex_args = {}

    # default selinux fs list is passed in as the _ansible_selinux_special_fs arg
    complex_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
    complex_args['_ansible_tmpdir'] = C.DEFAULT_LOCAL_TMP
    complex_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
    complex_args['_ansible_version'] = __version__

    if args.startswith("@"):
        # Argument is a YAML file (JSON is a subset of YAML)
        complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:]))
        args = ''
    elif args.startswith("{"):
        # Argument is a YAML document (not a file)
        complex_args = utils_vars.combine_vars(complex_args, loader.load(args))
        args = ''

    if args:
        parsed_args = parse_kv(args)
        complex_args = utils_vars.combine_vars(complex_args, parsed_args)

    task_vars = interpreters

    if check:
        complex_args['_ansible_check_mode'] = True

    modname = os.path.basename(modfile)
    modname = os.path.splitext(modname)[0]
    (module_data, module_style, shebang) = module_common.modify_module(
        modname,
        modfile,
        complex_args,
        Templar(loader=loader),
        task_vars=task_vars
    )

    if module_style == 'new' and '_ANSIBALLZ_WRAPPER = True' in to_native(module_data):
        module_style = 'ansiballz'

    modfile2_path = os.path.expanduser(destfile)
    print("* including generated source, if any, saving to: %s" % modfile2_path)
    if module_style not in ('ansiballz', 'old'):
        print("* this may offset any line numbers in tracebacks/debuggers!")
    modfile2 = open(modfile2_path, 'wb')
    modfile2.write(module_data)
    modfile2.close()
    modfile = modfile2_path

    return (modfile2_path, modname, module_style)
def render_template(self, inject, v):
    return Templar(loader=self._loader, variables=inject).template(v)
def run(self):
    '''
    Run the given playbook, based on the settings in the play which
    may limit the runs to serialized groups, etc.
    '''

    result = 0
    entrylist = []
    entry = {}
    try:
        for playbook_path in self._playbooks:
            pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
            self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

            if self._tqm is None:  # we are doing a listing
                entry = {'playbook': playbook_path}
                entry['plays'] = []
            else:
                # make sure the tqm has callbacks loaded
                self._tqm.load_callbacks()
                self._tqm.send_callback('v2_playbook_on_start', pb)

            i = 1
            plays = pb.get_plays()
            display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path)))

            for play in plays:
                if play._included_path is not None:
                    self._loader.set_basedir(play._included_path)
                else:
                    self._loader.set_basedir(pb._basedir)

                # clear any filters which may have been applied to the inventory
                self._inventory.remove_restriction()

                if play.vars_prompt:
                    for var in play.vars_prompt:
                        vname = var['name']
                        prompt = var.get("prompt", vname)
                        default = var.get("default", None)
                        private = var.get("private", True)
                        confirm = var.get("confirm", False)
                        encrypt = var.get("encrypt", None)
                        salt_size = var.get("salt_size", None)
                        salt = var.get("salt", None)

                        if vname not in self._variable_manager.extra_vars:
                            if self._tqm:
                                self._tqm.send_callback(
                                    'v2_playbook_on_vars_prompt', vname,
                                    private, prompt, encrypt, confirm,
                                    salt_size, salt, default)
                                play.vars[vname] = display.do_var_prompt(
                                    vname, private, prompt, encrypt, confirm,
                                    salt_size, salt, default)
                            else:  # we are either in --list-<option> or syntax check
                                play.vars[vname] = default

                # Create a temporary copy of the play here, so we can run post_validate
                # on it without the templating changes affecting the original object.
                all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                new_play = play.copy()
                new_play.post_validate(templar)

                if self._options.syntax:
                    continue

                if self._tqm is None:
                    # we are just doing a listing
                    entry['plays'].append(new_play)
                else:
                    self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                    break_play = False
                    # we are actually running plays
                    for batch in self._get_serialized_batches(new_play):
                        if len(batch) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                            break

                        # restrict the inventory to the hosts in the serialized batch
                        self._inventory.restrict_to_hosts(batch)
                        # and run it...
                        result = self._tqm.run(play=play)

                        # break the play if the result equals the special return code
                        if result == self._tqm.RUN_FAILED_BREAK_PLAY:
                            result = self._tqm.RUN_FAILED_HOSTS
                            break_play = True

                        # check the number of failures here, to see if they're above the maximum
                        # failure percentage allowed, or if any errors are fatal. If either of those
                        # conditions are met, we break out, otherwise we only break out if the entire
                        # batch failed
                        failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
                        if new_play.max_fail_percentage is not None and \
                           int((new_play.max_fail_percentage) / 100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
                            break_play = True
                            break
                        elif len(batch) == failed_hosts_count:
                            break_play = True
                            break

                        # save the unreachable hosts from this batch
                        self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                        # if the last result wasn't zero or 3 (some hosts were unreachable),
                        # break out of the serial batch loop
                        if result not in (self._tqm.RUN_OK, self._tqm.RUN_UNREACHABLE_HOSTS):
                            break

                    if break_play:
                        break

                i = i + 1  # per play

            if entry:
                entrylist.append(entry)  # per playbook

            # send the stats callback for this playbook
            if self._tqm is not None:
                if C.RETRY_FILES_ENABLED:
                    retries = set(self._tqm._failed_hosts.keys())
                    retries.update(self._tqm._unreachable_hosts.keys())
                    retries = sorted(retries)
                    if len(retries) > 0:
                        if C.RETRY_FILES_SAVE_PATH:
                            basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
                        elif playbook_path:
                            basedir = os.path.dirname(playbook_path)
                        else:
                            basedir = '~/'

                        (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                        filename = os.path.join(basedir, "%s.retry" % retry_name)
                        if self._generate_retry_inventory(filename, retries):
                            display.display("\tto retry, use: --limit @%s\n" % filename)

                self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

            # if the last result wasn't zero, break out of the playbook file name loop
            if result != 0:
                break

        if entrylist:
            return entrylist

    finally:
        if self._tqm is not None:
            self._tqm.cleanup()
        if self._loader:
            self._loader.cleanup_all_tmp_files()

    if self._options.syntax:
        display.display("No issues encountered")
        return result

    return result
def get_host_vars(self, host: Host):
    data = self.var_mgr.get_vars(host=host)
    templar = Templar(variables=data, loader=self.loader)
    return templar.template(data, fail_on_undefined=False)
class AnsibleContainerConductorConfig(Mapping):
    _config = None

    @container.conductor_only
    def __init__(self, container_config, skip_services=False):
        self._skip_services = skip_services
        self._config = container_config
        self._templar = Templar(loader=None, variables={})
        self._process_defaults()
        self._process_top_level_sections()
        self._process_services()

    def _process_section(self, section_value, callback=None, templar=None):
        if not templar:
            templar = self._templar
        processed = ordereddict()
        for key, value in section_value.items():
            if isinstance(value, string_types):
                # strings can be templated
                processed[key] = templar.template(value)
                if isinstance(processed[key], AnsibleUnsafeText):
                    processed[key] = str(processed[key])
            elif isinstance(value, (list, dict)):
                # if it's a dimensional structure, it's cheaper just to serialize
                # it, treat it like a template, and then deserialize it again
                buffer = BytesIO()  # use bytes explicitly, not unicode
                yaml.round_trip_dump(value, buffer)
                processed[key] = yaml.round_trip_load(
                    templar.template(buffer.getvalue()))
            else:
                # ints, booleans, etc.
                processed[key] = value
        if callback:
            callback(processed)
        return processed

    def _process_defaults(self):
        logger.debug('Processing defaults section...')
        self.defaults = self._process_section(
            self._config.get('defaults', ordereddict()),
            callback=lambda processed: self._templar.set_available_variables(
                dict(processed)))

    def _process_top_level_sections(self):
        self._config['settings'] = self._config.get('settings', yaml.compat.ordereddict())
        for section in ['volumes', 'registries', 'secrets']:
            logger.debug('Processing section...', section=section)
            setattr(self, section, dict(self._process_section(
                self._config.get(section, ordereddict()))))

    def _process_services(self):
        services = ordereddict()
        for service, service_data in self._config.get('services', ordereddict()).items():
            logger.debug('Processing service...', service=service, service_data=service_data)
            processed = ordereddict()
            if not self._skip_services:
                service_defaults = self.defaults.copy()
                for idx in range(len(service_data.get('volumes', []))):
                    # To mount the project directory, let users specify `$PWD` and
                    # have that filled in with the project path
                    service_data['volumes'][idx] = re.sub(r'\$(PWD|\{PWD\})',
                                                          self._config['settings'].get('pwd'),
                                                          service_data['volumes'][idx])
                for role_spec in service_data.get('roles', []):
                    if isinstance(role_spec, dict):
                        # A role with parameters to run it with
                        role_spec_copy = copy.deepcopy(role_spec)
                        role_name = role_spec_copy.pop('role')
                        role_args = role_spec_copy
                    else:
                        role_name = role_spec
                        role_args = {}
                    role_metadata = get_metadata_from_role(role_name)
                    processed.update(role_metadata, relax=True)
                    service_defaults.update(get_defaults_from_role(role_name), relax=True)
                    service_defaults.update(role_args, relax=True)
                processed.update(service_data, relax=True)
                logger.debug('Rendering service keys from defaults', service=service, defaults=service_defaults)
                services[service] = self._process_section(
                    processed,
                    templar=Templar(loader=None, variables=service_defaults))
                services[service]['defaults'] = service_defaults
            else:
                services[service] = service_data
        self.services = services

    def __getitem__(self, key):
        if key.startswith('_'):
            raise KeyError(key)
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)

    def __len__(self):
        # defaults, registries, volumes, services, and secrets
        return 5

    def __iter__(self):
        yield self.defaults
        yield self.registries
        yield self.volumes
        yield self.services
        yield self.secrets
def _run_loop(self, items):
    '''
    Runs the task with the loop items specified and collates the result
    into an array named 'results' which is inserted into the final result
    along with the item for which the loop ran.
    '''

    results = []

    # make copies of the job vars and task so we can add the item to
    # the variables and re-validate the task with the item variable
    # task_vars = self._job_vars.copy()
    task_vars = self._job_vars

    loop_var = 'item'
    label = None
    loop_pause = 0
    if self._task.loop_control:
        # the value may be 'None', so we still need to default it back to 'item'
        loop_var = self._task.loop_control.loop_var or 'item'
        label = self._task.loop_control.label or ('{{' + loop_var + '}}')
        loop_pause = self._task.loop_control.pause or 0

    if loop_var in task_vars:
        display.warning(u"The loop variable '%s' is already in use. "
                        u"You should set the `loop_var` value in the `loop_control` option for the task"
                        u" to something else to avoid variable collisions and unexpected behavior." % loop_var)

    ran_once = False
    items = self._squash_items(items, loop_var, task_vars)
    for item in items:
        task_vars[loop_var] = item

        # pause between loop iterations
        if loop_pause and ran_once:
            time.sleep(loop_pause)
        else:
            ran_once = True

        try:
            tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
            tmp_task._parent = self._task._parent
            tmp_play_context = self._play_context.copy()
        except AnsibleParserError as e:
            results.append(dict(failed=True, msg=to_text(e)))
            continue

        # now we swap the internal task and play context with their copies,
        # execute, and swap them back so we can do the next iteration cleanly
        (self._task, tmp_task) = (tmp_task, self._task)
        (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
        res = self._execute(variables=task_vars)
        task_fields = self._task.dump_attrs()
        (self._task, tmp_task) = (tmp_task, self._task)
        (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

        # now update the result with the item info, and append the result
        # to the list of results
        res[loop_var] = item
        res['_ansible_item_result'] = True
        res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

        if label is not None:
            templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
            res['_ansible_item_label'] = templar.template(label)

        self._rslt_q.put(
            TaskResult(
                self._host.name,
                self._task._uuid,
                res,
                task_fields=task_fields,
            ),
            block=False,
        )
        results.append(res)
        del task_vars[loop_var]

    return results
def _get_delegated_vars(self, play, task, existing_variables):
    # we unfortunately need to template the delegate_to field here,
    # as we're fetching vars before post_validate has been called on
    # the task that has been passed in
    vars_copy = existing_variables.copy()
    templar = Templar(loader=self._loader, variables=vars_copy)

    items = []
    if task.loop_with is not None:
        if task.loop_with in lookup_loader:
            try:
                loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=templar,
                                                         loader=self._loader, fail_on_undefined=True,
                                                         convert_bare=False)
                items = lookup_loader.get(task.loop_with, loader=self._loader,
                                          templar=templar).run(terms=loop_terms, variables=vars_copy)
            except AnsibleUndefinedVariable:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
    elif task.loop is not None:
        items = templar.template(task.loop)
    else:
        items = [None]

    delegated_host_vars = dict()
    for item in items:
        # update the variables with the item value for templating, in case we need it
        if item is not None:
            vars_copy['item'] = item
            templar.set_available_variables(vars_copy)

        delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
        if delegated_host_name is None:
            raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
        if delegated_host_name in delegated_host_vars:
            # no need to repeat ourselves, as the delegate_to value
            # does not appear to be tied to the loop item variable
            continue

        # a dictionary of variables to use if we have to create a new host below
        # we set the default port based on the default transport here, to make sure
        # we use the proper default for windows
        new_port = C.DEFAULT_REMOTE_PORT
        if C.DEFAULT_TRANSPORT == 'winrm':
            new_port = 5986

        new_delegated_host_vars = dict(
            ansible_delegated_host=delegated_host_name,
            ansible_host=delegated_host_name,  # not redundant as other sources can change ansible_host
            ansible_port=new_port,
            ansible_user=C.DEFAULT_REMOTE_USER,
            ansible_connection=C.DEFAULT_TRANSPORT,
        )

        # now try to find the delegated-to host in inventory, or failing that,
        # create a new host on the fly so we can fetch variables for it
        delegated_host = None
        if self._inventory is not None:
            delegated_host = self._inventory.get_host(delegated_host_name)
            # try looking it up based on the address field, and finally
            # fall back to creating a host on the fly to use for the var lookup
            if delegated_host is None:
                if delegated_host_name in C.LOCALHOST:
                    delegated_host = self._inventory.localhost
                else:
                    for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
                        delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
        else:
            delegated_host = Host(name=delegated_host_name)
            delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)

        # now we go fetch the vars for the delegated-to host and save them in our
        # master dictionary of variables to be used later in the TaskExecutor/PlayContext
        delegated_host_vars[delegated_host_name] = self.get_vars(
            play=play,
            host=delegated_host,
            task=task,
            include_delegate_to=False,
            include_hostvars=False,
        )
    return delegated_host_vars
def _process_pending_results(self, iterator): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). ''' ret_results = [] while not self._final_q.empty() and not self._tqm._terminated: try: result = self._final_q.get(block=False) self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result], )) # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] host = task_result._host task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed( ): if not task.ignore_errors: self._display.debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback( 'v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) elif result[0] == 'host_unreachable': self._tqm._unreachable_hosts[host.name] = True self._tqm._stats.increment('dark', host.name) self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif result[0] == 'host_task_skipped': self._tqm._stats.increment('skipped', host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) elif result[0] == 'host_task_ok': self._tqm._stats.increment('ok', host.name) if 'changed' in task_result._result and task_result._result[ 'changed']: self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) self._pending_results -= 1 if host.name in self._blocked_hosts: del self._blocked_hosts[host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if task_result._task._role is not None and result[0] in ( 'host_task_ok', 'host_task_failed'): # lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in iterator._play.ROLE_CACHE[ task_result._task._role._role_name].iteritems( ): params = task_result._task._role._role_params if task_result._task._role.tags is not None: params['tags'] = task_result._task._role.tags if task_result._task._role.when is not None: params['when'] = task_result._task._role.when hashed_entry = hash_params(params) if entry == hashed_entry: role_obj._had_task_run = True ret_results.append(task_result) elif result[0] == 'add_host': task_result = result[1] new_host_info = task_result.get('add_host', dict()) self._add_host(new_host_info) elif result[0] == 'add_group': task = result[1] self._add_group(task, iterator) elif result[0] == 'notify_handler': task_result = result[1] handler_name = result[2] original_task = iterator.get_original_task( task_result._host, task_result._task) if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] if task_result._host not in self._notified_handlers[ handler_name]: self._notified_handlers[handler_name].append( task_result._host) elif result[0] == 'register_host_var': # essentially the same as 'set_host_var' below, however we # never follow the delegate_to value for registered vars host = result[1] var_name = result[2] var_value = result[3] self._variable_manager.set_host_variable( host, var_name, var_value) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] task = result[2] item = result[3] if task.delegate_to is 
not None: task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, host=host, task=task) task_vars = self.add_tqm_variables(task_vars, play=iterator._play) if item is not None: task_vars['item'] = item templar = Templar(loader=self._loader, variables=task_vars) host_name = templar.template(task.delegate_to) target_host = self._inventory.get_host(host_name) if target_host is None: target_host = Host(name=host_name) else: target_host = host if result[0] == 'set_host_var': var_name = result[4] var_value = result[5] self._variable_manager.set_host_variable( target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] self._variable_manager.set_host_facts( target_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) except Queue.Empty: pass return ret_results
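The result loop above dispatches on result[0], a message-type string, with the payload in the remaining tuple slots. A stripped-down sketch of that queue protocol (the queue contents and handlers here are simplified stand-ins, using the Python 3 stdlib):

    import queue

    def drain(final_q, handlers):
        ret = []
        while not final_q.empty():
            try:
                msg = final_q.get(block=False)
            except queue.Empty:
                break
            kind, payload = msg[0], msg[1:]
            if kind not in handlers:
                raise ValueError("unknown result message received: %s" % kind)
            out = handlers[kind](*payload)
            if out is not None:
                ret.append(out)
        return ret

    q = queue.Queue()
    q.put(('host_task_ok', 'web1', {'changed': True}))
    q.put(('set_host_var', 'web1', 'foo', 42))
    handlers = {
        'host_task_ok': lambda host, res: (host, res),
        'set_host_var': lambda host, name, value: print('set %s=%r on %s' % (name, value, host)),
    }
    print(drain(q, handlers))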
def process_include_results(results, tqm, iterator, inventory, loader, variable_manager): included_files = [] def get_original_host(host): if host.name in inventory._hosts_cache: return inventory._hosts_cache[host.name] else: return inventory.get_host(host.name) for res in results: original_host = res._host original_task = res._task if original_task.action == 'include': if original_task.loop: if 'results' not in res._result: continue include_results = res._result['results'] else: include_results = [res._result] for include_result in include_results: # if the task result was skipped or failed, continue if 'skipped' in include_result and include_result[ 'skipped'] or 'failed' in include_result: continue task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=original_host, task=original_task) templar = Templar(loader=loader, variables=task_vars) include_variables = include_result.get( 'include_variables', dict()) loop_var = 'item' if original_task.loop_control: loop_var = original_task.loop_control.loop_var or 'item' if loop_var in include_result: task_vars[loop_var] = include_variables[ loop_var] = include_result[loop_var] include_file = None if original_task: if original_task.static: continue if original_task._parent: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._parent cumulative_path = None while parent_include is not None: if not isinstance(parent_include, TaskInclude): parent_include = parent_include._parent continue parent_include_dir = templar.template( os.path.dirname( parent_include.args.get( '_raw_params'))) if cumulative_path is None: cumulative_path = parent_include_dir elif not os.path.isabs(cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) include_target = templar.template( include_result['include']) if original_task._role: new_basedir = os.path.join( original_task._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative( new_basedir, 'tasks', include_target) else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break else: parent_include = parent_include._parent if include_file is None: if original_task._role: include_target = templar.template( include_result['include']) include_file = loader.path_dwim_relative( original_task._role._role_path, 'tasks', include_target) else: include_file = loader.path_dwim( include_result['include']) include_file = templar.template(include_file) inc_file = IncludedFile(include_file, include_variables, original_task) try: pos = included_files.index(inc_file) inc_file = included_files[pos] except ValueError: included_files.append(inc_file) inc_file.add_host(original_host) return included_files
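process_include_results deduplicates includes by equality and accumulates the hosts that triggered each one; list.index() finds an existing equal entry via __eq__. A minimal sketch of that bookkeeping with a simplified stand-in for IncludedFile:

    class IncludedFileSketch:
        def __init__(self, filename, variables):
            self._filename = filename
            self._vars = variables
            self._hosts = []

        def __eq__(self, other):
            return self._filename == other._filename and self._vars == other._vars

        def add_host(self, host):
            if host not in self._hosts:
                self._hosts.append(host)

    def collect(results):
        included_files = []
        for host, filename, variables in results:
            inc = IncludedFileSketch(filename, variables)
            try:
                inc = included_files[included_files.index(inc)]  # reuse the existing entry
            except ValueError:
                included_files.append(inc)
            inc.add_host(host)
        return included_files

    files = collect([('web1', 'a.yml', {}), ('web2', 'a.yml', {}), ('web1', 'b.yml', {})])
    print([(f._filename, f._hosts) for f in files])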
class VariableManager: _ALLOWED = frozenset([ 'plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory', 'all_plugins_play', 'all_plugins_inventory', 'all_inventory' ]) def __init__(self, loader=None, inventory=None): self._nonpersistent_fact_cache = defaultdict(dict) self._vars_cache = defaultdict(dict) self._extra_vars = defaultdict(dict) self._host_vars_files = defaultdict(dict) self._group_vars_files = defaultdict(dict) self._inventory = inventory self._loader = loader self._hostvars = None self._omit_token = '__omit_place_holder__%s' % sha1( os.urandom(64)).hexdigest() self._options_vars = defaultdict(dict) self.safe_basedir = False self._templar = Templar(loader=self._loader) # bad cache plugin is not fatal error try: self._fact_cache = FactCache() except AnsibleError as e: display.warning(to_native(e)) # fallback to a dict as in memory cache self._fact_cache = {} def __getstate__(self): data = dict( fact_cache=self._fact_cache, np_fact_cache=self._nonpersistent_fact_cache, vars_cache=self._vars_cache, extra_vars=self._extra_vars, host_vars_files=self._host_vars_files, group_vars_files=self._group_vars_files, omit_token=self._omit_token, options_vars=self._options_vars, inventory=self._inventory, safe_basedir=self.safe_basedir, ) return data def __setstate__(self, data): self._fact_cache = data.get('fact_cache', defaultdict(dict)) self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict)) self._vars_cache = data.get('vars_cache', defaultdict(dict)) self._extra_vars = data.get('extra_vars', dict()) self._host_vars_files = data.get('host_vars_files', defaultdict(dict)) self._group_vars_files = data.get('group_vars_files', defaultdict(dict)) self._omit_token = data.get( 'omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()) self._inventory = data.get('inventory', None) self._options_vars = data.get('options_vars', dict()) self.safe_basedir = data.get('safe_basedir', False) @property def extra_vars(self): ''' ensures a clean copy of the extra_vars are made ''' return self._extra_vars.copy() @extra_vars.setter def extra_vars(self, value): ''' ensures a clean copy of the extra_vars are used to set the value ''' if not isinstance(value, Mapping): raise AnsibleAssertionError( "the type of 'value' for extra_vars should be a Mapping, but is a %s" % type(value)) self._extra_vars = value.copy() def set_inventory(self, inventory): self._inventory = inventory @property def options_vars(self): ''' ensures a clean copy of the options_vars are made ''' return self._options_vars.copy() @options_vars.setter def options_vars(self, value): ''' ensures a clean copy of the options_vars are used to set the value ''' if not isinstance(value, dict): raise AnsibleAssertionError( "the type of 'value' for options_vars should be a dict, but is a %s" % type(value)) self._options_vars = value.copy() def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). 
The order of precedence is:
            - play->roles->get_default_vars (if there is a play context)
            - group_vars_files[host] (if there is a host context)
            - host_vars_files[host] (if there is a host context)
            - host->get_vars (if there is a host context)
            - fact_cache[host] (if there is a host context)
            - play vars (if there is a play context)
            - play vars_files (if there's no host context, ignore file names that cannot be templated)
            - task->get_vars (if there is a task context)
            - vars_cache[host] (if there is a host context)
            - extra vars
        '''
        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        # default for all cases
        basedirs = []
        if self.safe_basedir:  # avoid adhoc/console loading cwd
            basedirs = [self._loader.get_basedir()]

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        if task:
            # set basedirs
            if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
                basedirs = task.get_search_path()
            elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'):  # only option in 2.4.0
                basedirs = [task.get_search_path()[0]]
            elif C.PLAYBOOK_VARS_ROOT != 'top':
                # preserves default basedirs, only option pre 2.3
                raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its roles default vars
            if task._role is not None and (play or task.action == 'include_role'):
                all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # the 'all' group and the rest of the groups for a host, used below
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data.update(plugin.get_host_vars(entity.name))
                            else:
                                data.update(plugin.get_group_vars(entity.name))
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir and not os.path.exists(inventory_dir):  # skip host lists
                        continue
                    elif not os.path.isdir(inventory_dir):  # always pass 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():
                        data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():
                    for path in basedirs:
                        data = combine_vars(data, _get_plugin_vars(plugin, path, entities))
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
return _plugins_inventory([all_group]) def all_plugins_play(): return _plugins_play([all_group]) def groups_inventory(): ''' gets group vars from inventory ''' return get_group_vars(host_groups) def groups_plugins_inventory(): ''' gets plugin sources from inventory for groups ''' return _plugins_inventory(host_groups) def groups_plugins_play(): ''' gets plugin sources from play for groups ''' return _plugins_play(host_groups) def plugins_by_groups(): ''' merges all plugin sources by group, This should be used instead, NOT in combination with the other groups_plugins* functions ''' data = {} for group in host_groups: data[group] = combine_vars(data[group], _plugins_inventory(group)) data[group] = combine_vars(data[group], _plugins_play(group)) return data # Merge groups as per precedence config # only allow to call the functions we want exposed for entry in C.VARIABLE_PRECEDENCE: if entry in self._ALLOWED: display.debug('Calling %s to load vars for %s' % (entry, host.name)) all_vars = combine_vars(all_vars, locals()[entry]()) else: display.warning( 'Ignoring unknown variable precedence entry: %s' % (entry)) # host vars, from inventory, inventory adjacent and play adjacent via plugins all_vars = combine_vars(all_vars, host.get_vars()) all_vars = combine_vars(all_vars, _plugins_inventory([host])) all_vars = combine_vars(all_vars, _plugins_play([host])) # finally, the facts caches for this host, if it exists # TODO: cleaning of facts should eventually become part of taskresults instead of vars try: facts = wrap_var(self._fact_cache.get(host.name, {})) all_vars.update(namespace_facts(facts)) # push facts to main namespace if C.INJECT_FACTS_AS_VARS: all_vars = combine_vars(all_vars, wrap_var(clean_facts(facts))) else: # always 'promote' ansible_local all_vars = combine_vars( all_vars, wrap_var( {'ansible_local': facts.get('ansible_local', {})})) except KeyError: pass if play: all_vars = combine_vars(all_vars, play.get_vars()) vars_files = play.get_vars_files() try: for vars_file_item in vars_files: # create a set of temporary vars here, which incorporate the extra # and magic vars so we can properly template the vars_files entries temp_vars = combine_vars(all_vars, self._extra_vars) temp_vars = combine_vars(temp_vars, magic_variables) self._templar.set_available_variables(temp_vars) # we assume each item in the list is itself a list, as we # support "conditional includes" for vars_files, which mimics # the with_first_found mechanism. vars_file_list = vars_file_item if not isinstance(vars_file_list, list): vars_file_list = [vars_file_list] # now we iterate through the (potential) files, and break out # as soon as we read one from the list. If none are found, we # raise an error, which is silently ignored at this point. 
try: for vars_file in vars_file_list: vars_file = self._templar.template(vars_file) if not (isinstance(vars_file, Sequence)): raise AnsibleError( "Invalid vars_files entry found: %r\n" "vars_files entries should be either a string type or " "a list of string types after template expansion" % vars_file) try: data = preprocess_vars( self._loader.load_from_file(vars_file, unsafe=True)) if data is not None: for item in data: all_vars = combine_vars(all_vars, item) break except AnsibleFileNotFound: # we continue on loader failures continue except AnsibleParserError: raise else: # if include_delegate_to is set to False, we ignore the missing # vars file here because we're working on a delegated host if include_delegate_to: raise AnsibleFileNotFound( "vars file %s was not found" % vars_file_item) except (UndefinedError, AnsibleUndefinedVariable): if host is not None and self._fact_cache.get( host.name, dict()).get( 'module_setup') and task is not None: raise AnsibleUndefinedVariable( "an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item) else: # we do not have a full context here, and the missing variable could be because of that # so just show a warning and continue display.vvv( "skipping vars_file '%s' due to an undefined variable" % vars_file_item) continue display.vvv("Read vars_file '%s'" % vars_file_item) except TypeError: raise AnsibleParserError( "Error while reading vars files - please supply a list of file names. " "Got '%s' of type %s" % (vars_files, type(vars_files))) # By default, we now merge in all vars from all roles in the play, # unless the user has disabled this via a config option if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): all_vars = combine_vars( all_vars, role.get_vars(include_params=False)) # next, we merge in the vars from the role, which will specifically # follow the role dependency chain, and then we merge in the tasks # vars (which will look at parent blocks/task includes) if task: if task._role: all_vars = combine_vars( all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False)) all_vars = combine_vars(all_vars, task.get_vars()) # next, we merge in the vars cache (include vars) and nonpersistent # facts cache (set_fact/register), in that order if host: # include_vars non-persistent cache all_vars = combine_vars( all_vars, self._vars_cache.get(host.get_name(), dict())) # fact non-persistent cache all_vars = combine_vars( all_vars, self._nonpersistent_fact_cache.get(host.name, dict())) # next, we merge in role params and task include params if task: if task._role: all_vars = combine_vars( all_vars, task._role.get_role_params(task.get_dep_chain())) # special case for include tasks, where the include params # may be specified in the vars field for the task, which should # have higher precedence than the vars/np facts above all_vars = combine_vars(all_vars, task.get_include_params()) # extra vars all_vars = combine_vars(all_vars, self._extra_vars) # magic variables all_vars = combine_vars(all_vars, magic_variables) # special case for the 'environment' magic variable, as someone # may have set it as a variable and we don't want to stomp on it if task: all_vars['environment'] = task.environment # if we have a task and we're delegating to another host, figure out the # variables for that host now so we don't have to rely on hostvars later if task and task.delegate_to is not None and include_delegate_to: all_vars['ansible_delegated_vars'], all_vars[ 
'_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars

    def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = {}
        variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
        variables['ansible_playbook_python'] = sys.executable

        if play:
            # This is a list of all role names of all dependencies for all roles for this play
            dependency_role_names = list(set([d._role_name for r in play.roles for d in r.get_all_dependencies()]))
            # This is a list of all role names of all roles for this play
            play_role_names = [r._role_name for r in play.roles]

            # ansible_role_names includes all role names, dependent or directly referenced by the play
            variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
            # ansible_play_role_names includes the names of all roles directly referenced by this play
            # roles that are implicitly referenced via dependencies are not listed.
            variables['ansible_play_role_names'] = play_role_names
            # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
            # dependencies that are also explicitly named as roles are included in this list
            variables['ansible_dependent_role_names'] = dependency_role_names

            # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
            variables['role_names'] = variables['ansible_play_role_names']

            variables['ansible_play_name'] = play.get_name()

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['groups'] = self._inventory.get_groups_dict()

        if play:
            if self._templar.is_template(play.hosts):
                pattern = 'all'
            else:
                pattern = play.hosts or 'all'
            # add the list of hosts in the play, as adjusted for limit/filters
            variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
            variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
            variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts]

            # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
            # however this would take work in the templating engine, so for now we'll add both
            variables['play_hosts'] = variables['ansible_play_batch']

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables

    def _get_delegated_vars(self, play, task, existing_variables):
        if not hasattr(task, 'loop'):
            # This "task" is not a Task, so we need to skip it
            return {}, None

        # we unfortunately need to template the delegate_to field here,
        # as we're fetching vars before post_validate has been called on
        # the task that has been passed in
        vars_copy = existing_variables.copy()
        self._templar.set_available_variables(vars_copy)

        items = []
has_loop = True if task.loop_with is not None: if task.loop_with in lookup_loader: try: loop_terms = listify_lookup_plugin_terms( terms=task.loop, templar=self._templar, loader=self._loader, fail_on_undefined=True, convert_bare=False) items = lookup_loader.get(task.loop_with, loader=self._loader, templar=self._templar).run( terms=loop_terms, variables=vars_copy) except AnsibleUndefinedVariable: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: raise AnsibleError( "Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with) elif task.loop is not None: try: items = self._templar.template(task.loop) except AnsibleUndefinedVariable: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: has_loop = False items = [None] delegated_host_vars = dict() item_var = getattr(task.loop_control, 'loop_var', 'item') cache_items = False for item in items: # update the variables with the item value for templating, in case we need it if item is not None: vars_copy[item_var] = item self._templar.set_available_variables(vars_copy) delegated_host_name = self._templar.template( task.delegate_to, fail_on_undefined=False) if delegated_host_name != task.delegate_to: cache_items = True if delegated_host_name is None: raise AnsibleError( message="Undefined delegate_to host for task:", obj=task._ds) if not isinstance(delegated_host_name, string_types): raise AnsibleError( message= "the field 'delegate_to' has an invalid type (%s), and could not be" " converted to a string type." % type(delegated_host_name), obj=task._ds) if delegated_host_name in delegated_host_vars: # no need to repeat ourselves, as the delegate_to value # does not appear to be tied to the loop item variable continue # a dictionary of variables to use if we have to create a new host below # we set the default port based on the default transport here, to make sure # we use the proper default for windows new_port = C.DEFAULT_REMOTE_PORT if C.DEFAULT_TRANSPORT == 'winrm': new_port = 5986 new_delegated_host_vars = dict( ansible_delegated_host=delegated_host_name, ansible_host= delegated_host_name, # not redundant as other sources can change ansible_host ansible_port=new_port, ansible_user=C.DEFAULT_REMOTE_USER, ansible_connection=C.DEFAULT_TRANSPORT, ) # now try to find the delegated-to host in inventory, or failing that, # create a new host on the fly so we can fetch variables for it delegated_host = None if self._inventory is not None: delegated_host = self._inventory.get_host(delegated_host_name) # try looking it up based on the address field, and finally # fall back to creating a host on the fly to use for the var lookup if delegated_host is None: if delegated_host_name in C.LOCALHOST: delegated_host = self._inventory.localhost else: for h in self._inventory.get_hosts( ignore_limits=True, ignore_restrictions=True): # check if the address matches, or if both the delegated_to host # and the current host are in the list of localhost aliases if h.address == delegated_host_name: delegated_host = h break else: delegated_host = Host(name=delegated_host_name) delegated_host.vars = combine_vars( delegated_host.vars, new_delegated_host_vars) else: delegated_host = Host(name=delegated_host_name) delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars) # now we go fetch the vars for the delegated-to host and save them in our # master 
dictionary of variables to be used later in the TaskExecutor/PlayContext delegated_host_vars[delegated_host_name] = self.get_vars( play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=False, ) _ansible_loop_cache = None if has_loop and cache_items: # delegate_to templating produced a change, so we will cache the templated items # in a special private hostvar # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor # which may reprocess the loop _ansible_loop_cache = items return delegated_host_vars, _ansible_loop_cache def clear_facts(self, hostname): ''' Clears the facts for a host ''' self._fact_cache.pop(hostname, None) def set_host_facts(self, host, facts): ''' Sets or updates the given facts for a host in the fact cache. ''' if not isinstance(facts, Mapping): raise AnsibleAssertionError( "the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts)) try: host_cache = self._fact_cache[host.name] except KeyError: # We get to set this as new host_cache = facts else: if not isinstance(host_cache, MutableMapping): raise TypeError( 'The object retrieved for {0} must be a MutableMapping but was' ' a {1}'.format(host.name, type(host_cache))) # Update the existing facts host_cache.update(facts) # Save the facts back to the backing store self._fact_cache[host.name] = host_cache def set_nonpersistent_facts(self, host, facts): ''' Sets or updates the given facts for a host in the fact cache. ''' if not isinstance(facts, Mapping): raise AnsibleAssertionError( "the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts)) try: self._nonpersistent_fact_cache[host.name].update(facts) except KeyError: self._nonpersistent_fact_cache[host.name] = facts def set_host_variable(self, host, varname, value): ''' Sets a value in the vars_cache for a host. ''' host_name = host.get_name() if host_name not in self._vars_cache: self._vars_cache[host_name] = dict() if varname in self._vars_cache[host_name] and isinstance( self._vars_cache[host_name][varname], MutableMapping) and isinstance(value, MutableMapping): self._vars_cache[host_name] = combine_vars( self._vars_cache[host_name], {varname: value}) else: self._vars_cache[host_name][varname] = value
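get_vars layers its sources from lowest to highest precedence, so each later combine_vars call wins. A sketch of that layering with a dict-update stand-in for combine_vars (the real function can also deep-merge, depending on configuration):

    def combine_vars(a, b):
        result = a.copy()
        result.update(b)   # 'replace' semantics: b has higher precedence
        return result

    all_vars = {}
    for source in (
        {'x': 'role_default', 'y': 1},  # role defaults (lowest precedence)
        {'x': 'group_vars'},            # inventory group vars
        {'x': 'host_vars', 'z': True},  # inventory host vars
        {'x': 'extra_vars'},            # -e extra vars (highest precedence)
    ):
        all_vars = combine_vars(all_vars, source)
    print(all_vars)  # {'x': 'extra_vars', 'y': 1, 'z': True} -- the last merge wins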
def inventory_module():
    r = InventoryModule()
    r.inventory = InventoryData()
    r.templar = Templar(None)
    r._options = {'leading_separator': True}
    return r
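This factory reads like the body of a pytest fixture; a hedged sketch of how such a fixture would be declared and consumed, with stub classes standing in for the real plugin imports:

    import pytest

    class InventoryModule:   # stubs standing in for the real plugin classes
        pass

    class InventoryData:
        pass

    class Templar:
        def __init__(self, loader):
            self.loader = loader

    @pytest.fixture
    def inventory_module():
        r = InventoryModule()
        r.inventory = InventoryData()
        r.templar = Templar(None)
        r._options = {'leading_separator': True}
        return r

    def test_option_defaults(inventory_module):
        # pytest injects the fixture result by argument name
        assert inventory_module._options['leading_separator'] is True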
def parse(self): ''' Given a task in one of the supported forms, parses and returns returns the action, arguments, and delegate_to values for the task, dealing with all sorts of levels of fuzziness. ''' thing = None action = None delegate_to = self._task_ds.get('delegate_to', None) args = dict() # This is the standard YAML form for command-type modules. We grab # the args and pass them in as additional arguments, which can/will # be overwritten via dict updates from the other arg sources below additional_args = self._task_ds.get('args', dict()) # We can have one of action, local_action, or module specified # action if 'action' in self._task_ds: # an old school 'action' statement thing = self._task_ds['action'] action, args = self._normalize_parameters( thing, action=action, additional_args=additional_args) # local_action if 'local_action' in self._task_ds: # local_action is similar but also implies a delegate_to if action is not None: raise AnsibleParserError( "action and local_action are mutually exclusive", obj=self._task_ds) thing = self._task_ds.get('local_action', '') delegate_to = 'localhost' action, args = self._normalize_parameters( thing, action=action, additional_args=additional_args) # module: <stuff> is the more new-style invocation # walk the input dictionary to see we recognize a module name for (item, value) in iteritems(self._task_ds): if item in module_loader or item in [ 'meta', 'include', 'include_role' ]: # finding more than one module name is a problem if action is not None: raise AnsibleParserError("conflicting action statements", obj=self._task_ds) action = item thing = value action, args = self._normalize_parameters( thing, action=action, additional_args=additional_args) # if we didn't see any module in the task at all, it's not a task really if action is None: if 'ping' not in module_loader: raise AnsibleParserError( "The requested action was not found in configured module paths. " "Additionally, core modules are missing. If this is a checkout, " "run 'git pull --rebase' to correct this problem.", obj=self._task_ds) else: raise AnsibleParserError( "no action detected in task. This often indicates a misspelled module name, or incorrect module path.", obj=self._task_ds) elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES: templar = Templar(loader=None) raw_params = args.pop('_raw_params') if templar._contains_vars(raw_params): args['_variable_params'] = raw_params else: raise AnsibleParserError( "this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds) # shell modules require special handling (action, args) = self._handle_shell_weirdness(action, args) return (action, args, delegate_to)
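parse() accepts three spellings of the same task (action:, local_action:, or a module name used as the key) and normalizes them to an (action, args, delegate_to) triple. A toy normalizer showing just that shape; string splitting stands in for the real parameter normalization, and the module list is a placeholder:

    def normalize(task_ds, known_modules=('command', 'ping', 'copy')):
        action, args = None, dict(task_ds.get('args', {}))
        delegate_to = task_ds.get('delegate_to')
        if 'action' in task_ds:                      # old-school 'action:' form
            action, _, raw = task_ds['action'].partition(' ')
            args['_raw_params'] = raw
        if 'local_action' in task_ds:                # implies delegate_to: localhost
            if action is not None:
                raise ValueError("action and local_action are mutually exclusive")
            delegate_to = 'localhost'
            action, _, raw = task_ds['local_action'].partition(' ')
            args['_raw_params'] = raw
        for key, value in task_ds.items():           # module-name-as-key form
            if key in known_modules:
                if action is not None:
                    raise ValueError("conflicting action statements")
                action = key
                args = dict(args, **(value if isinstance(value, dict) else {'_raw_params': value}))
        if action is None:
            raise ValueError("no action detected in task")
        return action, args, delegate_to

    print(normalize({'action': 'command echo hi'}))
    print(normalize({'local_action': 'ping'}))
    print(normalize({'copy': {'src': 'a', 'dest': 'b'}}))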
def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._play_context.post_validate(templar=templar)

        # now that the play context is finalized, we can add 'magic'
        # variables to the variable dictionary
        self._play_context.update_vars(variables)

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        if not self._task.evaluate_conditional(templar, variables):
            debug("when evaluation failed, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional check failed')

        # Now we do final validation on the task, which sets all fields to their final values.
        # In the case of debug tasks, we save any 'var' params and restore them after validating
        # so that variables are not replaced too early.
        prev_var = None
        if self._task.action == 'debug' and 'var' in self._task.args:
            prev_var = self._task.args.pop('var')

        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        if prev_var is not None:
            self._task.args['var'] = prev_var

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.get('_raw_params')
            del include_variables['_raw_params']
            return dict(include=include_file, include_variables=include_variables)

        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables)
        self._connection.set_host_overrides(host=self._host)

        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))

        # Read some values from the task, so that we can modify them if need be
        retries = self._task.retries
        if retries <= 0:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                # FIXME: this should use the callback/message passing mechanism
                print("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries - attempt, result))
                result['attempts'] = attempt + 1

            debug("running the handler")
            result = self._handler.run(task_vars=variables)
            debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result, templar=templar)

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # create a conditional object to evaluate task conditions
            cond = Conditional(loader=self._loader)

            # FIXME: make sure until is mutually exclusive with changed_when/failed_when
            if self._task.until:
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
            elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
                if self._task.changed_when:
                    cond.when = [self._task.changed_when]
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)
                if self._task.failed_when:
                    cond.when = [self._task.failed_when]
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                    if failed_when_result:
                        break
            elif 'failed' not in result:
                if result.get('rc', 0) != 0:
                    result['failed'] = True
                else:
                    # if the result is not failed, stop trying
                    break

            if attempt < retries - 1:
                time.sleep(delay)
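The attempt loop above retries the handler, re-evaluating an 'until'-style condition against vars that include the registered result. A compact standalone version of that control flow (simplified: attempts are recorded after each run rather than on the previous result):

    import time

    def run_with_retries(run, until, retries=3, delay=0):
        # clamp the same way the code above does
        if retries <= 0:
            retries = 1
        if delay < 0:
            delay = 1
        result = None
        for attempt in range(retries):
            if attempt > 0:
                print("FAILED - RETRYING (%d retries left). Result was: %s" % (retries - attempt, result))
            result = run()
            result['attempts'] = attempt + 1
            if until(result):
                break
            if attempt < retries - 1:
                time.sleep(delay)
        return result

    calls = iter([{'rc': 1}, {'rc': 1}, {'rc': 0}])
    print(run_with_retries(lambda: dict(next(calls)), until=lambda r: r.get('rc') == 0))
    # the third attempt succeeds: {'rc': 0, 'attempts': 3}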
def _squash_items(self, items, loop_var, variables): ''' Squash items down to a comma-separated list for certain modules which support it (typically package management modules). ''' name = None try: # _task.action could contain templatable strings (via action: and # local_action:) Template it before comparing. If we don't end up # optimizing it here, the templatable string might use template vars # that aren't available until later (it could even use vars from the # with_items loop) so don't make the templated string permanent yet. templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) task_action = self._task.action if templar._contains_vars(task_action): task_action = templar.template(task_action, fail_on_undefined=False) if len(items) > 0 and task_action in self.SQUASH_ACTIONS: if all(isinstance(o, string_types) for o in items): final_items = [] for allowed in ['name', 'pkg', 'package']: name = self._task.args.pop(allowed, None) if name is not None: break # This gets the information to check whether the name field # contains a template that we can squash for template_no_item = template_with_item = None if name: if templar._contains_vars(name): variables[loop_var] = '\0$' template_no_item = templar.template(name, variables, cache=False) variables[loop_var] = '\0@' template_with_item = templar.template(name, variables, cache=False) del variables[loop_var] # Check if the user is doing some operation that doesn't take # name/pkg or the name/pkg field doesn't have any variables # and thus the items can't be squashed if template_no_item != template_with_item: for item in items: variables[loop_var] = item if self._task.evaluate_conditional( templar, variables): new_item = templar.template(name, cache=False) final_items.append(new_item) self._task.args['name'] = final_items # Wrap this in a list so that the calling function loop # executes exactly once return [final_items] else: # Restore the name parameter self._task.args['name'] = name # elif: # Right now we only optimize single entries. In the future we # could optimize more types: # * lists can be squashed together # * dicts could squash entries that match in all cases except the # name or pkg field. except: # Squashing is an optimization. If it fails for any reason, # simply use the unoptimized list of items. # Restore the name parameter if name is not None: self._task.args['name'] = name return items
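_squash_items decides whether the name/pkg argument depends on the loop variable by templating it twice with two different sentinel values: if the two renders differ, the template uses the loop var, and the per-item invocations can be squashed into one module call with a list. A sketch of that detection, with str.format standing in for Jinja2:

    def depends_on_loop_var(name_template, loop_var='item'):
        render = lambda value: name_template.format(**{loop_var: value})
        # differing renders => the name actually uses the loop variable
        return render('\0$') != render('\0@')

    print(depends_on_loop_var('{item}'))  # True  -> squash items into one call
    print(depends_on_loop_var('httpd'))   # False -> constant name, leave the loop alone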
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' Given a list of task datastructures (parsed from YAML), return a list of Task() or TaskInclude() objects. ''' # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block from ansible.playbook.handler import Handler from ansible.playbook.task import Task from ansible.playbook.task_include import TaskInclude from ansible.playbook.handler_task_include import HandlerTaskInclude from ansible.template import Templar assert isinstance(ds, list) task_list = [] for task_ds in ds: assert isinstance(task_ds, dict) if 'block' in task_ds: t = Block.load( task_ds, play=play, parent_block=block, role=role, task_include=task_include, use_handlers=use_handlers, variable_manager=variable_manager, loader=loader, ) task_list.append(t) else: if 'include' in task_ds: if use_handlers: include_class = HandlerTaskInclude else: include_class = TaskInclude t = include_class.load(task_ds, block=block, role=role, task_include=None, variable_manager=variable_manager, loader=loader) all_vars = variable_manager.get_vars(loader=loader, play=play, task=t) templar = Templar(loader=loader, variables=all_vars) # check to see if this include is dynamic or static: # 1. the user has set the 'static' option to false or true # 2. one of the appropriate config options was set if t.static is not None: is_static = t.static else: is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \ (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \ (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop) if is_static: if t.loop is not None: raise AnsibleParserError( "You cannot use 'static' on an include with a loop", obj=task_ds) # we set a flag to indicate this include was static t.statically_loaded = True # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = block cumulative_path = None found = False while parent_include is not None: if not isinstance(parent_include, TaskInclude): parent_include = parent_include._parent continue parent_include_dir = templar.template( os.path.dirname( parent_include.args.get('_raw_params'))) if cumulative_path is None: cumulative_path = parent_include_dir elif not os.path.isabs(cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) include_target = templar.template( t.args['_raw_params']) if t._role: new_basedir = os.path.join(t._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative( new_basedir, 'tasks', include_target) else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): found = True break else: parent_include = parent_include._parent if not found: try: include_target = templar.template( t.args['_raw_params']) except AnsibleUndefinedVariable as e: raise AnsibleParserError( "Error when evaluating variable in include name: %s.\n\n" \ "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \ "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \ "sources like group or host vars." 
% t.args['_raw_params'],
                            obj=task_ds,
                            suppress_extended_error=True,
                        )
                    if t._role:
                        if use_handlers:
                            include_file = loader.path_dwim_relative(t._role._role_path, 'handlers', include_target)
                        else:
                            include_file = loader.path_dwim_relative(t._role._role_path, 'tasks', include_target)
                    else:
                        include_file = loader.path_dwim(include_target)

                    try:
                        data = loader.load_from_file(include_file)
                        if data is None:
                            return []
                        elif not isinstance(data, list):
                            raise AnsibleError("included task files must contain a list of tasks", obj=data)
                        else:
                            if t.name:
                                for mapping in data:
                                    mapping['name'] = t.name

                        # since we can't send callbacks here, we display a message directly in
                        # the same fashion used by the on_include callback. We also do it here,
                        # because the recursive nature of helper methods means we may be loading
                        # nested includes, and we want the include order printed correctly
                        display.display("statically included: %s" % include_file, color=C.COLOR_SKIP)
                    except AnsibleFileNotFound as e:
                        if t.static or \
                           C.DEFAULT_TASK_INCLUDES_STATIC or \
                           C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers:
                            raise
                        display.deprecated(
                            "Included file '%s' not found, however since this include is not "
                            "explicitly marked as 'static: yes', we will try and include it dynamically "
                            "later. In the future, this will be an error unless 'static: no' is used "
                            "on the include task. If you do not want missing includes to be considered "
                            "dynamic, use 'static: yes' on the include or set the global ansible.cfg "
                            "options to make all includes static for tasks and/or handlers" % include_file,
                        )
                        task_list.append(t)
                        continue

                    included_blocks = load_list_of_blocks(
                        data,
                        play=play,
                        parent_block=None,
                        task_include=t.copy(),
                        role=role,
                        use_handlers=use_handlers,
                        loader=loader,
                        variable_manager=variable_manager,
                    )

                    # pop tags out of the include args, if they were specified there, and assign
                    # them to the include. If the include already had tags specified, we raise an
                    # error so that users know not to specify them both ways
                    tags = t.vars.pop('tags', [])
                    if isinstance(tags, string_types):
                        tags = tags.split(',')

                    if len(tags) > 0:
                        if len(t.tags) > 0:
                            raise AnsibleParserError(
                                "Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                "Mixing tag specification styles is prohibited for the whole import hierarchy, not only for a single import statement",
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                    else:
                        tags = t.tags[:]

                    # now we extend the tags on each of the included blocks
                    for b in included_blocks:
                        b.tags = list(set(b.tags).union(tags))
                    # END FIXME

                    # FIXME: handlers shouldn't need this special handling, but do
                    # right now because they don't iterate blocks correctly
                    if use_handlers:
                        for b in included_blocks:
                            task_list.extend(b.block)
                    else:
                        task_list.extend(included_blocks)
                else:
                    task_list.append(t)
            else:
                if use_handlers:
                    t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                else:
                    t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                task_list.append(t)

    return task_list
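The static-vs-dynamic decision above distills to a small predicate: an explicit 'static:' setting wins outright; otherwise config defaults apply, and an include is only implicitly static when nothing dynamic (template vars in the filename, a loop, a dynamic parent) is involved. A sketch under that reading; the parameter names are assumptions, not the real attribute names:

    def include_is_static(explicit_static, default_static, handler_default_static,
                          use_handlers, raw_params_has_vars, parents_static, has_loop):
        if explicit_static is not None:   # 'static: yes/no' wins outright
            return explicit_static
        return (default_static
                or (use_handlers and handler_default_static)
                or (not raw_params_has_vars and parents_static and not has_loop))

    # templated filename => dynamic, unless the user forces static
    print(include_is_static(None, False, False, False, True, True, False))  # False
    print(include_is_static(True, False, False, False, True, True, False))  # True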
def _get_loop_items(self): ''' Loads a lookup plugin to handle the with_* portion of a task (if specified), and returns the items result. ''' # save the play context variables to a temporary dictionary, # so that we can modify the job vars without doing a full copy # and later restore them to avoid modifying things too early play_context_vars = dict() self._play_context.update_vars(play_context_vars) old_vars = dict() for k in play_context_vars: if k in self._job_vars: old_vars[k] = self._job_vars[k] self._job_vars[k] = play_context_vars[k] # get search path for this task to pass to lookup plugins self._job_vars['ansible_search_path'] = self._task.get_search_path() templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) items = None if self._task.loop: if self._task.loop in self._shared_loader_obj.lookup_loader: fail = True if self._task.loop == 'first_found': # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing. fail = False loop_terms = listify_lookup_plugin_terms( terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=fail, convert_bare=False) if not fail: loop_terms = [ t for t in loop_terms if not templar._contains_vars(t) ] # get lookup mylookup = self._shared_loader_obj.lookup_loader.get( self._task.loop, loader=self._loader, templar=templar) # give lookup task 'context' for subdir (mostly needed for first_found) for subdir in ['template', 'var', 'file']: # TODO: move this to constants? if subdir in self._task.action: break setattr(mylookup, '_subdir', subdir + 's') # run lookup items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True) else: raise AnsibleError( "Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) # now we restore any old job variables that may have been modified, # and delete them if they were in the play context vars but not in # the old variables dictionary for k in play_context_vars: if k in old_vars: self._job_vars[k] = old_vars[k] else: del self._job_vars[k] if items: for idx, item in enumerate(items): if item is not None and not isinstance(item, UnsafeProxy): items[idx] = UnsafeProxy(item) # ensure basedir is always in (dwim already searches here but we need to display it) if self._loader.get_basedir( ) not in self._job_vars['ansible_search_path']: self._job_vars['ansible_search_path'].append( self._loader.get_basedir()) return items
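After the lookup runs, items are wrapped in UnsafeProxy so that values produced by lookups are never re-templated. A sketch of that in-place wrapping, with a trivial marker type standing in for the real UnsafeProxy:

    class UnsafeText(str):
        """Marker type: a templating layer can isinstance-check this and skip rendering."""
        __UNSAFE__ = True

    def mark_unsafe(items):
        for idx, item in enumerate(items):
            if item is not None and not isinstance(item, UnsafeText):
                items[idx] = UnsafeText(item)
        return items

    items = mark_unsafe(['{{ evil }}', None, 'ok'])
    print([type(i).__name__ for i in items])  # ['UnsafeText', 'NoneType', 'UnsafeText']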
def _load_role_path(self, role_name): ''' the 'role', as specified in the ds (or as a bare string), can either be a simple name or a full path. If it is a full path, we use the basename as the role name, otherwise we take the name as-given and append it to the default role path ''' # create a templar class to template the dependency names, in # case they contain variables if self._variable_manager is not None: all_vars = self._variable_manager.get_vars(play=self._play) else: all_vars = dict() templar = Templar(loader=self._loader, variables=all_vars) role_name = templar.template(role_name) role_tuple = None # try to load as a collection-based role first if self._collection_list or AnsibleCollectionRef.is_valid_fqcr( role_name): role_tuple = _get_collection_role_path(role_name, self._collection_list) if role_tuple: # we found it, stash collection data and return the name/path tuple self._role_collection = role_tuple[2] return role_tuple[0:2] # We didn't find a collection role, look in defined role paths # FUTURE: refactor this to be callable from internal so we can properly order # ansible.legacy searches with the collections keyword # we always start the search for roles in the base directory of the playbook role_search_paths = [ os.path.join(self._loader.get_basedir(), u'roles'), ] # also search in the configured roles path if C.DEFAULT_ROLES_PATH: role_search_paths.extend(C.DEFAULT_ROLES_PATH) # next, append the roles basedir, if it was set, so we can # search relative to that directory for dependent roles if self._role_basedir: role_search_paths.append(self._role_basedir) # finally as a last resort we look in the current basedir as set # in the loader (which should be the playbook dir itself) but without # the roles/ dir appended role_search_paths.append(self._loader.get_basedir()) # now iterate through the possible paths and return the first one we find for path in role_search_paths: path = templar.template(path) role_path = unfrackpath(os.path.join(path, role_name)) if self._loader.path_exists(role_path): return (role_name, role_path) # if not found elsewhere try to extract path from name role_path = unfrackpath(role_name) if self._loader.path_exists(role_path): role_name = os.path.basename(role_name) return (role_name, role_path) searches = (self._collection_list or []) + role_search_paths raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(searches)), obj=self._ds)
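_load_role_path probes an ordered list of candidate directories and returns the first hit; only if every candidate misses does it try the role name as a literal path. A filesystem-free sketch of that ordering (the exists() callable is injected so the example is runnable without touching disk):

    import os

    def find_role(role_name, playbook_dir, configured_paths, role_basedir, exists):
        search = [os.path.join(playbook_dir, 'roles')]  # playbook's roles/ dir first
        search.extend(configured_paths)                 # then the configured roles path
        if role_basedir:
            search.append(role_basedir)                 # then relative to the calling role
        search.append(playbook_dir)                     # last resort: the playbook dir itself
        for path in search:
            candidate = os.path.join(path, role_name)
            if exists(candidate):
                return role_name, candidate
        if exists(role_name):                           # bare path given as the role name
            return os.path.basename(role_name), role_name
        raise LookupError("the role '%s' was not found in %s" % (role_name, ":".join(search)))

    fake_fs = {'/play/roles/common'}
    print(find_role('common', '/play', ['/etc/ansible/roles'], None, fake_fs.__contains__))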
def _evaluate_conditional(h):
    all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
    templar = Templar(loader=self._loader, variables=all_vars)
    return task.evaluate_conditional(templar, all_vars)
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
    '''
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars
    '''

    display.debug("in VariableManager get_vars()")

    cache_entry = self._get_cache_entry(play=play, host=host, task=task)
    if cache_entry in VARIABLE_CACHE and use_cache:
        display.debug("vars are cached, returning them now")
        return VARIABLE_CACHE[cache_entry]

    all_vars = dict()
    magic_variables = self._get_magic_variables(
        loader=loader,
        play=play,
        host=host,
        task=task,
        include_hostvars=include_hostvars,
        include_delegate_to=include_delegate_to,
    )

    if play:
        # first we compile any vars specified in defaults/main.yml
        # for all roles within the specified play
        for role in play.get_roles():
            all_vars = combine_vars(all_vars, role.get_default_vars())

    # if we have a task in this context, and that task has a role, make
    # sure it sees its defaults above any other roles, as we previously
    # (v1) made sure each task had a copy of its roles default vars
    if task and task._role is not None and (play or task.action == 'include_role'):
        all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

    if host:
        # next, if a host is specified, we load any vars from group_vars
        # files and then any vars from host_vars files which may apply to
        # this host or the groups it belongs to

        # we merge in the special 'all' group_vars first, if they exist
        if 'all' in self._group_vars_files:
            data = preprocess_vars(self._group_vars_files['all'])
            for item in data:
                all_vars = combine_vars(all_vars, item)

        # we merge in vars from groups specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_group_vars())

        for group in sorted(host.get_groups(), key=lambda g: g.depth):
            if group.name in self._group_vars_files and group.name != 'all':
                for data in self._group_vars_files[group.name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

        # then we merge in vars from the host specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_vars())

        # then we merge in the host_vars/<hostname> file, if it exists
        host_name = host.get_name()
        if host_name in self._host_vars_files:
            for data in self._host_vars_files[host_name]:
                data = preprocess_vars(data)
                for item in data:
                    all_vars = combine_vars(all_vars, item)

        # finally, the fact cache for this host, if it exists
        try:
            host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
            all_vars = combine_vars(all_vars, host_facts)
        except KeyError:
            pass

    if play:
        all_vars = combine_vars(all_vars, play.get_vars())

        for vars_file_item in play.get_vars_files():
            # create a set of temporary vars here, which incorporate the extra
            # and magic vars so we can properly template the vars_files entries
            temp_vars = combine_vars(all_vars, self._extra_vars)
            temp_vars = combine_vars(temp_vars, magic_variables)
            templar = Templar(loader=loader, variables=temp_vars)

            # we assume each item in the list is itself a list, as we
            # support "conditional includes" for vars_files, which mimics
            # the with_first_found mechanism.
            vars_file_list = vars_file_item
            if not isinstance(vars_file_list, list):
                vars_file_list = [vars_file_list]

            # now we iterate through the (potential) files, and break out
            # as soon as we read one from the list. If none are found, we
            # raise an error, which is silently ignored at this point.
            try:
                for vars_file in vars_file_list:
                    vars_file = templar.template(vars_file)
                    try:
                        data = preprocess_vars(loader.load_from_file(vars_file))
                        if data is not None:
                            for item in data:
                                all_vars = combine_vars(all_vars, item)
                            break
                    except AnsibleFileNotFound as e:
                        # we continue on loader failures
                        continue
                    except AnsibleParserError as e:
                        raise
                else:
                    raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
            except (UndefinedError, AnsibleUndefinedVariable):
                if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                    raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
                                                   % vars_file_item, obj=vars_file_item)
                else:
                    # we do not have a full context here, and the missing variable could be
                    # because of that, so just show a warning and continue
                    display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                    continue

        # By default, we now merge in all vars from all roles in the play,
        # unless the user has disabled this via a config option
        if not C.DEFAULT_PRIVATE_ROLE_VARS:
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

    # next, we merge in the vars from the role, which will specifically
    # follow the role dependency chain, and then we merge in the tasks
    # vars (which will look at parent blocks/task includes)
    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
        all_vars = combine_vars(all_vars, task.get_vars())

    # next, we merge in the vars cache (include vars) and nonpersistent
    # facts cache (set_fact/register), in that order
    if host:
        all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
        all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

    # next, we merge in role params and task include params
    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))

        # special case for include tasks, where the include params
        # may be specified in the vars field for the task, which should
        # have higher precedence than the vars/np facts above
        all_vars = combine_vars(all_vars, task.get_include_params())

    # finally, we merge in extra vars and the magic variables
    all_vars = combine_vars(all_vars, self._extra_vars)
    all_vars = combine_vars(all_vars, magic_variables)

    # special case for the 'environment' magic variable, as someone
    # may have set it as a variable and we don't want to stomp on it
    if task:
        if 'environment' not in all_vars:
            all_vars['environment'] = task.environment
        else:
            display.warning("The variable 'environment' appears to be used already, which is also used internally for environment variables set on the "
                            "task/block/play. You should use a different variable name to avoid conflicts with this internal variable")

    # if we have a task and we're delegating to another host, figure out the
    # variables for that host now so we don't have to rely on hostvars later
    if task and task.delegate_to is not None and include_delegate_to:
        all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)

    #VARIABLE_CACHE[cache_entry] = all_vars
    if task or play:
        all_vars['vars'] = all_vars.copy()

    display.debug("done with get_vars()")
    return all_vars
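# --- Illustrative sketch (not part of the original module): the precedence list in
# the get_vars() docstring above falls out of successive merges in which each later
# source overwrites keys from earlier ones. The merge function here is a simplified
# stand-in for ansible.utils.vars.combine_vars with the default 'replace' behavior,
# and the variable sources are hypothetical data.

def _combine_vars_sketch(a, b):
    # replace-style merge: keys in b win over keys in a
    result = a.copy()
    result.update(b)
    return result

_role_defaults = {'port': 80, 'user': 'www'}   # lowest precedence
_host_vars = {'port': 8080}
_extra_vars = {'user': 'deploy'}               # highest precedence

_merged = {}
for _source in (_role_defaults, _host_vars, _extra_vars):
    _merged = _combine_vars_sketch(_merged, _source)

assert _merged == {'port': 8080, 'user': 'deploy'}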
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    while not self._final_q.empty() and not self._tqm._terminated:
        try:
            result = self._final_q.get()
            display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

            # helper method, used to find the original host from the one
            # returned in the result/message, which has been serialized and
            # thus had some information stripped from it to speed up the
            # serialization process
            def get_original_host(host):
                if host.name in self._inventory._hosts_cache:
                    return self._inventory._hosts_cache[host.name]
                else:
                    return self._inventory.get_host(host.name)

            # all host status messages contain 2 entries: (msg, task_result)
            if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                task_result = result[1]
                host = get_original_host(task_result._host)
                task = task_result._task
                if result[0] == 'host_task_failed' or task_result.is_failed():
                    if not task.ignore_errors:
                        display.debug("marking %s as failed" % host.name)
                        if task.run_once:
                            # if we're using run_once, we have to fail every host here
                            [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts)
                             if h.name not in self._tqm._unreachable_hosts]
                        else:
                            iterator.mark_host_failed(host)

                        # only add the host to the failed list officially if it has
                        # been failed by the iterator
                        if iterator.is_failed(host):
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        else:
                            # otherwise, we grab the current state and if we're iterating on
                            # the rescue portion of a block then we save the failed task in a
                            # special var for use within the rescue/always
                            state, _ = iterator.get_next_task_for_host(host, peek=True)
                            if state.run_state == iterator.ITERATING_RESCUE:
                                original_task = iterator.get_original_task(host, task)
                                self._variable_manager.set_nonpersistent_facts(
                                    host,
                                    dict(
                                        ansible_failed_task=original_task.serialize(),
                                        ansible_failed_result=task_result._result,
                                    ),
                                )
                    else:
                        self._tqm._stats.increment('ok', host.name)
                    self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                elif result[0] == 'host_unreachable':
                    self._tqm._unreachable_hosts[host.name] = True
                    self._tqm._stats.increment('dark', host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                elif result[0] == 'host_task_skipped':
                    self._tqm._stats.increment('skipped', host.name)
                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                elif result[0] == 'host_task_ok':
                    if task.action != 'include':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)

                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                self._pending_results -= 1
                if host.name in self._blocked_hosts:
                    del self._blocked_hosts[host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                        if role_obj._uuid == task_result._task._role._uuid:
                            role_obj._had_task_run[host.name] = True

                ret_results.append(task_result)

            elif result[0] == 'add_host':
                result_item = result[1]
                new_host_info = result_item.get('add_host', dict())
                self._add_host(new_host_info, iterator)

            elif result[0] == 'add_group':
                host = get_original_host(result[1])
                result_item = result[2]
                self._add_group(host, result_item)

            elif result[0] == 'notify_handler':
                task_result = result[1]
                handler_name = result[2]

                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(original_host, task_result._task)
                if handler_name not in self._notified_handlers:
                    self._notified_handlers[handler_name] = []

                if original_host not in self._notified_handlers[handler_name]:
                    self._notified_handlers[handler_name].append(original_host)
                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))

            elif result[0] == 'register_host_var':
                # essentially the same as 'set_host_var' below, however we
                # never follow the delegate_to value for registered vars and
                # the variable goes in the fact_cache
                host = get_original_host(result[1])
                task = result[2]
                var_value = wrap_var(result[3])
                var_name = task.register

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [host]

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

            elif result[0] in ('set_host_var', 'set_host_facts'):
                host = get_original_host(result[1])
                task = result[2]
                item = result[3]

                # find the host we're actually referring to here, which may
                # be a host that is not really in inventory at all
                if task.delegate_to is not None and task.delegate_facts:
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    loop_var = 'item'
                    if task.loop_control:
                        loop_var = task.loop_control.loop_var or 'item'
                    if item is not None:
                        task_vars[loop_var] = item
                    templar = Templar(loader=self._loader, variables=task_vars)
                    host_name = templar.template(task.delegate_to)
                    actual_host = self._inventory.get_host(host_name)
                    if actual_host is None:
                        actual_host = Host(name=host_name)
                else:
                    actual_host = host

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [actual_host]

                if result[0] == 'set_host_var':
                    var_name = result[4]
                    var_value = result[5]
                    for target_host in host_list:
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                elif result[0] == 'set_host_facts':
                    facts = result[4]
                    for target_host in host_list:
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(target_host, facts.copy())
                        else:
                            self._variable_manager.set_host_facts(target_host, facts.copy())

            elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
                self._tqm.send_callback(result[0], result[1])

            elif result[0] == 'v2_on_file_diff':
                if self._diff:
                    self._tqm.send_callback('v2_on_file_diff', result[1])

            else:
                raise AnsibleError("unknown result message received: %s" % result[0])

        except Queue.Empty:
            time.sleep(0.0001)

        if one_pass:
            break

    return ret_results
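# --- Illustrative sketch (not part of the original class): the message protocol that
# _process_pending_results() consumes. Workers push tuples whose first element names
# the event and whose remaining elements carry the payload; the tuple shapes below
# mirror the branches above, but the queue contents are fabricated for illustration.

import queue  # the code above imports this as Queue on Python 2

_final_q = queue.Queue()
_final_q.put(('host_task_ok', 'task_result_placeholder'))
_final_q.put(('set_host_var', 'host_obj', 'task_obj', None, 'my_var', 42))

while not _final_q.empty():
    _msg = _final_q.get()
    if _msg[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
        print('host status message: %s' % _msg[0])
    elif _msg[0] == 'set_host_var':
        _, _host, _task, _item, _var_name, _var_value = _msg
        print('set %s=%r on %s' % (_var_name, _var_value, _host))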
def test_action_base__configure_module(self):
    fake_loader = DictDataLoader({})

    # create our fake task
    mock_task = MagicMock()
    mock_task.action = "copy"
    mock_task.async_val = 0
    mock_task.delegate_to = None

    # create a mock connection, so we don't actually try and connect to things
    mock_connection = MagicMock()

    # create a mock shared loader object
    def mock_find_plugin_with_context(name, options, collection_list=None):
        mockctx = MagicMock()
        if name == 'badmodule':
            mockctx.resolved = False
            mockctx.plugin_resolved_path = None
        elif '.ps1' in options:
            mockctx.resolved = True
            mockctx.plugin_resolved_path = '/fake/path/to/%s.ps1' % name
        else:
            mockctx.resolved = True
            mockctx.plugin_resolved_path = '/fake/path/to/%s' % name
        return mockctx

    mock_module_loader = MagicMock()
    mock_module_loader.find_plugin_with_context.side_effect = mock_find_plugin_with_context
    mock_shared_obj_loader = MagicMock()
    mock_shared_obj_loader.module_loader = mock_module_loader

    # we're using a real play context here
    play_context = PlayContext()

    # our test class
    action_base = DerivedActionBase(
        task=mock_task,
        connection=mock_connection,
        play_context=play_context,
        loader=fake_loader,
        templar=Templar(loader=fake_loader),
        shared_loader_obj=mock_shared_obj_loader,
    )

    # test python module formatting
    with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))):
        with patch.object(os, 'rename'):
            mock_task.args = dict(a=1, foo='fö〩')
            mock_connection.module_implementation_preferences = ('',)
            (style, shebang, data, path) = action_base._configure_module(mock_task.action, mock_task.args,
                                                                         task_vars=dict(ansible_python_interpreter='/usr/bin/python'))
            self.assertEqual(style, "new")
            self.assertEqual(shebang, u"#!/usr/bin/python")

            # test module not found
            self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args, {})

    # test powershell module formatting
    with patch.object(builtins, 'open', mock_open(read_data=to_bytes(powershell_module_replacers.strip(), encoding='utf-8'))):
        mock_task.action = 'win_copy'
        mock_task.args = dict(b=2)
        mock_connection.module_implementation_preferences = ('.ps1',)
        (style, shebang, data, path) = action_base._configure_module('stat', mock_task.args, {})
        self.assertEqual(style, "new")
        self.assertEqual(shebang, u'#!powershell')

        # test module not found
        self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args, {})
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = True
    work_to_do = True
    while work_to_do and not self._tqm._terminated:

        try:
            display.debug("getting the remaining hosts for this loop")
            hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                          if host.name not in self._tqm._unreachable_hosts]
            display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False

            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            # flag set if task is set to any_errors_fatal
            any_errors_fatal = False

            results = []
            for (host, task) in host_tasks:
                if not task:
                    continue

                if self._tqm._terminated:
                    break

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    action = None

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        display.debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    self._execute_meta(task, play_context, iterator)
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    display.debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    display.debug("done getting variables")

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True

                    if not callback_sent:
                        display.debug("sending task start callback, copying the task so we can template it temporarily")
                        saved_name = task.name
                        display.debug("done copying, going to template now")
                        try:
                            task.name = text_type(templar.template(task.name, fail_on_undefined=False))
                            display.debug("done templating")
                        except:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                            pass
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
                        callback_sent = True
                        display.debug("sending task start callback")

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

                results += self._process_pending_results(iterator, one_pass=True)

            # go to next host/task group
            if skip_rest:
                continue

            display.debug("done queuing things up, now waiting for results queue to drain")
            results += self._wait_on_pending_results(iterator)
            host_results.extend(results)

            if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                display.debug("out of hosts to run on")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = self._tqm.RUN_ERROR
                break

            try:
                included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator,
                                                                      inventory=self._inventory, loader=self._loader,
                                                                      variable_manager=self._variable_manager)
            except AnsibleError as e:
                return self._tqm.RUN_ERROR

            include_failure = False
            if len(included_files) > 0:
                display.debug("we have included files to process")
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                display.debug("generating all_blocks data")
                all_blocks = dict((host, []) for host in hosts_left)
                display.debug("done generating all_blocks data")
                for included_file in included_files:
                    display.debug("processing included file: %s" % included_file._filename)
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)

                        display.debug("iterating over new_blocks loaded from include file")
                        for new_block in new_blocks:
                            task_vars = self._variable_manager.get_vars(
                                loader=self._loader,
                                play=iterator._play,
                                task=included_file._task,
                            )
                            display.debug("filtering new block on tags")
                            final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                            display.debug("done filtering new block on tags")

                            noop_block = Block(parent_block=task._block)
                            noop_block.block = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]

                            for host in hosts_left:
                                if host in included_file._hosts:
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)
                        display.debug("done iterating over new_blocks loaded from include file")

                    except AnsibleError as e:
                        for host in included_file._hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                        display.error(to_unicode(e), wrap_text=False)
                        include_failure = True
                        continue

                # finally go through all of the hosts and append the
                # accumulated blocks to their list of tasks
                display.debug("extending task lists for all hosts with included blocks")
                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])
                display.debug("done extending task lists")
                display.debug("done processing included files")

            display.debug("results queue empty")

            display.debug("checking for any_errors_fatal")
            failed_hosts = []
            for res in results:
                if res.is_failed():
                    failed_hosts.append(res._host.name)

            # if any_errors_fatal and we had an error, mark all hosts as failed
            if any_errors_fatal and (len(failed_hosts) > 0 or len(self._tqm._unreachable_hosts.keys()) > 0):
                for host in hosts_left:
                    # don't double-mark hosts, or the iterator will potentially
                    # fail them out of the rescue/always states
                    if host.name not in failed_hosts:
                        self._tqm._failed_hosts[host.name] = True
                        iterator.mark_host_failed(host)
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                return self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for any_errors_fatal")

            display.debug("checking for max_fail_percentage")
            if iterator._play.max_fail_percentage is not None and len(results) > 0:
                percentage = iterator._play.max_fail_percentage / 100.0
                if (len(self._tqm._failed_hosts) / len(results)) > percentage:
                    for host in hosts_left:
                        # don't double-mark hosts, or the iterator will potentially
                        # fail them out of the rescue/always states
                        if host.name not in failed_hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    return self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for max_fail_percentage")

        except (IOError, EOFError) as e:
            display.debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return self._tqm.RUN_UNKNOWN_ERROR

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
def _execute(self, variables=None):
    '''
    The primary workhorse of the executor system, this runs the task
    on the specified host (which may be the delegated_to host) and handles
    the retry/until and block rescue/always execution
    '''

    if variables is None:
        variables = self._job_vars

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

    context_validation_error = None
    try:
        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._play_context.post_validate(templar=templar)

        # now that the play context is finalized, if the remote_addr is not set
        # default to using the host's address field as the remote address
        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.address

        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)

        # FIXME: update connection/shell plugin options
    except AnsibleError as e:
        # save the error, which we'll raise later if we don't end up
        # skipping this task during the conditional evaluation step
        context_validation_error = e

    # Evaluate the conditional (if any) for this task, which we do before running
    # the final task post-validation. We do this before the post validation due to
    # the fact that the conditional may specify that the task be skipped due to a
    # variable not being present which would otherwise cause validation to fail
    try:
        if not self._task.evaluate_conditional(templar, variables):
            display.debug("when evaluation is False, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
    except AnsibleError:
        # loop error takes precedence
        if self._loop_eval_error is not None:
            raise self._loop_eval_error
        # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
        if self._task.action not in ['include', 'include_tasks', 'include_role']:
            raise

    # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
    if self._loop_eval_error is not None:
        raise self._loop_eval_error

    # if we ran into an error while setting up the PlayContext, raise it now
    if context_validation_error is not None:
        raise context_validation_error

    # if this task is a TaskInclude, we just return now with a success code so the
    # main thread can expand the task list for the given host
    if self._task.action in ('include', 'include_tasks'):
        include_variables = self._task.args.copy()
        include_file = include_variables.pop('_raw_params', None)
        if not include_file:
            return dict(failed=True, msg="No include file was specified to the include")

        include_file = templar.template(include_file)
        return dict(include=include_file, include_variables=include_variables)

    # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
    elif self._task.action == 'include_role':
        include_variables = self._task.args.copy()
        return dict(include_role=self._task, include_variables=include_variables)

    # Now we do final validation on the task, which sets all fields to their final values.
    self._task.post_validate(templar=templar)
    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts",
                               version="2.6")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    # get the connection and the handler for this execution
    if (not self._connection or
            not getattr(self._connection, 'connected', False) or
            self._play_context.remote_addr != self._connection._play_context.remote_addr):
        self._connection = self._get_connection(variables=variables, templar=templar)

        # only template the vars if the connection actually implements set_host_overrides
        # NB: this is expensive, and should be removed once connection-specific vars are being handled by play_context
        sho_impl = getattr(type(self._connection), 'set_host_overrides', None)
        if sho_impl and sho_impl != ConnectionBase.set_host_overrides:
            self._connection.set_host_overrides(self._host, variables, templar)
    else:
        # if connection is reused, its _play_context is no longer valid and needs
        # to be replaced with the one templated above, in case other data changed
        self._connection._play_context = self._play_context

    self._handler = self._get_action_handler(connection=self._connection, templar=templar)

    # And filter out any fields which were set to default(omit), and got the omit token value
    omit_token = variables.get('omit')
    if omit_token is not None:
        self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

    # Read some values from the task, so that we can modify them if need be
    if self._task.until:
        retries = self._task.retries
        if retries is None:
            retries = 3
        elif retries <= 0:
            retries = 1
        else:
            retries += 1
    else:
        retries = 1

    delay = self._task.delay
    if delay < 0:
        delay = 1

    # make a copy of the job vars here, in case we need to update them
    # with the registered variable value later on when testing conditions
    vars_copy = variables.copy()

    display.debug("starting attempt loop")
    result = None
    for attempt in range(1, retries + 1):
        display.debug("running the handler")
        try:
            result = self._handler.run(task_vars=variables)
        except AnsibleActionSkip as e:
            return dict(skipped=True, msg=to_text(e))
        except AnsibleActionFail as e:
            return dict(failed=True, msg=to_text(e))
        except AnsibleConnectionFailure as e:
            return dict(unreachable=True, msg=to_text(e))
        display.debug("handler run complete")

        # preserve no log
        result["_ansible_no_log"] = self._play_context.no_log

        # update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        if self._task.register:
            vars_copy[self._task.register] = wrap_var(result.copy())

        # note: 'async' later became a Python reserved word; newer Ansible renames
        # this task attribute to async_val
        if self._task.async > 0:
            if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                # FIXME callback 'v2_runner_on_async_poll' here

            # ensure no log is preserved
            result["_ansible_no_log"] = self._play_context.no_log

        # helper methods for use below in evaluating changed/failed_when
        def _evaluate_changed_when_result(result):
            if self._task.changed_when is not None and self._task.changed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.changed_when
                result['changed'] = cond.evaluate_conditional(templar, vars_copy)

        def _evaluate_failed_when_result(result):
            if self._task.failed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.failed_when
                failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                result['failed_when_result'] = result['failed'] = failed_when_result
            else:
                failed_when_result = False
            return failed_when_result

        if 'ansible_facts' in result:
            vars_copy.update(result['ansible_facts'])

        # set the failed property if it was missing.
        if 'failed' not in result:
            # rc is here for backwards compatibility and modules that use it instead of 'failed'
            if 'rc' in result and result['rc'] not in [0, "0"]:
                result['failed'] = True
            else:
                result['failed'] = False

        # set the changed property if it was missing.
        if 'changed' not in result:
            result['changed'] = False

        # if we didn't skip this task, use the helpers to evaluate the changed/
        # failed_when properties
        if 'skipped' not in result:
            _evaluate_changed_when_result(result)
            _evaluate_failed_when_result(result)

        if retries > 1:
            cond = Conditional(loader=self._loader)
            cond.when = self._task.until
            result['attempts'] = attempt
            if cond.evaluate_conditional(templar, vars_copy):
                break
            else:
                # no conditional check, or it failed, so sleep for the specified time
                if attempt < retries:
                    result['_ansible_retry'] = True
                    result['retries'] = retries
                    display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                    self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                    time.sleep(delay)
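# --- Illustrative sketch (not part of the original executor): the retry accounting
# in _execute() above. With an 'until' condition, retries=None falls back to three
# attempts, non-positive values clamp to one, and any positive value gains one extra
# because the first run counts as an attempt, not a retry. The helper name is
# hypothetical.

def _effective_attempts(until, retries):
    if not until:
        return 1
    if retries is None:
        return 3
    if retries <= 0:
        return 1
    return retries + 1

assert _effective_attempts(until=None, retries=5) == 1    # no until: run exactly once
assert _effective_attempts(until='cond', retries=None) == 3
assert _effective_attempts(until='cond', retries=0) == 1
assert _effective_attempts(until='cond', retries=2) == 3  # 2 retries + initial run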
def ansible_template(basedir, varname, templatevars, **kwargs):
    dl = DataLoader()
    dl.set_basedir(basedir)
    templar = Templar(dl, variables=templatevars)
    return templar.template(varname, **kwargs)
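# Example use of the ansible_template() helper above. The basedir only matters when
# the template references files on disk, so any directory works for this inline
# string; the variable values here are made up for illustration.

if __name__ == '__main__':
    rendered = ansible_template('.', '{{ greeting }}, {{ name }}!',
                                {'greeting': 'hello', 'name': 'world'})
    assert rendered == 'hello, world!'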
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
    '''
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars
    '''

    display.debug("in VariableManager get_vars()")

    all_vars = dict()
    magic_variables = self._get_magic_variables(
        play=play,
        host=host,
        task=task,
        include_hostvars=include_hostvars,
        include_delegate_to=include_delegate_to,
    )

    if play:
        # first we compile any vars specified in defaults/main.yml
        # for all roles within the specified play
        for role in play.get_roles():
            all_vars = combine_vars(all_vars, role.get_default_vars())

    basedirs = []
    if task:
        # set basedirs
        if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
            basedirs = task.get_search_path()
        elif C.PLAYBOOK_VARS_ROOT == 'top':  # only option pre 2.3
            basedirs = [self._loader.get_basedir()]
        elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'):  # only option in 2.4.0
            basedirs = [task.get_search_path()[0]]
        else:
            raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task._role is not None and (play or task.action == 'include_role'):
            all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

    if host:
        # THE 'all' group and the rest of groups for a host, used below
        all_group = self._inventory.groups.get('all')
        host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

        def _get_plugin_vars(plugin, path, entities):
            data = {}
            try:
                data = plugin.get_vars(self._loader, path, entities)
            except AttributeError:
                try:
                    for entity in entities:
                        if isinstance(entity, Host):
                            data.update(plugin.get_host_vars(entity.name))
                        else:
                            data.update(plugin.get_group_vars(entity.name))
                except AttributeError:
                    if hasattr(plugin, 'run'):
                        raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                    else:
                        raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
            return data

        # internal functions that actually do the work
        def _plugins_inventory(entities):
            ''' merges all entities by inventory source '''
            data = {}
            for inventory_dir in self._inventory._sources:
                if ',' in inventory_dir:  # skip host lists
                    continue
                elif not os.path.isdir(inventory_dir):  # always pass 'inventory directory'
                    inventory_dir = os.path.dirname(inventory_dir)

                for plugin in vars_loader.all():
                    data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
            return data

        def _plugins_play(entities):
            ''' merges all entities adjacent to play '''
            data = {}
            for plugin in vars_loader.all():
                for path in basedirs:
                    data = combine_vars(data, _get_plugin_vars(plugin, path, entities))
            return data

        # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
        def all_inventory():
            return all_group.get_vars()

        def all_plugins_inventory():
            return _plugins_inventory([all_group])

        def all_plugins_play():
            return _plugins_play([all_group])

        def groups_inventory():
            ''' gets group vars from inventory '''
            return get_group_vars(host_groups)

        def groups_plugins_inventory():
            ''' gets plugin sources from inventory for groups '''
            return _plugins_inventory(host_groups)

        def groups_plugins_play():
            ''' gets plugin sources from play for groups '''
            return _plugins_play(host_groups)

        def plugins_by_groups():
            '''
            merges all plugin sources by group,
            This should be used instead, NOT in combination with the other groups_plugins* functions
            '''
            data = {}
            for group in host_groups:
                # seed each group's entry and pass the group as an entity list,
                # since the plugin helpers iterate over entities
                data[group] = combine_vars(data.get(group, {}), _plugins_inventory([group]))
                data[group] = combine_vars(data[group], _plugins_play([group]))
            return data

        # Merge groups as per precedence config, if not implicit localhost
        # only allow to call the functions we want exposed
        if not host.implicit:
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' % (entry, host.name))
                    all_vars = combine_vars(all_vars, locals()[entry]())
                else:
                    display.warning('Ignoring unknown variable precedence entry: %s' % (entry))

        # host vars, from inventory, inventory adjacent and play adjacent via plugins
        all_vars = combine_vars(all_vars, host.get_vars())
        all_vars = combine_vars(all_vars, _plugins_inventory([host]))
        all_vars = combine_vars(all_vars, _plugins_play([host]))

        # finally, the fact cache for this host, if it exists
        try:
            host_facts = wrap_var(self._fact_cache.get(host.name, {}))
            # push facts to main namespace
            all_vars = combine_vars(all_vars, host_facts)
        except KeyError:
            pass

    if play:
        all_vars = combine_vars(all_vars, play.get_vars())

        vars_files = play.get_vars_files()
        try:
            for vars_file_item in vars_files:
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=self._loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                                break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
                                                       % vars_file_item, obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be because of that
                        # so just show a warning and continue
                        display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                        continue

                display.vvv("Read vars_file '%s'" % vars_file_item)
        except TypeError:
            raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
                                     "Got '%s' of type %s" % (vars_files, type(vars_files)))

        # By default, we now merge in all vars from all roles in the play,
        # unless the user has disabled this via a config option
        if not C.DEFAULT_PRIVATE_ROLE_VARS:
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

    # next, we merge in the vars from the role, which will specifically
    # follow the role dependency chain, and then we merge in the tasks
    # vars (which will look at parent blocks/task includes)
    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
        all_vars = combine_vars(all_vars, task.get_vars())

    # next, we merge in the vars cache (include vars) and nonpersistent
    # facts cache (set_fact/register), in that order
    if host:
        all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
        all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

    # next, we merge in role params and task include params
    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))

        # special case for include tasks, where the include params
        # may be specified in the vars field for the task, which should
        # have higher precedence than the vars/np facts above
        all_vars = combine_vars(all_vars, task.get_include_params())

    # extra vars
    all_vars = combine_vars(all_vars, self._extra_vars)

    # magic variables
    all_vars = combine_vars(all_vars, magic_variables)

    # special case for the 'environment' magic variable, as someone
    # may have set it as a variable and we don't want to stomp on it
    if task:
        all_vars['environment'] = task.environment

    # if we have a task and we're delegating to another host, figure out the
    # variables for that host now so we don't have to rely on hostvars later
    if task and task.delegate_to is not None and include_delegate_to:
        all_vars['ansible_delegated_vars'] = self._get_delegated_vars(play, task, all_vars)

    # 'vars' magic var
    if task or play:
        # has to be copy, otherwise recursive ref
        all_vars['vars'] = all_vars.copy()

    display.debug("done with get_vars()")
    return all_vars
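# --- Illustrative sketch (not part of the original module): the v2 vars-plugin
# interface that _get_plugin_vars() above prefers, i.e. a get_vars(loader, path,
# entities) method returning a dict. This is a skeleton for illustration only; a
# real plugin would subclass ansible.plugins.vars.BaseVarsPlugin and read files
# found under 'path' for each host or group entity.

class ExampleVarsPlugin(object):

    def get_vars(self, loader, path, entities):
        data = {}
        for entity in entities:
            # entity is a Host or Group object; key the vars off its name
            data['example_var_for_%s' % entity.name] = path
        return data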
def run(self, play):
    '''
    Iterates over the roles/tasks in a play, using the given (or default)
    strategy for queueing tasks. The default is the linear strategy, which
    operates like classic Ansible by keeping all hosts in lock-step with
    a given task (meaning no hosts move on to the next task until all hosts
    are done with the current task).
    '''

    if not self._callbacks_loaded:
        self.load_callbacks()

    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
    templar = Templar(loader=self._loader, variables=all_vars)

    new_play = play.copy()
    new_play.post_validate(templar)

    self.hostvars = HostVars(
        inventory=self._inventory,
        variable_manager=self._variable_manager,
        loader=self._loader,
    )

    # fork as many workers as the lowest of: the forks setting, the play's
    # serial setting, or the number of targeted hosts
    contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
    contenders = [v for v in contenders if v is not None and v > 0]
    self._initialize_processes(min(contenders))

    play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
    for callback_plugin in self._callback_plugins:
        if hasattr(callback_plugin, 'set_play_context'):
            callback_plugin.set_play_context(play_context)

    self.send_callback('v2_playbook_on_play_start', new_play)

    # initialize the shared dictionary containing the notified handlers
    self._initialize_notified_handlers(new_play.handlers)

    # load the specified strategy (or the default linear one)
    strategy = strategy_loader.get(new_play.strategy, self)
    if strategy is None:
        raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

    # build the iterator
    iterator = PlayIterator(
        inventory=self._inventory,
        play=new_play,
        play_context=play_context,
        variable_manager=self._variable_manager,
        all_vars=all_vars,
        start_at_done=self._start_at_done,
    )

    # Because the TQM may survive multiple play runs, we start by marking
    # any hosts as failed in the iterator here which may have been marked
    # as failed in previous runs. Then we clear the internal list of failed
    # hosts so we know what failed this round.
    for host_name in self._failed_hosts.keys():
        host = self._inventory.get_host(host_name)
        iterator.mark_host_failed(host)

    self.clear_failed_hosts()

    # during initialization, the PlayContext will clear the start_at_task
    # field to signal that a matching task was found, so check that here
    # and remember it so we don't try to skip tasks on future plays
    if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
        self._start_at_done = True

    # and run the play using the strategy and cleanup on way out
    play_return = strategy.run(iterator, play_context)

    # now re-save the hosts that failed from the iterator to our internal list
    for host_name in iterator.get_failed_hosts():
        self._failed_hosts[host_name] = True

    self._cleanup_processes()
    return play_return
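# --- Illustrative sketch (not part of the original manager): the worker sizing rule
# in run() above. The process count is the smallest positive value among the forks
# option, the play's serial setting, and the number of targeted hosts; None and
# non-positive values are ignored. The helper name and inputs are hypothetical.

def _worker_count(forks, serial, host_count):
    contenders = [forks, serial, host_count]
    contenders = [v for v in contenders if v is not None and v > 0]
    return min(contenders)

assert _worker_count(forks=5, serial=None, host_count=3) == 3
assert _worker_count(forks=5, serial=2, host_count=100) == 2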
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = self._tqm.RUN_OK
    work_to_do = True

    self._set_hosts_cache(iterator._play)

    while work_to_do and not self._tqm._terminated:

        try:
            display.debug("getting the remaining hosts for this loop")
            hosts_left = self.get_hosts_left(iterator)
            display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False

            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            # flag set if task is set to any_errors_fatal
            any_errors_fatal = False

            results = []
            for (host, task) in host_tasks:
                if not task:
                    continue

                if self._tqm._terminated:
                    break

                run_once = False
                work_to_do = True

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        display.debug("'%s' skipped because role has already run" % task)
                        continue

                display.debug("getting variables")
                task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
                                                            _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
                self.add_tqm_variables(task_vars, play=iterator._play)
                templar = Templar(loader=self._loader, variables=task_vars)
                display.debug("done getting variables")

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                task.action = templar.template(task.action)

                try:
                    action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    action = None

                if task.action == 'meta':
                    # for the linear strategy, we run meta tasks just once and for
                    # all hosts currently being iterated over rather than one host
                    results.extend(self._execute_meta(task, play_context, iterator, host))
                    if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host'):
                        run_once = True
                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True

                    if not callback_sent:
                        display.debug("sending task start callback, copying the task so we can template it temporarily")
                        saved_name = task.name
                        display.debug("done copying, going to template now")
                        try:
                            task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                            display.debug("done templating")
                        except Exception:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
                        callback_sent = True
                        display.debug("sending task start callback")

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)
                    del task_vars

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

                results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

            # go to next host/task group
            if skip_rest:
                continue

            display.debug("done queuing things up, now waiting for results queue to drain")
            if self._pending_results > 0:
                results += self._wait_on_pending_results(iterator)

            host_results.extend(results)

            self.update_active_connections(results)

            included_files = IncludedFile.process_include_results(
                host_results,
                iterator=iterator,
                loader=self._loader,
                variable_manager=self._variable_manager
            )

            include_failure = False
            if len(included_files) > 0:
                display.debug("we have included files to process")

                display.debug("generating all_blocks data")
                all_blocks = dict((host, []) for host in hosts_left)
                display.debug("done generating all_blocks data")
                for included_file in included_files:
                    display.debug("processing included file: %s" % included_file._filename)
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        if included_file._is_role:
                            new_ir = self._copy_included_file(included_file)

                            new_blocks, handler_blocks = new_ir.get_block_list(
                                play=iterator._play,
                                variable_manager=self._variable_manager,
                                loader=self._loader,
                            )
                        else:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                        display.debug("iterating over new_blocks loaded from include file")
                        for new_block in new_blocks:
                            task_vars = self._variable_manager.get_vars(
                                play=iterator._play,
                                task=new_block._parent,
                                _hosts=self._hosts_cache,
                                _hosts_all=self._hosts_cache_all,
                            )
                            display.debug("filtering new block on tags")
                            final_block = new_block.filter_tagged_tasks(task_vars)
                            display.debug("done filtering new block on tags")

                            noop_block = self._prepare_and_create_noop_block_from(final_block, task._parent, iterator)

                            for host in hosts_left:
                                if host in included_file._hosts:
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)
                        display.debug("done iterating over new_blocks loaded from include file")
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                        display.error(to_text(e), wrap_text=False)
                        include_failure = True
                        continue

                # finally go through all of the hosts and append the
                # accumulated blocks to their list of tasks
                display.debug("extending task lists for all hosts with included blocks")

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

                display.debug("done extending task lists")
                display.debug("done processing included files")

            display.debug("results queue empty")

            display.debug("checking for any_errors_fatal")
            failed_hosts = []
            unreachable_hosts = []
            for res in results:
                # execute_meta() does not set 'failed' in the TaskResult
                # so we skip checking it with the meta tasks and look just at the iterator
                if (res.is_failed() or res._task.action == 'meta') and iterator.is_failed(res._host):
                    failed_hosts.append(res._host.name)
                elif res.is_unreachable():
                    unreachable_hosts.append(res._host.name)

            # if any_errors_fatal and we had an error, mark all hosts as failed
            if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                dont_fail_states = frozenset([iterator.ITERATING_RESCUE, iterator.ITERATING_ALWAYS])
                for host in hosts_left:
                    (s, _) = iterator.get_next_task_for_host(host, peek=True)
                    # the state may actually be in a child state, use the get_active_state()
                    # method in the iterator to figure out the true active state
                    s = iterator.get_active_state(s)
                    if s.run_state not in dont_fail_states or \
                       s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0:
                        self._tqm._failed_hosts[host.name] = True
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for any_errors_fatal")

            display.debug("checking for max_fail_percentage")
            if iterator._play.max_fail_percentage is not None and len(results) > 0:
                percentage = iterator._play.max_fail_percentage / 100.0

                if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
                    for host in hosts_left:
                        # don't double-mark hosts, or the iterator will potentially
                        # fail them out of the rescue/always states
                        if host.name not in failed_hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
            display.debug("done checking for max_fail_percentage")

            display.debug("checking to see if all hosts have failed and the running result is not ok")
            if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
                display.debug("^ not ok, so returning result now")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                return result
            display.debug("done checking to see if all hosts have failed")

        except (IOError, EOFError) as e:
            display.debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return self._tqm.RUN_UNKNOWN_ERROR

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
def _normalize_parameters(self, thing, action=None, additional_args=dict()):
    '''
    arguments can be fuzzy. Deal with all the forms.
    '''

    # final args are the ones we'll eventually return, so first update
    # them with any additional args specified, which have lower priority
    # than those which may be parsed/normalized next
    final_args = dict()
    if additional_args:
        if isinstance(additional_args, string_types):
            templar = Templar(loader=None)
            if templar._contains_vars(additional_args):
                final_args['_variable_params'] = additional_args
            else:
                raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style "
                                         "('{{var_name}}')")
        elif isinstance(additional_args, dict):
            final_args.update(additional_args)
        else:
            raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')

    # how we normalize depends if we figured out what the module name is
    # yet. If we have already figured it out, it's a 'new style' invocation.
    # otherwise, it's not
    if action is not None:
        args = self._normalize_new_style_args(thing, action)
    else:
        (action, args) = self._normalize_old_style_args(thing)

        # this can occasionally happen, simplify
        if args and 'args' in args:
            tmp_args = args.pop('args')
            if isinstance(tmp_args, string_types):
                tmp_args = parse_kv(tmp_args)
            args.update(tmp_args)

    # only internal variables can start with an underscore, so
    # we don't allow users to set them directly in arguments
    if args and action not in ('command', 'net_command', 'win_command', 'shell', 'win_shell', 'script', 'raw'):
        for arg in args:
            arg = to_text(arg)
            if arg.startswith('_ansible_'):
                raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))

    # finally, update the args we're going to return with the ones
    # which were normalized above
    if args:
        final_args.update(args)

    return (action, final_args)
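# --- Illustrative sketch (not part of the original parser): the quoting-aware k=v
# split that _normalize_parameters() leans on for old-style arguments. This is a
# simplified stand-in for ansible.parsing.splitter.parse_kv, shown only to make the
# normalization step above concrete; the helper name is hypothetical.

import shlex

def _parse_kv_sketch(args_string):
    data = {}
    for token in shlex.split(args_string):
        if '=' in token:
            key, value = token.split('=', 1)
            data[key] = value
    return data

assert _parse_kv_sketch('src=/tmp/a dest="/tmp/b c"') == {'src': '/tmp/a', 'dest': '/tmp/b c'}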
def run(self, iterator, play_context):
    '''
    The "free" strategy is a bit more complex, in that it allows tasks to
    be sent to hosts as quickly as they can be processed. This means that
    some hosts may finish very quickly if run tasks result in little or no work
    being done versus other systems.

    The algorithm used here also tries to be more "fair" when iterating
    through hosts by remembering the last host in the list to be given a task
    and starting the search from there as opposed to the top of the hosts
    list again, which would end up favoring hosts near the beginning of the
    list.
    '''

    # the last host to be given a task
    last_host = 0

    result = self._tqm.RUN_OK

    # start with all workers being counted as being free
    workers_free = len(self._workers)

    self._set_hosts_cache(iterator._play)

    work_to_do = True
    while work_to_do and not self._tqm._terminated:

        hosts_left = self.get_hosts_left(iterator)

        if len(hosts_left) == 0:
            self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
            result = False
            break

        work_to_do = False  # assume we have no more work to do
        starting_host = last_host  # save current position so we know when we've looped back around and need to break

        # try and find an unblocked host with a task to run
        host_results = []
        while True:
            host = hosts_left[last_host]
            display.debug("next free host: %s" % host)
            host_name = host.get_name()

            # peek at the next task for the host, to see if there's
            # anything to do for this host
            (state, task) = iterator.get_next_task_for_host(host, peek=True)
            display.debug("free host state: %s" % state, host=host_name)
            display.debug("free host task: %s" % task, host=host_name)
            if host_name not in self._tqm._unreachable_hosts and task:
                # set the flag so the outer loop knows we've still found
                # some work which needs to be done
                work_to_do = True

                display.debug("this host has work to do", host=host_name)

                # check to see if this host is blocked (still executing a previous task)
                if (host_name not in self._blocked_hosts or not self._blocked_hosts[host_name]):

                    display.debug("getting variables", host=host_name)
                    task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
                                                                _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    display.debug("done getting variables", host=host_name)

                    try:
                        throttle = int(templar.template(task.throttle))
                    except Exception as e:
                        raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)

                    if throttle > 0:
                        same_tasks = 0
                        for worker in self._workers:
                            if worker and worker.is_alive() and worker._task._uuid == task._uuid:
                                same_tasks += 1

                        display.debug("task: %s, same_tasks: %d" % (task.get_name(), same_tasks))
                        if same_tasks >= throttle:
                            break

                    # pop the task, mark the host blocked, and queue it
                    self._blocked_hosts[host_name] = True
                    (state, task) = iterator.get_next_task_for_host(host)

                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    try:
                        task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                        display.debug("done templating", host=host_name)
                    except Exception:
                        # just ignore any errors during task name templating,
                        # we don't care if it just shows the raw name
                        display.debug("templating failed for some reason", host=host_name)

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
                    if run_once:
                        if action and getattr(action, 'BYPASS_HOST_LOOP', False):
                            raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
                                               "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
                        else:
                            display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
                                            "executed for every host in the inventory list.")

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task, host=host_name)
                            del self._blocked_hosts[host_name]
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator, target_host=host)
                        self._blocked_hosts[host_name] = False
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if not self._step or self._take_step(task, host_name):
                            if task.any_errors_fatal:
                                display.warning("Using any_errors_fatal with the free strategy is not supported, "
                                                "as tasks are executed independently on each host")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            self._queue_task(host, task, task_vars, play_context)
                            # each task is counted as a worker being busy
                            workers_free -= 1
                            del task_vars
                else:
                    display.debug("%s is blocked, skipping for now" % host_name)

            # all workers have tasks to do (and the current host isn't done with the play).
            # loop back to starting host and break out
            if self._host_pinned and workers_free == 0 and work_to_do:
                last_host = starting_host
                break

            # move on to the next host and make sure we
            # haven't gone past the end of our hosts list
            last_host += 1
            if last_host > len(hosts_left) - 1:
                last_host = 0

            # if we've looped around back to the start, break out
            if last_host == starting_host:
                break

        results = self._process_pending_results(iterator)
        host_results.extend(results)

        # each result is counted as a worker being free again
        workers_free += len(results)

        self.update_active_connections(results)

        included_files = IncludedFile.process_include_results(
            host_results,
            iterator=iterator,
            loader=self._loader,
            variable_manager=self._variable_manager
        )

        if len(included_files) > 0:
            all_blocks = dict((host, []) for host in hosts_left)
            for included_file in included_files:
                display.debug("collecting new blocks for %s" % included_file)
                try:
                    if included_file._is_role:
                        new_ir = self._copy_included_file(included_file)

                        new_blocks, handler_blocks = new_ir.get_block_list(
                            play=iterator._play,
                            variable_manager=self._variable_manager,
                            loader=self._loader,
                        )
                    else:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                except AnsibleError as e:
                    for host in included_file._hosts:
                        iterator.mark_host_failed(host)
                    display.warning(to_text(e))
                    continue

                for new_block in new_blocks:
                    task_vars = self._variable_manager.get_vars(play=iterator._play, task=new_block._parent,
                                                                _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
                    final_block = new_block.filter_tagged_tasks(task_vars)
                    for host in hosts_left:
                        if host in included_file._hosts:
                            all_blocks[host].append(final_block)
                display.debug("done collecting new blocks for %s" % included_file)

            display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
            for host in hosts_left:
                iterator.add_tasks(host, all_blocks[host])
            display.debug("done adding collected blocks to iterator")

        # pause briefly so we don't spin lock
        time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)

    # collect all the final results
    results = self._wait_on_pending_results(iterator)

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
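# --- Illustrative sketch (not part of the original strategy): the "fair" host
# cursor described in the run() docstring above. The search for the next runnable
# host resumes from wherever it last stopped and wraps around, instead of restarting
# at index 0 each pass. Plain strings stand in for Host objects and the helper name
# is hypothetical.

def _fair_host_order(hosts, last_host):
    starting_host = last_host
    while True:
        yield hosts[last_host]
        last_host = (last_host + 1) % len(hosts)
        if last_host == starting_host:
            return

assert list(_fair_host_order(['a', 'b', 'c'], last_host=1)) == ['b', 'c', 'a']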