def test_valid_check_pkg_version_format(hostvars, host, result):
    task = FakeTask('sanity_checks', {'checks': []})
    plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
    check = plugin.check_pkg_version_format(hostvars, host)
    assert check == result
def _get_delegated_vars(self, play, task, existing_variables): # we unfortunately need to template the delegate_to field here, # as we're fetching vars before post_validate has been called on # the task that has been passed in vars_copy = existing_variables.copy() templar = Templar(loader=self._loader, variables=vars_copy) items = [] if task.loop is not None: if task.loop in lookup_loader: try: loop_terms = listify_lookup_plugin_terms( terms=task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=False) items = lookup_loader.get(task.loop, loader=self._loader, templar=templar).run( terms=loop_terms, variables=vars_copy) except AnsibleUndefinedVariable: # This task will be skipped later due to this, so we just setup # a dummy array for the later code so it doesn't fail items = [None] else: raise AnsibleError( "Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop) else: items = [None] delegated_host_vars = dict() for item in items: # update the variables with the item value for templating, in case we need it if item is not None: vars_copy['item'] = item templar.set_available_variables(vars_copy) delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False) if delegated_host_name is None: raise AnsibleError( message="Undefined delegate_to host for task:", obj=task._ds) if delegated_host_name in delegated_host_vars: # no need to repeat ourselves, as the delegate_to value # does not appear to be tied to the loop item variable continue # a dictionary of variables to use if we have to create a new host below # we set the default port based on the default transport here, to make sure # we use the proper default for windows new_port = C.DEFAULT_REMOTE_PORT if C.DEFAULT_TRANSPORT == 'winrm': new_port = 5986 new_delegated_host_vars = dict( ansible_delegated_host=delegated_host_name, ansible_host= delegated_host_name, # not redundant as other sources can change ansible_host ansible_port=new_port, ansible_user=C.DEFAULT_REMOTE_USER, ansible_connection=C.DEFAULT_TRANSPORT, ) # now try to find the delegated-to host in inventory, or failing that, # create a new host on the fly so we can fetch variables for it delegated_host = None if self._inventory is not None: delegated_host = self._inventory.get_host(delegated_host_name) # try looking it up based on the address field, and finally # fall back to creating a host on the fly to use for the var lookup if delegated_host is None: if delegated_host_name in C.LOCALHOST: delegated_host = self._inventory.localhost else: for h in self._inventory.get_hosts( ignore_limits=True, ignore_restrictions=True): # check if the address matches, or if both the delegated_to host # and the current host are in the list of localhost aliases if h.address == delegated_host_name: delegated_host = h break else: delegated_host = Host(name=delegated_host_name) delegated_host.vars = combine_vars( delegated_host.vars, new_delegated_host_vars) else: delegated_host = Host(name=delegated_host_name) delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars) # now we go fetch the vars for the delegated-to host and save them in our # master dictionary of variables to be used later in the TaskExecutor/PlayContext delegated_host_vars[delegated_host_name] = self.get_vars( play=play, host=delegated_host, task=task, include_delegate_to=False, include_hostvars=False, ) return delegated_host_vars
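# Illustrative sketch (not the Ansible implementation): shows how _get_delegated_vars above
# seeds connection defaults for a delegated-to host that is not in inventory -- the port
# default switches to 5986 only when the default transport is winrm. The module-level
# constants below are local stand-ins for the ansible.constants values used in the real code.
DEFAULT_REMOTE_PORT = None
DEFAULT_TRANSPORT = 'smart'
DEFAULT_REMOTE_USER = None

def seed_delegated_host_vars(delegated_host_name):
    new_port = DEFAULT_REMOTE_PORT
    if DEFAULT_TRANSPORT == 'winrm':
        new_port = 5986
    return dict(
        ansible_delegated_host=delegated_host_name,
        ansible_host=delegated_host_name,  # not redundant: other var sources may override ansible_host
        ansible_port=new_port,
        ansible_user=DEFAULT_REMOTE_USER,
        ansible_connection=DEFAULT_TRANSPORT,
    )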
def _evaluate_conditional(h):
    all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
    templar = Templar(loader=self._loader, variables=all_vars)
    return task.evaluate_conditional(templar, all_vars)
def run(self, iterator, play_context): ''' The "free" strategy is a bit more complex, in that it allows tasks to be sent to hosts as quickly as they can be processed. This means that some hosts may finish very quickly if run tasks result in little or no work being done versus other systems. The algorithm used here also tries to be more "fair" when iterating through hosts by remembering the last host in the list to be given a task and starting the search from there as opposed to the top of the hosts list again, which would end up favoring hosts near the beginning of the list. ''' # the last host to be given a task last_host = 0 result = self._tqm.RUN_OK work_to_do = True while work_to_do and not self._tqm._terminated: hosts_left = self.get_hosts_left(iterator) if len(hosts_left) == 0: self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') result = False break work_to_do = False # assume we have no more work to do starting_host = last_host # save current position so we know when we've looped back around and need to break # try and find an unblocked host with a task to run host_results = [] while True: host = hosts_left[last_host] display.debug("next free host: %s" % host) host_name = host.get_name() # peek at the next task for the host, to see if there's # anything to do do for this host (state, task) = iterator.get_next_task_for_host(host, peek=True) display.debug("free host state: %s" % state) display.debug("free host task: %s" % task) if host_name not in self._tqm._unreachable_hosts and task: # set the flag so the outer loop knows we've still found # some work which needs to be done work_to_do = True display.debug("this host has work to do") # check to see if this host is blocked (still executing a previous task) if host_name not in self._blocked_hosts or not self._blocked_hosts[ host_name]: # pop the task, mark the host blocked, and queue it self._blocked_hosts[host_name] = True (state, task) = iterator.get_next_task_for_host(host) try: action = action_loader.get(task.action, class_only=True) except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin action = None display.debug("getting variables") task_vars = self._variable_manager.get_vars( play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") try: task.name = to_text(templar.template( task.name, fail_on_undefined=False), nonstring='empty') display.debug("done templating") except: # just ignore any errors during task name templating, # we don't care if it just shows the raw name display.debug("templating failed for some reason") pass run_once = templar.template( task.run_once) or action and getattr( action, 'BYPASS_HOST_LOOP', False) if run_once: if action and getattr(action, 'BYPASS_HOST_LOOP', False): raise AnsibleError( "The '%s' module bypasses the host loop, which is currently not supported in the free strategy " "and would instead execute for every host in the inventory list." % task.action, obj=task._ds) else: display.warning( "Using run_once with the free strategy is not currently supported. This task will still be " "executed for every host in the inventory list." 
) # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(host): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: display.debug( "'%s' skipped because role has already run" % task) del self._blocked_hosts[host_name] continue if task.action == 'meta': self._execute_meta(task, play_context, iterator, target_host=host) self._blocked_hosts[host_name] = False else: # handle step if needed, skip meta actions as they are used internally if not self._step or self._take_step( task, host_name): if task.any_errors_fatal: display.warning( "Using any_errors_fatal with the free strategy is not supported, " "as tasks are executed independently on each host" ) self._tqm.send_callback( 'v2_playbook_on_task_start', task, is_conditional=False) self._queue_task(host, task, task_vars, play_context) del task_vars else: display.debug("%s is blocked, skipping for now" % host_name) # move on to the next host and make sure we # haven't gone past the end of our hosts list last_host += 1 if last_host > len(hosts_left) - 1: last_host = 0 # if we've looped around back to the start, break out if last_host == starting_host: break results = self._process_pending_results(iterator) host_results.extend(results) try: included_files = IncludedFile.process_include_results( host_results, self._tqm, iterator=iterator, inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError as e: return self._tqm.RUN_ERROR if len(included_files) > 0: all_blocks = dict((host, []) for host in hosts_left) for included_file in included_files: display.debug("collecting new blocks for %s" % included_file) try: if included_file._is_role: new_ir = self._copy_included_file(included_file) new_blocks, handler_blocks = new_ir.get_block_list( play=iterator._play, variable_manager=self._variable_manager, loader=self._loader, ) self._tqm.update_handler_list([ handler for handler_block in handler_blocks for handler in handler_block.block ]) else: new_blocks = self._load_included_file( included_file, iterator=iterator) except AnsibleError as e: for host in included_file._hosts: iterator.mark_host_failed(host) display.warning(str(e)) continue for new_block in new_blocks: task_vars = self._variable_manager.get_vars( play=iterator._play, task=included_file._task) final_block = new_block.filter_tagged_tasks( play_context, task_vars) for host in hosts_left: if host in included_file._hosts: all_blocks[host].append(final_block) display.debug("done collecting new blocks for %s" % included_file) display.debug( "adding all collected blocks from %d included file(s) to iterator" % len(included_files)) for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) display.debug("done adding collected blocks to iterator") # pause briefly so we don't spin lock time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL) # collect all the final results results = self._wait_on_pending_results(iterator) # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered return super(StrategyModule, self).run(iterator, play_context, result)
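# Illustrative sketch (standalone, not the strategy plugin itself): a minimal model of the
# "fair" host iteration described in the free strategy's docstring above. Scanning resumes
# from the last host that received work instead of the top of the list, and stops once the
# scan wraps back to where it started.
def next_unblocked_host(hosts_left, blocked, last_host):
    """Return (host, new_cursor), or (None, cursor) if every host is currently blocked."""
    starting_host = last_host
    while True:
        host = hosts_left[last_host]
        last_host = (last_host + 1) % len(hosts_left)
        if not blocked.get(host, False):
            return host, last_host
        if last_host == starting_host:
            return None, last_host

hosts = ['web1', 'web2', 'db1']
host, cursor = next_unblocked_host(hosts, {'web1': True}, 0)
assert host == 'web2' and cursor == 2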
def _execute(self, variables=None): ''' The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution ''' if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) context_validation_error = None try: # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line self._play_context = self._play_context.set_task_and_variable_override( task=self._task, variables=variables, templar=templar) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. self._play_context.post_validate(templar=templar) # We also add "magic" variables back into the variables dict to make sure # a certain subset of variables exist. self._play_context.update_vars(variables) except AnsibleError as e: # save the error, which we'll raise later if we don't end up # skipping this task during the conditional evaluation step context_validation_error = e # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail try: if not self._task.evaluate_conditional(templar, variables): self._display.debug( "when evaluation failed, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log) except AnsibleError: # skip conditional exception in the case of includes as the vars needed might not be avaiable except in the included tasks or due to tags if self._task.action != 'include': raise # if we ran into an error while setting up the PlayContext, raise it now if context_validation_error is not None: raise context_validation_error # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action == 'include': include_variables = self._task.args.copy() include_file = include_variables.pop('_raw_params', None) if not include_file: return dict(failed=True, msg="No include file was specified to the include") include_file = templar.template(include_file) return dict(include=include_file, include_variables=include_variables) # Now we do final validation on the task, which sets all fields to their final values. 
self._task.post_validate(templar=templar) if '_variable_params' in self._task.args: variable_params = self._task.args.pop('_variable_params') if isinstance(variable_params, dict): self._display.deprecated( "Using variables for task params is unsafe, especially if the variables come from an external source like facts" ) variable_params.update(self._task.args) self._task.args = variable_params # get the connection and the handler for this execution self._connection = self._get_connection(variables=variables, templar=templar) self._connection.set_host_overrides(host=self._host) self._handler = self._get_action_handler(connection=self._connection, templar=templar) # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token) # Read some values from the task, so that we can modify them if need be if self._task.until is not None: retries = self._task.retries if retries <= 0: retries = 1 else: retries = 1 delay = self._task.delay if delay < 0: delay = 1 # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions vars_copy = variables.copy() self._display.debug("starting attempt loop") result = None for attempt in range(retries): if attempt > 0: self._display.display( "FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries - attempt, result), color="red") result['attempts'] = attempt + 1 self._display.debug("running the handler") try: result = self._handler.run(task_vars=variables) except AnsibleConnectionFailure as e: return dict(unreachable=True, msg=str(e)) self._display.debug("handler run complete") if self._task. 
async > 0: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result try: result = json.loads(result.get('stdout')) except (TypeError, ValueError) as e: return dict( failed=True, msg="The async task did not return valid JSON: %s" % str(e)) if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar) # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: vars_copy[self._task.register] = result if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) # create a conditional object to evaluate task conditions cond = Conditional(loader=self._loader) def _evaluate_changed_when_result(result): if self._task.changed_when is not None: cond.when = [self._task.changed_when] result['changed'] = cond.evaluate_conditional( templar, vars_copy) def _evaluate_failed_when_result(result): if self._task.failed_when is not None: cond.when = [self._task.failed_when] failed_when_result = cond.evaluate_conditional( templar, vars_copy) result['failed_when_result'] = result[ 'failed'] = failed_when_result return failed_when_result return False if self._task.until: cond.when = self._task.until if cond.evaluate_conditional(templar, vars_copy): _evaluate_changed_when_result(result) _evaluate_failed_when_result(result) break elif (self._task.changed_when is not None or self._task.failed_when is not None) and 'skipped' not in result: _evaluate_changed_when_result(result) if _evaluate_failed_when_result(result): break elif 'failed' not in result: if result.get('rc', 0) != 0: result['failed'] = True else: # if the result is not failed, stop trying break if attempt < retries - 1: time.sleep(delay) else: _evaluate_changed_when_result(result) _evaluate_failed_when_result(result)
def _squash_items(self, items, loop_var, variables): ''' Squash items down to a comma-separated list for certain modules which support it (typically package management modules). ''' name = None try: # _task.action could contain templatable strings (via action: and # local_action:) Template it before comparing. If we don't end up # optimizing it here, the templatable string might use template vars # that aren't available until later (it could even use vars from the # with_items loop) so don't make the templated string permanent yet. templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) task_action = self._task.action if templar._contains_vars(task_action): task_action = templar.template(task_action, fail_on_undefined=False) if len(items) > 0 and task_action in self.SQUASH_ACTIONS: if all(isinstance(o, string_types) for o in items): final_items = [] for allowed in ['name', 'pkg', 'package']: name = self._task.args.pop(allowed, None) if name is not None: break # This gets the information to check whether the name field # contains a template that we can squash for template_no_item = template_with_item = None if name: if templar._contains_vars(name): variables[loop_var] = '\0$' template_no_item = templar.template(name, variables, cache=False) variables[loop_var] = '\0@' template_with_item = templar.template(name, variables, cache=False) del variables[loop_var] # Check if the user is doing some operation that doesn't take # name/pkg or the name/pkg field doesn't have any variables # and thus the items can't be squashed if template_no_item != template_with_item: for item in items: variables[loop_var] = item if self._task.evaluate_conditional(templar, variables): new_item = templar.template(name, cache=False) final_items.append(new_item) self._task.args['name'] = final_items # Wrap this in a list so that the calling function loop # executes exactly once return [final_items] else: # Restore the name parameter self._task.args['name'] = name # elif: # Right now we only optimize single entries. In the future we # could optimize more types: # * lists can be squashed together # * dicts could squash entries that match in all cases except the # name or pkg field. except Exception: # Squashing is an optimization. If it fails for any reason, # simply use the unoptimized list of items. # Restore the name parameter if name is not None: self._task.args['name'] = name return items
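# Illustrative sketch (standalone): the "squash" optimization performed by _squash_items
# above for package-manager style modules. When the name argument templates the loop item,
# a single invocation with a list of names replaces one invocation per item. The render
# callable is a hypothetical stand-in for the real templar.
def squash_items(items, name_template, render):
    """render(template, item) -> str; returns (name_args, items_for_loop)."""
    # Probe with two different sentinel items; if the rendered name changes, the template
    # really depends on the loop item and the items can be squashed into one call.
    if render(name_template, '\0$') != render(name_template, '\0@'):
        final_items = [render(name_template, item) for item in items]
        return final_items, [final_items]  # wrap in a list so the loop body runs exactly once
    return name_template, items

render = lambda tmpl, item: tmpl.replace('{{ item }}', str(item))
name, loop = squash_items(['git', 'vim', 'tmux'], '{{ item }}', render)
assert name == ['git', 'vim', 'tmux'] and loop == [['git', 'vim', 'tmux']]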
def run(self, tmp=None, task_vars=dict()):
    ''' handler for template operations '''

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)

    if (source is None and 'first_available_file' not in task_vars) or dest is None:
        return dict(failed=True, msg="src and dest are required")

    if tmp is None:
        tmp = self._make_tmp_path()

    ##################################################################################################
    # FIXME: this all needs to be sorted out
    ##################################################################################################
    # if we have first_available_file in our vars
    # look up the files and use the first one we find as src
    #if 'first_available_file' in task_vars:
    #    found = False
    #    for fn in task_vars.get('first_available_file'):
    #        fn_orig = fn
    #        fnt = template.template(self.runner.basedir, fn, task_vars)
    #        fnd = utils.path_dwim(self.runner.basedir, fnt)
    #        if not os.path.exists(fnd) and '_original_file' in task_vars:
    #            fnd = utils.path_dwim_relative(task_vars['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
    #        if os.path.exists(fnd):
    #            source = fnd
    #            found = True
    #            break
    #    if not found:
    #        result = dict(failed=True, msg="could not find src in first_available_file list")
    #        return ReturnData(conn=conn, comm_ok=False, result=result)
    #else:
    if 1:
        if self._task._role is not None:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
        else:
            source = self._loader.path_dwim(source)
    ##################################################################################################
    # END FIXME
    ##################################################################################################

    # Expand any user home dir specification
    dest = self._remote_expand_user(dest, tmp)

    directory_prepended = False
    if dest.endswith(os.sep):
        directory_prepended = True
        base = os.path.basename(source)
        dest = os.path.join(dest, base)

    # template the source data locally & get ready to transfer
    templar = Templar(loader=self._loader, variables=task_vars)
    try:
        with open(source, 'r') as f:
            template_data = f.read()
        resultant = templar.template(template_data, preserve_trailing_newlines=True)
    except Exception as e:
        return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
def parse(self): ''' Given a task in one of the supported forms, parses and returns returns the action, arguments, and delegate_to values for the task, dealing with all sorts of levels of fuzziness. ''' thing = None action = None delegate_to = self._task_ds.get('delegate_to', Sentinel) args = dict() # This is the standard YAML form for command-type modules. We grab # the args and pass them in as additional arguments, which can/will # be overwritten via dict updates from the other arg sources below additional_args = self._task_ds.get('args', dict()) # We can have one of action, local_action, or module specified # action if 'action' in self._task_ds: # an old school 'action' statement thing = self._task_ds['action'] action, args = self._normalize_parameters( thing, action=action, additional_args=additional_args) # local_action if 'local_action' in self._task_ds: # local_action is similar but also implies a delegate_to if action is not None: raise AnsibleParserError( "action and local_action are mutually exclusive", obj=self._task_ds) thing = self._task_ds.get('local_action', '') delegate_to = 'localhost' action, args = self._normalize_parameters( thing, action=action, additional_args=additional_args) # module: <stuff> is the more new-style invocation # walk the input dictionary to see we recognize a module name for (item, value) in iteritems(self._task_ds): if item in BUILTIN_TASKS or action_loader.has_plugin(item, collection_list=self._collection_list) or \ module_loader.has_plugin(item, collection_list=self._collection_list): # finding more than one module name is a problem if action is not None: raise AnsibleParserError( "conflicting action statements: %s, %s" % (action, item), obj=self._task_ds) action = item thing = value action, args = self._normalize_parameters( thing, action=action, additional_args=additional_args) # if we didn't see any module in the task at all, it's not a task really if action is None: if 'ping' not in module_loader: raise AnsibleParserError( "The requested action was not found in configured module paths. " "Additionally, core modules are missing. If this is a checkout, " "run 'git pull --rebase' to correct this problem.", obj=self._task_ds) else: raise AnsibleParserError( "no action detected in task. This often indicates a misspelled module name, or incorrect module path.", obj=self._task_ds) elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES: templar = Templar(loader=None) raw_params = args.pop('_raw_params') if templar.is_template(raw_params): args['_variable_params'] = raw_params else: raise AnsibleParserError( "this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds) return (action, args, delegate_to)
def _get_loop_items(self): ''' Loads a lookup plugin to handle the with_* portion of a task (if specified), and returns the items result. ''' # save the play context variables to a temporary dictionary, # so that we can modify the job vars without doing a full copy # and later restore them to avoid modifying things too early play_context_vars = dict() self._play_context.update_vars(play_context_vars) old_vars = dict() for k in play_context_vars.keys(): if k in self._job_vars: old_vars[k] = self._job_vars[k] self._job_vars[k] = play_context_vars[k] templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) items = None if self._task.loop: if self._task.loop in self._shared_loader_obj.lookup_loader: #TODO: remove convert_bare true and deprecate this in with_ if self._task.loop == 'first_found': # first_found loops are special. If the item is undefined # then we want to fall through to the next value rather # than failing. loop_terms = listify_lookup_plugin_terms( terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=True) loop_terms = [ t for t in loop_terms if not templar._contains_vars(t) ] else: try: loop_terms = listify_lookup_plugin_terms( terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: display.deprecated( "Skipping task due to undefined Error, in the future this will be a fatal error.: %s" % to_bytes(e)) return None # get lookup mylookup = self._shared_loader_obj.lookup_loader.get( self._task.loop, loader=self._loader, templar=templar) # give lookup task 'context' for subdir (mostly needed for first_found) for subdir in ['template', 'var', 'file']: #TODO: move this to constants? if subdir in self._task.action: break setattr(mylookup, '_subdir', subdir + 's') # run lookup items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True) else: raise AnsibleError( "Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) # now we restore any old job variables that may have been modified, # and delete them if they were in the play context vars but not in # the old variables dictionary for k in play_context_vars.keys(): if k in old_vars: self._job_vars[k] = old_vars[k] else: del self._job_vars[k] if items: from ansible.vars.unsafe_proxy import UnsafeProxy for idx, item in enumerate(items): if item is not None and not isinstance(item, UnsafeProxy): items[idx] = UnsafeProxy(item) return items
def run(self, play): ''' Iterates over the roles/tasks in a play, using the given (or default) strategy for queueing tasks. The default is the linear strategy, which operates like classic Ansible by keeping all hosts in lock-step with a given task (meaning no hosts move on to the next task until all hosts are done with the current task). ''' # Fork # of forks, # of hosts or serial, whichever is lowest contenders = [ self._options.forks, play.serial, len(self._inventory.get_hosts(play.hosts)) ] contenders = [v for v in contenders if v is not None and v > 0] self._initialize_processes(min(contenders)) if not self._callbacks_loaded: self.load_callbacks() all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno()) for callback_plugin in self._callback_plugins: if hasattr(callback_plugin, 'set_play_context'): callback_plugin.set_play_context(play_context) self.send_callback('v2_playbook_on_play_start', new_play) # initialize the shared dictionary containing the notified handlers self._initialize_notified_handlers(new_play.handlers) # load the specified strategy (or the default linear one) strategy = strategy_loader.get(new_play.strategy, self) if strategy is None: raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds) # build the iterator iterator = PlayIterator( inventory=self._inventory, play=new_play, play_context=play_context, variable_manager=self._variable_manager, all_vars=all_vars, start_at_done=self._start_at_done, ) # during initialization, the PlayContext will clear the start_at_task # field to signal that a matching task was found, so check that here # and remember it so we don't try to skip tasks on future plays if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None: self._start_at_done = True # and run the play using the strategy and cleanup on way out play_return = strategy.run(iterator, play_context) self._cleanup_processes() return play_return
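# Illustrative sketch (standalone): the worker-count rule applied at the top of run(play)
# above. The number of forked worker processes is the smallest positive value among the
# --forks option, the play's serial setting, and the number of targeted hosts.
def worker_count(forks, serial, host_count):
    contenders = [forks, serial, host_count]
    contenders = [v for v in contenders if v is not None and v > 0]
    return min(contenders)

assert worker_count(5, None, 3) == 3
assert worker_count(5, 2, 100) == 2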
def run(self, iterator, play_context): ''' The linear strategy is simple - get the next task and queue it for all hosts, then wait for the queue to drain before moving on to the next task ''' # iteratate over each task, while there is one left to run result = True work_to_do = True while work_to_do and not self._tqm._terminated: try: display.debug("getting the remaining hosts for this loop") hosts_left = [ host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts ] display.debug("done getting the remaining hosts for this loop") # queue up this task for each host in the inventory callback_sent = False work_to_do = False host_results = [] host_tasks = self._get_next_task_lockstep(hosts_left, iterator) # skip control skip_rest = False choose_step = True results = [] for (host, task) in host_tasks: if not task: continue if self._tqm._terminated: break run_once = False work_to_do = True # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. try: action = action_loader.get(task.action, class_only=True) if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False): run_once = True except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin pass # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(host): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: display.debug( "'%s' skipped because role has already run" % task) continue if task.action == 'meta': self._execute_meta(task, play_context, iterator) else: # handle step if needed, skip meta actions as they are used internally if self._step and choose_step: if self._take_step(task): choose_step = False else: skip_rest = True break display.debug("getting variables") task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") if not callback_sent: display.debug( "sending task start callback, copying the task so we can template it temporarily" ) saved_name = task.name display.debug( "done copying, going to template now") try: task.name = text_type( templar.template(task.name, fail_on_undefined=False)) display.debug("done templating") except: # just ignore any errors during task name templating, # we don't care if it just shows the raw name display.debug( "templating failed for some reason") pass display.debug("here goes the callback...") self._tqm.send_callback( 'v2_playbook_on_task_start', task, is_conditional=False) task.name = saved_name callback_sent = True display.debug("sending task start callback") self._blocked_hosts[host.get_name()] = True self._queue_task(host, task, task_vars, play_context) # if we're bypassing the host loop, break out now if run_once: break results += self._process_pending_results(iterator, one_pass=True) # go to next host/task group if skip_rest: continue display.debug( "done queuing things up, now waiting for results 
queue to drain" ) results += self._wait_on_pending_results(iterator) host_results.extend(results) if not work_to_do and len(iterator.get_failed_hosts()) > 0: display.debug("out of hosts to run on") self._tqm.send_callback( 'v2_playbook_on_no_hosts_remaining') result = False break try: included_files = IncludedFile.process_include_results( host_results, self._tqm, iterator=iterator, inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError as e: return False if len(included_files) > 0: display.debug("we have included files to process") noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) display.debug("generating all_blocks data") all_blocks = dict((host, []) for host in hosts_left) display.debug("done generating all_blocks data") for included_file in included_files: display.debug("processing included file: %s" % included_file._filename) # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: new_blocks = self._load_included_file( included_file, iterator=iterator) display.debug( "iterating over new_blocks loaded from include file" ) for new_block in new_blocks: task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, task=included_file._task, ) display.debug("filtering new block on tags") final_block = new_block.filter_tagged_tasks( play_context, task_vars) display.debug( "done filtering new block on tags") noop_block = Block(parent_block=task._block) noop_block.block = [ noop_task for t in new_block.block ] noop_block.always = [ noop_task for t in new_block.always ] noop_block.rescue = [ noop_task for t in new_block.rescue ] for host in hosts_left: if host in included_file._hosts: all_blocks[host].append(final_block) else: all_blocks[host].append(noop_block) display.debug( "done iterating over new_blocks loaded from include file" ) except AnsibleError as e: for host in included_file._hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) display.error(e, wrap_text=False) continue # finally go through all of the hosts and append the # accumulated blocks to their list of tasks display.debug( "extending task lists for all hosts with included blocks" ) for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) display.debug("done extending task lists") display.debug("done processing included files") display.debug("results queue empty") except (IOError, EOFError) as e: display.debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed return False # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered return super(StrategyModule, self).run(iterator, play_context, result)
def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to): ''' Returns a dictionary of so-called "magic" variables in Ansible, which are special variables we set internally for use. ''' variables = {} variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir()) variables['ansible_playbook_python'] = sys.executable if play: # This is a list of all role names of all dependencies for all roles for this play dependency_role_names = list(set([d._role_name for r in play.roles for d in r.get_all_dependencies()])) # This is a list of all role names of all roles for this play play_role_names = [r._role_name for r in play.roles] # ansible_role_names includes all role names, dependent or directly referenced by the play variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names)) # ansible_play_role_names includes the names of all roles directly referenced by this play # roles that are implicitly referenced via dependencies are not listed. variables['ansible_play_role_names'] = play_role_names # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies # dependencies that are also explicitly named as roles are included in this list variables['ansible_dependent_role_names'] = dependency_role_names # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names variables['role_names'] = variables['ansible_play_role_names'] variables['ansible_play_name'] = play.get_name() if task: if task._role: variables['role_name'] = task._role.get_name() variables['role_path'] = task._role._role_path variables['role_uuid'] = text_type(task._role._uuid) if self._inventory is not None: variables['groups'] = self._inventory.get_groups_dict() if play: templar = Templar(loader=self._loader) if templar.is_template(play.hosts): pattern = 'all' else: pattern = play.hosts or 'all' # add the list of hosts in the play, as adjusted for limit/filters variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)] variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts] variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts] # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch, # however this would take work in the templating engine, so for now we'll add both variables['play_hosts'] = variables['ansible_play_batch'] # the 'omit' value alows params to be left out if the variable they are based on is undefined variables['omit'] = self._omit_token # Set options vars for option, option_value in iteritems(self._options_vars): variables[option] = option_value if self._hostvars is not None and include_hostvars: variables['hostvars'] = self._hostvars return variables
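# Illustrative sketch (standalone): how the role-name magic variables built in
# _get_magic_variables above relate to each other. Dependency roles contribute to
# ansible_role_names and ansible_dependent_role_names but not to ansible_play_role_names.
def role_name_vars(play_roles):
    """play_roles: mapping of role name -> list of dependency role names (simplified model)."""
    dependency_role_names = list(set(d for deps in play_roles.values() for d in deps))
    play_role_names = list(play_roles)
    return {
        'ansible_role_names': list(set(dependency_role_names + play_role_names)),
        'ansible_play_role_names': play_role_names,
        'ansible_dependent_role_names': dependency_role_names,
    }

v = role_name_vars({'web': ['common'], 'db': ['common']})
assert sorted(v['ansible_role_names']) == ['common', 'db', 'web']
assert v['ansible_dependent_role_names'] == ['common']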
def parse(self, inventory, loader, path, cache=True):
    ''' Populates self.groups from the given data. Raises an error on any parse failure. '''
    self.loader = loader
    self.inventory = inventory
    self.templar = Templar(loader=loader)
def test_invalid_check_pkg_version_format(hostvars, host, result):
    with pytest.raises(errors.AnsibleModuleError):
        task = FakeTask('sanity_checks', {'checks': []})
        plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
        plugin.check_pkg_version_format(hostvars, host)
def _get_loop_items(self): ''' Loads a lookup plugin to handle the with_* portion of a task (if specified), and returns the items result. ''' # save the play context variables to a temporary dictionary, # so that we can modify the job vars without doing a full copy # and later restore them to avoid modifying things too early play_context_vars = dict() self._play_context.update_vars(play_context_vars) old_vars = dict() for k in play_context_vars: if k in self._job_vars: old_vars[k] = self._job_vars[k] self._job_vars[k] = play_context_vars[k] # get search path for this task to pass to lookup plugins self._job_vars['ansible_search_path'] = self._task.get_search_path() templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) items = None if self._task.loop_with: if self._task.loop_with in self._shared_loader_obj.lookup_loader: fail = True if self._task.loop_with == 'first_found': # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing. fail = False loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, loader=self._loader, fail_on_undefined=fail, convert_bare=False) if not fail: loop_terms = [t for t in loop_terms if not templar._contains_vars(t)] # get lookup mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar) # give lookup task 'context' for subdir (mostly needed for first_found) for subdir in ['template', 'var', 'file']: # TODO: move this to constants? if subdir in self._task.action: break setattr(mylookup, '_subdir', subdir + 's') # run lookup items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with) elif self._task.loop: items = templar.template(self._task.loop) if not isinstance(items, list): raise AnsibleError( "Invalid data passed to 'loop', it requires a list, got this instead: %s." " Hint: If you passed a list/dict of just one element," " try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items ) # now we restore any old job variables that may have been modified, # and delete them if they were in the play context vars but not in # the old variables dictionary for k in play_context_vars: if k in old_vars: self._job_vars[k] = old_vars[k] else: del self._job_vars[k] if items: for idx, item in enumerate(items): if item is not None and not isinstance(item, UnsafeProxy): items[idx] = UnsafeProxy(item) # ensure basedir is always in (dwim already searches here but we need to display it) if self._loader.get_basedir() not in self._job_vars['ansible_search_path']: self._job_vars['ansible_search_path'].append(self._loader.get_basedir()) return items
def _execute(self, variables=None): ''' The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution ''' if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) context_validation_error = None try: # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line self._play_context = self._play_context.set_task_and_variable_override( task=self._task, variables=variables, templar=templar) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. self._play_context.post_validate(templar=templar) # now that the play context is finalized, if the remote_addr is not set # default to using the host's address field as the remote address if not self._play_context.remote_addr: self._play_context.remote_addr = self._host.address # We also add "magic" variables back into the variables dict to make sure # a certain subset of variables exist. self._play_context.update_vars(variables) except AnsibleError as e: # save the error, which we'll raise later if we don't end up # skipping this task during the conditional evaluation step context_validation_error = e # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail try: if not self._task.evaluate_conditional(templar, variables): display.debug("when evaluation failed, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log) except AnsibleError: # skip conditional exception in the case of includes as the vars needed might not be avaiable except in the included tasks or due to tags if self._task.action != 'include': raise # if we ran into an error while setting up the PlayContext, raise it now if context_validation_error is not None: raise context_validation_error # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action == 'include': include_variables = self._task.args.copy() include_file = include_variables.pop('_raw_params', None) if not include_file: return dict(failed=True, msg="No include file was specified to the include") include_file = templar.template(include_file) return dict(include=include_file, include_variables=include_variables) # Now we do final validation on the task, which sets all fields to their final values. 
self._task.post_validate(templar=templar) if '_variable_params' in self._task.args: variable_params = self._task.args.pop('_variable_params') if isinstance(variable_params, dict): display.deprecated( "Using variables for task params is unsafe, especially if the variables come from an external source like facts" ) variable_params.update(self._task.args) self._task.args = variable_params # get the connection and the handler for this execution if not self._connection or not getattr( self._connection, 'connected', False ) or self._play_context.remote_addr != self._connection._play_context.remote_addr: self._connection = self._get_connection(variables=variables, templar=templar) self._connection.set_host_overrides( host=self._host, hostvars=variables.get('hostvars', {}).get(self._host.name, {})) else: # if connection is reused, its _play_context is no longer valid and needs # to be replaced with the one templated above, in case other data changed self._connection._play_context = self._play_context self._handler = self._get_action_handler(connection=self._connection, templar=templar) # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token) # Read some values from the task, so that we can modify them if need be if self._task.until: retries = self._task.retries if retries is None: retries = 3 elif retries <= 0: retries = 1 else: retries += 1 else: retries = 1 delay = self._task.delay if delay < 0: delay = 1 # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions vars_copy = variables.copy() display.debug("starting attempt loop") result = None for attempt in range(1, retries + 1): display.debug("running the handler") try: result = self._handler.run(task_vars=variables) except AnsibleConnectionFailure as e: return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") # preserve no log result["_ansible_no_log"] = self._play_context.no_log # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: vars_copy[self._task.register] = wrap_var(result.copy()) if self._task. async > 0: if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy) # ensure no log is preserved result["_ansible_no_log"] = self._play_context.no_log # helper methods for use below in evaluating changed/failed_when def _evaluate_changed_when_result(result): if self._task.changed_when is not None and self._task.changed_when: cond = Conditional(loader=self._loader) cond.when = self._task.changed_when result['changed'] = cond.evaluate_conditional( templar, vars_copy) def _evaluate_failed_when_result(result): if self._task.failed_when: cond = Conditional(loader=self._loader) cond.when = self._task.failed_when failed_when_result = cond.evaluate_conditional( templar, vars_copy) result['failed_when_result'] = result[ 'failed'] = failed_when_result else: failed_when_result = False return failed_when_result if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) # set the failed property if the result has a non-zero rc. 
This will be # overridden below if the failed_when property is set if result.get('rc', 0) != 0: result['failed'] = True # if we didn't skip this task, use the helpers to evaluate the changed/ # failed_when properties if 'skipped' not in result: _evaluate_changed_when_result(result) _evaluate_failed_when_result(result) if retries > 1: cond = Conditional(loader=self._loader) cond.when = self._task.until if cond.evaluate_conditional(templar, vars_copy): break else: # no conditional check, or it failed, so sleep for the specified time if attempt < retries: result['attempts'] = attempt result['_ansible_retry'] = True result['retries'] = retries display.debug('Retrying task, attempt %d of %d' % (attempt, retries)) self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result), block=False) time.sleep(delay)
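# Illustrative sketch (standalone): the retry accounting used by the until/retries/delay
# handling in _execute above. retries=None defaults to 3, non-positive values collapse to a
# single attempt, a positive value yields retries + 1 total attempts, and failed attempts
# sleep for delay seconds before retrying. run_once and until_holds are hypothetical callables.
import time

def run_until(run_once, until_holds, retries=None, delay=5):
    if retries is None:
        retries = 3
    elif retries <= 0:
        retries = 1
    else:
        retries += 1
    if delay < 0:
        delay = 1
    result = None
    for attempt in range(1, retries + 1):
        result = run_once()
        result['attempts'] = attempt
        if until_holds(result):
            break
        if attempt < retries:
            time.sleep(delay)
    return result

r = run_until(lambda: {'rc': 0}, lambda res: res['rc'] == 0, retries=2, delay=0)
assert r['attempts'] == 1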
def _run_loop(self, items): ''' Runs the task with the loop items specified and collates the result into an array named 'results' which is inserted into the final result along with the item for which the loop ran. ''' results = [] # make copies of the job vars and task so we can add the item to # the variables and re-validate the task with the item variable # task_vars = self._job_vars.copy() task_vars = self._job_vars loop_var = 'item' index_var = None label = None loop_pause = 0 templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) if self._task.loop_control: # FIXME: move this to the object itself to allow post_validate to take care of templating loop_var = templar.template(self._task.loop_control.loop_var) index_var = templar.template(self._task.loop_control.index_var) loop_pause = templar.template(self._task.loop_control.pause) # the these may be 'None', so we still need to default to something useful # this is tempalted below after an item is assigned label = (self._task.loop_control.label or ('{{' + loop_var + '}}')) if loop_var in task_vars: display.warning(u"The loop variable '%s' is already in use. " u"You should set the `loop_var` value in the `loop_control` option for the task" u" to something else to avoid variable collisions and unexpected behavior." % loop_var) ran_once = False if self._task.loop_with: # Only squash with 'with_:' not with the 'loop:', 'magic' squashing can be removed once with_ loops are items = self._squash_items(items, loop_var, task_vars) for item_index, item in enumerate(items): task_vars[loop_var] = item if index_var: task_vars[index_var] = item_index # Update template vars to reflect current loop iteration templar.set_available_variables(task_vars) # pause between loop iterations if loop_pause and ran_once: try: time.sleep(float(loop_pause)) except ValueError as e: raise AnsibleError('Invalid pause value: %s, produced error: %s' % (loop_pause, to_native(e))) else: ran_once = True try: tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True) tmp_task._parent = self._task._parent tmp_play_context = self._play_context.copy() except AnsibleParserError as e: results.append(dict(failed=True, msg=to_text(e))) continue # now we swap the internal task and play context with their copies, # execute, and swap them back so we can do the next iteration cleanly (self._task, tmp_task) = (tmp_task, self._task) (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context) res = self._execute(variables=task_vars) task_fields = self._task.dump_attrs() (self._task, tmp_task) = (tmp_task, self._task) (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context) # now update the result with the item info, and append the result # to the list of results res[loop_var] = item if index_var: res[index_var] = item_index res['_ansible_item_result'] = True res['_ansible_ignore_errors'] = task_fields.get('ignore_errors') if label is not None: res['_ansible_item_label'] = templar.template(label, cache=False) self._rslt_q.put( TaskResult( self._host.name, self._task._uuid, res, task_fields=task_fields, ), block=False, ) results.append(res) del task_vars[loop_var] return results
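# Illustrative sketch (standalone): the per-item bookkeeping performed by _run_loop above.
# Each iteration publishes the item under loop_var (default 'item'), optionally exposes the
# index under index_var, and records both back onto that iteration's result.
def run_loop(items, execute, loop_var='item', index_var=None):
    results = []
    task_vars = {}
    for item_index, item in enumerate(items):
        task_vars[loop_var] = item
        if index_var:
            task_vars[index_var] = item_index
        res = execute(task_vars)
        res[loop_var] = item
        if index_var:
            res[index_var] = item_index
        results.append(res)
    del task_vars[loop_var]
    return results

out = run_loop(['a', 'b'], lambda v: {'changed': True}, index_var='idx')
assert out[1] == {'changed': True, 'item': 'b', 'idx': 1}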
def test_post_validate_empty(self):
    fake_loader = DictDataLoader({})
    templar = Templar(loader=fake_loader)
    ret = self.b.post_validate(templar)
    self.assertIsNone(ret)
def _execute(self, variables=None): ''' The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution ''' if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) context_validation_error = None try: # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. self._play_context.post_validate(templar=templar) # now that the play context is finalized, if the remote_addr is not set # default to using the host's address field as the remote address if not self._play_context.remote_addr: self._play_context.remote_addr = self._host.address # We also add "magic" variables back into the variables dict to make sure # a certain subset of variables exist. self._play_context.update_vars(variables) # FIXME: update connection/shell plugin options except AnsibleError as e: # save the error, which we'll raise later if we don't end up # skipping this task during the conditional evaluation step context_validation_error = e # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail try: if not self._task.evaluate_conditional(templar, variables): display.debug("when evaluation is False, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log) except AnsibleError: # loop error takes precedence if self._loop_eval_error is not None: raise self._loop_eval_error # pylint: disable=raising-bad-type raise # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task if self._loop_eval_error is not None: raise self._loop_eval_error # pylint: disable=raising-bad-type # if we ran into an error while setting up the PlayContext, raise it now if context_validation_error is not None: raise context_validation_error # pylint: disable=raising-bad-type # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action in ('include', 'include_tasks'): include_variables = self._task.args.copy() include_file = include_variables.pop('_raw_params', None) if not include_file: return dict(failed=True, msg="No include file was specified to the include") include_file = templar.template(include_file) return dict(include=include_file, include_variables=include_variables) # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host elif self._task.action == 'include_role': include_variables = self._task.args.copy() return dict(include_variables=include_variables) # Now we do final validation on the task, which sets all fields to their final values. 
self._task.post_validate(templar=templar) if '_variable_params' in self._task.args: variable_params = self._task.args.pop('_variable_params') if isinstance(variable_params, dict): display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts", version="2.6") variable_params.update(self._task.args) self._task.args = variable_params # get the connection and the handler for this execution if (not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr): self._connection = self._get_connection(variables=variables, templar=templar) else: # if connection is reused, its _play_context is no longer valid and needs # to be replaced with the one templated above, in case other data changed self._connection._play_context = self._play_context self._set_connection_options(variables, templar) self._set_shell_options(variables, templar) # get handler self._handler = self._get_action_handler(connection=self._connection, templar=templar) # Apply default params for action/module, if present # These are collected as a list of dicts, so we need to merge them module_defaults = {} for default in self._task.module_defaults: module_defaults.update(default) if module_defaults: module_defaults = templar.template(module_defaults) if self._task.action in module_defaults: tmp_args = module_defaults[self._task.action].copy() tmp_args.update(self._task.args) self._task.args = tmp_args # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: self._task.args = remove_omit(self._task.args, omit_token) # Read some values from the task, so that we can modify them if need be if self._task.until: retries = self._task.retries if retries is None: retries = 3 elif retries <= 0: retries = 1 else: retries += 1 else: retries = 1 delay = self._task.delay if delay < 0: delay = 1 # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions vars_copy = variables.copy() display.debug("starting attempt loop") result = None for attempt in range(1, retries + 1): display.debug("running the handler") try: result = self._handler.run(task_vars=variables) except AnsibleActionSkip as e: return dict(skipped=True, msg=to_text(e)) except AnsibleActionFail as e: return dict(failed=True, msg=to_text(e)) except AnsibleConnectionFailure as e: return dict(unreachable=True, msg=to_text(e)) display.debug("handler run complete") # preserve no log result["_ansible_no_log"] = self._play_context.no_log # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: vars_copy[self._task.register] = wrap_var(result) if self._task.async_val > 0: if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'): result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy) # FIXME callback 'v2_runner_on_async_poll' here # ensure no log is preserved result["_ansible_no_log"] = self._play_context.no_log # helper methods for use below in evaluating changed/failed_when def _evaluate_changed_when_result(result): if self._task.changed_when is not None and self._task.changed_when: cond = Conditional(loader=self._loader) cond.when = self._task.changed_when result['changed'] = 
cond.evaluate_conditional(templar, vars_copy) def _evaluate_failed_when_result(result): if self._task.failed_when: cond = Conditional(loader=self._loader) cond.when = self._task.failed_when failed_when_result = cond.evaluate_conditional(templar, vars_copy) result['failed_when_result'] = result['failed'] = failed_when_result else: failed_when_result = False return failed_when_result if 'ansible_facts' in result: if self._task.action in ('set_fact', 'include_vars'): vars_copy.update(result['ansible_facts']) else: vars_copy.update(namespace_facts(result['ansible_facts'])) if C.INJECT_FACTS_AS_VARS: vars_copy.update(clean_facts(result['ansible_facts'])) # set the failed property if it was missing. if 'failed' not in result: # rc is here for backwards compatibility and modules that use it instead of 'failed' if 'rc' in result and result['rc'] not in [0, "0"]: result['failed'] = True else: result['failed'] = False # Make attempts and retries available early to allow their use in changed/failed_when if self._task.until: result['attempts'] = attempt # set the changed property if it was missing. if 'changed' not in result: result['changed'] = False # re-update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution # This gives changed/failed_when access to additional recently modified # attributes of result if self._task.register: vars_copy[self._task.register] = wrap_var(result) # if we didn't skip this task, use the helpers to evaluate the changed/ # failed_when properties if 'skipped' not in result: _evaluate_changed_when_result(result) _evaluate_failed_when_result(result) if retries > 1: cond = Conditional(loader=self._loader) cond.when = self._task.until if cond.evaluate_conditional(templar, vars_copy): break else: # no conditional check, or it failed, so sleep for the specified time if attempt < retries: result['_ansible_retry'] = True result['retries'] = retries display.debug('Retrying task, attempt %d of %d' % (attempt, retries)) self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False) time.sleep(delay) else: if retries > 1: # we ran out of attempts, so mark the result as failed result['attempts'] = retries - 1 result['failed'] = True # do the final update of the local variables here, for both registered # values and any facts which may have been created if self._task.register: variables[self._task.register] = wrap_var(result) if 'ansible_facts' in result: if self._task.action in ('set_fact', 'include_vars'): variables.update(result['ansible_facts']) else: variables.update(namespace_facts(result['ansible_facts'])) if C.INJECT_FACTS_AS_VARS: variables.update(clean_facts(result['ansible_facts'])) # save the notification target in the result, if it was specified, as # this task may be running in a loop in which case the notification # may be item-specific, ie. 
"notify: service {{item}}" if self._task.notify is not None: result['_ansible_notify'] = self._task.notify # add the delegated vars to the result, so we can reference them # on the results side without having to do any further templating # FIXME: we only want a limited set of variables here, so this is currently # hardcoded but should be possibly fixed if we want more or if # there is another source of truth we can use delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy() if len(delegated_vars) > 0: result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to} for k in ('ansible_host', ): result["_ansible_delegated_vars"][k] = delegated_vars.get(k) # and return display.debug("attempt loop complete, returning result") return result
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). The order of precedence is: - play->roles->get_default_vars (if there is a play context) - group_vars_files[host] (if there is a host context) - host_vars_files[host] (if there is a host context) - host->get_vars (if there is a host context) - fact_cache[host] (if there is a host context) - play vars (if there is a play context) - play vars_files (if there's no host context, ignore file names that cannot be templated) - task->get_vars (if there is a task context) - vars_cache[host] (if there is a host context) - extra vars ''' debug("in VariableManager get_vars()") cache_entry = self._get_cache_entry(play=play, host=host, task=task) if cache_entry in VARIABLE_CACHE and use_cache: debug("vars are cached, returning them now") return VARIABLE_CACHE[cache_entry] all_vars = dict() magic_variables = self._get_magic_variables( loader=loader, play=play, host=host, task=task, include_hostvars=include_hostvars, include_delegate_to=include_delegate_to, ) if play: # first we compile any vars specified in defaults/main.yml # for all roles within the specified play for role in play.get_roles(): all_vars = combine_vars(all_vars, role.get_default_vars()) # if we have a task in this context, and that task has a role, make # sure it sees its defaults above any other roles, as we previously # (v1) made sure each task had a copy of its roles default vars if task and task._role is not None: all_vars = combine_vars( all_vars, task._role.get_default_vars( dep_chain=task._block._dep_chain)) if host: # next, if a host is specified, we load any vars from group_vars # files and then any vars from host_vars files which may apply to # this host or the groups it belongs to # we merge in vars from groups specified in the inventory (INI or script) all_vars = combine_vars(all_vars, host.get_group_vars()) # then we merge in the special 'all' group_vars first, if they exist if 'all' in self._group_vars_files: data = preprocess_vars(self._group_vars_files['all']) for item in data: all_vars = combine_vars(all_vars, item) for group in sorted(host.get_groups(), key=lambda g: g.depth): if group.name in self._group_vars_files and group.name != 'all': for data in self._group_vars_files[group.name]: data = preprocess_vars(data) for item in data: all_vars = combine_vars(all_vars, item) # then we merge in vars from the host specified in the inventory (INI or script) all_vars = combine_vars(all_vars, host.get_vars()) # then we merge in the host_vars/<hostname> file, if it exists host_name = host.get_name() if host_name in self._host_vars_files: for data in self._host_vars_files[host_name]: data = preprocess_vars(data) for item in data: all_vars = combine_vars(all_vars, item) # finally, the facts caches for this host, if it exists try: host_facts = wrap_var(self._fact_cache.get(host.name, dict())) all_vars = combine_vars(all_vars, host_facts) except KeyError: pass if play: all_vars = combine_vars(all_vars, play.get_vars()) for vars_file_item in play.get_vars_files(): # create a set of temporary vars here, which incorporate the extra # and magic vars so we can properly template the vars_files entries temp_vars = combine_vars(all_vars, self._extra_vars) temp_vars = combine_vars(temp_vars, magic_variables) 
templar = Templar(loader=loader, variables=temp_vars) # we assume each item in the list is itself a list, as we # support "conditional includes" for vars_files, which mimics # the with_first_found mechanism. vars_file_list = vars_file_item if not isinstance(vars_file_list, list): vars_file_list = [vars_file_list] # now we iterate through the (potential) files, and break out # as soon as we read one from the list. If none are found, we # raise an error, which is silently ignored at this point. try: for vars_file in vars_file_list: vars_file = templar.template(vars_file) try: data = preprocess_vars( loader.load_from_file(vars_file)) if data is not None: for item in data: all_vars = combine_vars(all_vars, item) break except AnsibleFileNotFound as e: # we continue on loader failures continue except AnsibleParserError as e: raise else: raise AnsibleFileNotFound( "vars file %s was not found" % vars_file_item) except (UndefinedError, AnsibleUndefinedVariable): if host is not None and self._fact_cache.get( host.name, dict()).get('module_setup') and task is not None: raise AnsibleUndefinedVariable( "an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item) else: # we do not have a full context here, and the missing variable could be # because of that, so just show a warning and continue display.vvv( "skipping vars_file '%s' due to an undefined variable" % vars_file_item) continue if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): all_vars = combine_vars( all_vars, role.get_vars(include_params=False)) if task: if task._role: all_vars = combine_vars(all_vars, task._role.get_vars()) all_vars = combine_vars( all_vars, task._role.get_role_params(task._block._dep_chain)) all_vars = combine_vars(all_vars, task.get_vars()) if host: all_vars = combine_vars( all_vars, self._vars_cache.get(host.get_name(), dict())) all_vars = combine_vars( all_vars, self._nonpersistent_fact_cache.get(host.name, dict())) # special case for include tasks, where the include params # may be specified in the vars field for the task, which should # have higher precedence than the vars/np facts above if task: all_vars = combine_vars(all_vars, task.get_include_params()) all_vars = combine_vars(all_vars, self._extra_vars) all_vars = combine_vars(all_vars, magic_variables) # special case for the 'environment' magic variable, as someone # may have set it as a variable and we don't want to stomp on it if task: if 'environment' not in all_vars: all_vars['environment'] = task.environment else: display.warning( "The variable 'environment' appears to be used already, which is also used internally for environment variables set on the task/block/play. You should use a different variable name to avoid conflicts with this internal variable" ) # if we have a task and we're delegating to another host, figure out the # variables for that host now so we don't have to rely on hostvars later if task and task.delegate_to is not None and include_delegate_to: all_vars['ansible_delegated_vars'] = self._get_delegated_vars( loader, play, task, all_vars) #VARIABLE_CACHE[cache_entry] = all_vars if task or play: all_vars['vars'] = all_vars.copy() debug("done with get_vars()") return all_vars
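# --- Illustrative sketch (simplified, not Ansible's combine_vars) ---
# The method above builds its result by repeatedly merging variable sources in
# ascending precedence order, so keys from later sources win. This toy merge
# assumes the default 'replace' hash behaviour; the layer names and values are
# made up purely to show the effect of the layering listed in the docstring.
def combine_vars_replace(a, b):
    # keys in b override keys in a
    result = a.copy()
    result.update(b)
    return result


layers = [
    {'color': 'from_role_defaults', 'port': 22},   # role defaults (lowest precedence)
    {'color': 'from_group_vars'},                  # group_vars
    {'color': 'from_host_vars'},                   # host_vars
    {'color': 'from_extra_vars'},                  # extra vars (highest precedence)
]

merged = {}
for layer in layers:
    merged = combine_vars_replace(merged, layer)

assert merged == {'color': 'from_extra_vars', 'port': 22}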
def process_include_results(results, tqm, iterator, inventory, loader, variable_manager): included_files = [] def get_original_host(host): if host.name in inventory._hosts_cache: return inventory._hosts_cache[host.name] else: return inventory.get_host(host.name) for res in results: original_host = res._host original_task = res._task if original_task.action in ('include', 'include_tasks', 'include_role'): if original_task.loop: if 'results' not in res._result: continue include_results = res._result['results'] else: include_results = [res._result] for include_result in include_results: # if the task result was skipped or failed, continue if 'skipped' in include_result and include_result[ 'skipped'] or 'failed' in include_result and include_result[ 'failed']: continue task_vars = variable_manager.get_vars(play=iterator._play, host=original_host, task=original_task) templar = Templar(loader=loader, variables=task_vars) include_variables = include_result.get( 'include_variables', dict()) loop_var = 'item' if original_task.loop_control: loop_var = original_task.loop_control.loop_var or 'item' if loop_var in include_result: task_vars[loop_var] = include_variables[ loop_var] = include_result[loop_var] if original_task.action in ('include', 'include_tasks'): include_file = None if original_task: if original_task.static: continue if original_task._parent: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._parent cumulative_path = None while parent_include is not None: if not isinstance(parent_include, TaskInclude): parent_include = parent_include._parent continue if isinstance(parent_include, IncludeRole): parent_include_dir = parent_include._role_path else: parent_include_dir = os.path.dirname( templar.template( parent_include.args.get( '_raw_params'))) if cumulative_path is not None and not os.path.isabs( cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) else: cumulative_path = parent_include_dir include_target = templar.template( include_result['include']) if original_task._role: new_basedir = os.path.join( original_task._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative( new_basedir, 'tasks', include_target) else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break else: parent_include = parent_include._parent if include_file is None: if original_task._role: include_target = templar.template( include_result['include']) include_file = loader.path_dwim_relative( original_task._role._role_path, 'tasks', include_target) else: include_file = loader.path_dwim( include_result['include']) include_file = templar.template(include_file) inc_file = IncludedFile(include_file, include_variables, original_task) else: inc_file = IncludedFile("role", include_variables, original_task, is_role=True) try: pos = included_files.index(inc_file) inc_file = included_files[pos] except ValueError: included_files.append(inc_file) inc_file.add_host(original_host) return included_files
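# --- Illustrative sketch (hypothetical stand-in for IncludedFile) ---
# The dedup pattern used above: include results that point at the same file
# with the same variables are collapsed into one entry via list.index() and
# __eq__, and every host that produced that include is attached to the single
# surviving entry. FakeIncludedFile and the sample data below are invented.
class FakeIncludedFile:
    def __init__(self, filename, args):
        self._filename = filename
        self._args = args
        self._hosts = []

    def __eq__(self, other):
        return self._filename == other._filename and self._args == other._args

    def add_host(self, host):
        if host not in self._hosts:
            self._hosts.append(host)


results = [('host1', 'tasks/a.yml', {}), ('host2', 'tasks/a.yml', {}), ('host1', 'tasks/b.yml', {})]
included_files = []
for host, filename, args in results:
    inc_file = FakeIncludedFile(filename, args)
    try:
        # reuse the already-collected entry if an equal one exists
        inc_file = included_files[included_files.index(inc_file)]
    except ValueError:
        included_files.append(inc_file)
    inc_file.add_host(host)

assert len(included_files) == 2
assert included_files[0]._hosts == ['host1', 'host2']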
class TestNetworkFacts(unittest.TestCase):
    task = MagicMock(Task)
    play_context = MagicMock()
    play_context.check_mode = False
    connection = MagicMock()
    fake_loader = DictDataLoader({})
    templar = Templar(loader=fake_loader)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch.object(module_common, '_get_collection_metadata', return_value={})
    def test_network_gather_facts_smart_facts_module(self, mock_collection_metadata):
        self.fqcn_task_vars = {'ansible_network_os': 'ios'}
        self.task.action = 'gather_facts'
        self.task.async_val = False
        self.task.args = {}

        plugin = GatherFactsAction(self.task, self.connection, self.play_context,
                                   loader=None, templar=self.templar, shared_loader_obj=None)
        get_module_args = MagicMock()
        plugin._get_module_args = get_module_args
        plugin._execute_module = MagicMock()
        res = plugin.run(task_vars=self.fqcn_task_vars)

        # assert the gather_facts config is 'smart'
        facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.fqcn_task_vars)
        self.assertEqual(facts_modules, ['smart'])

        # assert the correct module was found
        self.assertEqual(get_module_args.call_count, 1)
        self.assertEqual(
            get_module_args.call_args.args,
            ('ansible.legacy.ios_facts', {'ansible_network_os': 'ios'}),
        )

    @patch.object(module_common, '_get_collection_metadata', return_value={})
    def test_network_gather_facts_smart_facts_module_fqcn(self, mock_collection_metadata):
        self.fqcn_task_vars = {'ansible_network_os': 'cisco.ios.ios'}
        self.task.action = 'gather_facts'
        self.task.async_val = False
        self.task.args = {}

        plugin = GatherFactsAction(self.task, self.connection, self.play_context,
                                   loader=None, templar=self.templar, shared_loader_obj=None)
        get_module_args = MagicMock()
        plugin._get_module_args = get_module_args
        plugin._execute_module = MagicMock()
        res = plugin.run(task_vars=self.fqcn_task_vars)

        # assert the gather_facts config is 'smart'
        facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.fqcn_task_vars)
        self.assertEqual(facts_modules, ['smart'])

        # assert the correct module was found
        self.assertEqual(get_module_args.call_count, 1)
        self.assertEqual(
            get_module_args.call_args.args,
            ('cisco.ios.ios_facts', {'ansible_network_os': 'cisco.ios.ios'}),
        )
def setUp(self):
    self._tests = Templar(loader=None).environment.tests
def ansible_template(basedir, varname, templatevars, **kwargs):
    dl = DataLoader()
    dl.set_basedir(basedir)
    templar = Templar(dl, variables=templatevars)
    return templar.template(varname, **kwargs)
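# --- Example use of the helper above ---
# A minimal call assuming the helper and its Ansible imports (DataLoader,
# Templar) are available in this module; the template string and variable
# values are invented for illustration.
rendered = ansible_template(
    basedir='.',
    varname='{{ greeting }}, {{ name }}!',
    templatevars={'greeting': 'Hello', 'name': 'world'},
)
assert rendered == 'Hello, world!'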
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). The order of precedence is: - play->roles->get_default_vars (if there is a play context) - group_vars_files[host] (if there is a host context) - host_vars_files[host] (if there is a host context) - host->get_vars (if there is a host context) - fact_cache[host] (if there is a host context) - play vars (if there is a play context) - play vars_files (if there's no host context, ignore file names that cannot be templated) - task->get_vars (if there is a task context) - vars_cache[host] (if there is a host context) - extra vars ''' display.debug("in VariableManager get_vars()") all_vars = dict() magic_variables = self._get_magic_variables( play=play, host=host, task=task, include_hostvars=include_hostvars, include_delegate_to=include_delegate_to, ) if play: # first we compile any vars specified in defaults/main.yml # for all roles within the specified play for role in play.get_roles(): all_vars = combine_vars(all_vars, role.get_default_vars()) # if we have a task in this context, and that task has a role, make # sure it sees its defaults above any other roles, as we previously # (v1) made sure each task had a copy of its roles default vars if task and task._role is not None and (play or task.action == 'include_role'): all_vars = combine_vars( all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain())) if host: # INIT WORK (use unsafe as we are going to copy/merge vars, no need to x2 copy) # basedir, THE 'all' group and the rest of groups for a host, used below basedir = self._loader.get_basedir() all_group = self._inventory.groups.get('all') host_groups = sort_groups( [g for g in host.get_groups() if g.name not in ['all']]) def _get_plugin_vars(plugin, path, entities): data = {} try: data = plugin.get_vars(self._loader, path, entities) except AttributeError: try: for entity in entities: if isinstance(entity, Host): data.update(plugin.get_host_vars(entity.name)) else: data.update(plugin.get_group_vars(entity.name)) except AttributeError: if hasattr(plugin, 'run'): raise AnsibleError( "Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path)) else: raise AnsibleError( "Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path)) return data # internal fuctions that actually do the work def _plugins_inventory(entities): ''' merges all entities by inventory source ''' data = {} for inventory_dir in self._inventory._sources: if ',' in inventory_dir: # skip host lists continue elif not os.path.isdir( inventory_dir ): # always pass 'inventory directory' inventory_dir = os.path.dirname(inventory_dir) for plugin in vars_loader.all(): data = combine_vars( data, _get_plugin_vars(plugin, inventory_dir, entities)) return data def _plugins_play(entities): ''' merges all entities adjacent to play ''' data = {} for plugin in vars_loader.all(): data = combine_vars( data, _get_plugin_vars(plugin, basedir, entities)) return data # configurable functions that are sortable via config, rememer to add to _ALLOWED if expanding this list def all_inventory(): return all_group.get_vars() def all_plugins_inventory(): return _plugins_inventory([all_group]) def all_plugins_play(): return _plugins_play([all_group]) def groups_inventory(): ''' 
gets group vars from inventory ''' return get_group_vars(host_groups) def groups_plugins_inventory(): ''' gets plugin sources from inventory for groups ''' return _plugins_inventory(host_groups) def groups_plugins_play(): ''' gets plugin sources from play for groups ''' return _plugins_play(host_groups) def plugins_by_groups(): ''' merges all plugin sources by group, This should be used instead, NOT in combination with the other groups_plugins* functions ''' data = {} for group in host_groups: data[group] = combine_vars(data[group], _plugins_inventory(group)) data[group] = combine_vars(data[group], _plugins_play(group)) return data # Merge as per precedence config # only allow to call the functions we want exposed for entry in C.VARIABLE_PRECEDENCE: if entry in self._ALLOWED: display.debug('Calling %s to load vars for %s' % (entry, host.name)) all_vars = combine_vars(all_vars, locals()[entry]()) else: display.warning( 'Ignoring unknown variable precedence entry: %s' % (entry)) # host vars, from inventory, inventory adjacent and play adjacent via plugins all_vars = combine_vars(all_vars, host.get_vars()) all_vars = combine_vars(all_vars, _plugins_inventory([host])) all_vars = combine_vars(all_vars, _plugins_play([host])) # finally, the facts caches for this host, if it exists try: host_facts = wrap_var(self._fact_cache.get(host.name, {})) # push facts to main namespace all_vars = combine_vars(all_vars, host_facts) except KeyError: pass if play: all_vars = combine_vars(all_vars, play.get_vars()) for vars_file_item in play.get_vars_files(): # create a set of temporary vars here, which incorporate the extra # and magic vars so we can properly template the vars_files entries temp_vars = combine_vars(all_vars, self._extra_vars) temp_vars = combine_vars(temp_vars, magic_variables) templar = Templar(loader=self._loader, variables=temp_vars) # we assume each item in the list is itself a list, as we # support "conditional includes" for vars_files, which mimics # the with_first_found mechanism. vars_file_list = vars_file_item if not isinstance(vars_file_list, list): vars_file_list = [vars_file_list] # now we iterate through the (potential) files, and break out # as soon as we read one from the list. If none are found, we # raise an error, which is silently ignored at this point. 
try: for vars_file in vars_file_list: vars_file = templar.template(vars_file) try: data = preprocess_vars( self._loader.load_from_file(vars_file, unsafe=True)) if data is not None: for item in data: all_vars = combine_vars(all_vars, item) break except AnsibleFileNotFound: # we continue on loader failures continue except AnsibleParserError: raise else: # if include_delegate_to is set to False, we ignore the missing # vars file here because we're working on a delegated host if include_delegate_to: raise AnsibleFileNotFound( "vars file %s was not found" % vars_file_item) except (UndefinedError, AnsibleUndefinedVariable): if host is not None and self._fact_cache.get( host.name, dict()).get('module_setup') and task is not None: raise AnsibleUndefinedVariable( "an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item) else: # we do not have a full context here, and the missing variable could be because of that # so just show a warning and continue display.vvv( "skipping vars_file '%s' due to an undefined variable" % vars_file_item) continue display.vvv("Read vars_file '%s'" % vars_file_item) # By default, we now merge in all vars from all roles in the play, # unless the user has disabled this via a config option if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): all_vars = combine_vars( all_vars, role.get_vars(include_params=False)) # next, we merge in the vars from the role, which will specifically # follow the role dependency chain, and then we merge in the tasks # vars (which will look at parent blocks/task includes) if task: if task._role: all_vars = combine_vars( all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False)) all_vars = combine_vars(all_vars, task.get_vars()) # next, we merge in the vars cache (include vars) and nonpersistent # facts cache (set_fact/register), in that order if host: all_vars = combine_vars( all_vars, self._vars_cache.get(host.get_name(), dict())) all_vars = combine_vars( all_vars, self._nonpersistent_fact_cache.get(host.name, dict())) # next, we merge in role params and task include params if task: if task._role: all_vars = combine_vars( all_vars, task._role.get_role_params(task.get_dep_chain())) # special case for include tasks, where the include params # may be specified in the vars field for the task, which should # have higher precedence than the vars/np facts above all_vars = combine_vars(all_vars, task.get_include_params()) # extra vars all_vars = combine_vars(all_vars, self._extra_vars) # magic variables all_vars = combine_vars(all_vars, magic_variables) # special case for the 'environment' magic variable, as someone # may have set it as a variable and we don't want to stomp on it if task: all_vars['environment'] = task.environment # if we have a task and we're delegating to another host, figure out the # variables for that host now so we don't have to rely on hostvars later if task and task.delegate_to is not None and include_delegate_to: all_vars['ansible_delegated_vars'] = self._get_delegated_vars( play, task, all_vars) # 'vars' magic var if task or play: # has to be copy, otherwise recursive ref all_vars['vars'] = all_vars.copy() display.debug("done with get_vars()") return all_vars
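# --- Illustrative sketch of the configurable precedence dispatch above ---
# Entries from the precedence setting are only looked up and called when they
# appear in an allow-list; anything else is skipped with a message. This mirrors
# the locals()-based dispatch in get_vars(), but demo_precedence, its nested
# source functions, and the data they return are all invented for this example.
def demo_precedence(order):
    def all_inventory():
        return {'scope': 'all_inventory'}

    def groups_inventory():
        return {'scope': 'groups_inventory', 'extra': True}

    allowed = frozenset(('all_inventory', 'groups_inventory'))
    sources = locals()  # nested functions are reachable by name, as in the original
    merged = {}
    for entry in order:
        if entry in allowed:
            merged.update(sources[entry]())
        else:
            print('Ignoring unknown variable precedence entry: %s' % entry)
    return merged


result = demo_precedence(['all_inventory', 'groups_inventory', 'bogus_entry'])
assert result['scope'] == 'groups_inventory'  # later sources override earlier ones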
def run(self): ''' Run the given playbook, based on the settings in the play which may limit the runs to serialized groups, etc. ''' result = 0 entrylist = [] entry = {} try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) self._inventory.set_playbook_basedir( os.path.dirname(playbook_path)) if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] else: # make sure the tqm has callbacks loaded self._tqm.load_callbacks() self._tqm.send_callback('v2_playbook_on_start', pb) i = 1 plays = pb.get_plays() display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path))) for play in plays: if play._included_path is not None: self._loader.set_basedir(play._included_path) else: self._loader.set_basedir(pb._basedir) # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() if play.vars_prompt: for var in play.vars_prompt: vname = var['name'] prompt = var.get("prompt", vname) default = var.get("default", None) private = var.get("private", True) confirm = var.get("confirm", False) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in self._variable_manager.extra_vars: if self._tqm: self._tqm.send_callback( 'v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) play.vars[vname] = display.do_var_prompt( vname, private, prompt, encrypt, confirm, salt_size, salt, default) else: # we are either in --list-<option> or syntax check play.vars[vname] = default # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. all_vars = self._variable_manager.get_vars( loader=self._loader, play=play) templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) if self._options.syntax: continue if self._tqm is None: # we are just doing a listing entry['plays'].append(new_play) else: self._tqm._unreachable_hosts.update( self._unreachable_hosts) previously_failed = len(self._tqm._failed_hosts) previously_unreachable = len( self._tqm._unreachable_hosts) break_play = False # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: self._tqm.send_callback( 'v2_playbook_on_play_start', new_play) self._tqm.send_callback( 'v2_playbook_on_no_hosts_matched') break # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) # and run it... result = self._tqm.run(play=play) # break the play if the result equals the special return code if result == self._tqm.RUN_FAILED_BREAK_PLAY: result = self._tqm.RUN_FAILED_HOSTS break_play = True # check the number of failures here, to see if they're above the maximum # failure percentage allowed, or if any errors are fatal. 
If either of those # conditions are met, we break out, otherwise we only break out if the entire # batch failed failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \ (previously_failed + previously_unreachable) if new_play.max_fail_percentage is not None and \ int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0): break_play = True break elif len(batch) == failed_hosts_count: break_play = True break # save the unreachable hosts from this batch self._unreachable_hosts.update( self._tqm._unreachable_hosts) # if the last result wasn't zero or 3 (some hosts were unreachable), # break out of the serial batch loop if result not in (self._tqm.RUN_OK, self._tqm.RUN_UNREACHABLE_HOSTS): break if break_play: break i = i + 1 # per play if entry: entrylist.append(entry) # per playbook # send the stats callback for this playbook if self._tqm is not None: if C.RETRY_FILES_ENABLED: retries = set(self._tqm._failed_hosts.keys()) retries.update(self._tqm._unreachable_hosts.keys()) retries = sorted(retries) if len(retries) > 0: if C.RETRY_FILES_SAVE_PATH: basedir = C.shell_expand( C.RETRY_FILES_SAVE_PATH) elif playbook_path: basedir = os.path.dirname(playbook_path) else: basedir = '~/' (retry_name, _) = os.path.splitext( os.path.basename(playbook_path)) filename = os.path.join(basedir, "%s.retry" % retry_name) if self._generate_retry_inventory( filename, retries): display.display( "\tto retry, use: --limit @%s\n" % filename) self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) # if the last result wasn't zero, break out of the playbook file name loop if result != 0: break if entrylist: return entrylist finally: if self._tqm is not None: self._tqm.cleanup() if self._loader: self._loader.cleanup_all_tmp_files() if self._options.syntax: display.display("No issues encountered") return result return result
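# --- Illustrative sketch of the .retry file path derivation above ---
# Failed and unreachable hosts are collected and written to "<playbook>.retry",
# either next to the playbook or under the configured save path. retry_file_path
# is a hypothetical helper; the paths below are examples only.
import os


def retry_file_path(playbook_path, save_path=None):
    basedir = save_path or (os.path.dirname(playbook_path) or '~/')
    retry_name, _ = os.path.splitext(os.path.basename(playbook_path))
    return os.path.join(basedir, '%s.retry' % retry_name)


assert retry_file_path('/srv/plays/site.yml') == '/srv/plays/site.retry'
assert retry_file_path('/srv/plays/site.yml', save_path='/tmp/retries') == '/tmp/retries/site.retry'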
def _run_playbook(self, yamlfile, is_checksyntax=False):
    if not is_checksyntax:
        if self.options.flush_cache:
            self._flush_cache()

        self._loading_callback(yamlfile)
        # self._unreachable_hosts = dict()

        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.callback,
        )
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
    else:
        tqm = None

    try:
        pb = Playbook.load(yamlfile, variable_manager=self.variable_manager, loader=self.loader)
        self.inventory.set_playbook_basedir(yamlfile)

        if tqm is not None:
            tqm.load_callbacks()
            tqm.send_callback('v2_playbook_on_start', pb)
            self.logger.debug(self.log_prefix + 'task execution started')

        plays = pb.get_plays()
        for play in plays:
            if play._included_path is not None:
                self.loader.set_basedir(play._included_path)
            else:
                self.loader.set_basedir(pb._basedir)

            self.inventory.remove_restriction()

            all_vars = self.variable_manager.get_vars(loader=self.loader, play=play)
            templar = Templar(loader=self.loader, variables=all_vars)
            new_play = play.copy()
            new_play.post_validate(templar)

            if is_checksyntax or tqm is None:
                return True

            # tqm._unreachable_hosts.update(self._unreachable_hosts)

            batches = self._get_serialized_batches(new_play)
            if len(batches) == 0:
                tqm.send_callback('v2_playbook_on_play_start', new_play)
                tqm.send_callback('v2_playbook_on_no_hosts_matched')
                self.logger.debug(self.log_prefix + 'task started, but no hosts matched')
                # logging for this case still needs to be fleshed out
                continue

            for batch in batches:
                self.inventory.restrict_to_hosts(batch)
                try:
                    tqm.run(play)
                except Exception as e:
                    print(e)

            # self._unreachable_hosts.update(tqm._unreachable_hosts)

        tqm.send_callback('v2_playbook_on_stats', tqm._stats)
        self.logger.debug(self.log_prefix + 'task execution finished')
        # self.host_list = self.inventory.get_hosts(play.hosts)

        tqm.cleanup()
        self.loader.cleanup_all_tmp_files()
        return True
    except Exception as e:
        print(e)
        return False
def _process_pending_results(self, iterator, one_pass=False, max_passes=None): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). ''' ret_results = [] def get_original_host(host_name): host_name = to_text(host_name) if host_name in self._inventory._hosts_cache: return self._inventory._hosts_cache[host_name] else: return self._inventory.get_host(host_name) def search_handler_blocks_by_name(handler_name, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: if handler_task.name: handler_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, task=handler_task) templar = Templar(loader=self._loader, variables=handler_vars) try: # first we check with the full result of get_name(), which may # include the role name (if the handler is from a role). If that # is not found, we resort to the simple name field, which doesn't # have anything extra added to it. target_handler_name = templar.template( handler_task.name) if target_handler_name == handler_name: return handler_task else: target_handler_name = templar.template( handler_task.get_name()) if target_handler_name == handler_name: return handler_task except (UndefinedError, AnsibleUndefinedVariable): # We skip this handler due to the fact that it may be using # a variable in the name that was conditionally included via # set_fact or some other method, and we don't want to error # out unnecessarily continue return None def search_handler_blocks_by_uuid(handler_uuid, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: if handler_uuid == handler_task._uuid: return handler_task return None def parent_handler_match(target_handler, handler_name): if target_handler: if isinstance(target_handler, (TaskInclude, IncludeRole)): try: handler_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, task=target_handler) templar = Templar(loader=self._loader, variables=handler_vars) target_handler_name = templar.template( target_handler.name) if target_handler_name == handler_name: return True else: target_handler_name = templar.template( target_handler.get_name()) if target_handler_name == handler_name: return True except (UndefinedError, AnsibleUndefinedVariable): pass return parent_handler_match(target_handler._parent, handler_name) else: return False # a Templar class to use for templating things later, as we're using # original/non-validated objects here on the manager side. We set the # variables in use later inside the loop below templar = Templar(loader=self._loader) cur_pass = 0 while True: try: self._results_lock.acquire() task_result = self._results.pop() except IndexError: break finally: self._results_lock.release() # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc. 
original_host = get_original_host(task_result._host) original_task = iterator.get_original_task(original_host, task_result._task) task_result._host = original_host task_result._task = original_task # get the correct loop var for use later if original_task.loop_control: loop_var = original_task.loop_control.loop_var or 'item' else: loop_var = 'item' # get the vars for this task/host pair, make them the active set of vars for our templar above task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task) self.add_tqm_variables(task_vars, play=iterator._play) templar.set_available_variables(task_vars) # send callbacks for 'non final' results if '_ansible_retry' in task_result._result: self._tqm.send_callback('v2_runner_retry', task_result) continue elif '_ansible_item_result' in task_result._result: if task_result.is_failed() or task_result.is_unreachable(): self._tqm.send_callback('v2_runner_item_on_failed', task_result) elif task_result.is_skipped(): self._tqm.send_callback('v2_runner_item_on_skipped', task_result) else: if 'diff' in task_result._result: if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._tqm.send_callback('v2_runner_item_on_ok', task_result) continue run_once = templar.template(original_task.run_once) if original_task.register: host_list = self.get_task_hosts(iterator, original_host, original_task) clean_copy = strip_internal_keys(task_result._result) if 'invocation' in clean_copy: del clean_copy['invocation'] for target_host in host_list: self._variable_manager.set_nonpersistent_facts( target_host, {original_task.register: clean_copy}) # all host status messages contain 2 entries: (msg, task_result) role_ran = False if task_result.is_failed(): role_ran = True ignore_errors = templar.template(original_task.ignore_errors) if not ignore_errors: display.debug("marking %s as failed" % original_host.name) if run_once: # if we're using run_once, we have to fail every host here for h in self._inventory.get_hosts( iterator._play.hosts): if h.name not in self._tqm._unreachable_hosts: state, _ = iterator.get_next_task_for_host( h, peek=True) iterator.mark_host_failed(h) state, new_task = iterator.get_next_task_for_host( h, peek=True) else: iterator.mark_host_failed(original_host) # increment the failed count for this host self._tqm._stats.increment('failures', original_host.name) # grab the current state and if we're iterating on the rescue portion # of a block then we save the failed task in a special var for use # within the rescue/always state, _ = iterator.get_next_task_for_host(original_host, peek=True) if iterator.is_failed( original_host ) and state and state.run_state == iterator.ITERATING_COMPLETE: self._tqm._failed_hosts[original_host.name] = True if state and state.run_state == iterator.ITERATING_RESCUE: self._variable_manager.set_nonpersistent_facts( original_host, dict( ansible_failed_task=original_task.serialize(), ansible_failed_result=task_result._result, ), ) else: self._tqm._stats.increment('ok', original_host.name) if 'changed' in task_result._result and task_result._result[ 'changed']: self._tqm._stats.increment('changed', original_host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors) elif task_result.is_unreachable(): self._tqm._unreachable_hosts[original_host.name] = True iterator._play._removed_hosts.append(original_host.name) self._tqm._stats.increment('dark', original_host.name) 
self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif task_result.is_skipped(): self._tqm._stats.increment('skipped', original_host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) else: role_ran = True if original_task.loop: # this task had a loop, and has more than one result, so # loop over all of them instead of a single result result_items = task_result._result.get('results', []) else: result_items = [task_result._result] for result_item in result_items: if '_ansible_notify' in result_item: if task_result.is_changed(): # The shared dictionary for notified handlers is a proxy, which # does not detect when sub-objects within the proxy are modified. # So, per the docs, we reassign the list so the proxy picks up and # notifies all other threads for handler_name in result_item['_ansible_notify']: found = False # Find the handler using the above helper. First we look up the # dependency chain of the current task (if it's from a role), otherwise # we just look through the list of handlers in the current play/all # roles and use the first one that matches the notify name target_handler = search_handler_blocks_by_name( handler_name, iterator._play.handlers) if target_handler is not None: found = True if original_host not in self._notified_handlers[ target_handler._uuid]: self._notified_handlers[ target_handler._uuid].append( original_host) # FIXME: should this be a callback? display.vv("NOTIFIED HANDLER %s" % (handler_name, )) else: # As there may be more than one handler with the notified name as the # parent, so we just keep track of whether or not we found one at all for target_handler_uuid in self._notified_handlers: target_handler = search_handler_blocks_by_uuid( target_handler_uuid, iterator._play.handlers) if target_handler and parent_handler_match( target_handler, handler_name): self._notified_handlers[ target_handler._uuid].append( original_host) display.vv( "NOTIFIED HANDLER %s" % (target_handler.get_name(), )) found = True if handler_name in self._listening_handlers: for listening_handler_uuid in self._listening_handlers[ handler_name]: listening_handler = search_handler_blocks_by_uuid( listening_handler_uuid, iterator._play.handlers) if listening_handler is not None: found = True else: continue if original_host not in self._notified_handlers[ listening_handler._uuid]: self._notified_handlers[ listening_handler. 
_uuid].append(original_host) display.vv( "NOTIFIED HANDLER %s" % (listening_handler.get_name(), )) # and if none were found, then we raise an error if not found: msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name if C.ERROR_ON_MISSING_HANDLER: raise AnsibleError(msg) else: display.warning(msg) if 'add_host' in result_item: # this task added a new host (add_host module) new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) elif 'add_group' in result_item: # this task added a new group (group_by module) self._add_group(original_host, result_item) if 'ansible_facts' in result_item: # if delegated fact and we are delegating facts, we need to change target host for them if original_task.delegate_to is not None and original_task.delegate_facts: item = result_item.get(loop_var, None) if item is not None: task_vars[loop_var] = item host_name = templar.template( original_task.delegate_to) actual_host = self._inventory.get_host(host_name) if actual_host is None: actual_host = Host(name=host_name) else: actual_host = original_host host_list = self.get_task_hosts( iterator, actual_host, original_task) if original_task.action == 'include_vars': for (var_name, var_value) in iteritems( result_item['ansible_facts']): # find the host we're actually referring too here, which may # be a host that is not really in inventory at all for target_host in host_list: self._variable_manager.set_host_variable( target_host, var_name, var_value) else: for target_host in host_list: if original_task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts( target_host, result_item['ansible_facts'].copy()) else: self._variable_manager.set_host_facts( target_host, result_item['ansible_facts'].copy()) if 'ansible_stats' in result_item and 'data' in result_item[ 'ansible_stats'] and result_item['ansible_stats'][ 'data']: if 'per_host' not in result_item[ 'ansible_stats'] or result_item[ 'ansible_stats']['per_host']: host_list = self.get_task_hosts( iterator, original_host, original_task) else: host_list = [None] data = result_item['ansible_stats']['data'] aggregate = 'aggregate' in result_item[ 'ansible_stats'] and result_item['ansible_stats'][ 'aggregate'] for myhost in host_list: for k in data.keys(): if aggregate: self._tqm._stats.update_custom_stats( k, data[k], myhost) else: self._tqm._stats.set_custom_stats( k, data[k], myhost) if 'diff' in task_result._result: if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) if original_task.action not in ['include', 'include_role']: self._tqm._stats.increment('ok', original_host.name) if 'changed' in task_result._result and task_result._result[ 'changed']: self._tqm._stats.increment('changed', original_host.name) # finally, send the ok for this task self._tqm.send_callback('v2_runner_on_ok', task_result) self._pending_results -= 1 if original_host.name in self._blocked_hosts: del self._blocked_hosts[original_host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if original_task._role is not None and role_ran: #TODO: and original_task.action != 'include_role':? 
# lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[ original_task._role._role_name]): if role_obj._uuid == original_task._role._uuid: role_obj._had_task_run[original_host.name] = True ret_results.append(task_result) if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes: break cur_pass += 1 return ret_results
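# --- Illustrative sketch of the handler notification bookkeeping above ---
# Notified handlers are tracked per handler uuid, and a host is appended only
# once per handler, so each handler runs at most once per host no matter how
# many times a looped task notifies it. The dict and values below are
# simplified stand-ins for the strategy's shared structures.
notified_handlers = {'handler-uuid-1': []}


def notify(handler_uuid, host):
    if host not in notified_handlers[handler_uuid]:
        notified_handlers[handler_uuid].append(host)


for _ in range(3):  # the same task notifying repeatedly, e.g. from a loop
    notify('handler-uuid-1', 'web01')
notify('handler-uuid-1', 'web02')

assert notified_handlers['handler-uuid-1'] == ['web01', 'web02']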
def run(self, iterator, play_context): ''' The linear strategy is simple - get the next task and queue it for all hosts, then wait for the queue to drain before moving on to the next task ''' # iteratate over each task, while there is one left to run result = self._tqm.RUN_OK work_to_do = True while work_to_do and not self._tqm._terminated: try: display.debug("getting the remaining hosts for this loop") hosts_left = self.get_hosts_left(iterator) display.debug("done getting the remaining hosts for this loop") # queue up this task for each host in the inventory callback_sent = False work_to_do = False host_results = [] host_tasks = self._get_next_task_lockstep(hosts_left, iterator) # skip control skip_rest = False choose_step = True # flag set if task is set to any_errors_fatal any_errors_fatal = False results = [] for (host, task) in host_tasks: if not task: continue if self._tqm._terminated: break run_once = False work_to_do = True # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. try: action = action_loader.get(task.action, class_only=True) except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin action = None # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(host): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: display.debug( "'%s' skipped because role has already run" % task) continue if task.action == 'meta': # for the linear strategy, we run meta tasks just once and for # all hosts currently being iterated over rather than one host results.extend( self._execute_meta(task, play_context, iterator, host)) if task.args.get('_raw_params', None) != 'noop': run_once = True else: # handle step if needed, skip meta actions as they are used internally if self._step and choose_step: if self._take_step(task): choose_step = False else: skip_rest = True break display.debug("getting variables") task_vars = self._variable_manager.get_vars( play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") run_once = templar.template( task.run_once) or action and getattr( action, 'BYPASS_HOST_LOOP', False) if (task.any_errors_fatal or run_once) and not task.ignore_errors: any_errors_fatal = True if not callback_sent: display.debug( "sending task start callback, copying the task so we can template it temporarily" ) saved_name = task.name display.debug( "done copying, going to template now") try: task.name = to_text(templar.template( task.name, fail_on_undefined=False), nonstring='empty') display.debug("done templating") except: # just ignore any errors during task name templating, # we don't care if it just shows the raw name display.debug( "templating failed for some reason") display.debug("here goes the callback...") self._tqm.send_callback( 'v2_playbook_on_task_start', task, is_conditional=False) task.name = saved_name callback_sent = True display.debug("sending task start callback") 
self._blocked_hosts[host.get_name()] = True self._queue_task(host, task, task_vars, play_context) del task_vars # if we're bypassing the host loop, break out now if run_once: break results += self._process_pending_results( iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1))) # go to next host/task group if skip_rest: continue display.debug( "done queuing things up, now waiting for results queue to drain" ) if self._pending_results > 0: results += self._wait_on_pending_results(iterator) host_results.extend(results) self.update_active_connections(results) try: included_files = IncludedFile.process_include_results( host_results, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError as e: # this is a fatal error, so we abort here regardless of block state return self._tqm.RUN_ERROR include_failure = False if len(included_files) > 0: display.debug("we have included files to process") # A noop task for use in padding dynamic includes noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) display.debug("generating all_blocks data") all_blocks = dict((host, []) for host in hosts_left) display.debug("done generating all_blocks data") for included_file in included_files: display.debug("processing included file: %s" % included_file._filename) # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: if included_file._is_role: new_ir = included_file._task.copy() new_ir.vars.update(included_file._args) new_blocks, handler_blocks = new_ir.get_block_list( play=iterator._play, variable_manager=self._variable_manager, loader=self._loader, ) self._tqm.update_handler_list([ handler for handler_block in handler_blocks for handler in handler_block.block ]) else: new_blocks = self._load_included_file( included_file, iterator=iterator) display.debug( "iterating over new_blocks loaded from include file" ) for new_block in new_blocks: task_vars = self._variable_manager.get_vars( play=iterator._play, task=included_file._task, ) display.debug("filtering new block on tags") final_block = new_block.filter_tagged_tasks( play_context, task_vars) display.debug( "done filtering new block on tags") noop_block = Block(parent_block=task._parent) noop_block.block = [ noop_task for t in new_block.block ] noop_block.always = [ noop_task for t in new_block.always ] noop_block.rescue = [ noop_task for t in new_block.rescue ] for host in hosts_left: if host in included_file._hosts: all_blocks[host].append(final_block) else: all_blocks[host].append(noop_block) display.debug( "done iterating over new_blocks loaded from include file" ) except AnsibleError as e: for host in included_file._hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) display.error(to_text(e), wrap_text=False) include_failure = True continue # finally go through all of the hosts and append the # accumulated blocks to their list of tasks display.debug( "extending task lists for all hosts with included blocks" ) for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) display.debug("done extending task lists") display.debug("done processing included files") display.debug("results queue empty") display.debug("checking for any_errors_fatal") failed_hosts = [] unreachable_hosts = [] for res in results: if res.is_failed() and iterator.is_failed(res._host): failed_hosts.append(res._host.name) elif 
res.is_unreachable(): unreachable_hosts.append(res._host.name) # if any_errors_fatal and we had an error, mark all hosts as failed if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0): for host in hosts_left: (s, _) = iterator.get_next_task_for_host(host, peek=True) if s.run_state != iterator.ITERATING_RESCUE or \ s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0: self._tqm._failed_hosts[host.name] = True result |= self._tqm.RUN_FAILED_BREAK_PLAY display.debug("done checking for any_errors_fatal") display.debug("checking for max_fail_percentage") if iterator._play.max_fail_percentage is not None and len( results) > 0: percentage = iterator._play.max_fail_percentage / 100.0 if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage: for host in hosts_left: # don't double-mark hosts, or the iterator will potentially # fail them out of the rescue/always states if host.name not in failed_hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) self._tqm.send_callback( 'v2_playbook_on_no_hosts_remaining') result |= self._tqm.RUN_FAILED_BREAK_PLAY display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage)) display.debug("done checking for max_fail_percentage") display.debug( "checking to see if all hosts have failed and the running result is not ok" ) if result != self._tqm.RUN_OK and len( self._tqm._failed_hosts) >= len(hosts_left): display.debug("^ not ok, so returning result now") self._tqm.send_callback( 'v2_playbook_on_no_hosts_remaining') return result display.debug("done checking to see if all hosts have failed") except (IOError, EOFError) as e: display.debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed return self._tqm.RUN_UNKNOWN_ERROR # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered return super(StrategyModule, self).run(iterator, play_context, result)
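# --- Illustrative sketch of the lock-step noop padding above ---
# Hosts that did not trigger the include receive a same-length block of 'noop'
# placeholders, so every host advances through the same number of task slots.
# pad_blocks is a hypothetical helper and plain strings stand in for Task objects.
def pad_blocks(hosts, included_hosts, real_tasks):
    noop_block = ['noop'] * len(real_tasks)
    all_blocks = {}
    for host in hosts:
        all_blocks[host] = real_tasks if host in included_hosts else noop_block
    return all_blocks


blocks = pad_blocks(hosts=['a', 'b'], included_hosts={'a'}, real_tasks=['t1', 't2'])
assert blocks['a'] == ['t1', 't2']
assert blocks['b'] == ['noop', 'noop']
assert len(blocks['a']) == len(blocks['b'])  # both hosts stay in lock-step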
def test_template_var(hostvars, host, varname, result):
    task = FakeTask('sanity_checks', {'checks': []})
    plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
    check = plugin.template_var(hostvars, host, varname)
    assert check == result