    def find_plugin(self, name, suffixes=None):
        ''' Find a plugin named name '''

        if not suffixes:
            if self.class_name:
                suffixes = ['.py']
            else:
                suffixes = ['.py', '']

        potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
        for full_name in potential_names:
            if full_name in self._plugin_path_cache:
                return self._plugin_path_cache[full_name]

        found = None
        for path in [p for p in self._get_paths() if p not in self._searched_paths]:
            if os.path.isdir(path):
                try:
                    full_paths = (os.path.join(path, f) for f in os.listdir(path))
                except OSError as e:
                    d = Display()
                    d.warning("Error accessing plugin paths: %s" % str(e))
                    # full_paths is unbound if listdir() failed, so skip this path
                    continue

                for full_path in (f for f in full_paths if os.path.isfile(f)):
                    for suffix in suffixes:
                        if full_path.endswith(suffix):
                            full_name = os.path.basename(full_path)
                            break
                    else:  # Yes, this is a for-else: http://bit.ly/1ElPkyg
                        continue

                    if full_name not in self._plugin_path_cache:
                        self._plugin_path_cache[full_name] = full_path

            self._searched_paths.add(path)
            for full_name in potential_names:
                if full_name in self._plugin_path_cache:
                    return self._plugin_path_cache[full_name]

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            for alias_name in ('_%s' % n for n in potential_names):
                # We've already cached all the paths at this point
                if alias_name in self._plugin_path_cache:
                    if not os.path.islink(self._plugin_path_cache[alias_name]):
                        d = Display()
                        d.deprecated('%s is kept for backwards compatibility '
                                     'but usage is discouraged. The module '
                                     'documentation details page may explain '
                                     'more about this rationale.' % name.lstrip('_'))
                    return self._plugin_path_cache[alias_name]

        return None
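# Illustrative sketch (not from the original ansible source): a minimal
# standalone version of the scan-and-cache pattern find_plugin() uses above.
# Each not-yet-searched directory is scanned once, every file it contains is
# cached, and lookups are then answered from the cache. The names here
# (demo_find_plugin, _path_cache, _searched) are hypothetical.
import os

_path_cache = {}
_searched = set()

def demo_find_plugin(name, paths, suffixes=('.py', '')):
    candidates = set('%s%s' % (name, s) for s in suffixes)
    for path in (p for p in paths if p not in _searched):
        if os.path.isdir(path):
            for fname in os.listdir(path):
                full = os.path.join(path, fname)
                # cache the first match only, mirroring the method above
                if os.path.isfile(full) and fname not in _path_cache:
                    _path_cache[fname] = full
        _searched.add(path)
    for candidate in candidates:
        if candidate in _path_cache:
            return _path_cache[candidate]
    return None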
class TaskExecutor:

    '''
    This is the main worker class for the executor pipeline, which
    handles loading an action plugin to actually dispatch the task to
    a given host. This class roughly corresponds to the old Runner()
    class.
    '''

    # Modules that we optimize by squashing loop items into a single call to
    # the module
    SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)

    def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj):
        self._host = host
        self._task = task
        self._job_vars = job_vars
        self._play_context = play_context
        self._new_stdin = new_stdin
        self._loader = loader
        self._shared_loader_obj = shared_loader_obj

        try:
            from __main__ import display
            self._display = display
        except ImportError:
            from ansible.utils.display import Display
            self._display = Display()

    def run(self):
        '''
        The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task with the loop items
        specified or executes it directly.
        '''

        self._display.debug("in run()")

        try:
            # lookup plugins need to know if this task is executing from
            # a role, so that it can properly find files/templates/etc.
            roledir = None
            if self._task._role:
                roledir = self._task._role._role_path
            self._job_vars['roledir'] = roledir

            items = self._get_loop_items()
            if items is not None:
                if len(items) > 0:
                    item_results = self._run_loop(items)

                    # loop through the item results, and remember the changed/failed
                    # result flags based on any item there.
                    changed = False
                    failed = False
                    for item in item_results:
                        if 'changed' in item and item['changed']:
                            changed = True
                        if 'failed' in item and item['failed']:
                            failed = True

                    # create the overall result item, and set the changed/failed
                    # flags there to reflect the overall result of the loop
                    res = dict(results=item_results)

                    if changed:
                        res['changed'] = True

                    if failed:
                        res['failed'] = True
                        res['msg'] = 'One or more items failed'
                    else:
                        res['msg'] = 'All items completed'
                else:
                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
            else:
                self._display.debug("calling self._execute()")
                res = self._execute()
                self._display.debug("_execute() done")

            # make sure changed is set in the result, if it's not present
            if 'changed' not in res:
                res['changed'] = False

            def _clean_res(res):
                if isinstance(res, dict):
                    for k in res.keys():
                        res[k] = _clean_res(res[k])
                elif isinstance(res, list):
                    for idx, item in enumerate(res):
                        res[idx] = _clean_res(item)
                elif isinstance(res, UnsafeProxy):
                    return res._obj
                return res

            self._display.debug("dumping result to json")
            res = _clean_res(res)
            self._display.debug("done dumping result, returning")
            return res
        except AnsibleError as e:
            return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
        finally:
            try:
                self._connection.close()
            except AttributeError:
                pass
            except Exception as e:
                self._display.debug("error closing connection: %s" % to_unicode(e))

    def _get_loop_items(self):
        '''
        Loads a lookup plugin to handle the with_* portion of a task (if specified),
        and returns the items result.
        '''

        # create a copy of the job vars here so that we can modify
        # them temporarily without changing them too early for other
        # parts of the code that might still need a pristine version
        vars_copy = self._job_vars.copy()

        # now we update them with the play context vars
        self._play_context.update_vars(vars_copy)

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy)
        items = None
        if self._task.loop:
            if self._task.loop in self._shared_loader_obj.lookup_loader:
                # TODO: remove convert_bare true and deprecate this in with_
                try:
                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
                                                             loader=self._loader, fail_on_undefined=True, convert_bare=True)
                except AnsibleUndefinedVariable as e:
                    if 'has no attribute' in str(e):
                        loop_terms = []
                        self._display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
                    else:
                        raise
                items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader,
                                                                  templar=templar).run(terms=loop_terms, variables=vars_copy)
            else:
                raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)

        if items:
            from ansible.vars.unsafe_proxy import UnsafeProxy
            for idx, item in enumerate(items):
                if item is not None and not isinstance(item, UnsafeProxy):
                    items[idx] = UnsafeProxy(item)

        return items

    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        task_vars = self._job_vars.copy()

        items = self._squash_items(items, task_vars)
        for item in items:
            task_vars['item'] = item

            try:
                tmp_task = self._task.copy()
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=str(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

            res = self._execute(variables=task_vars)

            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res['item'] = item
            results.append(res)

        return results

    def _squash_items(self, items, variables):
        '''
        Squash items down to a comma-separated list for certain modules which
        support it (typically package management modules).
        '''
        if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS:
            final_items = []
            name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
            for item in items:
                variables['item'] = item
                templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
                if self._task.evaluate_conditional(templar, variables):
                    if templar._contains_vars(name):
                        new_item = templar.template(name)
                        final_items.append(new_item)
                    else:
                        final_items.append(item)
            joined_items = ",".join(final_items)
            self._task.args['name'] = joined_items
            return [joined_items]
        else:
            return items

    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                self._display.debug("when evaluation failed, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # skip conditional exceptions in the case of includes, as the vars needed
            # might not be available except in the included tasks or due to tags
            if self._task.action != 'include':
                raise

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")
            include_file = templar.template(include_file)
            return dict(include=include_file, include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables=variables, templar=templar)
        self._connection.set_host_overrides(host=self._host)

        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until is not None:
            retries = self._task.retries
            if retries <= 0:
                retries = 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        self._display.debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                self._display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries - attempt, result), color="red")
                result['attempts'] = attempt + 1

            self._display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=str(e))
            self._display.debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result, templar=templar)

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # create a conditional object to evaluate task conditions
            cond = Conditional(loader=self._loader)

            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None:
                    cond.when = [self._task.changed_when]
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when is not None:
                    cond.when = [self._task.failed_when]
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                    return failed_when_result
                return False

            if self._task.until:
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    _evaluate_changed_when_result(result)
                    _evaluate_failed_when_result(result)
                    break
            elif (self._task.changed_when is not None or self._task.failed_when is not None) and 'skipped' not in result:
                _evaluate_changed_when_result(result)
                if _evaluate_failed_when_result(result):
                    break
            elif 'failed' not in result:
                if result.get('rc', 0) != 0:
                    result['failed'] = True
                else:
                    # if the result is not failed, stop trying
                    break

            if attempt < retries - 1:
                time.sleep(delay)
            else:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            ### FIXME:
            # If we remove invocation, we should also be removing _ansible*
            # and maybe ansible_facts.
            # Remove invocation from registered vars
            #if 'invocation' in result:
            #    del result['invocation']
            variables[self._task.register] = result

        if 'ansible_facts' in result:
            variables.update(result['ansible_facts'])

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # preserve no_log setting
        result["_ansible_no_log"] = self._play_context.no_log

        # and return
        self._display.debug("attempt loop complete, returning result")
        return result
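# Illustrative sketch (not from the original ansible source): a minimal
# standalone version of how run() above folds per-item loop results into a
# single aggregate result: 'changed' is set if any item changed, and 'failed'
# plus a summary message if any item failed. The helper name is hypothetical.
def demo_aggregate_loop_results(item_results):
    res = dict(results=item_results)
    if any(item.get('changed') for item in item_results):
        res['changed'] = True
    if any(item.get('failed') for item in item_results):
        res['failed'] = True
        res['msg'] = 'One or more items failed'
    else:
        res['msg'] = 'All items completed'
    return res

# demo_aggregate_loop_results([{'changed': True}, {'failed': True}])
# -> {'results': [...], 'changed': True, 'failed': True,
#     'msg': 'One or more items failed'}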
class VariableManager:

    def __init__(self):
        self._fact_cache = FactCache()
        self._nonpersistent_fact_cache = defaultdict(dict)
        self._vars_cache = defaultdict(dict)
        self._extra_vars = defaultdict(dict)
        self._host_vars_files = defaultdict(dict)
        self._group_vars_files = defaultdict(dict)
        self._inventory = None

        self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()

        try:
            from __main__ import display
            self._display = display
        except ImportError:
            from ansible.utils.display import Display
            self._display = Display()

    def __getstate__(self):
        data = dict(
            fact_cache=self._fact_cache.copy(),
            np_fact_cache=self._nonpersistent_fact_cache.copy(),
            vars_cache=self._vars_cache.copy(),
            extra_vars=self._extra_vars.copy(),
            host_vars_files=self._host_vars_files.copy(),
            group_vars_files=self._group_vars_files.copy(),
            omit_token=self._omit_token,
        )
        return data

    def __setstate__(self, data):
        self._fact_cache = data.get('fact_cache', defaultdict(dict))
        self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
        self._vars_cache = data.get('vars_cache', defaultdict(dict))
        self._extra_vars = data.get('extra_vars', dict())
        self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
        self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
        self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
        self._inventory = None

    def _get_cache_entry(self, play=None, host=None, task=None):
        play_id = "NONE"
        if play:
            play_id = play._uuid

        host_id = "NONE"
        if host:
            host_id = host.get_name()

        task_id = "NONE"
        if task:
            task_id = task._uuid

        return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)

    @property
    def extra_vars(self):
        ''' ensures a clean copy of the extra_vars are made '''
        return self._extra_vars.copy()

    @extra_vars.setter
    def extra_vars(self, value):
        ''' ensures a clean copy of the extra_vars are used to set the value '''
        assert isinstance(value, MutableMapping)
        self._extra_vars = value.copy()

    def set_inventory(self, inventory):
        self._inventory = inventory

    def _preprocess_vars(self, a):
        '''
        Ensures that vars contained in the parameter passed in are
        returned as a list of dictionaries, to ensure for instance
        that vars loaded from a file conform to an expected state.
        '''

        if a is None:
            return None
        elif not isinstance(a, list):
            data = [a]
        else:
            data = a

        for item in data:
            if not isinstance(item, MutableMapping):
                raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))

        return data

    def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        debug("in VariableManager get_vars()")
        cache_entry = self._get_cache_entry(play=play, host=host, task=task)
        if cache_entry in VARIABLE_CACHE and use_cache:
            debug("vars are cached, returning them now")
            return VARIABLE_CACHE[cache_entry]

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            loader=loader,
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its roles default vars
            if task and task._role is not None:
                all_vars = combine_vars(all_vars, task._role.get_default_vars())

        if host:
            # next, if a host is specified, we load any vars from group_vars
            # files and then any vars from host_vars files which may apply to
            # this host or the groups it belongs to

            # we merge in vars from groups specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_group_vars())

            # then we merge in the special 'all' group_vars first, if they exist
            if 'all' in self._group_vars_files:
                data = preprocess_vars(self._group_vars_files['all'])
                for item in data:
                    all_vars = combine_vars(all_vars, item)

            for group in host.get_groups():
                if group.name in self._group_vars_files and group.name != 'all':
                    for data in self._group_vars_files[group.name]:
                        data = preprocess_vars(data)
                        for item in data:
                            all_vars = combine_vars(all_vars, item)

            # then we merge in vars from the host specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_vars())

            # then we merge in the host_vars/<hostname> file, if it exists
            host_name = host.get_name()
            if host_name in self._host_vars_files:
                for data in self._host_vars_files[host_name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

            # finally, the facts cache for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
                all_vars = combine_vars(all_vars, host_facts)
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                #vars_file_list = templar.template(vars_file_item)
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(loader.load_from_file(vars_file))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound as e:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError as e:
                            raise
                    else:
                        raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be
                        # because of that, so just show a warning and continue
                        self._display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                        continue

            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_vars())
            all_vars = combine_vars(all_vars, task.get_vars())

        if host:
            all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

        all_vars = combine_vars(all_vars, self._extra_vars)
        all_vars = combine_vars(all_vars, magic_variables)

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)

        #VARIABLE_CACHE[cache_entry] = all_vars

        debug("done with get_vars()")
        return all_vars

    def invalidate_hostvars_cache(self, play):
        hostvars_cache_entry = self._get_cache_entry(play=play)
        if hostvars_cache_entry in HOSTVARS_CACHE:
            del HOSTVARS_CACHE[hostvars_cache_entry]

    def _get_magic_variables(self, loader, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()

        if host:
            variables['group_names'] = [group.name for group in host.get_groups() if group.name != 'all']

            if self._inventory is not None:
                variables['groups'] = dict()
                for (group_name, group) in iteritems(self._inventory.groups):
                    variables['groups'][group_name] = [h.name for h in group.get_hosts()]

                if include_hostvars:
                    hostvars_cache_entry = self._get_cache_entry(play=play)
                    if hostvars_cache_entry in HOSTVARS_CACHE:
                        hostvars = HOSTVARS_CACHE[hostvars_cache_entry]
                    else:
                        hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self)
                        HOSTVARS_CACHE[hostvars_cache_entry] = hostvars
                    variables['hostvars'] = hostvars
                    variables['vars'] = hostvars[host.get_name()]

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_path'] = task._role._role_path

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
                # however this would take work in the templating engine, so for now
                # we'll add both so we can give users something transitional to use
                host_list = [x.name for x in self._inventory.get_hosts()]
                variables['play_hosts'] = host_list
                variables['ansible_play_hosts'] = host_list

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)

        return variables

    def _get_delegated_vars(self, loader, play, task, existing_variables):
        # we unfortunately need to template the delegate_to field here,
        # as we're fetching vars before post_validate has been called on
        # the task that has been passed in
        vars_copy = existing_variables.copy()
        templar = Templar(loader=loader, variables=vars_copy)

        items = []
        if task.loop is not None:
            if task.loop in lookup_loader:
                # TODO: remove convert_bare true and deprecate this in with_
                try:
                    loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar,
                                                             loader=loader, fail_on_undefined=True, convert_bare=True)
                except AnsibleUndefinedVariable as e:
                    if 'has no attribute' in str(e):
                        loop_terms = []
                        self._display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
                    else:
                        raise
                items = lookup_loader.get(task.loop, loader=loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
            else:
                raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
        else:
            items = [None]

        delegated_host_vars = dict()
        for item in items:
            # update the variables with the item value for templating, in case we need it
            if item is not None:
                vars_copy['item'] = item

            templar.set_available_variables(vars_copy)
            delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)

            if delegated_host_name in delegated_host_vars:
                # no need to repeat ourselves, as the delegate_to value
                # does not appear to be tied to the loop item variable
                continue

            # a dictionary of variables to use if we have to create a new host below
            new_delegated_host_vars = dict(
                ansible_host=delegated_host_name,
                ansible_user=C.DEFAULT_REMOTE_USER,
                ansible_connection=C.DEFAULT_TRANSPORT,
            )

            # now try to find the delegated-to host in inventory, or failing that,
            # create a new host on the fly so we can fetch variables for it
            delegated_host = None
            if self._inventory is not None:
                delegated_host = self._inventory.get_host(delegated_host_name)
                # try looking it up based on the address field, and finally
                # fall back to creating a host on the fly to use for the var lookup
                if delegated_host is None:
                    for h in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name or (h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST):
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
                        delegated_host.vars.update(new_delegated_host_vars)
            else:
                delegated_host = Host(name=delegated_host_name)
                delegated_host.vars.update(new_delegated_host_vars)

            # now we go fetch the vars for the delegated-to host and save them in our
            # master dictionary of variables to be used later in the TaskExecutor/PlayContext
            delegated_host_vars[delegated_host_name] = self.get_vars(
                loader=loader,
                play=play,
                host=delegated_host,
                task=task,
                include_delegate_to=False,
                include_hostvars=False,
            )

        return delegated_host_vars

    def _get_inventory_basename(self, path):
        '''
        Returns the basename minus the extension of the given path, so the
        bare filename can be matched against host/group names later
        '''

        (name, ext) = os.path.splitext(os.path.basename(path))
        if ext not in ('.yml', '.yaml'):
            return os.path.basename(path)
        else:
            return name

    def _load_inventory_file(self, path, loader):
        '''
        helper function, which loads the file and gets the
        basename of the file without the extension
        '''

        if loader.is_directory(path):
            data = dict()

            try:
                names = loader.list_directory(path)
            except os.error as err:
                raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))

            # evaluate files in a stable order rather than whatever
            # order the filesystem lists them.
            names.sort()

            # do not parse hidden files or dirs, e.g. .svn/
            paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
            for p in paths:
                _found, results = self._load_inventory_file(path=p, loader=loader)
                if results is not None:
                    data = combine_vars(data, results)

        else:
            file_name, ext = os.path.splitext(path)
            data = None
            if not ext or ext not in C.YAML_FILENAME_EXTENSIONS:
                for test_ext in C.YAML_FILENAME_EXTENSIONS:
                    new_path = path + test_ext
                    if loader.path_exists(new_path):
                        data = loader.load_from_file(new_path)
                        break
            else:
                if loader.path_exists(path):
                    data = loader.load_from_file(path)

        name = self._get_inventory_basename(path)
        return (name, data)

    def add_host_vars_file(self, path, loader):
        '''
        Loads and caches a host_vars file in the _host_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory host name
        '''

        (name, data) = self._load_inventory_file(path, loader)
        if data:
            if name not in self._host_vars_files:
                self._host_vars_files[name] = []
            self._host_vars_files[name].append(data)
            return data
        else:
            return dict()

    def add_group_vars_file(self, path, loader):
        '''
        Loads and caches a group_vars file in the _group_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory group name
        '''

        (name, data) = self._load_inventory_file(path, loader)
        if data:
            if name not in self._group_vars_files:
                self._group_vars_files[name] = []
            self._group_vars_files[name].append(data)
            return data
        else:
            return dict()

    def set_host_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the fact cache.
        '''

        assert isinstance(facts, dict)

        if host.name not in self._fact_cache:
            self._fact_cache[host.name] = facts
        else:
            try:
                self._fact_cache.update(host.name, facts)
            except KeyError:
                self._fact_cache[host.name] = facts

    def set_nonpersistent_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the nonpersistent fact cache.
        '''

        assert isinstance(facts, dict)

        if host.name not in self._nonpersistent_fact_cache:
            self._nonpersistent_fact_cache[host.name] = facts
        else:
            try:
                self._nonpersistent_fact_cache[host.name].update(facts)
            except KeyError:
                self._nonpersistent_fact_cache[host.name] = facts

    def set_host_variable(self, host, varname, value):
        '''
        Sets a value in the vars_cache for a host.
        '''

        host_name = host.get_name()
        if host_name not in self._vars_cache:
            self._vars_cache[host_name] = dict()
        self._vars_cache[host_name][varname] = value
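# Illustrative sketch (not from the original ansible source): a minimal
# standalone version of the layering get_vars() performs with combine_vars().
# Sources are merged lowest-precedence first, so later sources win key by key.
# This assumes the default "replace" hash behaviour (no recursive hash
# merging); the helper name is hypothetical.
def demo_combine_vars(*sources):
    result = {}
    for source in sources:
        result.update(source)  # later sources override earlier ones
    return result

# Mirroring the documented precedence (role defaults lowest, extra vars highest):
# demo_combine_vars({'port': 80, 'user': 'web'},  # role defaults
#                   {'port': 8080},               # host_vars file
#                   {'user': 'admin'})            # extra vars
# -> {'port': 8080, 'user': 'admin'}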
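# Illustrative sketch (not from the original ansible source): a minimal
# standalone version of the default(omit) filtering TaskExecutor._execute()
# performs above. Any task arg whose final value equals the per-run omit
# placeholder is dropped before the module runs; the token format mirrors
# the one generated in VariableManager.__init__.
import os
from hashlib import sha1

omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()

task_args = {'path': '/tmp/demo', 'mode': omit_token, 'state': 'touch'}
filtered_args = dict((k, v) for (k, v) in task_args.items() if v != omit_token)
# filtered_args -> {'path': '/tmp/demo', 'state': 'touch'}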