def _get_delegated_vars(self, play, task, existing_variables):
    if not hasattr(task, 'loop'):
        # This "task" is not a Task, so we need to skip it
        return {}, None

    # we unfortunately need to template the delegate_to field here,
    # as we're fetching vars before post_validate has been called on
    # the task that has been passed in
    vars_copy = existing_variables.copy()
    self._templar.available_variables = vars_copy

    items = []
    has_loop = True
    if task.loop_with is not None:
        if task.loop_with in lookup_loader:
            try:
                loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=self._templar,
                                                         loader=self._loader, fail_on_undefined=True,
                                                         convert_bare=False)
                items = lookup_loader.get(task.loop_with, loader=self._loader, templar=self._templar).run(terms=loop_terms, variables=vars_copy)
            except AnsibleTemplateError:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
    elif task.loop is not None:
        try:
            items = self._templar.template(task.loop)
        except AnsibleTemplateError:
            # This task will be skipped later due to this, so we just setup
            # a dummy array for the later code so it doesn't fail
            items = [None]
    else:
        has_loop = False
        items = [None]

    delegated_host_vars = dict()
    item_var = getattr(task.loop_control, 'loop_var', 'item')
    cache_items = False
    for item in items:
        # update the variables with the item value for templating, in case we need it
        if item is not None:
            vars_copy[item_var] = item

        self._templar.available_variables = vars_copy
        delegated_host_name = self._templar.template(task.delegate_to, fail_on_undefined=False)

        if delegated_host_name != task.delegate_to:
            cache_items = True
        if delegated_host_name is None:
            raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
        if not isinstance(delegated_host_name, string_types):
            raise AnsibleError(message="the field 'delegate_to' has an invalid type (%s), and could not be"
                                       " converted to a string type." % type(delegated_host_name), obj=task._ds)

        if delegated_host_name in delegated_host_vars:
            # no need to repeat ourselves, as the delegate_to value
            # does not appear to be tied to the loop item variable
            continue

        # a dictionary of variables to use if we have to create a new host below
        # we set the default port based on the default transport here, to make sure
        # we use the proper default for windows
        new_port = C.DEFAULT_REMOTE_PORT
        if C.DEFAULT_TRANSPORT == 'winrm':
            new_port = 5986

        new_delegated_host_vars = dict(
            ansible_delegated_host=delegated_host_name,
            ansible_host=delegated_host_name,  # not redundant as other sources can change ansible_host
            ansible_port=new_port,
            ansible_user=C.DEFAULT_REMOTE_USER,
            ansible_connection=C.DEFAULT_TRANSPORT,
        )

        # now try to find the delegated-to host in inventory, or failing that,
        # create a new host on the fly so we can fetch variables for it
        delegated_host = None
        if self._inventory is not None:
            delegated_host = self._inventory.get_host(delegated_host_name)
            # try looking it up based on the address field, and finally
            # fall back to creating a host on the fly to use for the var lookup
            if delegated_host is None:
                if delegated_host_name in C.LOCALHOST:
                    delegated_host = self._inventory.localhost
                else:
                    for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
                        delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
        else:
            delegated_host = Host(name=delegated_host_name)
            delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)

        # now we go fetch the vars for the delegated-to host and save them in our
        # master dictionary of variables to be used later in the TaskExecutor/PlayContext
        delegated_host_vars[delegated_host_name] = self.get_vars(
            play=play,
            host=delegated_host,
            task=task,
            include_delegate_to=False,
            include_hostvars=False,
        )

    _ansible_loop_cache = None
    if has_loop and cache_items:
        # delegate_to templating produced a change, so we will cache the templated items
        # in a special private hostvar
        # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
        # which may reprocess the loop
        _ansible_loop_cache = items

    return delegated_host_vars, _ansible_loop_cache

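
The interesting detail above is the cache_items flag: if templating delegate_to against a loop item changes the value, the already-expanded items are handed back as _ansible_loop_cache so the TaskExecutor does not re-expand the loop differently. A minimal, self-contained sketch of that idea (plain Python; resolve_delegates and render are hypothetical names, not Ansible APIs):

# Illustrative sketch only: per-item templating of a delegate target.
def resolve_delegates(items, delegate_expr, render):
    resolved = {}
    cache_items = False
    for item in items:
        target = render(delegate_expr, item=item)
        if target != delegate_expr:
            # templating changed the value, so the expanded items must be cached
            cache_items = True
        resolved.setdefault(target, {})  # fetch vars only once per distinct target
    loop_cache = items if cache_items else None
    return resolved, loop_cache

# "{{ item.host }}" rendered against two items yields two distinct targets,
# so the expanded item list would be kept as the loop cache.
hosts, cache = resolve_delegates(
    [{'host': 'db1'}, {'host': 'db2'}],
    "{{ item.host }}",
    lambda expr, item: item['host'] if expr.startswith("{{") else expr,
)
assert sorted(hosts) == ['db1', 'db2'] and cache is not None
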
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    while not self._final_q.empty() and not self._tqm._terminated:
        try:
            result = self._final_q.get()
            display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

            # helper method, used to find the original host from the one
            # returned in the result/message, which has been serialized and
            # thus had some information stripped from it to speed up the
            # serialization process
            def get_original_host(host):
                if host.name in self._inventory._hosts_cache:
                    return self._inventory._hosts_cache[host.name]
                else:
                    return self._inventory.get_host(host.name)

            # all host status messages contain 2 entries: (msg, task_result)
            if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                task_result = result[1]
                host = get_original_host(task_result._host)
                task = task_result._task
                if result[0] == 'host_task_failed' or task_result.is_failed():
                    if not task.ignore_errors:
                        display.debug("marking %s as failed" % host.name)
                        if task.run_once:
                            # if we're using run_once, we have to fail every host here
                            [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
                        else:
                            iterator.mark_host_failed(host)

                        (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True)
                        if not state or state.run_state != PlayIterator.ITERATING_RESCUE:
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                    else:
                        self._tqm._stats.increment('ok', host.name)
                    self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                elif result[0] == 'host_unreachable':
                    self._tqm._unreachable_hosts[host.name] = True
                    self._tqm._stats.increment('dark', host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                elif result[0] == 'host_task_skipped':
                    self._tqm._stats.increment('skipped', host.name)
                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                elif result[0] == 'host_task_ok':
                    if task.action != 'include':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)

                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                self._pending_results -= 1
                if host.name in self._blocked_hosts:
                    del self._blocked_hosts[host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                        if role_obj._uuid == task_result._task._role._uuid:
                            role_obj._had_task_run[host.name] = True

                ret_results.append(task_result)

            elif result[0] == 'add_host':
                result_item = result[1]
                new_host_info = result_item.get('add_host', dict())
                self._add_host(new_host_info, iterator)

            elif result[0] == 'add_group':
                host = get_original_host(result[1])
                result_item = result[2]
                self._add_group(host, result_item)

            elif result[0] == 'notify_handler':
                task_result = result[1]
                handler_name = result[2]

                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(original_host, task_result._task)
                if handler_name not in self._notified_handlers:
                    self._notified_handlers[handler_name] = []

                if original_host not in self._notified_handlers[handler_name]:
                    self._notified_handlers[handler_name].append(original_host)
                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))

            elif result[0] == 'register_host_var':
                # essentially the same as 'set_host_var' below, however we
                # never follow the delegate_to value for registered vars and
                # the variable goes in the fact_cache
                host = get_original_host(result[1])
                task = result[2]
                var_value = wrap_var(result[3])
                var_name = task.register

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [host]

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

            elif result[0] in ('set_host_var', 'set_host_facts'):
                host = get_original_host(result[1])
                task = result[2]
                item = result[3]

                # find the host we're actually referring to here, which may
                # be a host that is not really in inventory at all
                if task.delegate_to is not None and task.delegate_facts:
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    if item is not None:
                        task_vars['item'] = item
                    templar = Templar(loader=self._loader, variables=task_vars)
                    host_name = templar.template(task.delegate_to)
                    actual_host = self._inventory.get_host(host_name)
                    if actual_host is None:
                        actual_host = Host(name=host_name)
                else:
                    actual_host = host

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [actual_host]

                if result[0] == 'set_host_var':
                    var_name = result[4]
                    var_value = result[5]
                    for target_host in host_list:
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                elif result[0] == 'set_host_facts':
                    facts = result[4]
                    for target_host in host_list:
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(target_host, facts)
                        else:
                            self._variable_manager.set_host_facts(target_host, facts)

            else:
                raise AnsibleError("unknown result message received: %s" % result[0])

        except Queue.Empty:
            time.sleep(0.0001)

        if one_pass:
            break

    return ret_results

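
The loop above is essentially a tagged-message dispatcher: each entry pulled off _final_q is a tuple whose first element selects the handling branch ('host_task_ok', 'add_host', 'notify_handler', and so on). A stripped-down sketch of that pattern (plain Python 3; the drain helper and handler table are hypothetical, not the strategy module's API):

import queue

def drain(final_q, handlers):
    """Drain a result queue, dispatching each message on its leading tag."""
    results = []
    while True:
        try:
            msg = final_q.get_nowait()
        except queue.Empty:
            break
        tag, payload = msg[0], msg[1:]
        if tag not in handlers:
            raise ValueError("unknown result message received: %s" % tag)
        out = handlers[tag](*payload)
        if out is not None:
            results.append(out)
    return results

# Example: a single 'host_task_ok' message updating simple counters.
stats = {'ok': 0, 'changed': 0}
def on_ok(result):
    stats['ok'] += 1
    if result.get('changed'):
        stats['changed'] += 1
    return result

q = queue.Queue()
q.put(('host_task_ok', {'host': 'web1', 'changed': True}))
print(drain(q, {'host_task_ok': on_ok}), stats)
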
def _parse(self, err):
    all_hosts = {}

    # not passing from_remote because data from CMDB is trusted
    self.raw = utils.parse_json(self.data)
    self.raw = json_dict_unicode_to_bytes(self.raw)

    all = Group('all')
    groups = dict(all=all)
    group = None

    if 'failed' in self.raw:
        sys.stderr.write(err + "\n")
        raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

    for (group_name, data) in self.raw.items():

        # in Ansible 1.3 and later, a "_meta" subelement may contain
        # a variable "hostvars" which contains a hash for each host
        # if this "hostvars" exists at all then do not call --host for each
        # host. This is for efficiency and scripts should still return data
        # if called with --host for backwards compat with 1.2 and earlier.

        if group_name == '_meta':
            if 'hostvars' in data:
                self.host_vars_from_top = data['hostvars']
                continue

        if group_name != all.name:
            group = groups[group_name] = Group(group_name)
        else:
            group = all
        host = None

        if not isinstance(data, dict):
            data = {'hosts': data}
        # is not those subkeys, then simplified syntax, host with vars
        elif not any(k in data for k in ('hosts', 'vars')):
            data = {'hosts': [group_name], 'vars': data}

        if 'hosts' in data:
            if not isinstance(data['hosts'], list):
                raise errors.AnsibleError("You defined a group \"%s\" with bad "
                                          "data for the host list:\n %s" % (group_name, data))

            for hostname in data['hosts']:
                if not hostname in all_hosts:
                    all_hosts[hostname] = Host(hostname)
                host = all_hosts[hostname]
                group.add_host(host)

        if 'vars' in data:
            if not isinstance(data['vars'], dict):
                raise errors.AnsibleError("You defined a group \"%s\" with bad "
                                          "data for variables:\n %s" % (group_name, data))

            for k, v in data['vars'].iteritems():
                if group.name == all.name:
                    all.set_variable(k, v)
                else:
                    group.set_variable(k, v)

    # Separate loop to ensure all groups are defined
    for (group_name, data) in self.raw.items():
        if group_name == '_meta':
            continue
        if isinstance(data, dict) and 'children' in data:
            for child_name in data['children']:
                if child_name in groups:
                    groups[group_name].add_child_group(groups[child_name])

    for group in groups.values():
        if group.depth == 0 and group.name != 'all':
            all.add_child_group(group)

    return groups

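
The two `data = {...}` branches above normalize the shorthand forms an inventory script may return: a bare host list, or a plain dict of variables keyed by a host name. A hypothetical standalone helper making that normalization explicit (it also recognizes the 'children' key, as the later versions shown below do):

def normalize_group(group_name, data):
    """Normalize shorthand inventory-script output to the full dict form."""
    if not isinstance(data, dict):
        # bare list of hosts
        return {'hosts': data}
    if not any(k in data for k in ('hosts', 'vars', 'children')):
        # plain dict of variables: group_name is really a host name
        return {'hosts': [group_name], 'vars': data}
    return data

assert normalize_group('web', ['h1', 'h2']) == {'hosts': ['h1', 'h2']}
assert normalize_group('h3', {'color': 'blue'}) == {'hosts': ['h3'], 'vars': {'color': 'blue'}}
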
def _parse(self, err):
    all_hosts = {}

    # not passing from_remote because data from CMDB is trusted
    self.raw = utils.parse_json(self.data)  # parse the script's standard output from self.data, which must be JSON
    self.raw = json_dict_bytes_to_unicode(self.raw)  # convert every key/value in self.raw to unicode

    all = Group('all')  # create the Group('all') object
    groups = dict(all=all)  # initialize the groups dict
    group = None

    if 'failed' in self.raw:
        # if self.raw contains a 'failed' key, raise an error; in practice the parse_json
        # call above does not suppress exceptions, so this case should not occur
        sys.stderr.write(err + "\n")
        raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

    for (group_name, data) in self.raw.items():
        # in 1.3 and later, the --list output may include a '_meta' key whose value holds a
        # 'hostvars' variable containing the host variables for every host
        # in 1.2 and earlier, the script still has to be called with --host to return
        # the variables for each individual host
        # in Ansible 1.3 and later, a "_meta" subelement may contain
        # a variable "hostvars" which contains a hash for each host
        # if this "hostvars" exists at all then do not call --host for each
        # host. This is for efficiency and scripts should still return data
        # if called with --host for backwards compat with 1.2 and earlier.
        """
        {
            "databases": {
                "hosts": ["host1.example.com", "host2.example.com"],
                "vars": {"a": true}
            },
            "webservers": ["host2.example.com", "host3.example.com"],
            "atlanta": {
                "hosts": ["host1.example.com", "host4.example.com", "host5.example.com"],
                "vars": {"b": false},
                "children": ["marietta", "5points"]
            },
            "marietta": ["host6.example.com"],
            "5points": ["host7.example.com"]
        }

        {
            # results of inventory script as above go here
            # ...
            "_meta": {
                "hostvars": {
                    "moocow.example.com": {"asdf": 1234},
                    "llama.example.com": {"asdf": 5678},
                }
            }
        }

        {
            "moocow.example.com": {"asdf": 1234},
            "llama.example.com": {"asdf": 5678}
        }
        """
        if group_name == '_meta':  # a '_meta' key means this value is metadata
            if 'hostvars' in data:
                self.host_vars_from_top = data['hostvars']  # if the metadata carries 'hostvars', cache it in self.host_vars_from_top
                continue  # skip the remaining processing for this key

        if group_name != all.name:
            # group_name is not 'all': create a new Group object and register it in the groups dict;
            # a hostname appearing without a group ends up as a group named after that host
            group = groups[group_name] = Group(group_name)
        else:
            group = all
        host = None

        if not isinstance(data, dict):  # if data is not a dict, it is a plain host list
            data = {'hosts': data}
        # is not those subkeys, then simplified syntax, host with vars
        elif not any(k in data for k in ('hosts', 'vars', 'children')):
            # any() is True if any element of the iterable is True, otherwise False
            # if data is a dict but has none of 'hosts', 'vars' or 'children', this entry is
            # host-variable data and group_name is actually a host name
            data = {'hosts': [group_name], 'vars': data}
        # the two steps above normalize data: without vars the format is {'hosts': [host1, host2, ...]},
        # with vars it is {'hosts': [group_name], 'vars': data}

        if 'hosts' in data:
            if not isinstance(data['hosts'], list):
                # the host list must be a list; the 'hosts': [group_name] normalization above keeps this check uniform
                raise errors.AnsibleError("You defined a group \"%s\" with bad "
                                          "data for the host list:\n %s" % (group_name, data))

            for hostname in data['hosts']:  # iterate over the host names
                if not hostname in all_hosts:
                    all_hosts[hostname] = Host(hostname)  # create the Host object if it does not exist yet and add it to all_hosts (deduplicated)
                host = all_hosts[hostname]  # bind the Host object to the host variable
                group.add_host(host)  # add the Host object to this group

        if 'vars' in data:  # data that carries vars
            if not isinstance(data['vars'], dict):
                raise errors.AnsibleError("You defined a group \"%s\" with bad "
                                          "data for variables:\n %s" % (group_name, data))

            for k, v in data['vars'].iteritems():  # iterate over all the variables
                if group.name == all.name:  # if the current group is 'all', set the variable on the all group, otherwise on the current group
                    all.set_variable(k, v)
                else:
                    group.set_variable(k, v)

    # Separate loop to ensure all groups are defined
    for (group_name, data) in self.raw.items():
        if group_name == '_meta':
            continue
        if isinstance(data, dict) and 'children' in data:  # if data contains child groups, add each child to its parent group
            for child_name in data['children']:
                if child_name in groups:
                    groups[group_name].add_child_group(groups[child_name])

    for group in groups.values():
        if group.depth == 0 and group.name != 'all':  # a group at depth 0 that is not 'all' becomes a child of the all group
            all.add_child_group(group)

    return groups

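
As the comments above note, the _meta.hostvars block lets Ansible avoid one --host call per host. A hypothetical sketch of that lookup order (host_vars_for and run_script_host are illustrative names, not Ansible functions):

def host_vars_for(list_output, hostname, run_script_host):
    """Prefer _meta.hostvars from --list output; fall back to calling --host."""
    hostvars = list_output.get('_meta', {}).get('hostvars')
    if hostvars is not None:
        return hostvars.get(hostname, {})
    return run_script_host(hostname)  # would invoke: script --host <hostname>

listing = {'webservers': ['h1'], '_meta': {'hostvars': {'h1': {'asdf': 1234}}}}
print(host_vars_for(listing, 'h1', lambda h: {}))  # -> {'asdf': 1234}
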
def parse_inventory(self, host_list):

    if isinstance(host_list, string_types):
        if "," in host_list:
            host_list = host_list.split(",")
            host_list = [h for h in host_list if h and h.strip()]

    self.parser = None

    # Always create the 'all' and 'ungrouped' groups, even if host_list is
    # empty: in this case we will subsequently add the implicit 'localhost' to it.

    ungrouped = Group('ungrouped')
    all = Group('all')
    all.add_child_group(ungrouped)
    base_groups = frozenset([all, ungrouped])

    self.groups = dict(all=all, ungrouped=ungrouped)

    if host_list is None:
        pass
    elif isinstance(host_list, list):
        for h in host_list:
            try:
                (host, port) = parse_address(h, allow_ranges=False)
            except AnsibleError as e:
                display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
                host = h
                port = None

            new_host = Host(host, port)
            if h in C.LOCALHOST:
                # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
                if self.localhost is not None:
                    display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
                display.vvvv("Set default localhost to %s" % h)
                self.localhost = new_host
            all.add_host(new_host)
    elif self._loader.path_exists(host_list):
        # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
        if self.is_directory(host_list):
            # Ensure basedir is inside the directory
            host_list = os.path.join(self.host_list, "")
            self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
        else:
            self.parser = get_file_parser(host_list, self.groups, self._loader)
            vars_loader.add_directory(self._basedir, with_subdir=True)

        if not self.parser:
            # should never happen, but JIC
            raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
    else:
        display.warning("Host file not found: %s" % to_text(host_list))

    self._vars_plugins = [x for x in vars_loader.all(self)]

    ### POST PROCESS groups and hosts after specific parser was invoked
    group_names = set()

    # set group vars from group_vars/ files and vars plugins
    for g in self.groups:
        group = self.groups[g]
        group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
        self.get_group_vars(group)
        group_names.add(group.name)

    host_names = set()

    # get host vars from host_vars/ files and vars plugins
    for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
        host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
        self.get_host_vars(host)
        host_names.add(host.name)

        mygroups = host.get_groups()

        # ensure hosts are always in 'all'
        if all not in mygroups:
            all.add_host(host)

        if ungrouped in mygroups:
            # clear ungrouped of any incorrectly stored by parser
            if set(mygroups).difference(base_groups):
                host.remove_group(ungrouped)
        else:
            # add ungrouped hosts to ungrouped
            length = len(mygroups)
            if length == 0 or (length == 1 and all in mygroups):
                ungrouped.add_host(host)

    # warn if overloading identifier as both group and host
    for conflict in group_names.intersection(host_names):
        display.warning("Found both group and host with same name: %s" % conflict)

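
The post-processing loop above enforces two invariants: every host belongs to 'all', and 'ungrouped' holds exactly those hosts that have no other real group. A standalone sketch of that rule over plain sets (hypothetical names, not the Inventory API):

def assign_base_groups(host_groups):
    """Return each host's groups with the 'all'/'ungrouped' invariants applied."""
    base = {'all', 'ungrouped'}
    fixed = {}
    for host, groups in host_groups.items():
        groups = set(groups) | {'all'}       # ensure hosts are always in 'all'
        if groups - base:
            groups.discard('ungrouped')      # real group membership wins
        else:
            groups.add('ungrouped')          # otherwise the host is ungrouped
        fixed[host] = groups
    return fixed

print(assign_base_groups({'web1': {'webservers'}, 'spare1': set()}))
# e.g. {'web1': {'webservers', 'all'}, 'spare1': {'ungrouped', 'all'}}
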
def _process_pending_results(self, iterator, one_pass=False, timeout=0.001):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    def get_original_host(host_name):
        host_name = to_unicode(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks(handler_name, handler_blocks):
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
        return None

    def parent_handler_match(target_handler, handler_name):
        if target_handler:
            if isinstance(target_handler, (TaskInclude, IncludeRole)):
                try:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    target_handler_name = templar.template(target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    passes = 1
    while not self._tqm._terminated and passes < 3:
        try:
            task_result = self._final_q.get(timeout=timeout)
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host, task_result._task)
            task_result._host = original_host
            task_result._task = original_task

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            if original_task.register:
                if original_task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [original_host]

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                if not original_task.ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                state, _ = iterator.get_next_task_for_host(h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # only add the host to the failed list officially if it has
                    # been failed by the iterator
                    if iterator.is_failed(original_host):
                        self._tqm._failed_hosts[original_host.name] = True
                        self._tqm._stats.increment('failures', original_host.name)
                    else:
                        # otherwise, we grab the current state and if we're iterating on
                        # the rescue portion of a block then we save the failed task in a
                        # special var for use within the rescue/always
                        state, _ = iterator.get_next_task_for_host(original_host, peek=True)
                        if state.run_state == iterator.ITERATING_RESCUE:
                            self._variable_manager.set_nonpersistent_facts(
                                original_host,
                                dict(
                                    ansible_failed_task=original_task.serialize(),
                                    ansible_failed_result=task_result._result,
                                ),
                            )
                else:
                    self._tqm._stats.increment('ok', original_host.name)

                self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=original_task.ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                # Find the handler using the above helper. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                if handler_name in self._listening_handlers:
                                    for listening_handler_name in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                                        if listening_handler is None:
                                            raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)
                                        if original_host not in self._notified_handlers[listening_handler]:
                                            self._notified_handlers[listening_handler].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                                else:
                                    target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                                    if target_handler is not None:
                                        if original_host not in self._notified_handlers[target_handler]:
                                            self._notified_handlers[target_handler].append(original_host)
                                            # FIXME: should this be a callback?
                                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                    else:
                                        # As there may be more than one handler with the notified name as the
                                        # parent, we just keep track of whether or not we found one at all
                                        found = False
                                        for target_handler in self._notified_handlers:
                                            if parent_handler_match(target_handler, handler_name):
                                                self._notified_handlers[target_handler].append(original_host)
                                                display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))
                                                found = True

                                        # and if none were found, then we raise an error
                                        if not found:
                                            raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    elif 'ansible_facts' in result_item:
                        loop_var = 'item'
                        if original_task.loop_control:
                            loop_var = original_task.loop_control.loop_var or 'item'

                        item = result_item.get(loop_var, None)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
                                # be a host that is not really in inventory at all
                                if original_task.delegate_to is not None and original_task.delegate_facts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                                    self.add_tqm_variables(task_vars, play=iterator._play)
                                    if item is not None:
                                        task_vars[loop_var] = item
                                    templar = Templar(loader=self._loader, variables=task_vars)
                                    host_name = templar.template(original_task.delegate_to)
                                    actual_host = self._inventory.get_host(host_name)
                                    if actual_host is None:
                                        actual_host = Host(name=host_name)
                                else:
                                    actual_host = original_host

                                if original_task.run_once:
                                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                                else:
                                    host_list = [actual_host]

                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            if original_task.run_once:
                                host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                            else:
                                host_list = [original_host]

                            for target_host in host_list:
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action != 'include':
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result['changed']:
                        self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran and original_task.action != 'include_role':
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

        except Queue.Empty:
            passes += 1

        if one_pass:
            break

    return ret_results

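
One small but important step above is cleaning the result before it is registered: strip_internal_keys drops the bookkeeping keys and the module 'invocation' block is removed as well. A hypothetical standalone version of that cleanup (assuming the internal keys are those prefixed with '_ansible_', which is what the bookkeeping keys seen above use):

def clean_registered_result(result):
    """Drop internal '_ansible_*' keys and the 'invocation' block before registering."""
    clean = {k: v for k, v in result.items() if not k.startswith('_ansible_')}
    clean.pop('invocation', None)
    return clean

raw = {'changed': True, 'stdout': 'ok', '_ansible_no_log': False,
       'invocation': {'module_args': {'path': '/tmp/x'}}}
print(clean_registered_result(raw))  # -> {'changed': True, 'stdout': 'ok'}
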
def _get_delegated_vars(self, play, task, existing_variables):
    # we unfortunately need to template the delegate_to field here,
    # as we're fetching vars before post_validate has been called on
    # the task that has been passed in
    vars_copy = existing_variables.copy()
    templar = Templar(loader=self._loader, variables=vars_copy)

    items = []
    if task.loop is not None:
        if task.loop in lookup_loader:
            try:
                loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar,
                                                         loader=self._loader, fail_on_undefined=True,
                                                         convert_bare=False)
                items = lookup_loader.get(task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
            except AnsibleUndefinedVariable:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
    else:
        items = [None]

    delegated_host_vars = dict()
    for item in items:
        # update the variables with the item value for templating, in case we need it
        if item is not None:
            vars_copy['item'] = item

        templar.set_available_variables(vars_copy)
        delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
        if delegated_host_name is None:
            raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
        if delegated_host_name in delegated_host_vars:
            # no need to repeat ourselves, as the delegate_to value
            # does not appear to be tied to the loop item variable
            continue

        # a dictionary of variables to use if we have to create a new host below
        # we set the default port based on the default transport here, to make sure
        # we use the proper default for windows
        new_port = C.DEFAULT_REMOTE_PORT
        if C.DEFAULT_TRANSPORT == 'winrm':
            new_port = 5986

        new_delegated_host_vars = dict(
            ansible_delegated_host=delegated_host_name,
            ansible_host=delegated_host_name,  # not redundant as other sources can change ansible_host
            ansible_port=new_port,
            ansible_user=C.DEFAULT_REMOTE_USER,
            ansible_connection=C.DEFAULT_TRANSPORT,
        )

        # now try to find the delegated-to host in inventory, or failing that,
        # create a new host on the fly so we can fetch variables for it
        delegated_host = None
        if self._inventory is not None:
            delegated_host = self._inventory.get_host(delegated_host_name)
            # try looking it up based on the address field, and finally
            # fall back to creating a host on the fly to use for the var lookup
            if delegated_host is None:
                if delegated_host_name in C.LOCALHOST:
                    delegated_host = self._inventory.localhost
                else:
                    for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
                        delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
        else:
            delegated_host = Host(name=delegated_host_name)
            delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)

        # now we go fetch the vars for the delegated-to host and save them in our
        # master dictionary of variables to be used later in the TaskExecutor/PlayContext
        delegated_host_vars[delegated_host_name] = self.get_vars(
            play=play,
            host=delegated_host,
            task=task,
            include_delegate_to=False,
            include_hostvars=False,
        )

    return delegated_host_vars

def _parse(self, err):

    all_hosts = {}

    # not passing from_remote because data from CMDB is trusted
    try:
        self.raw = self._loader.load(self.data)
    except Exception as e:
        sys.stderr.write(err + "\n")
        raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename), to_str(e)))

    if not isinstance(self.raw, Mapping):
        sys.stderr.write(err + "\n")
        raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename)))

    group = None
    for (group_name, data) in self.raw.items():

        # in Ansible 1.3 and later, a "_meta" subelement may contain
        # a variable "hostvars" which contains a hash for each host
        # if this "hostvars" exists at all then do not call --host for each
        # host. This is for efficiency and scripts should still return data
        # if called with --host for backwards compat with 1.2 and earlier.

        if group_name == '_meta':
            if 'hostvars' in data:
                self.host_vars_from_top = data['hostvars']
                continue

        if group_name not in self.groups:
            group = self.groups[group_name] = Group(group_name)

        group = self.groups[group_name]
        host = None

        if not isinstance(data, dict):
            data = {'hosts': data}
        # is not those subkeys, then simplified syntax, host with vars
        elif not any(k in data for k in ('hosts', 'vars', 'children')):
            data = {'hosts': [group_name], 'vars': data}

        if 'hosts' in data:
            if not isinstance(data['hosts'], list):
                raise AnsibleError("You defined a group \"%s\" with bad "
                                   "data for the host list:\n %s" % (group_name, data))

            for hostname in data['hosts']:
                if hostname not in all_hosts:
                    all_hosts[hostname] = Host(hostname)
                host = all_hosts[hostname]
                group.add_host(host)

        if 'vars' in data:
            if not isinstance(data['vars'], dict):
                raise AnsibleError("You defined a group \"%s\" with bad "
                                   "data for variables:\n %s" % (group_name, data))

            for k, v in iteritems(data['vars']):
                group.set_variable(k, v)

    # Separate loop to ensure all groups are defined
    for (group_name, data) in self.raw.items():
        if group_name == '_meta':
            continue
        if isinstance(data, dict) and 'children' in data:
            for child_name in data['children']:
                if child_name in self.groups:
                    self.groups[group_name].add_child_group(self.groups[child_name])

    # Finally, add all top-level groups as children of 'all'.
    # We exclude ungrouped here because it was already added as a child of
    # 'all' at the time it was created.
    for group in self.groups.values():
        if group.depth == 0 and group.name not in ('all', 'ungrouped'):
            self.groups['all'].add_child_group(group)

def parse_inventory(self, host_list):

    if isinstance(host_list, string_types):
        if "," in host_list:
            host_list = host_list.split(",")
            host_list = [h for h in host_list if h and h.strip()]

    self.parser = None

    # Always create the 'all' and 'ungrouped' groups, even if host_list is
    # empty: in this case we will subsequently add the implicit 'localhost' to it.

    ungrouped = Group('ungrouped')
    all = Group('all')
    all.add_child_group(ungrouped)

    self.groups = dict(all=all, ungrouped=ungrouped)

    if host_list is None:
        pass
    elif isinstance(host_list, list):
        for h in host_list:
            try:
                (host, port) = parse_address(h, allow_ranges=False)
            except AnsibleError as e:
                display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e))
                host = h
                port = None
            new_host = Host(host, port)
            all.add_host(new_host)

            if new_host.name in C.LOCALHOST:
                if self.localhost is None:
                    self.localhost = new_host
                else:
                    display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (new_host.name, self.localhost.name))
    elif self._loader.path_exists(host_list):
        # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
        if self.is_directory(host_list):
            # Ensure basedir is inside the directory
            host_list = os.path.join(self.host_list, "")
            self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
        else:
            self.parser = get_file_parser(host_list, self.groups, self._loader)
            vars_loader.add_directory(self._basedir, with_subdir=True)

        if not self.parser:
            # should never happen, but JIC
            raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
    else:
        display.warning("Host file not found: %s" % to_unicode(host_list))

    self._vars_plugins = [x for x in vars_loader.all(self)]

    # set group vars from group_vars/ files and vars plugins
    for g in self.groups:
        group = self.groups[g]
        group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
        self.get_group_vars(group)

    # set host vars from host_vars/ files and vars plugins
    for host in self.get_hosts():
        host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
        self.get_host_vars(host)