def v2_runner_on_ok(self, result, **kwargs):
    """Record a successful task result under the current play/task entry.

    Honors ``no_log`` at both the task level and the per-loop-item level by
    substituting a censored placeholder, then stamps the end time on the
    current task and play and records the action name for the host.
    """
    host = result._host
    action = result._task.action
    task_hosts = self.results[-1]['tasks'][-1]['hosts']

    if result._result.get('_ansible_no_log', False) or result._task.no_log:
        # Entire result is censored when no_log applies to the task.
        task_hosts[host.name] = dict(
            censored="the output has been hidden due to the fact that"
                     " 'no_log: true' was specified for this result")
    else:
        # strip_internal_keys makes a deep copy of dict items but not of
        # lists, so deepcopy first to avoid mutating the original result.
        scrubbed = strip_internal_keys(copy.deepcopy(result._result))
        # Individual loop items may carry their own no_log flag.
        for pos, loop_item in enumerate(scrubbed.get('results', [])):
            if loop_item.get('_ansible_no_log', False):
                scrubbed['results'][pos] = dict(
                    censored="the output has been hidden due to the fact that"
                             " 'no_log: true' was specified for this result")
        task_hosts[host.name] = scrubbed

    end_time = current_time()
    self.results[-1]['tasks'][-1]['task']['duration']['end'] = end_time
    self.results[-1]['play']['duration']['end'] = end_time
    task_hosts[host.name]['action'] = action
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): return json.dumps( dict( censored= "the output has been hidden due to the fact that 'no_log: true' was specified for this result" )) if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2): indent = 4 # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. abridged_result = strip_internal_keys(result) # remove invocation unless specifically wanting it if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: del abridged_result['invocation'] # remove diff information from screen output if self._display.verbosity < 3 and 'diff' in result: del abridged_result['diff'] # remove exception from screen output if 'exception' in abridged_result: del abridged_result['exception'] return json.dumps(abridged_result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def v2_runner_on_ok(self, result, **kwargs):
    """Store a successful task result for the current task's host entry.

    Applies ``no_log`` censoring (task-wide and per loop item) before
    storing, then updates the end timestamps for the task and the play.
    """
    host = result._host
    hosts_entry = self.results[-1]['tasks'][-1]['hosts']

    if result._result.get('_ansible_no_log', False) or result._task.no_log:
        hosts_entry[host.name] = dict(
            censored="the output has been hidden due to the fact that"
                     " 'no_log: true' was specified for this result")
    else:
        # strip_internal_keys deep-copies dict items but not lists, so take
        # our own complete deep copy first to keep the original untouched.
        sanitized = strip_internal_keys(copy.deepcopy(result._result))
        # Replace any loop items that were individually flagged no_log.
        for idx, item in enumerate(sanitized.get('results', [])):
            if item.get('_ansible_no_log', False):
                sanitized['results'][idx] = dict(
                    censored="the output has been hidden due to the fact that"
                             " 'no_log: true' was specified for this result")
        hosts_entry[host.name] = sanitized

    end_time = current_time()
    self.results[-1]['tasks'][-1]['task']['duration']['end'] = end_time
    self.results[-1]['play']['duration']['end'] = end_time
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")) if not indent and '_ansible_verbose_always' in result and result['_ansible_verbose_always']: indent = 4 # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. abridged_result = strip_internal_keys(result) # remove invocation unless specifically wanting it if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: del abridged_result['invocation'] return json.dumps(abridged_result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def v2_runner_on_ok(self, result):
    """Display a successful task result on one line per host.

    Fact-gathering results are either suppressed ('ansible_facts' present)
    or pretty-printed as YAML (legacy 'hostvars[inventory_hostname]' form);
    everything else is shown as a timestamped status line.
    """
    self._clean_results(result._result, result._task.action)
    if 'ansible_facts' in result._result:
        # Fact results are not displayed in this branch.
        return
    elif 'hostvars[inventory_hostname]' in result._result:
        facts = result._result['hostvars[inventory_hostname]']
        # Drop facter-provided keys before displaying; snapshot the key
        # list first since we delete while iterating.
        facter_keys = [k for k in facts.keys() if k.startswith('facter_')]
        for key in facter_keys:
            del facts[key]
        result._result['ansible_facts'] = facts
        self._display.display(
            "%s | %s | Gathered facts:\n%s" % (
                _get_timestamp(), result._host.get_name(),
                yaml.safe_dump(facts, default_flow_style=False)))
        return
    if result._task.action in C.MODULE_NO_JSON:
        # Raw-output modules (e.g. shell/command) get the generic message
        # format instead of a JSON dump.
        self._display.display(
            self._command_generic_msg(
                result._host.get_name(), result._result, result._task,
                "SUCCESS"))
    else:
        if 'changed' in result._result and result._result['changed']:
            self._display.display(
                "%s | %s | SUCCESS => %s" % (
                    _get_timestamp(), result._host.get_name(),
                    self._dump_results(
                        result._result, indent=4)))
        else:
            # NOTE(review): 'abriged_result' is a misspelling of
            # 'abridged_result'; harmless since it is a local name.
            abriged_result = strip_internal_keys(result._result)
            # A bare {'msg': ...} result is shown as plain text rather
            # than a one-key JSON dump.
            if 'msg' in abriged_result and len(abriged_result.keys()) == 1:
                result_text = result._result['msg']
            else:
                result_text = self._dump_results(result._result, indent=4)
            self._display.display(
                "%s | %s | %s | %s" % (
                    _get_timestamp(), result._host.get_name(),
                    result._task.get_name().strip(), result_text))
    self._handle_warnings(result._result)
def _dump_results(self, result, indent=4, sort_keys=True, keep_invocation=False):
    '''Return the text to output for a result.

    Censors no_log results, strips internal/noisy keys, then serializes
    to JSON and textually unescapes embedded newline sequences so
    multi-line module output reads naturally on screen.
    '''
    if result.get('_ansible_no_log', False):
        return json.dumps(
            dict(
                censored=
                "The output has been hidden due to the fact that 'no_log: true' was specified for this result."
            ))

    # All result keys starting with _ansible_ are for internal use only,
    # so remove them from the result before we output anything.
    reformatted_result = strip_internal_keys(result)

    # remove invocation unless specifically wanting it
    if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
        del reformatted_result['invocation']

    # remove diff information from screen output
    if self._display.verbosity < 3 and 'diff' in result:
        del reformatted_result['diff']

    # remove exception from screen output
    if 'exception' in reformatted_result:
        del reformatted_result['exception']

    output = json.dumps(reformatted_result, indent=indent,
                        ensure_ascii=False, sort_keys=sort_keys)
    # Post-process the serialized JSON text: turn escaped CRLF/LF
    # sequences into real newlines (indented with a tab), dropping the
    # trailing escape just before a closing quote+comma so values don't
    # end with a dangling newline. NOTE(review): this is a fragile string
    # transform on JSON text, not on the data structure — the exact
    # literals below are load-bearing.
    output = output.replace('\\r\\n\",', '",')
    output = output.replace('\\r\\n', "\n\t")
    output = output.replace('\\n\",', '",')
    output = output.replace('\\n', "\n\t")
    return output
def v2_runner_on_ok(self, result):
    """Display a successful task result as a timestamped status line.

    Fact-gathering output is either suppressed ('ansible_facts' present)
    or dumped as YAML (legacy 'hostvars[inventory_hostname]' form).
    """
    self._clean_results(result._result, result._task.action)
    if 'ansible_facts' in result._result:
        # Fact results are not displayed in this branch.
        return
    elif 'hostvars[inventory_hostname]' in result._result:
        facts = result._result['hostvars[inventory_hostname]']
        # Remove facter-derived keys; collect them first because we
        # delete from the dict while walking the collected list.
        facter_keys = [k for k in facts.keys() if k.startswith('facter_')]
        for key in facter_keys:
            del facts[key]
        result._result['ansible_facts'] = facts
        self._display.display(
            "%s | %s | Gathered facts:\n%s" %
            (_get_timestamp(), result._host.get_name(),
             yaml.safe_dump(facts, default_flow_style=False)))
        return
    if result._task.action in C.MODULE_NO_JSON:
        # Raw-output modules are rendered via the generic command format.
        self._display.display(
            self._command_generic_msg(result._host.get_name(),
                                      result._result, result._task,
                                      "SUCCESS"))
    else:
        if 'changed' in result._result and result._result['changed']:
            self._display.display(
                "%s | %s | SUCCESS => %s" %
                (_get_timestamp(), result._host.get_name(),
                 self._dump_results(result._result, indent=4)))
        else:
            # NOTE(review): 'abriged_result' is a misspelled local for
            # 'abridged_result'; behavior is unaffected.
            abriged_result = strip_internal_keys(result._result)
            # A result that is only {'msg': ...} is shown as plain text.
            if 'msg' in abriged_result and len(abriged_result.keys()) == 1:
                result_text = result._result['msg']
            else:
                result_text = self._dump_results(result._result, indent=4)
            self._display.display(
                "%s | %s | %s | %s" %
                (_get_timestamp(), result._host.get_name(),
                 result._task.get_name().strip(), result_text))
    self._handle_warnings(result._result)
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    :param iterator: the play iterator whose per-host state is advanced
        and queried as results come in.
    :param one_pass: stop after processing a single result.
    :param max_passes: stop after this many results (None = drain queue).
    :returns: list of the TaskResult objects processed this call.
    '''

    ret_results = []

    def get_original_host(host_name):
        # Resolve a (possibly serialized) host name back to the real
        # inventory Host object, using the cache when possible.
        host_name = to_text(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks_by_name(handler_name, handler_blocks):
        # Find a handler task whose (templated) name matches handler_name.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                if handler_task.name:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    try:
                        # first we check with the full result of get_name(), which may
                        # include the role name (if the handler is from a role). If that
                        # is not found, we resort to the simple name field, which doesn't
                        # have anything extra added to it.
                        target_handler_name = templar.template(handler_task.name)
                        if target_handler_name == handler_name:
                            return handler_task
                        else:
                            target_handler_name = templar.template(handler_task.get_name())
                            if target_handler_name == handler_name:
                                return handler_task
                    except (UndefinedError, AnsibleUndefinedVariable):
                        # We skip this handler due to the fact that it may be using
                        # a variable in the name that was conditionally included via
                        # set_fact or some other method, and we don't want to error
                        # out unnecessarily
                        continue
        return None

    def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
        # Find a handler task by its internal uuid.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                if handler_uuid == handler_task._uuid:
                    return handler_task
        return None

    def parent_handler_match(target_handler, handler_name):
        # Walk up the parent chain looking for an include whose (templated)
        # name matches handler_name; True means the handler was notified
        # via its including parent.
        if target_handler:
            if isinstance(target_handler, (TaskInclude, IncludeRole)):
                try:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    target_handler_name = templar.template(target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable):
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    cur_pass = 0
    while True:
        # Pop one queued result under the lock; an empty deque ends the loop.
        try:
            self._results_lock.acquire()
            task_result = self._results.pop()
        except IndexError:
            break
        finally:
            self._results_lock.release()

        # get the original host and task. We then assign them to the
        # TaskResult for use in callbacks/etc.
        original_host = get_original_host(task_result._host)
        found_task = iterator.get_original_task(original_host, task_result._task)
        # Shallow-copy the task and re-apply the fields captured at
        # execution time so callbacks see the post-templating values.
        original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
        original_task._parent = found_task._parent
        for (attr, val) in iteritems(task_result._task_fields):
            setattr(original_task, attr, val)
        task_result._host = original_host
        task_result._task = original_task

        # get the correct loop var for use later
        if original_task.loop_control:
            loop_var = original_task.loop_control.loop_var or 'item'
        else:
            loop_var = 'item'

        # send callbacks for 'non final' results (retry notices and
        # individual loop-item results); these don't advance host state.
        if '_ansible_retry' in task_result._result:
            self._tqm.send_callback('v2_runner_retry', task_result)
            continue
        elif '_ansible_item_result' in task_result._result:
            if task_result.is_failed() or task_result.is_unreachable():
                self._tqm.send_callback('v2_runner_item_on_failed', task_result)
            elif task_result.is_skipped():
                self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
            else:
                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)
                self._tqm.send_callback('v2_runner_item_on_ok', task_result)
            continue

        # Register the (internal-key-stripped) result under the requested
        # variable name for every host this task applies to.
        if original_task.register:
            host_list = self.get_task_hosts(iterator, original_host, original_task)

            clean_copy = strip_internal_keys(task_result._result)
            if 'invocation' in clean_copy:
                del clean_copy['invocation']

            for target_host in host_list:
                self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

        # all host status messages contain 2 entries: (msg, task_result)
        role_ran = False
        if task_result.is_failed():
            role_ran = True
            ignore_errors = original_task.ignore_errors
            if not ignore_errors:
                display.debug("marking %s as failed" % original_host.name)
                if original_task.run_once:
                    # if we're using run_once, we have to fail every host here
                    for h in self._inventory.get_hosts(iterator._play.hosts):
                        if h.name not in self._tqm._unreachable_hosts:
                            state, _ = iterator.get_next_task_for_host(h, peek=True)
                            iterator.mark_host_failed(h)
                            state, new_task = iterator.get_next_task_for_host(h, peek=True)
                else:
                    iterator.mark_host_failed(original_host)

                # increment the failed count for this host
                self._tqm._stats.increment('failures', original_host.name)

                # grab the current state and if we're iterating on the rescue portion
                # of a block then we save the failed task in a special var for use
                # within the rescue/always
                state, _ = iterator.get_next_task_for_host(original_host, peek=True)

                if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
                    self._tqm._failed_hosts[original_host.name] = True

                if state and state.run_state == iterator.ITERATING_RESCUE:
                    self._variable_manager.set_nonpersistent_facts(
                        original_host,
                        dict(
                            ansible_failed_task=original_task.serialize(),
                            ansible_failed_result=task_result._result,
                        ),
                    )
            else:
                # Failure is being ignored: count it as ok (and changed,
                # if applicable) instead of a failure.
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)
            self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
        elif task_result.is_unreachable():
            self._tqm._unreachable_hosts[original_host.name] = True
            iterator._play._removed_hosts.append(original_host.name)
            self._tqm._stats.increment('dark', original_host.name)
            self._tqm.send_callback('v2_runner_on_unreachable', task_result)
        elif task_result.is_skipped():
            self._tqm._stats.increment('skipped', original_host.name)
            self._tqm.send_callback('v2_runner_on_skipped', task_result)
        else:
            role_ran = True

            if original_task.loop:
                # this task had a loop, and has more than one result, so
                # loop over all of them instead of a single result
                result_items = task_result._result.get('results', [])
            else:
                result_items = [ task_result._result ]

            for result_item in result_items:
                if '_ansible_notify' in result_item:
                    if task_result.is_changed():
                        # The shared dictionary for notified handlers is a proxy, which
                        # does not detect when sub-objects within the proxy are modified.
                        # So, per the docs, we reassign the list so the proxy picks up and
                        # notifies all other threads
                        for handler_name in result_item['_ansible_notify']:
                            found = False
                            # Find the handler using the above helper. First we look up the
                            # dependency chain of the current task (if it's from a role), otherwise
                            # we just look through the list of handlers in the current play/all
                            # roles and use the first one that matches the notify name
                            target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
                            if target_handler is not None:
                                found = True
                                if original_host not in self._notified_handlers[target_handler._uuid]:
                                    self._notified_handlers[target_handler._uuid].append(original_host)
                                    # FIXME: should this be a callback?
                                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                            else:
                                # As there may be more than one handler with the notified name as the
                                # parent, so we just keep track of whether or not we found one at all
                                for target_handler_uuid in self._notified_handlers:
                                    target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers)
                                    if target_handler and parent_handler_match(target_handler, handler_name):
                                        self._notified_handlers[target_handler._uuid].append(original_host)
                                        display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))
                                        found = True

                            # Handlers can also subscribe via 'listen'.
                            if handler_name in self._listening_handlers:
                                for listening_handler_uuid in self._listening_handlers[handler_name]:
                                    listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers)
                                    if listening_handler is not None:
                                        found = True
                                    else:
                                        continue
                                    if original_host not in self._notified_handlers[listening_handler._uuid]:
                                        self._notified_handlers[listening_handler._uuid].append(original_host)
                                        display.vv("NOTIFIED HANDLER %s" % (listening_handler.get_name(),))

                            # and if none were found, then we raise an error
                            if not found:
                                msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name
                                if C.ERROR_ON_MISSING_HANDLER:
                                    raise AnsibleError(msg)
                                else:
                                    display.warning(msg)

                if 'add_host' in result_item:
                    # this task added a new host (add_host module)
                    new_host_info = result_item.get('add_host', dict())
                    self._add_host(new_host_info, iterator)
                elif 'add_group' in result_item:
                    # this task added a new group (group_by module)
                    self._add_group(original_host, result_item)

                if 'ansible_facts' in result_item:
                    # if delegated fact and we are delegating facts, we need to change target host for them
                    if original_task.delegate_to is not None and original_task.delegate_facts:
                        item = result_item.get(loop_var, None)
                        if item is not None:
                            # NOTE(review): 'task_vars' is not defined
                            # anywhere in this scope — this line would
                            # raise NameError if reached; confirm against
                            # the upstream fix for delegated facts.
                            task_vars[loop_var] = item
                        host_name = original_task.delegate_to
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = original_host

                    host_list = self.get_task_hosts(iterator, actual_host, original_task)
                    if original_task.action == 'include_vars':
                        for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                            # find the host we're actually referring too here, which may
                            # be a host that is not really in inventory at all
                            for target_host in host_list:
                                self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    else:
                        for target_host in host_list:
                            if original_task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                            else:
                                self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
                    # Custom stats from set_stats: per-host by default,
                    # aggregated or overwritten per the module options.
                    if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
                        host_list = self.get_task_hosts(iterator, original_host, original_task)
                    else:
                        host_list = [None]

                    data = result_item['ansible_stats']['data']
                    aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
                    for myhost in host_list:
                        for k in data.keys():
                            if aggregate:
                                self._tqm._stats.update_custom_stats(k, data[k], myhost)
                            else:
                                self._tqm._stats.set_custom_stats(k, data[k], myhost)

            if 'diff' in task_result._result:
                if self._diff:
                    self._tqm.send_callback('v2_on_file_diff', task_result)

            if original_task.action not in ['include', 'include_role']:
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)

            # finally, send the ok for this task
            self._tqm.send_callback('v2_runner_on_ok', task_result)

        self._pending_results -= 1
        if original_host.name in self._blocked_hosts:
            del self._blocked_hosts[original_host.name]

        # If this is a role task, mark the parent role as being run (if
        # the task was ok or failed, but not skipped or unreachable)
        if original_task._role is not None and role_ran: #TODO: and original_task.action != 'include_role':?
            # lookup the role in the ROLE_CACHE to make sure we're dealing
            # with the correct object and mark it as executed
            for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                if role_obj._uuid == original_task._role._uuid:
                    role_obj._had_task_run[original_host.name] = True

        ret_results.append(task_result)

        # 'or' binds looser than 'and': stop on one_pass, or once
        # max_passes results have been handled.
        if one_pass or max_passes is not None and (cur_pass+1) >= max_passes:
            break

        cur_pass += 1

    return ret_results
def run(self):
    '''
    The main thread execution, which reads from the results queue
    indefinitely and sends callbacks/etc. when results are received.

    Each processed result is forwarded to the main process as a tuple
    whose first element names the action ('host_task_ok', 'add_host',
    'notify_handler', ...) via self._send_result.
    '''

    if HAS_ATFORK:
        atfork()

    while True:
        try:
            result = self._read_worker_result()
            if result is None:
                # Nothing available; brief sleep avoids busy-spinning.
                time.sleep(0.0001)
                continue

            # Internal _ansible_* keys (and invocation) are stripped from
            # the copy used for variable registration.
            clean_copy = strip_internal_keys(result._result)
            if 'invocation' in clean_copy:
                del clean_copy['invocation']

            # if this task is registering a result, do it now
            if result._task.register:
                self._send_result(('register_host_var', result._host, result._task, clean_copy))

            # send callbacks, execute other options based on the result status
            # TODO: this should all be cleaned up and probably moved to a sub-function.
            #       the fact that this sometimes sends a TaskResult and other times
            #       sends a raw dictionary back may be confusing, but the result vs.
            #       results implementation for tasks with loops should be cleaned up
            #       better than this
            if result.is_unreachable():
                self._send_result(('host_unreachable', result))
            elif result.is_failed():
                self._send_result(('host_task_failed', result))
            elif result.is_skipped():
                self._send_result(('host_task_skipped', result))
            else:
                if result._task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = result._result['results']
                else:
                    result_items = [ result._result ]

                for result_item in result_items:
                    # if this task is notifying a handler, do it now
                    if '_ansible_notify' in result_item:
                        if result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for notify in result_item['_ansible_notify']:
                                self._send_result(('notify_handler', result, notify))

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        self._send_result(('add_host', result_item))
                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._send_result(('add_group', result._host, result_item))
                    elif 'ansible_facts' in result_item:
                        # if this task is registering facts, do that now
                        item = result_item.get('item', None)
                        if result._task.action == 'include_vars':
                            for (key, value) in iteritems(result_item['ansible_facts']):
                                self._send_result(('set_host_var', result._host, result._task, item, key, value))
                        else:
                            self._send_result(('set_host_facts', result._host, result._task, item, result_item['ansible_facts']))

                # finally, send the ok for this task
                self._send_result(('host_task_ok', result))

        except queue.Empty:
            # No result ready yet; loop again.
            pass
        except (KeyboardInterrupt, IOError, EOFError):
            # Interrupted or the pipe to the main process went away.
            break
        except:
            # TODO: we should probably send a proper callback here instead of
            #       simply dumping a stack trace on the screen
            traceback.print_exc()
            break
def run(self):
    '''
    The main thread execution, which reads from the results queue
    indefinitely and sends callbacks/etc. when results are received.

    Newer variant: additionally forwards 'non final' results (retries
    and individual loop-item results) as dedicated v2_* actions and
    resolves a custom loop_var when registering facts.
    '''

    if HAS_ATFORK:
        atfork()

    while True:
        try:
            result = self._read_worker_result()
            if result is None:
                # Nothing available; brief sleep avoids busy-spinning.
                time.sleep(0.0001)
                continue

            # send callbacks for 'non final' results
            if '_ansible_retry' in result._result:
                self._send_result(('v2_runner_retry', result))
                continue
            elif '_ansible_item_result' in result._result:
                if result.is_failed() or result.is_unreachable():
                    self._send_result(('v2_runner_item_on_failed', result))
                elif result.is_skipped():
                    self._send_result(('v2_runner_item_on_skipped', result))
                else:
                    self._send_result(('v2_runner_item_on_ok', result))
                    if 'diff' in result._result:
                        self._send_result(('v2_on_file_diff', result))
                continue

            # Internal _ansible_* keys (and invocation) are stripped from
            # the copy used for variable registration.
            clean_copy = strip_internal_keys(result._result)
            if 'invocation' in clean_copy:
                del clean_copy['invocation']

            # if this task is registering a result, do it now
            if result._task.register:
                self._send_result(('register_host_var', result._host, result._task, clean_copy))

            # send callbacks, execute other options based on the result status
            # TODO: this should all be cleaned up and probably moved to a sub-function.
            #       the fact that this sometimes sends a TaskResult and other times
            #       sends a raw dictionary back may be confusing, but the result vs.
            #       results implementation for tasks with loops should be cleaned up
            #       better than this
            if result.is_unreachable():
                self._send_result(('host_unreachable', result))
            elif result.is_failed():
                self._send_result(('host_task_failed', result))
            elif result.is_skipped():
                self._send_result(('host_task_skipped', result))
            else:
                if result._task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = result._result.get('results', [])
                else:
                    result_items = [ result._result ]

                for result_item in result_items:
                    # if this task is notifying a handler, do it now
                    if '_ansible_notify' in result_item:
                        if result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for notify in result_item['_ansible_notify']:
                                self._send_result(('notify_handler', result, notify))

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        self._send_result(('add_host', result_item))
                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._send_result(('add_group', result._host, result_item))
                    elif 'ansible_facts' in result_item:
                        # if this task is registering facts, do that now;
                        # honor a custom loop_var from loop_control if set.
                        loop_var = 'item'
                        if hasattr(result._task,'loop_control') and result._task.loop_control:
                            loop_var = result._task.loop_control.get('loop_var') or 'item'
                        item = result_item.get(loop_var, None)
                        if result._task.action == 'include_vars':
                            for (key, value) in iteritems(result_item['ansible_facts']):
                                self._send_result(('set_host_var', result._host, result._task, item, key, value))
                        else:
                            self._send_result(('set_host_facts', result._host, result._task, item, result_item['ansible_facts']))

                # finally, send the ok for this task
                self._send_result(('host_task_ok', result))

        except queue.Empty:
            # No result ready yet; loop again.
            pass
        except (KeyboardInterrupt, SystemExit, IOError, EOFError):
            # Interrupted, shutting down, or the IPC pipe went away.
            break
        except:
            # TODO: we should probably send a proper callback here instead of
            #       simply dumping a stack trace on the screen
            traceback.print_exc()
            break
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    :param iterator: the play iterator, used to map results back to their
        original task/host and to update per-host failure state.
    :param one_pass: when True, drain at most one pass over the queue
        instead of looping until the queue stays empty.
    :returns: list of TaskResult objects processed during this call.
    :raises AnsibleError: when a notified handler (or handler listener)
        cannot be found in the known handlers.
    '''

    ret_results = []

    def get_original_host(host_name):
        # Map a (possibly deserialized) host name back to the real
        # inventory Host object, preferring the cache over a full lookup.
        host_name = to_unicode(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks(handler_name, handler_blocks):
        # Locate a handler task by name within the given handler blocks.
        # We template the name first, since handler names may contain
        # variables.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
        return None

    def parent_handler_match(target_handler, handler_name):
        # Walk up the _parent chain of a handler looking for an include
        # whose (templated) name matches handler_name. Returns True/False.
        if target_handler:
            if isinstance(target_handler, TaskInclude):
                try:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    target_handler_name = templar.template(target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    # same rationale as in search_handler_blocks: the name may
                    # rely on a variable that isn't defined for this host
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    passes = 0
    while not self._tqm._terminated:
        try:
            task_result = self._final_q.get(timeout=0.001)
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host, task_result._task)
            task_result._host = original_host
            task_result._task = original_task

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            if original_task.register:
                # register the (cleaned) result; with run_once the registered
                # variable is visible on every reachable host of the play
                if original_task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [original_host]

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                if not original_task.ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                # NOTE(review): the peek calls around mark_host_failed
                                # look vestigial (results are unused) — confirm before
                                # removing, in case peeking advances cached state
                                state, _ = iterator.get_next_task_for_host(h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # only add the host to the failed list officially if it has
                    # been failed by the iterator
                    if iterator.is_failed(original_host):
                        self._tqm._failed_hosts[original_host.name] = True
                        self._tqm._stats.increment('failures', original_host.name)
                    else:
                        # otherwise, we grab the current state and if we're iterating on
                        # the rescue portion of a block then we save the failed task in a
                        # special var for use within the rescue/always
                        state, _ = iterator.get_next_task_for_host(original_host, peek=True)
                        if state.run_state == iterator.ITERATING_RESCUE:
                            self._variable_manager.set_nonpersistent_facts(
                                original_host,
                                dict(
                                    ansible_failed_task=original_task.serialize(),
                                    ansible_failed_result=task_result._result,
                                ),
                            )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=original_task.ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                # Find the handler using the above helper. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                if handler_name in self._listening_handlers:
                                    for listening_handler_name in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                                        if listening_handler is None:
                                            raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)
                                        if original_host not in self._notified_handlers[listening_handler]:
                                            self._notified_handlers[listening_handler].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                                else:
                                    target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                                    if target_handler is not None:
                                        if original_host not in self._notified_handlers[target_handler]:
                                            self._notified_handlers[target_handler].append(original_host)
                                            # FIXME: should this be a callback?
                                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                    else:
                                        # As there may be more than one handler with the notified name as the
                                        # parent, so we just keep track of whether or not we found one at all
                                        found = False
                                        for target_handler in self._notified_handlers:
                                            if parent_handler_match(target_handler, handler_name):
                                                self._notified_handlers[target_handler].append(original_host)
                                                display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))
                                                found = True

                                        # and if none were found, then we raise an error
                                        if not found:
                                            raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    elif 'ansible_facts' in result_item:
                        # loops may rename the loop variable via loop_control
                        loop_var = 'item'
                        if original_task.loop_control:
                            loop_var = original_task.loop_control.loop_var or 'item'

                        item = result_item.get(loop_var, None)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually refering too here, which may
                                # be a host that is not really in inventory at all
                                if original_task.delegate_to is not None and original_task.delegate_facts:
                                    # BUGFIX: the original passed host=host, task=task —
                                    # both names are undefined here; use the resolved
                                    # original_host/original_task instead
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task)
                                    self.add_tqm_variables(task_vars, play=iterator._play)
                                    if item is not None:
                                        task_vars[loop_var] = item
                                    templar = Templar(loader=self._loader, variables=task_vars)
                                    host_name = templar.template(original_task.delegate_to)
                                    actual_host = self._inventory.get_host(host_name)
                                    if actual_host is None:
                                        # delegated-to host may not exist in inventory
                                        actual_host = Host(name=host_name)
                                else:
                                    actual_host = original_host

                                if original_task.run_once:
                                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                                else:
                                    host_list = [actual_host]

                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            if original_task.run_once:
                                host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                            else:
                                host_list = [original_host]

                            for target_host in host_list:
                                # set_fact results are non-persistent; module facts persist
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action != 'include':
                    self._tqm._stats.increment('ok', original_host.name)

                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

        except Queue.Empty:
            # a few empty polls in a row means the queue is drained for now
            passes += 1
            if passes > 2:
                break

        if one_pass:
            break

    return ret_results
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    :param iterator: the play iterator, used to map results back to their
        original task/host and to update per-host failure state.
    :param one_pass: when True, drain at most one pass over the queue
        instead of looping until the queue stays empty.
    :returns: list of TaskResult objects processed during this call.
    :raises AnsibleError: when a notified handler (or handler listener)
        cannot be found in the known handlers.
    '''

    ret_results = []

    def get_original_host(host_name):
        # Map a (possibly deserialized) host name back to the real
        # inventory Host object, preferring the cache over a full lookup.
        host_name = to_unicode(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks(handler_name, handler_blocks):
        # Locate a handler task by name within the given handler blocks.
        # The name is templated first, since handler names may contain
        # variables.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                handler_vars = self._variable_manager.get_vars(
                    loader=self._loader, play=iterator._play,
                    task=handler_task)
                templar = Templar(loader=self._loader,
                                  variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
        return None

    passes = 0
    while not self._tqm._terminated:
        try:
            task_result = self._final_q.get(timeout=0.001)
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host, task_result._task)
            task_result._host = original_host
            task_result._task = original_task

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            if original_task.register:
                # register the (cleaned) result; with run_once the registered
                # variable is visible on every reachable host of the play
                if original_task.run_once:
                    host_list = [
                        host for host in self._inventory.get_hosts(iterator._play.hosts)
                        if host.name not in self._tqm._unreachable_hosts
                    ]
                else:
                    host_list = [original_host]

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(
                        target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                if not original_task.ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here.
                        # IDIOM FIX: was a list comprehension used purely for its
                        # side effect; an explicit loop makes the intent clear.
                        for h in self._inventory.get_hosts(iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                iterator.mark_host_failed(h)
                    else:
                        iterator.mark_host_failed(original_host)

                    # only add the host to the failed list officially if it has
                    # been failed by the iterator
                    if iterator.is_failed(original_host):
                        self._tqm._failed_hosts[original_host.name] = True
                        self._tqm._stats.increment('failures', original_host.name)
                    else:
                        # otherwise, we grab the current state and if we're iterating on
                        # the rescue portion of a block then we save the failed task in a
                        # special var for use within the rescue/always
                        state, _ = iterator.get_next_task_for_host(original_host, peek=True)
                        if state.run_state == iterator.ITERATING_RESCUE:
                            self._variable_manager.set_nonpersistent_facts(
                                original_host,
                                dict(
                                    ansible_failed_task=original_task.serialize(),
                                    ansible_failed_result=task_result._result,
                                ),
                            )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                self._tqm.send_callback(
                    'v2_runner_on_failed',
                    task_result,
                    ignore_errors=original_task.ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                # Find the handler using the above helper. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                if handler_name in self._listening_handlers:
                                    for listening_handler_name in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks(
                                            listening_handler_name,
                                            iterator._play.handlers)
                                        if listening_handler is None:
                                            raise AnsibleError(
                                                "The requested handler listener '%s' was not found in any of the known handlers"
                                                % listening_handler_name)
                                        if original_host not in self._notified_handlers[listening_handler]:
                                            self._notified_handlers[listening_handler].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                                else:
                                    target_handler = search_handler_blocks(
                                        handler_name, iterator._play.handlers)
                                    if target_handler is None:
                                        raise AnsibleError(
                                            "The requested handler '%s' was not found in any of the known handlers"
                                            % handler_name)
                                    if target_handler in self._notified_handlers:
                                        if original_host not in self._notified_handlers[target_handler]:
                                            self._notified_handlers[target_handler].append(original_host)
                                            # FIXME: should this be a callback?
                                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                    else:
                                        raise AnsibleError(
                                            "The requested handler '%s' was found in neither the main handlers list nor the listening handlers list"
                                            % handler_name)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    elif 'ansible_facts' in result_item:
                        # loops may rename the loop variable via loop_control
                        loop_var = 'item'
                        if original_task.loop_control:
                            loop_var = original_task.loop_control.loop_var or 'item'

                        item = result_item.get(loop_var, None)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually refering too here, which may
                                # be a host that is not really in inventory at all
                                if original_task.delegate_to is not None and original_task.delegate_facts:
                                    # BUGFIX: the original passed host=host, task=task —
                                    # both names are undefined here; use the resolved
                                    # original_host/original_task instead
                                    task_vars = self._variable_manager.get_vars(
                                        loader=self._loader,
                                        play=iterator._play,
                                        host=original_host,
                                        task=original_task)
                                    self.add_tqm_variables(task_vars, play=iterator._play)
                                    if item is not None:
                                        task_vars[loop_var] = item
                                    templar = Templar(loader=self._loader, variables=task_vars)
                                    host_name = templar.template(original_task.delegate_to)
                                    actual_host = self._inventory.get_host(host_name)
                                    if actual_host is None:
                                        # delegated-to host may not exist in inventory
                                        actual_host = Host(name=host_name)
                                else:
                                    actual_host = original_host

                                if original_task.run_once:
                                    host_list = [
                                        host for host in self._inventory.get_hosts(iterator._play.hosts)
                                        if host.name not in self._tqm._unreachable_hosts
                                    ]
                                else:
                                    host_list = [actual_host]

                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            if original_task.run_once:
                                host_list = [
                                    host for host in self._inventory.get_hosts(iterator._play.hosts)
                                    if host.name not in self._tqm._unreachable_hosts
                                ]
                            else:
                                host_list = [original_host]

                            for target_host in host_list:
                                # set_fact results are non-persistent; module facts persist
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(
                                        target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(
                                        target_host, result_item['ansible_facts'].copy())

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action != 'include':
                    self._tqm._stats.increment('ok', original_host.name)

                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

        except Queue.Empty:
            # a few empty polls in a row means the queue is drained for now
            passes += 1
            if passes > 2:
                break

        if one_pass:
            break

    return ret_results