def clean_copy(self):
    """Return a new 'clean' TaskResult suitable for handing to callbacks."""
    # FIXME: clean task_fields, _task and _host copies
    clean = TaskResult(self._host, self._task, {}, self._task_fields)

    # statuses are already reflected on the event type; debug is verbose by
    # default to display vars, so its 'invocation' adds nothing and is dropped
    extra = ('invocation',) if clean._task and clean._task.action in ['debug'] else ()
    drop_keys = _IGNORE + extra

    if self._result.get('_ansible_no_log', False):
        clean._result = {
            "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
        }
    elif self._result:
        clean._result = deepcopy(self._result)

        # actually remove the keys selected above
        for key in drop_keys:
            if key in clean._result:
                del clean._result[key]

        # remove almost ALL internal keys, keep ones relevant to callback
        strip_internal_keys(clean._result, exceptions=('_ansible_verbose_always', '_ansible_item_label', '_ansible_no_log'))
    return clean
def clean_copy(self):
    """Return a new 'clean' TaskResult, censoring output when no_log is set."""
    # FIXME: clean task_fields, _task and _host copies
    clean = TaskResult(self._host, self._task, {}, self._task_fields)

    # statuses are already reflected on the event type; debug is verbose by
    # default to display vars, so there is no need to keep its invocation
    drop_keys = _IGNORE
    if clean._task and clean._task.action in ['debug']:
        drop_keys = drop_keys + ('invocation',)

    if self._result.get('_ansible_no_log', False):
        censored = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
        # carry over the whitelisted keys callbacks are still allowed to see
        for key in _PRESERVE:
            if key in self._result:
                censored[key] = self._result[key]
        clean._result = censored
    elif self._result:
        clean._result = deepcopy(self._result)

        # actually remove the keys selected above
        for key in drop_keys:
            if key in clean._result:
                del clean._result[key]

        # remove almost ALL internal keys, keep ones relevant to callback
        strip_internal_keys(clean._result, exceptions=('_ansible_verbose_always', '_ansible_item_label', '_ansible_no_log'))
    return clean
def clean_copy(self):
    """Return a new 'clean' TaskResult, scrubbed of internal and no_log data."""
    # FIXME: clean task_fields, _task and _host copies
    clean = TaskResult(self._host, self._task, {}, self._task_fields)

    # statuses are already reflected on the event type; debug is verbose by
    # default to display vars, so there is no need to keep its invocation
    drop_keys = (_IGNORE + ('invocation',)) if clean._task and clean._task.action in ['debug'] else _IGNORE

    # stash the sub-dict entries that must survive the cleanup below
    subset = {}
    for sub in _SUB_PRESERVE:
        if sub in self._result:
            subset[sub] = {}
            for key in _SUB_PRESERVE[sub]:
                if key in self._result[sub]:
                    subset[sub][key] = self._result[sub][key]

    # censor when the task itself sets no_log (boolean) or the result is flagged;
    # note the original and/or precedence: (isinstance and no_log) or flag
    if (isinstance(self._task.no_log, bool) and self._task.no_log) or self._result.get('_ansible_no_log', False):
        censored = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}

        # preserve full top-level keys callbacks are still allowed to see
        for key in _PRESERVE:
            if key in self._result:
                censored[key] = self._result[key]
        clean._result = censored
    elif self._result:
        clean._result = module_response_deepcopy(self._result)

        # actually remove the keys selected above
        for key in drop_keys:
            if key in clean._result:
                del clean._result[key]

        # remove almost ALL internal keys, keep ones relevant to callback
        strip_internal_keys(clean._result, exceptions=CLEAN_EXCEPTIONS)

    # re-apply the preserved sub-dict entries
    clean._result.update(subset)

    return clean
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
    """Serialize a task result dict to a JSON string for screen output.

    :param result: the raw task result dict (left unmodified)
    :param indent: JSON indentation; auto-set to 4 when verbose
    :param sort_keys: passed through to json.dumps
    :param keep_invocation: keep the 'invocation' key even at low verbosity
    :returns: JSON text of the abridged result
    """
    # deepcopy is stdlib; imported locally so this block is self-contained
    from copy import deepcopy

    if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
        indent = 4

    # All result keys starting with _ansible_ are internal, so remove them from
    # the result before we output anything.
    # BUGFIX: operate on a deep copy — strip_internal_keys() strips in place,
    # and the deletions below would otherwise mutate the caller's result dict.
    abridged_result = strip_internal_keys(deepcopy(result))

    # remove invocation unless specifically wanting it
    if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
        del abridged_result['invocation']

    # remove diff information from screen output
    if self._display.verbosity < 3 and 'diff' in result:
        del abridged_result['diff']

    # remove exception from screen output
    if 'exception' in abridged_result:
        del abridged_result['exception']

    return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
    """Serialize a task result dict to a JSON string for screen output."""
    verbose = result.get('_ansible_verbose_always') or self._display.verbosity > 2
    if not indent and verbose:
        indent = 4

    # All result keys starting with _ansible_ are internal; strip them from a
    # copy so the caller's dict stays intact.
    abridged_result = strip_internal_keys(module_response_deepcopy(result))

    # remove invocation unless specifically wanting it
    if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
        del abridged_result['invocation']

    # remove diff information from screen output
    if self._display.verbosity < 3 and 'diff' in result:
        del abridged_result['diff']

    # remove exception from screen output
    abridged_result.pop('exception', None)

    try:
        return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
    except TypeError:
        # Python3 bug: json.dumps raises when asked to sort keys of
        # non-homogenous types (https://bugs.python.org/issue25457), so
        # pre-sort into an OrderedDict and dump that without sorting.
        if not OrderedDict:
            raise
        presorted = OrderedDict(sorted(abridged_result.items(), key=to_text))
        return json.dumps(presorted, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=False)
def _load_result(self, result, status, **kwargs):
    """
    This method is called when an individual task instance on a single
    host completes. It is responsible for logging a single result to the
    database (via the ARA API client).
    """
    hostname = result._host.get_name()
    # Record the (timezone-aware, ISO-formatted) end time for this host's result.
    self.result_ended[hostname] = datetime.datetime.now(
        datetime.timezone.utc).isoformat()

    # Retrieve the host so we can associate the result to the host id
    host = self._get_or_create_host(hostname)

    # Strip internal _ansible_* keys from a copy so the original result
    # dict is left untouched.
    results = strip_internal_keys(module_response_deepcopy(result._result))

    # Round-trip through JSON to sort keys and convert Ansible types
    # to standard types
    try:
        jsonified = json.dumps(results,
                               cls=AnsibleJSONEncoder,
                               ensure_ascii=False,
                               sort_keys=True)
    except TypeError:
        # Python 3 can't sort non-homogenous keys.
        # https://bugs.python.org/issue25457
        jsonified = json.dumps(results,
                               cls=AnsibleJSONEncoder,
                               ensure_ascii=False,
                               sort_keys=False)
    results = json.loads(jsonified)

    # Sanitize facts
    if "ansible_facts" in results:
        for fact in self.ignored_facts:
            if fact in results["ansible_facts"]:
                self.log.debug("Ignoring fact: %s" % fact)
                results["ansible_facts"][
                    fact] = "Not saved by ARA as configured by 'ignored_facts'"

    # Post the result to the API server; fall back to the task's own start
    # time when no per-host start time was recorded.
    self.result = self.client.post(
        "/api/v1/results",
        playbook=self.playbook["id"],
        task=self.task["id"],
        host=host["id"],
        play=self.task["play"],
        content=results,
        status=status,
        started=self.result_started[hostname]
        if hostname in self.result_started else self.task["started"],
        ended=self.result_ended[hostname],
        changed=result._result.get("changed", False),
        # Note: ignore_errors might be None instead of a boolean
        ignore_errors=kwargs.get("ignore_errors", False) or False,
    )

    # Fact-gathering tasks also refresh the stored facts for the host.
    if self.task["action"] in ["setup", "gather_facts"
                               ] and "ansible_facts" in results:
        self.client.patch("/api/v1/hosts/%s" % host["id"],
                          facts=results["ansible_facts"])
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
    """Serialize a task result dict to a JSON string for screen output.

    :param result: the raw task result dict (left unmodified)
    :param indent: JSON indentation; auto-set to 4 when verbose
    :param sort_keys: passed through to json.dumps
    :param keep_invocation: keep the 'invocation' key even at low verbosity
    :returns: JSON text of the abridged result
    """
    # deepcopy is stdlib; imported locally so this block is self-contained
    from copy import deepcopy

    if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
        indent = 4

    # All result keys starting with _ansible_ are internal, so remove them from
    # the result before we output anything.
    # BUGFIX: operate on a deep copy — strip_internal_keys() strips in place,
    # and the deletions below would otherwise mutate the caller's result dict.
    abridged_result = strip_internal_keys(deepcopy(result))

    # remove invocation unless specifically wanting it
    if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
        del abridged_result['invocation']

    # remove diff information from screen output
    if self._display.verbosity < 3 and 'diff' in result:
        del abridged_result['diff']

    # remove exception from screen output
    if 'exception' in abridged_result:
        del abridged_result['exception']

    return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False, serialize=True):
    """Render a task result as JSON or YAML text, or return the cleaned dict.

    When ``serialize`` is False the abridged dict itself is returned so
    callbacks can post-process it or apply their own serialization.
    """
    # Options are absent when the callback neither declares them nor extends
    # result_format_callback; fall back to the historical defaults.
    try:
        result_format = self.get_option('result_format')
    except KeyError:
        result_format = 'json'
    try:
        pretty_results = self.get_option('pretty_results')
    except KeyError:
        pretty_results = None

    # Indent whenever any of these verbosity/prettiness signals is set.
    wants_indent = (
        result.get('_ansible_verbose_always')
        or (pretty_results is None and result_format != 'json')
        or pretty_results is True
        or self._display.verbosity > 2
    )
    if not indent and wants_indent:
        indent = 4
    if pretty_results is False:
        # pretty_results=False overrides any specified indentation
        indent = None

    # All result keys starting with _ansible_ are internal; strip them from a
    # copy so the caller's dict stays intact.
    abridged_result = strip_internal_keys(module_response_deepcopy(result))

    # remove invocation unless specifically wanting it
    if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
        del abridged_result['invocation']

    # remove diff information from screen output
    if self._display.verbosity < 3 and 'diff' in result:
        del abridged_result['diff']

    # remove exception from screen output
    if 'exception' in abridged_result:
        del abridged_result['exception']

    if not serialize:
        # Hand the cleaned dict back untouched for further modification or
        # custom serialization by the callback.
        return abridged_result

    if result_format == 'json':
        try:
            return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
        except TypeError:
            # Python3 bug: json.dumps raises when asked to sort keys of
            # non-homogenous types (https://bugs.python.org/issue25457);
            # pre-sort into an OrderedDict and dump that without sorting.
            if not OrderedDict:
                raise
            presorted = OrderedDict(sorted(abridged_result.items(), key=to_text))
            return json.dumps(presorted, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=False)
    elif result_format == 'yaml':
        # pretty_results=None is a sentinel meaning "default behavior", and
        # the default behavior for yaml is to prettify (lossy) results.
        lossy = pretty_results in (None, True)
        if lossy:
            # if we already have stdout, we don't need stdout_lines
            if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
                abridged_result['stdout_lines'] = '<omitted>'
            # if we already have stderr, we don't need stderr_lines
            if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
                abridged_result['stderr_lines'] = '<omitted>'
        dumped = yaml.dump(
            abridged_result,
            allow_unicode=True,
            Dumper=_AnsibleCallbackDumper(lossy=lossy),
            default_flow_style=False,
            indent=indent,
            # sort_keys=sort_keys  # This requires PyYAML>=5.1
        )
        return '\n%s' % textwrap.indent(dumped, ' ' * (indent or 4))
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    Returns the list of TaskResult objects processed during this call.
    '''

    ret_results = []

    def get_original_host(host_name):
        # Map a (possibly serialized) host name back to the inventory Host object.
        # FIXME: this should not need x2 _inventory
        host_name = to_text(host_name)
        if host_name in self._inventory.hosts:
            return self._inventory.hosts[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks_by_name(handler_name, handler_blocks):
        # Return the first handler task whose (templated) name matches, or None.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                if handler_task.name:
                    handler_vars = self._variable_manager.get_vars(
                        play=iterator._play, task=handler_task)
                    templar = Templar(loader=self._loader,
                                      variables=handler_vars)
                    try:
                        # first we check with the full result of get_name(), which may
                        # include the role name (if the handler is from a role). If that
                        # is not found, we resort to the simple name field, which doesn't
                        # have anything extra added to it.
                        target_handler_name = templar.template(
                            handler_task.name)
                        if target_handler_name == handler_name:
                            return handler_task
                        else:
                            target_handler_name = templar.template(
                                handler_task.get_name())
                            if target_handler_name == handler_name:
                                return handler_task
                    except (UndefinedError, AnsibleUndefinedVariable):
                        # We skip this handler due to the fact that it may be using
                        # a variable in the name that was conditionally included via
                        # set_fact or some other method, and we don't want to error
                        # out unnecessarily
                        continue
        return None

    def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
        # Return the handler task with the given uuid, or None.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                if handler_uuid == handler_task._uuid:
                    return handler_task
        return None

    def parent_handler_match(target_handler, handler_name):
        # Walk up the _parent chain looking for an include whose templated
        # name matches handler_name; templating errors are ignored.
        if target_handler:
            if isinstance(target_handler, (TaskInclude, IncludeRole)):
                try:
                    handler_vars = self._variable_manager.get_vars(
                        play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader,
                                      variables=handler_vars)
                    target_handler_name = templar.template(
                        target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(
                            target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable):
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    cur_pass = 0
    while True:
        # Pop the next queued result under the lock; an empty queue ends the loop.
        try:
            self._results_lock.acquire()
            task_result = self._results.popleft()
        except IndexError:
            break
        finally:
            self._results_lock.release()

        # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
        original_host = get_original_host(task_result._host)
        found_task = iterator.get_original_task(original_host,
                                                task_result._task)
        original_task = found_task.copy(exclude_parent=True,
                                        exclude_tasks=True)
        original_task._parent = found_task._parent
        original_task.from_attrs(task_result._task_fields)
        task_result._host = original_host
        task_result._task = original_task

        # get the correct loop var for use later
        # NOTE(review): loop_var is computed here but not referenced again in
        # this view of the function — possibly dead code; confirm before removal.
        if original_task.loop_control:
            loop_var = original_task.loop_control.loop_var or 'item'
        else:
            loop_var = 'item'

        # send callbacks for 'non final' results
        if '_ansible_retry' in task_result._result:
            self._tqm.send_callback('v2_runner_retry', task_result)
            continue
        elif '_ansible_item_result' in task_result._result:
            if task_result.is_failed() or task_result.is_unreachable():
                self._tqm.send_callback('v2_runner_item_on_failed',
                                        task_result)
            elif task_result.is_skipped():
                self._tqm.send_callback('v2_runner_item_on_skipped',
                                        task_result)
            else:
                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff',
                                                task_result)
                self._tqm.send_callback('v2_runner_item_on_ok', task_result)
            continue

        # store a cleaned copy under the task's register name for every
        # host the task applies to (e.g. run_once tasks hit multiple hosts)
        if original_task.register:
            host_list = self.get_task_hosts(iterator, original_host,
                                            original_task)

            clean_copy = strip_internal_keys(task_result._result)
            if 'invocation' in clean_copy:
                del clean_copy['invocation']

            for target_host in host_list:
                self._variable_manager.set_nonpersistent_facts(
                    target_host, {original_task.register: clean_copy})

        # all host status messages contain 2 entries: (msg, task_result)
        role_ran = False
        if task_result.is_failed():
            role_ran = True
            ignore_errors = original_task.ignore_errors
            if not ignore_errors:
                display.debug("marking %s as failed" % original_host.name)
                if original_task.run_once:
                    # if we're using run_once, we have to fail every host here
                    for h in self._inventory.get_hosts(iterator._play.hosts):
                        if h.name not in self._tqm._unreachable_hosts:
                            state, _ = iterator.get_next_task_for_host(
                                h, peek=True)
                            iterator.mark_host_failed(h)
                            state, new_task = iterator.get_next_task_for_host(
                                h, peek=True)
                else:
                    iterator.mark_host_failed(original_host)

                # increment the failed count for this host
                self._tqm._stats.increment('failures', original_host.name)

                # grab the current state and if we're iterating on the rescue portion
                # of a block then we save the failed task in a special var for use
                # within the rescue/always
                state, _ = iterator.get_next_task_for_host(original_host,
                                                           peek=True)

                if iterator.is_failed(
                        original_host
                ) and state and state.run_state == iterator.ITERATING_COMPLETE:
                    self._tqm._failed_hosts[original_host.name] = True

                if state and state.run_state == iterator.ITERATING_RESCUE:
                    self._variable_manager.set_nonpersistent_facts(
                        original_host,
                        dict(
                            ansible_failed_task=original_task.serialize(),
                            ansible_failed_result=task_result._result,
                        ),
                    )
            else:
                # failure with ignore_errors still counts as ok/changed
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result[
                        'changed']:
                    self._tqm._stats.increment('changed', original_host.name)
            self._tqm.send_callback('v2_runner_on_failed',
                                    task_result,
                                    ignore_errors=ignore_errors)
        elif task_result.is_unreachable():
            self._tqm._unreachable_hosts[original_host.name] = True
            iterator._play._removed_hosts.append(original_host.name)
            self._tqm._stats.increment('dark', original_host.name)
            self._tqm.send_callback('v2_runner_on_unreachable', task_result)
        elif task_result.is_skipped():
            self._tqm._stats.increment('skipped', original_host.name)
            self._tqm.send_callback('v2_runner_on_skipped', task_result)
        else:
            role_ran = True

            if original_task.loop:
                # this task had a loop, and has more than one result, so
                # loop over all of them instead of a single result
                result_items = task_result._result.get('results', [])
            else:
                result_items = [task_result._result]

            for result_item in result_items:
                if '_ansible_notify' in result_item:
                    if task_result.is_changed():
                        # The shared dictionary for notified handlers is a proxy, which
                        # does not detect when sub-objects within the proxy are modified.
                        # So, per the docs, we reassign the list so the proxy picks up and
                        # notifies all other threads
                        for handler_name in result_item['_ansible_notify']:
                            found = False
                            # Find the handler using the above helper. First we look up the
                            # dependency chain of the current task (if it's from a role), otherwise
                            # we just look through the list of handlers in the current play/all
                            # roles and use the first one that matches the notify name
                            target_handler = search_handler_blocks_by_name(
                                handler_name, iterator._play.handlers)
                            if target_handler is not None:
                                found = True
                                if target_handler._uuid not in self._notified_handlers:
                                    self._notified_handlers[
                                        target_handler._uuid] = []
                                if original_host not in self._notified_handlers[
                                        target_handler._uuid]:
                                    self._notified_handlers[
                                        target_handler._uuid].append(
                                            original_host)
                                    self._tqm.send_callback(
                                        'v2_playbook_on_notify',
                                        target_handler, original_host)
                            else:
                                # As there may be more than one handler with the notified name as the
                                # parent, so we just keep track of whether or not we found one at all
                                for target_handler_uuid in self._notified_handlers:
                                    target_handler = search_handler_blocks_by_uuid(
                                        target_handler_uuid,
                                        iterator._play.handlers)
                                    if target_handler and parent_handler_match(
                                            target_handler, handler_name):
                                        found = True
                                        if original_host not in self._notified_handlers[
                                                target_handler._uuid]:
                                            self._notified_handlers[
                                                target_handler.
                                                _uuid].append(
                                                    original_host)
                                            self._tqm.send_callback(
                                                'v2_playbook_on_notify',
                                                target_handler,
                                                original_host)

                            # handlers listening on this notify name are notified too
                            if handler_name in self._listening_handlers:
                                for listening_handler_uuid in self._listening_handlers[
                                        handler_name]:
                                    listening_handler = search_handler_blocks_by_uuid(
                                        listening_handler_uuid,
                                        iterator._play.handlers)
                                    if listening_handler is not None:
                                        found = True
                                    else:
                                        continue
                                    if original_host not in self._notified_handlers[
                                            listening_handler._uuid]:
                                        self._notified_handlers[
                                            listening_handler.
                                            _uuid].append(original_host)
                                        self._tqm.send_callback(
                                            'v2_playbook_on_notify',
                                            listening_handler,
                                            original_host)

                            # and if none were found, then we raise an error
                            if not found:
                                msg = (
                                    "The requested handler '%s' was not found in either the main handlers list nor in the listening "
                                    "handlers list" % handler_name)
                                if C.ERROR_ON_MISSING_HANDLER:
                                    raise AnsibleError(msg)
                                else:
                                    display.warning(msg)

                if 'add_host' in result_item:
                    # this task added a new host (add_host module)
                    new_host_info = result_item.get('add_host', dict())
                    self._add_host(new_host_info, iterator)
                elif 'add_group' in result_item:
                    # this task added a new group (group_by module)
                    self._add_group(original_host, result_item)

                if 'ansible_facts' in result_item:
                    # if delegated fact and we are delegating facts, we need to change target host for them
                    if original_task.delegate_to is not None and original_task.delegate_facts:
                        host_list = self.get_delegated_hosts(
                            result_item, original_task)
                    else:
                        host_list = self.get_task_hosts(
                            iterator, original_host, original_task)

                    if original_task.action == 'include_vars':
                        for (var_name, var_value) in iteritems(
                                result_item['ansible_facts']):
                            # find the host we're actually referring too here, which may
                            # be a host that is not really in inventory at all
                            for target_host in host_list:
                                self._variable_manager.set_host_variable(
                                    target_host, var_name, var_value)
                    else:
                        cacheable = result_item.pop(
                            '_ansible_facts_cacheable', False)
                        for target_host in host_list:
                            # set_fact results go to the non-persistent store
                            # unless explicitly marked cacheable
                            if not original_task.action == 'set_fact' or cacheable:
                                self._variable_manager.set_host_facts(
                                    target_host,
                                    result_item['ansible_facts'].copy())
                            if original_task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(
                                    target_host,
                                    result_item['ansible_facts'].copy())

                # custom stats emitted via the set_stats module
                if 'ansible_stats' in result_item and 'data' in result_item[
                        'ansible_stats'] and result_item['ansible_stats'][
                            'data']:

                    if 'per_host' not in result_item[
                            'ansible_stats'] or result_item[
                                'ansible_stats']['per_host']:
                        host_list = self.get_task_hosts(
                            iterator, original_host, original_task)
                    else:
                        host_list = [None]

                    data = result_item['ansible_stats']['data']
                    aggregate = 'aggregate' in result_item[
                        'ansible_stats'] and result_item['ansible_stats'][
                            'aggregate']
                    for myhost in host_list:
                        for k in data.keys():
                            if aggregate:
                                self._tqm._stats.update_custom_stats(
                                    k, data[k], myhost)
                            else:
                                self._tqm._stats.set_custom_stats(
                                    k, data[k], myhost)

            if 'diff' in task_result._result:
                if self._diff:
                    self._tqm.send_callback('v2_on_file_diff', task_result)

            # includes do not count toward the ok/changed stats
            if not isinstance(original_task, TaskInclude):
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result[
                        'changed']:
                    self._tqm._stats.increment('changed', original_host.name)

            # finally, send the ok for this task
            self._tqm.send_callback('v2_runner_on_ok', task_result)

        self._pending_results -= 1
        if original_host.name in self._blocked_hosts:
            del self._blocked_hosts[original_host.name]

        # If this is a role task, mark the parent role as being run (if
        # the task was ok or failed, but not skipped or unreachable)
        if original_task._role is not None and role_ran:  # TODO: and original_task.action != 'include_role':?
            # lookup the role in the ROLE_CACHE to make sure we're dealing
            # with the correct object and mark it as executed
            for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[
                    original_task._role._role_name]):
                if role_obj._uuid == original_task._role._uuid:
                    role_obj._had_task_run[original_host.name] = True

        ret_results.append(task_result)

        # stop early when limited to a single pass or a pass budget
        if one_pass or max_passes is not None and (cur_pass +
                                                   1) >= max_passes:
            break

        cur_pass += 1

    return ret_results
def _load_result(self, result, status, **kwargs):
    """
    This method is called when an individual task instance on a single
    host completes. It is responsible for logging a single result to the
    database (via the ARA API client).
    """
    hostname = result._host.get_name()
    # Record the (timezone-aware, ISO-formatted) end time for this host's result.
    self.result_ended[hostname] = datetime.datetime.now(
        datetime.timezone.utc).isoformat()

    # Retrieve the host so we can associate the result to the host id
    host = self._get_or_create_host(hostname)

    # If the task was delegated to another host, retrieve that too.
    # Since a single task can be delegated to multiple hosts (ex: looping on a host group and using delegate_to)
    # this must be a list of hosts.
    delegated_to = []

    # The value of result._task.delegate_to doesn't get templated if the task was skipped
    # https://github.com/ansible/ansible/issues/75339#issuecomment-888724838
    if result._task.delegate_to and status != "skipped":
        # the first 36 characters of the uuid identify the task in the cache
        task_uuid = str(result._task._uuid[:36])
        if task_uuid in self.delegation_cache:
            for delegated in self.delegation_cache[task_uuid]:
                delegated_to.append(self._get_or_create_host(delegated))
        else:
            delegated_to.append(
                self._get_or_create_host(result._task.delegate_to))

    # Retrieve the task so we can associate the result to the task id
    task = self._get_or_create_task(result._task)

    # Strip internal _ansible_* keys from a copy so the original result
    # dict is left untouched.
    results = strip_internal_keys(module_response_deepcopy(result._result))

    # Round-trip through JSON to sort keys and convert Ansible types
    # to standard types
    try:
        jsonified = json.dumps(results,
                               cls=AnsibleJSONEncoder,
                               ensure_ascii=False,
                               sort_keys=True)
    except TypeError:
        # Python 3 can't sort non-homogenous keys.
        # https://bugs.python.org/issue25457
        jsonified = json.dumps(results,
                               cls=AnsibleJSONEncoder,
                               ensure_ascii=False,
                               sort_keys=False)
    results = json.loads(jsonified)

    # Sanitize facts
    if "ansible_facts" in results:
        for fact in self.ignored_facts:
            if fact in results["ansible_facts"]:
                self.log.debug("Ignoring fact: %s" % fact)
                results["ansible_facts"][
                    fact] = "Not saved by ARA as configured by 'ignored_facts'"

    # Post the result to the API server; fall back to the task's own start
    # time when no per-host start time was recorded.
    self.result = self.client.post(
        "/api/v1/results",
        playbook=self.playbook["id"],
        task=task["id"],
        host=host["id"],
        delegated_to=[h["id"] for h in delegated_to],
        play=task["play"],
        content=results,
        status=status,
        started=self.result_started[hostname]
        if hostname in self.result_started else task["started"],
        ended=self.result_ended[hostname],
        changed=result._result.get("changed", False),
        # Note: ignore_errors might be None instead of a boolean
        ignore_errors=kwargs.get("ignore_errors", False) or False,
    )

    # Fact-gathering tasks also refresh the stored facts for the host.
    if task["action"] in ["setup", "gather_facts"
                          ] and "ansible_facts" in results:
        self.client.patch("/api/v1/hosts/%s" % host["id"],
                          facts=results["ansible_facts"])