def run(self, tmp=None, task_vars=None):
    """Please refer to the Ansible API documentation for more details on ActionModules."""
    if task_vars is None:
        task_vars = dict()
    result = super(ActionModule, self).run(tmp, task_vars)
    facts = dict()

    self._validate_input_data()

    output = []
    # iterate under a distinct name so the loop does not shadow the 'tmp' parameter
    for item_result in self._task.args["current_elements_hash"]['results']:
        if 'skipped' in item_result and item_result['skipped']:
            continue
        for entry in item_result['stdout_lines']:
            if entry in self._task.args['desired_elements_list']:
                continue
            output_element = {'item_key': item_result['item'], 'item_val': entry}
            display.vv("Adding {} to output".format(output_element))
            output.append(output_element)

    facts[self._task.args['output_variable_name']] = output
    result['changed'] = False
    result['ansible_facts'] = facts
    return result

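# A minimal sketch of the inputs the action plugin above expects, assuming
# (hypothetically) that current_elements_hash is the registered result of a
# looped command task; all names and values here are illustrative only.
example_args = {
    'current_elements_hash': {
        'results': [
            {'item': 'db1', 'stdout_lines': ['alpha', 'beta']},
            {'item': 'db2', 'skipped': True},
        ],
    },
    'desired_elements_list': ['alpha'],
    'output_variable_name': 'extra_elements',
}
# With these inputs, the loop above would publish the fact:
#   extra_elements == [{'item_key': 'db1', 'item_val': 'beta'}]
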
def run(self, tmp=None, task_vars=None):
    display.v("a log")
    display.vv("Kind of verbose")
    display.vvv("Verbose")
    display.vvvv("Lookout!")
    display.verbose("Super custom verbosity", caplevel=6)
    return {'msg': 'done'}

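# For context: Display only prints a message when its verbosity is at least the
# method's level, so running with -vv shows v() and vv() but suppresses vvv().
# A standalone sketch, assuming Display is constructed directly (verbosity is
# normally derived from the -v command-line flags rather than set this way):
from ansible.utils.display import Display

display = Display(verbosity=2)
display.v("shown at -v and above")
display.vv("shown at -vv and above")
display.vvv("suppressed at verbosity 2")
display.verbose("shown only at verbosity 6 and above", caplevel=6)
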
def run(self):
    """Run the ansible command

    Subclasses must implement this method. It does the actual work of
    running an Ansible command.
    """
    display.vv(to_text(self.parser.get_version()))

    if C.CONFIG_FILE:
        display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
    else:
        display.v(u"No config file found; using defaults")

    # warn about deprecated config options
    for deprecated in C.config.DEPRECATED:
        name = deprecated[0]
        why = deprecated[1]['why']
        if 'alternatives' in deprecated[1]:
            alt = ', use %s instead' % deprecated[1]['alternatives']
        else:
            alt = ''
        ver = deprecated[1]['version']
        display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)

    # warn about typing issues with configuration entries
    for unable in C.config.UNABLE:
        display.warning("Unable to set correct type for configuration entry: %s" % unable)

def exec_command(self, *args, **kwargs):
    """
    Wrapper around _exec_command to retry in the case of an ssh failure

    Will retry if:
    * an exception is caught
    * ssh returns 255

    Will not retry if:
    * remaining_tries is < 2
    * retries limit reached
    """
    remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
    cmd_summary = "%s..." % args[0]
    for attempt in range(remaining_tries):
        try:
            return_tuple = self._exec_command(*args, **kwargs)
            # 0 = success
            # 1-254 = remote command return code
            # 255 = failure from the ssh command itself
            if return_tuple[0] != 255:
                break
            else:
                raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
        except Exception as e:
            # Exception already covers AnsibleConnectionFailure; the isinstance
            # check below picks the right message for each case
            if attempt == remaining_tries - 1:
                raise
            else:
                pause = 2 ** attempt - 1
                if pause > 30:
                    pause = 30
                if isinstance(e, AnsibleConnectionFailure):
                    msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
                else:
                    msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
                display.vv(msg, host=self.host)
                time.sleep(pause)
                continue
    return return_tuple

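# The pause schedule above is exponential backoff capped at 30 seconds; a quick
# standalone check of the values it produces per attempt:
for attempt in range(8):
    pause = min(2 ** attempt - 1, 30)
    print(attempt, pause)
# attempt: 0  1  2  3  4   5   6   7
# pause:   0  1  3  7  15  30  30  30
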
def run(self, tmp=None, task_vars=None):
    self._supports_check_mode = True

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    # Parse out any hostname:port patterns
    new_name = self._task.args.get('name', self._task.args.get('hostname', self._task.args.get('host', None)))
    if new_name is None:
        result['failed'] = True
        result['msg'] = 'name or hostname arg needs to be provided'
        return result

    display.vv("creating host via 'add_host': hostname=%s" % new_name)

    try:
        name, port = parse_address(new_name, allow_ranges=False)
    except Exception:
        # not a parsable hostname, but might still be usable
        name = new_name
        port = None

    if port:
        self._task.args['ansible_ssh_port'] = port

    groups = self._task.args.get('groupname', self._task.args.get('groups', self._task.args.get('group', '')))
    # add it to the group if that was specified
    new_groups = []
    if groups:
        if isinstance(groups, list):
            group_list = groups
        elif isinstance(groups, string_types):
            group_list = groups.split(",")
        else:
            raise AnsibleError("Groups must be specified as a list.", obj=self._task)

        for group_name in group_list:
            if group_name not in new_groups:
                new_groups.append(group_name.strip())

    # Add any variables to the new_host
    host_vars = dict()
    special_args = frozenset(('name', 'hostname', 'groupname', 'groups'))
    for k in self._task.args.keys():
        if k not in special_args:
            host_vars[k] = self._task.args[k]

    result['changed'] = True
    result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars)
    return result

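# For reference, a sketch of how parse_address (from
# ansible.parsing.utils.addresses) behaves in the call above; the hostnames
# are illustrative:
from ansible.parsing.utils.addresses import parse_address

print(parse_address("web1.example.com:2222", allow_ranges=False))  # ('web1.example.com', 2222)
print(parse_address("web1.example.com", allow_ranges=False))       # ('web1.example.com', None)
# Anything it cannot parse raises, which is why the plugin above falls back to
# using the raw name with no port.
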
def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    logrotate_conf_d = self._templar.template(task_vars['logrotate_conf_d'])
    display.vv("logrotate_conf_d: %s" % logrotate_conf_d)

    if 'config_dir' not in self._task.args:
        self._task.args['config_dir'] = logrotate_conf_d

    result.update(self._execute_module(module_name='logrotate', module_args=self._task.args, task_vars=task_vars))
    return result

def wrapped(self, *args, **kwargs):
    remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
    cmd_summary = "%s..." % args[0]
    for attempt in range(remaining_tries):
        cmd = args[0]
        if attempt != 0 and self._play_context.password and isinstance(cmd, list):
            # If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
            self.sshpass_pipe = os.pipe()
            cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')

        try:
            try:
                return_tuple = func(self, *args, **kwargs)
                display.vvv(return_tuple, host=self.host)
                # 0 = success
                # 1-254 = remote command return code
                # 255 = failure from the ssh command itself
            except AnsibleControlPersistBrokenPipeError:
                # Retry one more time because of the ControlPersist broken pipe (see #16731)
                display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
                return_tuple = func(self, *args, **kwargs)

            if return_tuple[0] != 255:
                break
            else:
                raise AnsibleConnectionFailure("Failed to connect to the host via ssh: %s" % to_native(return_tuple[2]))
        except Exception as e:
            # Exception already covers AnsibleConnectionFailure; the isinstance
            # check below picks the right message for each case
            if attempt == remaining_tries - 1:
                raise
            else:
                pause = 2 ** attempt - 1
                if pause > 30:
                    pause = 30
                if isinstance(e, AnsibleConnectionFailure):
                    msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
                else:
                    msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
                display.vv(msg, host=self.host)
                time.sleep(pause)
                continue
    return return_tuple

def _validate_input_data(self):
    msg = []
    for key in self._MODULE_ARGUMENTS:
        if key not in self._task.args:
            msg.append("{0} is a mandatory argument and it's missing".format(key))

    msg.extend(self._validate_output_variable_name(self._templar.template(self._task.args["output_variable_name"])))
    msg.extend(self._validate_elements_list(self._templar.template(self._task.args["desired_elements_list"])))
    msg.extend(self._validate_elements_hash(self._templar.template(self._task.args["current_elements_hash"])))

    if msg:
        txt = ', '.join(msg) + '.'
        raise errors.AnsibleError(txt)

    for key in self._MODULE_ARGUMENTS:
        display.vv("{0}: {1}".format(key, self._task.args[key]))

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    if self._play_context.check_mode:
        result['skipped'] = True
        result['msg'] = 'check mode not supported for this module'
        return result

    # Parse out any hostname:port patterns
    new_name = self._task.args.get('name', self._task.args.get('hostname', None))
    display.vv("creating host via 'add_host': hostname=%s" % new_name)

    try:
        name, port = parse_address(new_name, allow_ranges=False)
    except Exception:
        # not a parsable hostname, but might still be usable
        name = new_name
        port = None

    if port:
        self._task.args['ansible_ssh_port'] = port

    groups = self._task.args.get('groupname', self._task.args.get('groups', self._task.args.get('group', '')))
    # add it to the group if that was specified
    new_groups = []
    if groups:
        for group_name in groups.split(","):
            if group_name not in new_groups:
                new_groups.append(group_name.strip())

    # Add any variables to the new_host
    host_vars = dict()
    special_args = frozenset(('name', 'hostname', 'groupname', 'groups'))
    for k in self._task.args.keys():
        if k not in special_args:
            host_vars[k] = self._task.args[k]

    result['changed'] = True
    result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars)
    return result

def run(self):
    '''
    Run the given playbook, based on the settings in the play which
    may limit the runs to serialized groups, etc.
    '''

    result = 0
    entrylist = []
    entry = {}
    try:
        for playbook_path in self._playbooks:
            pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
            # FIXME: move out of inventory
            self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

            if self._tqm is None:  # we are doing a listing
                entry = {'playbook': playbook_path}
                entry['plays'] = []
            else:
                # make sure the tqm has callbacks loaded
                self._tqm.load_callbacks()
                self._tqm.send_callback('v2_playbook_on_start', pb)

            i = 1
            plays = pb.get_plays()
            display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

            for play in plays:
                if play._included_path is not None:
                    self._loader.set_basedir(play._included_path)
                else:
                    self._loader.set_basedir(pb._basedir)

                # clear any filters which may have been applied to the inventory
                self._inventory.remove_restriction()

                # Allow variables to be used in vars_prompt fields.
                all_vars = self._variable_manager.get_vars(play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                setattr(play, 'vars_prompt', templar.template(play.vars_prompt))

                if play.vars_prompt:
                    for var in play.vars_prompt:
                        vname = var['name']
                        prompt = var.get("prompt", vname)
                        default = var.get("default", None)
                        private = boolean(var.get("private", True))
                        confirm = boolean(var.get("confirm", False))
                        encrypt = var.get("encrypt", None)
                        salt_size = var.get("salt_size", None)
                        salt = var.get("salt", None)

                        if vname not in self._variable_manager.extra_vars:
                            if self._tqm:
                                self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                            else:  # we are either in --list-<option> or syntax check
                                play.vars[vname] = default

                # Post validate so any play level variables are templated
                all_vars = self._variable_manager.get_vars(play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                play.post_validate(templar)

                if self._options.syntax:
                    continue

                if self._tqm is None:
                    # we are just doing a listing
                    entry['plays'].append(play)
                else:
                    self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                    previously_failed = len(self._tqm._failed_hosts)
                    previously_unreachable = len(self._tqm._unreachable_hosts)

                    break_play = False
                    # we are actually running plays
                    batches = self._get_serialized_batches(play)
                    if len(batches) == 0:
                        self._tqm.send_callback('v2_playbook_on_play_start', play)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                    for batch in batches:
                        # restrict the inventory to the hosts in the serialized batch
                        self._inventory.restrict_to_hosts(batch)
                        # and run it...
                        result = self._tqm.run(play=play)

                        # break the play if the result equals the special return code
                        if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
                            result = self._tqm.RUN_FAILED_HOSTS
                            break_play = True

                        # check the number of failures here, to see if they're above the maximum
                        # failure percentage allowed, or if any errors are fatal. If either of those
                        # conditions are met, we break out, otherwise we only break out if the entire
                        # batch failed
                        failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                            (previously_failed + previously_unreachable)
                        if len(batch) == failed_hosts_count:
                            break_play = True
                            break

                        # update the previous counts so they don't accumulate incorrectly
                        # over multiple serial batches
                        previously_failed += len(self._tqm._failed_hosts) - previously_failed
                        previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable

                        # save the unreachable hosts from this batch
                        self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                    if break_play:
                        break

                i = i + 1  # per play

            if entry:
                entrylist.append(entry)  # per playbook

            # send the stats callback for this playbook
            if self._tqm is not None:
                if C.RETRY_FILES_ENABLED:
                    retries = set(self._tqm._failed_hosts.keys())
                    retries.update(self._tqm._unreachable_hosts.keys())
                    retries = sorted(retries)
                    if len(retries) > 0:
                        if C.RETRY_FILES_SAVE_PATH:
                            basedir = C.RETRY_FILES_SAVE_PATH
                        elif playbook_path:
                            basedir = os.path.dirname(os.path.abspath(playbook_path))
                        else:
                            basedir = '~/'

                        (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                        filename = os.path.join(basedir, "%s.retry" % retry_name)
                        if self._generate_retry_inventory(filename, retries):
                            display.display("\tto retry, use: --limit @%s\n" % filename)

                self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

            # if the last result wasn't zero, break out of the playbook file name loop
            if result != 0:
                break

        if entrylist:
            return entrylist

    finally:
        if self._tqm is not None:
            self._tqm.cleanup()
        if self._loader:
            self._loader.cleanup_all_tmp_files()

    if self._options.syntax:
        display.display("No issues encountered")
        return result

    return result

def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole
    from ansible.playbook.handler_task_include import HandlerTaskInclude
    from ansible.template import Templar

    assert isinstance(ds, list)

    task_list = []
    for task_ds in ds:
        assert isinstance(task_ds, dict)

        if 'block' in task_ds:
            t = Block.load(
                task_ds,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
            task_list.append(t)
        else:
            if 'include' in task_ds:
                if use_handlers:
                    include_class = HandlerTaskInclude
                else:
                    include_class = TaskInclude
                t = include_class.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader
                )

                all_vars = variable_manager.get_vars(loader=loader, play=play, task=t)
                templar = Templar(loader=loader, variables=all_vars)

                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if t.static is not None:
                    is_static = t.static
                else:
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                        (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                        (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)

                if is_static:
                    if t.loop is not None:
                        raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)

                    # we set a flag to indicate this include was static
                    t.statically_loaded = True

                    # handle relative includes by walking up the list of parent include
                    # tasks and checking the relative result to see if it exists
                    parent_include = block
                    cumulative_path = None

                    found = False
                    subdir = 'tasks'
                    if use_handlers:
                        subdir = 'handlers'
                    while parent_include is not None:
                        if not isinstance(parent_include, TaskInclude):
                            parent_include = parent_include._parent
                            continue
                        parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
                        if cumulative_path is None:
                            cumulative_path = parent_include_dir
                        elif not os.path.isabs(cumulative_path):
                            cumulative_path = os.path.join(parent_include_dir, cumulative_path)
                        include_target = templar.template(t.args['_raw_params'])
                        if t._role:
                            new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
                            include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
                        else:
                            include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)

                        if os.path.exists(include_file):
                            found = True
                            break
                        else:
                            parent_include = parent_include._parent

                    if not found:
                        try:
                            include_target = templar.template(t.args['_raw_params'])
                        except AnsibleUndefinedVariable:
                            raise AnsibleParserError(
                                "Error when evaluating variable in include name: %s.\n\n"
                                "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n"
                                "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n"
                                "sources like group or host vars." % t.args['_raw_params'],
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        if t._role:
                            include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
                        else:
                            include_file = loader.path_dwim(include_target)

                    try:
                        data = loader.load_from_file(include_file)
                        if data is None:
                            return []
                        elif not isinstance(data, list):
                            raise AnsibleParserError("included task files must contain a list of tasks", obj=data)

                        # since we can't send callbacks here, we display a message directly in
                        # the same fashion used by the on_include callback. We also do it here,
                        # because the recursive nature of helper methods means we may be loading
                        # nested includes, and we want the include order printed correctly
                        display.vv("statically included: %s" % include_file)
                    except AnsibleFileNotFound:
                        if t.static or \
                           C.DEFAULT_TASK_INCLUDES_STATIC or \
                           (C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers):
                            raise
                        display.deprecated(
                            "Included file '%s' not found, however since this include is not "
                            "explicitly marked as 'static: yes', we will try and include it dynamically "
                            "later. In the future, this will be an error unless 'static: no' is used "
                            "on the include task. If you do not want missing includes to be considered "
                            "dynamic, use 'static: yes' on the include or set the global ansible.cfg "
                            "options to make all includes static for tasks and/or handlers" % include_file,
                        )
                        task_list.append(t)
                        continue

                    included_blocks = load_list_of_blocks(
                        data,
                        play=play,
                        parent_block=None,
                        task_include=t.copy(),
                        role=role,
                        use_handlers=use_handlers,
                        loader=loader,
                        variable_manager=variable_manager,
                    )

                    # pop tags out of the include args, if they were specified there, and assign
                    # them to the include. If the include already had tags specified, we raise an
                    # error so that users know not to specify them both ways
                    tags = t.vars.pop('tags', [])
                    if isinstance(tags, string_types):
                        tags = tags.split(',')

                    if len(tags) > 0:
                        if len(t.tags) > 0:
                            raise AnsibleParserError(
                                "Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement",
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                    else:
                        tags = t.tags[:]

                    # now we extend the tags on each of the included blocks
                    for b in included_blocks:
                        b.tags = list(set(b.tags).union(tags))
                    # END FIXME

                    # FIXME: handlers shouldn't need this special handling, but do
                    #        right now because they don't iterate blocks correctly
                    if use_handlers:
                        for b in included_blocks:
                            task_list.extend(b.block)
                    else:
                        task_list.extend(included_blocks)
                else:
                    task_list.append(t)

            elif 'include_role' in task_ds:
                ir = IncludeRole.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader
                )

                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if ir.static is not None:
                    is_static = ir.static
                else:
                    display.debug('Determine if include_role is static')
                    all_vars = variable_manager.get_vars(loader=loader, play=play, task=ir)
                    templar = Templar(loader=loader, variables=all_vars)
                    needs_templating = False
                    for param in ir.args:
                        if templar._contains_vars(ir.args[param]):
                            if not templar.templatable(ir.args[param]):
                                needs_templating = True
                                break
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                        (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                        (not needs_templating and ir.all_parents_static() and not ir.loop)
                    display.debug('Determined that include_role is static: %s' % str(is_static))

                if is_static:
                    # uses compiled list from object
                    task_list.extend(ir.get_block_list(variable_manager=variable_manager, loader=loader))
                else:
                    # passes task object itself for later generation of list
                    task_list.append(ir)
            else:
                if use_handlers:
                    t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                else:
                    t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

                task_list.append(t)

    return task_list

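# The static/dynamic decision above reduces to a small predicate. A standalone
# sketch of the same logic, with plain booleans standing in for the config
# options and task attributes (the function and parameter names here are
# illustrative, not part of the Ansible API):
def include_is_static(explicit_static, task_static_default, handler_static_default,
                      use_handlers, needs_templating, all_parents_static, has_loop):
    if explicit_static is not None:
        return explicit_static  # the user decided with 'static: yes/no'
    return (task_static_default or
            (use_handlers and handler_static_default) or
            (not needs_templating and all_parents_static and not has_loop))
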
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    while not self._final_q.empty() and not self._tqm._terminated:
        try:
            result = self._final_q.get()
            display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

            # helper method, used to find the original host from the one
            # returned in the result/message, which has been serialized and
            # thus had some information stripped from it to speed up the
            # serialization process
            def get_original_host(host):
                if host.name in self._inventory._hosts_cache:
                    return self._inventory._hosts_cache[host.name]
                else:
                    return self._inventory.get_host(host.name)

            # all host status messages contain 2 entries: (msg, task_result)
            if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                task_result = result[1]
                host = get_original_host(task_result._host)
                task = task_result._task
                if result[0] == 'host_task_failed' or task_result.is_failed():
                    if not task.ignore_errors:
                        display.debug("marking %s as failed" % host.name)
                        if task.run_once:
                            # if we're using run_once, we have to fail every host here
                            for h in self._inventory.get_hosts(iterator._play.hosts):
                                if h.name not in self._tqm._unreachable_hosts:
                                    iterator.mark_host_failed(h)
                        else:
                            iterator.mark_host_failed(host)

                        # only add the host to the failed list officially if it has
                        # been failed by the iterator
                        if iterator.is_failed(host):
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                    else:
                        self._tqm._stats.increment('ok', host.name)
                    self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                elif result[0] == 'host_unreachable':
                    self._tqm._unreachable_hosts[host.name] = True
                    self._tqm._stats.increment('dark', host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                elif result[0] == 'host_task_skipped':
                    self._tqm._stats.increment('skipped', host.name)
                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                elif result[0] == 'host_task_ok':
                    if task.action != 'include':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)

                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                self._pending_results -= 1
                if host.name in self._blocked_hosts:
                    del self._blocked_hosts[host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                        if role_obj._uuid == task_result._task._role._uuid:
                            role_obj._had_task_run[host.name] = True

                ret_results.append(task_result)

            elif result[0] == 'add_host':
                result_item = result[1]
                new_host_info = result_item.get('add_host', dict())
                self._add_host(new_host_info, iterator)

            elif result[0] == 'add_group':
                host = get_original_host(result[1])
                result_item = result[2]
                self._add_group(host, result_item)

            elif result[0] == 'notify_handler':
                task_result = result[1]
                handler_name = result[2]

                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(original_host, task_result._task)
                if handler_name not in self._notified_handlers:
                    self._notified_handlers[handler_name] = []

                if original_host not in self._notified_handlers[handler_name]:
                    self._notified_handlers[handler_name].append(original_host)
                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))

            elif result[0] == 'register_host_var':
                # essentially the same as 'set_host_var' below, however we
                # never follow the delegate_to value for registered vars and
                # the variable goes in the fact_cache
                host = get_original_host(result[1])
                task = result[2]
                var_value = wrap_var(result[3])
                var_name = task.register

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [host]

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

            elif result[0] in ('set_host_var', 'set_host_facts'):
                host = get_original_host(result[1])
                task = result[2]
                item = result[3]

                # find the host we're actually referring to here, which may
                # be a host that is not really in inventory at all
                if task.delegate_to is not None and task.delegate_facts:
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    if item is not None:
                        task_vars['item'] = item
                    templar = Templar(loader=self._loader, variables=task_vars)
                    host_name = templar.template(task.delegate_to)
                    actual_host = self._inventory.get_host(host_name)
                    if actual_host is None:
                        actual_host = Host(name=host_name)
                else:
                    actual_host = host

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [actual_host]

                if result[0] == 'set_host_var':
                    var_name = result[4]
                    var_value = result[5]
                    for target_host in host_list:
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                elif result[0] == 'set_host_facts':
                    facts = result[4]
                    for target_host in host_list:
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(target_host, facts)
                        else:
                            self._variable_manager.set_host_facts(target_host, facts)

            else:
                raise AnsibleError("unknown result message received: %s" % result[0])

        except Queue.Empty:
            time.sleep(0.0001)

        if one_pass:
            break

    return ret_results

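# The messages read off self._final_q above are plain tuples whose first
# element routes to a branch; the shapes this method handles, as implied by
# the indexing in the code (values illustrative):
#   ('host_task_ok' | 'host_task_failed' | 'host_task_skipped' | 'host_unreachable', task_result)
#   ('add_host', result_item)
#   ('add_group', host, result_item)
#   ('notify_handler', task_result, handler_name)
#   ('register_host_var', host, task, value)
#   ('set_host_var', host, task, item, var_name, var_value)
#   ('set_host_facts', host, task, item, facts)
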
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    def get_original_host(host_name):
        host_name = to_unicode(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks(handler_name, handler_blocks):
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable):
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
        return None

    def parent_handler_match(target_handler, handler_name):
        if target_handler:
            if isinstance(target_handler, TaskInclude):
                try:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    target_handler_name = templar.template(target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable):
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    passes = 0
    while not self._tqm._terminated:
        try:
            task_result = self._final_q.get(timeout=0.001)
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host, task_result._task)
            task_result._host = original_host
            task_result._task = original_task

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            if original_task.register:
                if original_task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [original_host]

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

            role_ran = False
            if task_result.is_failed():
                role_ran = True
                if not original_task.ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                state, _ = iterator.get_next_task_for_host(h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # only add the host to the failed list officially if it has
                    # been failed by the iterator
                    if iterator.is_failed(original_host):
                        self._tqm._failed_hosts[original_host.name] = True
                        self._tqm._stats.increment('failures', original_host.name)
                    else:
                        # otherwise, we grab the current state and if we're iterating on
                        # the rescue portion of a block then we save the failed task in a
                        # special var for use within the rescue/always
                        state, _ = iterator.get_next_task_for_host(original_host, peek=True)
                        if state.run_state == iterator.ITERATING_RESCUE:
                            self._variable_manager.set_nonpersistent_facts(
                                original_host,
                                dict(
                                    ansible_failed_task=original_task.serialize(),
                                    ansible_failed_result=task_result._result,
                                ),
                            )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=original_task.ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                # Find the handler using the helpers above. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                if handler_name in self._listening_handlers:
                                    for listening_handler_name in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                                        if listening_handler is None:
                                            raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)
                                        if original_host not in self._notified_handlers[listening_handler]:
                                            self._notified_handlers[listening_handler].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                                else:
                                    target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                                    if target_handler is not None:
                                        if original_host not in self._notified_handlers[target_handler]:
                                            self._notified_handlers[target_handler].append(original_host)
                                            # FIXME: should this be a callback?
                                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                    else:
                                        # There may be more than one handler with the notified name as
                                        # the parent, so we keep track of whether we found one at all
                                        found = False
                                        for target_handler in self._notified_handlers:
                                            if parent_handler_match(target_handler, handler_name):
                                                self._notified_handlers[target_handler].append(original_host)
                                                display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))
                                                found = True

                                        # and if none were found, then we raise an error
                                        if not found:
                                            raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    elif 'ansible_facts' in result_item:
                        loop_var = 'item'
                        if original_task.loop_control:
                            loop_var = original_task.loop_control.loop_var or 'item'

                        item = result_item.get(loop_var, None)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
                                # be a host that is not really in inventory at all
                                if original_task.delegate_to is not None and original_task.delegate_facts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task)
                                    self.add_tqm_variables(task_vars, play=iterator._play)
                                    if item is not None:
                                        task_vars[loop_var] = item
                                    templar = Templar(loader=self._loader, variables=task_vars)
                                    host_name = templar.template(original_task.delegate_to)
                                    actual_host = self._inventory.get_host(host_name)
                                    if actual_host is None:
                                        actual_host = Host(name=host_name)
                                else:
                                    actual_host = original_host

                                if original_task.run_once:
                                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                                 if host.name not in self._tqm._unreachable_hosts]
                                else:
                                    host_list = [actual_host]

                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            if original_task.run_once:
                                host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                             if host.name not in self._tqm._unreachable_hosts]
                            else:
                                host_list = [original_host]

                            for target_host in host_list:
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action != 'include':
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result['changed']:
                        self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

        except Queue.Empty:
            passes += 1
            if passes > 2:
                break

        if one_pass:
            break

    return ret_results

def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    while not self._final_q.empty() and not self._tqm._terminated:
        try:
            result = self._final_q.get()
            display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

            # helper method, used to find the original host from the one
            # returned in the result/message, which has been serialized and
            # thus had some information stripped from it to speed up the
            # serialization process
            def get_original_host(host):
                if host.name in self._inventory._hosts_cache:
                    return self._inventory._hosts_cache[host.name]
                else:
                    return self._inventory.get_host(host.name)

            # all host status messages contain 2 entries: (msg, task_result)
            if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                task_result = result[1]
                host = get_original_host(task_result._host)
                task = task_result._task
                if result[0] == 'host_task_failed' or task_result.is_failed():
                    if not task.ignore_errors:
                        display.debug("marking %s as failed" % host.name)
                        if task.run_once:
                            # if we're using run_once, we have to fail every host here
                            for h in self._inventory.get_hosts(iterator._play.hosts):
                                if h.name not in self._tqm._unreachable_hosts:
                                    iterator.mark_host_failed(h)
                        else:
                            iterator.mark_host_failed(host)

                        # only add the host to the failed list officially if it has
                        # been failed by the iterator
                        if iterator.is_failed(host):
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                    else:
                        self._tqm._stats.increment('ok', host.name)
                    self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                elif result[0] == 'host_unreachable':
                    self._tqm._unreachable_hosts[host.name] = True
                    self._tqm._stats.increment('dark', host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                elif result[0] == 'host_task_skipped':
                    self._tqm._stats.increment('skipped', host.name)
                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                elif result[0] == 'host_task_ok':
                    if task.action != 'include':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)

                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                self._pending_results -= 1
                if host.name in self._blocked_hosts:
                    del self._blocked_hosts[host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                        if role_obj._uuid == task_result._task._role._uuid:
                            role_obj._had_task_run[host.name] = True

                ret_results.append(task_result)

            elif result[0] == 'add_host':
                result_item = result[1]
                new_host_info = result_item.get('add_host', dict())
                self._add_host(new_host_info, iterator)

            elif result[0] == 'add_group':
                host = get_original_host(result[1])
                result_item = result[2]
                self._add_group(host, result_item)

            elif result[0] == 'notify_handler':
                task_result = result[1]
                handler_name = result[2]

                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(original_host, task_result._task)
                if handler_name not in self._notified_handlers:
                    self._notified_handlers[handler_name] = []

                if original_host not in self._notified_handlers[handler_name]:
                    self._notified_handlers[handler_name].append(original_host)
                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))

            elif result[0] == 'register_host_var':
                # essentially the same as 'set_host_var' below, however we
                # never follow the delegate_to value for registered vars and
                # the variable goes in the fact_cache
                host = get_original_host(result[1])
                task = result[2]
                var_value = wrap_var(result[3])
                var_name = task.register

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [host]

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

            elif result[0] in ('set_host_var', 'set_host_facts'):
                host = get_original_host(result[1])
                task = result[2]
                item = result[3]

                # find the host we're actually referring to here, which may
                # be a host that is not really in inventory at all
                if task.delegate_to is not None and task.delegate_facts:
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    if item is not None:
                        task_vars['item'] = item
                    templar = Templar(loader=self._loader, variables=task_vars)
                    host_name = templar.template(task.delegate_to)
                    actual_host = self._inventory.get_host(host_name)
                    if actual_host is None:
                        actual_host = Host(name=host_name)
                else:
                    actual_host = host

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [actual_host]

                if result[0] == 'set_host_var':
                    var_name = result[4]
                    var_value = result[5]
                    for target_host in host_list:
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                elif result[0] == 'set_host_facts':
                    facts = result[4]
                    for target_host in host_list:
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(target_host, facts)
                        else:
                            self._variable_manager.set_host_facts(target_host, facts)

            elif result[0].startswith('v2_playbook_item') or result[0] == 'v2_playbook_retry':
                self._tqm.send_callback(result[0], result[1])

            else:
                raise AnsibleError("unknown result message received: %s" % result[0])

        except Queue.Empty:
            time.sleep(0.0001)

        if one_pass:
            break

    return ret_results

def run(self):
    '''
    Run the given playbook, based on the settings in the play which
    may limit the runs to serialized groups, etc.
    '''

    signal.signal(signal.SIGINT, self._cleanup)

    result = 0
    entrylist = []
    entry = {}
    try:
        for playbook_path in self._playbooks:
            pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
            self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

            if self._tqm is None:  # we are doing a listing
                entry = {'playbook': playbook_path}
                entry['plays'] = []
            else:
                # make sure the tqm has callbacks loaded
                self._tqm.load_callbacks()
                self._tqm.send_callback('v2_playbook_on_start', pb)

            i = 1
            plays = pb.get_plays()
            display.vv('%d plays in %s' % (len(plays), playbook_path))

            for play in plays:
                if play._included_path is not None:
                    self._loader.set_basedir(play._included_path)
                else:
                    self._loader.set_basedir(pb._basedir)

                # clear any filters which may have been applied to the inventory
                self._inventory.remove_restriction()

                if play.vars_prompt:
                    for var in play.vars_prompt:
                        vname = var['name']
                        prompt = var.get("prompt", vname)
                        default = var.get("default", None)
                        private = var.get("private", True)
                        confirm = var.get("confirm", False)
                        encrypt = var.get("encrypt", None)
                        salt_size = var.get("salt_size", None)
                        salt = var.get("salt", None)

                        if vname not in self._variable_manager.extra_vars:
                            if self._tqm:
                                self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                            else:  # we are either in --list-<option> or syntax check
                                play.vars[vname] = default

                # Create a temporary copy of the play here, so we can run post_validate
                # on it without the templating changes affecting the original object.
                all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                new_play = play.copy()
                new_play.post_validate(templar)

                if self._options.syntax:
                    continue

                if self._tqm is None:
                    # we are just doing a listing
                    entry['plays'].append(new_play)
                else:
                    self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                    # we are actually running plays
                    for batch in self._get_serialized_batches(new_play):
                        if len(batch) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                            break

                        # restrict the inventory to the hosts in the serialized batch
                        self._inventory.restrict_to_hosts(batch)
                        # and run it...
                        result = self._tqm.run(play=play)

                        # check the number of failures here, to see if they're above the maximum
                        # failure percentage allowed, or if any errors are fatal. If either of those
                        # conditions are met, we break out, otherwise we only break out if the entire
                        # batch failed
                        failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
                        if new_play.max_fail_percentage is not None and \
                           int(new_play.max_fail_percentage / 100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
                            break
                        elif len(batch) == failed_hosts_count:
                            break

                        # clear the failed hosts dictionaries in the TQM for the next batch
                        self._unreachable_hosts.update(self._tqm._unreachable_hosts)
                        self._tqm.clear_failed_hosts()

                    # if the last result wasn't zero or 3 (some hosts were unreachable),
                    # break out of the serial batch loop
                    if result not in (0, 3):
                        break

                i = i + 1  # per play

            if entry:
                entrylist.append(entry)  # per playbook

            # send the stats callback for this playbook
            if self._tqm is not None:
                self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

            # if the last result wasn't zero, break out of the playbook file name loop
            if result != 0:
                break

        if entrylist:
            return entrylist

    finally:
        if self._tqm is not None:
            self._cleanup()

    if self._options.syntax:
        display.display("No issues encountered")
        return result

    return result

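# A worked example of the max_fail_percentage comparison above, which compares
# an allowed-failure host count against a surviving-host percentage. For a
# batch of 10 hosts, 3 failed/unreachable, and max_fail_percentage=20:
allowed = int(20 / 100.0 * 10)               # 2: hosts allowed to fail, as a count
surviving_pct = int((10 - 3) / 10 * 100.0)   # 70: surviving hosts, as a percentage
print(allowed > surviving_pct)               # False -> this branch does not break
# Note the units differ on each side, and under Python 2's integer division
# (10 - 3) / 10 evaluates to 0, which makes the comparison trip on any failure
# once the allowed count is nonzero.
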
def _process_pending_results(self, iterator, one_pass=False):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).
    '''

    ret_results = []

    while not self._final_q.empty() and not self._tqm._terminated:
        try:
            result = self._final_q.get()
            display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

            # helper method, used to find the original host from the one
            # returned in the result/message, which has been serialized and
            # thus had some information stripped from it to speed up the
            # serialization process
            def get_original_host(host):
                if host.name in self._inventory._hosts_cache:
                    return self._inventory._hosts_cache[host.name]
                else:
                    return self._inventory.get_host(host.name)

            # all host status messages contain 2 entries: (msg, task_result)
            if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                task_result = result[1]
                host = get_original_host(task_result._host)
                task = task_result._task
                if result[0] == 'host_task_failed' or task_result.is_failed():
                    if not task.ignore_errors:
                        display.debug("marking %s as failed" % host.name)
                        if task.run_once:
                            # if we're using run_once, we have to fail every host here
                            for h in self._inventory.get_hosts(iterator._play.hosts):
                                if h.name not in self._tqm._unreachable_hosts:
                                    iterator.mark_host_failed(h)
                        else:
                            iterator.mark_host_failed(host)

                        # only add the host to the failed list officially if it has
                        # been failed by the iterator
                        if iterator.is_failed(host):
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        else:
                            # otherwise, we grab the current state and if we're iterating on
                            # the rescue portion of a block then we save the failed task in a
                            # special var for use within the rescue/always
                            state, _ = iterator.get_next_task_for_host(host, peek=True)
                            if state.run_state == iterator.ITERATING_RESCUE:
                                original_task = iterator.get_original_task(host, task)
                                self._variable_manager.set_nonpersistent_facts(
                                    host,
                                    dict(
                                        ansible_failed_task=original_task.serialize(),
                                        ansible_failed_result=task_result._result,
                                    ),
                                )
                    else:
                        self._tqm._stats.increment('ok', host.name)
                    self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                elif result[0] == 'host_unreachable':
                    self._tqm._unreachable_hosts[host.name] = True
                    self._tqm._stats.increment('dark', host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                elif result[0] == 'host_task_skipped':
                    self._tqm._stats.increment('skipped', host.name)
                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                elif result[0] == 'host_task_ok':
                    if task.action != 'include':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)

                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                self._pending_results -= 1
                if host.name in self._blocked_hosts:
                    del self._blocked_hosts[host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                        if role_obj._uuid == task_result._task._role._uuid:
                            role_obj._had_task_run[host.name] = True

                ret_results.append(task_result)

            elif result[0] == 'add_host':
                result_item = result[1]
                new_host_info = result_item.get('add_host', dict())
                self._add_host(new_host_info, iterator)

            elif result[0] == 'add_group':
                host = get_original_host(result[1])
                result_item = result[2]
                self._add_group(host, result_item)

            elif result[0] == 'notify_handler':
                task_result = result[1]
                handler_name = result[2]

                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(original_host, task_result._task)

                def search_handler_blocks(handler_name, handler_blocks):
                    for handler_block in handler_blocks:
                        for handler_task in handler_block.block:
                            handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                            templar = Templar(loader=self._loader, variables=handler_vars)
                            try:
                                # first we check with the full result of get_name(), which may
                                # include the role name (if the handler is from a role). If that
                                # is not found, we resort to the simple name field, which doesn't
                                # have anything extra added to it.
                                target_handler_name = templar.template(handler_task.name)
                                if target_handler_name == handler_name:
                                    return handler_task
                                else:
                                    target_handler_name = templar.template(handler_task.get_name())
                                    if target_handler_name == handler_name:
                                        return handler_task
                            except (UndefinedError, AnsibleUndefinedVariable):
                                # We skip this handler due to the fact that it may be using
                                # a variable in the name that was conditionally included via
                                # set_fact or some other method, and we don't want to error
                                # out unnecessarily
                                continue
                    return None

                # Find the handler using the above helper. First we look up the
                # dependency chain of the current task (if it's from a role), otherwise
                # we just look through the list of handlers in the current play/all
                # roles and use the first one that matches the notify name
                if handler_name in self._listening_handlers:
                    for listening_handler_name in self._listening_handlers[handler_name]:
                        listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                        if listening_handler is None:
                            raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)
                        if original_host not in self._notified_handlers[listening_handler]:
                            self._notified_handlers[listening_handler].append(original_host)
                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                else:
                    target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                    if target_handler is None:
                        raise AnsibleError("The requested handler '%s' was not found in any of the known handlers" % handler_name)

                    if target_handler in self._notified_handlers:
                        if original_host not in self._notified_handlers[target_handler]:
                            self._notified_handlers[target_handler].append(original_host)
                            # FIXME: should this be a callback?
                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                    else:
                        raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

            elif result[0] == 'register_host_var':
                # essentially the same as 'set_host_var' below, however we
                # never follow the delegate_to value for registered vars and
                # the variable goes in the fact_cache
                host = get_original_host(result[1])
                task = result[2]
                var_value = wrap_var(result[3])
                var_name = task.register

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [host]

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

            elif result[0] in ('set_host_var', 'set_host_facts'):
                host = get_original_host(result[1])
                task = result[2]
                item = result[3]

                # find the host we're actually referring to here, which may
                # be a host that is not really in inventory at all
                if task.delegate_to is not None and task.delegate_facts:
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    loop_var = 'item'
                    if task.loop_control:
                        loop_var = task.loop_control.loop_var or 'item'
                    if item is not None:
                        task_vars[loop_var] = item
                    templar = Templar(loader=self._loader, variables=task_vars)
                    host_name = templar.template(task.delegate_to)
                    actual_host = self._inventory.get_host(host_name)
                    if actual_host is None:
                        actual_host = Host(name=host_name)
                else:
                    actual_host = host

                if task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                                 if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [actual_host]

                if result[0] == 'set_host_var':
                    var_name = result[4]
                    var_value = result[5]
                    for target_host in host_list:
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                elif result[0] == 'set_host_facts':
                    facts = result[4]
                    for target_host in host_list:
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(target_host, facts.copy())
                        else:
                            self._variable_manager.set_host_facts(target_host, facts.copy())

            elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
                self._tqm.send_callback(result[0], result[1])

            elif result[0] == 'v2_on_file_diff':
                if self._diff:
                    self._tqm.send_callback('v2_on_file_diff', result[1])

            else:
                raise AnsibleError("unknown result message received: %s" % result[0])

        except Queue.Empty:
            time.sleep(0.005)

        if one_pass:
            break

    return ret_results

def run(self): ''' Run the given playbook, based on the settings in the play which may limit the runs to serialized groups, etc. ''' result = 0 entrylist = [] entry = {} try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path))) if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] else: # make sure the tqm has callbacks loaded self._tqm.load_callbacks() self._tqm.send_callback('v2_playbook_on_start', pb) i = 1 plays = pb.get_plays() display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path))) for play in plays: if play._included_path is not None: self._loader.set_basedir(play._included_path) else: self._loader.set_basedir(pb._basedir) # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() if play.vars_prompt: for var in play.vars_prompt: vname = var['name'] prompt = var.get("prompt", vname) default = var.get("default", None) private = var.get("private", True) confirm = var.get("confirm", False) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in self._variable_manager.extra_vars: if self._tqm: self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) else: # we are either in --list-<option> or syntax check play.vars[vname] = default # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) if self._options.syntax: continue if self._tqm is None: # we are just doing a listing entry['plays'].append(new_play) else: self._tqm._unreachable_hosts.update(self._unreachable_hosts) previously_failed = len(self._tqm._failed_hosts) previously_unreachable = len(self._tqm._unreachable_hosts) break_play = False # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: self._tqm.send_callback('v2_playbook_on_play_start', new_play) self._tqm.send_callback('v2_playbook_on_no_hosts_matched') break # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) # and run it... result = self._tqm.run(play=play) # break the play if the result equals the special return code if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0: result = self._tqm.RUN_FAILED_HOSTS break_play = True # check the number of failures here, to see if they're above the maximum # failure percentage allowed, or if any errors are fatal. 
If either of those # conditions are met, we break out, otherwise we only break out if the entire # batch failed failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \ (previously_failed + previously_unreachable) if len(batch) == failed_hosts_count: break_play = True break # update the previous counts so they don't accumulate incorrectly # over multiple serial batches previously_failed += len(self._tqm._failed_hosts) - previously_failed previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable # save the unreachable hosts from this batch self._unreachable_hosts.update(self._tqm._unreachable_hosts) if break_play: break i = i + 1 # per play if entry: entrylist.append(entry) # per playbook # send the stats callback for this playbook if self._tqm is not None: if C.RETRY_FILES_ENABLED: retries = set(self._tqm._failed_hosts.keys()) retries.update(self._tqm._unreachable_hosts.keys()) retries = sorted(retries) if len(retries) > 0: if C.RETRY_FILES_SAVE_PATH: basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) elif playbook_path: basedir = os.path.dirname(playbook_path) else: basedir = '~/' (retry_name, _) = os.path.splitext(os.path.basename(playbook_path)) filename = os.path.join(basedir, "%s.retry" % retry_name) if self._generate_retry_inventory(filename, retries): display.display("\tto retry, use: --limit @%s\n" % filename) self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) # if the last result wasn't zero, break out of the playbook file name loop if result != 0: break if entrylist: return entrylist finally: if self._tqm is not None: self._tqm.cleanup() if self._loader: self._loader.cleanup_all_tmp_files() if self._options.syntax: display.display("No issues encountered") return result return result
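The batching loop above drives each play through _get_serialized_batches(), which slices the play's hosts according to its serial setting. A minimal sketch of that slicing, assuming a plain list of hosts and an integer serial value (the real method also supports percentages and lists of batch sizes; this helper is illustrative):

def get_serialized_batches(hosts, serial):
    """Split hosts into run batches of at most `serial` hosts each."""
    if not serial or serial <= 0:
        # no serial keyword: the whole play runs as a single batch
        return [hosts]
    return [hosts[i:i + serial] for i in range(0, len(hosts), serial)]

# get_serialized_batches(['web1', 'web2', 'web3'], 2) -> [['web1', 'web2'], ['web3']]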
def _process_pending_results(self, iterator, one_pass=False): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). ''' ret_results = [] while not self._final_q.empty() and not self._tqm._terminated: try: result = self._final_q.get() display.debug("got result from result worker: %s" % ([text_type(x) for x in result],)) # helper method, used to find the original host from the one # returned in the result/message, which has been serialized and # thus had some information stripped from it to speed up the # serialization process def get_original_host(host): if host.name in self._inventory._hosts_cache: return self._inventory._hosts_cache[host.name] else: return self._inventory.get_host(host.name) # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] host = get_original_host(task_result._host) task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed(): if not task.ignore_errors: display.debug("marking %s as failed" % host.name) if task.run_once: # if we're using run_once, we have to fail every host here [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts] else: iterator.mark_host_failed(host) # only add the host to the failed list officially if it has # been failed by the iterator if iterator.is_failed(host): self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: # otherwise, we grab the current state and if we're iterating on # the rescue portion of a block then we save the failed task in a # special var for use within the rescue/always state, _ = iterator.get_next_task_for_host(host, peek=True) if state.run_state == iterator.ITERATING_RESCUE: original_task = iterator.get_original_task(host, task) self._variable_manager.set_nonpersistent_facts( host, dict( ansible_failed_task=original_task.serialize(), ansible_failed_result=task_result._result, ), ) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) elif result[0] == 'host_unreachable': self._tqm._unreachable_hosts[host.name] = True self._tqm._stats.increment('dark', host.name) self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif result[0] == 'host_task_skipped': self._tqm._stats.increment('skipped', host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) elif result[0] == 'host_task_ok': if task.action != 'include': self._tqm._stats.increment('ok', host.name) if 'changed' in task_result._result and task_result._result['changed']: self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._pending_results -= 1 if host.name in self._blocked_hosts: del self._blocked_hosts[host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'): # lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]): if role_obj._uuid == 
task_result._task._role._uuid: role_obj._had_task_run[host.name] = True ret_results.append(task_result) elif result[0] == 'add_host': result_item = result[1] new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) elif result[0] == 'add_group': host = get_original_host(result[1]) result_item = result[2] self._add_group(host, result_item) elif result[0] == 'notify_handler': task_result = result[1] handler_name = result[2] original_host = get_original_host(task_result._host) original_task = iterator.get_original_task(original_host, task_result._task) def search_handler_blocks(handler_name, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task) templar = Templar(loader=self._loader, variables=handler_vars) try: # first we check with the full result of get_name(), which may # include the role name (if the handler is from a role). If that # is not found, we resort to the simple name field, which doesn't # have anything extra added to it. target_handler_name = templar.template(handler_task.name) if target_handler_name == handler_name: return handler_task else: target_handler_name = templar.template(handler_task.get_name()) if target_handler_name == handler_name: return handler_task except (UndefinedError, AnsibleUndefinedVariable): # We skip this handler due to the fact that it may be using # a variable in the name that was conditionally included via # set_fact or some other method, and we don't want to error # out unnecessarily continue return None # Find the handler using the above helper. First we look up the # dependency chain of the current task (if it's from a role), otherwise # we just look through the list of handlers in the current play/all # roles and use the first one that matches the notify name if handler_name in self._listening_handlers: for listening_handler_name in self._listening_handlers[handler_name]: listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers) if listening_handler is None: raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name) if original_host not in self._notified_handlers[listening_handler]: self._notified_handlers[listening_handler].append(original_host) display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,)) else: target_handler = search_handler_blocks(handler_name, iterator._play.handlers) if target_handler is None: raise AnsibleError("The requested handler '%s' was not found in any of the known handlers" % handler_name) if target_handler in self._notified_handlers: if original_host not in self._notified_handlers[target_handler]: self._notified_handlers[target_handler].append(original_host) # FIXME: should this be a callback? 
display.vv("NOTIFIED HANDLER %s" % (handler_name,)) else: raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name) elif result[0] == 'register_host_var': # essentially the same as 'set_host_var' below, however we # never follow the delegate_to value for registered vars and # the variable goes in the fact_cache host = get_original_host(result[1]) task = result[2] var_value = wrap_var(result[3]) var_name = task.register if task.run_once: host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] else: host_list = [host] for target_host in host_list: self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): host = get_original_host(result[1]) task = result[2] item = result[3] # find the host we're actually refering too here, which may # be a host that is not really in inventory at all if task.delegate_to is not None and task.delegate_facts: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) loop_var = 'item' if task.loop_control: loop_var = task.loop_control.loop_var or 'item' if item is not None: task_vars[loop_var] = item templar = Templar(loader=self._loader, variables=task_vars) host_name = templar.template(task.delegate_to) actual_host = self._inventory.get_host(host_name) if actual_host is None: actual_host = Host(name=host_name) else: actual_host = host if task.run_once: host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] else: host_list = [actual_host] if result[0] == 'set_host_var': var_name = result[4] var_value = result[5] for target_host in host_list: self._variable_manager.set_host_variable(target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] for target_host in host_list: if task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts(target_host, facts.copy()) else: self._variable_manager.set_host_facts(target_host, facts.copy()) elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry': self._tqm.send_callback(result[0], result[1]) elif result[0] == 'v2_on_file_diff': if self._diff: self._tqm.send_callback('v2_on_file_diff', result[1]) else: raise AnsibleError("unknown result message received: %s" % result[0]) except Queue.Empty: time.sleep(0.005) if one_pass: break return ret_results
def _process_pending_results(self, iterator, one_pass=False, max_passes=None): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). ''' ret_results = [] def get_original_host(host_name): host_name = to_text(host_name) if host_name in self._inventory._hosts_cache: return self._inventory._hosts_cache[host_name] else: return self._inventory.get_host(host_name) def search_handler_blocks_by_name(handler_name, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: if handler_task.name: handler_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, task=handler_task) templar = Templar(loader=self._loader, variables=handler_vars) try: # first we check with the full result of get_name(), which may # include the role name (if the handler is from a role). If that # is not found, we resort to the simple name field, which doesn't # have anything extra added to it. target_handler_name = templar.template( handler_task.name) if target_handler_name == handler_name: return handler_task else: target_handler_name = templar.template( handler_task.get_name()) if target_handler_name == handler_name: return handler_task except (UndefinedError, AnsibleUndefinedVariable): # We skip this handler due to the fact that it may be using # a variable in the name that was conditionally included via # set_fact or some other method, and we don't want to error # out unnecessarily continue return None def search_handler_blocks_by_uuid(handler_uuid, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: if handler_uuid == handler_task._uuid: return handler_task return None def parent_handler_match(target_handler, handler_name): if target_handler: if isinstance(target_handler, (TaskInclude, IncludeRole)): try: handler_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, task=target_handler) templar = Templar(loader=self._loader, variables=handler_vars) target_handler_name = templar.template( target_handler.name) if target_handler_name == handler_name: return True else: target_handler_name = templar.template( target_handler.get_name()) if target_handler_name == handler_name: return True except (UndefinedError, AnsibleUndefinedVariable): pass return parent_handler_match(target_handler._parent, handler_name) else: return False # a Templar class to use for templating things later, as we're using # original/non-validated objects here on the manager side. We set the # variables in use later inside the loop below templar = Templar(loader=self._loader) cur_pass = 0 while True: try: self._results_lock.acquire() task_result = self._results.pop() except IndexError: break finally: self._results_lock.release() # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc. 
original_host = get_original_host(task_result._host) original_task = iterator.get_original_task(original_host, task_result._task) task_result._host = original_host task_result._task = original_task # get the correct loop var for use later if original_task.loop_control: loop_var = original_task.loop_control.loop_var or 'item' else: loop_var = 'item' # get the vars for this task/host pair, make them the active set of vars for our templar above task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task) self.add_tqm_variables(task_vars, play=iterator._play) templar.set_available_variables(task_vars) # send callbacks for 'non final' results if '_ansible_retry' in task_result._result: self._tqm.send_callback('v2_runner_retry', task_result) continue elif '_ansible_item_result' in task_result._result: if task_result.is_failed() or task_result.is_unreachable(): self._tqm.send_callback('v2_runner_item_on_failed', task_result) elif task_result.is_skipped(): self._tqm.send_callback('v2_runner_item_on_skipped', task_result) else: if 'diff' in task_result._result: if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._tqm.send_callback('v2_runner_item_on_ok', task_result) continue run_once = templar.template(original_task.run_once) if original_task.register: host_list = self.get_task_hosts(iterator, original_host, original_task) clean_copy = strip_internal_keys(task_result._result) if 'invocation' in clean_copy: del clean_copy['invocation'] for target_host in host_list: self._variable_manager.set_nonpersistent_facts( target_host, {original_task.register: clean_copy}) # all host status messages contain 2 entries: (msg, task_result) role_ran = False if task_result.is_failed(): role_ran = True ignore_errors = templar.template(original_task.ignore_errors) if not ignore_errors: display.debug("marking %s as failed" % original_host.name) if run_once: # if we're using run_once, we have to fail every host here for h in self._inventory.get_hosts( iterator._play.hosts): if h.name not in self._tqm._unreachable_hosts: state, _ = iterator.get_next_task_for_host( h, peek=True) iterator.mark_host_failed(h) state, new_task = iterator.get_next_task_for_host( h, peek=True) else: iterator.mark_host_failed(original_host) # increment the failed count for this host self._tqm._stats.increment('failures', original_host.name) # grab the current state and if we're iterating on the rescue portion # of a block then we save the failed task in a special var for use # within the rescue/always state, _ = iterator.get_next_task_for_host(original_host, peek=True) if iterator.is_failed( original_host ) and state and state.run_state == iterator.ITERATING_COMPLETE: self._tqm._failed_hosts[original_host.name] = True if state and state.run_state == iterator.ITERATING_RESCUE: self._variable_manager.set_nonpersistent_facts( original_host, dict( ansible_failed_task=original_task.serialize(), ansible_failed_result=task_result._result, ), ) else: self._tqm._stats.increment('ok', original_host.name) if 'changed' in task_result._result and task_result._result[ 'changed']: self._tqm._stats.increment('changed', original_host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors) elif task_result.is_unreachable(): self._tqm._unreachable_hosts[original_host.name] = True iterator._play._removed_hosts.append(original_host.name) self._tqm._stats.increment('dark', original_host.name) 
self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif task_result.is_skipped(): self._tqm._stats.increment('skipped', original_host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) else: role_ran = True if original_task.loop: # this task had a loop, and has more than one result, so # loop over all of them instead of a single result result_items = task_result._result.get('results', []) else: result_items = [task_result._result] for result_item in result_items: if '_ansible_notify' in result_item: if task_result.is_changed(): # The shared dictionary for notified handlers is a proxy, which # does not detect when sub-objects within the proxy are modified. # So, per the docs, we reassign the list so the proxy picks up and # notifies all other threads for handler_name in result_item['_ansible_notify']: found = False # Find the handler using the above helper. First we look up the # dependency chain of the current task (if it's from a role), otherwise # we just look through the list of handlers in the current play/all # roles and use the first one that matches the notify name target_handler = search_handler_blocks_by_name( handler_name, iterator._play.handlers) if target_handler is not None: found = True if original_host not in self._notified_handlers[ target_handler._uuid]: self._notified_handlers[ target_handler._uuid].append( original_host) # FIXME: should this be a callback? display.vv("NOTIFIED HANDLER %s" % (handler_name, )) else: # As there may be more than one handler with the notified name as the # parent, so we just keep track of whether or not we found one at all for target_handler_uuid in self._notified_handlers: target_handler = search_handler_blocks_by_uuid( target_handler_uuid, iterator._play.handlers) if target_handler and parent_handler_match( target_handler, handler_name): self._notified_handlers[ target_handler._uuid].append( original_host) display.vv( "NOTIFIED HANDLER %s" % (target_handler.get_name(), )) found = True if handler_name in self._listening_handlers: for listening_handler_uuid in self._listening_handlers[ handler_name]: listening_handler = search_handler_blocks_by_uuid( listening_handler_uuid, iterator._play.handlers) if listening_handler is not None: found = True else: continue if original_host not in self._notified_handlers[ listening_handler._uuid]: self._notified_handlers[ listening_handler. 
_uuid].append(original_host) display.vv( "NOTIFIED HANDLER %s" % (listening_handler.get_name(), )) # and if none were found, then we raise an error if not found: msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name if C.ERROR_ON_MISSING_HANDLER: raise AnsibleError(msg) else: display.warning(msg) if 'add_host' in result_item: # this task added a new host (add_host module) new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) elif 'add_group' in result_item: # this task added a new group (group_by module) self._add_group(original_host, result_item) if 'ansible_facts' in result_item: # if delegated fact and we are delegating facts, we need to change target host for them if original_task.delegate_to is not None and original_task.delegate_facts: item = result_item.get(loop_var, None) if item is not None: task_vars[loop_var] = item host_name = templar.template( original_task.delegate_to) actual_host = self._inventory.get_host(host_name) if actual_host is None: actual_host = Host(name=host_name) else: actual_host = original_host host_list = self.get_task_hosts( iterator, actual_host, original_task) if original_task.action == 'include_vars': for (var_name, var_value) in iteritems( result_item['ansible_facts']): # find the host we're actually referring too here, which may # be a host that is not really in inventory at all for target_host in host_list: self._variable_manager.set_host_variable( target_host, var_name, var_value) else: for target_host in host_list: if original_task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts( target_host, result_item['ansible_facts'].copy()) else: self._variable_manager.set_host_facts( target_host, result_item['ansible_facts'].copy()) if 'ansible_stats' in result_item and 'data' in result_item[ 'ansible_stats'] and result_item['ansible_stats'][ 'data']: if 'per_host' not in result_item[ 'ansible_stats'] or result_item[ 'ansible_stats']['per_host']: host_list = self.get_task_hosts( iterator, original_host, original_task) else: host_list = [None] data = result_item['ansible_stats']['data'] aggregate = 'aggregate' in result_item[ 'ansible_stats'] and result_item['ansible_stats'][ 'aggregate'] for myhost in host_list: for k in data.keys(): if aggregate: self._tqm._stats.update_custom_stats( k, data[k], myhost) else: self._tqm._stats.set_custom_stats( k, data[k], myhost) if 'diff' in task_result._result: if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) if original_task.action not in ['include', 'include_role']: self._tqm._stats.increment('ok', original_host.name) if 'changed' in task_result._result and task_result._result[ 'changed']: self._tqm._stats.increment('changed', original_host.name) # finally, send the ok for this task self._tqm.send_callback('v2_runner_on_ok', task_result) self._pending_results -= 1 if original_host.name in self._blocked_hosts: del self._blocked_hosts[original_host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if original_task._role is not None and role_ran: #TODO: and original_task.action != 'include_role':? 
# lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[ original_task._role._role_name]): if role_obj._uuid == original_task._role._uuid: role_obj._had_task_run[original_host.name] = True ret_results.append(task_result) if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes: break cur_pass += 1 return ret_results
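The ansible_stats handling above distinguishes aggregated custom stats (update_custom_stats) from overwriting ones (set_custom_stats), optionally recorded per host. A small sketch of that distinction with a plain dict standing in for the stats object; the names here are illustrative:

def record_custom_stat(stats, key, value, aggregate=False, host=None):
    per_host = stats.setdefault(host, {})
    if aggregate and isinstance(per_host.get(key), int) and isinstance(value, int):
        per_host[key] += value  # accumulate the stat across tasks
    else:
        per_host[key] = value   # overwrite with the most recent value

# stats = {}; record_custom_stat(stats, 'restarts', 1, aggregate=True, host='web1')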
def run(self): ''' Run the given playbook, based on the settings in the play which may limit the runs to serialized groups, etc. ''' result = 0 entrylist = [] entry = {} try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) self._inventory.set_playbook_basedir( os.path.dirname(playbook_path)) if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] else: # make sure the tqm has callbacks loaded self._tqm.load_callbacks() self._tqm.send_callback('v2_playbook_on_start', pb) i = 1 plays = pb.get_plays() display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path))) for play in plays: if play._included_path is not None: self._loader.set_basedir(play._included_path) else: self._loader.set_basedir(pb._basedir) # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() if play.vars_prompt: for var in play.vars_prompt: vname = var['name'] prompt = var.get("prompt", vname) default = var.get("default", None) private = var.get("private", True) confirm = var.get("confirm", False) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in self._variable_manager.extra_vars: if self._tqm: self._tqm.send_callback( 'v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) play.vars[vname] = display.do_var_prompt( vname, private, prompt, encrypt, confirm, salt_size, salt, default) else: # we are either in --list-<option> or syntax check play.vars[vname] = default # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. all_vars = self._variable_manager.get_vars( loader=self._loader, play=play) templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) if self._options.syntax: continue if self._tqm is None: # we are just doing a listing entry['plays'].append(new_play) else: self._tqm._unreachable_hosts.update( self._unreachable_hosts) previously_failed = len(self._tqm._failed_hosts) previously_unreachable = len( self._tqm._unreachable_hosts) break_play = False # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: self._tqm.send_callback( 'v2_playbook_on_play_start', new_play) self._tqm.send_callback( 'v2_playbook_on_no_hosts_matched') break # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) # and run it... result = self._tqm.run(play=play) # break the play if the result equals the special return code if result == self._tqm.RUN_FAILED_BREAK_PLAY: result = self._tqm.RUN_FAILED_HOSTS break_play = True # check the number of failures here, to see if they're above the maximum # failure percentage allowed, or if any errors are fatal. 
If either of those # conditions are met, we break out, otherwise we only break out if the entire # batch failed failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \ (previously_failed + previously_unreachable) if new_play.max_fail_percentage is not None and \ int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0): break_play = True break elif len(batch) == failed_hosts_count: break_play = True break # save the unreachable hosts from this batch self._unreachable_hosts.update( self._tqm._unreachable_hosts) # if the last result wasn't zero or 3 (some hosts were unreachable), # break out of the serial batch loop if result not in (self._tqm.RUN_OK, self._tqm.RUN_UNREACHABLE_HOSTS): break if break_play: break i = i + 1 # per play if entry: entrylist.append(entry) # per playbook # send the stats callback for this playbook if self._tqm is not None: if C.RETRY_FILES_ENABLED: retries = set(self._tqm._failed_hosts.keys()) retries.update(self._tqm._unreachable_hosts.keys()) retries = sorted(retries) if len(retries) > 0: if C.RETRY_FILES_SAVE_PATH: basedir = C.shell_expand( C.RETRY_FILES_SAVE_PATH) elif playbook_path: basedir = os.path.dirname(playbook_path) else: basedir = '~/' (retry_name, _) = os.path.splitext( os.path.basename(playbook_path)) filename = os.path.join(basedir, "%s.retry" % retry_name) if self._generate_retry_inventory( filename, retries): display.display( "\tto retry, use: --limit @%s\n" % filename) self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) # if the last result wasn't zero, break out of the playbook file name loop if result != 0: break if entrylist: return entrylist finally: if self._tqm is not None: self._tqm.cleanup() if self._loader: self._loader.cleanup_all_tmp_files() if self._options.syntax: display.display("No issues encountered") return result return result
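The inline max_fail_percentage comparison above is hard to read at a glance; its intent is to abort the play once the failed share of the current serial batch exceeds the allowed percentage. A plain restatement of that intent (a sketch of the idea, not the exact integer-truncation arithmetic used above):

def exceeds_max_fail_percentage(batch_size, failed_count, max_fail_percentage):
    """True once the failed share of a serial batch passes the threshold."""
    if max_fail_percentage is None:
        return False
    return failed_count * 100.0 / batch_size > max_fail_percentage

# exceeds_max_fail_percentage(4, 2, 30) -> True (50% failed > 30% allowed)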
def run(self):
    ''' Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc. '''

    # signal.signal(signal.SIGINT, self._cleanup)

    result = 0
    entrylist = []
    entry = {}
    try:
        for playbook_path in self._playbooks:
            pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
            self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

            if self._tqm is None:
                # we are doing a listing
                entry = {'playbook': playbook_path}
                entry['plays'] = []
            else:
                # make sure the tqm has callbacks loaded
                self._tqm.load_callbacks()
                self._tqm.send_callback('v2_playbook_on_start', pb)

            i = 1
            plays = pb.get_plays()
            display.vv('%d plays in %s' % (len(plays), playbook_path))

            for play in plays:
                if play._included_path is not None:
                    self._loader.set_basedir(play._included_path)
                else:
                    self._loader.set_basedir(pb._basedir)

                # clear any filters which may have been applied to the inventory
                self._inventory.remove_restriction()

                if play.vars_prompt:
                    for var in play.vars_prompt:
                        vname = var['name']
                        prompt = var.get("prompt", vname)
                        default = var.get("default", None)
                        private = var.get("private", True)
                        confirm = var.get("confirm", False)
                        encrypt = var.get("encrypt", None)
                        salt_size = var.get("salt_size", None)
                        salt = var.get("salt", None)

                        if vname not in self._variable_manager.extra_vars:
                            if self._tqm:
                                self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                            else:
                                # we are either in --list-<option> or syntax check
                                play.vars[vname] = default

                # Create a temporary copy of the play here, so we can run post_validate
                # on it without the templating changes affecting the original object.
                all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                templar = Templar(loader=self._loader, variables=all_vars)
                new_play = play.copy()
                new_play.post_validate(templar)

                if self._options.syntax:
                    continue

                if self._tqm is None:
                    # we are just doing a listing
                    entry['plays'].append(new_play)
                else:
                    self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                    # we are actually running plays
                    for batch in self._get_serialized_batches(new_play):
                        if len(batch) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                            break

                        # restrict the inventory to the hosts in the serialized batch
                        self._inventory.restrict_to_hosts(batch)

                        # and run it...
                        result = self._tqm.run(play=play)

                        # check the number of failures here, to see if they're above the maximum
                        # failure percentage allowed, or if any errors are fatal. If either of those
                        # conditions are met, we break out, otherwise we only break out if the entire
                        # batch failed
                        failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
                        if new_play.any_errors_fatal and failed_hosts_count > 0:
                            break
                        elif new_play.max_fail_percentage is not None and \
                                int((new_play.max_fail_percentage) / 100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
                            break
                        elif len(batch) == failed_hosts_count:
                            break

                        # clear the failed hosts dictionaries in the TQM for the next batch
                        self._unreachable_hosts.update(self._tqm._unreachable_hosts)
                        self._tqm.clear_failed_hosts()

                    # if the last result wasn't zero or 3 (some hosts were unreachable),
                    # break out of the serial batch loop
                    if result not in (0, 3):
                        break

                i = i + 1  # per play

            if entry:
                entrylist.append(entry)  # per playbook

            # send the stats callback for this playbook
            if self._tqm is not None:
                self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

            # if the last result wasn't zero, break out of the playbook file name loop
            if result != 0:
                break

        if entrylist:
            return entrylist

    finally:
        if self._tqm is not None:
            self._cleanup()

    if self._options.syntax:
        display.display("No issues encountered")
        return result

    return result
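This older variant folds three abort rules into one if/elif chain: any_errors_fatal, the failure-percentage threshold, and a fully failed batch. The same decision as a standalone predicate (a sketch; the percentage check is simplified as in the previous example):

def should_abort_batch(batch_size, failed_count, any_errors_fatal, max_fail_percentage):
    if any_errors_fatal and failed_count > 0:
        return True                        # a single failure is fatal
    if max_fail_percentage is not None and \
            failed_count * 100.0 / batch_size > max_fail_percentage:
        return True                        # too large a share of the batch failed
    return failed_count == batch_size      # the entire batch failed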
def _process_pending_results(self, iterator, one_pass=False):
    ''' Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.). '''

    ret_results = []

    def get_original_host(host_name):
        host_name = to_unicode(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks(handler_name, handler_blocks):
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
        return None

    passes = 0
    while not self._tqm._terminated:
        try:
            task_result = self._final_q.get(timeout=0.001)
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host, task_result._task)
            task_result._host = original_host
            task_result._task = original_task

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            if original_task.register:
                if original_task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [original_host]

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                if not original_task.ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
                    else:
                        iterator.mark_host_failed(original_host)

                    # only add the host to the failed list officially if it has
                    # been failed by the iterator
                    if iterator.is_failed(original_host):
                        self._tqm._failed_hosts[original_host.name] = True
                        self._tqm._stats.increment('failures', original_host.name)
                    else:
                        # otherwise, we grab the current state and if we're iterating on
                        # the rescue portion of a block then we save the failed task in a
                        # special var for use within the rescue/always
                        state, _ = iterator.get_next_task_for_host(original_host, peek=True)
                        if state.run_state == iterator.ITERATING_RESCUE:
                            self._variable_manager.set_nonpersistent_facts(
                                original_host,
                                dict(
                                    ansible_failed_task=original_task.serialize(),
                                    ansible_failed_result=task_result._result,
                                ),
                            )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=original_task.ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                # Find the handler using the above helper. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                if handler_name in self._listening_handlers:
                                    for listening_handler_name in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                                        if listening_handler is None:
                                            raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)
                                        if original_host not in self._notified_handlers[listening_handler]:
                                            self._notified_handlers[listening_handler].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                                else:
                                    target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                                    if target_handler is None:
                                        raise AnsibleError("The requested handler '%s' was not found in any of the known handlers" % handler_name)
                                    if target_handler in self._notified_handlers:
                                        if original_host not in self._notified_handlers[target_handler]:
                                            self._notified_handlers[target_handler].append(original_host)
                                            # FIXME: should this be a callback?
                                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                    else:
                                        raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)
                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)
                    elif 'ansible_facts' in result_item:
                        loop_var = 'item'
                        if original_task.loop_control:
                            loop_var = original_task.loop_control.loop_var or 'item'
                        item = result_item.get(loop_var, None)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
                                # be a host that is not really in inventory at all
                                if original_task.delegate_to is not None and original_task.delegate_facts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task)
                                    self.add_tqm_variables(task_vars, play=iterator._play)
                                    if item is not None:
                                        task_vars[loop_var] = item
                                    templar = Templar(loader=self._loader, variables=task_vars)
                                    host_name = templar.template(original_task.delegate_to)
                                    actual_host = self._inventory.get_host(host_name)
                                    if actual_host is None:
                                        actual_host = Host(name=host_name)
                                else:
                                    actual_host = original_host

                                if original_task.run_once:
                                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                                else:
                                    host_list = [actual_host]

                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            if original_task.run_once:
                                host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                            else:
                                host_list = [original_host]

                            for target_host in host_list:
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action != 'include':
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result['changed']:
                        self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

        except Queue.Empty:
            passes += 1
            if passes > 2:
                break

        if one_pass:
            break

    return ret_results
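Before storing a register: result, the newer variants strip internal '_ansible_*' bookkeeping keys and the module invocation so only the module's real output is saved. A simplified sketch of that cleanup (the real strip_internal_keys helper also recurses into nested containers):

def clean_registered_result(result):
    clean_copy = {k: v for k, v in result.items() if not k.startswith('_ansible_')}
    clean_copy.pop('invocation', None)  # drop the module argument echo
    return clean_copy

# clean_registered_result({'rc': 0, '_ansible_no_log': False}) -> {'rc': 0}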
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' Given a list of task datastructures (parsed from YAML), return a list of Task() or TaskInclude() objects. ''' # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block from ansible.playbook.handler import Handler from ansible.playbook.task import Task from ansible.playbook.task_include import TaskInclude from ansible.playbook.role_include import IncludeRole from ansible.playbook.handler_task_include import HandlerTaskInclude from ansible.template import Templar if not isinstance(ds, list): raise AnsibleAssertionError( 'The ds (%s) should be a list but was a %s' % (ds, type(ds))) task_list = [] for task_ds in ds: if not isinstance(task_ds, dict): AnsibleAssertionError('The ds (%s) should be a dict but was a %s' % (ds, type(ds))) if 'block' in task_ds: t = Block.load( task_ds, play=play, parent_block=block, role=role, task_include=task_include, use_handlers=use_handlers, variable_manager=variable_manager, loader=loader, ) task_list.append(t) else: if 'include' in task_ds or 'import_tasks' in task_ds or 'include_tasks' in task_ds: if 'include' in task_ds: display.deprecated( "The use of 'include' for tasks has been deprecated. " "Use 'import_tasks' for static inclusions or 'include_tasks' for dynamic inclusions" ) if use_handlers: include_class = HandlerTaskInclude else: include_class = TaskInclude t = include_class.load(task_ds, block=block, role=role, task_include=None, variable_manager=variable_manager, loader=loader) all_vars = variable_manager.get_vars(play=play, task=t) templar = Templar(loader=loader, variables=all_vars) # check to see if this include is dynamic or static: # 1. the user has set the 'static' option to false or true # 2. one of the appropriate config options was set if 'include_tasks' in task_ds: is_static = False elif 'import_tasks' in task_ds: is_static = True elif t.static is not None: display.deprecated( "The use of 'static' has been deprecated. " "Use 'import_tasks' for static inclusion, or 'include_tasks' for dynamic inclusion" ) is_static = t.static else: is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \ (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \ (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop) if is_static: if t.loop is not None: if 'import_tasks' in task_ds: raise AnsibleParserError( "You cannot use loops on 'import_tasks' statements. 
You should use 'include_tasks' instead.", obj=task_ds) else: raise AnsibleParserError( "You cannot use 'static' on an include with a loop", obj=task_ds) # we set a flag to indicate this include was static t.statically_loaded = True # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = block cumulative_path = None found = False subdir = 'tasks' if use_handlers: subdir = 'handlers' while parent_include is not None: if not isinstance(parent_include, TaskInclude): parent_include = parent_include._parent continue parent_include_dir = os.path.dirname( templar.template( parent_include.args.get('_raw_params'))) if cumulative_path is None: cumulative_path = parent_include_dir elif not os.path.isabs(cumulative_path): cumulative_path = os.path.join( parent_include_dir, cumulative_path) include_target = templar.template( t.args['_raw_params']) if t._role: new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path) include_file = loader.path_dwim_relative( new_basedir, subdir, include_target) else: include_file = loader.path_dwim_relative( loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): found = True break else: parent_include = parent_include._parent if not found: try: include_target = templar.template( t.args['_raw_params']) except AnsibleUndefinedVariable as e: raise AnsibleParserError( "Error when evaluating variable in include name: %s.\n\n" "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" "sources like group or host vars." % t.args['_raw_params'], obj=task_ds, suppress_extended_error=True, orig_exc=e) if t._role: include_file = loader.path_dwim_relative( t._role._role_path, subdir, include_target) else: include_file = loader.path_dwim(include_target) try: data = loader.load_from_file(include_file) if data is None: display.warning( 'file %s is empty and had no tasks to include' % include_file) continue elif not isinstance(data, list): raise AnsibleParserError( "included task files must contain a list of tasks", obj=data) # since we can't send callbacks here, we display a message directly in # the same fashion used by the on_include callback. We also do it here, # because the recursive nature of helper methods means we may be loading # nested includes, and we want the include order printed correctly display.vv("statically imported: %s" % include_file) except AnsibleFileNotFound: if t.static or \ C.DEFAULT_TASK_INCLUDES_STATIC or \ C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers: raise display.deprecated( "Included file '%s' not found, however since this include is not " "explicitly marked as 'static: yes', we will try and include it dynamically " "later. In the future, this will be an error unless 'static: no' is used " "on the include task. 
If you do not want missing includes to be considered " "dynamic, use 'static: yes' on the include or set the global ansible.cfg " "options to make all includes static for tasks and/or handlers" % include_file, version="2.7") task_list.append(t) continue ti_copy = t.copy(exclude_parent=True) ti_copy._parent = block included_blocks = load_list_of_blocks( data, play=play, parent_block=None, task_include=ti_copy, role=role, use_handlers=use_handlers, loader=loader, variable_manager=variable_manager, ) # pop tags out of the include args, if they were specified there, and assign # them to the include. If the include already had tags specified, we raise an # error so that users know not to specify them both ways tags = ti_copy.vars.pop('tags', []) if isinstance(tags, string_types): tags = tags.split(',') if len(tags) > 0: if len(ti_copy.tags) > 0: raise AnsibleParserError( "Include tasks should not specify tags in more than one way (both via args and directly on the task). " "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement", obj=task_ds, suppress_extended_error=True, ) display.deprecated( "You should not specify tags in the include parameters. All tags should be specified using the task-level option", version="2.7") else: tags = ti_copy.tags[:] # now we extend the tags on each of the included blocks for b in included_blocks: b.tags = list(set(b.tags).union(tags)) # END FIXME # FIXME: handlers shouldn't need this special handling, but do # right now because they don't iterate blocks correctly if use_handlers: for b in included_blocks: task_list.extend(b.block) else: task_list.extend(included_blocks) else: t.is_static = False task_list.append(t) elif 'include_role' in task_ds or 'import_role' in task_ds: ir = IncludeRole.load( task_ds, block=block, role=role, task_include=None, variable_manager=variable_manager, loader=loader, ) # 1. the user has set the 'static' option to false or true # 2. one of the appropriate config options was set is_static = False if 'import_role' in task_ds: is_static = True elif ir.static is not None: display.deprecated( "The use of 'static' for 'include_role' has been deprecated. " "Use 'import_role' for static inclusion, or 'include_role' for dynamic inclusion" ) is_static = ir.static if is_static: if ir.loop is not None: if 'import_tasks' in task_ds: raise AnsibleParserError( "You cannot use loops on 'import_role' statements. You should use 'include_role' instead.", obj=task_ds) else: raise AnsibleParserError( "You cannot use 'static' on an include_role with a loop", obj=task_ds) # we set a flag to indicate this include was static ir.statically_loaded = True # template the role name now, if needed all_vars = variable_manager.get_vars(play=play, task=ir) templar = Templar(loader=loader, variables=all_vars) if templar._contains_vars(ir._role_name): ir._role_name = templar.template(ir._role_name) # uses compiled list from object blocks, _ = ir.get_block_list( variable_manager=variable_manager, loader=loader) t = task_list.extend(blocks) else: # passes task object itself for latter generation of list t = task_list.append(ir) else: if use_handlers: t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) else: t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) task_list.append(t) return task_list
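load_list_of_tasks() decides whether an include is resolved statically (at parse time) or dynamically (at run time) from the keyword used, an explicit 'static' flag, and a chain of fallbacks. That decision, condensed into a standalone predicate with the config flags and templating checks passed in as plain booleans (names are illustrative):

def include_is_static(task_ds, explicit_static, use_handlers,
                      task_includes_static, handler_includes_static,
                      contains_vars, all_parents_static, has_loop):
    if 'include_tasks' in task_ds:
        return False             # include_tasks is always dynamic
    if 'import_tasks' in task_ds:
        return True              # import_tasks is always static
    if explicit_static is not None:
        return explicit_static   # deprecated per-task 'static' override
    # fall back to config defaults, then to "safe to inline" heuristics
    return task_includes_static or \
        (use_handlers and handler_includes_static) or \
        (not contains_vars and all_parents_static and not has_loop)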
def wrapped(self, *args, **kwargs):
    # `func` is the wrapped connection method supplied by the enclosing
    # decorator; codecs, os, time, display, C and the Ansible exception
    # classes are assumed to be imported at module level.
    import re
    remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
    cmd_summary = "%s..." % args[0]
    for attempt in range(remaining_tries):
        cmd = args[0]
        if attempt != 0 and self._play_context.password and isinstance(cmd, list):
            # If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
            self.sshpass_pipe = os.pipe()
            cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')

        try:
            try:
                ##################################
                # override to remove/intercept 'Done' of citrix adc
                return_tuple = func(self, *args, **kwargs)
                return_tuple = list(return_tuple)
                subst = ""
                regex = r"(\r\n|\r|\n|)( Done)(\r\n|\r|\n)+"
                return_tuple[1] = re.sub(regex, subst, codecs.decode(return_tuple[1]), 0, re.UNICODE)
                return_tuple = tuple(return_tuple)
                # end
                ###############################
                display.vvv(return_tuple, host=self.host)
                # 0 = success
                # 1-254 = remote command return code
                # 255 = failure from the ssh command itself
            except (AnsibleControlPersistBrokenPipeError) as e:
                # Retry one more time because of the ControlPersist broken pipe (see #16731)
                display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
                return_tuple = func(self, *args, **kwargs)

            if return_tuple[0] != 255:
                break
            else:
                raise AnsibleConnectionFailure("Failed to connect to the host via ssh: %s" % to_native(return_tuple[2]))
        except (AnsibleConnectionFailure, Exception) as e:
            if attempt == remaining_tries - 1:
                raise
            else:
                pause = 2 ** attempt - 1
                if pause > 30:
                    pause = 30
                if isinstance(e, AnsibleConnectionFailure):
                    msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
                else:
                    msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
                display.vv(msg, host=self.host)
                time.sleep(pause)
                continue

    return return_tuple
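The retry pacing above is an exponential backoff of 2**attempt - 1 seconds, capped at 30. That pacing as a tiny standalone helper:

def backoff_delay(attempt, cap=30):
    """Seconds to pause before retry number `attempt` (0-based), capped."""
    return min(2 ** attempt - 1, cap)

# attempts 0..6 pause for 0, 1, 3, 7, 15, 30, 30 seconds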
def dbg(msg):
    if show_debug_messages:
        display.vv(msg)
def _execute_meta(self, task, play_context, iterator, target_host):
    # meta tasks store their args in the _raw_params field of args,
    # since they do not use k=v pairs, so get that
    meta_action = task.args.get('_raw_params')

    # FIXME(s):
    # * raise an error or show a warning when a conditional is used
    #   on a meta task that doesn't support them

    def _evaluate_conditional(h):
        all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
        templar = Templar(loader=self._loader, variables=all_vars)
        return task.evaluate_conditional(templar, all_vars)

    skipped = False
    msg = ''
    if meta_action == 'noop':
        # FIXME: issue a callback for the noop here?
        msg = "noop"
    elif meta_action == 'flush_handlers':
        self.run_handlers(iterator, play_context)
        msg = "ran handlers"
    elif meta_action == 'refresh_inventory':
        self._inventory.refresh_inventory()
        msg = "inventory successfully refreshed"
    elif meta_action == 'clear_facts':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                hostname = host.get_name()
                self._variable_manager.clear_facts(hostname)
            msg = "facts cleared"
        else:
            skipped = True
    elif meta_action == 'clear_host_errors':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                self._tqm._failed_hosts.pop(host.name, False)
                self._tqm._unreachable_hosts.pop(host.name, False)
                iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
            msg = "cleared host errors"
        else:
            skipped = True
    elif meta_action == 'end_play':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                if host.name not in self._tqm._unreachable_hosts:
                    iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
            msg = "ending play"
    elif meta_action == 'reset_connection':
        if target_host in self._active_connections:
            connection = Connection(self._active_connections[target_host])
            del self._active_connections[target_host]
        else:
            connection = connection_loader.get(play_context.connection, play_context, os.devnull)
            play_context.set_options_from_plugin(connection)
        if connection:
            connection.reset()
            msg = 'reset connection'
        else:
            msg = 'no connection, nothing to reset'
    else:
        raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

    result = {'msg': msg}
    if skipped:
        result['skipped'] = True
    else:
        result['changed'] = False

    display.vv("META: %s" % msg)
    return [TaskResult(target_host, task, result)]
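_execute_meta() selects behavior from the meta action name with a long if/elif chain; the same dispatch can be expressed as a lookup table, sketched here with trivial callables standing in for the real handlers:

def execute_meta(meta_action, handlers):
    try:
        return handlers[meta_action]()
    except KeyError:
        raise ValueError("invalid meta action requested: %s" % meta_action)

# handlers = {'noop': lambda: 'noop', 'flush_handlers': lambda: 'ran handlers'}
# execute_meta('noop', handlers) -> 'noop'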
def add_fields(self, text, fields, limit, opt_indent):
    for o in sorted(fields):
        opt = fields[o]

        required = opt.pop('required', False)
        if not isinstance(required, bool):
            raise AnsibleError("Incorrect value for 'required': a boolean is needed: %s" % required)
        if required:
            opt_leadin = "="
        else:
            opt_leadin = "-"

        text.append("%s %s" % (opt_leadin, o))

        if isinstance(opt['description'], list):
            for entry in opt['description']:
                text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
        else:
            text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
        del opt['description']

        aliases = ''
        if 'aliases' in opt:
            aliases = "(Aliases: " + ", ".join(str(i) for i in opt['aliases']) + ")"
            del opt['aliases']

        choices = ''
        if 'choices' in opt:
            choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
            del opt['choices']

        default = ''
        if 'default' in opt or not required:
            default = "[Default: " + str(opt.pop('default', '(null)')) + "]"

        text.append(textwrap.fill(CLI.tty_ify(aliases + choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))

        if 'options' in opt:
            text.append(opt_indent + "options:\n")
            self.add_fields(text, opt['options'], limit, opt_indent + opt_indent)
            text.append('')
            del opt['options']

        if 'spec' in opt:
            text.append(opt_indent + "spec:\n")
            self.add_fields(text, opt['spec'], limit, opt_indent + opt_indent)
            text.append('')
            del opt['spec']

        for conf in ('config', 'env_vars', 'host_vars'):
            if conf in opt:
                text.append(textwrap.fill(CLI.tty_ify("%s: " % conf), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
                for entry in opt[conf]:
                    if isinstance(entry, dict):
                        pre = " -"
                        for key in entry:
                            text.append(textwrap.fill(CLI.tty_ify("%s %s: %s" % (pre, key, entry[key])), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
                            pre = "  "
                    else:
                        text.append(textwrap.fill(CLI.tty_ify(" - %s" % entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
                del opt[conf]

        # unspecified keys
        for k in opt:
            if k.startswith('_'):
                continue
            if isinstance(opt[k], string_types):
                text.append(textwrap.fill(CLI.tty_ify("%s: %s" % (k, opt[k])), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
            elif isinstance(opt[k], (list, dict)):
                text.append(textwrap.fill(CLI.tty_ify("%s: %s" % (k, yaml.dump(opt[k], Dumper=AnsibleDumper, default_flow_style=False))), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
            else:
                display.vv("Skipping %s key because we don't know how to handle it" % k)
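add_fields() leans on textwrap.fill() with matching initial and subsequent indents so every wrapped line of an option description stays aligned under its option. A runnable illustration of that convention:

import textwrap

def wrap_option_line(entry, limit=79, opt_indent=" " * 8):
    return textwrap.fill(entry, limit,
                         initial_indent=opt_indent,
                         subsequent_indent=opt_indent)

print(wrap_option_line("Path to the file being managed on the remote host; "
                       "required unless state is absent."))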