def deserialize(self, data):
    # import here to prevent circular importing issues
    from ansible.playbook.block import Block
    from ansible.playbook.role import Role

    block_data = data.get('block')
    if block_data:
        b = Block()
        b.deserialize(block_data)
        self._block = b
        del data['block']

    role_data = data.get('role')
    if role_data:
        r = Role()
        r.deserialize(role_data)
        self._role = r
        del data['role']

    ti_data = data.get('task_include')
    if ti_data:
        ti = TaskInclude()
        ti.deserialize(ti_data)
        self._task_include = ti
        del data['task_include']

    super(TaskInclude, self).deserialize(data)
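A minimal round-trip sketch of how this deserialize() is meant to be used; it assumes the matching serialize() on the same class emits the 'block', 'role', and 'task_include' keys consumed above:

# Illustrative only: rebuild a TaskInclude from its serialized form.
data = original.serialize()   # assumed counterpart producing 'block'/'role'/'task_include' keys
restored = TaskInclude()
restored.deserialize(data)    # reattaches the parent Block, Role, and TaskInclude objects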
def _create_noop_block_from(self, original_block, parent):
    noop_block = Block(parent_block=parent)
    noop_block.block = self._replace_with_noop(original_block.block)
    noop_block.always = self._replace_with_noop(original_block.always)
    noop_block.rescue = self._replace_with_noop(original_block.rescue)
    return noop_block
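The helper _replace_with_noop is not shown in this snippet; a hedged sketch of what it plausibly looks like, inferred from the call sites above (the noop_task attribute and the el._parent access are assumptions):

def _replace_with_noop(self, target):
    # Assumed shape: swap every Task for a shared 'meta: noop' task and
    # recurse into nested Blocks so list lengths and structure are preserved,
    # keeping excluded hosts in lockstep with included ones.
    result = []
    for el in target:
        if isinstance(el, Task):
            result.append(self.noop_task)  # assumed attribute holding a noop Task
        elif isinstance(el, Block):
            result.append(self._create_noop_block_from(el, el._parent))
    return result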
def _load_list_of_blocks(self, ds):
    assert type(ds) == list
    block_list = []
    for block in ds:
        b = Block(block)
        block_list.append(b)
    return block_list
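A quick usage sketch (the enclosing object and data are illustrative): each entry of the parsed-YAML list becomes one Block:

# Illustrative only: 'obj' stands in for the enclosing Play/Block instance.
ds = [dict(block=[dict(action='debug')]), dict(block=[dict(action='ping')])]
blocks = obj._load_list_of_blocks(ds)
assert len(blocks) == 2 and all(isinstance(b, Block) for b in blocks)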
def deserialize(self, data):
    # import is here to avoid import loops
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.handler_task_include import HandlerTaskInclude

    parent_data = data.get('parent', None)
    if parent_data:
        parent_type = data.get('parent_type')
        if parent_type == 'Block':
            p = Block()
        elif parent_type == 'TaskInclude':
            p = TaskInclude()
        elif parent_type == 'HandlerTaskInclude':
            p = HandlerTaskInclude()
        p.deserialize(parent_data)
        self._parent = p
        del data['parent']

    role_data = data.get('role')
    if role_data:
        r = Role()
        r.deserialize(role_data)
        self._role = r
        del data['role']

    self._ansible_internal_redirect_list = data.get('_ansible_internal_redirect_list', [])
    self.implicit = data.get('implicit', False)

    super(Task, self).deserialize(data)
def deserialize(self, data):
    # import is here to avoid import loops
    # from ansible.playbook.task_include import TaskInclude

    block_data = data.get('block')
    if block_data:
        b = Block()
        b.deserialize(block_data)
        self._block = b
        del data['block']

    role_data = data.get('role')
    if role_data:
        r = Role()
        r.deserialize(role_data)
        self._role = r
        del data['role']

    ti_data = data.get('task_include')
    if ti_data:
        # a plain Task is used here instead of TaskInclude to avoid the
        # circular import noted above
        # ti = TaskInclude()
        ti = Task()
        ti.deserialize(ti_data)
        self._task_include = ti
        del data['task_include']

    super(Task, self).deserialize(data)
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
    self._play = play
    self._blocks = []

    setup_block = Block(play=self._play)
    setup_task = Task(block=setup_block)
    setup_task.action = 'setup'
    setup_task.tags = ['always']
    setup_task.args = {}
    setup_task.set_loader(self._play._loader)
    setup_block.block = [setup_task]

    setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
    self._blocks.append(setup_block)

    for block in self._play.compile():
        new_block = block.filter_tagged_tasks(play_context, all_vars)
        if new_block.has_tasks():
            self._blocks.append(new_block)

    self._host_states = {}
    start_at_matched = False
    for host in inventory.get_hosts(self._play.hosts):
        self._host_states[host.name] = HostState(blocks=self._blocks)

        # if the host's name is in the variable manager's fact cache, then set
        # its _gathered_facts flag to true for smart gathering tests later
        if host.name in variable_manager._fact_cache:
            host._gathered_facts = True

        # if we're looking to start at a specific task, iterate through
        # the tasks for this host until we find the specified task
        if play_context.start_at_task is not None and not start_at_done:
            while True:
                (s, task) = self.get_next_task_for_host(host, peek=True)
                if s.run_state == self.ITERATING_COMPLETE:
                    break
                if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                   task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                    start_at_matched = True
                    break
                else:
                    self.get_next_task_for_host(host)

            # finally, reset the host's state to ITERATING_SETUP
            self._host_states[host.name].run_state = self.ITERATING_SETUP

    if start_at_matched:
        # we have our match, so clear the start_at_task field on the
        # play context to flag that we've started at a task (and future
        # plays won't try to advance)
        play_context.start_at_task = None

    # Extend the play handlers list to include the handlers defined in roles
    self._play.handlers.extend(play.compile_roles_handlers())
def _create_noop_block_from(self, original_block, parent):
    """Create a noop block from a block"""
    self._debug('_create_noop_block_from...')
    noop_block = Block(parent_block=parent)
    noop_block.block = self._replace_with_noop(original_block.block)
    noop_block.always = self._replace_with_noop(original_block.always)
    noop_block.rescue = self._replace_with_noop(original_block.rescue)
    return noop_block
def test_block__load_list_of_tasks(self):
    task = dict(action='test')
    b = Block()
    self.assertEqual(b._load_list_of_tasks([]), [])

    res = b._load_list_of_tasks([task])
    self.assertEqual(len(res), 1)
    self.assertIsInstance(res[0], Task)

    res = b._load_list_of_tasks([task, task, task])
    self.assertEqual(len(res), 3)
def compile(self, play, dep_chain=None):
    '''
    Returns the task list for this role, which is created by first
    recursively compiling the tasks for all direct dependencies, and
    then adding on the tasks for this role.

    The role compile() also remembers and saves the dependency chain
    with each task, so tasks know by which route they were found, and
    can correctly take their parent's tags/conditionals into account.
    '''
    from ansible.playbook.block import Block
    from ansible.playbook.task import Task

    block_list = []

    # update the dependency chain here
    if dep_chain is None:
        dep_chain = []
    new_dep_chain = dep_chain + [self]

    deps = self.get_direct_dependencies()
    for dep in deps:
        dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
        block_list.extend(dep_blocks)

    for task_block in self._task_blocks:
        new_task_block = task_block.copy()
        new_task_block._dep_chain = new_dep_chain
        new_task_block._play = play
        block_list.append(new_task_block)

    eor_block = Block(play=play)
    eor_block._loader = self._loader
    eor_block._role = self
    eor_block._variable_manager = self._variable_manager
    eor_block.run_once = False

    eor_task = Task(block=eor_block)
    eor_task._role = self
    eor_task.action = 'meta'
    eor_task.args = {'_raw_params': 'role_complete'}
    eor_task.implicit = True
    eor_task.tags = ['always']
    eor_task.when = True

    eor_block.block = [eor_task]
    block_list.append(eor_block)

    return block_list
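To make the recursion concrete, a sketch of the ordering compile() produces for a role with one dependency (role names hypothetical):

# Illustrative only: for roleA that depends on roleB, roleA.compile(play)
# returns roughly:
#   [roleB task blocks..., roleB end-of-role 'role_complete' meta block,
#    roleA task blocks..., roleA end-of-role 'role_complete' meta block]
# and each block records how it was reached, e.g. roleB's blocks carry
# _dep_chain == [roleA, roleB].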
def create_action_module(name, args=None, task_vars=None):
    play = Play.load(dict())
    play_context = PlayContext(play=play)
    module = LintActionModule(
        task=Task.load(data=dict(local_action=name, args=args), block=Block(play=play)),
        connection=Connection(play_context, new_stdin=False),
        play_context=play_context,
        loader=play._loader,
        templar=Templar(play._loader),
        shared_loader_obj=None,
    )
    module.use_display(NullDisplay())
    return module
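A hedged usage sketch (the action name and args are purely illustrative; LintActionModule and NullDisplay come from the surrounding test harness, not from Ansible itself):

# Illustrative only: build a lint harness around the 'debug' action.
module = create_action_module('debug', args={'msg': 'hello'})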
def deserialize(self, data):
    block_data = data.get('block')
    self._dep_chain = data.get('dep_chain', [])
    if block_data:
        b = Block()
        b.deserialize(block_data)
        self._block = b
        del data['block']

    role_data = data.get('role')
    if role_data:
        r = Role()
        r.deserialize(role_data)
        self._role = r
        del data['role']

    super(Task, self).deserialize(data)
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
    self._play = play
    self._blocks = []
    self._task_uuid_cache = dict()

    # Default options to gather
    gather_subset = play_context.gather_subset
    gather_timeout = play_context.gather_timeout
    fact_path = play_context.fact_path

    # Retrieve subset to gather
    if self._play.gather_subset is not None:
        gather_subset = self._play.gather_subset
    # Retrieve timeout for gather
    if self._play.gather_timeout is not None:
        gather_timeout = self._play.gather_timeout
    # Retrieve fact_path
    if self._play.fact_path is not None:
        fact_path = self._play.fact_path

    setup_block = Block(play=self._play)
    setup_task = Task(block=setup_block)
    setup_task.action = 'setup'
    setup_task.name = 'Gathering Facts'
    setup_task.tags = ['always']
    setup_task.args = {
        'gather_subset': gather_subset,
    }
    if gather_timeout:
        setup_task.args['gather_timeout'] = gather_timeout
    if fact_path:
        setup_task.args['fact_path'] = fact_path
    setup_task.set_loader(self._play._loader)

    # short circuit fact gathering if the entire playbook is conditional
    if self._play._included_conditional is not None:
        setup_task.when = self._play._included_conditional[:]

    setup_block.block = [setup_task]

    setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
    self._blocks.append(setup_block)
    self.cache_block_tasks(setup_block)

    for block in self._play.compile():
        new_block = block.filter_tagged_tasks(play_context, all_vars)
        if new_block.has_tasks():
            self.cache_block_tasks(new_block)
            self._blocks.append(new_block)

    for handler_block in self._play.handlers:
        self.cache_block_tasks(handler_block)

    self._host_states = {}
    start_at_matched = False
    for host in inventory.get_hosts(self._play.hosts):
        self._host_states[host.name] = HostState(blocks=self._blocks)

        # if the host's name is in the variable manager's fact cache, then set
        # its _gathered_facts flag to true for smart gathering tests later
        if host.name in variable_manager._fact_cache and variable_manager._fact_cache.get(host.name).get('module_setup', False):
            host._gathered_facts = True

        # if we're looking to start at a specific task, iterate through
        # the tasks for this host until we find the specified task
        if play_context.start_at_task is not None and not start_at_done:
            while True:
                (s, task) = self.get_next_task_for_host(host, peek=True)
                if s.run_state == self.ITERATING_COMPLETE:
                    break
                if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                   task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                    start_at_matched = True
                    break
                else:
                    self.get_next_task_for_host(host)

            # finally, reset the host's state to ITERATING_SETUP
            if start_at_matched:
                self._host_states[host.name].did_start_at_task = True
                self._host_states[host.name].run_state = self.ITERATING_SETUP

    if start_at_matched:
        # we have our match, so clear the start_at_task field on the
        # play context to flag that we've started at a task (and future
        # plays won't try to advance)
        play_context.start_at_task = None
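Note that the start-at-task comparison above accepts glob patterns via the stdlib fnmatch module, so --start-at-task can name tasks loosely; a tiny illustration:

import fnmatch
# 'Gather*' matches the implicit 'Gathering Facts' task name above.
assert fnmatch.fnmatch('Gathering Facts', 'Gather*')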
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = True
    work_to_do = True
    while work_to_do and not self._tqm._terminated:
        try:
            self._display.debug("getting the remaining hosts for this loop")
            hosts_left = self._inventory.get_hosts(iterator._play.hosts)
            self._display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False
            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            for (host, task) in host_tasks:
                if not task:
                    continue

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                    if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                        run_once = True
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    pass

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        self._display.debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    self._execute_meta(task, play_context, iterator)
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    self._display.debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    self._display.debug("done getting variables")

                    if not callback_sent:
                        display.debug("sending task start callback, copying the task so we can template it temporarily")
                        saved_name = task.name
                        display.debug("done copying, going to template now")
                        try:
                            task.name = unicode(templar.template(task.name, fail_on_undefined=False))
                            display.debug("done templating")
                        except:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                            pass
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
                        callback_sent = True
                        display.debug("sending task start callback")

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)

                results = self._process_pending_results(iterator)
                host_results.extend(results)

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

            # go to next host/task group
            if skip_rest:
                continue

            self._display.debug("done queuing things up, now waiting for results queue to drain")
            results = self._wait_on_pending_results(iterator)
            host_results.extend(results)

            if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                self._display.debug("out of hosts to run on")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            try:
                included_files = IncludedFile.process_include_results(
                    host_results,
                    self._tqm,
                    iterator=iterator,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AnsibleError as e:
                return False

            if len(included_files) > 0:
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        self._display.warning(str(e))
                        continue

                    for new_block in new_blocks:
                        noop_block = Block(parent_block=task._block)
                        noop_block.block = [noop_task for t in new_block.block]
                        noop_block.always = [noop_task for t in new_block.always]
                        noop_block.rescue = [noop_task for t in new_block.rescue]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                all_blocks[host].append(final_block)
                            else:
                                all_blocks[host].append(noop_block)

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

            self._display.debug("results queue empty")
def test_construct_empty_block(self):
    b = Block()
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
    self._play = play
    self._blocks = []
    self._variable_manager = variable_manager

    setup_block = Block(play=self._play)
    # Gathering facts with run_once would copy the facts from one host to
    # the others.
    setup_block.run_once = False
    setup_task = Task(block=setup_block)
    setup_task.action = 'gather_facts'
    # TODO: hardcoded resolution here, but should use actual resolution code
    #       in the end, in case of 'legacy' mismatch
    setup_task.resolved_action = 'ansible.builtin.gather_facts'
    setup_task.name = 'Gathering Facts'
    setup_task.args = {}

    # Unless play is specifically tagged, gathering should 'always' run
    if not self._play.tags:
        setup_task.tags = ['always']

    # Default options to gather
    for option in ('gather_subset', 'gather_timeout', 'fact_path'):
        value = getattr(self._play, option, None)
        if value is not None:
            setup_task.args[option] = value

    setup_task.set_loader(self._play._loader)
    # short circuit fact gathering if the entire playbook is conditional
    if self._play._included_conditional is not None:
        setup_task.when = self._play._included_conditional[:]
    setup_block.block = [setup_task]

    setup_block = setup_block.filter_tagged_tasks(all_vars)
    self._blocks.append(setup_block)

    # keep a flattened (no blocks) list of all tasks from the play,
    # used for the lockstep mechanism in the linear strategy
    self.all_tasks = setup_block.get_tasks()
    for block in self._play.compile():
        new_block = block.filter_tagged_tasks(all_vars)
        if new_block.has_tasks():
            self._blocks.append(new_block)
            self.all_tasks.extend(new_block.get_tasks())

    # keep a list of all handlers; it is copied into each HostState
    # at the beginning of IteratingStates.HANDLERS
    # the copy happens at each flush in order to restore the original
    # list and remove any included handlers that might not be notified
    # at the particular flush
    self.handlers = [h for b in self._play.handlers for h in b.block]

    self._host_states = {}
    start_at_matched = False
    batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
    self.batch_size = len(batch)
    for host in batch:
        self.set_state_for_host(host.name, HostState(blocks=self._blocks))

        # if we're looking to start at a specific task, iterate through
        # the tasks for this host until we find the specified task
        if play_context.start_at_task is not None and not start_at_done:
            while True:
                (s, task) = self.get_next_task_for_host(host, peek=True)
                if s.run_state == IteratingStates.COMPLETE:
                    break
                if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
                   task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                    start_at_matched = True
                    break
                self.set_state_for_host(host.name, s)

            # finally, reset the host's state to IteratingStates.SETUP
            if start_at_matched:
                self._host_states[host.name].did_start_at_task = True
                self._host_states[host.name].run_state = IteratingStates.SETUP

    if start_at_matched:
        # we have our match, so clear the start_at_task field on the
        # play context to flag that we've started at a task (and future
        # plays won't try to advance)
        play_context.start_at_task = None

    self.end_play = False
    self.cur_task = 0
def run(self, iterator, connection_info):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = True
    work_to_do = True
    while work_to_do and not self._tqm._terminated:
        try:
            debug("getting the remaining hosts for this loop")
            self._tqm._failed_hosts = iterator.get_failed_hosts()
            hosts_left = self.get_hosts_remaining(iterator._play)
            debug("done getting the remaining hosts for this loop")
            if len(hosts_left) == 0:
                debug("out of hosts to run on")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False
            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
            for (host, task) in host_tasks:
                if not task:
                    continue

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                    if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                        run_once = True
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    pass

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run():
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    # meta tasks store their args in the _raw_params field of args,
                    # since they do not use k=v pairs, so get that
                    meta_action = task.args.get('_raw_params')
                    if meta_action == 'noop':
                        # FIXME: issue a callback for the noop here?
                        continue
                    elif meta_action == 'flush_handlers':
                        self.run_handlers(iterator, connection_info)
                    else:
                        raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                else:
                    debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    debug("done getting variables")

                    if not callback_sent:
                        temp_task = task.copy()
                        temp_task.name = templar.template(temp_task.get_name(), fail_on_undefined=False)
                        self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
                        callback_sent = True

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, connection_info)

                results = self._process_pending_results(iterator)
                host_results.extend(results)

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

            debug("done queuing things up, now waiting for results queue to drain")
            results = self._wait_on_pending_results(iterator)
            host_results.extend(results)

            try:
                included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader)
            except AnsibleError as e:
                return False

            if len(included_files) > 0:
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        # FIXME: callback here?
                        print(e)
                        continue

                    for new_block in new_blocks:
                        noop_block = Block(parent_block=task._block)
                        noop_block.block = [noop_task for t in new_block.block]
                        noop_block.always = [noop_task for t in new_block.always]
                        noop_block.rescue = [noop_task for t in new_block.rescue]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                final_block = new_block.filter_tagged_tasks(connection_info, task_vars)
                                all_blocks[host].append(final_block)
                            else:
                                all_blocks[host].append(noop_block)

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

            debug("results queue empty")
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
    self._play = play
    self._blocks = []
    self._variable_manager = variable_manager

    # Default options to gather
    gather_subset = self._play.gather_subset
    gather_timeout = self._play.gather_timeout
    fact_path = self._play.fact_path

    setup_block = Block(play=self._play)
    # Gathering facts with run_once would copy the facts from one host to
    # the others.
    setup_block.run_once = False
    setup_task = Task(block=setup_block)
    setup_task.action = 'gather_facts'
    setup_task.name = 'Gathering Facts'
    setup_task.args = {
        'gather_subset': gather_subset,
    }

    # Unless play is specifically tagged, gathering should 'always' run
    if not self._play.tags:
        setup_task.tags = ['always']

    if gather_timeout:
        setup_task.args['gather_timeout'] = gather_timeout
    if fact_path:
        setup_task.args['fact_path'] = fact_path
    setup_task.set_loader(self._play._loader)

    # short circuit fact gathering if the entire playbook is conditional
    if self._play._included_conditional is not None:
        setup_task.when = self._play._included_conditional[:]

    setup_block.block = [setup_task]

    setup_block = setup_block.filter_tagged_tasks(all_vars)
    self._blocks.append(setup_block)

    for block in self._play.compile():
        new_block = block.filter_tagged_tasks(all_vars)
        if new_block.has_tasks():
            self._blocks.append(new_block)

    self._host_states = {}
    start_at_matched = False
    batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
    self.batch_size = len(batch)
    for host in batch:
        self._host_states[host.name] = HostState(blocks=self._blocks)

        # if we're looking to start at a specific task, iterate through
        # the tasks for this host until we find the specified task
        if play_context.start_at_task is not None and not start_at_done:
            while True:
                (s, task) = self.get_next_task_for_host(host, peek=True)
                if s.run_state == self.ITERATING_COMPLETE:
                    break
                if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                   task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                    start_at_matched = True
                    break
                else:
                    self.get_next_task_for_host(host)

            # finally, reset the host's state to ITERATING_SETUP
            if start_at_matched:
                self._host_states[host.name].did_start_at_task = True
                self._host_states[host.name].run_state = self.ITERATING_SETUP

    if start_at_matched:
        # we have our match, so clear the start_at_task field on the
        # play context to flag that we've started at a task (and future
        # plays won't try to advance)
        play_context.start_at_task = None
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = self._tqm.RUN_OK
    work_to_do = True
    while work_to_do and not self._tqm._terminated:
        try:
            display.debug("getting the remaining hosts for this loop")
            hosts_left = self.get_hosts_left(iterator)
            display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False
            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            # flag set if task is set to any_errors_fatal
            any_errors_fatal = False

            succeeded_hosts = {}
            retry_task = True
            while retry_task:
                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.
                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        # for the linear strategy, we run meta tasks just once and for
                        # all hosts currently being iterated over rather than one host
                        results.extend(self._execute_meta(task, play_context, iterator, host))
                        if task.args.get('_raw_params', None) != 'noop':
                            run_once = True
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                        if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                            any_errors_fatal = True

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        if host not in succeeded_hosts:
                            self._blocked_hosts[host.get_name()] = True
                            self._queue_task(host, task, task_vars, play_context)
                        del task_vars

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug("done queuing things up, now waiting for results queue to drain")
                if self._pending_results > 0:
                    results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager
                    )
                except AnsibleError as e:
                    # this is a fatal error, so we abort here regardless of block state
                    return self._tqm.RUN_ERROR

                include_failure = False
                if len(included_files) > 0:
                    display.debug("we have included files to process")

                    # A noop task for use in padding dynamic includes
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" % included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            if included_file._is_role:
                                new_ir = included_file._task.copy()
                                new_ir.vars.update(included_file._args)

                                new_blocks, handler_blocks = new_ir.get_block_list(
                                    play=iterator._play,
                                    variable_manager=self._variable_manager,
                                    loader=self._loader,
                                )
                                self._tqm.update_handler_list([handler for handler_block in handler_blocks for handler in handler_block.block])
                            else:
                                new_blocks = self._load_included_file(included_file, iterator=iterator)

                            display.debug("iterating over new_blocks loaded from include file")
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    play=iterator._play,
                                    task=included_file._task,
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                display.debug("done filtering new block on tags")

                                noop_block = Block(parent_block=task._parent)
                                noop_block.block = [noop_task for t in new_block.block]
                                noop_block.always = [noop_task for t in new_block.always]
                                noop_block.rescue = [noop_task for t in new_block.rescue]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug("done iterating over new_blocks loaded from include file")
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(to_text(e), wrap_text=False)
                            include_failure = True
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug("extending task lists for all hosts with included blocks")
                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])
                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")

                display.debug("checking for any_errors_fatal")
                failed_hosts = []
                unreachable_hosts = []
                for res in results:
                    if res.is_failed() and iterator.is_failed(res._host):
                        failed_hosts.append(res._host.name)
                    elif res.is_unreachable():
                        unreachable_hosts.append(res._host.name)
                    else:
                        succeeded_hosts[res._host] = res

                if len(failed_hosts) == 0:
                    # No more failures means we do not need to retry anymore.
                    retry_task = False
                else:
                    msg = 'Host failed, ignore/retry/abort/debug: %s on [%s] (i/r/a/d): ' % (str(task), ' '.join(failed_hosts)) + '\n'
                    resp = 'bogus'
                    while resp.strip().lower()[:1] not in ['i', 'r', 'a', 'd']:
                        resp = display.prompt(msg)

                    # Currently the only difference between ignore and repeat is setting retry_task to False
                    if resp.strip().lower() in ['i', 'ignore']:
                        retry_task = False
                    if resp.strip().lower() in ['r', 'repeat', 'i', 'ignore']:
                        # We need to revert the internal failed host states if we want to ignore the errors.
                        # The internal state should be identical to that of a successful host.
                        for failed_host in failed_hosts:
                            iterator._host_states[failed_host].run_state = PlayIterator.ITERATING_TASKS
                            iterator._host_states[failed_host].fail_state = PlayIterator.FAILED_NONE
                            # Fix child states as well
                            if iterator._host_states[failed_host].tasks_child_state:
                                iterator._host_states[failed_host].tasks_child_state.run_state = PlayIterator.ITERATING_TASKS
                                iterator._host_states[failed_host].tasks_child_state.fail_state = PlayIterator.FAILED_NONE
                        self._tqm.clear_failed_hosts()
                    elif resp.strip().lower() in ['a', 'abort']:
                        retry_task = False
                        for host in hosts_left:
                            (s, _) = iterator.get_next_task_for_host(host, peek=True)
                            if s.run_state != iterator.ITERATING_RESCUE or \
                               s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0:
                                self._tqm._failed_hosts[host.name] = True
                                result |= self._tqm.RUN_FAILED_BREAK_PLAY

                                # don't double-mark hosts, or the iterator will potentially
                                # fail them out of the rescue/always states
                                if host.name not in failed_hosts:
                                    self._tqm._failed_hosts[host.name] = True
                                    iterator.mark_host_failed(host)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
                    elif resp.strip().lower() in ['d', 'debug']:
                        msg = "Running PDB. Refer to https://pymotw.com/2/pdb/ for instruction.\n\n"
                        msg += "Common commands:\n\n"
                        msg += "Print the task's error message: print(failure_debug_message)\n"
                        msg += 'Add a variable to a host: host_to_edit = hosts_left[0]; task._variable_manager.set_host_variable(host_to_edit, "new_var_key", "new_var_value")\n'
                        msg += 'Print task vars: print(task_vars["var_key_name"])\n'
                        display.display(msg, color='blue', stderr=False, screen_only=False, log_only=False)
                        pdb.set_trace()

        except (IOError, EOFError) as e:
            display.debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return self._tqm.RUN_UNKNOWN_ERROR

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
def run(self, iterator, play_context):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    # iterate over each task, while there is one left to run
    result = True
    work_to_do = True
    while work_to_do and not self._tqm._terminated:
        try:
            display.debug("getting the remaining hosts for this loop")
            hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                          if host.name not in self._tqm._unreachable_hosts]
            display.debug("done getting the remaining hosts for this loop")

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False
            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

            # skip control
            skip_rest = False
            choose_step = True

            # flag set if task is set to any_errors_fatal
            any_errors_fatal = False

            results = []
            for (host, task) in host_tasks:
                if not task:
                    continue

                if self._tqm._terminated:
                    break

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    action = None

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run(host):
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        display.debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    self._execute_meta(task, play_context, iterator)
                else:
                    # handle step if needed, skip meta actions as they are used internally
                    if self._step and choose_step:
                        if self._take_step(task):
                            choose_step = False
                        else:
                            skip_rest = True
                            break

                    display.debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    display.debug("done getting variables")

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                    if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                        any_errors_fatal = True

                    if not callback_sent:
                        display.debug("sending task start callback, copying the task so we can template it temporarily")
                        saved_name = task.name
                        display.debug("done copying, going to template now")
                        try:
                            task.name = to_unicode(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                            display.debug("done templating")
                        except:
                            # just ignore any errors during task name templating,
                            # we don't care if it just shows the raw name
                            display.debug("templating failed for some reason")
                            pass
                        display.debug("here goes the callback...")
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        task.name = saved_name
                        callback_sent = True
                        display.debug("sending task start callback")

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, play_context)

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

                results += self._process_pending_results(iterator, one_pass=True)

            # go to next host/task group
            if skip_rest:
                continue

            display.debug("done queuing things up, now waiting for results queue to drain")
            results += self._wait_on_pending_results(iterator)
            host_results.extend(results)

            if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                display.debug("out of hosts to run on")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = self._tqm.RUN_ERROR
                break

            try:
                included_files = IncludedFile.process_include_results(
                    host_results,
                    self._tqm,
                    iterator=iterator,
                    inventory=self._inventory,
                    loader=self._loader,
                    variable_manager=self._variable_manager
                )
            except AnsibleError as e:
                return self._tqm.RUN_ERROR

            include_failure = False
            if len(included_files) > 0:
                display.debug("we have included files to process")
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                display.debug("generating all_blocks data")
                all_blocks = dict((host, []) for host in hosts_left)
                display.debug("done generating all_blocks data")
                for included_file in included_files:
                    display.debug("processing included file: %s" % included_file._filename)
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                        display.debug("iterating over new_blocks loaded from include file")
                        for new_block in new_blocks:
                            task_vars = self._variable_manager.get_vars(
                                loader=self._loader,
                                play=iterator._play,
                                task=included_file._task,
                            )
                            display.debug("filtering new block on tags")
                            final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                            display.debug("done filtering new block on tags")

                            noop_block = Block(parent_block=task._block)
                            noop_block.block = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]

                            for host in hosts_left:
                                if host in included_file._hosts:
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)
                        display.debug("done iterating over new_blocks loaded from include file")
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                        display.error(to_unicode(e), wrap_text=False)
                        include_failure = True
                        continue

                # finally go through all of the hosts and append the
                # accumulated blocks to their list of tasks
                display.debug("extending task lists for all hosts with included blocks")
                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])
                display.debug("done extending task lists")
                display.debug("done processing included files")

            display.debug("results queue empty")

            display.debug("checking for any_errors_fatal")
            failed_hosts = []
            unreachable_hosts = []
            for res in results:
                if res.is_failed():
                    failed_hosts.append(res._host.name)
                elif res.is_unreachable():
                    unreachable_hosts.append(res._host.name)

            # if any_errors_fatal and we had an error, mark all hosts as failed
            if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                for host in hosts_left:
                    # don't double-mark hosts, or the iterator will potentially
                    # fail them out of the rescue/always states
                    if host.name not in failed_hosts:
                        self._tqm._failed_hosts[host.name] = True
                        iterator.mark_host_failed(host)
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                return self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for any_errors_fatal")

            display.debug("checking for max_fail_percentage")
            if iterator._play.max_fail_percentage is not None and len(results) > 0:
                percentage = iterator._play.max_fail_percentage / 100.0
                if (len(self._tqm._failed_hosts) / len(results)) > percentage:
                    for host in hosts_left:
                        # don't double-mark hosts, or the iterator will potentially
                        # fail them out of the rescue/always states
                        if host.name not in failed_hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    return self._tqm.RUN_FAILED_BREAK_PLAY
            display.debug("done checking for max_fail_percentage")

        except (IOError, EOFError) as e:
            display.debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return self._tqm.RUN_UNKNOWN_ERROR

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
def run(self, iterator, connection_info):
    '''
    The linear strategy is simple - get the next task and queue
    it for all hosts, then wait for the queue to drain before
    moving on to the next task
    '''

    result = True

    # iterate over each task, while there is one left to run
    work_to_do = True
    while work_to_do and not self._tqm._terminated:
        try:
            debug("getting the remaining hosts for this loop")
            self._tqm._failed_hosts = iterator.get_failed_hosts()
            hosts_left = self.get_hosts_remaining(iterator._play)
            debug("done getting the remaining hosts for this loop")
            if len(hosts_left) == 0:
                debug("out of hosts to run on")
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            # queue up this task for each host in the inventory
            callback_sent = False
            work_to_do = False
            host_results = []
            host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
            for (host, task) in host_tasks:
                if not task:
                    continue

                run_once = False
                work_to_do = True

                # test to see if the task across all hosts points to an action plugin which
                # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                # will only send this task to the first host in the list.
                try:
                    action = action_loader.get(task.action, class_only=True)
                    if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                        run_once = True
                except KeyError:
                    # we don't care here, because the action may simply not have a
                    # corresponding action plugin
                    pass

                debug("getting variables")
                task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                debug("done getting variables")

                # check to see if this task should be skipped, due to it being a member of a
                # role which has already run (and whether that role allows duplicate execution)
                if task._role and task._role.has_run():
                    # If there is no metadata, the default behavior is to not allow duplicates,
                    # if there is metadata, check to see if the allow_duplicates flag was set to true
                    if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                        debug("'%s' skipped because role has already run" % task)
                        continue

                if task.action == 'meta':
                    # meta tasks store their args in the _raw_params field of args,
                    # since they do not use k=v pairs, so get that
                    meta_action = task.args.get('_raw_params')
                    if meta_action == 'noop':
                        # FIXME: issue a callback for the noop here?
                        continue
                    elif meta_action == 'flush_handlers':
                        self.run_handlers(iterator, connection_info)
                    else:
                        raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                else:
                    if not callback_sent:
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        callback_sent = True

                    self._blocked_hosts[host.get_name()] = True
                    self._queue_task(host, task, task_vars, connection_info)

                results = self._process_pending_results(iterator)
                host_results.extend(results)

                # if we're bypassing the host loop, break out now
                if run_once:
                    break

            debug("done queuing things up, now waiting for results queue to drain")
            results = self._wait_on_pending_results(iterator)
            host_results.extend(results)

            # FIXME: this needs to be somewhere else
            class IncludedFile:
                def __init__(self, filename, args, task):
                    self._filename = filename
                    self._args = args
                    self._task = task
                    self._hosts = []

                def add_host(self, host):
                    if host not in self._hosts:
                        self._hosts.append(host)

                def __eq__(self, other):
                    return other._filename == self._filename and other._args == self._args

                def __repr__(self):
                    return "%s (%s): %s" % (self._filename, self._args, self._hosts)

            # FIXME: this should also be moved to the base class in a method
            included_files = []
            for res in host_results:
                if res._task.action == 'include':
                    if res._task.loop:
                        include_results = res._result['results']
                    else:
                        include_results = [res._result]

                    for include_result in include_results:
                        # if the task result was skipped or failed, continue
                        if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
                            continue

                        original_task = iterator.get_original_task(res._host, res._task)
                        if original_task and original_task._role:
                            include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
                        else:
                            include_file = self._loader.path_dwim(res._task.args.get('_raw_params'))

                        include_variables = include_result.get('include_variables', dict())
                        if 'item' in include_result:
                            include_variables['item'] = include_result['item']

                        inc_file = IncludedFile(include_file, include_variables, original_task)
                        try:
                            pos = included_files.index(inc_file)
                            inc_file = included_files[pos]
                        except ValueError:
                            included_files.append(inc_file)

                        inc_file.add_host(res._host)

            # FIXME: should this be moved into the iterator class? Main downside would be
            #        that accessing the TQM's callback member would be more difficult, if
            #        we do want to send callbacks from here
            if len(included_files) > 0:
                noop_task = Task()
                noop_task.action = 'meta'
                noop_task.args['_raw_params'] = 'noop'
                noop_task.set_loader(iterator._play._loader)

                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    # included hosts get the task list while those excluded get an equal-length
                    # list of noop tasks, to make sure that they continue running in lock-step
                    try:
                        new_blocks = self._load_included_file(included_file)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        # FIXME: callback here?
                        print(e)
                        # skip this include; without this continue the loop below
                        # would operate on a stale or undefined new_blocks
                        continue

                    for new_block in new_blocks:
                        noop_block = Block(parent_block=task._block)
                        noop_block.block = [noop_task for t in new_block.block]
                        noop_block.always = [noop_task for t in new_block.always]
                        noop_block.rescue = [noop_task for t in new_block.rescue]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                final_block = new_block.filter_tagged_tasks(connection_info, task_vars)
                                all_blocks[host].append(final_block)
                            else:
                                all_blocks[host].append(noop_block)

                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])

            debug("results queue empty")

        except (IOError, EOFError) as e:
            debug("got IOError/EOFError in task loop: %s" % e)
            # most likely an abort, return failed
            return 1