def _remote_checksum(self, tmp, path):
    ''' Takes a remote path and returns its checksum (the remote command returns 1 if no file exists) '''

    # FIXME: figure out how this will work, probably pulled from the
    #        variable manager data
    #python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python')
    python_interp = 'python'
    cmd = self._shell.checksum(path, python_interp)
    debug("calling _low_level_execute_command to get the remote checksum")
    data = self._low_level_execute_command(cmd, tmp, sudoable=True)
    debug("done getting the remote checksum")
    # FIXME: implement this function?
    #data2 = utils.last_non_blank_line(data['stdout'])
    try:
        data2 = data['stdout'].strip().splitlines()[-1]
        if data2 == '':
            # this may happen if the connection to the remote server
            # failed, so just return "INVALIDCHECKSUM" to avoid errors
            return "INVALIDCHECKSUM"
        else:
            return data2.split()[0]
    except IndexError:
        # FIXME: this should probably not print to sys.stderr, but should instead
        #        fail in a more normal way?
        sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
        sys.stderr.write("command: %s\n" % cmd)
        sys.stderr.write("----\n")
        sys.stderr.write("output: %s\n" % data)
        sys.stderr.write("----\n")
        # this will signal that it changed and allow things to keep going
        return "INVALIDCHECKSUM"
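# A hedged sketch (not part of the original source) of the stdout parsing in
# _remote_checksum() above: the checksum is assumed to be the first field of
# the last non-blank line of output, so connection banners and other noise
# before it are ignored. The values below are hypothetical.
stdout = "spurious login banner\nd41d8cd98f00b204e9800998ecf8427e /etc/motd\n"
last_line = stdout.strip().splitlines()[-1]
print(last_line.split()[0])  # d41d8cd98f00b204e9800998ecf8427e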
def run_handlers(self, iterator, connection_info):
    '''
    Runs handlers on those hosts which have been notified.
    '''

    result = True

    # FIXME: getting the handlers from the iterator's play should be
    #        a method on the iterator, which may also filter the list
    #        of handlers based on the notified list
    for handler_block in iterator._play.handlers:
        # FIXME: handlers need to support the rescue/always portions of blocks too,
        #        but this may take some work in the iterator and gets tricky when
        #        we consider the ability of meta tasks to flush handlers
        for handler in handler_block.block:
            handler_name = handler.get_name()
            if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
                if not len(self.get_hosts_remaining(iterator._play)):
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break
                self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
                for host in self._notified_handlers[handler_name]:
                    if not handler.has_triggered(host):
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
                        self._queue_task(host, handler, task_vars, connection_info)
                        handler.flag_for_host(host)
                self._process_pending_results(iterator)
                self._wait_on_pending_results(iterator)
                # wipe the notification list
                self._notified_handlers[handler_name] = []
    debug("done running handlers, result is: %s" % result)
    return result
def _queue_task(self, host, task, task_vars, connection_info):
    ''' handles queueing the task up to be sent to a worker '''

    debug("entering _queue_task() for %s/%s" % (host, task))

    # and then queue the new task
    debug("%s - putting task (%s) in queue" % (host, task))
    try:
        debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))

        (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
        self._cur_worker += 1
        if self._cur_worker >= len(self._workers):
            self._cur_worker = 0

        self._pending_results += 1

        # create a dummy object with plugin loaders set as an easier
        # way to share them with the forked processes
        shared_loader_obj = SharedPluginLoaderObj()

        main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, shared_loader_obj), block=False)
    except (EOFError, IOError, AssertionError) as e:
        # most likely an abort
        debug("got an error while queuing: %s" % e)
        return
    debug("exiting _queue_task() for %s/%s" % (host, task))
def _remote_expand_user(self, path, tmp):
    ''' takes a remote path and performs tilde expansion on the remote host '''

    if not path.startswith('~'):
        return path

    split_path = path.split(os.path.sep, 1)
    expand_path = split_path[0]
    if expand_path == '~':
        if self._connection_info.sudo and self._connection_info.sudo_user:
            expand_path = '~%s' % self._connection_info.sudo_user
        elif self._connection_info.su and self._connection_info.su_user:
            expand_path = '~%s' % self._connection_info.su_user

    cmd = self._shell.expand_user(expand_path)
    debug("calling _low_level_execute_command to expand the remote user path")
    data = self._low_level_execute_command(cmd, tmp, sudoable=False)
    debug("done expanding the remote user path")

    #initial_fragment = utils.last_non_blank_line(data['stdout'])
    initial_fragment = data['stdout'].strip().splitlines()[-1]

    if not initial_fragment:
        # Something went wrong trying to expand the path remotely. Return
        # the original string
        return path

    if len(split_path) > 1:
        return self._shell.join_path(initial_fragment, *split_path[1:])
    else:
        return initial_fragment
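# A small self-contained illustration (not from the original source) of the
# split used above: only the leading '~user' fragment is sent to the remote
# shell for expansion, and the remainder is re-joined afterwards. The path is
# a hypothetical example.
import os

path = '~deploy/releases/current'
split_path = path.split(os.path.sep, 1)  # ['~deploy', 'releases/current']
print(split_path[0])                     # '~deploy' is what gets expanded remotely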
def run(self, iterator, connection_info, result=True):
    # save the failed/unreachable hosts, as the run_handlers()
    # method will clear that information during its execution
    failed_hosts = self._tqm._failed_hosts.keys()
    unreachable_hosts = self._tqm._unreachable_hosts.keys()

    debug("running handlers")
    result &= self.run_handlers(iterator, connection_info)

    # now update with the hosts (if any) that failed or were
    # unreachable during the handler execution phase
    failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys())
    unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())

    # send the stats callback
    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

    if len(unreachable_hosts) > 0:
        return 3
    elif len(failed_hosts) > 0:
        return 2
    elif not result:
        return 1
    else:
        return 0
def run(self):
    '''
    The main executor entrypoint, where we determine if the specified
    task requires looping and either runs the task with the loop or
    just once.
    '''

    debug("in run()")
    try:
        # lookup plugins need to know if this task is executing from
        # a role, so that it can properly find files/templates/etc.
        roledir = None
        if self._task._role:
            roledir = self._task._role._role_path
        self._job_vars['roledir'] = roledir

        items = self._get_loop_items()
        if items is not None:
            if len(items) > 0:
                item_results = self._run_loop(items)

                # loop through the item results, and remember the changed/failed
                # result flags based on any item there.
                changed = False
                failed = False
                for item in item_results:
                    if 'changed' in item:
                        changed = True
                    if 'failed' in item:
                        failed = True

                # create the overall result item, and set the changed/failed
                # flags there to reflect the overall result of the loop
                res = dict(results=item_results)

                if changed:
                    res['changed'] = True
                if failed:
                    res['failed'] = True
                    res['msg'] = 'One or more items failed'
                else:
                    res['msg'] = 'All items completed'
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            debug("calling self._execute()")
            res = self._execute()
            debug("_execute() done")

        # make sure changed is set in the result, if it's not present
        if 'changed' not in res:
            res['changed'] = False

        debug("dumping result to json")
        result = json.dumps(res)
        debug("done dumping result, returning")
        return result
    except AnsibleError as e:
        return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
def cleanup(self, iterator, connection_info):
    '''
    Iterates through failed hosts and runs any outstanding rescue/always blocks
    and handlers which may still need to be run after a failure.
    '''

    debug("in cleanup")
    result = True

    debug("getting failed hosts")
    failed_hosts = self.get_failed_hosts(iterator._play)
    if len(failed_hosts) == 0:
        debug("there are no failed hosts")
        return result

    debug("marking hosts failed in the iterator")
    # mark the host as failed in the iterator so it will take
    # any required rescue paths which may be outstanding
    for host in failed_hosts:
        iterator.mark_host_failed(host)

    debug("clearing the failed hosts list")
    # clear the failed hosts dictionary now, iterating over a copy of
    # the keys so entries can be deleted safely
    for entry in self._tqm._failed_hosts.keys():
        del self._tqm._failed_hosts[entry]

    work_to_do = True
    while work_to_do:
        work_to_do = False
        for host in failed_hosts:
            host_name = host.name

            if host_name in self._tqm._failed_hosts:
                iterator.mark_host_failed(host)
                del self._tqm._failed_hosts[host_name]

            if host_name in self._blocked_hosts:
                work_to_do = True
                continue
            elif iterator.get_next_task_for_host(host, peek=True) and host_name not in self._tqm._unreachable_hosts:
                work_to_do = True

                # pop the task, mark the host blocked, and queue it
                self._blocked_hosts[host_name] = True
                task = iterator.get_next_task_for_host(host)
                task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task)
                self._queue_task(host, task, task_vars, connection_info)

        self._process_pending_results(iterator)
        time.sleep(0.01)

    # no more work, wait until the queue is drained
    self._wait_on_pending_results(iterator)

    return result
def _remote_chmod(self, tmp, mode, path, sudoable=False):
    ''' Issue a remote chmod command '''

    cmd = self._shell.chmod(mode, path)
    debug("calling _low_level_execute_command to chmod the remote path")
    res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
    debug("done with chmod call")
    return res
def _remove_tmp_path(self, tmp_path):
    '''Remove a temporary path we created. '''

    if tmp_path and "-tmp-" in tmp_path:
        cmd = self._shell.remove(tmp_path, recurse=True)
        # If we have gotten here we have a working ssh configuration.
        # If ssh breaks we could leave tmp directories out on the remote system.
        debug("calling _low_level_execute_command to remove the tmp path")
        self._low_level_execute_command(cmd, None, sudoable=False)
        debug("done removing the tmp path")
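# A self-contained illustration (paths are hypothetical) of the "-tmp-" guard
# above, which keeps anything that does not look like one of our generated
# temp directories from ever being removed:
for tmp_path in ('/home/user/.ansible/tmp/ansible-tmp-1421706.88-1234', '/etc', None):
    if tmp_path and "-tmp-" in tmp_path:
        print("would remove: %s" % tmp_path)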
def deserialize(self, data):
    debug("deserializing group, data is: %s" % data)
    self.__init__()
    self.name = data.get('name')
    self.vars = data.get('vars', dict())

    parent_groups = data.get('parent_groups', [])
    for parent_data in parent_groups:
        g = Group()
        g.deserialize(parent_data)
        self.parent_groups.append(g)
def _process_pending_results():
    global res_q
    global pending_results

    while not res_q.empty():
        try:
            result = res_q.get(block=False)
            debug("got final result: %s" % (result,))
            pending_results -= 1
        except Queue.Empty:
            pass
def cleanup(self):
    debug("RUNNING CLEANUP")
    self.terminate()
    self._final_q.close()
    self._result_prc.terminate()
    for (worker_prc, main_q, rslt_q) in self._workers:
        rslt_q.close()
        main_q.close()
        worker_prc.terminate()
def _process_pending_results():
    global out_p
    global pending_results

    try:
        #p_lock.acquire()
        while out_p.poll(0.01):
            result = out_p.recv()
            debug("got final result: %s" % (result,))
            pending_results -= 1
    finally:
        #p_lock.release()
        pass
def serialize(self):
    parent_groups = []
    for parent in self.parent_groups:
        parent_groups.append(parent.serialize())

    result = dict(
        name=self.name,
        vars=self.vars.copy(),
        parent_groups=parent_groups,
        depth=self.depth,
    )

    debug("serializing group, result is: %s" % result)
    return result
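# A hedged round-trip sketch (not part of the original source): serialize()
# emits a plain dict that deserialize() can rebuild a Group from. This assumes
# Group() is constructible with no arguments, as deserialize() itself relies
# on when it calls self.__init__(); the group data is a hypothetical example.
g = Group()
g.deserialize(dict(name='webservers', vars={'http_port': 80}, parent_groups=[]))
assert g.serialize()['name'] == 'webservers'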
def _make_tmp_path(self):
    ''' Create and return a temporary path on a remote box. '''

    basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
    use_system_tmp = False

    if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'):
        use_system_tmp = True

    tmp_mode = None
    if self._connection_info.remote_user != 'root' or ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')):
        tmp_mode = 'a+rx'

    cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
    debug("executing _low_level_execute_command to create the tmp path")
    result = self._low_level_execute_command(cmd, None, sudoable=False)
    debug("done with creation of tmp path")

    # error handling on this seems a little aggressive?
    if result['rc'] != 0:
        if result['rc'] == 5:
            output = 'Authentication failure.'
        elif result['rc'] == 255 and self._connection.get_transport() in ['ssh']:
            # FIXME: more utils.VERBOSITY
            #if utils.VERBOSITY > 3:
            #    output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
            #else:
            #    output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
            output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
        elif 'No space left on device' in result['stderr']:
            output = result['stderr']
        else:
            output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
        if 'stdout' in result and result['stdout'] != '':
            output = output + ": %s" % result['stdout']
        raise AnsibleError(output)

    # FIXME: do we still need to do this?
    #rc = self._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
    rc = self._shell.join_path(result['stdout'].strip().splitlines()[-1], '')

    # Catch failure conditions, files should never be
    # written to locations in /.
    if rc == '/':
        raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))

    return rc
def _wait_on_pending_results(self, iterator):
    '''
    Wait for the shared counter to drop to zero, using a short sleep
    between checks to ensure we don't spin lock
    '''

    ret_results = []

    while self._pending_results > 0 and not self._tqm._terminated:
        debug("waiting for pending results (%d left)" % self._pending_results)
        results = self._process_pending_results(iterator)
        ret_results.extend(results)
        time.sleep(0.01)

    return ret_results
def worker(main_pipe, res_pipe):
    while True:
        foo = None
        try:
            if main_pipe.poll(0.01):
                foo = main_pipe.recv()
                time.sleep(0.07)
                res_pipe.send(foo)
            else:
                time.sleep(0.01)
        except (IOError, EOFError, KeyboardInterrupt) as e:
            debug("got a breaking error: %s" % e)
            break
        except Exception as e:
            debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
            traceback.print_exc()
            break
def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, in_data=None):
    '''
    This is the function which executes the low level shell command, which
    may be commands to create/remove directories for temporary files, or to
    run the module code or python directly when pipelining.
    '''

    debug("in _low_level_execute_command() (%s)" % (cmd,))
    if not cmd:
        # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
        debug("no command, exiting _low_level_execute_command()")
        return dict(stdout='', stderr='')

    if executable is None:
        executable = C.DEFAULT_EXECUTABLE

    prompt = None
    success_key = None

    if sudoable:
        if self._connection_info.su and self._connection_info.su_user:
            cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
        elif self._connection_info.sudo and self._connection_info.sudo_user:
            # FIXME: hard-coded sudo_exe here
            cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd)

    debug("executing the command %s through the connection" % cmd)
    rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data)
    debug("command execution done")

    if not isinstance(stdout, basestring):
        out = ''.join(stdout.readlines())
    else:
        out = stdout

    if not isinstance(stderr, basestring):
        err = ''.join(stderr.readlines())
    else:
        err = stderr

    debug("done with _low_level_execute_command() (%s)" % (cmd,))
    if rc is not None:
        return dict(rc=rc, stdout=out, stderr=err)
    else:
        return dict(stdout=out, stderr=err)
def _read_worker_result(cur_worker):
    result = None
    starting_point = cur_worker
    while True:
        (worker_prc, main_pipe, res_pipe) = workers[cur_worker]
        cur_worker += 1
        if cur_worker >= len(workers):
            cur_worker = 0

        if res_pipe[1].poll(0.01):
            debug("worker %d has data to read" % cur_worker)
            result = res_pipe[1].recv()
            debug("got a result from worker %d: %s" % (cur_worker, result))
            break

        if cur_worker == starting_point:
            break

    return (result, cur_worker)
def run(self, iterator, connection_info, result=True):
    # save the counts on failed/unreachable hosts, as the cleanup/handler
    # methods will clear that information during their runs
    num_failed = len(self._tqm._failed_hosts)
    num_unreachable = len(self._tqm._unreachable_hosts)

    #debug("running the cleanup portion of the play")
    #result &= self.cleanup(iterator, connection_info)
    debug("running handlers")
    result &= self.run_handlers(iterator, connection_info)

    if not result:
        if num_unreachable > 0:
            return 3
        elif num_failed > 0:
            return 2
        else:
            return 1
    else:
        return 0
def results(final_q, workers):
    cur_worker = 0

    def _read_worker_result(cur_worker):
        result = None
        starting_point = cur_worker
        while True:
            (worker_prc, main_q, res_q) = workers[cur_worker]
            cur_worker += 1
            if cur_worker >= len(workers):
                cur_worker = 0

            try:
                if not res_q.empty():
                    debug("worker %d has data to read" % cur_worker)
                    result = res_q.get()
                    debug("got a result from worker %d: %s" % (cur_worker, result))
                    break
            except:
                pass

            if cur_worker == starting_point:
                break

        return (result, cur_worker)

    while True:
        result = None
        try:
            (result, cur_worker) = _read_worker_result(cur_worker)
            if result is None:
                time.sleep(0.01)
                continue
            final_q.put(result, block=False)
        except (IOError, EOFError, KeyboardInterrupt) as e:
            debug("got a breaking error: %s" % e)
            break
        except Exception as e:
            debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
            traceback.print_exc()
            break
def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None):
    '''
    This is the function which executes the low level shell command, which
    may be commands to create/remove directories for temporary files, or to
    run the module code or python directly when pipelining.
    '''

    debug("in _low_level_execute_command() (%s)" % (cmd,))
    if not cmd:
        # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
        debug("no command, exiting _low_level_execute_command()")
        return dict(stdout='', stderr='')

    #FIXME: disabled as this should happen in the connection plugin, verify before removing
    #prompt = None
    #success_key = None
    #
    #if sudoable:
    #    cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd)

    debug("executing the command %s through the connection" % cmd)
    rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable)
    debug("command execution done")

    if not isinstance(stdout, basestring):
        out = ''.join(stdout.readlines())
    else:
        out = stdout

    if not isinstance(stderr, basestring):
        err = ''.join(stderr.readlines())
    else:
        err = stderr

    debug("done with _low_level_execute_command() (%s)" % (cmd,))
    if rc is None:
        rc = 0

    return dict(rc=rc, stdout=out, stderr=err)
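# A minimal sketch (not from the original source) of the stdout/stderr
# normalization above: file-like objects are drained into a single string,
# while plain strings pass through untouched. Python 2 is assumed here,
# matching the basestring checks in the method.
from StringIO import StringIO

stdout = StringIO('line one\nline two\n')
out = stdout if isinstance(stdout, basestring) else ''.join(stdout.readlines())
print(out)  # 'line one\nline two\n'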
def _read_worker_result(cur_worker):
    result = None
    starting_point = cur_worker
    while True:
        (worker_prc, main_q, res_q) = workers[cur_worker]
        cur_worker += 1
        if cur_worker >= len(workers):
            cur_worker = 0

        try:
            if not res_q.empty():
                debug("worker %d has data to read" % cur_worker)
                result = res_q.get()
                debug("got a result from worker %d: %s" % (cur_worker, result))
                break
        except:
            pass

        if cur_worker == starting_point:
            break

    return (result, cur_worker)
def _read_worker_result(self):
    result = None
    starting_point = self._cur_worker
    while True:
        (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
        self._cur_worker += 1
        if self._cur_worker >= len(self._workers):
            self._cur_worker = 0

        try:
            if not rslt_q.empty():
                debug("worker %d has data to read" % self._cur_worker)
                result = rslt_q.get()
                debug("got a result from worker %d: %s" % (self._cur_worker, result))
                break
        except queue.Empty:
            pass

        if self._cur_worker == starting_point:
            break

    return result
def run_handlers(self, iterator, connection_info):
    '''
    Runs handlers on those hosts which have been notified.
    '''

    result = True

    # FIXME: getting the handlers from the iterator's play should be
    #        a method on the iterator, which may also filter the list
    #        of handlers based on the notified list
    handlers = compile_block_list(iterator._play.handlers)

    debug("handlers are: %s" % handlers)
    for handler in handlers:
        handler_name = handler.get_name()

        if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
            if not len(self.get_hosts_remaining(iterator._play)):
                self._callback.playbook_on_no_hosts_remaining()
                result = False
                break

            self._callback.playbook_on_handler_task_start(handler_name)
            for host in self._notified_handlers[handler_name]:
                if not handler.has_triggered(host):
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
                    self._queue_task(host, handler, task_vars, connection_info)
                    handler.flag_for_host(host)

            self._process_pending_results(iterator)
            self._wait_on_pending_results(iterator)

            # wipe the notification list
            self._notified_handlers[handler_name] = []

    debug("done running handlers, result is: %s" % result)
    return result
def run(self):
    '''
    The main executor entrypoint, where we determine if the specified
    task requires looping and either runs the task with the loop or
    just once.
    '''

    debug("in run()")
    try:
        # lookup plugins need to know if this task is executing from
        # a role, so that it can properly find files/templates/etc.
        roledir = None
        if self._task._role:
            roledir = self._task._role._role_path
        self._job_vars['roledir'] = roledir

        items = self._get_loop_items()
        if items is not None:
            if len(items) > 0:
                item_results = self._run_loop(items)
                res = dict(results=item_results)
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            debug("calling self._execute()")
            res = self._execute()
            debug("_execute() done")

        # make sure changed is set in the result, if it's not present
        if 'changed' not in res:
            res['changed'] = False

        debug("dumping result to json")
        result = json.dumps(res)
        debug("done dumping result, returning")
        return result
    except AnsibleError as e:
        return dict(failed=True, msg=str(e))
def results(pipe, workers):
    cur_worker = 0

    def _read_worker_result(cur_worker):
        result = None
        starting_point = cur_worker
        while True:
            (worker_prc, main_pipe, res_pipe) = workers[cur_worker]
            cur_worker += 1
            if cur_worker >= len(workers):
                cur_worker = 0

            if res_pipe[1].poll(0.01):
                debug("worker %d has data to read" % cur_worker)
                result = res_pipe[1].recv()
                debug("got a result from worker %d: %s" % (cur_worker, result))
                break

            if cur_worker == starting_point:
                break

        return (result, cur_worker)

    while True:
        result = None
        try:
            (result, cur_worker) = _read_worker_result(cur_worker)
            if result is None:
                time.sleep(0.01)
                continue
            pipe.send(result)
        except (IOError, EOFError, KeyboardInterrupt) as e:
            debug("got a breaking error: %s" % e)
            break
        except Exception as e:
            debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
            traceback.print_exc()
            break
def _queue_task(self, host, task, task_vars, connection_info):
    ''' handles queueing the task up to be sent to a worker '''

    debug("entering _queue_task() for %s/%s" % (host, task))

    # and then queue the new task
    debug("%s - putting task (%s) in queue" % (host, task))
    try:
        debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))

        (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
        self._cur_worker += 1
        if self._cur_worker >= len(self._workers):
            self._cur_worker = 0

        self._pending_results += 1
        main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, module_loader), block=False)
    except (EOFError, IOError, AssertionError) as e:
        # most likely an abort
        debug("got an error while queuing: %s" % e)
        return
def worker(main_q, res_q, loader):
    while True:
        task = None
        try:
            if not main_q.empty():
                (host, task, task_vars, conn_info) = main_q.get(block=False)
                executor_result = TaskExecutor(host, task, task_vars, conn_info, loader).run()
                debug("executor result: %s" % executor_result)
                task_result = TaskResult(host, task, executor_result)
                res_q.put(task_result)
            else:
                time.sleep(0.01)
        except Queue.Empty:
            pass
        except (IOError, EOFError, KeyboardInterrupt) as e:
            debug("got a breaking error: %s" % e)
            break
        except Exception as e:
            debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
            traceback.print_exc()
            break
def _send_result(self, result):
    debug(u"sending result: %s" % ([text_type(x) for x in result],))
    self._final_q.put(result, block=False)
    debug("done sending result")
def run(self):
    '''
    Called when the process is started, and loops indefinitely until an
    error is encountered (typically an IOError from the queue pipe being
    disconnected). During the loop, we attempt to pull tasks off the job
    queue and run them, pushing the result onto the results queue. We
    also remove the host from the blocked hosts list, to signify that
    they are ready for their next task.
    '''

    if HAS_ATFORK:
        atfork()

    while True:
        task = None
        try:
            (host, task, basedir, job_vars, play_context, shared_loader_obj) = self._main_q.get()
            debug("there's work to be done!")
            debug("got a task/handler to work on: %s" % task)

            # because the task queue manager starts workers (forks) before the
            # playbook is loaded, set the basedir of the loader inherited by
            # this fork now so that we can find files correctly
            self._loader.set_basedir(basedir)

            # Serializing/deserializing tasks does not preserve the loader attribute,
            # since it is passed to the worker during the forking of the process and
            # would be wasteful to serialize. So we set it here on the task now, and
            # the task handles updating parent/child objects as needed.
            task.set_loader(self._loader)

            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            new_play_context = play_context.set_task_and_variable_override(task=task, variables=job_vars)

            # execute the task and build a TaskResult from the result
            debug("running TaskExecutor() for %s/%s" % (host, task))
            executor_result = TaskExecutor(host, task, job_vars, new_play_context, self._new_stdin, self._loader, shared_loader_obj).run()
            debug("done running TaskExecutor() for %s/%s" % (host, task))
            task_result = TaskResult(host, task, executor_result)

            # put the result on the result queue
            debug("sending task result")
            self._rslt_q.put(task_result)
            debug("done sending task result")

        except queue.Empty:
            pass
        except (IOError, EOFError, KeyboardInterrupt):
            break
        except AnsibleConnectionFailure:
            try:
                if task:
                    task_result = TaskResult(host, task, dict(unreachable=True))
                    self._rslt_q.put(task_result, block=False)
            except:
                # FIXME: most likely an abort, catch those kinds of errors specifically
                break
        except Exception as e:
            debug("WORKER EXCEPTION: %s" % e)
            debug("WORKER EXCEPTION: %s" % traceback.format_exc())
            try:
                if task:
                    task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
                    self._rslt_q.put(task_result, block=False)
            except:
                # FIXME: most likely an abort, catch those kinds of errors specifically
                break

    debug("WORKER PROCESS EXITING")
def _wait_on_pending_results():
    global pending_results

    while pending_results > 0:
        debug("waiting for pending results (%d left)" % pending_results)
        _process_pending_results()
        time.sleep(0.01)
def __setstate__(self, data):
    debug("unpickling...")
    self._foo = data.get('foo', "BAD PICKLE!")
    debug("unpickled %s" % self._foo)
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
    '''
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore
      file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars
    '''

    debug("in VariableManager get_vars()")

    cache_entry = self._get_cache_entry(play=play, host=host, task=task)
    if cache_entry in VARIABLE_CACHE and use_cache:
        debug("vars are cached, returning them now")
        return VARIABLE_CACHE[cache_entry]

    all_vars = defaultdict(dict)

    if play:
        # first we compile any vars specified in defaults/main.yml
        # for all roles within the specified play
        for role in play.get_roles():
            all_vars = combine_vars(all_vars, role.get_default_vars())

    # if we have a task in this context, and that task has a role, make
    # sure it sees its defaults above any other roles, as we previously
    # (v1) made sure each task had a copy of its roles default vars
    if task and task._role is not None:
        all_vars = combine_vars(all_vars, task._role.get_default_vars())

    if host:
        # next, if a host is specified, we load any vars from group_vars
        # files and then any vars from host_vars files which may apply to
        # this host or the groups it belongs to

        # we merge in vars from groups specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_group_vars())

        # then we merge in the special 'all' group_vars first, if they exist
        if 'all' in self._group_vars_files:
            data = preprocess_vars(self._group_vars_files['all'])
            for item in data:
                all_vars = combine_vars(all_vars, item)

        for group in host.get_groups():
            if group.name in self._group_vars_files and group.name != 'all':
                for data in self._group_vars_files[group.name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

        # then we merge in vars from the host specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_vars())

        # then we merge in the host_vars/<hostname> file, if it exists
        host_name = host.get_name()
        if host_name in self._host_vars_files:
            for data in self._host_vars_files[host_name]:
                data = preprocess_vars(data)
                for item in data:
                    all_vars = combine_vars(all_vars, item)

        # finally, the facts caches for this host, if it exists
        try:
            host_facts = self._fact_cache.get(host.name, dict())
            for k in host_facts.keys():
                if host_facts[k] is not None and not isinstance(host_facts[k], UnsafeProxy):
                    host_facts[k] = UnsafeProxy(host_facts[k])
            all_vars = combine_vars(all_vars, host_facts)
        except KeyError:
            pass

    if play:
        all_vars = combine_vars(all_vars, play.get_vars())

        for vars_file_item in play.get_vars_files():
            # create a set of temporary vars here, which incorporate the
            # extra vars so we can properly template the vars_files entries
            temp_vars = combine_vars(all_vars, self._extra_vars)
            templar = Templar(loader=loader, variables=temp_vars)

            # we assume each item in the list is itself a list, as we
            # support "conditional includes" for vars_files, which mimics
            # the with_first_found mechanism.
            #vars_file_list = templar.template(vars_file_item)
            vars_file_list = vars_file_item
            if not isinstance(vars_file_list, list):
                vars_file_list = [vars_file_list]

            # now we iterate through the (potential) files, and break out
            # as soon as we read one from the list. If none are found, we
            # raise an error, which is silently ignored at this point.
            try:
                for vars_file in vars_file_list:
                    vars_file = templar.template(vars_file)
                    try:
                        data = preprocess_vars(loader.load_from_file(vars_file))
                        if data is not None:
                            for item in data:
                                all_vars = combine_vars(all_vars, item)
                        break
                    except AnsibleFileNotFound as e:
                        # we continue on loader failures
                        continue
                    except AnsibleParserError as e:
                        raise
                else:
                    raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
            except (UndefinedError, AnsibleUndefinedVariable):
                if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                    raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
                else:
                    # we do not have a full context here, and the missing variable could be
                    # because of that, so just show a warning and continue
                    display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                    continue

        if not C.DEFAULT_PRIVATE_ROLE_VARS:
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_vars())
        all_vars = combine_vars(all_vars, task.get_vars())

    if host:
        all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
        all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

    all_vars = combine_vars(all_vars, self._extra_vars)

    # FIXME: make sure all special vars are here
    # Finally, we create special vars

    all_vars['playbook_dir'] = loader.get_basedir()

    if host:
        all_vars['group_names'] = [group.name for group in host.get_groups()]

        if self._inventory is not None:
            all_vars['groups'] = dict()
            for (group_name, group) in iteritems(self._inventory.groups):
                all_vars['groups'][group_name] = [h.name for h in group.get_hosts()]

            if include_hostvars:
                hostvars_cache_entry = self._get_cache_entry(play=play)
                if hostvars_cache_entry in HOSTVARS_CACHE:
                    hostvars = HOSTVARS_CACHE[hostvars_cache_entry]
                else:
                    hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self)
                    HOSTVARS_CACHE[hostvars_cache_entry] = hostvars
                all_vars['hostvars'] = hostvars

    if task:
        if task._role:
            all_vars['role_path'] = task._role._role_path

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task.delegate_to is not None and include_delegate_to:
            # we unfortunately need to template the delegate_to field here,
            # as we're fetching vars before post_validate has been called on
            # the task that has been passed in
            templar = Templar(loader=loader, variables=all_vars)

            items = []
            if task.loop is not None:
                if task.loop in lookup_loader:
                    #TODO: remove convert_bare true and deprecate this in with_
                    try:
                        loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True)
                    except AnsibleUndefinedVariable as e:
                        if 'has no attribute' in str(e):
                            loop_terms = []
                            self._display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
                        else:
                            raise
                    items = lookup_loader.get(task.loop, loader=loader, templar=templar).run(terms=loop_terms, variables=all_vars)
                else:
                    raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
            else:
                items = [None]

            vars_copy = all_vars.copy()
            delegated_host_vars = dict()
            for item in items:
                # update the variables with the item value for templating, in case we need it
                if item is not None:
                    vars_copy['item'] = item

                templar.set_available_variables(vars_copy)
                delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
                if delegated_host_name in delegated_host_vars:
                    # no need to repeat ourselves, as the delegate_to value
                    # does not appear to be tied to the loop item variable
                    continue

                # a dictionary of variables to use if we have to create a new host below
                new_delegated_host_vars = dict(
                    ansible_host=delegated_host_name,
                    ansible_user=C.DEFAULT_REMOTE_USER,
                    ansible_connection=C.DEFAULT_TRANSPORT,
                )

                # now try to find the delegated-to host in inventory, or failing that,
                # create a new host on the fly so we can fetch variables for it
                delegated_host = None
                if self._inventory is not None:
                    delegated_host = self._inventory.get_host(delegated_host_name)
                    # try looking it up based on the address field, and finally
                    # fall back to creating a host on the fly to use for the var lookup
                    if delegated_host is None:
                        for h in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
                            # check if the address matches, or if both the delegated_to host
                            # and the current host are in the list of localhost aliases
                            if h.address == delegated_host_name or h.name in C.LOCALHOST and delegated_host_name in C.LOCALHOST:
                                delegated_host = h
                                break
                        else:
                            delegated_host = Host(name=delegated_host_name)
                            delegated_host.vars.update(new_delegated_host_vars)
                else:
                    delegated_host = Host(name=delegated_host_name)
                    delegated_host.vars.update(new_delegated_host_vars)

                # now we go fetch the vars for the delegated-to host and save them in our
                # master dictionary of variables to be used later in the TaskExecutor/PlayContext
                delegated_host_vars[delegated_host_name] = self.get_vars(
                    loader=loader,
                    play=play,
                    host=delegated_host,
                    task=task,
                    include_delegate_to=False,
                    include_hostvars=False,
                )
            all_vars['ansible_delegated_vars'] = delegated_host_vars

    if self._inventory is not None:
        all_vars['inventory_dir'] = self._inventory.basedir()
        if play:
            # add the list of hosts in the play, as adjusted for limit/filters
            # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
            #             however this would take work in the templating engine, so for now
            #             we'll add both so we can give users something transitional to use
            host_list = [x.name for x in self._inventory.get_hosts()]
            all_vars['play_hosts'] = host_list
            all_vars['ansible_play_hosts'] = host_list

    # the 'omit' value allows params to be left out if the variable they are based on is undefined
    all_vars['omit'] = self._omit_token
    all_vars['ansible_version'] = CLI.version_info(gitinfo=False)

    if 'hostvars' in all_vars and host:
        all_vars['vars'] = all_vars['hostvars'][host.get_name()]

    #VARIABLE_CACHE[cache_entry] = all_vars

    debug("done with get_vars()")
    return all_vars
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
    '''
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore
      file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars
    '''

    debug("in VariableManager get_vars()")

    cache_entry = self._get_cache_entry(play=play, host=host, task=task)
    if cache_entry in VARIABLE_CACHE and use_cache:
        debug("vars are cached, returning them now")
        return VARIABLE_CACHE[cache_entry]

    all_vars = defaultdict(dict)

    magic_variables = self._get_magic_variables(
        loader=loader,
        play=play,
        host=host,
        task=task,
        include_hostvars=include_hostvars,
        include_delegate_to=include_delegate_to,
    )

    if play:
        # first we compile any vars specified in defaults/main.yml
        # for all roles within the specified play
        for role in play.get_roles():
            all_vars = combine_vars(all_vars, role.get_default_vars())

    # if we have a task in this context, and that task has a role, make
    # sure it sees its defaults above any other roles, as we previously
    # (v1) made sure each task had a copy of its roles default vars
    if task and task._role is not None:
        all_vars = combine_vars(all_vars, task._role.get_default_vars())

    if host:
        # next, if a host is specified, we load any vars from group_vars
        # files and then any vars from host_vars files which may apply to
        # this host or the groups it belongs to

        # we merge in vars from groups specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_group_vars())

        # then we merge in the special 'all' group_vars first, if they exist
        if 'all' in self._group_vars_files:
            data = preprocess_vars(self._group_vars_files['all'])
            for item in data:
                all_vars = combine_vars(all_vars, item)

        for group in host.get_groups():
            if group.name in self._group_vars_files and group.name != 'all':
                for data in self._group_vars_files[group.name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

        # then we merge in vars from the host specified in the inventory (INI or script)
        all_vars = combine_vars(all_vars, host.get_vars())

        # then we merge in the host_vars/<hostname> file, if it exists
        host_name = host.get_name()
        if host_name in self._host_vars_files:
            for data in self._host_vars_files[host_name]:
                data = preprocess_vars(data)
                for item in data:
                    all_vars = combine_vars(all_vars, item)

        # finally, the facts caches for this host, if it exists
        try:
            host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
            all_vars = combine_vars(all_vars, host_facts)
        except KeyError:
            pass

    if play:
        all_vars = combine_vars(all_vars, play.get_vars())

        for vars_file_item in play.get_vars_files():
            # create a set of temporary vars here, which incorporate the extra
            # and magic vars so we can properly template the vars_files entries
            temp_vars = combine_vars(all_vars, self._extra_vars)
            temp_vars = combine_vars(temp_vars, magic_variables)
            templar = Templar(loader=loader, variables=temp_vars)

            # we assume each item in the list is itself a list, as we
            # support "conditional includes" for vars_files, which mimics
            # the with_first_found mechanism.
            #vars_file_list = templar.template(vars_file_item)
            vars_file_list = vars_file_item
            if not isinstance(vars_file_list, list):
                vars_file_list = [vars_file_list]

            # now we iterate through the (potential) files, and break out
            # as soon as we read one from the list. If none are found, we
            # raise an error, which is silently ignored at this point.
            try:
                for vars_file in vars_file_list:
                    vars_file = templar.template(vars_file)
                    try:
                        data = preprocess_vars(loader.load_from_file(vars_file))
                        if data is not None:
                            for item in data:
                                all_vars = combine_vars(all_vars, item)
                        break
                    except AnsibleFileNotFound as e:
                        # we continue on loader failures
                        continue
                    except AnsibleParserError as e:
                        raise
                else:
                    raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
            except (UndefinedError, AnsibleUndefinedVariable):
                if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                    raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
                else:
                    # we do not have a full context here, and the missing variable could be
                    # because of that, so just show a warning and continue
                    display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                    continue

        if not C.DEFAULT_PRIVATE_ROLE_VARS:
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

    if task:
        if task._role:
            all_vars = combine_vars(all_vars, task._role.get_vars())
        all_vars = combine_vars(all_vars, task.get_vars())

    if host:
        all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
        all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

    all_vars = combine_vars(all_vars, self._extra_vars)
    all_vars = combine_vars(all_vars, magic_variables)

    # if we have a task and we're delegating to another host, figure out the
    # variables for that host now so we don't have to rely on hostvars later
    if task and task.delegate_to is not None and include_delegate_to:
        all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)

    #VARIABLE_CACHE[cache_entry] = all_vars

    debug("done with get_vars()")
    return all_vars
def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
    ''' run a command on the local host '''

    super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)

    debug("in local.exec_command()")
    if in_data:
        raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

    executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

    if sudoable:
        cmd, self.prompt, self.success_key = self._connection_info.make_become_cmd(cmd)

    self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd))
    # FIXME: cwd= needs to be set to the basedir of the playbook
    debug("opening command with Popen()")
    p = subprocess.Popen(
        cmd,
        shell=isinstance(cmd, basestring),
        executable=executable,
        #cwd=...,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    debug("done running command with Popen()")

    if self.prompt and self._connection_info.become_pass:
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        become_output = ''
        while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):
            rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._connection_info.timeout)
            if p.stdout in rfd:
                chunk = p.stdout.read()
            elif p.stderr in rfd:
                chunk = p.stderr.read()
            else:
                stdout, stderr = p.communicate()
                raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + become_output)
            if not chunk:
                stdout, stderr = p.communicate()
                raise AnsibleError('privilege output closed while waiting for password prompt:\n' + become_output)
            become_output += chunk
        if not self.check_become_success(become_output):
            p.stdin.write(self._connection_info.become_pass + '\n')
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

    debug("getting output with communicate()")
    stdout, stderr = p.communicate()
    debug("done communicating")

    debug("done with local.exec_command()")
    return (p.returncode, '', stdout, stderr)
def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
    ''' run a command on the local host '''

    super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data)

    debug("in local.exec_command()")
    # su requires to be run from a terminal, and therefore isn't supported here (yet?)
    #if self._connection_info.su:
    #    raise AnsibleError("Internal Error: this module does not support running commands via su")

    if in_data:
        raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

    executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

    self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd))
    # FIXME: cwd= needs to be set to the basedir of the playbook
    debug("opening command with Popen()")
    p = subprocess.Popen(
        cmd,
        shell=isinstance(cmd, basestring),
        executable=executable,
        #cwd=...,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    debug("done running command with Popen()")

    # FIXME: more su/sudo stuff
    #if self.runner.sudo and sudoable and self.runner.sudo_pass:
    #    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
    #                fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
    #    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
    #                fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
    #    sudo_output = ''
    #    while not sudo_output.endswith(prompt) and success_key not in sudo_output:
    #        rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
    #                                      [p.stdout, p.stderr], self.runner.timeout)
    #        if p.stdout in rfd:
    #            chunk = p.stdout.read()
    #        elif p.stderr in rfd:
    #            chunk = p.stderr.read()
    #        else:
    #            stdout, stderr = p.communicate()
    #            raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
    #        if not chunk:
    #            stdout, stderr = p.communicate()
    #            raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
    #        sudo_output += chunk
    #    if success_key not in sudo_output:
    #        p.stdin.write(self.runner.sudo_pass + '\n')
    #    fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
    #    fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

    debug("getting output with communicate()")
    stdout, stderr = p.communicate()
    debug("done communicating")

    debug("done with local.exec_command()")
    return (p.returncode, '', stdout, stderr)
except queue.Empty:
    pass
except (IOError, EOFError, KeyboardInterrupt):
    break
except AnsibleConnectionFailure:
    try:
        if task:
            task_result = TaskResult(host, task, dict(unreachable=True))
            self._rslt_q.put(task_result, block=False)
    except:
        # FIXME: most likely an abort, catch those kinds of errors specifically
        break
except Exception as e:
    debug("WORKER EXCEPTION: %s" % e)
    debug("WORKER EXCEPTION: %s" % traceback.format_exc())
    try:
        if task:
            task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
            self._rslt_q.put(task_result, block=False)
    except:
        # FIXME: most likely an abort, catch those kinds of errors specifically
        break

debug("WORKER PROCESS EXITING")
def run(self, iterator, play_context):
    '''
    The "free" strategy is a bit more complex, in that it allows tasks to
    be sent to hosts as quickly as they can be processed. This means that
    some hosts may finish very quickly if run tasks result in little or no
    work being done versus other systems.

    The algorithm used here also tries to be more "fair" when iterating
    through hosts by remembering the last host in the list to be given a task
    and starting the search from there as opposed to the top of the hosts
    list again, which would end up favoring hosts near the beginning of
    the list.
    '''

    # the last host to be given a task
    last_host = 0

    result = True

    work_to_do = True
    while work_to_do and not self._tqm._terminated:

        hosts_left = self.get_hosts_remaining(iterator._play)
        if len(hosts_left) == 0:
            self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
            result = False
            break

        work_to_do = False         # assume we have no more work to do
        starting_host = last_host  # save current position so we know when we've
                                   # looped back around and need to break

        # try and find an unblocked host with a task to run
        host_results = []
        while True:
            host = hosts_left[last_host]
            debug("next free host: %s" % host)
            host_name = host.get_name()

            # peek at the next task for the host, to see if there's
            # anything to do for this host
            (state, task) = iterator.get_next_task_for_host(host, peek=True)
            debug("free host state: %s" % state)
            debug("free host task: %s" % task)
            if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:

                # set the flag so the outer loop knows we've still found
                # some work which needs to be done
                work_to_do = True

                debug("this host has work to do")

                # check to see if this host is blocked (still executing a previous task)
                if host_name not in self._blocked_hosts:
                    # pop the task, mark the host blocked, and queue it
                    self._blocked_hosts[host_name] = True
                    (state, task) = iterator.get_next_task_for_host(host)

                    debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    debug("done getting variables")

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            debug("'%s' skipped because role has already run" % task)
                            continue

                    if not task.evaluate_tags(play_context.only_tags, play_context.skip_tags, task_vars) and task.action != 'setup':
                        debug("'%s' failed tag evaluation" % task)
                        continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            continue
                        elif meta_action == 'flush_handlers':
                            # FIXME: in the 'free' mode, flushing handlers should result in
                            #        only those handlers notified for the host doing the flush
                            self.run_handlers(iterator, play_context)
                        else:
                            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

                        self._blocked_hosts[host_name] = False
                    else:
                        self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                        self._queue_task(host, task, task_vars, play_context)

            # move on to the next host and make sure we
            # haven't gone past the end of our hosts list
            last_host += 1
            if last_host > len(hosts_left) - 1:
                last_host = 0

            # if we've looped around back to the start, break out
            if last_host == starting_host:
                break

        results = self._process_pending_results(iterator)
        host_results.extend(results)

        # pause briefly so we don't spin lock
        time.sleep(0.05)

    try:
        results = self._wait_on_pending_results(iterator)
        host_results.extend(results)
    except Exception as e:
        # FIXME: ctrl+c can cause some failures here, so catch them
        #        with the appropriate error type
        print("wtf: %s" % e)
        pass

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context)
def _do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None):

    # For preserving the number of input newlines in the output (used
    # later in this method)
    data_newlines = _count_newlines_from_end(data)

    if fail_on_undefined is None:
        fail_on_undefined = self._fail_on_undefined_errors

    try:
        # allows template header overrides to change jinja2 options.
        if overrides is None:
            myenv = self.environment.overlay()
        else:
            myenv = self.environment.overlay(overrides)

        # Get jinja env overrides from template
        if data.startswith(JINJA2_OVERRIDE):
            eol = data.find('\n')
            line = data[len(JINJA2_OVERRIDE):eol]
            data = data[eol + 1:]
            for pair in line.split(','):
                (key, val) = pair.split(':')
                key = key.strip()
                setattr(myenv, key, ast.literal_eval(val.strip()))

        #FIXME: add tests
        myenv.filters.update(self._get_filters())
        myenv.tests.update(self._get_tests())

        if escape_backslashes:
            # Allow users to specify backslashes in playbooks as "\\"
            # instead of as "\\\\".
            data = _escape_backslashes(data, myenv)

        try:
            t = myenv.from_string(data)
        except TemplateSyntaxError as e:
            raise AnsibleError("template error while templating string: %s" % str(e))
        except Exception as e:
            if 'recursion' in str(e):
                raise AnsibleError("recursive loop detected in template string: %s" % data)
            else:
                return data

        t.globals['lookup'] = self._lookup
        t.globals['finalize'] = self._finalize

        jvars = AnsibleJ2Vars(self, t.globals)

        new_context = t.new_context(jvars, shared=True)
        rf = t.root_render_func(new_context)

        try:
            res = j2_concat(rf)
        except TypeError as te:
            if 'StrictUndefined' in str(te):
                raise AnsibleUndefinedVariable(
                    "Unable to look up a name or access an attribute in template string. " + \
                    "Make sure your variable name does not contain invalid characters like '-'."
                )
            else:
                debug("failing because of a type error, template data is: %s" % data)
                raise AnsibleError("an unexpected type error occurred. Error was %s" % te)

        if preserve_trailing_newlines:
            # The low level calls above do not preserve the newline
            # characters at the end of the input data, so we calculate the
            # difference in newlines and append them to the resulting output
            # for parity
            #
            # jinja2 added a keep_trailing_newline option in 2.7 when
            # creating an Environment. That would let us make this code
            # better (remove a single newline if preserve_trailing_newlines
            # is False). Once we can depend on that version being present,
            # modify our code to set that when initializing self.environment
            # and remove a single trailing newline here if
            # preserve_trailing_newlines is False.
            res_newlines = _count_newlines_from_end(res)
            if data_newlines > res_newlines:
                res += '\n' * (data_newlines - res_newlines)

        return res
    except (UndefinedError, AnsibleUndefinedVariable) as e:
        if fail_on_undefined:
            raise AnsibleUndefinedVariable(e)
        else:
            #TODO: return warning about undefined var
            return data
def _do_template(self, data, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None):

    if fail_on_undefined is None:
        fail_on_undefined = self._fail_on_undefined_errors

    try:
        # allows template header overrides to change jinja2 options.
        if overrides is None:
            myenv = self.environment.overlay()
        else:
            myenv = self.environment.overlay(overrides)

        # Get jinja env overrides from template
        if data.startswith(JINJA2_OVERRIDE):
            eol = data.find('\n')
            line = data[len(JINJA2_OVERRIDE):eol]
            data = data[eol+1:]
            for pair in line.split(','):
                (key, val) = pair.split(':')
                key = key.strip()
                setattr(myenv, key, ast.literal_eval(val.strip()))

        #FIXME: add tests
        myenv.filters.update(self._get_filters())
        myenv.tests.update(self._get_tests())

        try:
            t = myenv.from_string(data)
        except TemplateSyntaxError as e:
            raise AnsibleError("template error while templating string: %s" % str(e))
        except Exception as e:
            if 'recursion' in str(e):
                raise AnsibleError("recursive loop detected in template string: %s" % data)
            else:
                return data

        t.globals['lookup'] = self._lookup
        t.globals['finalize'] = self._finalize

        jvars = AnsibleJ2Vars(self, t.globals)

        new_context = t.new_context(jvars, shared=True)
        rf = t.root_render_func(new_context)

        try:
            res = j2_concat(rf)
        except TypeError as te:
            if 'StrictUndefined' in str(te):
                raise AnsibleUndefinedVariable(
                    "Unable to look up a name or access an attribute in template string. " + \
                    "Make sure your variable name does not contain invalid characters like '-'."
                )
            else:
                debug("failing because of a type error, template data is: %s" % data)
                raise AnsibleError("an unexpected type error occurred. Error was %s" % te)

        if preserve_trailing_newlines:
            # The low level calls above do not preserve the newline
            # characters at the end of the input data, so we calculate the
            # difference in newlines and append them to the resulting output
            # for parity
            res_newlines = self._count_newlines_from_end(res)
            data_newlines = self._count_newlines_from_end(data)
            if data_newlines > res_newlines:
                res += '\n' * (data_newlines - res_newlines)

        return res
    except (UndefinedError, AnsibleUndefinedVariable) as e:
        if fail_on_undefined:
            raise AnsibleUndefinedVariable(e)
        else:
            #TODO: return warning about undefined var
            return data
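# A hedged, self-contained example of the template override header parsed in
# _do_template() above: key/value pairs on the first line become attributes on
# the overlaid Jinja2 environment via ast.literal_eval(). The template text
# and the JINJA2_OVERRIDE prefix value shown here are assumptions made for
# illustration, not values taken from the original source.
import ast

JINJA2_OVERRIDE = '#jinja2:'
data = "#jinja2: trim_blocks: True, lstrip_blocks: True\nHello {{ name }}\n"
if data.startswith(JINJA2_OVERRIDE):
    eol = data.find('\n')
    for pair in data[len(JINJA2_OVERRIDE):eol].split(','):
        key, val = pair.split(':')
        print("%s=%s" % (key.strip(), ast.literal_eval(val.strip())))  # trim_blocks=True, lstrip_blocks=True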
# do the final update of the local variables here, for both registered # values and any facts which may have been created if self._task.register: variables[self._task.register] = result if 'ansible_facts' in result: variables.update(result['ansible_facts']) # save the notification target in the result, if it was specified, as # this task may be running in a loop in which case the notification # may be item-specific, ie. "notify: service {{item}}" if self._task.notify is not None: result['_ansible_notify'] = self._task.notify # and return debug("attempt loop complete, returning result") return result def _poll_async_result(self, result, templar): ''' Polls for the specified JID to be complete ''' async_jid = result.get('ansible_job_id') if async_jid is None: return dict(failed=True, msg="No job id was returned by the async task") # Create a new pseudo-task to run the async_status module, and run # that (with a sleep for "poll" seconds between each retry) until the # async time limit is exceeded.
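# Editor's sketch of the polling contract described in the comment
# above. check_status stands in for running the async_status
# pseudo-task for the given job id; the exact timeout bookkeeping is an
# assumption, not the original implementation.
import time

def poll_async(check_status, async_jid, async_limit, poll_interval):
    time_left = async_limit
    result = dict(finished=0)
    while time_left > 0 and not result.get('finished', 0):
        time.sleep(poll_interval)
        result = check_status(async_jid)
        time_left -= poll_interval
    if not result.get('finished', 0):
        return dict(failed=True, msg="async task did not complete within the requested time limit")
    return result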
debug("got final result: %s" % (result, )) pending_results -= 1 finally: #p_lock.release() pass def _wait_on_pending_results(): global pending_results while pending_results > 0: debug("waiting for pending results (%d left)" % pending_results) _process_pending_results() time.sleep(0.01) debug("starting") cur_worker = 0 pending_results = 0 sample_play = Play() for i in range(NUM_TASKS): for j in range(NUM_HOSTS): debug("queuing %d, %d" % (i, j)) send_data(Task().load(dict(name="task %d %d" % (i, j), ping=""), sample_play)) debug("done queuing %d, %d" % (i, j)) _process_pending_results() debug("waiting for the results to drain...") _wait_on_pending_results() in_p.close()
def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_files=False, delete_remote_tmp=True): ''' Transfer and run a module along with its arguments. ''' # if a module name was not specified for this execution, use # the action from the task if module_name is None: module_name = self._task.action if module_args is None: module_args = self._task.args # set check mode in the module arguments, if required if self._connection_info.check_mode and not self._task.always_run: if not self._supports_check_mode: raise AnsibleError( "check mode is not supported for this operation") module_args['_ansible_check_mode'] = True # set no log in the module arguments, if required if self._connection_info.no_log: module_args['_ansible_no_log'] = True debug("in _execute_module (%s, %s)" % (module_name, module_args)) (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args) if not shebang: raise AnsibleError("module is missing interpreter line") # a remote tmp path may be necessary and not already created remote_module_path = None if not tmp and self._late_needs_tmp_path(tmp, module_style): tmp = self._make_tmp_path() remote_module_path = self._shell.join_path(tmp, module_name) # FIXME: async stuff here? #if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES): if remote_module_path: debug("transferring module to remote") self._transfer_data(remote_module_path, module_data) debug("done transferring module to remote") environment_string = self._compute_environment_string() if tmp and "tmp" in tmp and self._connection_info.become and self._connection_info.become_user != 'root': # deal with possible umask issues once sudo'ed to other user self._remote_chmod(tmp, 'a+r', remote_module_path) cmd = "" in_data = None # FIXME: all of the old-module style and async stuff has been removed from here, and # might need to be re-added (unless we decide to drop support for old-style modules # at this point and rework things to support non-python modules specifically) if self._connection._has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES: in_data = module_data else: if remote_module_path: cmd = remote_module_path rm_tmp = None if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if not self._connection_info.become or self._connection_info.become_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp cmd = self._shell.build_module_command(environment_string, shebang, cmd, rm_tmp) cmd = cmd.strip() sudoable = True if module_name == "accelerate": # always run the accelerate module as the user # specified in the play, not the sudo_user sudoable = False debug("calling _low_level_execute_command() for command %s" % cmd) res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data) debug("_low_level_execute_command returned ok") if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if self._connection_info.become and self._connection_info.become_user != 'root': # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step cmd2 = self._shell.remove(tmp, recurse=True) self._low_level_execute_command(cmd2, tmp, sudoable=False) # FIXME: in error situations, the stdout may not contain valid data, so we
# should check for bad rc codes better to catch this here if 'stdout' in res and res['stdout'].strip(): data = json.loads( self._filter_leading_non_json_lines(res['stdout'])) if 'parsed' in data and data['parsed'] is False: data['msg'] += res['stderr'] # pre-split stdout into lines, if stdout is in the data and there # isn't already a stdout_lines value there if 'stdout' in data and 'stdout_lines' not in data: data['stdout_lines'] = data.get('stdout', '').splitlines() else: data = dict() # store the module invocation details back into the result data['invocation'] = dict( module_args=module_args, module_name=module_name, ) debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data
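# Editor's sketch of what _filter_leading_non_json_lines() (used above)
# is assumed to do: drop any noise printed before the module's JSON
# response (motd banners, sudo warnings) so json.loads() sees clean
# input. The real helper may differ in detail.
def filter_leading_non_json_lines(data):
    lines = data.splitlines()
    for (i, line) in enumerate(lines):
        if line.startswith('{') or line.startswith('['):
            return '\n'.join(lines[i:])
    return ''

assert filter_leading_non_json_lines('banner\n{"changed": false}') == '{"changed": false}'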
def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the remote host ''' super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) if in_data: raise AnsibleError( "Internal Error: this module does not support optimized module pipelining" ) bufsize = 4096 try: self.ssh.get_transport().set_keepalive(5) chan = self.ssh.get_transport().open_session() except Exception as e: msg = "Failed to open session" if len(str(e)) > 0: msg += ": %s" % str(e) raise AnsibleConnectionFailure(msg) # sudo usually requires a PTY (cf. requiretty option), therefore # we give it one by default (pty=True in ansible.cfg), and we try # to initialise from the calling environment if C.PARAMIKO_PTY: chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr) no_prompt_out = '' no_prompt_err = '' become_output = '' try: chan.exec_command(cmd) if self._connection_info.prompt: if self._connection_info.become and self._connection_info.become_pass: while True: debug('Waiting for Privilege Escalation input') if self.check_become_success( become_output) or self.check_password_prompt( become_output): break chunk = chan.recv(bufsize) debug("become chunk is: %s" % chunk) if not chunk: if 'unknown user' in become_output: raise AnsibleError('user %s does not exist' % self._connection_info.become_user) else: raise AnsibleError( 'ssh connection ' + 'closed waiting for password prompt') become_output += chunk if not self.check_become_success(become_output): if self._connection_info.become: chan.sendall(self._connection_info.become_pass + '\n') else: no_prompt_out += become_output no_prompt_err += become_output except socket.timeout: raise AnsibleError( 'ssh timed out waiting for privilege escalation.\n' + become_output) stdout = ''.join(chan.makefile('rb', bufsize)) stderr = ''.join(chan.makefile_stderr('rb', bufsize)) return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_err + stderr)
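# Editor's sketch of the privilege-escalation handshake above, factored
# into a standalone loop for readability. recv, check_success,
# check_prompt and send_password are stand-ins for the channel and
# connection-info helpers used in the real code.
def read_become_output(recv, check_success, check_prompt, send_password, bufsize=4096):
    output = ''
    while not (check_success(output) or check_prompt(output)):
        chunk = recv(bufsize)
        if not chunk:
            raise IOError('connection closed waiting for become prompt')
        output += chunk
    if not check_success(output):
        # we saw the password prompt but not the success marker yet,
        # so answer the prompt
        send_password()
    return output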
class TaskExecutor: ''' This is the main worker class for the executor pipeline, which handles loading an action plugin to actually dispatch the task to a given host. This class roughly corresponds to the old Runner() class. ''' # Modules that we optimize by squashing loop items into a single call to # the module SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS) def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj): self._host = host self._task = task self._job_vars = job_vars self._play_context = play_context self._new_stdin = new_stdin self._loader = loader self._shared_loader_obj = shared_loader_obj try: from __main__ import display self._display = display except ImportError: from ansible.utils.display import Display self._display = Display() def run(self): ''' The main executor entrypoint, where we determine if the specified task requires looping and either runs the task with the loop items or executes it directly ''' debug("in run()") try: # lookup plugins need to know if this task is executing from # a role, so that it can properly find files/templates/etc. roledir = None if self._task._role: roledir = self._task._role._role_path self._job_vars['roledir'] = roledir items = self._get_loop_items() if items is not None: if len(items) > 0: item_results = self._run_loop(items) # loop through the item results, and remember the changed/failed # result flags based on any item there. changed = False failed = False for item in item_results: if 'changed' in item and item['changed']: changed = True if 'failed' in item and item['failed']: failed = True # create the overall result item, and set the changed/failed # flags there to reflect the overall result of the loop res = dict(results=item_results) if changed: res['changed'] = True if failed: res['failed'] = True res['msg'] = 'One or more items failed' else: res['msg'] = 'All items completed' else: res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[]) else: debug("calling self._execute()") res = self._execute() debug("_execute() done") # make sure changed is set in the result, if it's not present if 'changed' not in res: res['changed'] = False debug("dumping result to json") result = json.dumps(res) debug("done dumping result, returning") return result except AnsibleError as e: return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr')) finally: try: self._connection.close() except AttributeError: pass except Exception as e: debug("error closing connection: %s" % to_unicode(e)) def _get_loop_items(self): ''' Loads a lookup plugin to handle the with_* portion of a task (if specified), and returns the items result. 
''' # create a copy of the job vars here so that we can modify # them temporarily without changing them too early for other # parts of the code that might still need a pristine version vars_copy = self._job_vars.copy() # now we update them with the play context vars self._play_context.update_vars(vars_copy) templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy) items = None if self._task.loop: if self._task.loop in self._shared_loader_obj.lookup_loader: #TODO: remove convert_bare true and deprecate this in with_ try: loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: if 'has no attribute' in str(e): loop_terms = [] self._display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.") else: raise items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) return items def _run_loop(self, items): ''' Runs the task with the loop items specified and collates the result into an array named 'results' which is inserted into the final result along with the item for which the loop ran. ''' results = [] # make copies of the job vars and task so we can add the item to # the variables and re-validate the task with the item variable task_vars = self._job_vars.copy() items = self._squash_items(items, task_vars) for item in items: task_vars['item'] = item try: tmp_task = self._task.copy() except AnsibleParserError as e: results.append(dict(failed=True, msg=str(e))) continue # now we swap the internal task with the copy, execute, # and swap them back so we can do the next iteration cleanly (self._task, tmp_task) = (tmp_task, self._task) res = self._execute(variables=task_vars) (self._task, tmp_task) = (tmp_task, self._task) # now update the result with the item info, and append the result # to the list of results res['item'] = item results.append(res) return results def _squash_items(self, items, variables): ''' Squash items down to a comma-separated list for certain modules which support it (typically package management modules). ''' if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS: final_items = [] name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None) for item in items: variables['item'] = item templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) if self._task.evaluate_conditional(templar, variables): if templar._contains_vars(name): new_item = templar.template(name) final_items.append(new_item) else: final_items.append(item) joined_items = ",".join(final_items) self._task.args['name'] = joined_items return [joined_items] else: return items def _execute(self, variables=None): ''' The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution ''' if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. 
self._play_context.post_validate(templar=templar) # now that the play context is finalized, we can add 'magic' # variables to the variable dictionary self._play_context.update_vars(variables) # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail if not self._task.evaluate_conditional(templar, variables): debug("when evaluation failed, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional check failed') # Now we do final validation on the task, which sets all fields to their final values. # In the case of debug tasks, we save any 'var' params and restore them after validating # so that variables are not replaced too early. prev_var = None if self._task.action == 'debug' and 'var' in self._task.args: prev_var = self._task.args.pop('var') self._task.post_validate(templar=templar) if '_variable_params' in self._task.args: variable_params = self._task.args.pop('_variable_params') if isinstance(variable_params, dict): self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts") variable_params.update(self._task.args) self._task.args = variable_params if prev_var is not None: self._task.args['var'] = prev_var # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action == 'include': include_variables = self._task.args.copy() include_file = include_variables.get('_raw_params') del include_variables['_raw_params'] return dict(include=include_file, include_variables=include_variables) # get the connection and the handler for this execution self._connection = self._get_connection(variables) self._connection.set_host_overrides(host=self._host) self._handler = self._get_action_handler(connection=self._connection, templar=templar) # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems())) # Read some values from the task, so that we can modify them if need be retries = self._task.retries if retries <= 0: retries = 1 delay = self._task.delay if delay < 0: delay = 1 # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions vars_copy = variables.copy() debug("starting attempt loop") result = None for attempt in range(retries): if attempt > 0: # FIXME: this should use the callback/message passing mechanism print("FAILED - RETRYING: %s (%d retries left). 
Result was: %s" % (self._task, retries-attempt, result)) result['attempts'] = attempt + 1 debug("running the handler") result = self._handler.run(task_vars=variables) debug("handler run complete") if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result try: result = json.loads(result.get('stdout')) except (TypeError, ValueError) as e: return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e)) if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar) # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: vars_copy[self._task.register] = result if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) # create a conditional object to evaluate task conditions cond = Conditional(loader=self._loader) # FIXME: make sure until is mutually exclusive with changed_when/failed_when if self._task.until: cond.when = self._task.until if cond.evaluate_conditional(templar, vars_copy): break elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result: if self._task.changed_when: cond.when = [ self._task.changed_when ] result['changed'] = cond.evaluate_conditional(templar, vars_copy) if self._task.failed_when: cond.when = [ self._task.failed_when ] failed_when_result = cond.evaluate_conditional(templar, vars_copy) result['failed_when_result'] = result['failed'] = failed_when_result if failed_when_result: break elif 'failed' not in result: if result.get('rc', 0) != 0: result['failed'] = True else: # if the result is not failed, stop trying break if attempt < retries - 1: time.sleep(delay) # do the final update of the local variables here, for both registered # values and any facts which may have been created if self._task.register: variables[self._task.register] = result if 'ansible_facts' in result: variables.update(result['ansible_facts']) # save the notification target in the result, if it was specified, as # this task may be running in a loop in which case the notification # may be item-specific, ie. "notify: service {{item}}" if self._task.notify is not None: result['_ansible_notify'] = self._task.notify # and return debug("attempt loop complete, returning result") return result
def cleanup(self, iterator, connection_info): ''' Iterates through failed hosts and runs any outstanding rescue/always blocks and handlers which may still need to be run after a failure. ''' debug("in cleanup") result = True debug("getting failed hosts") failed_hosts = self.get_failed_hosts(iterator._play) if len(failed_hosts) == 0: debug("there are no failed hosts") return result debug("marking hosts failed in the iterator") # mark the host as failed in the iterator so it will take # any required rescue paths which may be outstanding for host in failed_hosts: iterator.mark_host_failed(host) debug("clearing the failed hosts list") # clear the failed hosts dictionary now for entry in self._tqm._failed_hosts.keys(): del self._tqm._failed_hosts[entry] work_to_do = True while work_to_do: work_to_do = False for host in failed_hosts: host_name = host.name if host_name in self._tqm._failed_hosts: iterator.mark_host_failed(host) del self._tqm._failed_hosts[host_name] if host_name in self._blocked_hosts: work_to_do = True continue elif iterator.get_next_task_for_host( host, peek=True ) and host_name not in self._tqm._unreachable_hosts: work_to_do = True # pop the task, mark the host blocked, and queue it self._blocked_hosts[host_name] = True task = iterator.get_next_task_for_host(host) task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, host=host, task=task) self._tqm.send_callback( 'v2_playbook_on_cleanup_task_start', task) self._queue_task(host, task, task_vars, connection_info) self._process_pending_results(iterator) time.sleep(0.01) # no more work, wait until the queue is drained self._wait_on_pending_results(iterator) return result
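# Editor's sketch of the _squash_items optimization defined earlier:
# for modules in SQUASH_ACTIONS (package managers, typically), a loop
# over items with name="{{ item }}" collapses into one module call
# whose name is the comma-joined item list.
items = ['httpd', 'mod_ssl', 'php']
joined_items = ",".join(items)   # each item rendered through the templated name
task_args = {'name': joined_items}
assert task_args['name'] == 'httpd,mod_ssl,php'
# the loop then runs exactly once, with item == 'httpd,mod_ssl,php'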
def __getstate__(self): debug("pickling %s" % self._foo) return dict(foo=self._foo)
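# Editor's sketch (assumed, not in the original source): the
# __setstate__ counterpart implied by the __getstate__ above, restoring
# the single pickled attribute.
def __setstate__(self, data):
    self._foo = data.get('foo')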
def _execute(self, variables=None): ''' The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution ''' if variables is None: variables = self._job_vars templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. self._connection_info.post_validate(templar=templar) # now that the connection information is finalized, we can add 'magic' # variables to the variable dictionary self._connection_info.update_vars(variables) # get the connection and the handler for this execution self._connection = self._get_connection(variables) self._connection.set_host_overrides(host=self._host) self._handler = self._get_action_handler(connection=self._connection, templar=templar) # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail if not self._task.evaluate_conditional(templar, variables): debug("when evaluation failed, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional check failed') # Now we do final validation on the task, which sets all fields to their final values self._task.post_validate(templar=templar) # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action == 'include': include_variables = self._task.args.copy() include_file = include_variables.get('_raw_params') del include_variables['_raw_params'] return dict(changed=True, include=include_file, include_variables=include_variables) # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: self._task.args = dict( filter(lambda x: x[1] != omit_token, self._task.args.iteritems())) # Read some values from the task, so that we can modify them if need be retries = self._task.retries if retries <= 0: retries = 1 delay = self._task.delay if delay < 0: delay = 1 # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions vars_copy = variables.copy() debug("starting attempt loop") result = None for attempt in range(retries): if attempt > 0: # FIXME: this should use the callback/message passing mechanism print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries - attempt)) result['attempts'] = attempt + 1 debug("running the handler") result = self._handler.run(task_vars=variables) debug("handler run complete") if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result try: result = json.loads(result.get('stdout')) except ValueError as e: return dict( failed=True, msg="The async task did not return valid JSON: %s" % str(e)) if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar)
def _do_template(self, data, preserve_trailing_newlines=False): try: environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize) environment.filters.update(self._get_filters()) environment.template_class = AnsibleJ2Template # FIXME: may not be required anymore, as the basedir stuff will # be handled by the loader? #if '_original_file' in vars: # basedir = os.path.dirname(vars['_original_file']) # filesdir = os.path.abspath(os.path.join(basedir, '..', 'files')) # if os.path.exists(filesdir): # basedir = filesdir try: t = environment.from_string(data) except TemplateSyntaxError as e: raise AnsibleError( "template error while templating string: %s" % str(e)) except Exception as e: if 'recursion' in str(e): raise AnsibleError( "recursive loop detected in template string: %s" % data) else: return data t.globals['lookup'] = self._lookup t.globals['finalize'] = self._finalize jvars = AnsibleJ2Vars(self, t.globals) new_context = t.new_context(jvars, shared=True) rf = t.root_render_func(new_context) try: res = j2_concat(rf) except TypeError as te: if 'StrictUndefined' in str(te): raise AnsibleUndefinedVariable( "Unable to look up a name or access an attribute in template string. " + \ "Make sure your variable name does not contain invalid characters like '-'." ) else: debug( "failing because of a type error, template data is: %s" % data) raise AnsibleError( "an unexpected type error occurred. Error was %s" % te) if preserve_trailing_newlines: # The low level calls above do not preserve the newline # characters at the end of the input data, so we # calculate the difference in newlines and append them # to the resulting output for parity res_newlines = self._count_newlines_from_end(res) data_newlines = self._count_newlines_from_end(data) if data_newlines > res_newlines: res += '\n' * (data_newlines - res_newlines) return res except (UndefinedError, AnsibleUndefinedVariable) as e: if self._fail_on_undefined_errors: raise else: return data
def _process_pending_results(self, iterator): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). ''' ret_results = [] while not self._final_q.empty() and not self._tqm._terminated: try: result = self._final_q.get(block=False) debug("got result from result worker: %s" % (result,)) # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] host = task_result._host task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed(): if not task.ignore_errors: debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback('v2_runner_on_failed', task_result) elif result[0] == 'host_unreachable': self._tqm._unreachable_hosts[host.name] = True self._tqm._stats.increment('dark', host.name) self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif result[0] == 'host_task_skipped': self._tqm._stats.increment('skipped', host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) elif result[0] == 'host_task_ok': self._tqm._stats.increment('ok', host.name) if 'changed' in task_result._result and task_result._result['changed']: self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) self._pending_results -= 1 if host.name in self._blocked_hosts: del self._blocked_hosts[host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'): # lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in ROLE_CACHE[task_result._task._role._role_name].iteritems(): hashed_entry = hash_params(task_result._task._role._role_params) if entry == hashed_entry: role_obj._had_task_run = True ret_results.append(task_result) elif result[0] == 'add_host': task_result = result[1] new_host_info = task_result.get('add_host', dict()) self._add_host(new_host_info) elif result[0] == 'add_group': host = result[1] task_result = result[2] group_name = task_result.get('add_group') self._add_group(host, group_name) elif result[0] == 'notify_handler': host = result[1] handler_name = result[2] if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] if host not in self._notified_handlers[handler_name]: self._notified_handlers[handler_name].append(host) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] task = result[2] item = result[3] if task.delegate_to is not None: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) task_vars = self.add_tqm_variables(task_vars, play=iterator._play) if item is not None: task_vars['item'] = item templar = Templar(loader=self._loader, variables=task_vars) host_name = templar.template(task.delegate_to) target_host = self._inventory.get_host(host_name) if target_host is None: target_host = Host(name=host_name) else: target_host = host if result[0] == 'set_host_var': var_name = result[4] var_value = result[5] self._variable_manager.set_host_variable(target_host, var_name, var_value) elif result[0] 
== 'set_host_facts': facts = result[4] self._variable_manager.set_host_facts(target_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) except Queue.Empty: pass return ret_results
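# Editor's summary of the tagged tuples read off the final queue above
# (tags taken from the code; payload shapes inferred from how each
# branch unpacks them):
#
#   ('host_task_ok' | 'host_task_failed' | 'host_task_skipped' | 'host_unreachable', task_result)
#   ('add_host', task_result)
#   ('add_group', host, task_result)
#   ('notify_handler', host, handler_name)
#   ('set_host_var', host, task, item, var_name, var_value)
#   ('set_host_facts', host, task, item, facts)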
def run(self): ''' Called when the process is started, and loops indefinitely until an error is encountered (typically an IOError from the queue pipe being disconnected). During the loop, we attempt to pull tasks off the job queue and run them, pushing the result onto the results queue. We also remove the host from the blocked hosts list, to signify that it is ready for its next task. ''' if HAS_ATFORK: atfork() try: # execute the task and build a TaskResult from the result debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) executor_result = TaskExecutor( self._host, self._task, self._task_vars, self._play_context, self._new_stdin, self._loader, self._shared_loader_obj, ).run() debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host, self._task, executor_result) # put the result on the result queue debug("sending task result") self._rslt_q.put(task_result) debug("done sending task result") except AnsibleConnectionFailure: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host, self._task, dict(unreachable=True)) self._rslt_q.put(task_result, block=False) except Exception as e: if not isinstance( e, (IOError, EOFError, KeyboardInterrupt)) or isinstance( e, TemplateNotFound): try: self._host.vars = dict() self._host.groups = [] task_result = TaskResult( self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout='')) self._rslt_q.put(task_result, block=False) except: debug("WORKER EXCEPTION: %s" % e) debug("WORKER EXCEPTION: %s" % traceback.format_exc()) debug("WORKER PROCESS EXITING")
def get_vars(self, loader, play=None, host=None, task=None): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different sets of variables being returned due to the additional context). The order of precedence is: - play->roles->get_default_vars (if there is a play context) - group_vars_files[host] (if there is a host context) - host_vars_files[host] (if there is a host context) - host->get_vars (if there is a host context) - fact_cache[host] (if there is a host context) - vars_cache[host] (if there is a host context) - play vars (if there is a play context) - play vars_files (if there's no host context, ignore file names that cannot be templated) - task->get_vars (if there is a task context) - extra vars ''' debug("in VariableManager get_vars()") cache_entry = self._get_cache_entry(play=play, host=host, task=task) if cache_entry in CACHED_VARS: debug("vars are cached, returning them now") return CACHED_VARS[cache_entry] all_vars = defaultdict(dict) if play: # first we compile any vars specified in defaults/main.yml # for all roles within the specified play for role in play.get_roles(): all_vars = self._combine_vars(all_vars, role.get_default_vars()) if host: # next, if a host is specified, we load any vars from group_vars # files and then any vars from host_vars files which may apply to # this host or the groups it belongs to # we merge in the special 'all' group_vars first, if they exist if 'all' in self._group_vars_files: all_vars = self._combine_vars(all_vars, self._group_vars_files['all']) for group in host.get_groups(): all_vars = self._combine_vars(all_vars, group.get_vars()) if group.name in self._group_vars_files and group.name != 'all': all_vars = self._combine_vars( all_vars, self._group_vars_files[group.name]) host_name = host.get_name() if host_name in self._host_vars_files: all_vars = self._combine_vars(all_vars, self._host_vars_files[host_name]) # then we merge in vars specified for this host all_vars = self._combine_vars(all_vars, host.get_vars()) # next comes the facts cache and the vars cache, respectively all_vars = self._combine_vars( all_vars, self._fact_cache.get(host.get_name(), dict())) if play: all_vars = self._combine_vars(all_vars, play.get_vars()) templar = Templar(loader=loader, variables=all_vars) for vars_file in play.get_vars_files(): try: vars_file = templar.template(vars_file) data = loader.load_from_file(vars_file) all_vars = self._combine_vars(all_vars, data) except: # FIXME: get_vars should probably be taking a flag to determine # whether or not vars files errors should be fatal at this # stage, or just base it on whether a host was specified? 
pass for role in play.get_roles(): all_vars = self._combine_vars(all_vars, role.get_vars()) if host: all_vars = self._combine_vars( all_vars, self._vars_cache.get(host.get_name(), dict())) if task: if task._role: all_vars = self._combine_vars(all_vars, task._role.get_vars()) all_vars = self._combine_vars(all_vars, task.get_vars()) all_vars = self._combine_vars(all_vars, self._extra_vars) # FIXME: make sure all special vars are here # Finally, we create special vars if host and self._inventory is not None: hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader) all_vars['hostvars'] = hostvars if self._inventory is not None: all_vars['inventory_dir'] = self._inventory.basedir() # the 'omit' value allows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token CACHED_VARS[cache_entry] = all_vars debug("done with get_vars()") return all_vars
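# Editor's sketch of the precedence behaviour of get_vars() above,
# assuming _combine_vars merges two dicts with the second argument
# winning (the default, non-merging hash behaviour).
def combine_vars(a, b):
    result = a.copy()
    result.update(b)
    return result

role_defaults = dict(port=80, user='www')
host_vars = dict(port=8080)
extra_vars = dict(port=9090)

all_vars = {}
for source in (role_defaults, host_vars, extra_vars):
    all_vars = combine_vars(all_vars, source)

assert all_vars == dict(port=9090, user='www')  # later sources win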
class StrategyModule(StrategyBase): def _get_next_task_lockstep(self, hosts, iterator): ''' Returns a list of (host, task) tuples, where the task may be a noop task to keep the iterator in lock step across all hosts. ''' noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) host_tasks = {} for host in hosts: host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) num_setups = 0 num_tasks = 0 num_rescue = 0 num_always = 0 lowest_cur_block = len(iterator._blocks) for (k, v) in host_tasks.iteritems(): if v is None: continue (s, t) = v if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE: lowest_cur_block = s.cur_block if s.run_state == PlayIterator.ITERATING_SETUP: num_setups += 1 elif s.run_state == PlayIterator.ITERATING_TASKS: num_tasks += 1 elif s.run_state == PlayIterator.ITERATING_RESCUE: num_rescue += 1 elif s.run_state == PlayIterator.ITERATING_ALWAYS: num_always += 1 def _advance_selected_hosts(hosts, cur_block, cur_state): ''' This helper returns the task for all hosts in the requested state, otherwise they get a noop dummy task. This also advances the state of the host, since the given states are determined while using peek=True. ''' # we return the values in the order they were originally # specified in the given hosts array rvals = [] for host in hosts: (s, t) = host_tasks[host.name] if s.run_state == cur_state and s.cur_block == cur_block: new_t = iterator.get_next_task_for_host(host) #if new_t != t: # raise AnsibleError("iterator error, wtf?") rvals.append((host, t)) else: rvals.append((host, noop_task)) return rvals # if any hosts are in ITERATING_SETUP, return the setup task # while all other hosts get a noop if num_setups: return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP) # if any hosts are in ITERATING_TASKS, return the next normal # task for these hosts, while all other hosts get a noop if num_tasks: return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS) # if any hosts are in ITERATING_RESCUE, return the next rescue # task for these hosts, while all other hosts get a noop if num_rescue: return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE) # if any hosts are in ITERATING_ALWAYS, return the next always # task for these hosts, while all other hosts get a noop if num_always: return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS) # at this point, everything must be ITERATING_COMPLETE, so we # return None for all hosts in the list return [(host, None) for host in hosts] def run(self, iterator, connection_info): ''' The linear strategy is simple - get the next task and queue it for all hosts, then wait for the queue to drain before moving on to the next task ''' # iterate over each task, while there is one left to run result = True work_to_do = True while work_to_do and not self._tqm._terminated: try: debug("getting the remaining hosts for this loop") self._tqm._failed_hosts = iterator.get_failed_hosts() hosts_left = self.get_hosts_remaining(iterator._play) debug("done getting the remaining hosts for this loop") if len(hosts_left) == 0: debug("out of hosts to run on") self._tqm.send_callback( 'v2_playbook_on_no_hosts_remaining') result = False break # queue up this task for each host in the inventory callback_sent = False work_to_do = False host_results = [] host_tasks = self._get_next_task_lockstep(hosts_left, iterator) for 
(host, task) in host_tasks: if not task: continue run_once = False work_to_do = True # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. try: action = action_loader.get(task.action, class_only=True) if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False): run_once = True except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin pass # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: debug("'%s' skipped because role has already run" % task) continue if task.action == 'meta': # meta tasks store their args in the _raw_params field of args, # since they do not use k=v pairs, so get that meta_action = task.args.get('_raw_params') if meta_action == 'noop': # FIXME: issue a callback for the noop here? continue elif meta_action == 'flush_handlers': self.run_handlers(iterator, connection_info) else: raise AnsibleError( "invalid meta action requested: %s" % meta_action, obj=task._ds) else: debug("getting variables") task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, host=host, task=task) task_vars = self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) debug("done getting variables") if not callback_sent: temp_task = task.copy() temp_task.name = templar.template( temp_task.get_name(), fail_on_undefined=False) self._tqm.send_callback( 'v2_playbook_on_task_start', temp_task, is_conditional=False) callback_sent = True self._blocked_hosts[host.get_name()] = True self._queue_task(host, task, task_vars, connection_info) results = self._process_pending_results(iterator) host_results.extend(results) # if we're bypassing the host loop, break out now if run_once: break debug( "done queuing things up, now waiting for results queue to drain" ) results = self._wait_on_pending_results(iterator) host_results.extend(results) try: included_files = IncludedFile.process_include_results( host_results, self._tqm, iterator=iterator, loader=self._loader) except AnsibleError as e: return False if len(included_files) > 0: noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) all_blocks = dict((host, []) for host in hosts_left) for included_file in included_files: # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: new_blocks = self._load_included_file( included_file, iterator=iterator) except AnsibleError as e: for host in included_file._hosts: iterator.mark_host_failed(host) # FIXME: callback here? 
print(e) continue for new_block in new_blocks: noop_block = Block(parent_block=task._block) noop_block.block = [ noop_task for t in new_block.block ] noop_block.always = [ noop_task for t in new_block.always ] noop_block.rescue = [ noop_task for t in new_block.rescue ] for host in hosts_left: if host in included_file._hosts: task_vars = self._variable_manager.get_vars( loader=self._loader, play=iterator._play, host=host, task=included_file._task) final_block = new_block.filter_tagged_tasks( connection_info, task_vars) all_blocks[host].append(final_block) else: all_blocks[host].append(noop_block) for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) debug("results queue empty") except (IOError, EOFError) as e: debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed return False
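# Editor's toy illustration of the lock-step scheduling implemented by
# _get_next_task_lockstep() above: hosts whose iterator state matches
# the chosen run state advance to their real task, every other host
# receives a noop, so all hosts stay aligned block by block.
def lockstep(host_states, chosen_state, next_task):
    pairs = []
    for (host, state) in host_states:
        if state == chosen_state:
            pairs.append((host, next_task(host)))
        else:
            pairs.append((host, 'noop'))
    return pairs

states = [('h1', 'tasks'), ('h2', 'rescue')]
assert lockstep(states, 'rescue', lambda h: 'rescue-task') == [('h1', 'noop'), ('h2', 'rescue-task')]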
jvars = AnsibleJ2Vars(self, t.globals) new_context = t.new_context(jvars, shared=True) rf = t.root_render_func(new_context) try: res = j2_concat(rf) except TypeError as te: if 'StrictUndefined' in str(te): raise AnsibleUndefinedVariable( "Unable to look up a name or access an attribute in template string. " + \ "Make sure your variable name does not contain invalid characters like '-'." ) else: debug( "failing because of a type error, template data is: %s" % data) raise AnsibleError( "an unexpected type error occurred. Error was %s" % te) if preserve_trailing_newlines: # The low level calls above do not preserve the newline # characters at the end of the input data, so we # calculate the difference in newlines and append them # to the resulting output for parity res_newlines = self._count_newlines_from_end(res) data_newlines = self._count_newlines_from_end(data) if data_newlines > res_newlines: res += '\n' * (data_newlines - res_newlines) return res
def run(self, iterator, connection_info): ''' The linear strategy is simple - get the next task and queue it for all hosts, then wait for the queue to drain before moving on to the next task ''' result = True # iterate over each task, while there is one left to run work_to_do = True while work_to_do and not self._tqm._terminated: try: debug("getting the remaining hosts for this loop") self._tqm._failed_hosts = iterator.get_failed_hosts() hosts_left = self.get_hosts_remaining(iterator._play) debug("done getting the remaining hosts for this loop") if len(hosts_left) == 0: debug("out of hosts to run on") self._callback.playbook_on_no_hosts_remaining() result = False break # queue up this task for each host in the inventory callback_sent = False work_to_do = False host_results = [] host_tasks = self._get_next_task_lockstep(hosts_left, iterator) for (host, task) in host_tasks: if not task: continue run_once = False work_to_do = True # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. try: action = action_loader.get(task.action, class_only=True) if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False): run_once = True except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin pass debug("getting variables") task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) debug("done getting variables") # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(): # If there is no metadata, the default behavior is to not allow duplicates, # if there is metadata, check to see if the allow_duplicates flag was set to true if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: debug("'%s' skipped because role has already run" % task) continue if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup': debug("'%s' failed tag evaluation" % task) continue if task.action == 'meta': # meta tasks store their args in the _raw_params field of args, # since they do not use k=v pairs, so get that meta_action = task.args.get('_raw_params') if meta_action == 'noop': # FIXME: issue a callback for the noop here? 
print("%s => NOOP" % host) continue elif meta_action == 'flush_handlers': self.run_handlers(iterator, connection_info) else: raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) else: if not callback_sent: self._callback.playbook_on_task_start(task.get_name(), False) callback_sent = True self._blocked_hosts[host.get_name()] = True self._queue_task(host, task, task_vars, connection_info) results = self._process_pending_results(iterator) host_results.extend(results) # if we're bypassing the host loop, break out now if run_once: break debug("done queuing things up, now waiting for results queue to drain") results = self._wait_on_pending_results(iterator) host_results.extend(results) # FIXME: this needs to be somewhere else class IncludedFile: def __init__(self, filename, args, task): self._filename = filename self._args = args self._task = task self._hosts = [] def add_host(self, host): if host not in self._hosts: self._hosts.append(host) def __eq__(self, other): return other._filename == self._filename and other._args == self._args def __repr__(self): return "%s (%s): %s" % (self._filename, self._args, self._hosts) included_files = [] for res in host_results: if res._task.action == 'include': if res._task.loop: include_results = res._result['results'] else: include_results = [ res._result ] for include_result in include_results: original_task = iterator.get_original_task(res._host, res._task) if original_task and original_task._role: include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file) else: include_file = self._loader.path_dwim(res._task.args.get('_raw_params')) include_variables = include_result.get('include_variables', dict()) if 'item' in include_result: include_variables['item'] = include_result['item'] inc_file = IncludedFile(include_file, include_variables, original_task) try: pos = included_files.index(inc_file) inc_file = included_files[pos] except ValueError: included_files.append(inc_file) inc_file.add_host(res._host) if len(included_files) > 0: noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) all_tasks = dict((host, []) for host in hosts_left) for included_file in included_files: # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step new_tasks = self._load_included_file(included_file) noop_tasks = [noop_task for t in new_tasks] for host in hosts_left: if host in included_file._hosts: all_tasks[host].extend(new_tasks) else: all_tasks[host].extend(noop_tasks) for host in hosts_left: iterator.add_tasks(host, all_tasks[host]) debug("results queue empty") except (IOError, EOFError), e: debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed return 1
def _send_result(self, result): debug("sending result: %s" % (result, )) self._final_q.put(result, block=False) debug("done sending result")