def ansible_adhoc(ips):
    """Run the ansible ``setup`` module ad-hoc against every IP in *ips*.

    Builds a one-task play that gathers facts via ``setup`` and runs it with
    the module-level loader/variable_manager/options/resultcallback objects.
    Returns the module-level ``ansible_facts_info`` accumulated by the callback.
    """
    logger.info("ansible需要采集%d个IP" % (len(ips)))
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=ips)
    # Load the variables that belong to this inventory.
    variable_manager.set_inventory(inventory)
    # Inject extra variables: the SSH credentials ansible should use.
    variable_manager.extra_vars = {"ansible_ssh_user": ansible_username, "ansible_ssh_pass": ansible_password}
    play_source = {"name": "Ansible Ad-Hoc", "hosts": ips, "gather_facts": "no", "tasks": [{"action": {"module": "setup", "args": ""}}]}
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=None,
            stdout_callback=resultcallback,
            run_tree=False,
        )
        tqm.run(play)
        # NOTE(review): assumes the callback populates the module-level
        # ansible_facts_info as a side effect -- verify against its definition.
        return ansible_facts_info
    finally:
        if tqm is not None:
            tqm.cleanup()
def run_model(self, host_list, module_name, module_args):
    """
    Run a single ansible module ad-hoc (like ``ansible -m``).

    module_name: ansible module name
    module_args: ansible module args
    """
    source = {
        'name': "Ansible Play",
        'hosts': host_list,
        'gather_facts': 'no',
        'tasks': [{'action': {'module': module_name, 'args': module_args}}],
    }
    play = Play().load(source, variable_manager=self.variable_manager, loader=self.loader)
    # Persist output when a redis key is configured, otherwise collect in memory.
    if self.redisKey:
        self.callback = ModelResultsCollectorToSave(self.redisKey, self.logId)
    else:
        self.callback = ModelResultsCollector()
    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
        )
        manager._stdout_callback = self.callback
        manager.run(play)
    finally:
        if manager is not None:
            manager.cleanup()
def run_model(self, host_list, module_name, module_args):
    """
    Run a module ad-hoc, persisting results/errors to redis and/or the DB log.

    module_name: ansible module name
    module_args: ansible module args
    """
    play_source = dict(
        name="Ansible Play",
        hosts=host_list,
        gather_facts='no',
        tasks=[dict(action=dict(module=module_name, args=module_args))]
    )
    play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
    tqm = None
    # Choose a persisting collector when either sink (redis key / log id) is
    # configured, otherwise collect results only in memory.
    if self.redisKey or self.logId:self.callback = ModelResultsCollectorToSave(self.redisKey,self.logId)
    else:self.callback = ModelResultsCollector()
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
        )
        tqm._stdout_callback = self.callback
        constants.HOST_KEY_CHECKING = False  # disable host-key checking so the first SSH connection does not prompt
        tqm.run(play)
    except Exception as err:
        # Best-effort: forward the error to whichever sink(s) are configured.
        if self.redisKey:DsRedis.OpsAnsibleModel.lpush(self.redisKey,data=err)
        if self.logId:AnsibleSaveResult.Model.insert(self.logId, err)
    finally:
        if tqm is not None:
            tqm.cleanup()
def run_module(self, hosts='localhost', rules=None):
    '''
    Run a list of module rules ad-hoc and return the results callback.

    rules=[
        {'module': 'shell', 'args': 'echo "ok"', 'register': 'echo_ok'},
        {'module': 'debug', 'args': {'msg': '{{echo_ok.stdout}}'}}
    ]
    '''
    # Bug fixes:
    #  - mutable default argument replaced with a None sentinel;
    #  - each rule is copied so pop() no longer mutates the caller's dicts;
    #  - 'qm = None' typo fixed: tqm must be predefined or the finally
    #    clause raises NameError when TaskQueueManager() itself fails.
    if rules is None:
        rules = [{'module': 'setup'}]
    tasks = []
    for rule in rules:
        rule = dict(rule)  # defensive copy: do not mutate caller input
        register = rule.pop('register', None)
        task = dict(action=rule)
        if register is not None:
            task['register'] = register
        tasks.append(task)
    play_source = dict(
        name="Ansible Play",
        hosts=hosts,
        gather_facts='no',
        tasks=tasks
    )
    results_callback = ResultCallback()
    play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=results_callback,
        )
        tqm.run(play)
        return results_callback
    finally:
        if tqm is not None:
            tqm.cleanup()
def runansible(self, host_list, task_list):
    """Run *task_list* against *host_list*; return a JSON string of results.

    The returned JSON has three buckets keyed by host:
    'success', 'failed' and 'unreachable'.
    """
    play_source = dict(
        name="Ansible Play",
        hosts=host_list,
        gather_facts='no',
        tasks=task_list
    )
    play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.callback,
        )
        tqm.run(play)
    finally:
        # Bug fix: the original never called cleanup(), leaking worker
        # processes/temp files; always clean up, even when run() raises.
        if tqm is not None:
            tqm.cleanup()
    result_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
    for host, result in self.callback.host_ok.items():
        result_raw['success'][host] = result._result
    for host, result in self.callback.host_failed.items():
        result_raw['failed'][host] = result._result
    for host, result in self.callback.host_unreachable.items():
        result_raw['unreachable'][host] = result._result
    return json.dumps(result_raw, sort_keys=False, indent=4)
def AnsibleTask(task_list, host_list, user):
    '''ansible python api 2.0

    Run each (module, args) pair in *task_list* as one play against the first
    host in *host_list*, authenticating as *user*.  Returns the TQM exit code,
    or the exception raised by the run (previous behavior discarded both).
    '''
    loader = DataLoader()
    variable_manager = VariableManager()
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list)
    variable_manager.set_inventory(inventory)
    task_dict = [{"action": {"module": i[0], "args": i[1]}} for i in task_list]
    variable_manager.extra_vars = {"ansible_ssh_user": user, "ansible_ssh_pass": ""}
    play_source = {"name": "Ansible PlayBook Run", "hosts": host_list[0], "gather_facts": "no", "tasks": task_dict}
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    tqm = None
    result = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=None,
            stdout_callback='minimal',
            run_tree=False,
        )
        result = tqm.run(play)
    except Exception as e:  # bug fix: 'except Exception, e' is Py2-only syntax
        result = e
    finally:
        # Bug fix: the original never cleaned up the TaskQueueManager.
        if tqm is not None:
            tqm.cleanup()
    return result
def run_adhoc(): variable_manager.extra_vars = { "ansible_ssh_user": "******", "ansible_ssh_pass": "******" } # 增加外部变量 # 构建pb, 这里很有意思, 新版本运行ad-hoc或playbook都需要构建这样的pb, 只是最后调用play的类不一样 # :param name: 任务名,类似playbook中tasks中的name # :param hosts: playbook中的hosts # :param tasks: playbook中的tasks, 其实这就是playbook的语法, 因为tasks的值是个列表,因此可以写入多个task play_source = { "name": "Ansible Ad-Hoc", "hosts": "localhost", "gather_facts": "no", "tasks": [ {"action": {"module": "shell", "args": "w"}} ] } play = Play().load(play_source, variable_manager=variable_manager, loader=loader) tqm = None try: tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=None, stdout_callback='minimal', run_tree=False, ) result = tqm.run(play) print result finally: if tqm is not None: tqm.cleanup()
def run(self):
    """Execute one ad-hoc play (``shell ls``) against the configured host list."""
    source = {
        'name': "Ansible Play",
        'hosts': self.host_list,
        'gather_facts': 'no',
        'tasks': [{'action': {'module': 'shell', 'args': 'ls'}}],
    }
    play = Play().load(source, variable_manager=self.variable_manager, loader=self.loader)

    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            # custom callback instead of the ``default`` callback plugin
            stdout_callback=self.results_callback,
        )
        result = manager.run(play)
    finally:
        if manager is not None:
            manager.cleanup()
def run_module(self, hosts, module_name, module_args):
    """Run one module ad-hoc on *hosts*, collecting output via ModuleResultCallback."""
    source = {
        'name': "Ansible Play",
        'hosts': hosts,
        'gather_facts': 'no',
        'tasks': [{'action': {'module': module_name, 'args': module_args}}],
    }
    play = Play().load(source, variable_manager=self.variable_manager, loader=self.loader)
    self.results_callback = ModuleResultCallback()

    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            # custom callback instead of the ``default`` callback plugin
            stdout_callback=self.results_callback,
        )
        result = manager.run(play)
    finally:
        if manager is not None:
            manager.cleanup()
def run(self, play_data):
    """Run one pre-built play dict and return the TQM exit code.

    Example::

        play_data = dict(
            name="Ansible Ad-Hoc", hosts=pattern, gather_facts=True,
            tasks=[dict(action=dict(module='service',
                                    args={'name': 'vsftpd', 'state': 'restarted'}),
                        async=async, poll=poll)]
        )
    """
    self._prepare_run()
    play = Play().load(play_data, variable_manager=self.variable_manager, loader=self.loader)
    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.cb,
            run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
            run_tree=False,
        )
        return manager.run(play)
    finally:
        if manager:
            manager.cleanup()
def run(self):
    """Run every queued play on one TaskQueueManager, always cleaning up."""
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.vmanager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.callback,
        )
        # Idiom fix: only the plays are needed, not the mapping keys, so
        # iterate .values() instead of unpacking and discarding names.
        for play in self.plays.values():
            tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
def run_command(cmd, host):
    """Run *cmd* through the shell module on *host*; return captured stdout lines.

    The debug task echoes the registered shell output so the callback prints it;
    stdout is temporarily redirected to capture that output.
    """
    callback_ob = callback_class()
    play_source = dict(
        name="Ansible Play",
        hosts=host,
        gather_facts='no',
        tasks=[
            dict(action=dict(module='shell', args=cmd), register='shell_out'),
            dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}')))
        ]
    )
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    task = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        options=options,
        passwords=passwords,
        stdout_callback=callback_ob,
    )
    # redirecting stdout to grab the output from the task
    normal_stdout = sys.stdout
    sys.stdout = new_stdout = StringIO()
    try:
        result = task.run(play)
    finally:
        # Bug fixes: restore stdout even when run() raises, and clean up the
        # TaskQueueManager (the original did neither).
        sys.stdout = normal_stdout
        task.cleanup()
    # Bug fix: the buffer position is at the end after writing, so readlines()
    # always returned [] -- rewind before reading.
    new_stdout.seek(0)
    return new_stdout.readlines()
def my_runner(host_list, module_name, module_args):
    """Run one module ad-hoc on *host_list*, writing callback output to File_PATH.

    Returns the TaskQueueManager exit code.
    """
    variable_manager.extra_vars = {}  # extra/external variables (none here)
    # In the 2.x API both ad-hoc runs and playbooks are described by a play
    # dict; tasks is a list, so several tasks could be given.
    play_source = {"name": "Ansible Ad-Hoc", "hosts": host_list, "gather_facts": "no", "tasks": [{"action": {"module": module_name, "args": module_args}}]}
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=None,
            stdout_callback='minimal',
        )
        saved_stdout = sys.stdout
        try:
            with open(File_PATH, 'w+') as f:
                sys.stdout = f  # redirect callback output into the file
                result = tqm.run(play)
        finally:
            # Bug fix: the original only restored stdout on success; if run()
            # raised, sys.stdout stayed bound to a closed file object.
            sys.stdout = saved_stdout
        return result
    finally:
        if tqm is not None:
            tqm.cleanup()
def run(self, host_list, module_name, module_args,):
    """
    Run a single module ad-hoc (like ``ansible -m``).

    module_name: ansible module name
    module_args: ansible module args
    """
    source = {
        'name': "Ansible Play",
        'hosts': host_list,
        'gather_facts': 'no',
        'tasks': [{'action': {'module': module_name, 'args': module_args}}],
    }
    play = Play().load(source, variable_manager=self.variable_manager, loader=self.loader)

    self.callback = ResultsCollector()
    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
        )
        manager._stdout_callback = self.callback
        result = manager.run(play)
    finally:
        if manager is not None:
            manager.cleanup()
def run(self, tasks, pattern, play_name='Ansible Ad-hoc', gather_facts='no',):
    """
    Execute *tasks* ad-hoc against the hosts matching *pattern*.

    :param gather_facts: 'yes'/'no'
    :param tasks: [{'action': {'module': 'shell', 'args': 'ls'}, ...}, ]
    :param pattern: all, *, or others
    :param play_name: The play name
    :return: the results callback holding per-host results
    """
    self.check_pattern(pattern)
    callback = self.results_callback_class()
    play = Play().load(
        dict(
            name=play_name,
            hosts=pattern,
            gather_facts=gather_facts,
            tasks=self.clean_tasks(tasks),
        ),
        variable_manager=self.variable_manager,
        loader=self.loader,
    )
    manager = TaskQueueManager(
        inventory=self.inventory,
        variable_manager=self.variable_manager,
        loader=self.loader,
        options=self.options,
        stdout_callback=callback,
        passwords=self.options.passwords,
    )
    try:
        manager.run(play)
        return callback
    except Exception as e:
        raise AnsibleError(e)
    finally:
        manager.cleanup()
        self.loader.cleanup_all_tmp_files()
def main(host_list,module,args): Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check']) # initialize needed objects variable_manager = VariableManager() loader = DataLoader() options = Options(connection='smart', module_path='xxx', forks=50, remote_user='******', ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, become_user=None, verbosity=None, check=False) passwords = dict(sshpass=None, becomepass=None) # create inventory and pass to var manager inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list) variable_manager.set_inventory(inventory) # create play with tasks play_source = dict( name = "Ansible Play", hosts = host_list, gather_facts = 'no', tasks = [ dict(action=dict(module=module, args=args)) ] ) play = Play().load(play_source, variable_manager=variable_manager, loader=loader) # actually run it tqm = None callback = ResultsCollector() try: tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, ) tqm._stdout_callback = callback tqm.run(play) return callback.results[0]['tasks'][0]['hosts'] finally: if tqm is not None: tqm.cleanup()
def main():
    """Run ``/usr/bin/uptime`` on a fixed host list and print per-host outcomes."""
    host_list = ['localhost', 'www.example.com', 'www.google.com']
    # TaskQueueManager expects an argparse-like options object; fake one.
    Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'remote_user',
                                     'private_key_file', 'ssh_common_args', 'ssh_extra_args',
                                     'sftp_extra_args', 'scp_extra_args', 'become',
                                     'become_method', 'become_user', 'verbosity', 'check'])
    # initialize needed objects
    variable_manager = VariableManager()
    loader = DataLoader()
    options = Options(connection='smart', module_path='/usr/share/ansible', forks=100,
                      remote_user=None, private_key_file=None, ssh_common_args=None,
                      ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None,
                      become=None, become_method=None, become_user=None,
                      verbosity=None, check=False)
    passwords = dict()
    # create inventory and pass to var manager
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list)
    variable_manager.set_inventory(inventory)
    # create play with tasks
    play_source = dict(
        name="Ansible Play",
        hosts=host_list,
        gather_facts='no',
        tasks=[dict(action=dict(module='command', args=dict(cmd='/usr/bin/uptime')))]
    )
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    # actually run it
    tqm = None
    callback = ResultsCollector()
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
        )
        tqm._stdout_callback = callback
        result = tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
    # Report each host under the bucket the callback filed it in.
    print("UP ***********")
    for host, result in callback.host_ok.items():
        print('{} >>> {}'.format(host, result._result['stdout']))
    print("FAILED *******")
    for host, result in callback.host_failed.items():
        print('{} >>> {}'.format(host, result._result['msg']))
    print("DOWN *********")
    for host, result in callback.host_unreachable.items():
        print('{} >>> {}'.format(host, result._result['msg']))
def run(self):
    """Run the prepared play; return the dict collected by ResultCallback."""
    result = {}
    results_callback = ResultCallback()
    # Bug fix: tqm must be predefined -- if TaskQueueManager() itself raised,
    # the finally clause referenced an unbound name and masked the real error.
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=results_callback
        )
        tqm.run(self.play)
    finally:
        result = results_callback.result
        if tqm is not None:
            tqm.cleanup()
    return result
def run(self, host_list, module_name, module_args,):
    """
    Run a module ad-hoc and collect per-host output.

    module_name: ansible module name
    module_args: ansible module args
    Returns a dict with 'success'/'failed'/'unreachable' buckets keyed by host.
    """
    self.results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
    # TaskQueueManager expects an argparse-like options object; fake one.
    Options = namedtuple('Options', ['connection','module_path', 'forks', 'timeout', 'remote_user',
                                     'ask_pass', 'private_key_file', 'ssh_common_args', 'ssh_extra_args',
                                     'sftp_extra_args', 'scp_extra_args', 'become', 'become_method',
                                     'become_user', 'ask_value_pass', 'verbosity', 'check'])
    options = Options(connection='smart', module_path='/usr/share/ansible', forks=100, timeout=10,
                      remote_user='******', ask_pass=False, private_key_file=None, ssh_common_args=None,
                      ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None,
                      become_method=None, become_user='******', ask_value_pass=False,
                      verbosity=None, check=False)
    passwords = dict(sshpass=None, becomepass=None)
    # create play with tasks
    play_source = dict(
        name="Ansible Play",
        hosts=host_list,
        gather_facts='no',
        tasks=[dict(action=dict(module=module_name, args=module_args))]
    )
    play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
    # actually run it
    tqm = None
    callback = ResultsCollector()
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=options,
            passwords=passwords,
        )
        tqm._stdout_callback = callback
        result = tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
    # Bug fix: some modules omit 'stdout'/'stderr'; .get() without a default
    # returned None and 'None + None' raised TypeError -- default to ''.
    for host, result in callback.host_ok.items():
        self.results_raw['success'][host] = result._result.get('stdout', '') + result._result.get('stderr', '')
    for host, result in callback.host_failed.items():
        self.results_raw['failed'][host] = result._result.get('stdout', '') + result._result.get('stderr', '')
    for host, result in callback.host_unreachable.items():
        self.results_raw['unreachable'][host] = result._result['msg']
    logger.info(self.results_raw)
    return self.results_raw
def _run_play(self, play_src):
    """Build options and extra-vars, run *play_src*, and return the resulting vars."""
    self.options = options = self.Options(**self.options_args)
    vm = self.variable_manager
    vm.extra_vars = load_extra_vars(loader=self.loader, options=options)
    play = Play().load(play_src, loader=self.loader, variable_manager=vm)

    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=vm,
            loader=self.loader,
            options=options,
            passwords={},
            stdout_callback='default',
        )
        manager.run(play)
        return vm.get_vars(self.loader)
    finally:
        if manager is not None:
            manager.cleanup()
def run(self, tasks, pattern, play_name='Ansible Ad-hoc', gather_facts='no'):
    """
    Execute *tasks* ad-hoc against hosts matching *pattern*.

    :param tasks: [{'action': {'module': 'shell', 'args': 'ls'}, ...}, ]
    :param pattern: all, *, or others
    :param play_name: The play name
    :param gather_facts: 'yes'/'no'
    :return: the results callback
    """
    self.check_pattern(pattern)
    self.results_callback = self.get_result_callback()
    cleaned = self.clean_tasks(tasks)
    # Newer ansible reads CLI options from the global context object
    # instead of an ``options=`` argument.
    context.CLIARGS = ImmutableDict(self.options)
    play = Play().load(
        dict(name=play_name, hosts=pattern, gather_facts=gather_facts, tasks=cleaned),
        variable_manager=self.variable_manager,
        loader=self.loader,
    )
    manager = TaskQueueManager(
        inventory=self.inventory,
        variable_manager=self.variable_manager,
        loader=self.loader,
        stdout_callback=self.results_callback,
        passwords={"conn_pass": self.options.get("password", "")}
    )
    try:
        manager.run(play)
        return self.results_callback
    except Exception as e:
        raise AnsibleError(e)
    finally:
        if manager is not None:
            manager.cleanup()
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
def main(args):
    """Run ``hostname -f`` (and echo it via debug) over SSH on the hosts in *args*."""
    # Options definition
    # Custom tuple to store playbook options
    Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'become',
                                     'become_method', 'become_user', 'check'])
    # Object initialization
    variable_manager = VariableManager()
    loader = DataLoader()
    options = Options(connection='ssh',module_path='library', forks=100, become=None,
                      become_method=None, become_user=None, check=False)
    passwords = {}
    # Dynamic inventory
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=args)
    # Inventory assignment
    variable_manager.set_inventory(inventory)
    # Play creation with tasks
    play_source = dict(
        name="Ansible Play",
        hosts=args,
        gather_facts='no',
        tasks=[
            dict(action=dict(module='shell', args='hostname -f'), register='shell_out'),
            dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}')))
        ]
    )
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    # Running it
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
            stdout_callback='default'
        )
        result = tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
def run_play(self, host_list, module_name, module_args):
    """
    Run one module as a play and return the captured log as JSON.

    :param host_list: list of target hosts
    :param module_name: module name (string)
    :param module_args: module args (string)
    """
    play = None
    # create play with tasks
    play_source = dict(
        name="Ansible Play",
        hosts=host_list,
        gather_facts='no',
        tasks=[dict(action=dict(module=module_name, args=module_args))]
    )
    play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
    # actually run it
    tqm = None
    display = LogDisplay(logname=self.job_id)
    callback = CALLBACKMODULE[CALLBACK](display=display)
    if module_name == 'backup':
        # Special case for the backup module: the minimal callback must be
        # used, otherwise the file-name attribute cannot be read back.
        callback = minimal_callback(display=display)
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
        )
        tqm._stdout_callback = callback
        tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
    return display.get_log_json()
def main(argv=sys.argv[1:]): Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check']) # initialize needed objects variable_manager = VariableManager() loader = DataLoader() options = Options(connection='local', module_path='/path/to/mymodules', forks=100, become=None, become_method=None, become_user=None, check=False) passwords = dict(vault_pass='******') # create inventory and pass to var manager inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost') variable_manager.set_inventory(inventory) # create play with tasks play_source = dict(name="Ansible Play", hosts='localhost', gather_facts='no', tasks=[ dict(action=dict(module='shell', args='uname -a'), register='shell_out'), dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}'))) ] ) play = Play().load(play_source, variable_manager=variable_manager, loader=loader) # actually run it tqm = None try: tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, stdout_callback='default', ) result = tqm.run(play) print result finally: if tqm is not None: tqm.cleanup()
def test_base(self):
    """Smoke-test every inventory under test/inventory through dynlxc and ansible."""
    test_inv_dir = 'test/inventory'
    for inv in os.listdir(test_inv_dir):
        print "Processing ", inv
        res = dynlxc.main(os.path.join(test_inv_dir, inv), '')
        variable_manager = VariableManager()
        loader = DataLoader()
        # The mocked subprocess hands the dynamic-inventory JSON back on stdout.
        self.mock_rv.communicate.return_value = [
            json.dumps(res), 'mocked_err']
        try:
            inventory = Inventory(
                loader=loader,
                variable_manager=variable_manager,
                host_list='inventory/dynlxc.py'
            )
        except Exception as err:
            raise Exception("Inventory file {0} processing result '{1}' "
                            "failed with {2}".format(inv, res, err))
        variable_manager.set_inventory(inventory)
        play_source = dict(name="Ansible Play", hosts='localhost', gather_facts='no')
        # If a matching real playbook exists, merge its first play in.
        playbook = os.path.abspath(os.path.join(test_inv_dir, '../playbooks', inv))
        if os.path.isfile(playbook):
            with open(playbook) as fh:
                real_playbook = yaml.load(fh)[0]
            play_source.update(real_playbook)
        play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
        tqm = None
        try:
            tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                options=self.options,
                passwords=None,
                stdout_callback='default',
            )
            result = tqm.run(play)
            assert result == 0, ("Ansible playbook exitcode "
                                 "different from 0")
        finally:
            if tqm is not None:
                tqm.cleanup()
def run(play):
    """Run *play* using the module-level ansible objects; return (tqm, results)."""
    manager = None
    results = None
    try:
        manager = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
            stdout_callback='default',
        )
        results = manager.run(play)
    finally:
        if manager is not None:
            manager.cleanup()
    # NOTE: the returned manager has already been cleaned up.
    return manager, results
def execute_tasks(play_name, tasks, hosts, host_list=os.path.join(DIRNAME, 'ansible_hosts.py'), callback="default"):
    """Run *tasks* as a play named *play_name* on *hosts*; return the TQM exit code."""
    # NOTICE: Here is a trick.  The target host must always be present in the
    # host list, but the list is not known in advance -- so the host is
    # appended on the fly whenever it is missing.
    if hosts not in ansible_hosts.hosts["all"]["hosts"]:
        ansible_hosts.hosts["all"]["hosts"].append(hosts)

    # initialize needed objects
    variable_manager = VariableManager()
    loader = DataLoader()
    # create inventory and pass to var manager
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list)
    variable_manager.set_inventory(inventory)

    # create play with tasks
    play = Play().load(
        {'name': play_name, 'hosts': hosts, 'gather_facts': 'no', 'tasks': tasks},
        variable_manager=variable_manager,
        loader=loader,
    )

    Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'become',
                                     'become_method', 'become_user', 'check'])
    options = Options(connection=None, module_path=None, forks=10, become=None,
                      become_method=None, become_user=None, check=False)

    # actually run it
    manager = None
    try:
        manager = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords={},
            stdout_callback=callback,
        )
        return manager.run(play)
    finally:
        if manager is not None:
            manager.cleanup()
def run(self):
    """Build the play from self.create_play_tasks(), run it, and return
    (exit code, results collector)."""
    self.variable_manager.set_inventory(self.inventory)
    play = Play().load(self.create_play_tasks(), variable_manager=self.variable_manager, loader=self.loader)
    callback = ResultsCollector()
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords
        )
        tqm._stdout_callback = callback
        result = tqm.run(play)
    finally:
        # Bug fix: the original never called cleanup(), leaking worker
        # processes/temp files on every run.
        if tqm is not None:
            tqm.cleanup()
    return result, callback
def run(self, playbook):
    """Load *playbook* and run its first play; return (exit code, callback result)."""
    # Only a single-play playbook.yml is supported.
    pb = Playbook.load(playbook, variable_manager=self.variable_manager, loader=self.loader)
    play = pb.get_plays()[0]

    manager = None
    try:
        manager = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords={'conn_pass': None, 'become_pass': None},
            stdout_callback=self.callback,
        )
        return manager.run(play), self.callback.result
    finally:
        if manager is not None:
            manager.cleanup()
def ansible_play(self):
    """Run the pre-built play; raise ValueError on a non-zero exit code,
    otherwise return self.return_results."""
    # run it
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.results_callback,
            # stdout_callback='default',
        )
        result = tqm.run(self.play)
    finally:
        if tqm is not None:
            tqm.cleanup()
    #self.inventory.clear_pattern_cache()
    # NOTE(review): the "resturn" typo below is in the runtime error message
    # and is deliberately left unchanged here.
    if result != 0:
        raise ValueError('SSH Command Failed, resturn: %s' % result)
    return self.return_results
class PlaybookExecutor:

    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        self._unreachable_hosts = dict()
        # Listing/syntax-check invocations never execute tasks, so no
        # TaskQueueManager is created for them.
        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''
        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

                if self._tqm is None:
                    # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path)))

                for play in plays:
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = var.get("private", True)
                            confirm = var.get("confirm", False)
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            # Extra vars passed on the command line win over
                            # interactive prompting.
                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                else:
                                    # we are either in --list-<option> or syntax check
                                    play.vars[vname] = default

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if self._options.syntax:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        entry['plays'].append(new_play)
                    else:
                        self._tqm._unreachable_hosts.update(self._unreachable_hosts)
                        previously_failed = len(self._tqm._failed_hosts)
                        previously_unreachable = len(self._tqm._unreachable_hosts)
                        break_play = False
                        # we are actually running plays
                        for batch in self._get_serialized_batches(new_play):
                            if len(batch) == 0:
                                self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                                self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                                break

                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)

                            # break the play if the result equals the special return code
                            if result == self._tqm.RUN_FAILED_BREAK_PLAY:
                                result = self._tqm.RUN_FAILED_HOSTS
                                break_play = True

                            # check the number of failures here, to see if they're above the maximum
                            # failure percentage allowed, or if any errors are fatal. If either of those
                            # conditions are met, we break out, otherwise we only break out if the entire
                            # batch failed
                            failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                                (previously_failed + previously_unreachable)
                            if new_play.max_fail_percentage is not None and \
                                int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
                                break_play = True
                                break
                            elif len(batch) == failed_hosts_count:
                                break_play = True
                                break

                            # save the unreachable hosts from this batch
                            self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                            # if the last result wasn't zero or 3 (some hosts were unreachable),
                            # break out of the serial batch loop
                            if result not in (self._tqm.RUN_OK, self._tqm.RUN_UNREACHABLE_HOSTS):
                                break

                        if break_play:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    if C.RETRY_FILES_ENABLED:
                        retries = set(self._tqm._failed_hosts.keys())
                        retries.update(self._tqm._unreachable_hosts.keys())
                        retries = sorted(retries)
                        if len(retries) > 0:
                            if C.RETRY_FILES_SAVE_PATH:
                                basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
                            elif playbook_path:
                                basedir = os.path.dirname(playbook_path)
                            else:
                                basedir = '~/'

                            (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                            filename = os.path.join(basedir, "%s.retry" % retry_name)
                            if self._generate_retry_inventory(filename, retries):
                                display.display("\tto retry, use: --limit @%s\n" % filename)

                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._tqm.cleanup()
            if self._loader:
                self._loader.cleanup_all_tmp_files()

        if self._options.syntax:
            display.display("No issues encountered")
            return result

        return result

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts, subdivided into batches based on
        the serial size specified in the play.
        '''
        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts)

        # check to see if the serial number was specified as a percentage,
        # and convert it to an integer value based on the number of hosts
        if isinstance(play.serial, string_types) and play.serial.endswith('%'):
            serial_pct = int(play.serial.replace("%", ""))
            serial = int((serial_pct / 100.0) * len(all_hosts)) or 1
        else:
            if play.serial is None:
                serial = -1
            else:
                serial = int(play.serial)

        # if the serial count was not specified or is invalid, default to
        # a list of all hosts, otherwise split the list of hosts into chunks
        # which are based on the serial size
        if serial <= 0:
            return [all_hosts]
        else:
            serialized_batches = []
            while len(all_hosts) > 0:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        play_hosts.append(all_hosts.pop(0))
                serialized_batches.append(play_hosts)
            return serialized_batches

    def _generate_retry_inventory(self, retry_path, replay_hosts):
        '''
        Called when a playbook run fails. It generates an inventory which
        allows re-running on ONLY the failed hosts.  This may duplicate some
        variable information in group_vars/host_vars but that is ok, and expected.
        '''
        try:
            makedirs_safe(os.path.dirname(retry_path))
            with open(retry_path, 'w') as fd:
                for x in replay_hosts:
                    fd.write("%s\n" % x)
        except Exception as e:
            display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_str(e)))
            return False

        return True
class PlaybookExecutor:
    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.

    NOTE(review): this variant still takes an explicit ``display`` argument and
    uses Python-2-era constructs (``basestring``, ``raw_input``) — it targets
    early Ansible 2.0 on Python 2; confirm before running on Python 3.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self.passwords = passwords

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # listing/syntax modes never execute tasks, so no TQM is needed
        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                display=display,
                options=options,
                passwords=self.passwords)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.

        Returns the TQM result code of the last play run, or (in listing
        modes) a list of per-playbook entry dicts.
        '''
        # allow Ctrl-C to trigger an orderly TQM shutdown
        signal.signal(signal.SIGINT, self._cleanup)

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

                if self._tqm is None:
                    # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []

                i = 1
                plays = pb.get_plays()
                self._display.vv('%d plays in %s' % (len(plays), playbook_path))

                for play in plays:
                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            if 'name' not in var:
                                raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds)

                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = var.get("private", True)
                            confirm = var.get("confirm", False)
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            # only prompt when the play does not already define the var
                            if vname not in play.vars:
                                self._tqm.send_callback(
                                    'v2_playbook_on_vars_prompt',
                                    vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                play.vars[vname] = self._do_var_prompt(
                                    vname, private, prompt, encrypt, confirm, salt_size, salt, default)

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if self._options.syntax:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        pname = new_play.get_name().strip()
                        if pname == 'PLAY: <no name specified>':
                            pname = 'PLAY: #%d' % i
                        p = {'name': pname}

                        if self._options.listhosts:
                            p['pattern'] = play.hosts
                            p['hosts'] = set(self._inventory.get_hosts(new_play.hosts))
                        # TODO: play tasks are really blocks, need to figure out how to get task objects from them
                        elif self._options.listtasks:
                            p['tasks'] = []
                            for task in play.get_tasks():
                                p['tasks'].append(task)
                                # p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})
                        elif self._options.listtags:
                            p['tags'] = set(new_play.tags)
                            for task in play.get_tasks():
                                # NOTE(review): updating with the task object itself looks wrong;
                                # the commented-out line suggests task.tags was intended — confirm.
                                p['tags'].update(task)
                                # p['tags'].update(task.tags)
                        entry['plays'].append(p)

                    else:
                        # make sure the tqm has callbacks loaded
                        self._tqm.load_callbacks()

                        # we are actually running plays
                        for batch in self._get_serialized_batches(new_play):
                            if len(batch) == 0:
                                self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                                self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                                break
                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)
                            # if the last result wasn't zero, break out of the serial batch loop
                            if result != 0:
                                break

                        # if the last result wasn't zero, break out of the play loop
                        if result != 0:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._cleanup()

        if self._options.syntax:
            # bug fix: was ``self.display.display(...)`` — only ``self._display``
            # is ever assigned (in __init__), so --syntax runs raised AttributeError
            self._display.display("No issues encountered")
            return result

        # FIXME: this stat summary stuff should be cleaned up and moved
        # to a new method, if it even belongs here...
        self._display.banner("PLAY RECAP")

        hosts = sorted(self._tqm._stats.processed.keys())
        for h in hosts:
            t = self._tqm._stats.summarize(h)
            # colored copy for the screen, uncolored copy for the log
            self._display.display(
                "%s : %s %s %s %s" % (
                    hostcolor(h, t),
                    colorize('ok', t['ok'], 'green'),
                    colorize('changed', t['changed'], 'yellow'),
                    colorize('unreachable', t['unreachable'], 'red'),
                    colorize('failed', t['failures'], 'red')),
                screen_only=True)
            self._display.display(
                "%s : %s %s %s %s" % (
                    hostcolor(h, t, False),
                    colorize('ok', t['ok'], None),
                    colorize('changed', t['changed'], None),
                    colorize('unreachable', t['unreachable'], None),
                    colorize('failed', t['failures'], None)),
                log_only=True)

        self._display.display("", screen_only=True)
        # END STATS STUFF

        return result

    def _cleanup(self, signum=None, framenum=None):
        '''Tear down the TQM workers; also installed as the SIGINT handler.'''
        return self._tqm.cleanup()

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts, subdivided into batches based on
        the serial size specified in the play.
        '''
        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts)

        # check to see if the serial number was specified as a percentage,
        # and convert it to an integer value based on the number of hosts
        if isinstance(play.serial, basestring) and play.serial.endswith('%'):
            serial_pct = int(play.serial.replace("%", ""))
            serial = int((serial_pct / 100.0) * len(all_hosts))
        else:
            serial = int(play.serial)

        # if the serial count was not specified or is invalid, default to
        # a list of all hosts, otherwise split the list of hosts into chunks
        # which are based on the serial size
        if serial <= 0:
            return [all_hosts]
        else:
            serialized_batches = []
            while len(all_hosts) > 0:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        play_hosts.append(all_hosts.pop(0))
                serialized_batches.append(play_hosts)
            return serialized_batches

    def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None,
                       confirm=False, salt_size=None, salt=None, default=None):
        '''
        Interactively prompt the operator for the value of a vars_prompt
        variable, optionally confirming it, and fall back to *default*
        when the answer is empty.
        '''
        if prompt and default is not None:
            msg = "%s [%s]: " % (prompt, default)
        elif prompt:
            msg = "%s: " % prompt
        else:
            msg = 'input for %s: ' % varname

        def do_prompt(prompt, private):
            # encode for the terminal; fall back to the locale's preferred
            # encoding when stdout is piped and has no encoding set
            if sys.stdout.encoding:
                msg = prompt.encode(sys.stdout.encoding)
            else:
                # when piping the output, or at other times when stdout
                # may not be the standard file descriptor, the stdout
                # encoding may not be set, so default to something sane
                msg = prompt.encode(locale.getpreferredencoding())
            if private:
                return getpass.getpass(msg)
            return raw_input(msg)

        if confirm:
            # keep asking until both entries match
            while True:
                result = do_prompt(msg, private)
                second = do_prompt("confirm " + msg, private)
                if result == second:
                    break
                display("***** VALUES ENTERED DO NOT MATCH ****")
        else:
            result = do_prompt(msg, private)

        # if result is false and default is not None
        if not result and default is not None:
            result = default

        # FIXME: make this work with vault or whatever this old method was
        # if encrypt:
        #     result = utils.do_encrypt(result, encrypt, salt_size, salt)

        # handle utf-8 chars
        # FIXME: make this work
        # result = to_unicode(result, errors='strict')
        return result
def exec_ansible(host, tasks, remote_user='******', become=False, become_user=None): # initialize needed objects loader = DataLoader() Options = namedtuple('Options', [ 'connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check', 'diff', 'remote_user']) options = Options( connection='smart', module_path=MODULE_PATH, forks=200, become=become, become_method='sudo', become_user=become_user, remote_user=remote_user, check=False, diff=False) passwords = dict(vault_pass='******') # Instantiate our ResultCallback for handling results as they come in results_callback = ResultCallback() # create inventory and pass to var manager inventory = InventoryManager(loader=loader) inventory.add_host(host) variable_manager = VariableManager(loader=loader, inventory=inventory) # create play with tasks play_source = dict( name="Ansible Play", hosts=[host], gather_facts='no', tasks=tasks) play = Play().load( play_source, variable_manager=variable_manager, loader=loader) # actually run it tqm = None try: display.warning(tasks) tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, stdout_callback=results_callback ) tqm.run(play) except Exception as e: display.warning('error') display.warning(e) finally: if tqm is not None: tqm.cleanup() def get_result(): results_raw = {'success': {}, 'failed': {}, 'unreachable': {}} for _hosts, result in results_callback.host_ok.items(): results_raw['success'][_hosts] = result for _hosts, result in results_callback.host_failed.items(): results_raw['failed'][_hosts] = result for _hosts, result in results_callback.host_unreachable.items(): results_raw['unreachable'][_hosts] = result return results_raw _get_result = get_result() return _get_result
# create inventory and pass to var manager inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost') variable_manager.set_inventory(inventory) # create play with tasks play_source = dict( name = "Ansible Play", hosts = 'localhost', gather_facts = 'no', tasks = [ dict(action=dict(module='debug', args=(msg='Hello Galaxy!'))) ] ) play = Play().load(play_source, variable_manager=variable_manager, loader=loader) # actually run it tqm = None try: tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, stdout_callback='default', ) result = tqm.run(play) finally: if tqm is not None: tqm.cleanup()
def chaosansible_run(
    host_list: list = ("localhost",),
    configuration: Configuration = None,
    facts: bool = False,
    become: bool = False,
    run_once: bool = False,
    ansible: dict = None,
    num_target: str = "all",
    secrets: Secrets = None,
):
    """
    Run a task through ansible and eventually gather facts from host

    Bug fixes vs. the previous version:
    * the host_list default was ``("localhost")`` — i.e. the *string*
      "localhost" (missing comma), which made ``",".join(new_host_list)``
      join individual characters; it is now a proper one-element tuple.
    * ``ansible`` defaulted to a mutable ``{}`` shared across calls; it now
      defaults to ``None`` (both are falsy, so every ``if ansible:`` check
      behaves exactly as before).
    """
    # Check for correct inputs
    if ansible:
        if ansible.get("module") is None:
            raise InvalidActivity("No ansible module defined")
        if ansible.get("args") is None:
            raise InvalidActivity("No ansible module args defined")

    configuration = configuration or {}

    # Ansible configuration elements
    module_path = configuration.get("ansible_module_path")
    become_user = configuration.get("ansible_become_user")
    ssh_key_path = configuration.get("ansible_ssh_private_key")
    ansible_user = configuration.get("ansible_user")
    become_ask_pass = configuration.get("become_ask_pass")
    ssh_extra_args = configuration.get("ansible_ssh_extra_args")

    # The API reads options from this CLI-style global context.
    context.CLIARGS = ImmutableDict(
        connection="smart",
        verbosity=0,
        module_path=module_path,
        forks=10,
        become=become,
        become_method="sudo",
        become_user=become_user,
        check=False,
        diff=False,
        private_key_file=ssh_key_path,
        remote_user=ansible_user,
        ssh_extra_args=ssh_extra_args,
    )

    # Update host_list regarding the number of desired target.
    # Need to generate a new host-list because after being update
    # and will be used later
    if num_target != "all":
        new_host_list = random_host(host_list, int(num_target))
    else:
        new_host_list = host_list[:]

    # Create an inventory; a trailing comma marks a host list (not a file path)
    sources = ",".join(new_host_list)
    if len(new_host_list) == 1:
        sources += ","

    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources=sources)

    # Instantiate callback for storing results
    results_callback = ResultsCollectorJSONCallback()

    variable_manager = VariableManager(loader=loader, inventory=inventory)

    if become_ask_pass:
        passwords = dict(become_pass=become_ask_pass)
    else:
        passwords = None

    # Ansible taskmanager
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        stdout_callback=results_callback,
        run_additional_callbacks=False,
    )

    # Ansible playbook
    play_source = dict(
        name="Ansible Play",
        hosts=new_host_list,
        gather_facts=facts,
        tasks=[
            dict(
                name="facts",
                action=dict(module="debug", args=dict(var="ansible_facts")),
            ),
        ],
    )

    # In cas we only want to gather facts
    if ansible:
        module = ansible.get("module")
        args = ansible.get("args")
        play_source["tasks"].append(
            dict(
                name="task",
                run_once=run_once,
                action=dict(module=module, args=args),
                register="shell_out",
            )
        )

    # Create an ansible playbook
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)

    # Run it
    try:
        result = tqm.run(play)
    finally:
        # always release workers and the loader's temp files
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

    # Remove ansible tmpdir
    shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    if len(results_callback.host_failed) > 0:
        print("Ansible error(s): ")
        for error in results_callback.host_failed:
            print(results_callback.host_failed[error].__dict__)
        raise FailedActivity("Failed to run ansible task")

    elif len(results_callback.host_unreachable) > 0:
        print("Unreachable host(s): ")
        for error in results_callback.host_unreachable:
            print(error)
        raise FailedActivity("At least one target is down")

    else:
        results = {}
        for host, result in results_callback.host_ok.items():
            results[host] = result
        return json.dumps(results)
class PlaybookExecutor:
    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        # unreachable hosts accumulated across batches/plays, re-injected
        # into the TQM before each play so they are skipped
        self._unreachable_hosts = dict()

        # listing/syntax modes never execute tasks, so no TQM is created
        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''
        # allow Ctrl-C to trigger an orderly shutdown
        signal.signal(signal.SIGINT, self._cleanup)

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))

                if self._tqm is None:
                    # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv('%d plays in %s' % (len(plays), playbook_path))

                for play in plays:
                    # relative paths inside the play resolve against the
                    # including file's dir, or the playbook's basedir
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = var.get("private", True)
                            confirm = var.get("confirm", False)
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            # -e extra vars win over prompts
                            if vname not in self._variable_manager.extra_vars:
                                # NOTE(review): this callback is sent before the
                                # self._tqm truthiness check below, so in listing/
                                # syntax modes self._tqm is None here — confirm
                                # whether the call belongs inside the if.
                                self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                if self._tqm:
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                else:
                                    # we are either in --list-<option> or syntax check
                                    play.vars[vname] = default

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if self._options.syntax:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        entry['plays'].append(new_play)

                    else:
                        # skip hosts already found unreachable in earlier plays
                        self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                        # we are actually running plays
                        for batch in self._get_serialized_batches(new_play):
                            if len(batch) == 0:
                                self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                                self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                                break

                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)

                            # and run it...
                            result = self._tqm.run(play=play)

                            # check the number of failures here, to see if they're above the maximum
                            # failure percentage allowed, or if any errors are fatal. If either of those
                            # conditions are met, we break out, otherwise we only break out if the entire
                            # batch failed
                            failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
                            if new_play.any_errors_fatal and failed_hosts_count > 0:
                                break
                            elif new_play.max_fail_percentage is not None and \
                               int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
                                break
                            elif len(batch) == failed_hosts_count:
                                break

                            # clear the failed hosts dictionaires in the TQM for the next batch
                            self._unreachable_hosts.update(self._tqm._unreachable_hosts)
                            self._tqm.clear_failed_hosts()

                        # if the last result wasn't zero or 3 (some hosts were unreachable),
                        # break out of the serial batch loop
                        if result not in (0, 3):
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._cleanup()

        if self._options.syntax:
            display.display("No issues encountered")
            return result

        return result

    def _cleanup(self, signum=None, framenum=None):
        '''Tear down the TQM workers; also installed as the SIGINT handler.'''
        return self._tqm.cleanup()

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts, subdivided into batches based on
        the serial size specified in the play.
        '''
        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts)

        # check to see if the serial number was specified as a percentage,
        # and convert it to an integer value based on the number of hosts
        if isinstance(play.serial, string_types) and play.serial.endswith('%'):
            serial_pct = int(play.serial.replace("%", ""))
            serial = int((serial_pct / 100.0) * len(all_hosts))
        else:
            if play.serial is None:
                serial = -1
            else:
                serial = int(play.serial)

        # if the serial count was not specified or is invalid, default to
        # a list of all hosts, otherwise split the list of hosts into chunks
        # which are based on the serial size
        if serial <= 0:
            return [all_hosts]
        else:
            serialized_batches = []
            while len(all_hosts) > 0:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        # pop from the front so batch order follows inventory order
                        play_hosts.append(all_hosts.pop(0))
                serialized_batches.append(play_hosts)
            return serialized_batches
class Runner(object):
    """
    A simplified Ansible 2.0 API wrapper modeled on the Ansible 1.9 Python API.

    Parameters:
        hosts: inventory source — a list, a comma-separated IP string, or an
            executable file; defaults to /etc/ansible/hosts
        module_name: module to execute
        module_args: module arguments
        forks: concurrency, default 5
        timeout: SSH connection timeout, default 10 seconds
        pattern: host pattern to connect to, default "all"
        remote_user: connection user, default root
        private_key_file: private key file to use
    """

    def __init__(
        self,
        hosts=C.DEFAULT_HOST_LIST,
        module_name=C.DEFAULT_MODULE_NAME,  # * command
        module_args=C.DEFAULT_MODULE_ARGS,  # * 'cmd args'
        forks=C.DEFAULT_FORKS,  # 5
        timeout=C.DEFAULT_TIMEOUT,  # SSH timeout = 10s
        pattern="all",  # all
        remote_user=C.DEFAULT_REMOTE_USER,  # root
        module_path=None,  # dirs of custome modules
        connection_type="smart",
        become=None,
        become_method=None,
        become_user=None,
        check=False,
        passwords=None,
        extra_vars=None,
        private_key_file=None
    ):
        # storage & defaults
        self.pattern = pattern
        self.loader = DataLoader()
        self.module_name = module_name
        self.module_args = module_args
        # validate args before building anything else; raises AnsibleError
        self.check_module_args()
        self.gather_facts = 'no'
        self.resultcallback = ResultCallback()
        self.options = Options(
            connection=connection_type,
            timeout=timeout,
            module_path=module_path,
            forks=forks,
            become=become,
            become_method=become_method,
            become_user=become_user,
            check=check,
            remote_user=remote_user,
            extra_vars=extra_vars or [],
            private_key_file=private_key_file,
            diff=False
        )
        # inventory must exist before the variable manager that wraps it
        self.inventory = MyInventory(host_list=hosts)
        self.variable_manager = VariableManager(self.loader, self.inventory)
        self.variable_manager.extra_vars = load_extra_vars(loader=self.loader, options=self.options)
        self.variable_manager.options_vars = load_options_vars(self.options, "")
        self.passwords = passwords or {}
        # a single-task ad-hoc play built from the requested module/args
        self.play_source = dict(
            name="Ansible Ad-hoc",
            hosts=self.pattern,
            gather_facts=self.gather_facts,
            tasks=[dict(action=dict(module=self.module_name, args=self.module_args))]
        )
        self.play = Play().load(self.play_source, variable_manager=self.variable_manager, loader=self.loader)
        self.runner = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.resultcallback
        )
        # ** end __init__() **

    def run(self):
        """Execute the prepared ad-hoc play and return the callback's result queue."""
        # fail fast when the inventory is empty or the pattern matches nothing
        if not self.inventory.list_hosts("all"):
            raise AnsibleError("Inventory is empty.")
        if not self.inventory.list_hosts(self.pattern):
            raise AnsibleError("pattern: %s dose not match any hosts." % self.pattern)
        try:
            self.runner.run(self.play)
        finally:
            # always release worker processes and the loader's temp files
            if self.runner:
                self.runner.cleanup()
            if self.loader:
                self.loader.cleanup_all_tmp_files()
        return self.resultcallback.result_q

    def check_module_args(self):
        """Raise AnsibleError when a module that requires arguments got none."""
        if self.module_name in C.MODULE_REQUIRE_ARGS and not self.module_args:
            err = "No argument passed to '%s' module." % self.module_name
            raise AnsibleError(err)
def run_ansible(inventory_filename, become=None, hosts="all", forks=10):
    """Run ansible with the provided inventory file and host group."""
    # The API was built around the CLI, so it reads its options from this
    # global context object rather than an explicit argument.
    context.CLIARGS = ImmutableDict(
        connection="ssh",
        module_path=[],
        forks=forks,
        become=become,
        become_method="sudo",
        become_user=None,
        check=False,
        diff=False,
        verbosity=0,
    )

    # DataLoader handles locating and parsing yaml/json/ini content.
    data_loader = DataLoader()
    passwords = dict(vault_pass="******")  # nosec

    # Receives results as they arrive; ansible treats it as a display outlet.
    callback = ResultCallback()

    logging.debug("Reading inventory from: %s", inventory_filename)
    inventory = InventoryManager(loader=data_loader, sources=inventory_filename)

    # Merges all variable sources into one unified view per context.
    variable_manager = VariableManager(loader=data_loader, inventory=inventory)

    # One stat task per remote file whose presence we want to check.
    checked_paths = (
        LAST_SCAN_LOG_FILENAME,
        LAST_DETECTION_FILENAME,
        CLAMAV_DB_FILENAME,
    )
    play_source = dict(
        name="Ansible Play",
        hosts=hosts,
        gather_facts="yes",
        tasks=[
            dict(action=dict(module="stat", get_checksum=False, path=target))
            for target in checked_paths
        ],
    )

    # Play objects are created via .load(), which also builds the task objects.
    play = Play().load(play_source, variable_manager=variable_manager, loader=data_loader)

    tqm = None
    try:
        # The task queue manager forks workers and iterates hosts and tasks.
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=data_loader,
            passwords=passwords,
            stdout_callback=callback,  # Use our custom callback.
        )
        logging.debug("Starting task queue manager with forks=%d.", forks)
        tqm.run(play)
    finally:
        # Child processes and their IPC structures must always be torn down.
        if tqm is not None:
            logging.debug("Cleaning up task queue manager.")
            tqm.cleanup()

    # Remove ansible temporary directory
    logging.debug("Cleaning up temporary file in %s", ANSIBLE_CONST.DEFAULT_LOCAL_TMP)
    shutil.rmtree(ANSIBLE_CONST.DEFAULT_LOCAL_TMP, True)

    return callback.results
def execute(self, *args, **kwargs): """ Puts args and kwargs in a way ansible can understand. Calls ansible and interprets the result. """ assert self.is_hooked_up, "the module should be hooked up to the api" self.module_args = module_args = self.get_module_args(args, kwargs) loader = DataLoader() inventory_manager = SourcelessInventoryManager(loader=loader) for host in self.api.servers: inventory_manager._inventory.add_host(host, group='all') for key, value in self.api.options.extra_vars.items(): inventory_manager._inventory.set_variable('all', key, value) variable_manager = VariableManager(loader=loader, inventory=inventory_manager) play_source = { 'name': "Suitable Play", 'hosts': self.api.servers, 'gather_facts': 'no', 'tasks': [{ 'action': { 'module': self.module_name, 'args': module_args } }] } play = Play.load(play_source, variable_manager=variable_manager, loader=loader) log.info(u'running {}'.format(u'- {module_name}: {module_args}'.format( module_name=self.module_name, module_args=module_args))) start = datetime.utcnow() task_queue_manager = None callback = SilentCallbackModule() try: task_queue_manager = TaskQueueManager( inventory=inventory_manager, variable_manager=variable_manager, loader=loader, options=self.api.options, passwords=getattr(self.api.options, 'passwords', {}), stdout_callback=callback) task_queue_manager.run(play) finally: if task_queue_manager is not None: task_queue_manager.cleanup() log.info(u'took {} to complete'.format(datetime.utcnow() - start)) return self.evaluate_results(callback)
dict(action=dict(module='debug', args=dict( msg='{{shell_out.stdout}}'))) ]) # Create play object, playbook objects use .load instead of init or new methods, # this will also automatically create the task objects from the info provided in play_source #yuki: create manager and execute play play = Play().load(play_source, variable_manager=variable_manager, loader=loader) # Run it - instantiate task queue manager, which takes care of forking and setting up all objects to iterate over host list and tasks tqm = None try: tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, ) result = tqm.run( play ) # most interesting data for a play is actually sent to the callback's methods finally: # we always need to cleanup child procs and the structres we use to communicate with them if tqm is not None: tqm.cleanup() # Remove ansible tmpdir shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
def ansible_command(): # 获取主机组信息 BaseDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) source = os.path.join(BaseDir, '/etc/ansible/hosts') # 实例化loader对象 loader = DataLoader() results_callback = ResultCallback() myinven = InventoryManager(loader=loader, sources=[ source, ]) # 实例化inventory对象 varmanager = VariableManager(loader=loader, inventory=myinven) # 实例化VariableManager对象 context.CLIARGS = ImmutableDict( connection='smart', module_path='/root/.ansible/plugins/modules', forks=10, become=None, become_method=None, become_user=None, check=False, diff=False) # 执行对象和模块 play_data = dict( name="Ansible adhoc example", hosts='all', gather_facts='no', tasks=[ dict(action=dict(module='setup', args=""), register='shell_out'), dict(action=dict(module='debug', args=dict( msg="{{ shell_out }}"))), ], ) play = Play().load(data=play_data, loader=loader, variable_manager=varmanager) passwords = {} tqm = None try: tqm = TaskQueueManager( inventory=myinven, variable_manager=varmanager, loader=loader, passwords=passwords, stdout_callback=results_callback, # Use our custom callback instead of the ``default`` callback plugin, which prints to stdout ) result = tqm.run( play ) # most interesting data for a play is actually sent to the callback's methods finally: # we always need to cleanup child procs and the structres we use to communicate with them if tqm is not None: list = [] dict2 = {} dict2.update(tqm.__dict__) list.append(dict2) dict3 = dict2['hostvars'] list_1 = [] for v in dict3: ip = v list_1.append(ip) for j in list_1: dict4 = dict3[j]['ansible_facts'] for k in dict4: print(k) if k == 'hostname': print(dict4['hostname']) elif k == 'fqdn': print(dict4['fqdn']) elif k == 'uptime_seconds': print(dict4['uptime_seconds']) elif k == 'domain': print(dict4['domain']) elif k == 'memtotal_mb': print(str(dict4['memtotal_mb'] / 1024) + 'M') elif k == 'default_ipv4': print(dict4['default_ipv4']['address'])
class ConsoleCLI(CLI, cmd.Cmd):
    ''' a REPL that allows for running ad-hoc tasks against a chosen inventory
    (based on dominis' ansible-shell).

    Combines the Ansible CLI base class (option parsing, inventory/vault
    setup) with cmd.Cmd (the interactive loop, tab completion, history).
    Every discovered module becomes a `do_<module>` command at runtime.
    '''

    modules = []

    ARGUMENTS = {
        'host-pattern': 'A name of a group in the inventory, a shell-like glob '
                        'selecting hosts in inventory or any combination of the two separated by commas.'
    }

    def __init__(self, args):
        super(ConsoleCLI, self).__init__(args)

        self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'

        self.groups = []
        self.hosts = []
        self.pattern = None
        self.variable_manager = None
        self.loader = None
        self.passwords = dict()

        self.modules = None
        cmd.Cmd.__init__(self)

    def parse(self):
        """Build the option parser and validate the parsed options."""
        self.parser = CLI.base_parser(
            usage='%prog [<host-pattern>] [options]',
            runas_opts=True,
            inventory_opts=True,
            connect_opts=True,
            check_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
            basedir_opts=True,
            desc="REPL console for executing Ansible tasks.",
            epilog="This is not a live session/connection, each task executes in the background and returns it's results.")

        # options unique to shell
        self.parser.add_option(
            '--step',
            dest='step',
            action='store_true',
            help="one-step-at-a-time: confirm each task before running")

        self.parser.set_defaults(cwd='*')

        super(ConsoleCLI, self).parse()

        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)

    def get_names(self):
        # cmd.Cmd hook: include dynamically-added do_*/help_* attributes.
        return dir(self)

    def cmdloop(self):
        # Wrap the cmd loop so Ctrl-C exits cleanly instead of tracebacking.
        try:
            cmd.Cmd.cmdloop(self)
        except KeyboardInterrupt:
            self.do_exit(self)

    def set_prompt(self):
        """Rebuild the prompt: user@pattern (host count)[f:forks], colored by privilege."""
        login_user = self.options.remote_user or getpass.getuser()
        self.selected = self.inventory.list_hosts(self.options.cwd)
        prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks)
        if self.options.become and self.options.become_user in [None, 'root']:
            prompt += "# "
            color = C.COLOR_ERROR
        else:
            prompt += "$ "
            color = C.COLOR_HIGHLIGHT
        self.prompt = stringc(prompt, color)

    def list_modules(self):
        """Return the set of module names discoverable on the module path."""
        modules = set()
        if self.options.module_path:
            for path in self.options.module_path:
                if path:
                    module_loader.add_directory(path)

        module_paths = module_loader._get_paths()
        for path in module_paths:
            if path is not None:
                modules.update(self._find_modules_in_path(path))
        return modules

    def _find_modules_in_path(self, path):
        """Yield module names found under `path`, skipping hidden/ignored files."""
        if os.path.isdir(path):
            for module in os.listdir(path):
                if module.startswith('.'):
                    continue
                elif os.path.isdir(module):
                    self._find_modules_in_path(module)
                elif module.startswith('__'):
                    continue
                elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue
                elif module in C.IGNORE_FILES:
                    continue
                elif module.startswith('_'):
                    fullpath = '/'.join([path, module])
                    if os.path.islink(fullpath):  # avoids aliases
                        continue
                    # deprecated modules: strip the single leading underscore
                    module = module.replace('_', '', 1)

                module = os.path.splitext(module)[0]  # removes the extension
                yield module

    def default(self, arg, forceshell=False):
        """ actually runs modules

        Any input not matching a do_* command lands here: the first word is
        treated as a module name if known, otherwise the whole line runs
        through the `shell` module. Returns False on any failure.
        """
        if arg.startswith("#"):
            return False

        if not self.options.cwd:
            display.error("No host found")
            return False

        if arg.split()[0] in self.modules:
            module = arg.split()[0]
            module_args = ' '.join(arg.split()[1:])
        else:
            module = 'shell'
            module_args = arg

        if forceshell is True:
            module = 'shell'
            module_args = arg

        self.options.module_name = module

        result = None
        try:
            # raw argument strings for free-form modules
            check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
            play_ds = dict(
                name="Ansible Shell",
                hosts=self.options.cwd,
                gather_facts='no',
                tasks=[
                    dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))
                ])
            play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
        except Exception as e:
            display.error(u"Unable to build command: %s" % to_text(e))
            return False

        try:
            cb = 'minimal'  # FIXME: make callbacks configurable
            # now create a task queue manager to execute the play
            self._tqm = None
            try:
                self._tqm = TaskQueueManager(
                    inventory=self.inventory,
                    variable_manager=self.variable_manager,
                    loader=self.loader,
                    options=self.options,
                    passwords=self.passwords,
                    stdout_callback=cb,
                    run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                    run_tree=False,
                )
                result = self._tqm.run(play)
            finally:
                if self._tqm:
                    self._tqm.cleanup()
                if self.loader:
                    self.loader.cleanup_all_tmp_files()

            if result is None:
                display.error("No hosts found")
                return False
        except KeyboardInterrupt:
            display.error('User interrupted execution')
            return False
        except Exception as e:
            display.error(to_text(e))
            # FIXME: add traceback in very very verbose mode
            return False

    def emptyline(self):
        # Do nothing on an empty line (cmd.Cmd would repeat the last command).
        return

    def do_shell(self, arg):
        """
        You can run shell commands through the shell module.

        eg.:
        shell ps uax | grep java | wc -l
        shell killall python
        shell halt -n

        You can use the ! to force the shell module. eg.:
        !ps aux | grep java | wc -l
        """
        self.default(arg, True)

    def do_forks(self, arg):
        """Set the number of forks"""
        if not arg:
            display.display('Usage: forks <number>')
            return
        self.options.forks = int(arg)
        self.set_prompt()

    do_serial = do_forks

    def do_verbosity(self, arg):
        """Set verbosity level"""
        if not arg:
            display.display('Usage: verbosity <number>')
        else:
            display.verbosity = int(arg)
            display.v('verbosity level set to %s' % arg)

    def do_cd(self, arg):
        """
        Change active host/group. You can use hosts patterns as well eg.:
        cd webservers
        cd webservers:dbservers
        cd webservers:!phoenix
        cd webservers:&staging
        cd webservers:dbservers:&staging:!phoenix
        """
        if not arg:
            self.options.cwd = '*'
        elif arg in '/*':
            self.options.cwd = 'all'
        elif self.inventory.get_hosts(arg):
            self.options.cwd = arg
        else:
            display.display("no host matched")

        self.set_prompt()

    def do_list(self, arg):
        """List the hosts in the current group"""
        if arg == 'groups':
            for group in self.groups:
                display.display(group)
        else:
            for host in self.selected:
                display.display(host.name)

    def do_become(self, arg):
        """Toggle whether plays run with become"""
        if arg:
            self.options.become = boolean(arg, strict=False)
            display.v("become changed to %s" % self.options.become)
            self.set_prompt()
        else:
            display.display("Please specify become value, e.g. `become yes`")

    def do_remote_user(self, arg):
        """Given a username, set the remote user plays are run by"""
        if arg:
            self.options.remote_user = arg
            self.set_prompt()
        else:
            display.display(
                "Please specify a remote user, e.g. `remote_user root`")

    def do_become_user(self, arg):
        """Given a username, set the user that plays are run by when using become"""
        if arg:
            self.options.become_user = arg
        else:
            display.display(
                "Please specify a user, e.g. `become_user jenkins`")
            display.v("Current user is %s" % self.options.become_user)
        self.set_prompt()

    def do_become_method(self, arg):
        """Given a become_method, set the privilege escalation method when using become"""
        if arg:
            self.options.become_method = arg
            display.v("become_method changed to %s" % self.options.become_method)
        else:
            display.display(
                "Please specify a become_method, e.g. `become_method su`")

    def do_check(self, arg):
        """Toggle whether plays run with check mode"""
        if arg:
            self.options.check = boolean(arg, strict=False)
            display.v("check mode changed to %s" % self.options.check)
        else:
            display.display(
                "Please specify check mode value, e.g. `check yes`")

    def do_diff(self, arg):
        """Toggle whether plays run with diff"""
        if arg:
            self.options.diff = boolean(arg, strict=False)
            display.v("diff mode changed to %s" % self.options.diff)
        else:
            display.display("Please specify a diff value , e.g. `diff yes`")

    def do_exit(self, args):
        """Exits from the console"""
        sys.stdout.write('\n')
        return -1

    do_EOF = do_exit

    def helpdefault(self, module_name):
        """Show a module's short description and parameter docs, if available."""
        if module_name in self.modules:
            in_path = module_loader.find_plugin(module_name)
            if in_path:
                oc, a, _, _ = plugin_docs.get_docstring(in_path)
                if oc:
                    display.display(oc['short_description'])
                    display.display('Parameters:')
                    for opt in oc['options'].keys():
                        display.display(' ' + stringc(opt, C.COLOR_HIGHLIGHT) + ' ' + oc['options'][opt]['description'][0])
                else:
                    display.error('No documentation found for %s.' % module_name)
            else:
                display.error(
                    '%s is not a valid command, use ? to list all valid commands.'
                    % module_name)

    def complete_cd(self, text, line, begidx, endidx):
        """Tab completion for `cd`: hosts and groups within the current scope."""
        mline = line.partition(' ')[2]
        offs = len(mline) - len(text)

        if self.options.cwd in ('all', '*', '\\'):
            completions = self.hosts + self.groups
        else:
            completions = [
                x.name for x in self.inventory.list_hosts(self.options.cwd)
            ]

        return [
            to_native(s)[offs:] for s in completions
            if to_native(s).startswith(to_native(mline))
        ]

    def completedefault(self, text, line, begidx, endidx):
        """Tab completion for module arguments (completes `key=` tokens)."""
        if line.split()[0] in self.modules:
            mline = line.split(' ')[-1]
            offs = len(mline) - len(text)
            completions = self.module_args(line.split()[0])
            return [s[offs:] + '=' for s in completions if s.startswith(mline)]

    def module_args(self, module_name):
        """Return the option names documented for `module_name`."""
        in_path = module_loader.find_plugin(module_name)
        oc, a, _, _ = plugin_docs.get_docstring(in_path)
        return list(oc['options'].keys())

    def run(self):
        """Entry point: set up inventory/vault, register module commands, start the REPL."""
        super(ConsoleCLI, self).run()

        sshpass = None
        becomepass = None

        # hosts
        if len(self.args) != 1:
            self.pattern = 'all'
        else:
            self.pattern = self.args[0]
        self.options.cwd = self.pattern

        # dynamically add modules as commands
        self.modules = self.list_modules()
        for module in self.modules:
            # bind `module` as a default arg to avoid late-binding closures
            setattr(
                self, 'do_' + module,
                lambda arg, module=module: self.default(module + ' ' + arg))
            setattr(self, 'help_' + module,
                    lambda module=module: self.helpdefault(module))

        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        self.loader, self.inventory, self.variable_manager = self._play_prereqs(
            self.options)

        default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
        vault_ids = self.options.vault_ids
        vault_ids = default_vault_ids + vault_ids
        vault_secrets = self.setup_vault_secrets(
            self.loader,
            vault_ids=vault_ids,
            vault_password_files=self.options.vault_password_files,
            ask_vault_pass=self.options.ask_vault_pass)
        self.loader.set_vault_secrets(vault_secrets)

        hosts = CLI.get_host_list(self.inventory, self.options.subset,
                                  self.pattern)

        self.groups = self.inventory.list_groups()
        self.hosts = [x.name for x in hosts]

        # This hack is to work around readline issues on a mac:
        # http://stackoverflow.com/a/7116997/541202
        if 'libedit' in readline.__doc__:
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab: complete")

        histfile = os.path.join(os.path.expanduser("~"),
                                ".ansible-console_history")
        try:
            readline.read_history_file(histfile)
        except IOError:
            pass

        atexit.register(readline.write_history_file, histfile)
        self.set_prompt()
        self.cmdloop()
self.host_ok[result._host.get_name()] = result def v2_runner_on_failed(self, result, ignore_errors=False): self.host_failed[result._host.get_name()] = result callback = ModelResultsCollector() passwords = dict() tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords, stdout_callback=callback, ) tqm.run(play) #print callback.host_ok.items() result_raw = {'success': {}, 'failed': {}, 'unreachable': {}} for host, result in callback.host_ok.items(): result_raw['success'][host] = result._result for host, result in callback.host_failed.items(): result_raw['failed'][host] = result._result for host, result in callback.host_unreachable.items(): result_raw['unreachable'][host] = result._result print result_raw
def exec_playbook(host_list, username, password, group_name):
    """Provision Greengrass core hosts via an in-memory play.

    Stops the greengrass service, pushes config and certificates from the
    local ``.gg/`` directory, then starts the service again.

    :param host_list: list of host addresses to target
    :param username: SSH user (also used as the play's remote_user)
    :param password: SSH password
    :param group_name: Greengrass group name; used to glob the core certs
    """
    loader = DataLoader()
    passwords = dict()
    inventory_manager = InventoryManager(loader=loader,
                                         sources=','.join(host_list))
    variable_manager = VariableManager(loader=loader,
                                       inventory=inventory_manager)
    # Inject credentials as extra vars so every task can connect.
    variable_manager.extra_vars = {
        'ansible_ssh_user': username,
        'ansible_ssh_pass': password
    }
    play_source = dict(
        name='Greengrass Group Playbook',
        hosts='all',
        remote_user=username,
        gather_facts='yes',
        tasks=[
            dict(name='Set Playbook Facts',
                 action=dict(module='set_fact',
                             args=dict(group_name=group_name))),
            dict(name='Stop Greengrass Core',
                 become=True,
                 action=dict(module='systemd',
                             args=dict(name='greengrass', state='stopped'))),
            dict(name='Copy Greengrass Config',
                 become=True,
                 action=dict(module='copy',
                             args=dict(src='.gg/config/',
                                       dest='/greengrass/config/'))),
            dict(name='Copy Greengrass Root Certs',
                 become=True,
                 action=dict(module='copy',
                             args=dict(src='{{ item }}',
                                       dest='/greengrass/certs/')),
                 with_fileglob=['.gg/certs/root.*']),
            # BUG FIX: the glob previously referenced '{{ groups_name }}',
            # but the fact set above is 'group_name', so the core certs were
            # never matched or copied.
            dict(name='Copy Greengrass Core Certs',
                 become=True,
                 action=dict(module='copy',
                             args=dict(src='{{ item }}',
                                       dest='/greengrass/certs')),
                 with_fileglob=['.gg/certs/{{ group_name }}*']),
            # BUG FIX: 'greengrass' is not an Ansible module name; the stop
            # task above manages the same unit through 'systemd', so start
            # it through 'systemd' as well.
            dict(name='Start Greengrass Core',
                 become=True,
                 action=dict(module='systemd',
                             args=dict(name='greengrass',
                                       enabled=True,
                                       state='started')))
        ])
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory_manager,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
        )
        result = tqm.run(play)
    finally:
        # Always reap worker processes and remove ansible's local tmpdir.
        if tqm is not None:
            tqm.cleanup()
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
class adhoc(Base):
    """Run a single ad-hoc module (built from ``self.options``) against the
    hosts matching a pattern, streaming results through a storage callback."""

    def run(self, inventory_content, pattern='all'):
        """Run the configured ad-hoc module.

        Returns ``(False, <reason>)`` on validation failure and ``True`` once
        the play has been dispatched.
        NOTE(review): the return type is inconsistent (tuple vs bool) --
        callers must handle both.
        """
        self.pattern = pattern
        self.inventory_content = inventory_content
        # Validate module name / required args before doing any work.
        if not self.options.module_name:
            self.logger.error(self.log_prefix + '准备工作失败,原因:执行模块不能为空')
            return (False, '执行模块不能为空,请输入模块名')
        else:
            if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
                self.logger.error(self.log_prefix + '准备工作失败,原因:执行模块参数为空')
                return (False, '执行模块参数为空,请输入模块参数')

        # Register any local plugin directories with ansible's loaders.
        for name, obj in get_all_plugin_loaders():
            name = name  # NOTE(review): no-op assignment; `name` is unused
            if obj.subdir:
                plugin_path = os.path.join('.', obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)

        self._gen_tasks()
        play = Play().load(self.tasks_dict,
                           variable_manager=self.variable_manager,
                           loader=self.loader)

        # NOTE(review): bare except silently maps any inventory error to an
        # empty host list (reported below as "no hosts matched").
        try:
            self.host_list = self.inventory.list_hosts(self.pattern)
        except:
            self.host_list = []
        if len(self.host_list) == 0:
            self.logger.error(self.log_prefix + '准备工作失败,原因:没有匹配主机名')
            return (False, '执行失败,没有匹配主机名')

        self._loading_callback()
        self._tqm = None
        try:
            self._tqm = TaskQueueManager(
                inventory=self.inventory,
                variable_manager=self.variable_manager,
                loader=self.loader,
                options=self.options,
                passwords=self.passwords,
                stdout_callback=self.callback,
                # run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                # run_tree=False,
            )
            self._tqm.run(play)
        finally:
            if self._tqm:
                self._tqm.cleanup()
            if self.loader:
                self.loader.cleanup_all_tmp_files()
        self.logger.info(self.log_prefix + '发送成功')
        return True

    def _gen_tasks(self):
        """Build ``self.tasks_dict`` (the play source) from ``self.options``."""
        # Free-form modules take their args as a raw string.
        check_raw = self.options.module_name in ('command', 'win_command',
                                                 'shell', 'win_shell',
                                                 'script', 'raw')
        args = parse_kv(self.options.module_args, check_raw=check_raw)
        action_obj = {'module': self.options.module_name, 'args': args}
        self.task_list = [{
            'action': action_obj,
            'async': self.options.seconds,
            'poll': self.options.poll_interval
        }]
        self.tasks_dict = {
            'name': self.work_name,
            'hosts': self.pattern,
            'gather_facts': 'no',
            'tasks': self.task_list
        }

    def _loading_callback(self):
        """Select the stdout callback: a Write_Storage instance that persists
        results, falling back to ansible's 'oneline'/'minimal' callbacks."""
        if not self.log_router:
            self.log_router = Routing_Logging()
        self.callback = Write_Storage(self.work_uuid,
                                      self.work_name,
                                      self.username,
                                      self.exec_mode,
                                      self._show_protectfield(),
                                      self.inventory_content,
                                      self.describe,
                                      module_name=self.options.module_name,
                                      module_args=self.options.module_args,
                                      log_router=self.log_router,
                                      pattern=self.pattern,
                                      mongoclient=self.mongoclient)
        # NOTE(review): self.callback was just assigned above, so the elif/
        # else branches below are presumably dead unless Write_Storage can be
        # falsy -- confirm.
        if self.callback:
            self.logger.info(self.log_prefix + '使用程序自己编写的callback函数')
            pass
        elif self.options.one_line:
            self.callback = 'oneline'
        else:
            self.callback = 'minimal'
def run(self):
    """Execute the configured ad-hoc tasks against the hosts in
    ``self.hostinfo`` and collect per-host results.

    Each entry in ``self.hostinfo`` is a dict with keys 'host', 'port',
    'user' and 'password' (assumed from usage below -- TODO confirm).
    Results are appended to ``self.resultinfo`` as
    ``{host: {"message": ..., "code": 0|1|-1}}`` for ok / failed /
    unreachable hosts respectively, and the list is returned.
    """
    host_list = [i.get("host", '0.0.0.0') for i in self.hostinfo]

    # Minimal stand-in for the ansible CLI options object.
    Options = namedtuple('Options', [
        'connection', 'module_path', 'forks', 'remote_user',
        'private_key_file', 'ssh_common_args', 'ssh_extra_args',
        'sftp_extra_args', 'scp_extra_args', 'become', 'become_method',
        'become_user', 'verbosity', 'check', 'diff'
    ])

    # A single-host source string needs a trailing comma, otherwise the
    # InventoryManager treats it as a file path; required for
    # https://github.com/ansible/ansible/blob/devel/lib/ansible/inventory/manager.py#L204
    sources = ','.join(host_list)
    if len(host_list) == 1:
        sources += ','

    # initialize needed objects
    loader = DataLoader()
    options = Options(
        connection='smart',
        module_path=['/usr/share/ansible'],
        forks=100,
        remote_user=None,
        private_key_file=None,
        ssh_common_args=None,
        ssh_extra_args=None,
        sftp_extra_args=None,
        scp_extra_args=None,
        become=None,
        become_method=None,
        become_user=None,
        verbosity=None,
        check=False,
        diff=False,
    )
    passwords = dict(sshpass=None, becomepass=None)

    # Build the inventory and attach per-host connection variables.
    inventory = InventoryManager(loader=loader, sources=sources)
    inventory.add_group('default')
    for host in self.hostinfo:
        inventory.add_host(host=host.get('host'), port=host.get('port'))
        host_obj = inventory.get_host(hostname=host.get('host'))
        # BUG FIX: ansible_ssh_host previously received the Host object
        # itself instead of the address string.
        host_obj.set_variable('ansible_ssh_host', host.get('host'))
        host_obj.set_variable('ansible_ssh_port', host.get('port'))
        host_obj.set_variable('ansible_ssh_user', host.get('user'))
        host_obj.set_variable('ansible_ssh_pass', host.get('password'))
        host_obj.set_variable('ansible_sudo_pass', host.get('password'))
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    # Build the play.
    # NOTE(review): play_source is overwritten on every iteration, so only
    # the LAST entry of self.taskinfo is executed -- confirm whether all
    # tasks should instead be collected into a single task list.
    for task in self.taskinfo:
        play_source = dict(
            name="Ansible API Play",
            hosts=host_list,
            gather_facts='no',
            tasks=[
                dict(action=dict(module=task.get("module"),
                                 args=task.get("args")))
            ])
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    # Run the play, routing output into our collector callback.
    tqm = None
    callback = ResultsCollector()
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
        )
        tqm._stdout_callback = callback
        result = tqm.run(play)
        print(result)
    finally:
        if tqm is not None:
            tqm.cleanup()

    # Successful hosts: prefer the command's stdout, fall back to the
    # gathered facts (e.g. when the task was `setup`).
    for host, result in callback.host_ok.items():
        try:
            self.resultinfo.append({
                host: {
                    "message": result._result['stdout'],
                    "code": 0
                }
            })
        except KeyError:  # was a bare except; only a missing key is expected
            self.resultinfo.append({
                host: {
                    "message": result._result['ansible_facts'],
                    "code": 0
                }
            })
    for host, result in callback.host_failed.items():
        self.resultinfo.append(
            {host: {
                "message": result._result['msg'],
                "code": 1
            }})
    for host, result in callback.host_unreachable.items():
        self.resultinfo.append(
            {host: {
                "message": result._result['msg'],
                "code": -1
            }})
    return self.resultinfo
class PlaybookExecutor:
    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, display,
                 options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self.passwords = passwords

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # list/syntax modes never execute, so no TQM is needed.
        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory,
                                         variable_manager=variable_manager,
                                         loader=loader,
                                         display=display,
                                         options=options,
                                         passwords=self.passwords)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.

        In listing mode (no TQM) returns a list of entry dicts describing
        the plays; otherwise returns the last TQM run result code.
        '''
        signal.signal(signal.SIGINT, self._cleanup)

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path,
                                   variable_manager=self._variable_manager,
                                   loader=self._loader)

                if self._tqm is None:  # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []

                i = 1
                plays = pb.get_plays()
                self._display.vv('%d plays in %s' % (len(plays), playbook_path))

                for play in plays:
                    # clear out the flag on all roles indicating they had any tasks run
                    role_reset_has_run()

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    all_vars = self._variable_manager.get_vars(
                        loader=self._loader, play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if self._options.syntax:
                        # syntax check only: loading/post_validate was the test
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        pname = new_play.get_name().strip()
                        if pname == 'PLAY: <no name specified>':
                            pname = 'PLAY: #%d' % i
                        p = {'name': pname}

                        if self._options.listhosts:
                            p['pattern'] = play.hosts
                            p['hosts'] = set(
                                self._inventory.get_hosts(new_play.hosts))
                        # TODO: play tasks are really blocks, need to figure out how to get task objects from them
                        elif self._options.listtasks:
                            p['tasks'] = []
                            for task in play.get_tasks():
                                p['tasks'].append(task)
                                # p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})
                        elif self._options.listtags:
                            p['tags'] = set(new_play.tags)
                            for task in play.get_tasks():
                                # NOTE(review): updates with the task object
                                # itself, not task.tags (see commented line) --
                                # confirm intended behaviour.
                                p['tags'].update(task)
                                # p['tags'].update(task.tags)
                        entry['plays'].append(p)

                    else:
                        # make sure the tqm has callbacks loaded
                        self._tqm.load_callbacks()

                        # we are actually running plays
                        for batch in self._get_serialized_batches(new_play):
                            if len(batch) == 0:
                                self._tqm.send_callback(
                                    'v2_playbook_on_play_start', new_play)
                                self._tqm.send_callback(
                                    'v2_playbook_on_no_hosts_matched')
                                break

                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)
                            # if the last result wasn't zero, break out of the serial batch loop
                            if result != 0:
                                break

                        # if the last result wasn't zero, break out of the play loop
                        if result != 0:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._cleanup()

        if self._options.syntax:
            # NOTE(review): `self.display` is presumably a typo for
            # `self._display` (set in __init__) -- verify before relying on
            # syntax-check mode.
            self.display.display("No issues encountered")
            return result

        # FIXME: this stat summary stuff should be cleaned up and moved
        # to a new method, if it even belongs here...
        self._display.banner("PLAY RECAP")

        hosts = sorted(self._tqm._stats.processed.keys())
        for h in hosts:
            t = self._tqm._stats.summarize(h)

            self._display.display(
                "%s : %s %s %s %s" %
                (hostcolor(h, t), colorize('ok', t['ok'], 'green'),
                 colorize('changed', t['changed'], 'yellow'),
                 colorize('unreachable', t['unreachable'], 'red'),
                 colorize('failed', t['failures'], 'red')),
                screen_only=True)

            self._display.display(
                "%s : %s %s %s %s" %
                (hostcolor(h, t, False), colorize('ok', t['ok'], None),
                 colorize('changed', t['changed'], None),
                 colorize('unreachable', t['unreachable'], None),
                 colorize('failed', t['failures'], None)),
                log_only=True)

        self._display.display("", screen_only=True)
        # END STATS STUFF

        return result

    def _cleanup(self, signum=None, framenum=None):
        # also used as the SIGINT handler (hence the signal-style signature)
        return self._tqm.cleanup()

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts, subdivided into batches based on
        the serial size specified in the play.
        '''

        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts)

        # check to see if the serial number was specified as a percentage,
        # and convert it to an integer value based on the number of hosts
        # NOTE(review): `basestring` is Python 2 only -- this method will
        # raise NameError under Python 3.
        if isinstance(play.serial, basestring) and play.serial.endswith('%'):
            serial_pct = int(play.serial.replace("%", ""))
            serial = int((serial_pct / 100.0) * len(all_hosts))
        else:
            serial = int(play.serial)

        # if the serial count was not specified or is invalid, default to
        # a list of all hosts, otherwise split the list of hosts into chunks
        # which are based on the serial size
        if serial <= 0:
            return [all_hosts]
        else:
            serialized_batches = []

            while len(all_hosts) > 0:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        play_hosts.append(all_hosts.pop(0))

                serialized_batches.append(play_hosts)

            return serialized_batches
class IMPlaybookExecutor(PlaybookExecutor):
    ''' Simplified version of the PlaybookExecutor

    Always runs (no listing/syntax modes) and replaces the stdout callback
    with an AnsibleCallbacks instance writing to `output`, so nothing is
    printed to the real stdout.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader,
                 options, passwords, output):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        # hosts found unreachable in earlier plays; carried between plays
        self._unreachable_hosts = dict()

        self._tqm = TaskQueueManager(inventory=inventory,
                                     variable_manager=variable_manager,
                                     loader=loader,
                                     options=options,
                                     passwords=self.passwords)
        # Set out Callback as the stdout one to avoid stdout messages
        self._tqm._stdout_callback = AnsibleCallbacks(output)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.

        Returns the last TQM run result code (0 on full success).
        '''
        result = 0
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path,
                                   variable_manager=self._variable_manager,
                                   loader=self._loader)
                self._inventory.set_playbook_basedir(
                    os.path.dirname(playbook_path))

                i = 1
                # make sure the tqm has callbacks loaded
                self._tqm.load_callbacks()
                self._tqm.send_callback('v2_playbook_on_start', pb)

                for play in pb.get_plays():
                    # resolve relative paths against the include or playbook dir
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the
                    # inventory
                    self._inventory.remove_restriction()

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the
                    # original object.
                    all_vars = self._variable_manager.get_vars(
                        loader=self._loader, play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    # propagate previously-unreachable hosts into this run
                    self._tqm._unreachable_hosts.update(
                        self._unreachable_hosts)

                    # we are actually running plays
                    for batch in self._get_serialized_batches(new_play):
                        if len(batch) == 0:
                            self._tqm.send_callback(
                                'v2_playbook_on_play_start', new_play)
                            self._tqm.send_callback(
                                'v2_playbook_on_no_hosts_matched')
                            break

                        # restrict the inventory to the hosts in the serialized
                        # batch
                        self._inventory.restrict_to_hosts(batch)
                        # and run it...
                        result = self._tqm.run(play=play)

                        # check the number of failures here, to see if they're above the maximum
                        # failure percentage allowed, or if any errors are fatal. If either of those
                        # conditions are met, we break out, otherwise we only break out if the entire
                        # batch failed
                        failed_hosts_count = len(
                            self._tqm._failed_hosts) + len(
                                self._tqm._unreachable_hosts)
                        if new_play.any_errors_fatal and failed_hosts_count > 0:
                            break
                        elif new_play.max_fail_percentage is not None and \
                                (int((new_play.max_fail_percentage) / 100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0)):
                            break
                        elif len(batch) == failed_hosts_count:
                            break

                        # clear the failed hosts dictionaires in the TQM for
                        # the next batch
                        self._unreachable_hosts.update(
                            self._tqm._unreachable_hosts)
                        self._tqm.clear_failed_hosts()

                    # if the last result wasn't zero or 3 (some hosts were unreachable),
                    # break out of the serial batch loop
                    if result not in (0, 3):
                        break

                    i = i + 1  # per play

                self._tqm.send_callback('v2_playbook_on_stats',
                                        self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook
                # file name loop
                if result != 0:
                    break
        finally:
            if self._tqm is not None:
                self._tqm.cleanup()

        return result
class PlayRun:
    """Thin wrapper that runs a list of (module, args) ad-hoc actions through
    the Ansible API and returns the results queue collected by the supplied
    callback class."""

    # Minimal stand-in for the ansible CLI options object.
    Options = namedtuple('Options', [
        'connection', 'module_path', 'forks', 'become', 'become_method',
        'become_user', 'check'
    ])

    def __init__(self,
                 connection_type="ssh",
                 forks=100,
                 host_list="/etc/ansible/hosts",
                 passwords=None,
                 callback=None):
        """
        :param connection_type: ansible connection plugin name
        :param forks: number of parallel workers
        :param host_list: inventory source (file path or host list)
        :param passwords: passwords dict for the TaskQueueManager
        :param callback: callback CLASS used to collect results; effectively
            required, since run() reads ``self.results_callback.result_q``
        """
        self.connection_type = connection_type
        self.forks = forks
        self.host_list = host_list
        self.passwords = passwords or None
        self.variable_manager = VariableManager()
        self.loader = DataLoader()
        self.options = self.Options(connection=self.connection_type,
                                    module_path=None,
                                    forks=forks,
                                    become=None,
                                    become_method=None,
                                    become_user=None,
                                    check=False)
        # BUG FIX: guard against callback=None -- the previous code called
        # None() unconditionally.
        self.results_callback = callback() if callback is not None else None
        # BUG FIX: honor the host_list argument; the inventory source was
        # previously hardcoded to "/etc/ansible/hosts".
        self.inventory = Inventory(loader=self.loader,
                                   variable_manager=self.variable_manager,
                                   host_list=self.host_list)
        self.variable_manager.set_inventory(self.inventory)
        self.play_source = None
        self.play = None
        self.runner = None

    def run(self, action_tuple, hosts="all", task_name="Ansible Ad-hoc"):
        """Build and run one play containing the given actions.

        :param action_tuple: [('shell', 'ls'), ('ping', '')]
        :param hosts: host pattern to target
        :param task_name: name for the generated play
        :returns: the callback's collected result queue
        :raises ValueError: if an action entry is not a (module, args) pair
        """
        tasks = []
        for item in action_tuple:
            # explicit check instead of `assert` (stripped under python -O)
            if len(item) != 2:
                raise ValueError("each action must be a (module, args) pair")
            module, args = item
            tasks.append(dict(action=dict(module=module, args=args)))
        self.play_source = dict(name=task_name,
                                hosts=hosts,
                                gather_facts='no',
                                tasks=tasks)
        self.play = Play().load(self.play_source,
                                variable_manager=self.variable_manager,
                                loader=self.loader)
        self.runner = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.results_callback,
        )
        try:
            self.runner.run(self.play)
        finally:
            # always reap workers and remove temporary files
            if self.runner:
                self.runner.cleanup()
            if self.loader:
                self.loader.cleanup_all_tmp_files()
        # run() ultimately returns the results gathered by the callback
        return self.results_callback.result_q
class AdHocCLI(CLI):
    ''' is an extra-simple tool/framework/API for doing 'remote things'.
        this command allows you to define and run a single task 'playbook' against a set of hosts
    '''

    def parse(self):
        ''' create an options parser for bin/ansible '''
        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            runas_opts=True,
            inventory_opts=True,
            async_opts=True,
            output_opts=True,
            connect_opts=True,
            check_opts=True,
            runtask_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
            basedir_opts=True,
            desc="Define and run a single task 'playbook' against a set of hosts",
            epilog="Some modules do not make sense in Ad-Hoc (include, meta, etc)",
        )

        # options unique to ansible ad-hoc
        self.parser.add_option('-a', '--args', dest='module_args',
                               help="module arguments",
                               default=C.DEFAULT_MODULE_ARGS)
        self.parser.add_option('-m', '--module-name', dest='module_name',
                               help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
                               default=C.DEFAULT_MODULE_NAME)

        super(AdHocCLI, self).parse()

        if len(self.args) < 1:
            raise AnsibleOptionsError("Missing target hosts")
        elif len(self.args) > 1:
            raise AnsibleOptionsError("Extraneous options or arguments")

        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)

    def _play_ds(self, pattern, async_val, poll):
        """Build the in-memory play datastructure for the single ad-hoc task."""
        check_raw = self.options.module_name in ('command', 'win_command',
                                                 'shell', 'win_shell',
                                                 'script', 'raw')
        mytask = {
            'action': {
                'module': self.options.module_name,
                'args': parse_kv(self.options.module_args, check_raw=check_raw)
            }
        }

        # avoid adding to tasks that don't support it, unless set, then give user an error
        # BUG FIX: any() takes a single iterable; the original called
        # any(async_val, poll) which raises TypeError at runtime.
        if self.options.module_name not in ('include_role', 'include_tasks') or any((async_val, poll)):
            mytask['async_val'] = async_val
            mytask['poll'] = poll

        return dict(name="Ansible Ad-Hoc",
                    hosts=pattern,
                    gather_facts='no',
                    tasks=[mytask])

    def run(self):
        ''' create and execute the single task playbook '''
        super(AdHocCLI, self).run()

        # only thing left should be host pattern
        pattern = to_text(self.args[0], errors='surrogate_or_strict')

        sshpass = None
        becomepass = None

        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        # dynamically load any plugins
        get_all_plugin_loaders()

        loader, inventory, variable_manager = self._play_prereqs(self.options)

        try:
            hosts = CLI.get_host_list(inventory, self.options.subset, pattern)
        except AnsibleError:
            # an explicit --limit that matches nothing is an error; an
            # unrestricted empty match is only a warning
            if self.options.subset:
                raise
            else:
                hosts = []
                display.warning("No hosts matched, nothing to do")

        if self.options.listhosts:
            display.display(' hosts (%d):' % len(hosts))
            for host in hosts:
                display.display(' %s' % host)
            return 0

        if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
            err = "No argument passed to %s module" % self.options.module_name
            if pattern.endswith(".yml"):
                err = err + ' (did you mean to run ansible-playbook?)'
            raise AnsibleOptionsError(err)

        # Avoid modules that don't work with ad-hoc
        if self.options.module_name in ('import_playbook', ):
            raise AnsibleOptionsError(
                "'%s' is not a valid action for ad-hoc commands" % self.options.module_name)

        play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
        play = Play().load(play_ds,
                           variable_manager=variable_manager,
                           loader=loader)

        # used in start callback
        playbook = Playbook(loader)
        playbook._entries.append(play)
        playbook._file_name = '__adhoc_playbook__'

        if self.callback:
            cb = self.callback
        elif self.options.one_line:
            cb = 'oneline'
        # Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
        elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
            cb = C.DEFAULT_STDOUT_CALLBACK
        else:
            cb = 'minimal'

        run_tree = False
        if self.options.tree:
            C.DEFAULT_CALLBACK_WHITELIST.append('tree')
            C.TREE_DIR = self.options.tree
            run_tree = True

        # now create a task queue manager to execute the play
        self._tqm = None
        try:
            self._tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                options=self.options,
                passwords=passwords,
                stdout_callback=cb,
                run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                run_tree=run_tree,
            )

            self._tqm.send_callback('v2_playbook_on_start', playbook)
            result = self._tqm.run(play)
            self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
        finally:
            if self._tqm:
                self._tqm.cleanup()
            if loader:
                loader.cleanup_all_tmp_files()

        return result
def _install(self, ip_address):
    """Install self.package (as an Ansible role) on a single target host."""
    # Inventory sources with a single entry need a trailing comma so
    # Ansible treats the string as a host list rather than a file path.
    target_hosts = [ip_address]
    hosts = ','.join(target_hosts)
    if len(target_hosts) == 1:
        hosts += ','

    package = [self.package]

    print(f'hosts : {ip_address} \n roles : {package}')

    # The play datastructure is what a YAML playbook would produce
    # after loading: one play applying the role, with privilege escalation.
    play_source = {
        'hosts': hosts,
        'gather_facts': True,
        'become': True,
        'become_user': '******',
        'become_method': 'sudo',
        'roles': package,
    }

    passwords = dict()
    collector = ResultsCollectorJSONCallback()

    queue_manager = TaskQueueManager(
        inventory=self.inventory,
        variable_manager=self.variable_manager,
        loader=self.loader,
        passwords=passwords,
        stdout_callback=collector,
    )

    # Play objects are built via .load(), which also creates the task
    # objects from the datastructure above.
    play = Play().load(play_source,
                       variable_manager=self.variable_manager,
                       loader=self.loader,
                       vars={'ansible_become_pass': '******'})

    try:
        # Per-task results flow to the callback, not the return value.
        result = queue_manager.run(play)
    finally:
        # Reap child processes, IPC structures and temp files.
        queue_manager.cleanup()
        if self.loader:
            self.loader.cleanup_all_tmp_files()
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    print("UP ***********")
    for host, result in collector.host_ok.items():
        print('{0} >>> {1}'.format(host, result._result))

    print("FAILED *******")
    for host, result in collector.host_failed.items():
        print('{0} >>> {1}'.format(host, result._result['msg']))

    print("DOWN *********")
    for host, result in collector.host_unreachable.items():
        print('{0} >>> {1}'.format(host, result._result['msg']))
def run_ansible(module_name, module_args, host_list, ansible_user="******"):
    """Run one ansible module against host_list via the Python API.

    :param module_name: name of the ansible module to execute.
    :param module_args: argument string for the module.
    :param host_list: list of target IPs, added to an in-memory inventory.
    :param ansible_user: remote SSH user.
    :returns: JSON string with 'success'/'failed'/'unreachable'/'skipped'
        sections keyed by host.
    """
    # DataLoader finds and reads yaml, json and ini files
    loader = DataLoader()
    # build the option set the TaskQueueManager expects
    Options = namedtuple('Options', [
        'connection', 'module_path', 'forks', 'become', 'become_method',
        'private_key_file', 'become_user', 'remote_user', 'check', 'diff'
    ])
    options = Options(connection='ssh',
                      module_path=None,
                      forks=5,
                      become=True,
                      become_method='sudo',
                      private_key_file="/root/.ssh/id_rsa",
                      become_user='******',
                      remote_user=ansible_user,
                      check=False,
                      diff=False)
    passwords = dict(vault_pass='******')
    # callback instance that accumulates per-host results
    callback = ResultsCollector()
    # build an in-memory inventory and hand it to the VariableManager
    inventory = InventoryManager(loader=loader, sources='')
    for ip in host_list:
        inventory.add_host(host=ip, port=22)
    # manages host/group/extra variables
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    for ip in host_list:
        host = inventory.get_host(hostname=ip)
        # NOTE(review): hard-coded SSH password — presumably environment
        # specific; consider sourcing from configuration.
        variable_manager.set_host_variable(host=host,
                                           varname='ansible_ssh_pass',
                                           value='lzx@2019')
    # build the single-task play
    host = ",".join(host_list)
    play_source = dict(name="Ansible Play",
                       hosts=host,
                       gather_facts='no',
                       tasks=[
                           dict(action=dict(module=module_name,
                                            args=module_args),
                                register='shell_out'),
                       ])
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)
    # execute
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
            stdout_callback=callback,
        )
        tqm.run(play)
    finally:
        # BUG FIX: the TQM (and its worker processes / temp files) was
        # never cleaned up, leaking resources on every call.
        if tqm is not None:
            tqm.cleanup()
        loader.cleanup_all_tmp_files()
    result_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
    for target, res in callback.host_ok.items():
        result_raw['success'][target] = res._result
    for target, res in callback.host_failed.items():
        result_raw['failed'][target] = res._result
    for target, res in callback.host_unreachable.items():
        result_raw['unreachable'][target] = res._result
    for target, res in callback.host_skipped.items():
        result_raw['skipped'][target] = res._result
    return json.dumps(result_raw, indent=4, ensure_ascii=False)
def handle(self, *args, **options):
    """Collect CMDB facts from every inventory host via three ad-hoc tasks."""
    Options = namedtuple('Options', [
        'connection', 'module_path', 'forks', 'become', 'become_method',
        'become_user', 'check', 'diff'
    ])
    # local run options (distinct name so the **options parameter is
    # not shadowed)
    run_options = Options(connection='smart',
                          module_path=[],
                          forks=10,
                          become=None,
                          become_method=None,
                          become_user=None,
                          check=False,
                          diff=False)

    loader = DataLoader()
    passwords = {}
    collector = ResultCallback()
    # inventory file location: <BASE_DIR>/etc/hosts
    inventory = InventoryManager(loader=loader,
                                 sources=os.path.join(settings.BASE_DIR,
                                                      'etc', 'hosts'))
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    dest_path = '/tmp/resources.py'

    # equivalent of: ansible all -i etc/hosts -m setup, plus copying and
    # running the collection script
    play_source = {
        'name': "CMDB Collect",
        'hosts': 'all',  # hosts to run on
        'gather_facts': 'no',
        'tasks': [  # task list
            {
                'name': 'collect_server_info',  # task name
                'setup': ''  # module to run
            },
            {
                'name': 'copy_file',  # task name
                'copy': 'src=' + os.path.join(settings.BASE_DIR, 'etc',
                                              'resources.py') +
                        ' dest=' + dest_path  # module to run
            },
            {
                'name': 'collect_resource',  # task name
                'command': 'python3 ' + dest_path  # module to run
            }
        ]
    }

    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=run_options,
            passwords=passwords,
            stdout_callback=collector,
        )
        result = tqm.run(play)
    finally:
        # reap workers and remove the local ansible temp directory
        if tqm is not None:
            tqm.cleanup()
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
def run_ansible(module_name, module_args, host_list, option_dict):
    """Run one ansible module against host_list and summarize the results.

    :param module_name: ansible module to execute.
    :param module_args: argument string for the module.
    :param host_list: list of target hosts (joined into a host pattern).
    :param option_dict: dict providing 'become' and 'remote_user'.
    :returns: JSON string with 'success'/'failed'/'unreachable' sections.
    """
    # build the option set the TaskQueueManager expects
    Options = namedtuple('Options', [
        'connection', 'module_path', 'forks', 'become', 'become_method',
        'private_key_file', 'become_user', 'remote_user', 'check', 'diff'
    ])
    # DataLoader finds and reads yaml, json and ini files
    loader = DataLoader()
    options = Options(connection='ssh',
                      module_path=None,
                      forks=5,
                      become=option_dict['become'],
                      become_method='sudo',
                      private_key_file="/root/.ssh/id_rsa",
                      become_user='******',
                      remote_user=option_dict['remote_user'],
                      check=False,
                      diff=False)
    passwords = dict(vault_pass='******')
    # callback instance that accumulates per-host results
    callback = ResultsCollector()
    # build the inventory and hand it to the VariableManager
    inventory = InventoryManager(loader=loader, sources=['/etc/ansible/hosts'])
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    # build the single-task play
    host = ",".join(host_list)
    play_source = dict(name="Ansible Play",
                       hosts=host,
                       gather_facts='no',
                       tasks=[
                           dict(action=dict(module=module_name,
                                            args=module_args),
                                register='shell_out'),
                       ])
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)
    # execute
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
            stdout_callback=callback,
        )
        tqm.run(play)
    finally:
        # BUG FIX: the TQM (and its worker processes / temp files) was
        # never cleaned up, leaking resources on every call.
        if tqm is not None:
            tqm.cleanup()
        loader.cleanup_all_tmp_files()
    result_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
    # NOTE(review): assumes the module produced stdout/stderr line lists
    # (true for shell/command); other modules may lack these keys.
    for target, res in callback.host_ok.items():
        result_raw['success'][target] = res._result['stdout_lines']
    for target, res in callback.host_failed.items():
        result_raw['failed'][target] = res._result['stderr_lines']
    for target, res in callback.host_unreachable.items():
        result_raw['unreachable'][target] = res._result["msg"]
    return json.dumps(result_raw, indent=4)
class PlaybookExecutor:
    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self.passwords = passwords
        self._unreachable_hosts = dict()

        # listing/syntax modes never execute tasks, so no TQM is needed
        if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \
                context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'):
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                passwords=self.passwords,
                forks=context.CLIARGS.get('forks'),
            )

        # Note: We run this here to cache whether the default ansible ssh
        # executable supports control persist. Sometime in the future we may
        # need to enhance this to check that ansible_ssh_executable specified
        # in inventory is also cached. We can't do this caching at the point
        # where it is used (in task_executor) because that is post-fork and
        # therefore would be discarded after every task.
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''
        result = 0
        entrylist = []
        entry = {}
        try:
            # preload become/connection/shell to set config defs cached
            list(connection_loader.all(class_only=True))
            list(shell_loader.all(class_only=True))
            list(become_loader.all(class_only=True))

            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path,
                                   variable_manager=self._variable_manager,
                                   loader=self._loader)
                # FIXME: move out of inventory
                self._inventory.set_playbook_basedir(
                    os.path.realpath(os.path.dirname(playbook_path)))

                if self._tqm is None:  # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

                for play in plays:
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    # Allow variables to be used in vars_prompt fields.
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    setattr(play, 'vars_prompt', templar.template(play.vars_prompt))

                    # FIXME: this should be a play 'sub object' like loop_control
                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = boolean(var.get("private", True))
                            confirm = boolean(var.get("confirm", False))
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)
                            unsafe = var.get("unsafe", None)

                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback(
                                        'v2_playbook_on_vars_prompt', vname,
                                        private, prompt, encrypt, confirm,
                                        salt_size, salt, default, unsafe)
                                    play.vars[vname] = display.do_var_prompt(
                                        vname, private, prompt, encrypt,
                                        confirm, salt_size, salt, default,
                                        unsafe)
                                else:
                                    # we are either in --list-<option> or syntax check
                                    play.vars[vname] = default

                    # Post validate so any play level variables are templated
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    play.post_validate(templar)

                    if context.CLIARGS['syntax']:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        entry['plays'].append(play)
                    else:
                        self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                        previously_failed = len(self._tqm._failed_hosts)
                        previously_unreachable = len(self._tqm._unreachable_hosts)

                        break_play = False
                        # we are actually running plays
                        # BUG FIX: this used to pass the undefined name
                        # `new_play` (a leftover from a variant that copied
                        # the play); the post-validated `play` is correct.
                        batches, ignores = self._get_serialized_batches(play)
                        if len(batches) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                        for batch, ignore in zip(batches, ignores):
                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)

                            # break the play if the result equals the special return code
                            if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
                                result = self._tqm.RUN_FAILED_HOSTS
                                break_play = True

                            # check the number of failures here, to see if they're above the maximum
                            # failure percentage allowed, or if any errors are fatal. If either of those
                            # conditions are met, we break out, otherwise we only break out if the entire
                            # batch failed. If ignore value is 1 we do not count unreachable hosts as failed.
                            # We have an ignore value for every hosts group.
                            if ignore == 1:
                                failed_hosts_count = len(self._tqm._failed_hosts) - previously_failed
                            else:
                                failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                                    (previously_failed + previously_unreachable)

                            if len(batch) == failed_hosts_count:
                                break_play = True
                                break

                            # update the previous counts so they don't accumulate incorrectly
                            # over multiple serial batches
                            previously_failed += len(self._tqm._failed_hosts) - previously_failed
                            previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable

                            # save the unreachable hosts from this batch
                            self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                        if break_play:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    if C.RETRY_FILES_ENABLED:
                        retries = set(self._tqm._failed_hosts.keys())
                        retries.update(self._tqm._unreachable_hosts.keys())
                        retries = sorted(retries)
                        if len(retries) > 0:
                            if C.RETRY_FILES_SAVE_PATH:
                                basedir = C.RETRY_FILES_SAVE_PATH
                            elif playbook_path:
                                basedir = os.path.dirname(os.path.abspath(playbook_path))
                            else:
                                basedir = '~/'

                            (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                            filename = os.path.join(basedir, "%s.retry" % retry_name)
                            if self._generate_retry_inventory(filename, retries):
                                display.display("\tto retry, use: --limit @%s\n" % filename)

                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._tqm.cleanup()
            if self._loader:
                self._loader.cleanup_all_tmp_files()

        if context.CLIARGS['syntax']:
            display.display("No issues encountered")
            return result

        if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
            display.error(
                "No matching task \"%s\" found."
                " Note: --start-at-task can only follow static includes."
                % context.CLIARGS['start_at_task'])

        return result

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts subdivided into batches based on the serial
        size specified in the play and a list of 0 and 1 values, used to
        ignore or not unreachable hosts during the play.
        '''
        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts, order=play.order)
        all_hosts_len = len(all_hosts)

        # Extract serial batch list
        serial_batch_list = [
            i[0] if isinstance(i, list) else i for i in play.serial
        ]
        # ignore_unreachable_list contains 0,1 value, if 0, host unreachable are counted as failed, otherwise
        # are not counted as failed. If a value is not 0 or 1, we pass 0 as standard
        ignore_unreachable_list = [
            i[1] if isinstance(i, list) and i[1] == 1 else 0
            for i in play.serial
        ]

        # the serial value can be listed as a scalar or a list of
        # scalars, so we make sure it's a list here
        if len(serial_batch_list) == 0:
            serial_batch_list = [-1]

        cur_item = 0
        serialized_batches = []
        ignore_unreachable = []

        while len(all_hosts) > 0:
            # get the serial value from current item in the list
            serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)

            # if the serial count was not specified or is invalid, default to
            # a list of all hosts, otherwise grab a chunk of the hosts equal
            # to the current serial item size
            if serial <= 0:
                serialized_batches.append(all_hosts)
                ignore_unreachable.append(0)
                break
            else:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        play_hosts.append(all_hosts.pop(0))
                serialized_batches.append(play_hosts)
                ignore_unreachable.append(ignore_unreachable_list[cur_item])

            # increment the current batch list item number, and if we've hit
            # the end keep using the last element until we've consumed all of
            # the hosts in the inventory
            cur_item += 1
            if cur_item > len(serial_batch_list) - 1:
                cur_item = len(serial_batch_list) - 1

        return serialized_batches, ignore_unreachable

    def _generate_retry_inventory(self, retry_path, replay_hosts):
        '''
        Called when a playbook run fails. It generates an inventory which allows
        re-running on ONLY the failed hosts.  This may duplicate some variable
        information in group_vars/host_vars but that is ok, and expected.
        '''
        try:
            makedirs_safe(os.path.dirname(retry_path))
            with open(retry_path, 'w') as fd:
                for x in replay_hosts:
                    fd.write("%s\n" % x)
        except Exception as e:
            display.warning("Could not create retry file '%s'.\n\t%s"
                            % (retry_path, to_text(e)))
            return False

        return True
def start(self, json_data):
    """Run the play described by *json_data* against every host/IP stored
    in the inventory database and return the callback's collected data.

    :param json_data: JSON string describing the play source (tasks etc.).
    :returns: results accumulated by ResultCallback (``.data``).
    """
    configMysql = ConfigParser.ConfigParser()
    configMysql.read(config.get('DEFAULT', 'DB_CONNECTION_FILE'))
    db = mysql.connector.connect(host=configMysql.get('INVENTORY', 'host'),
                                 user=configMysql.get('INVENTORY', 'user'),
                                 passwd=configMysql.get('INVENTORY', 'pass'),
                                 db=configMysql.get('INVENTORY', 'dbname'),
                                 charset='utf8mb4')
    # BUG FIX: the DB connection and cursor used to leak on every call;
    # close them as soon as the host data is fetched.
    try:
        cursor = db.cursor()
        get_hosts = "select name from hosts"
        get_ips = "select ip from hosts"
        cursor.execute(get_hosts)
        hosts = [item[0] for item in cursor.fetchall()]
        cursor.execute(get_ips)
        ips = [item[0] for item in cursor.fetchall()]
        cursor.close()
    finally:
        db.close()

    data = {}
    data["hosts"] = hosts + ips

    # create play with tasks
    play_source = json.loads(json_data)

    Options = namedtuple('Options', [
        'module_path', 'forks', 'become', 'become_method', 'become_user',
        'check'
    ])

    # initialize needed objects
    variable_manager = VariableManager()
    loader = DataLoader()
    options = Options(module_path='/path/to/mymodules',
                      forks=100,
                      become=None,
                      become_method=None,
                      become_user=None,
                      check=False)
    passwords = dict(vault_pass='******')

    # Instantiate our ResultCallback for handling results as they come in
    results_callback = ResultCallback()

    # create inventory and pass to var manager
    inventory = Inventory(loader=loader,
                          variable_manager=variable_manager,
                          host_list=data["hosts"])
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    # actually run it
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            options=options,
            passwords=passwords,
            loader=loader,
            variable_manager=variable_manager,
            stdout_callback=results_callback,
        )
        tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
    return results_callback.data
def Order_Run(host, module_name, module_args):
    """Run a single ad-hoc module against *host* and classify the results.

    :param host: host pattern / host list for the play.
    :param module_name: ansible module to execute.
    :param module_args: argument string for the module.
    :returns: dict with 'success' / 'failed' / 'unreachable' sections.
    """

    class ResultCallback(CallbackBase):
        """Collect per-host results into ok/failed/unreachable dicts."""

        def __init__(self, *args, **kwargs):
            # BUG FIX: initialise the CallbackBase machinery before
            # adding our own state (was skipped entirely).
            super(ResultCallback, self).__init__(*args, **kwargs)
            self.host_ok = {}
            self.host_unreachable = {}
            self.host_failed = {}

        def v2_runner_on_unreachable(self, result):
            self.host_unreachable[result._host.get_name()] = result

        def v2_runner_on_ok(self, result, *args, **kwargs):
            self.host_ok[result._host.get_name()] = result

        def v2_runner_on_failed(self, result, *args, **kwargs):
            self.host_failed[result._host.get_name()] = result

    variable_manager = VariableManager()
    loader = DataLoader()
    inventory = Inventory(loader=loader,
                          variable_manager=variable_manager,
                          host_list=host)
    Options = namedtuple('Options', [
        'listtags', 'listtasks', 'listhosts', 'syntax', 'connection',
        'module_path', 'forks', 'remote_user', 'private_key_file',
        'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args',
        'scp_extra_args', 'become', 'become_method', 'become_user',
        'verbosity', 'check'
    ])
    options = Options(listtags=False,
                      listtasks=False,
                      listhosts=False,
                      syntax=False,
                      connection='ssh',
                      module_path=None,
                      forks=100,
                      remote_user='******',
                      private_key_file=None,
                      ssh_common_args=None,
                      ssh_extra_args=None,
                      sftp_extra_args=None,
                      scp_extra_args=None,
                      become=True,
                      become_method=None,
                      become_user='******',
                      verbosity=None,
                      check=False)
    passwords = {}
    play_source = dict(
        name='Ansible Play',
        hosts=host,
        gather_facts='no',
        tasks=[dict(action=dict(module=module_name, args=module_args))])
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)
    tqm = None
    callback = ResultCallback()
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
            stdout_callback=callback,
            run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
            run_tree=False,
        )
        tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()

    results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
    for target, result in callback.host_ok.items():
        # BUG FIX: the "no stdout" fallback used to sit in an unreachable
        # branch (nested under the same truthiness test), so ok-hosts
        # without stdout_lines were silently dropped from the report.
        if result._result.get('stdout_lines'):
            results_raw['success'][target] = json.dumps(
                result._result['stdout_lines'])
        else:
            # record the literal string "success" when the module
            # produced no stdout (e.g. copy)
            result._result['stdout_lines'] = 'success'.split()
            results_raw['success'][target] = json.dumps(
                result._result['stdout_lines'])
    for target, result in callback.host_failed.items():
        if result._result.get('stderr'):
            results_raw['failed'][target] = result._result['stderr']
        elif result._result.get('msg'):
            # modules such as copy report errors via 'msg'
            results_raw['failed'][target] = result._result['msg']
    for target, result in callback.host_unreachable.items():
        results_raw['unreachable'][target] = result._result['msg']
    return results_raw
class AdHocRunner(object):
    """
    Ad-hoc execution interface.
    """
    Options = namedtuple("Options", [
        'connection', 'module_path', 'private_key_file', "remote_user",
        'timeout', 'forks', 'become', 'become_method', 'become_user',
        'check', 'extra_vars',
    ])

    results_callback_class = AdHocResultCallback

    def __init__(self,
                 hosts=C.DEFAULT_HOST_LIST,
                 forks=C.DEFAULT_FORKS,  # 5
                 timeout=C.DEFAULT_TIMEOUT,  # SSH timeout = 10s
                 remote_user=C.DEFAULT_REMOTE_USER,  # root
                 module_path=None,  # dirs of custome modules
                 connection_type="smart",
                 become=None,
                 become_method=None,
                 become_user=None,
                 check=False,
                 passwords=None,
                 extra_vars=None,
                 private_key_file=None,
                 gather_facts='no'):
        # pattern is filled in by run(); kept as attribute for reuse
        self.pattern = ''
        self.variable_manager = VariableManager()
        self.loader = DataLoader()
        self.gather_facts = gather_facts
        self.results_callback = AdHocRunner.results_callback_class()
        self.options = self.Options(
            connection=connection_type,
            timeout=timeout,
            module_path=module_path,
            forks=forks,
            become=become,
            become_method=become_method,
            become_user=become_user,
            check=check,
            remote_user=remote_user,
            extra_vars=extra_vars or [],
            private_key_file=private_key_file,
        )
        self.variable_manager.extra_vars = load_extra_vars(
            self.loader, options=self.options)
        self.variable_manager.options_vars = load_options_vars(self.options)
        self.passwords = passwords or {}
        self.inventory = JMSInventory(hosts)
        self.variable_manager.set_inventory(self.inventory)
        self.tasks = []
        self.play_source = None
        self.play = None
        self.runner = None

    @staticmethod
    def check_module_args(module_name, module_args=''):
        """Return False (and print) when a module requires args but got none."""
        if module_name in C.MODULE_REQUIRE_ARGS and not module_args:
            err = "No argument passed to '%s' module." % module_name
            print(err)
            return False
        return True

    def run(self, task_tuple, pattern='all', task_name='Ansible Ad-hoc'):
        """
        :param task_tuple:  (('shell', 'ls'), ('ping', ''))
        :param pattern: host pattern to match against the inventory.
        :param task_name: display name for the play.
        :return: the callback's result queue, or None on error.
        """
        # BUG FIX: self.pattern stayed '' forever, so the host-match check
        # below never tested the pattern actually being run.
        self.pattern = pattern
        for module, args in task_tuple:
            if not self.check_module_args(module, args):
                return
            self.tasks.append(
                dict(action=dict(
                    module=module,
                    args=args,
                ))
            )

        self.play_source = dict(
            name=task_name,
            hosts=pattern,
            gather_facts=self.gather_facts,
            tasks=self.tasks
        )

        self.play = Play().load(
            self.play_source,
            variable_manager=self.variable_manager,
            loader=self.loader,
        )

        self.runner = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.results_callback,
        )

        if not self.inventory.list_hosts("all"):
            raise AnsibleError("Inventory is empty.")

        if not self.inventory.list_hosts(self.pattern):
            raise AnsibleError(
                "pattern: %s dose not match any hosts." % self.pattern)

        try:
            self.runner.run(self.play)
        except Exception as e:
            logger.warning(e)
        else:
            return self.results_callback.result_q
        finally:
            if self.runner:
                self.runner.cleanup()
            if self.loader:
                self.loader.cleanup_all_tmp_files()

    def clean_result(self):
        """
        :return: {
            "success": ['hostname',],
            "failed": [('hostname', 'msg'), {}],
        }
        """
        result = {'success': [], 'failed': []}
        for host in self.results_callback.result_q['contacted']:
            result['success'].append(host)
        for host, msgs in self.results_callback.result_q['dark'].items():
            msg = '\n'.join(['{} {}: {}'.format(
                msg.get('module_stdout', ''),
                msg.get('invocation', {}).get('module_name'),
                msg.get('msg', '')) for msg in msgs])
            result['failed'].append((host, msg))
        return result
def execute_playbook(play_book, host_list=None):
    """Execute the first play of *play_book* against *host_list*.

    :param play_book: path to the playbook YAML file.
    :param host_list: list of target hosts; defaults to an empty list.
    :returns: dict with 'success' / 'failed' / 'unreachable' sections
        keyed by host.
    """
    # BUG FIX: the default used to be a mutable list literal ([]), the
    # classic shared-default pitfall; use a None sentinel instead.
    if host_list is None:
        host_list = []
    # since the API is constructed for CLI it expects certain options to always be set in the context object
    context.CLIARGS = ImmutableDict(connection='smart',
                                    module_path=None,
                                    become=None,
                                    become_method=None,
                                    become_user=None,
                                    check=False,
                                    forks=4)
    # a single-entry source needs a trailing comma so it is parsed as a
    # host list instead of a file path
    sources = ','.join(host_list)
    if len(host_list) == 1:
        sources += ','

    # initialize needed objects
    loader = DataLoader()  # Takes care of finding and reading yaml, json and ini files
    passwords = dict(vault_pass='******')

    # Instantiate our ResultsCollectorJSONCallback for handling results as they come in.
    # Ansible expects this to be one of its main display outlets
    results_callback = ResultsCollectorJSONCallback()

    inventory = InventoryManager(loader=loader, sources=sources)
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        # Use our custom callback instead of the ``default`` callback
        # plugin, which prints to stdout
        stdout_callback=results_callback,
    )

    pbex = PlaybookExecutor(playbooks=[play_book],
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            passwords=passwords)
    playbook = Playbook.load(pbex._playbooks[0],
                             variable_manager=variable_manager,
                             loader=loader)
    # NOTE(review): only the FIRST play of the playbook is executed here;
    # multi-play playbooks are silently truncated.
    play = playbook.get_plays()[0]

    # Actually run it
    try:
        # most interesting data for a play is actually sent to the
        # callback's methods
        result = tqm.run(play)
    finally:
        # we always need to cleanup child procs and the structures we use
        # to communicate with them
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()
        # Remove ansible tmpdir
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
    for host, result in results_callback.host_ok.items():
        results_raw['success'][host] = result._result
    for host, result in results_callback.host_failed.items():
        results_raw['failed'][host] = result._result
    for host, result in results_callback.host_unreachable.items():
        results_raw['unreachable'][host] = result._result
    return results_raw
def _run_ansible(self, args):
    """Actually build an run an ansible play and return the results"""
    # NOTE: pops 'zbx_class' out of the caller's dict (mutating side effect)
    module_name = args.pop('zbx_class')

    # The leadup to the TaskQueueManager() call below is
    # copy pasted from Ansible's example:
    # https://docs.ansible.com/ansible/developing_api.html#python-api-2-0
    # pylint: disable=invalid-name
    Options = namedtuple('Options', [
        'connection', 'module_path', 'forks', 'become', 'become_method',
        'become_user', 'check'
    ])

    loader = DataLoader()
    run_opts = Options(connection='local',
                       module_path=None,
                       forks=1,
                       become=None,
                       become_method=None,
                       become_user=None,
                       check=False)
    passwords = dict(vault_pass='******')
    collector = ResultsCallback()
    inventory = InventoryManager(loader=loader)
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    # one-task play invoking the requested zabbix module locally
    play_source = {
        'name': "Ansible Play",
        'hosts': self.pattern,
        'gather_facts': 'no',
        'tasks': [{'action': {'module': module_name, 'args': args}}],
    }
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    tqm = None
    try:
        tqm = TaskQueueManager(inventory=inventory,
                               variable_manager=variable_manager,
                               loader=loader,
                               options=run_opts,
                               passwords=passwords,
                               stdout_callback=collector)
        return_code = tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()

    # Classify the outcome; any non-zero code or failed/unreachable
    # result raises with the raw module output attached.
    if return_code != 0:
        raise ResultsException("Ansible module run failed, no results given.")

    if collector.result.is_unreachable():
        message = "Ansible module run failed: module output:\n%s" % \
                  json.dumps(collector.raw_result, indent=4)
        raise ResultsException(message)

    if collector.result.is_failed():
        raise ResultsException(collector.raw_result)

    return collector.raw_result