def ansible_main():
    """Run PLAYBOOK_FILE against INVENTORY_FILE via the Ansible API, parse the
    netstat-style ``stdout_lines`` of the raw result, and insert one row per
    parsed line into ``tb_e2eo_ansible_result_raw``.

    Relies on module-level names: INVENTORY_FILE, PLAYBOOK_FILE, ansible_run,
    run_sql, plus the ansible API objects imported at file level.
    """
    frame_info = getframeinfo(currentframe())
    print(f"Start [{frame_info.function}]!")

    # Correlation key for all DB rows produced by this invocation.
    workflow_id = str(uuid.uuid1())
    print(f"[{frame_info.function}] bind_key : {workflow_id}")
    now_time = time.strftime('%Y-%m-%d_%H:%M:%S')
    print(f"[{frame_info.function}] now_time : {now_time}")

    # Check input files; abort with a non-zero exit status on a missing file.
    if not os.path.exists(INVENTORY_FILE):
        print(f"[{frame_info.function}] Error. File not exist. {INVENTORY_FILE}):")
        sys.exit(1)
    else:
        print(f"[{frame_info.function}] INVENTORY_FILE : {INVENTORY_FILE}")
    if not os.path.exists(PLAYBOOK_FILE):
        print(f"[{frame_info.function}] Error. File not exist. {PLAYBOOK_FILE}):")
        sys.exit(1)
    else:
        print(f"[{frame_info.function}] PLAYBOOK_FILE : {PLAYBOOK_FILE}")

    # The ansible API is built for the CLI, so these options must be present
    # in the global context before any executor is created.
    context.CLIARGS = ImmutableDict(
        check=False,
        diff=False,
        syntax=False,
        start_at_task=None,
        verbosity=3,
    )

    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources=(INVENTORY_FILE,))

    list_hosts = inventory.list_hosts()
    idx = 0
    for host in list_hosts:
        idx = idx + 1
        host = str(host)
        print(f"[{frame_info.function}] ({idx}) th, host : {host}")
    # NOTE(review): `host` keeps the value of the LAST inventory host after
    # this loop and is used in the SQL below; with an empty inventory it is
    # undefined — TODO confirm intended single-host usage.

    ansible_run_start_time = time.time()
    print(f"[{frame_info.function}] ansible_run_start_time : {ansible_run_start_time}")

    extra_vars = {}
    return_code, results = ansible_run(loader, inventory, PLAYBOOK_FILE, extra_vars)
    # Fixed: these two prints were missing the f-prefix and printed the
    # literal "{frame_info.function}" text.
    print(f"[{frame_info.function}] return_code : ", return_code)
    print(f"[{frame_info.function}] results : ")
    print(results)

    if return_code == 0:
        for result in results:
            if 'ansible_raw_data' in result._result.keys():
                system_id = str(result._host)
                print(system_id)
                raw_ansible_facts = result._result["ansible_raw_data"]["ansible_facts"]
                print(f"raw_ansible_facts : {raw_ansible_facts}")
                raw_changed = result._result["ansible_raw_data"]["changed"]
                print(f"raw_changed : {raw_changed}")
                raw_cmd = result._result["ansible_raw_data"]["cmd"]
                print(f"raw_cmd : {raw_cmd}")
                raw_start = result._result["ansible_raw_data"]["start"]
                print(f"raw_start : {raw_start}")
                raw_end = result._result["ansible_raw_data"]["end"]
                print(f"raw_end : {raw_end}")
    else:
        for result in results:
            result_msg = result._result
            print(f"Error : {result_msg}")

    # NOTE(review): uses the LAST `result` from the loops above — presumably
    # a single-host run; verify against caller.
    split_lines = result._result["ansible_raw_data"]["stdout_lines"]
    line_num = len(split_lines)
    print(f"line_num = {line_num}")

    print(f"===================================== Result =======================================")
    i = 1
    for split_line in split_lines:
        # Each line looks like netstat unix-socket output:
        #   proto ref_cnt flags types state [i_node] [path]
        tmp_line = split_line.split(' ', 2)
        proto = tmp_line[0]
        ref_cnt = tmp_line[1]
        tmp_line = tmp_line[2].split(' ', 2)
        if 'ACC' in tmp_line[0]:
            flags = 'ACC'
        else:
            flags = ''
        types = tmp_line[1].lstrip()
        tmp_line = tmp_line[2].split(' ')
        state = tmp_line[0].strip()
        # The remaining fields are optional; branch on how many tokens remain.
        if len(tmp_line) < 3:
            i_node = -1
            path = ""
        elif len(tmp_line) == 3:
            if len(tmp_line[1].lstrip()) < 1:
                temp_data = tmp_line[2].lstrip()
                if temp_data.isalpha():
                    i_node = -1
                    path = temp_data
                else:
                    i_node = int(temp_data)
                    path = ""
            else:
                temp_data = tmp_line[1].lstrip()
                if temp_data.isalpha():
                    i_node = -1
                    path = temp_data
                else:
                    i_node = int(temp_data)
                    path = tmp_line[2].lstrip()
        elif len(tmp_line) == 4:
            temp_data = tmp_line[2].lstrip()
            if temp_data.isalpha():
                i_node = -1
                path = temp_data
            else:
                if len(temp_data) > 1:
                    temp_val = tmp_line[3].lstrip()
                    if temp_val.isalpha():
                        i_node = -1
                        path = temp_val
                    else:
                        i_node = int(temp_val)
                        path = ""
                else:
                    i_node = -1
                    path = ""
        elif len(tmp_line) == 5:
            temp_data = tmp_line[3].lstrip()
            if temp_data.isalpha():
                i_node = -1
                path = temp_data
            else:
                if len(temp_data) > 1:
                    temp_val = tmp_line[4].lstrip()
                    if temp_val.isalpha():
                        i_node = -1
                        path = temp_val
                    else:
                        if len(temp_val) > 1 and temp_val.isalpha():
                            i_node = -1
                            path = temp_val
                        else:
                            i_node = -1
                            path = ""
                else:
                    i_node = -1
                    path = ""
        else:
            # NOTE(review): on this branch i_node/path keep their values from
            # the previous iteration (or are undefined on the first one).
            print(f"Error. unknown tmp_line, tmp_line_len={len(tmp_line)}")

        # SECURITY NOTE(review): SQL built by string concatenation — injectable
        # if `path`/`types`/`state` ever contain quotes; parameterize inside
        # run_sql if its API allows.
        sql_insert = (
            "INSERT INTO tb_e2eo_ansible_result_raw(workflow_id, system_id, "
            "requested_at, result_idx, protocol, ref_cnt, flags, types, state, "
            "i_node, path) VALUES "
            "('" + workflow_id + "','" + host + "', NOW(), " + str(i) + ", '"
            + proto + "', " + str(ref_cnt) + ", '" + flags + "', '" + types
            + "', '" + state + "', " + str(i_node) + ", '" + path + "');"
        )
        run_sql(sql_insert)
        print(f"{i} th, proto={proto}, ref_cnt={ref_cnt}, flags={flags}, types={types}, state={state}, i_node={i_node}, path={path}")
        i = i + 1
    print(f"=======================================================================================")
    return
def test_len(self):
    """An ImmutableDict reports the same length as its source mapping."""
    sample = ImmutableDict({1: 2, 'a': 'b'})
    assert len(sample) == 2
"""A sample callback plugin used for performing an action as results come in If you want to collect all results into a single object for processing at the end of the execution, look into utilizing the ``json`` callback plugin or writing your own custom callback plugin """ def v2_runner_on_ok(self, result, **kwargs): """Print a json representation of the result This method could store the result in an instance attribute for retrieval later """ host = result._host print(json.dumps({host.name: result._result}, indent=4)) # since the API is constructed for CLI it expects certain options to always be set in the context object context.CLIARGS = ImmutableDict(connection='local', module_path=None, forks=10, become=None, become_method=None, become_user='******', check=False, diff=False) # initialize needed objects loader = DataLoader() # Takes care of finding and reading yaml, json and ini files passwords = dict(vault_pass='******') # Instantiate our ResultCallback for handling results as they come in. Ansible expects this to be one of its main display outlets results_callback = ResultCallback() # create inventory, use path to host config file as source or hosts in a comma separated string inventory = InventoryManager(loader=loader, sources='localhost,') # variable manager takes care of merging all the different sources to give you a unified view of variables available in each context variable_manager = VariableManager(loader=loader, inventory=inventory) # create data structure that represents our play, including tasks, this is basically what our YAML loader does internally.
def test_container(self):
    """Tuple keys resolve to their stored values via __getitem__."""
    sample = ImmutableDict({(1, 2): ['1', '2']})
    assert sample[(1, 2)] == ['1', '2']
def test_from_kwargs(self):
    """Keyword arguments populate the ImmutableDict like dict(**kwargs)."""
    sample = ImmutableDict(a=1, b=2)
    assert frozenset(sample.items()) == frozenset((('a', 1), ('b', 2)))
# connection has three options: 'local' runs on this machine, 'ssh' runs over
# the ssh protocol, and 'smart' auto-selects between them.
# module_path: path(s) to custom modules; the defaults are
# ~/.ansible/plugins/modules and /usr/share/ansible/plugins/modules/.
# forks: number of worker subprocesses.
# become / become_method / become_user control privilege escalation:
#   become=None keeps the current identity (no escalation needed when already
#   logged in as root); become_method is e.g. 'sudo'; become_user is the
#   account to run as (default root).
# check=False: apply changes for real (no dry-run).
# diff=False: do not show file/template diffs for (small) changed files.
context.CLIARGS = ImmutableDict(connection='smart',
                                module_path=['/to/mymodules'],
                                forks=10,
                                become=None,
                                become_method=None,
                                become_user=None,
                                check=False,
                                diff=False)

# Needed objects: DataLoader parses yaml/json/ini files.
loader = DataLoader()

# To supply a vault password interactively: ansible-playbook site.yml --ask-vault-pass
# Empty dict means no passwords (vault, remote login, escalation, ...).
passwords = {}

# Next step elsewhere: instantiate a result callback so results are handled as
# they arrive — Ansible expects that to be one of its main display outlets.
def install_dns_playbook():
    """Flask endpoint: write a one-host inventory from the request JSON and run
    the requested playbook against it, with either password or private-key SSH
    auth depending on whether ``passwd`` is supplied.

    Returns a JSON body with the PlaybookExecutor return status.
    """
    content = request.get_json(force=True)
    tagsexc = content['tagsexc']
    ipmanage = content['ipmanage']
    keyfile = content['fileprivatekey']
    play = content['play']
    passwd = content['passwd']
    user = content['user']
    # Fixed: distinct name for the inventory *path*; the original reused
    # `inventory` for both the path and the InventoryManager.
    inventory_path = content['inventory']
    logging.info('running ansible-playbook ' + tagsexc + ' ' + ipmanage)

    # Fixed: use a context manager so the inventory file is closed even if a
    # write raises mid-way.
    with open(inventory_path, 'w') as inv_file:
        inv_file.write('[hostexec]\n')
        inv_file.write(ipmanage)

    loader = DataLoader()
    if passwd:
        # Password-based SSH auth.
        # NOTE(review): extra_vars here is a *set* of "key=value" strings —
        # presumably consumed like CLI -e arguments; verify downstream.
        context.CLIARGS = ImmutableDict(
            tags={tagsexc},
            listtags=False,
            listtasks=False,
            listhosts=False,
            syntax=False,
            connection='ssh',
            module_path=None,
            forks=10,
            remote_user=user,
            private_key_file=None,
            ssh_common_args=None,
            ssh_extra_args=None,
            sftp_extra_args=None,
            scp_extra_args=None,
            become=True,
            become_method='sudo',
            become_user='******',
            verbosity=True,
            check=False,
            start_at_task=None,
            extra_vars={
                'ansible_ssh_user=' '******',
                'ansible_ssh_pass=' '******',
                'ansible_become_pass=' '******'
            })
    else:
        # Key-based SSH auth.
        context.CLIARGS = ImmutableDict(
            tags={tagsexc},
            listtags=False,
            listtasks=False,
            listhosts=False,
            syntax=False,
            connection='ssh',
            module_path=None,
            forks=10,
            remote_user=user,
            private_key_file=None,
            ssh_common_args=None,
            ssh_extra_args=None,
            sftp_extra_args=None,
            scp_extra_args=None,
            become=True,
            become_method='sudo',
            become_user='******',
            verbosity=True,
            check=False,
            start_at_task=None,
            extra_vars={
                'ansible_ssh_user=' '******',
                'ansible_ssh_private_key_file=' + keyfile
            })

    inventory = InventoryManager(loader=loader, sources=inventory_path)
    variable_manager = VariableManager(
        loader=loader,
        inventory=inventory,
        version_info=CLI.version_info(gitinfo=False))
    pbex = PlaybookExecutor(playbooks=[play],
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            passwords={})
    results = pbex.run()
    db.session.commit()
    return jsonify({'status': results})
def __init__(
        self,
        connection='local',  # connection mode: 'local' runs here, 'smart' uses ssh
        remote_user=None,  # ssh user
        remote_password=None,  # ssh password: a dict whose key must be conn_pass
        private_key_file=None,  # path to a custom private key
        sudo=None,
        sudo_user=None,
        ask_sudo_pass=None,
        module_path=None,  # optional path to custom modules
        become=None,  # whether to escalate privileges
        become_method=None,  # escalation method, default sudo (can be su)
        become_user=None,  # user to become after escalation (not the login user)
        check=False,
        diff=False,
        listhosts=None,
        listtasks=None,
        listtags=None,
        verbosity=3,
        syntax=None,
        start_at_task=None,
        inventory=None):
    """Initialise default option values; any of them can be overridden by
    passing the corresponding keyword argument.
    """
    context.CLIARGS = ImmutableDict(
        connection=connection,
        remote_user=remote_user,
        private_key_file=private_key_file,
        sudo=sudo,
        sudo_user=sudo_user,
        ask_sudo_pass=ask_sudo_pass,
        module_path=module_path,
        become=become,
        become_method=become_method,
        become_user=become_user,
        verbosity=verbosity,
        listhosts=listhosts,
        listtasks=listtasks,
        listtags=listtags,
        syntax=syntax,
        start_at_task=start_at_task,
    )

    # Fall back to "localhost," when no inventory was given.  The value may be
    # an inventory file path, or (for testing only) a comma-separated host
    # string such as "1.1.1.1," (a single IP needs the trailing comma) or
    # "1.1.1.1, 2.2.2.2".
    self.inventory = inventory if inventory else "localhost,"

    # Data parser for yaml/json/ini.
    self.loader = DataLoader()
    # Inventory object built from the chosen source.
    self.inv_obj = InventoryManager(loader=self.loader, sources=self.inventory)
    # Passwords (ssh password dict, see remote_password above).
    self.passwords = remote_password
    # Callback plugin instance that collects results.
    self.results_callback = ResultCallback()
    # Variable manager merging all variable sources.
    self.variable_manager = VariableManager(self.loader, self.inv_obj)
def handle(self, *args, **options):
    """Run an ad-hoc 'setup' (fact-gathering) play against a fixed host list,
    collecting results through ResultsCollectorJSONCallback."""
    host_list = ['192.168.37.100', '192.168.37.31']

    # The API is built for the CLI, so certain options must always be present
    # in the context object.
    context.CLIARGS = ImmutableDict(
        connection='smart',
        module_path=['/to/mymodules', '/usr/share/ansible'],
        forks=10,
        become=None,
        become_method=None,
        become_user=None,
        check=False,
        diff=False,
        verbosity=1)

    # A single host needs a trailing comma so InventoryManager treats the
    # string as a host list rather than a file path — see
    # https://github.com/ansible/ansible/blob/devel/lib/ansible/inventory/manager.py#L204
    sources = ','.join(host_list)
    if len(host_list) == 1:
        sources += ','

    # DataLoader finds and reads yaml, json and ini files.
    loader = DataLoader()
    passwords = {'vault_pass': '******'}

    # Ansible expects this callback to be one of its main display outlets.
    results_callback = ResultsCollectorJSONCallback()

    inventory = InventoryManager(loader=loader, sources=sources)

    # Unified view of variables from every source.
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    # IMPORTANT: the TaskQueueManager also registers library dirs with the
    # module loader, so it must exist before Play.load() is called.
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        stdout_callback=results_callback,  # custom callback instead of ``default``
    )

    # Data structure equivalent to what the YAML loader builds internally.
    play_source = {
        'name': "Ansible Play",
        'hosts': host_list,
        'gather_facts': 'no',
        'tasks': [
            {'action': {'module': 'setup', 'args': {}}},
        ],
    }

    # Play objects use .load (not init); this also creates the task objects.
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    try:
        # The interesting data is delivered to the callback's methods.
        result = tqm.run(play)
    finally:
        # Always clean up child processes and their IPC structures.
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()
        # Remove the ansible tmpdir.
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
def index():
    """Flask endpoint: fetch AWS credentials from the credential service,
    inject them into the posted play, run it, and (unless stopping) report the
    instance's new public IP and update /etc/ansible/hosts.

    Fixed: converted Python-2 print statements to print() calls (the rest of
    this file is Python 3) and narrowed the bare ``except:`` to KeyError.
    """
    if request.method == 'POST':
        final = request.data
        beta = ast.literal_eval(str(final))
        try:
            d = request.headers  # accepting input in json format
            acc_id = d['cb-user-provider-account']  # account id
            cred_id = d['cb-provider-credential-refid']  # credential id
        except KeyError:
            print()
            print('NO HEADER FILE SUPPLIED CONTAINING ACCOUNT-ID AND CREDENTIAL-ID')
            print()
            sys.exit()
        # SECURITY NOTE(review): hard-coded API key and username — move to
        # configuration / environment variables.
        api_key = '97ea0f84-d73f-5533-954f-22a4d98ae619'
        username = '******'
        head = {'Apikey': api_key, 'Username': username}
        request_url = ('https://partner-dev2-api.gravitant.net/cb-credential-service/api/v2.0/accounts/'
                       + acc_id + '/credentials?credentialId=' + cred_id)
        response = requests.get(request_url, headers=head)
        if str(response.status_code)[0] == '4':
            print()
            print('INVALID CREDENTIALS i.e. ANY OF (APIKEY, USERNAME, ACCOUNT_ID, CREDENTIAL_ID)')
            print()
            sys.exit()
        if str(response.status_code)[0] == '5':
            print()
            print('AWS SERVER ERROR, TRY AGAIN LATER')
            print()
            sys.exit()

        dic = {}
        dic['aws_access_key'] = (
            json.loads(str(response.content))
        )['credentials'][0]['passwordFields']['accessKey']
        dic['aws_secret_key'] = (
            json.loads(str(response.content))
        )['credentials'][0]['passwordFields']['secretKey']
        print(beta)
        beta['tasks'][0]['ec2']['aws_access_key'] = dic['aws_access_key']
        beta['tasks'][0]['ec2']['aws_secret_key'] = dic['aws_secret_key']
        print(beta)

        context.CLIARGS = ImmutableDict(module_path=['/to/mymodules'],
                                        forks=10,
                                        become=None,
                                        become_method=None,
                                        become_user=None,
                                        check=False,
                                        diff=False,
                                        inventory_path=['/etc/ansible/hosts'])
        # DataLoader finds and reads yaml, json and ini files.
        loader = DataLoader()
        passwords = dict(vault_pass='******')
        inventory = InventoryManager(loader=loader, sources='akhilaws,')
        # Unified view of variables from every source.
        variable_manager = VariableManager(loader=loader, inventory=inventory)
        play = Play().load(beta,
                           variable_manager=variable_manager,
                           loader=loader)

        tqm = None
        try:
            tqm = TaskQueueManager(inventory=inventory,
                                   variable_manager=variable_manager,
                                   loader=loader,
                                   passwords=passwords)
            # The interesting data is delivered to the callback's methods.
            result = tqm.run(play)
        finally:
            # Always clean up child processes and their IPC structures.
            if tqm is not None:
                tqm.cleanup()
            # Remove the ansible tmpdir.
            shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

        # When stopping the server there is no new IP to fetch.
        if 'stop' not in str(final):
            print()
            print('Processing IP and printing it...')
            new_ip = os.popen(
                'aws ec2 describe-instances --instance-ids i-0b7ca4b790357709e --region us-east-2 --query \'Reservations[*].Instances[*].PublicIpAddress\' --output text'
            ).read()
            print(new_ip)
            print()
            print('Updating hosts file by saving in the new ip...')
            with open('/etc/ansible/hosts') as var1:
                lines = var1.readlines()
            with open('/etc/ansible/hosts', 'w') as var2:
                var2.writelines(lines[0])
                var2.writelines('ec2-user@' + str(new_ip))
            print('hosts file updated. ^_^')
            print()
            return jsonify({'Successful': new_ip})
        return jsonify({'stop found': 'no new ip returned'})
    else:
        return jsonify(
            {'get_method': 'kindly switch to post method to perform d2 ops'})
def run_ansible(inventory_filename, hosts="all", forks=10):
    """Run ansible with the provided inventory file and host group."""
    # The API is built for the CLI, so certain options must always be present
    # in the context object.
    context.CLIARGS = ImmutableDict(
        connection="ssh",
        module_path=[],
        forks=forks,
        become=None,
        become_method="sudo",
        become_user=None,
        check=False,
        diff=False,
        verbosity=0,
    )

    # DataLoader finds and reads yaml, json and ini files.
    loader = DataLoader()
    passwords = {"vault_pass": "******"}  # nosec

    # Callback that handles results as they arrive; Ansible expects this to be
    # one of its main display outlets.
    results_callback = ResultCallback()

    # Inventory source: a host-config file path or a comma-separated string.
    logging.debug(f"Reading inventory from: {inventory_filename}")
    inventory = InventoryManager(loader=loader, sources=inventory_filename)

    # Unified view of variables from every source.
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    # Stat three files on each host; equivalent to what the YAML loader builds.
    stat_paths = (LAST_SCAN_LOG_FILENAME, LAST_DETECTION_FILENAME,
                  CLAMAV_DB_FILENAME)
    play_source = {
        "name": "Ansible Play",
        "hosts": hosts,
        "gather_facts": "yes",
        "tasks": [
            {"action": {"module": "stat", "get_checksum": False, "path": p}}
            for p in stat_paths
        ],
    }

    # Play objects use .load (not init); this also creates the task objects.
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            passwords=passwords,
            stdout_callback=results_callback,  # use our custom callback
        )
        logging.debug(f"Starting task queue manager with forks={forks}.")
        tqm.run(play)
    finally:
        # Always clean up child processes and their IPC structures.
        if tqm is not None:
            logging.debug(f"Cleaning up task queue manager.")
            tqm.cleanup()
        # Remove the ansible temporary directory.
        logging.debug(
            f"Cleaning up temporary file in {ANSIBLE_CONST.DEFAULT_LOCAL_TMP}")
        shutil.rmtree(ANSIBLE_CONST.DEFAULT_LOCAL_TMP, True)

    return results_callback.results
def run(self):
    """Run the Application.

    Builds the ansible objects, executes a single play made of
    ``self.playbook_tasks()`` against ``self._sources``, and always cleans up
    the task queue manager and temp files.

    Fixed: the original called ``tqm.cleanup()`` unconditionally in the
    ``finally`` block and then a second time via a trailing
    ``if tqm is not None: tqm.cleanup()`` — the redundant second call is
    removed.
    """
    # The API is built for the CLI, so certain options must always be present
    # in the context object.
    context.CLIARGS = ImmutableDict(
        connection="smart",
        module_path=[get_path("./modules"), "/usr/share/ansible"],
        forks=10,
        become=None,
        become_method=None,
        become_user=None,
        check=False,
        diff=False,
        verbosity=3,
    )

    # DataLoader finds and reads yaml, json and ini files.
    loader = DataLoader()
    passwords = dict(vault_pass="******")

    # Callback that handles results as they arrive.
    if self.args.measurement:
        results_callback = MeasurementsResultCallback(plugins=self._plugins)
    else:
        results_callback = ResultCallback(plugins=self._plugins)

    # Inventory source: a host-config file path or a comma-separated string.
    inventory = InventoryManager(loader=loader, sources=self._sources)

    # Unified view of variables from every source.
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    # IMPORTANT: the TaskQueueManager also registers library dirs with the
    # module loader, so it must be created before Play.load().
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        stdout_callback=results_callback,  # custom callback instead of ``default``
    )

    # Data structure equivalent to what the YAML loader builds internally.
    play_source = dict(
        name="Ansible Play",
        hosts="all",
        gather_facts="yes",
        tasks=self.playbook_tasks(),
    )

    # Play objects use .load (not init); this also creates the task objects.
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    try:
        tqm.load_callbacks()
        tqm.run(play)
        tqm.send_callback(
            "v2_playbook_on_stats",
            tqm._stats,  # pylint: disable=protected-access
        )
    finally:
        # Always clean up child processes and their IPC structures.
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()
        # Remove the ansible tmpdir.
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)  # pylint: disable=no-member
def playbook(playbooks, sources, **kwargs):
    """Execute a playbook against the given inventory sources.

    Returns ``(1, details)`` when any host failed or was unreachable (or when
    a path check fails, the checker's message is returned directly), otherwise
    ``(0, results)`` with the per-category host results.
    """
    context.CLIARGS = ImmutableDict(
        connection='smart',
        ask_pass=kwargs.get('ask_pass', False),
        private_key_file=kwargs.get('private_key_file', None),
        remote_user=kwargs.get('remote_user', None),
        module_path=[
            '/data/.virtualenvs/small_platform/lib/python3.6/site-packages/ansible'
        ],
        forks=kwargs.get('forks', 10),
        become=None,
        become_method=None,
        become_user=None,
        check=False,
        diff=False,
        timeout=kwargs.get('timeout', 10),
        syntax=None,
        start_at_task=None,
        extra_vars=kwargs.get('extra_vars', None),
        tags=kwargs.get('tags', ['all']),
        verbosity=kwargs.get('verbosity', 0),
        listtasks=kwargs.get('listtasks', None),
        listtags=kwargs.get('listtags', None),
        listhosts=kwargs.get('listhosts', None),
    )

    # Resolve both paths; a status of 1 means "not found" and the second
    # element is the error payload to hand back.
    status, playbooks = check_file_exists(playbooks,
                                          new_dir='/data/ansible/playbook')
    if status == 1:
        return playbooks
    status, sources = check_file_exists(sources,
                                        new_dir='/data/ansible/playbook/hosts')
    if status == 1:
        return sources

    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources=sources)
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    passwords = dict(vault_pass='******')

    pbex = PlaybookExecutor(playbooks=(playbooks, ),
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            passwords=passwords)

    # Swap in our JSON-collecting callback before running.
    results_callback = JSONCallback()
    if pbex._tqm:
        pbex._tqm._stdout_callback = results_callback
    pbex.run()

    results = {
        'host_ok': results_callback.host_ok,
        'host_failed': results_callback.host_failed,
        'host_unreachable': results_callback.host_unreachable,
        'host_skipped': results_callback.host_skipped,
    }
    host_failed = results['host_failed']
    host_unreachable = results['host_unreachable']
    if host_failed or host_unreachable:
        return 1, {
            'host_failed': host_failed,
            'host_unreachable': host_unreachable
        }
    return 0, results
# NOTE(review): 'my_hots.txt' looks like a typo for 'my_hosts.txt' — confirm
# the actual inventory filename before changing it.
inventory_manager = InventoryManager(loader=loader, sources=['my_hots.txt'])

###############################
# VariableManager
###############################
variable_manager = VariableManager(loader=loader,
                                   inventory=inventory_manager)

# Approach 2: drive PlaybookExecutor through context.CLIARGS.
context.CLIARGS = ImmutableDict(
    connection='smart',
    module_path=None,
    verbosity=5,
    forks=10,
    become=None,
    become_method=None,
    become_user=None,
    check=False,
    diff=False,
    syntax=None,
    start_at_task=None,
    gather_facts='no',
)

passwords = {}
play_book = PlaybookExecutor(playbooks=['testplaybook.yml'],
                             inventory=inventory_manager,
                             variable_manager=variable_manager,
                             loader=loader,
                             passwords=passwords)
from ansible import context
from ansible.cli import CLI
from ansible.module_utils.common.collections import ImmutableDict
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
from ansible.vars.manager import VariableManager

# Build the objects needed to execute the MariaDB provisioning playbook.
loader = DataLoader()
inventory = InventoryManager(loader=loader,
                             sources=('/ansible/provision/inventory/myhost'))
context.CLIARGS = ImmutableDict(tags={},
                                listtags=False,
                                listtasks=False,
                                listhosts=False,
                                syntax=False,
                                connection='ssh',
                                module_path=None,
                                forks=100,
                                remote_user='******',
                                private_key_file=None,
                                ssh_common_args=None,
                                ssh_extra_args=None,
                                sftp_extra_args=None,
                                scp_extra_args=None,
                                become=True,
                                become_method='sudo',
                                become_user='******',
                                verbosity=True,
                                check=False,
                                start_at_task=None)
variable_manager = VariableManager(loader=loader,
                                   inventory=inventory,
                                   version_info=CLI.version_info(gitinfo=False))
pbex = PlaybookExecutor(playbooks=['/ansible/provision/maria.yml'],
                        inventory=inventory,
                        loader=loader,
                        variable_manager=variable_manager,
                        passwords={})
results = pbex.run()

print('Services that can be installed by this script:', '\n',
      'keystone, nova ,compute, cinder ,horizon ', '\n')
first_input = input('Enter the service to be installed:')
services = ('mohammad', 'ali')
} # Get the cli options from the current context options = context.CLIARGS._store play_options = copy(options) # Ensure --check option is disabled if play_options.get('check') != False: play_options['check'] = False # Ensure all tags are cleared if play_options.get('tags'): play_options['tags'] = () if play_options.get('skip_tags'): play_options['skip_tags'] = () # Override the current context context.CLIARGS = ImmutableDict(**play_options) # Create play object, playbook objects use .load instead of init or new methods, # this will also automatically create the task objects from the info provided in play_source play = Play().load(play_source, variable_manager=variable_manager, loader=loader) # Run it - instantiate task queue manager, which takes care of forking and setting up all objects to iterate over host list and tasks tqm = None try: tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, passwords=passwords, stdout_callback=results_callback)
def prepare_connection(ssid, bssid, interface, auth):
    """
    Prepare a connection to a given ssid and bssid using wpa_supplicant
    Configure and connect on given interface
    Decide connect method and config file based on auth

    Fixed: the wpa_supplicant control-dir path and the netifaces lookups were
    hard-coded to 'wlan0' even though the interface is a parameter; both now
    use ``interface`` (identical behavior for the previous wlan0 callers).
    """
    connect_msg = "Connecting to " + ssid + " on " + bssid
    syslog.syslog(syslog.LOG_LOCAL3 | syslog.LOG_INFO, connect_msg)

    # Defaults, overridable via auth['connection_flags'].
    paranoid = False
    pscheduler_restart = False
    apache_restart = False
    postgres_restart = False
    wait_time = 0
    if 'paranoid' in auth['connection_flags']:
        paranoid = auth['connection_flags']['paranoid']
    if 'pscheduler_restart' in auth['connection_flags']:
        pscheduler_restart = auth['connection_flags']['pscheduler_restart']
    if 'apache_restart' in auth['connection_flags']:
        apache_restart = auth['connection_flags']['apache_restart']
    if 'postgres_restart' in auth['connection_flags']:
        postgres_restart = auth['connection_flags']['postgres_restart']
    if 'wait_time' in auth['connection_flags']:
        wait_time = auth['connection_flags']['wait_time']

    start_time = time.time()

    # Determine auth method
    if auth['type'] == 'MacAddress':
        print('Connect to MSetup')
        exit()
    elif auth['type'] == 'User':
        wpa_supp_path = '/etc/wpa_supplicant/wpa_supplicant_' + ssid + '.conf'
        if DEBUG:
            print('User auth')

        # Format SSID and BSSID lines for the wpa_supplicant config.
        ssid_line = ' ssid="' + ssid + '"'
        bssid_line = ' bssid=' + bssid

        # Interface-specific ip commands.
        bring_down = ('ip link set ' + interface + ' down')
        flush_config = ('ip addr flush dev ' + interface)
        bring_up = ('ip link set ' + interface + ' up')

        # wpa_supplicant / dhclient commands for this interface.
        run_wpa_supplicant = ('wpa_supplicant -B -c ' + wpa_supp_path +
                              ' -i ' + interface)
        dhclient = ('dhclient ' + interface)

        # The API is built for the CLI, so certain options must be present in
        # the context object.
        context.CLIARGS = ImmutableDict(connection='local',
                                        module_path=['/to/mymodules'],
                                        forks=10,
                                        become=None,
                                        become_method=None,
                                        become_user=None,
                                        check=False,
                                        diff=False)

        # DataLoader finds and reads yaml, json and ini files.
        loader = DataLoader()
        passwords = dict(vault_pass='******')
        # Callback that handles results as they arrive.
        results_callback = ResultCallback()
        inventory = InventoryManager(loader=loader, sources='localhost,')
        variable_manager = VariableManager(loader=loader, inventory=inventory)

        # Playbook to connect to a bssid
        play_source = dict(
            name="Ansible Play",
            hosts='localhost',
            gather_facts='no',
            tasks=[
                # Check for wpa_supplicant file
                dict(action=dict(module='stat', path=wpa_supp_path),
                     register='wpa_exists'),
                # Exit play if wpa_supplicant is not found
                dict(action=dict(
                    module='debug',
                    msg='Could not find wpa_supplicant with given ssid'),
                     when='not wpa_exists.stat.exists'),
                dict(action=dict(
                    module='fail',
                    msg='Could not find wpa_supplicant for given ssid'),
                     when='not wpa_exists.stat.exists'),
                dict(action=dict(module='meta', args='end_play'),
                     when='not wpa_exists.stat.exists'),
                # Take down pscheduler services if in paranoid mode
                dict(action=dict(module='systemd',
                                 name='pscheduler-archiver',
                                 state='stopped'),
                     when=pscheduler_restart),
                dict(action=dict(module='systemd',
                                 name='pscheduler-runner',
                                 state='stopped'),
                     when=pscheduler_restart),
                dict(action=dict(module='systemd',
                                 name='pscheduler-scheduler',
                                 state='stopped'),
                     when=pscheduler_restart),
                dict(action=dict(module='systemd',
                                 name='pscheduler-ticker',
                                 state='stopped'),
                     when=pscheduler_restart),
                # Stop apache if toggled
                dict(action=dict(module='systemd',
                                 name='apache2',
                                 state='stopped'),
                     when=apache_restart),
                # Stop postgres if toggled
                dict(action=dict(module='systemd',
                                 name='postgresql',
                                 state='stopped'),
                     when=postgres_restart),
                # Remove default route to make dhclient happy
                dict(action=dict(module='command',
                                 args='ip route del default'),
                     ignore_errors='yes'),
                # Remove WiFi interface config (was hard-coded to wlan0)
                dict(action=dict(module='file',
                                 path='/var/run/wpa_supplicant/' + interface,
                                 state='absent')),
                # Kill wpa_supplicant
                dict(action=dict(module='command',
                                 args='killall wpa_supplicant'),
                     ignore_errors='yes'),
                # Bring WiFi interface down
                dict(action=dict(module='command', args=bring_down)),
                # Flush WiFi interface config
                dict(action=dict(module='command', args=flush_config)),
                # Bring interface back up
                dict(action=dict(module='command', args=bring_up)),
                # Add SSID to wpa_supplicant
                dict(action=dict(module='lineinfile',
                                 path=wpa_supp_path,
                                 regexp='^(.*)ssid=(.*)$',
                                 line=ssid_line)),
                # Add BSSID to wpa_supplicant
                dict(action=dict(module='lineinfile',
                                 path=wpa_supp_path,
                                 regexp='^(.*)bssid=(.*)$',
                                 line=bssid_line)),
                # Connect to WiFi
                dict(action=dict(module='command', args=run_wpa_supplicant)),
                # Get an IP
                dict(action=dict(module='command', args=dhclient)),
                # Bring pScheduler services back
                dict(action=dict(module='systemd',
                                 name='pscheduler-archiver',
                                 state='started'),
                     when=pscheduler_restart),
                dict(action=dict(module='systemd',
                                 name='pscheduler-runner',
                                 state='started'),
                     when=pscheduler_restart),
                dict(action=dict(module='systemd',
                                 name='pscheduler-scheduler',
                                 state='started'),
                     when=pscheduler_restart),
                dict(action=dict(module='systemd',
                                 name='pscheduler-ticker',
                                 state='started'),
                     when=pscheduler_restart),
                # Start apache if toggled
                dict(action=dict(module='systemd',
                                 name='apache2',
                                 state='started'),
                     when=apache_restart),
                # Start postgres if toggled
                dict(action=dict(module='systemd',
                                 name='postgresql',
                                 state='started'),
                     when=postgres_restart),
            ])

        # Create the playbook
        play = Play().load(play_source,
                           variable_manager=variable_manager,
                           loader=loader)

        # Run the playbook
        connected = True
        tqm = None
        try:
            tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                passwords=passwords,
                stdout_callback=results_callback,  # custom callback
            )
            result = tqm.run(play)
            if result != 0:
                connected = False
        finally:
            # Always clean up child processes and their IPC structures.
            if tqm is not None:
                tqm.cleanup()
            # Remove ansible tmpdir
            shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

        # Wait for pscheduler to come back
        if wait_time != 0:
            print('Sleeping for', wait_time)
            time.sleep(wait_time)

        end_time = time.time()
        elapsed_time = end_time - start_time

        # Check if connection is successful
        if paranoid:
            connected = bssid_validator.validate_connect(bssid)

        # Get ip of the interface we just configured (was hard-coded wlan0).
        ni.ifaddresses(interface)
        ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']

        connection_info = {}
        connection_info['ssid'] = ssid
        connection_info['bssid'] = bssid
        connection_info['time'] = elapsed_time
        connection_info['new_ip'] = ip
        connection_info['operation'] = 'connection'
        connection_info['connected'] = connected
        json_info = json.dumps(connection_info)

        # Log status of connection
        if connected:
            log_msg = 'Connected: ' + json_info
            syslog.syslog(syslog.LOG_LOCAL3 | syslog.LOG_INFO, log_msg)
        else:
            log_msg = 'Failed to connect: ' + json_info
            syslog.syslog(syslog.LOG_LOCAL3 | syslog.LOG_INFO, log_msg)
        return json_info
def _get_playbook_executor(self, variables, verbosity):
    """Build a ``PlaybookExecutor`` for ``self._playbook`` targeting localhost.

    Handles API differences across Ansible releases (pre-2.4, pre-2.8,
    pre-2.9) and patches ansible's global display object so that our
    pseudo-play does not emit noise.

    :param variables: iterable of ``(name, value)`` pairs set as host
        variables on localhost before execution
    :param verbosity: our own verbosity level; ansible gets one less
        (``-v`` to us means "ansible non-debug", so ``-vv`` becomes
        ansible's ``-v``)
    :return: a configured ``PlaybookExecutor`` instance
    """
    # -v given to us enables ansibles non-debug output.
    # So -vv should become ansibles -v.
    __main__.display.verbosity = max(0, verbosity - 1)

    # Make sure ansible does not output warnings for our paternoster
    # pseudo-play: wrap the real warning function and drop the
    # "Could not match supplied host pattern" message.
    # NOTE(review): this mutates global state on __main__ and is not
    # restored afterwards — presumably intentional for a one-shot run.
    __main__._real_warning = __main__.display.warning

    def display_warning(msg, *args, **kwargs):
        # Suppress only the host-pattern warning; pass everything else through.
        if not msg.startswith('Could not match supplied host pattern'):
            __main__._real_warning(msg, *args, **kwargs)

    __main__.display.warning = display_warning

    loader = DataLoader()

    # Inventory/variable-manager wiring changed in ansible 2.4:
    # before 2.4 the VariableManager existed standalone and the
    # Inventory was attached to it; from 2.4 on the InventoryManager
    # is created first and handed to the VariableManager.
    if ANSIBLE_VERSION < LooseVersion('2.4.0'):
        variable_manager = VariableManager()
        inventory = Inventory(loader=loader,
                              variable_manager=variable_manager,
                              host_list='localhost,')
        variable_manager.set_inventory(inventory)
    else:
        inventory = InventoryManager(loader=loader, sources='localhost,')
        variable_manager = VariableManager(loader=loader, inventory=inventory)

    # From 2.9 on, set_host_variable() expects the host *name* rather
    # than the host object.
    if ANSIBLE_VERSION < LooseVersion('2.9.0'):
        localhost = inventory.localhost
    else:
        localhost = inventory.localhost.get_name()

    # force ansible to use the current python executable. Otherwise
    # it can end up choosing a python3 one (named python) or a different
    # python 2 version
    variable_manager.set_host_variable(localhost, 'ansible_python_interpreter',
                                       sys.executable)

    for name, value in variables:
        variable_manager.set_host_variable(localhost, name, value)

    # CLI options moved from a namedtuple-style Options object to the
    # global immutable context.CLIARGS in ansible 2.8.
    if ANSIBLE_VERSION < LooseVersion('2.8.0'):
        cli_options = Options(
            become=None,
            become_method=None,
            become_user=None,
            check=False,
            connection='local',
            diff=False,
            forks=1,
            listhosts=False,
            listtags=False,
            listtasks=False,
            module_path=None,
            syntax=False,
        )
    else:
        cli_options = ImmutableDict(
            become=None,
            become_method=None,
            become_user=None,
            check=False,
            connection='local',
            diff=False,
            forks=1,
            listhosts=False,
            listtags=False,
            listtasks=False,
            module_path=None,
            syntax=False,
            start_at_task=None,
        )

    if ANSIBLE_VERSION < LooseVersion('2.8.0'):
        pexec = PlaybookExecutor(
            playbooks=[self._playbook],
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=cli_options,
            passwords={},
        )
    else:
        # 2.8+ reads options from the global context instead of a kwarg.
        context.CLIARGS = cli_options
        pexec = PlaybookExecutor([self._playbook], inventory,
                                 variable_manager, loader, {})

    # Never leave .retry files behind for this synthetic run.
    ansible.constants.RETRY_FILES_ENABLED = False

    if not verbosity:
        # ansible doesn't provide a proper API to overwrite this,
        # if you're using PlaybookExecutor instead of initializing
        # the TaskQueueManager (_tqm) yourself, like in the offical
        # example.
        pexec._tqm._stdout_callback = MinimalAnsibleCallback()

    return pexec
def main():
    """Run ``/usr/bin/uptime`` on a small host list via the Ansible API
    and print per-host results grouped by ok / failed / unreachable."""
    target_hosts = ['localhost', 'www.example.com', 'www.google.com']

    # The Ansible API was built around the CLI, so it expects these
    # options to be present in the global context object.
    context.CLIARGS = ImmutableDict(connection='smart',
                                    module_path=['/usr/share/ansible'],
                                    forks=10, become=None,
                                    become_method=None, become_user=None,
                                    check=False, diff=False)

    # A trailing comma tells InventoryManager this is a literal host
    # list rather than an inventory file path — see
    # https://github.com/ansible/ansible/blob/devel/lib/ansible/inventory/manager.py#L204
    sources = ','.join(target_hosts)
    if len(target_hosts) == 1:
        sources += ','

    data_loader = DataLoader()
    connection_passwords = dict()

    # Our ResultsCollector receives results as they arrive; Ansible
    # treats it as one of its main display outlets.
    collector = ResultsCollector()

    inventory = InventoryManager(loader=data_loader, sources=sources)
    variable_manager = VariableManager(loader=data_loader, inventory=inventory)

    # The TaskQueueManager registers module library dir paths with the
    # module loader, so it MUST be created before Play.load() is called.
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=data_loader,
        passwords=connection_passwords,
        stdout_callback=collector,
    )

    # One play, one command task.
    play_definition = dict(
        name="Ansible Play",
        hosts=target_hosts,
        gather_facts='no',
        tasks=[dict(action=dict(module='command',
                                args=dict(cmd='/usr/bin/uptime')))],
    )
    play = Play().load(play_definition,
                       variable_manager=variable_manager,
                       loader=data_loader)

    try:
        result = tqm.run(play)
    finally:
        if tqm is not None:
            tqm.cleanup()
        if data_loader:
            data_loader.cleanup_all_tmp_files()

    print("UP ***********")
    for host, host_result in collector.host_ok.items():
        print('{0} >>> {1}'.format(host, host_result._result['stdout']))

    print("FAILED *******")
    for host, host_result in collector.host_failed.items():
        print('{0} >>> {1}'.format(host, host_result._result['msg']))

    print("DOWN *********")
    for host, host_result in collector.host_unreachable.items():
        print('{0} >>> {1}'.format(host, host_result._result['msg']))
def run_playbook(self, affected_host, host_file, playbook_path, namePlay, selectuser):
    """Run an Ansible playbook against ``affected_host`` and record PASS/FAIL.

    Exits the process if the playbook or host file is missing.  The
    numeric result of ``PlaybookExecutor.run()`` decides the outcome:
    2 means host failures and 4 means unreachable hosts (both logged as
    FAILED via ``ModuleResultsCollector``); anything else is PASSED.

    :param affected_host: host the play targets; exported to the play as
        the ``affected_hosts`` extra var
    :param host_file: path to the inventory file
    :param playbook_path: path to the playbook to execute
    :param namePlay: human-readable play name passed to the results collector
    :param selectuser: remote/become user for the SSH connection
    """
    if not os.path.exists(playbook_path):
        print('[INFO] The playbook does not exist: "{0}"'.format(
            playbook_path))
        sys.exit()
    if not os.path.isfile(host_file):
        print('[INFO] Host file does not exist: "{0}"'.format(host_file))
        sys.exit()
    try:
        loader = DataLoader()
        passwords = dict(vault_pass='******')
        # CLI-style options required by the Ansible API; connection is
        # forced to ssh with sudo escalation as ``selectuser``.
        context.CLIARGS = ImmutableDict(tags={},
                                        listtags=False,
                                        listtasks=False,
                                        listhosts=False,
                                        syntax=False,
                                        connection='ssh',
                                        module_path=None,
                                        forks=100,
                                        remote_user=selectuser,
                                        private_key_file=None,
                                        extra_vars=[{
                                            'affected_hosts': '' + affected_host + ''
                                        }],
                                        ssh_common_args=None,
                                        ssh_extra_args=None,
                                        sftp_extra_args=None,
                                        scp_extra_args=None,
                                        become=True,
                                        become_method='sudo',
                                        become_user=selectuser,
                                        verbosity=True,
                                        check=False,
                                        start_at_task=None)
        inventory = InventoryManager(loader=loader, sources=(host_file))
        variable_manager = VariableManager(
            loader=loader,
            inventory=inventory,
            version_info=CLI.version_info(gitinfo=False))
        pbex = PlaybookExecutor(playbooks=[playbook_path],
                                inventory=inventory,
                                variable_manager=variable_manager,
                                loader=loader,
                                passwords=passwords)
        try:
            # NOTE(review): the dumps/loads round-trip is redundant —
            # pbex.run() already returns a plain int — but is kept as-is.
            execPlay = json.dumps({affected_host: pbex.run()}, indent=4)
            check = json.loads(execPlay)
            # NOTE(review): IDOPeration is a module-level global not
            # visible in this file chunk — confirm it is initialized
            # before this method runs.
            if (check[affected_host] == 2):
                # rc 2: one or more hosts failed
                flow = ModuleResultsCollector()
                flow.failed(IDOPeration, affected_host, namePlay, 'FAILED')
                print('[INFO] Playbook failed: {}'.format(playbook_path))
            elif (check[affected_host] == 4):
                # rc 4: one or more hosts unreachable
                flow = ModuleResultsCollector()
                flow.failed(IDOPeration, affected_host, namePlay, 'FAILED')
                print('[INFO] Playbook unreachable: {}'.format(
                    playbook_path))
            else:
                flow = ModuleResultsCollector()
                flow.passed(IDOPeration, affected_host, namePlay, 'PASSED')
                print(
                    '[INFO] Playbook pass: {} [ok]'.format(playbook_path))
        except AnsibleError as ansError:
            flow = ModuleResultsCollector()
            flow.failed(IDOPeration, affected_host, namePlay, 'FAILED')
            print('[INFO] Ansible error: {}'.format(ansError))
    except Exception as e:
        # Catch-all: record the failure and report what broke.
        flow = ModuleResultsCollector()
        flow.failed(IDOPeration, affected_host, namePlay, 'FAILED')
        print('[INFO]: {} - Failed caused by: {}'.format(playbook_path, e))
def test_string(self):
    """Non-ASCII unicode keys and values survive construction and lookup."""
    mapping = ImmutableDict({u'café': u'くらとみ'})
    assert mapping[u'café'] == u'くらとみ'
def main():
    """Load plays from ``./mysql.yml``, run each against the host list via
    the Ansible API, and print per-host ok / failed / unreachable results.

    Fixes applied:
    - the non-list YAML branch appended to ``play_source`` (an undefined
      name, raising ``NameError``); it now appends to ``play_sources``.
    - ``tqm.cleanup()`` is performed once after ALL plays instead of
      inside the per-play loop, where it would have torn down the queue
      manager that later iterations still need.
    """
    host_list = ['172.17.0.10']

    # since the API is constructed for CLI it expects certain options to
    # always be set in the context object
    context.CLIARGS = ImmutableDict(connection='smart',
                                    private_key_file="~/.ssh/id_rsa",
                                    forks=10, become=None,
                                    become_method=None, become_user=None,
                                    check=False, diff=False, verbosity=0)

    # A trailing comma tells InventoryManager this is a host list,
    # not an inventory file path — required for
    # https://github.com/ansible/ansible/blob/devel/lib/ansible/inventory/manager.py#L204
    sources = ','.join(host_list)
    if len(host_list) == 1:
        sources += ','

    # Takes care of finding and reading yaml, json and ini files
    loader = DataLoader()
    passwords = dict(vault_pass='')

    # Ansible expects this callback to be one of its main display outlets.
    results_callback = ResultsCollectorJSONCallback()

    inventory = InventoryManager(loader=loader, sources=sources)
    variable_manager = VariableManager(loader=loader, inventory=inventory)

    # IMPORTANT: TaskQueueManager adds library dir paths to the module
    # loader, so it must be initialized before calling `Play.load()`.
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        # Use our custom callback instead of the ``default`` callback
        # plugin, which prints to stdout
        stdout_callback=results_callback,
    )

    # A playbook file may contain a single play (mapping) or several
    # plays (list); normalize both to a list of play dicts.
    play_sources = []
    with open('./mysql.yml') as f:
        data = yaml.load(f, yaml.SafeLoader)
        if isinstance(data, list):
            play_sources.extend(data)
        else:
            # FIX: was ``play_source.append(data)`` — undefined name.
            play_sources.append(data)

    try:
        for play_book in play_sources:
            play_book['hosts'] = host_list
            # Play objects use .load instead of init; this also creates
            # the task objects from the info provided in the play source.
            play = Play().load(play_book,
                               variable_manager=variable_manager,
                               loader=loader)
            # most interesting data for a play is actually sent to the
            # callback's methods
            result = tqm.run(play)
    finally:
        # we always need to cleanup child procs and the structures we
        # use to communicate with them
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

    # Remove ansible tmpdir
    shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    print("UP ***********")
    for host, result in results_callback.host_ok.items():
        print('{0} >>> {1}'.format(host, result._result['stdout']))

    print("FAILED *******")
    for host, result in results_callback.host_failed.items():
        print('{0} >>> {1}'.format(host, result._result['msg']))

    print("DOWN *********")
    for host, result in results_callback.host_unreachable.items():
        print('{0} >>> {1}'.format(host, result._result['msg']))
def test_from_tuples(self):
    """Construction from an iterable of (key, value) pairs works like dict()."""
    pairs = (('a', 1), ('b', 2))
    mapping = ImmutableDict(pairs)
    assert frozenset(mapping.items()) == frozenset(pairs)
def chaosansible_run(host_list: list = ('localhost',),
                     configuration: Configuration = None,
                     facts: bool = False,
                     become: bool = False,
                     run_once: bool = False,
                     ansible: dict = None,
                     num_target: str = 'all',
                     secrets: Secrets = None):
    """
    Run a task through ansible and eventually gather facts from host.

    Fixes applied:
    - default ``host_list`` was ``('localhost')`` — a plain *string*, not
      a tuple (missing comma) — so ``','.join()`` split it into single
      characters ('l,o,c,a,l,h,o,s,t'); it is now ``('localhost',)``.
    - default ``ansible`` was the mutable literal ``{}`` shared across
      calls; it is now ``None`` and normalized per call.

    :param host_list: hosts to target (default: localhost only)
    :param configuration: mapping supplying ansible_* connection settings
    :param facts: whether the play gathers ansible facts
    :param become: run with privilege escalation (sudo)
    :param run_once: run the user task on a single host only
    :param ansible: dict with 'module' and 'args' describing the task to run
    :param num_target: 'all', or the number of random targets to pick
    :param secrets: accepted for chaostoolkit signature compatibility; unused here
    :raises InvalidActivity: when ``ansible`` lacks 'module' or 'args'
    :raises FailedActivity: when any host fails or is unreachable
    :return: JSON string mapping each ok host to its result object
    """
    # Normalize the mutable-default hazard once, up front.
    ansible = ansible if ansible is not None else {}

    # Check for correct inputs
    if ansible:
        if ansible.get('module') is None:
            raise InvalidActivity('No ansible module defined')
        if ansible.get('args') is None:
            raise InvalidActivity('No ansible module args defined')

    configuration = configuration or {}

    # Ansible configuration elements
    module_path = configuration.get('ansible_module_path') or None
    become_user = configuration.get('ansible_become_user') or None
    ssh_key_path = configuration.get('ansible_ssh_private_key') or None
    ansible_user = configuration.get('ansible_user') or None
    become_ask_pass = configuration.get('become_ask_pass') or None

    context.CLIARGS = ImmutableDict(connection='smart',
                                    verbosity=0,
                                    module_path=module_path,
                                    forks=10,
                                    become=become,
                                    become_method='sudo',
                                    become_user=become_user,
                                    check=False,
                                    diff=False,
                                    private_key_file=ssh_key_path,
                                    remote_user=ansible_user,
                                    become_ask_pass=become_ask_pass)

    # Update host_list regarding the number of desired targets.
    # Always work on a copy so the caller's list is never mutated.
    if num_target != 'all':
        new_host_list = random_host(host_list, int(num_target))
    else:
        new_host_list = list(host_list)

    # Create an inventory; a trailing comma marks a literal host list
    # rather than an inventory file path.
    sources = ','.join(new_host_list)
    if len(new_host_list) == 1:
        sources += ','

    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources=sources)

    # Instantiate callback for storing results
    results_callback = ResultsCollectorJSONCallback()
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    passwords = dict(vault_pass='******')

    # Ansible taskmanager (must exist before Play().load()).
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        stdout_callback=results_callback,
        run_additional_callbacks=False,
    )

    # Base play: a facts task is always present; the user task is
    # appended below when one was requested.
    play_source = dict(name="Ansible Play",
                       hosts=new_host_list,
                       gather_facts=facts,
                       tasks=[
                           dict(name='facts',
                                action=dict(module='debug',
                                            args=dict(var='ansible_facts'))),
                       ])

    # In case we only want to gather facts this stays empty.
    if ansible:
        module = ansible.get('module')
        args = ansible.get('args')
        play_source['tasks'].append(
            dict(name='task',
                 run_once=run_once,
                 action=dict(module=module, args=args),
                 register='shell_out'))

    # Create an ansible playbook
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    # Run it
    try:
        result = tqm.run(play)
    finally:
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

    # Remove ansible tmpdir
    shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    if len(results_callback.host_failed) > 0:
        raise FailedActivity("Failed to run ansible task")
    elif len(results_callback.host_unreachable) > 0:
        print("Unreach")
        raise FailedActivity("At least one target is down")
    else:
        results = {}
        for host, result in results_callback.host_ok.items():
            results[host] = result
        return json.dumps(results)
def test_hashable(self):
    """ImmutableDict is hashable when all of its values are hashable."""
    mapping = ImmutableDict({u'café': u'くらとみ'})
    assert hash(mapping)
"failed": t['failures'] } loader = DataLoader() inventory = InventoryManager(loader=loader, sources="/etc/ansible/hosts") variable_manager = VariableManager(loader=loader, inventory=inventory) context.CLIARGS = ImmutableDict(listtags=False, listtasks=False, listhosts=False, syntax=False, connection="ssh", module_path=None, forks=5, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=False, become_method=None, become_user=None, start_at_task=None, verbosity=0, check=False) # PlaybookExecutor执行playbook playbook = PlaybookExecutor(loader=loader, inventory=inventory, passwords={}, variable_manager=variable_manager, playbooks=['/etc/ansible/playbooks/test_ping.yml']) callback = MyCallBack()
def v2_runner_on_unreachable(self, result): self.host_unreachable[result._host.get_name()] = result def v2_runner_on_ok(self, result, *args, **kwargs): self.host_ok[result._host.get_name()] = result def v2_runner_on_failed(self, result, *args, **kwargs): self.host_ok[result._host.get_name()] = result #context.CLIARGS = ImmutableDict(connection='local',module_path=None,forks=2,become=None,become_method=None,become_user=None,check=False,diff=False) context.CLIARGS = ImmutableDict(connection='smart', module_path=None, verbosity=5, forks=1, become=None, become_method=None, become_user=None, check=False, diff=False) class AnsibleApi(object): def __init__(self): self.options = { 'verbosity': 0, 'ask_pass': False, 'private_key_file': None, 'remote_user': None, 'connection': 'smart', 'timeout': 10,
def apply_playbook(playbook_path, hosts_inv=None, host_user=None,
                   ssh_priv_key_file_path=None, password=None, variables=None,
                   proxy_setting=None, inventory_file=None, become_user=None):
    """
    Executes an Ansible playbook to the given host
    :param playbook_path: the (relative) path to the Ansible playbook
    :param hosts_inv: a list of hostnames/ip addresses to which to apply the
                      Ansible playbook (not required when PB is configured for
                      localhost)
    :param host_user: A user for the host instances (must be a password-less
                      sudo user if playbook has "sudo: yes") (not required
                      when PB is configured for localhost)
    :param ssh_priv_key_file_path: the file location of the ssh key. Required
                                   if password is None (not required when PB
                                   is configured for localhost)
    :param password: the SSH/become password for the host. Required if
                     ssh_priv_key_file_path is None (not required when PB is
                     configured for localhost)
    :param variables: a dictionary containing any substitution variables
                      needed by the Jinja2 templates
    :param proxy_setting: instance of os_credentials.ProxySettings class
    :param inventory_file: an inventory file that will supersede the hosts_inv
    :param become_user: the username on this host that the playbook must run
                        as. When used, the become_method will be sudo and
                        become will be 'yes'
    :raises AnsibleException when the return code from the Ansible library is
            not 0
    :return: the return code from the Ansible library only when 0.
             Implementation now raises an exception otherwise
    """
    if not os.path.isfile(playbook_path):
        raise AnsibleException('Requested playbook not found - ' + playbook_path)
    else:
        logger.info('Applying playbook [%s] with variables - %s',
                    playbook_path, variables)

    # Resolve the private key path; only validate its existence when no
    # password was supplied (the key is then the sole credential).
    pk_file_path = None
    if ssh_priv_key_file_path:
        pk_file_path = os.path.expanduser(ssh_priv_key_file_path)
        if not password:
            if not os.path.isfile(pk_file_path):
                raise AnsibleException(
                    'Requested private SSH key not found - ' + pk_file_path)

    # Same password is used for both the connection and privilege escalation.
    passwords = None
    if password:
        passwords = {'conn_pass': password, 'become_pass': password}

    import ansible.constants
    ansible.constants.HOST_KEY_CHECKING = False

    loader = DataLoader()

    # Inventory source precedence: explicit inventory file > host list >
    # implicit localhost (local connection).
    if inventory_file:
        inventory = InventoryManager(loader=loader, sources=inventory_file)
        connection = 'ssh'
    elif hosts_inv:
        inventory = InventoryManager(loader=loader)
        for host in hosts_inv:
            inventory.add_host(host=host, group='ungrouped')
        connection = 'ssh'
    else:
        loader = DataLoader()
        inventory = InventoryManager(loader=loader)
        connection = 'local'

    # Tunnel SSH through the configured proxy command, when present.
    ssh_extra_args = None
    if proxy_setting and proxy_setting.ssh_proxy_cmd:
        ssh_extra_args = '-o ProxyCommand=\'%s\'' % proxy_setting.ssh_proxy_cmd

    become = None
    become_method = None
    if become_user:
        become = 'yes'
        become_method = 'sudo'

    # NOTE(review): verbosity=11111 looks like a debugging leftover —
    # confirm whether a sane level (0-4) is intended here.
    context.CLIARGS = ImmutableDict(tags={}, listtags=False, listtasks=False,
                                    listhosts=False, syntax=False,
                                    connection=connection,
                                    module_path=None, forks=100,
                                    remote_user=host_user,
                                    private_key_file=pk_file_path,
                                    ssh_common_args=None,
                                    ssh_extra_args=ssh_extra_args,
                                    become=become,
                                    become_method=become_method,
                                    become_user=become_user,
                                    verbosity=11111, check=False, timeout=30,
                                    diff=None, start_at_task=None,
                                    extra_vars=[variables])

    variable_manager = VariableManager(loader=loader, inventory=inventory)

    logger.debug('Setting up Ansible Playbook Executor for playbook - ' +
                 playbook_path)
    executor = PlaybookExecutor(playbooks=[playbook_path],
                                inventory=inventory,
                                variable_manager=variable_manager,
                                loader=loader,
                                passwords=passwords)

    logger.debug('Executing Ansible Playbook - ' + playbook_path)
    ret_val = executor.run()
    if ret_val != 0:
        raise AnsibleException(
            'Error applying playbook [{}] with value [{}] using the connection'
            ' type of [{}]'.format(playbook_path, ret_val, connection))
    return ret_val
def test_scalar(self):
    """Non-string hashable keys behave like an ordinary dict lookup."""
    mapping = ImmutableDict({1: 2})
    assert mapping[1] == 2
result._result["ansible_facts"] ["ansible_processor_threads_per_core"] } print(json.dumps({host.name: data}, indent=4)) # since the API is constructed for CLI it expects certain options to always be set in the context object # context.CLIARGS = ImmutableDict(connection='local', module_path=['/to/mymodules'], forks=10, become=None, # become_method=None, become_user=None, check=False, diff=False) context.CLIARGS = ImmutableDict(connection='ssh', module_path=None, become=None, become_method=None, forks=10, start_at_task=None, become_user=None, check=False, diff=False, syntax=None, ansible_cfg=None, verbosity=3) # initialize needed objects loader = DataLoader( ) # Takes care of finding and reading yaml, json and ini files passwords = dict(vault_pass='') # Instantiate our ResultCallback for handling results as they come in. Ansible expects this to be one of its main display outlets results_callback = ResultCallback() # create inventory, use path to host config file as source or hosts in a comma separated string