def run_module():
    # module arguments
    module_args = dict(
        hostname=dict(type='str', required=True),
        username=dict(type='str', required=True),
        password=dict(type='str', required=True, no_log=True),
        ssh_public_key=dict(type='str', required=True),
        hetzner_storagebox=dict(type='str', required=False),
        ssh_port=dict(type='int', required=False, default=22)
    )

    # results dictionary
    result = dict(
        changed=False,
        original_message='',
        message=''
    )

    # create ansible module object
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    # set local variables
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']
    public_key = module.params['ssh_public_key']
    hetzner_storagebox = module.params['hetzner_storagebox']
    port = module.params['ssh_port']

    # MODULE TASKS TO BE PERFORMED BELOW..

    # Add Hetzner Storage-box support: storage boxes expose a chrooted home
    # directory, so the authorized_keys path must stay relative
    use_storagebox = (hetzner_storagebox or '').lower() == 'true'

    # set authorized key path
    if use_storagebox:
        auth_key = '.ssh/authorized_keys'
    elif username == 'root':
        auth_key = join('/root/', '.ssh/authorized_keys')
    else:
        auth_key = join('/home/%s' % username, '.ssh/authorized_keys')

    # prior to creating the ssh connection via paramiko, verify the public
    # key supplied exists on disk
    if isfile(public_key):
        module.log('SSH public key %s exists!' % public_key)
    else:
        result['message'] = 'Unable to locate ssh public key %s' % public_key
        module.fail_json(msg=result['message'])

    # create ssh client via paramiko
    ssh_con = paramiko.SSHClient()
    ssh_con.set_missing_host_key_policy(paramiko.WarningPolicy())

    # connect to remote system
    try:
        ssh_con.connect(
            hostname=hostname,
            username=username,
            password=password,
            look_for_keys=False,
            allow_agent=False,
            port=port
        )
    except (paramiko.BadHostKeyException, paramiko.AuthenticationException,
            paramiko.SSHException, socket.error) as ex:
        result['message'] = str(ex)
        module.fail_json(msg='Connection failed to %s' % hostname, **result)

    # create sftp client
    sftp_con = ssh_con.open_sftp()

    # read ssh public key
    with open(public_key, 'r') as fh:
        data = fh.read()

    add_key = True

    # read remote system authorized keys (if applicable)
    try:
        file_reader = sftp_con.open(auth_key, mode='r')
        authorized_key_data = file_reader.read()
        file_reader.close()

        # check if key already exists
        if data in authorized_key_data.decode('utf-8'):
            result['message'] = 'SSH public key already injected!'
            result['changed'] = False
            module.log(result['message'])
            add_key = False
    except IOError:
        module.warn('Authorized keys file %s not found!' % auth_key)

    # make the .ssh directory if it does not exist yet
    if use_storagebox:
        ssh_dir = '.ssh'
    else:
        ssh_dir = '/'.join(auth_key.split('/')[:-1])

    try:
        sftp_con.lstat(ssh_dir)
    except IOError:
        module.warn('%s user .ssh dir not found! Creating..' % username)
        sftp_con.mkdir(ssh_dir)

    # inject ssh public key into authorized keys
    if add_key:
        file_handler = sftp_con.file(auth_key, mode='a')
        file_handler.write(data)
        file_handler.flush()
        file_handler.chmod(0o600)
        file_handler.close()

        result['message'] = 'SSH public key injected!'
        result['changed'] = True
        module.log(result['message'])

    # close sftp/ssh connection
    sftp_con.close()
    ssh_con.close()

    # exit with results
    module.exit_json(**result)
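
# A minimal sketch of the entry point this module assumes; run_module() above
# references paramiko, socket.error, isfile/join and AnsibleModule, so the
# real file presumably starts with:
#   import socket
#   import paramiko
#   from os.path import isfile, join
#   from ansible.module_utils.basic import AnsibleModule
def main():
    run_module()


if __name__ == '__main__':
    main()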
def main():
    global results
    global OUTPUT
    global NIM_NODE
    global vioshc_cmd

    module = AnsibleModule(
        argument_spec=dict(
            targets=dict(required=True, type='list', elements='str'),
            action=dict(required=True, choices=['health_check'], type='str'),
            vars=dict(type='dict'),
        )
    )

    results = dict(
        changed=False,
        msg='',
        stdout='',
        stderr='',
    )

    # =========================================================================
    # Get module params
    # =========================================================================
    targets = module.params['targets']

    OUTPUT.append('VIOS Health Check operation for {}'.format(targets))

    target_list = []
    targets_health_status = {}

    # =========================================================================
    # Build nim node info
    # =========================================================================
    build_nim_node(module)

    ret = check_vios_targets(module, targets)
    if (ret is None) or (not ret):
        OUTPUT.append(' Warning: Empty target list')
        module.warn('Empty target list: "{}"'.format(targets))
    else:
        target_list = ret
        OUTPUT.append(' Targets list: {}'.format(target_list))
        module.debug('Targets list: {}'.format(target_list))

    # Check vioshc script is present, fail_json if not
    vioshc_cmd = module.get_bin_path('vioshc.py', required=True)
    module.debug('Using vioshc.py script at {}'.format(vioshc_cmd))

    targets_health_status = health_check(module, target_list)

    OUTPUT.append('VIOS Health Check status:')
    module.log('VIOS Health Check status:')
    for vios_key, status in targets_health_status.items():
        OUTPUT.append(" {} : {}".format(vios_key, status))
        module.log(' {} : {}'.format(vios_key, status))

    results['targets'] = target_list
    results['nim_node'] = NIM_NODE
    results['status'] = targets_health_status
    results['output'] = OUTPUT
    results['msg'] = "VIOS Health Check completed successfully"

    module.exit_json(**results)
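
# For context, a minimal sketch of the module-level names that main() mutates
# via `global`; the names come from the code above, the initial values are
# assumptions:
OUTPUT = []      # human-readable log lines, returned as results['output']
NIM_NODE = {}    # NIM node info populated by build_nim_node()
results = {}     # result dict rebuilt inside main()
vioshc_cmd = ''  # path to vioshc.py, resolved via module.get_bin_path()


if __name__ == '__main__':
    main()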
def main():
    global results

    suma_params = {}

    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=False, choices=['download', 'preview'],
                        type='str', default='preview'),
            targets=dict(required=True, type='list', elements='str'),
            oslevel=dict(required=False, type='str', default='Latest'),
            lpp_source_name=dict(required=False, type='str'),
            download_dir=dict(required=False, type='path'),
            download_only=dict(required=False, type='bool', default=False),
            extend_fs=dict(required=False, type='bool', default=True),
            description=dict(required=False, type='str'),
            metadata_dir=dict(required=False, type='path',
                              default='/var/adm/ansible/metadata'),
        ),
        supports_check_mode=True
    )

    results = dict(
        changed=False,
        msg='',
        cmd='',
        stdout='',
        stderr='',
        targets=[],
        meta={'messages': []},
    )

    module.debug('*** START ***')

    suma_params['LppSource'] = ''

    # Get module params
    action = module.params['action']
    suma_params['action'] = action
    suma_params['targets'] = module.params['targets']
    suma_params['download_dir'] = module.params['download_dir']
    suma_params['download_only'] = module.params['download_only']
    suma_params['lpp_source_name'] = module.params['lpp_source_name']
    suma_params['extend_fs'] = module.params['extend_fs']

    if module.params['oslevel'].upper() == 'LATEST':
        suma_params['req_oslevel'] = 'Latest'
    else:
        suma_params['req_oslevel'] = module.params['oslevel']

    if module.params['description']:
        suma_params['description'] = module.params['description']
    else:
        suma_params['description'] = "{0} request for oslevel {1}".format(
            action, suma_params['req_oslevel'])

    suma_params['metadata_dir'] = module.params['metadata_dir']

    # Run Suma preview or download
    suma_download(module, suma_params)

    # Exit
    msg = 'Suma {0} completed successfully'.format(action)
    module.log(msg)
    results['msg'] = msg
    results['lpp_source_name'] = suma_params['LppSource']
    module.exit_json(**results)
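
# For reference, a params dict this argument_spec would accept; the target
# name, oslevel and download directory below are illustrative only:
EXAMPLE_PARAMS = dict(
    action='download',
    targets=['nimclient01'],              # hypothetical NIM client
    oslevel='7200-03-02',                 # hypothetical TL/SP level
    download_dir='/usr/sys/inst.images',  # hypothetical download location
)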
def main():
    ansible_module = AnsibleModule(
        argument_spec=dict(
            ### basic ###
            force=dict(required=False, type='bool', default=False),
            dm_password=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            master_password=dict(required=False, no_log=True),
            ip_addresses=dict(required=False, type='list', default=[]),
            domain=dict(required=False),
            realm=dict(required=False),
            hostname=dict(required=False),
            ca_cert_files=dict(required=False, type='list', default=[]),
            no_host_dns=dict(required=False, type='bool', default=False),
            ### server ###
            setup_adtrust=dict(required=False, type='bool', default=False),
            setup_kra=dict(required=False, type='bool', default=False),
            setup_dns=dict(required=False, type='bool', default=False),
            idstart=dict(required=False, type='int'),
            idmax=dict(required=False, type='int'),
            # no_hbac_allow
            no_pkinit=dict(required=False, type='bool', default=False),
            # no_ui_redirect
            dirsrv_config_file=dict(required=False),
            ### ssl certificate ###
            dirsrv_cert_files=dict(required=False, type='list', default=[]),
            http_cert_files=dict(required=False, type='list', default=[]),
            pkinit_cert_files=dict(required=False, type='list', default=[]),
            # dirsrv_pin
            # http_pin
            # pkinit_pin
            # dirsrv_name
            # http_name
            # pkinit_name
            ### client ###
            # mkhomedir
            no_ntp=dict(required=False, type='bool', default=False),
            # ssh_trust_dns
            # no_ssh
            # no_sshd
            # no_dns_sshfp
            ### certificate system ###
            external_ca=dict(required=False, type='bool', default=False),
            external_ca_type=dict(required=False),
            external_cert_files=dict(required=False, type='list', default=[]),
            subject_base=dict(required=False),
            ca_subject=dict(required=False),
            # ca_signing_algorithm
            ### dns ###
            allow_zone_overlap=dict(required=False, type='bool',
                                    default=False),
            reverse_zones=dict(required=False, type='list', default=[]),
            no_reverse=dict(required=False, type='bool', default=False),
            auto_reverse=dict(required=False, type='bool', default=False),
            zonemgr=dict(required=False),
            forwarders=dict(required=False, type='list', default=[]),
            no_forwarders=dict(required=False, type='bool', default=False),
            auto_forwarders=dict(required=False, type='bool', default=False),
            forward_policy=dict(default=None, choices=['first', 'only']),
            no_dnssec_validation=dict(required=False, type='bool',
                                      default=False),
            ### ad trust ###
            enable_compat=dict(required=False, type='bool', default=False),
            netbios_name=dict(required=False),
            rid_base=dict(required=False, type='int'),
            secondary_rid_base=dict(required=False, type='int'),
            ### additional ###
        ),
        supports_check_mode=True,
    )

    ansible_module._ansible_debug = True
    ansible_log = AnsibleModuleLog(ansible_module)

    # set values ############################################################

    ### basic ###
    options.force = ansible_module.params.get('force')
    options.dm_password = ansible_module.params.get('dm_password')
    options.admin_password = ansible_module.params.get('password')
    options.master_password = ansible_module.params.get('master_password')
    options.ip_addresses = ansible_module_get_parsed_ip_addresses(
        ansible_module)
    options.domain_name = ansible_module.params.get('domain')
    options.realm_name = ansible_module.params.get('realm')
    options.host_name = ansible_module.params.get('hostname')
    options.ca_cert_files = ansible_module.params.get('ca_cert_files')
    options.no_host_dns = ansible_module.params.get('no_host_dns')
    ### server ###
    options.setup_adtrust = ansible_module.params.get('setup_adtrust')
    options.setup_dns = ansible_module.params.get('setup_dns')
    options.setup_kra = ansible_module.params.get('setup_kra')
    options.idstart = ansible_module.params.get('idstart')
    options.idmax = ansible_module.params.get('idmax')
    # no_hbac_allow
    options.no_pkinit = ansible_module.params.get('no_pkinit')
    # no_ui_redirect
    options.dirsrv_config_file = ansible_module.params.get(
        'dirsrv_config_file')
    ### ssl certificate ###
    options.dirsrv_cert_files = ansible_module.params.get('dirsrv_cert_files')
    options.http_cert_files = ansible_module.params.get('http_cert_files')
    options.pkinit_cert_files = ansible_module.params.get('pkinit_cert_files')
    # dirsrv_pin
    # http_pin
    # pkinit_pin
    # dirsrv_name
    # http_name
    # pkinit_name
    ### client ###
    # mkhomedir
    options.no_ntp = ansible_module.params.get('no_ntp')
    # ssh_trust_dns
    # no_ssh
    # no_sshd
    # no_dns_sshfp
    ### certificate system ###
    options.external_ca = ansible_module.params.get('external_ca')
    options.external_ca_type = ansible_module.params.get('external_ca_type')
    options.external_cert_files = ansible_module.params.get(
        'external_cert_files')
    options.subject_base = ansible_module.params.get('subject_base')
    options.ca_subject = ansible_module.params.get('ca_subject')
    # ca_signing_algorithm
    ### dns ###
    options.allow_zone_overlap = ansible_module.params.get(
        'allow_zone_overlap')
    options.reverse_zones = ansible_module.params.get('reverse_zones')
    options.no_reverse = ansible_module.params.get('no_reverse')
    options.auto_reverse = ansible_module.params.get('auto_reverse')
    options.zonemgr = ansible_module.params.get('zonemgr')
    options.forwarders = ansible_module.params.get('forwarders')
    options.no_forwarders = ansible_module.params.get('no_forwarders')
    options.auto_forwarders = ansible_module.params.get('auto_forwarders')
    options.forward_policy = ansible_module.params.get('forward_policy')
    options.no_dnssec_validation = ansible_module.params.get(
        'no_dnssec_validation')
    ### ad trust ###
    options.enable_compat = ansible_module.params.get('enable_compat')
    options.netbios_name = ansible_module.params.get('netbios_name')
    options.rid_base = ansible_module.params.get('rid_base')
    options.secondary_rid_base = ansible_module.params.get(
        'secondary_rid_base')
    ### additional ###
    options.kasp_db_file = None

    # version specific ######################################################

    if options.setup_adtrust and not adtrust_imported:
        #if "adtrust" not in options._allow_missing:
        ansible_module.fail_json(msg="adtrust can not be imported")
        #else:
        #    options.setup_adtrust = False
        #    ansible_module.warn(msg="adtrust is not supported, disabling")

    if options.setup_kra and not kra_imported:
        #if "kra" not in options._allow_missing:
        ansible_module.fail_json(msg="kra can not be imported")
        #else:
        #    options.setup_kra = False
        #    ansible_module.warn(msg="kra is not supported, disabling")

    # validation #############################################################

    if options.dm_password is None:
        ansible_module.fail_json(msg="Directory Manager password required")

    if options.admin_password is None:
        ansible_module.fail_json(msg="IPA admin password required")

    # This will override any settings passed in on the cmdline
    if os.path.isfile(paths.ROOT_IPA_CACHE):
        # dm_password check removed, checked already
        try:
            cache_vars = read_cache(options.dm_password)
            options.__dict__.update(cache_vars)
            if cache_vars.get('external_ca', False):
                options.external_ca = False
                options.interactive = False
        except Exception as e:
            ansible_module.fail_json(
                msg="Cannot process the cache file: %s" % str(e))

    # default values ########################################################

    # idstart and idmax
    if options.idstart is None:
        options.idstart = random.randint(1, 10000) * 200000
    if options.idmax is None or options.idmax == 0:
        options.idmax = options.idstart + 199999

    # validation ############################################################

    # domain_level
    if options.domain_level < MIN_DOMAIN_LEVEL:
        ansible_module.fail_json(
            msg="Domain Level cannot be lower than %d" % MIN_DOMAIN_LEVEL)
    elif options.domain_level > MAX_DOMAIN_LEVEL:
        ansible_module.fail_json(
            msg="Domain Level cannot be higher than %d" % MAX_DOMAIN_LEVEL)

    # dirsrv_config_file
    if options.dirsrv_config_file is not None:
        if not os.path.exists(options.dirsrv_config_file):
            ansible_module.fail_json(
                msg="File %s does not exist." % options.dirsrv_config_file)

    # domain_name
    if (options.setup_dns and not options.allow_zone_overlap and
            options.domain_name is not None):
        try:
            check_zone_overlap(options.domain_name, False)
        except ValueError as e:
            ansible_module.fail_json(msg=str(e))

    # dm_password
    with redirect_stdout(ansible_log):
        validate_dm_password(options.dm_password)

    # admin_password
    with redirect_stdout(ansible_log):
        validate_admin_password(options.admin_password)

    # pkinit is not supported on DL0, don't allow related options
    # replica install: if not options.replica_file is None:
    if (not options._replica_install and
            not options.domain_level > DOMAIN_LEVEL_0) or \
       (options._replica_install and options.replica_file is not None):
        if (options.no_pkinit or options.pkinit_cert_files is not None or
                options.pkinit_pin is not None):
            ansible_module.fail_json(
                msg="pkinit on domain level 0 is not supported. Please "
                    "don't use any pkinit-related options.")
        options.no_pkinit = True

    # If any of the key file options are selected, all are required.
    cert_file_req = (options.dirsrv_cert_files, options.http_cert_files)
    cert_file_opt = (options.pkinit_cert_files, )
    if not options.no_pkinit:
        cert_file_req += cert_file_opt
    if options.no_pkinit and options.pkinit_cert_files:
        ansible_module.fail_json(
            msg="no-pkinit and pkinit-cert-file cannot be specified together")
    if any(cert_file_req + cert_file_opt) and not all(cert_file_req):
        ansible_module.fail_json(
            msg="dirsrv-cert-file, http-cert-file, and pkinit-cert-file "
                "or no-pkinit are required if any key file options are used.")

    if not options.interactive:
        if options.dirsrv_cert_files and options.dirsrv_pin is None:
            ansible_module.fail_json(
                msg="You must specify dirsrv-pin with dirsrv-cert-file")
        if options.http_cert_files and options.http_pin is None:
            ansible_module.fail_json(
                msg="You must specify http-pin with http-cert-file")
        if options.pkinit_cert_files and options.pkinit_pin is None:
            ansible_module.fail_json(
                msg="You must specify pkinit-pin with pkinit-cert-file")

    if not options.setup_dns:
        # lists
        for x in ["forwarders", "reverse_zones"]:
            if len(getattr(options, x)) > 0:
                ansible_module.fail_json(
                    msg="You cannot specify %s without setting setup-dns" % x)
        # bool and str values
        for x in ["auto_forwarders", "no_forwarders", "auto_reverse",
                  "no_reverse", "no_dnssec_validation", "forward_policy"]:
            if getattr(options, x):
                ansible_module.fail_json(
                    msg="You cannot specify %s without setting setup-dns" % x)
    elif len(options.forwarders) > 0 and options.no_forwarders:
        ansible_module.fail_json(
            msg="You cannot specify forwarders together with no-forwarders")
    elif options.auto_forwarders and options.no_forwarders:
        ansible_module.fail_json(
            msg="You cannot specify auto-forwarders together with "
                "no-forwarders")
    elif len(options.reverse_zones) > 0 and options.no_reverse:
        ansible_module.fail_json(
            msg="You cannot specify reverse-zones together with no-reverse")
    elif options.auto_reverse and options.no_reverse:
        ansible_module.fail_json(
            msg="You cannot specify auto-reverse together with no-reverse")

    if not options._replica_install:
        if options.external_cert_files and options.dirsrv_cert_files:
            ansible_module.fail_json(
                msg="Service certificate file options cannot be used with "
                    "the external CA options.")

        if options.external_ca_type and not options.external_ca:
            ansible_module.fail_json(
                msg="You cannot specify external-ca-type without external-ca")

        #if options.uninstalling:
        #    if (options.realm_name or options.admin_password or
        #            options.master_password):
        #        ansible_module.fail_json(
        #            msg="In uninstall mode, -a, -r and -P options are not "
        #                "allowed")
        #elif not options.interactive:
        #    if (not options.realm_name or not options.dm_password or
        #            not options.admin_password):
        #        ansible_module.fail_json(
        #            msg="In unattended mode you need to provide at least "
        #                "-r, -p and -a options")
        #    if options.setup_dns:
        #        if (not options.forwarders and
        #                not options.no_forwarders and
        #                not options.auto_forwarders):
        #            ansible_module.fail_json(
        #                msg="You must specify at least one of --forwarder, "
        #                    "--auto-forwarders, or --no-forwarders options")

        if (not options.realm_name or not options.dm_password or
                not options.admin_password):
            ansible_module.fail_json(
                msg="You need to provide at least realm_name, dm_password "
                    "and admin_password")

        if options.setup_dns:
            if len(options.forwarders) < 1 and not options.no_forwarders and \
               not options.auto_forwarders:
                ansible_module.fail_json(
                    msg="You must specify at least one of forwarders, "
                        "auto-forwarders or no-forwarders")

        #any_ignore_option_true = any(
        #    [options.ignore_topology_disconnect,
        #     options.ignore_last_of_role])
        #if any_ignore_option_true and not options.uninstalling:
        #    ansible_module.fail_json(
        #        msg="ignore-topology-disconnect and ignore-last-of-role "
        #            "can be used only during uninstallation")

        if options.idmax < options.idstart:
            ansible_module.fail_json(
                msg="idmax (%s) cannot be smaller than idstart (%s)" %
                (options.idmax, options.idstart))

    else:
        # replica install
        if options.replica_file is None:
            if options.servers and not options.domain_name:
                ansible_module.fail_json(
                    msg="servers cannot be used without providing domain")
        else:
            if not os.path.isfile(options.replica_file):
                ansible_module.fail_json(
                    msg="Replica file %s does not exist" %
                    options.replica_file)

            if any(cert_file_req + cert_file_opt):
                ansible_module.fail_json(
                    msg="You cannot specify dirsrv-cert-file, "
                        "http-cert-file, or pkinit-cert-file together with "
                        "replica file")

            conflicting = {
                "realm": options.realm_name,
                "domain": options.domain_name,
                "hostname": options.host_name,
                "servers": options.servers,
                "principal": options.principal
            }
            conflicting_names = [
                name for name in conflicting if conflicting[name] is not None
            ]
            if len(conflicting_names) > 0:
                ansible_module.fail_json(
                    msg="You cannot specify %s option(s) with replica file." %
                    ", ".join(conflicting_names))

        if options.setup_dns:
            if len(options.forwarders) < 1 and not options.no_forwarders and \
               not options.auto_forwarders:
                ansible_module.fail_json(
                    msg="You must specify at least one of forwarders, "
                        "auto-forwarders or no-forwarders")

    if NUM_VERSION >= 40200 and options.master_password:
        ansible_module.warn("Specifying master-password is deprecated")

    options._installation_cleanup = True
    if not options.external_ca and len(options.external_cert_files) < 1 and \
       is_ipa_configured():
        options._installation_cleanup = False
        ansible_module.log(
            "IPA server is already configured on this system. If you want "
            "to reinstall the IPA server, please uninstall it first.")
        ansible_module.exit_json(changed=False,
                                 server_already_configured=True)

    client_fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    if client_fstore.has_files():
        options._installation_cleanup = False
        ansible_module.log(
            "IPA client is already configured on this system. "
            "Please uninstall it before configuring the IPA server.")
        ansible_module.exit_json(changed=False,
                                 client_already_configured=True)

    # validate reverse_zones
    if not options.allow_zone_overlap:
        for zone in options.reverse_zones:
            with redirect_stdout(ansible_log):
                check_zone_overlap(zone)

    # validate zonemgr
    if options.zonemgr:
        try:
            # IDNA support requires unicode
            encoding = getattr(sys.stdin, 'encoding', None)
            if encoding is None:
                encoding = 'utf-8'
            value = options.zonemgr.decode(encoding)
            with redirect_stdout(ansible_log):
                bindinstance.validate_zonemgr_str(value)
        except ValueError as e:
            # FIXME we can do this in better way
            # https://fedorahosted.org/freeipa/ticket/4804
            # decode to proper stderr encoding
            stderr_encoding = getattr(sys.stderr, 'encoding', None)
            if stderr_encoding is None:
                stderr_encoding = 'utf-8'
            error = unicode(e).encode(stderr_encoding)
            ansible_module.fail_json(msg=error)

    # external cert file paths are absolute
    for path in options.external_cert_files:
        if not os.path.isabs(path):
            ansible_module.fail_json(
                msg="External cert file '%s' must use an absolute path" %
                path)

    # We only set up the CA if the PKCS#12 options are not given.
    if options.dirsrv_cert_files and len(options.dirsrv_cert_files) > 0:
        options.setup_ca = False
    else:
        options.setup_ca = True

    if not options.setup_ca and options.ca_subject:
        ansible_module.fail_json(
            msg="--ca-subject cannot be used with CA-less installation")
    if not options.setup_ca and options.subject_base:
        ansible_module.fail_json(
            msg="--subject-base cannot be used with CA-less installation")
    if not options.setup_ca and options.setup_kra:
        ansible_module.fail_json(
            msg="--setup-kra cannot be used with CA-less installation")

    # ca_subject
    if options.ca_subject:
        ca.subject_validator(ca.VALID_SUBJECT_ATTRS, options.ca_subject)

    # IPv6 and SELinux check
    tasks.check_ipv6_stack_enabled()
    tasks.check_selinux_status()
    if check_ldap_conf is not None:
        check_ldap_conf()

    _installation_cleanup = True
    if not options.external_ca and not options.external_cert_files and \
       is_ipa_configured():
        _installation_cleanup = False
        ansible_module.fail_json(
            msg="IPA server is already configured on this system.")

    if not options.no_ntp:
        try:
            timeconf.check_timedate_services()
        except timeconf.NTPConflictingService as e:
            ansible_module.log(
                "Conflicting time&date synchronization service '%s' will "
                "be disabled in favor of %s" %
                (e.conflicting_service, time_service))
        except timeconf.NTPConfigurationError:
            pass

    if hasattr(httpinstance, "httpd_443_configured"):
        # Check to see if httpd is already configured to listen on 443
        if httpinstance.httpd_443_configured():
            ansible_module.fail_json(
                msg="httpd is already configured to listen on 443.")

    if not options.external_cert_files:
        # Make sure the 389-ds ports are available
        try:
            check_dirsrv(True)
        except ScriptError as e:
            ansible_module.fail_json(msg=str(e))

    # check bind packages are installed
    if options.setup_dns:
        # Don't require an external DNS to say who we are if we are
        # setting up a local DNS server.
        options.no_host_dns = True

    # host name
    if options.host_name:
        options.host_default = options.host_name
    else:
        options.host_default = get_fqdn()

    try:
        verify_fqdn(options.host_default, options.no_host_dns)
        options.host_name = options.host_default
    except BadHostError as e:
        ansible_module.fail_json(msg=str(e))

    options.host_name = options.host_name.lower()

    if not options.domain_name:
        options.domain_name = options.host_name[
            options.host_name.find(".") + 1:]
    try:
        validate_domain_name(options.domain_name)
    except ValueError as e:
        ansible_module.fail_json(msg="Invalid domain name: %s" % unicode(e))
    options.domain_name = options.domain_name.lower()

    if not options.realm_name:
        options.realm_name = options.domain_name
    options.realm_name = options.realm_name.upper()

    argspec = inspect.getargspec(validate_domain_name)
    if "entity" in argspec.args:
        # NUM_VERSION >= 40690:
        try:
            validate_domain_name(options.realm_name, entity="realm")
        except ValueError as e:
            raise ScriptError("Invalid realm name: {}".format(unicode(e)))

    if not options.setup_adtrust:
        # If the domain name and realm do not match, the IPA server will
        # not be able to establish trust with Active Directory. Warn.
        if options.domain_name.upper() != options.realm_name:
            ansible_module.warn(
                "Realm name does not match the domain name: "
                "You will not be able to establish trusts with Active "
                "Directory.")

    #########################################################################

    http_pkcs12_file = None
    http_pkcs12_info = None
    http_ca_cert = None
    dirsrv_pkcs12_file = None
    dirsrv_pkcs12_info = None
    dirsrv_ca_cert = None
    pkinit_pkcs12_file = None
    pkinit_pkcs12_info = None
    pkinit_ca_cert = None

    if options.http_cert_files:
        if options.http_pin is None:
            ansible_module.fail_json(
                msg="Apache Server private key unlock password required")
        http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
            cert_files=options.http_cert_files,
            key_password=options.http_pin,
            key_nickname=options.http_cert_name,
            ca_cert_files=options.ca_cert_files,
            host_name=options.host_name)
        http_pkcs12_info = (http_pkcs12_file.name, options.http_pin)

    if options.dirsrv_cert_files:
        if options.dirsrv_pin is None:
            ansible_module.fail_json(
                msg="Directory Server private key unlock password required")
        dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
            cert_files=options.dirsrv_cert_files,
            key_password=options.dirsrv_pin,
            key_nickname=options.dirsrv_cert_name,
            ca_cert_files=options.ca_cert_files,
            host_name=options.host_name)
        dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, options.dirsrv_pin)

    if options.pkinit_cert_files:
        if options.pkinit_pin is None:
            ansible_module.fail_json(
                msg="Kerberos KDC private key unlock password required")
        pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
            cert_files=options.pkinit_cert_files,
            key_password=options.pkinit_pin,
            key_nickname=options.pkinit_cert_name,
            ca_cert_files=options.ca_cert_files,
            realm_name=options.realm_name)
        pkinit_pkcs12_info = (pkinit_pkcs12_file.name, options.pkinit_pin)

    if (options.http_cert_files and options.dirsrv_cert_files and
            http_ca_cert != dirsrv_ca_cert):
        ansible_module.fail_json(
            msg="Apache Server SSL certificate and Directory Server SSL "
                "certificate are not signed by the same CA certificate")

    if (options.http_cert_files and options.pkinit_cert_files and
            http_ca_cert != pkinit_ca_cert):
        ansible_module.fail_json(
            msg="Apache Server SSL certificate and PKINIT KDC "
                "certificate are not signed by the same CA certificate")

    # subject_base
    if not options.subject_base:
        options.subject_base = str(default_subject_base(options.realm_name))
    # set options.subject for old ipa releases
    options.subject = options.subject_base

    if not options.ca_subject:
        options.ca_subject = str(default_ca_subject_dn(options.subject_base))

    # temporary ipa configuration ###########################################

    ipa_tempdir = tempfile.mkdtemp(prefix="ipaconf")
    try:
        # Configuration for ipalib, we will bootstrap and finalize later,
        # after we are sure we have the configuration file ready.
        cfg = dict(
            context='installer',
            confdir=ipa_tempdir,
            in_server=True,
            # make sure host name specified by user is used instead of default
            host=options.host_name,
        )
        if options.setup_ca:
            # we have an IPA-integrated CA
            cfg['ca_host'] = options.host_name

        # Create the management framework config file and finalize api
        target_fname = "%s/default.conf" % ipa_tempdir
        fd = open(target_fname, "w")
        fd.write("[global]\n")
        fd.write("host=%s\n" % options.host_name)
        fd.write("basedn=%s\n" % ipautil.realm_to_suffix(options.realm_name))
        fd.write("realm=%s\n" % options.realm_name)
        fd.write("domain=%s\n" % options.domain_name)
        fd.write("xmlrpc_uri=https://%s/ipa/xml\n" %
                 ipautil.format_netloc(options.host_name))
        fd.write("ldap_uri=ldapi://%%2fvar%%2frun%%2fslapd-%s.socket\n" %
                 installutils.realm_to_serverid(options.realm_name))
        if options.setup_ca:
            fd.write("enable_ra=True\n")
            fd.write("ra_plugin=dogtag\n")
            fd.write("dogtag_version=10\n")
        else:
            fd.write("enable_ra=False\n")
            fd.write("ra_plugin=none\n")
        fd.write("mode=production\n")
        fd.close()

        # Must be readable for everyone
        os.chmod(target_fname, 0o644)

        api.bootstrap(**cfg)
        api.finalize()

        # install checks ####################################################

        if options.setup_ca:
            ca.install_check(False, None, options)
        if options.setup_kra:
            kra.install_check(api, None, options)

        if options.setup_dns:
            with redirect_stdout(ansible_log):
                dns.install_check(False, api, False, options,
                                  options.host_name)
            ip_addresses = dns.ip_addresses
        else:
            ip_addresses = get_server_ip_address(options.host_name,
                                                 False, False,
                                                 options.ip_addresses)
            # check addresses here, dns install_check is doing its own check
            no_matching_interface_for_ip_address_warning(ip_addresses)
        options.ip_addresses = ip_addresses
        options.reverse_zones = dns.reverse_zones

        instance_name = "-".join(options.realm_name.split("."))
        dirsrv = services.knownservices.dirsrv
        if (options.external_cert_files and
                dirsrv.is_installed(instance_name) and
                not dirsrv.is_running(instance_name)):
            logger.debug('Starting Directory Server')
            services.knownservices.dirsrv.start(instance_name)

        if options.setup_adtrust:
            adtrust.install_check(False, options, api)

    except (RuntimeError, ValueError, ScriptError) as e:
        ansible_module.fail_json(msg=str(e))
    finally:
        try:
            shutil.rmtree(ipa_tempdir, ignore_errors=True)
        except OSError:
            ansible_module.fail_json(msg="Could not remove %s" % ipa_tempdir)

    # Always set _host_name_overridden
    options._host_name_overridden = bool(options.host_name)

    # done ##################################################################

    ansible_module.exit_json(
        changed=False,
        ipa_python_version=IPA_PYTHON_VERSION,
        ### basic ###
        domain=options.domain_name,
        realm=options.realm_name,
        ip_addresses=[str(ip) for ip in ip_addresses],
        hostname=options.host_name,
        _hostname_overridden=options._host_name_overridden,
        no_host_dns=options.no_host_dns,
        ### server ###
        setup_adtrust=options.setup_adtrust,
        setup_kra=options.setup_kra,
        setup_ca=options.setup_ca,
        idstart=options.idstart,
        idmax=options.idmax,
        no_pkinit=options.no_pkinit,
        ### ssl certificate ###
        _dirsrv_pkcs12_file=dirsrv_pkcs12_file,
        _dirsrv_pkcs12_info=dirsrv_pkcs12_info,
        _dirsrv_ca_cert=dirsrv_ca_cert,
        _http_pkcs12_file=http_pkcs12_file,
        _http_pkcs12_info=http_pkcs12_info,
        _http_ca_cert=http_ca_cert,
        _pkinit_pkcs12_file=pkinit_pkcs12_file,
        _pkinit_pkcs12_info=pkinit_pkcs12_info,
        _pkinit_ca_cert=pkinit_ca_cert,
        ### certificate system ###
        subject_base=options.subject_base,
        _subject_base=options._subject_base,
        ca_subject=options.ca_subject,
        _ca_subject=options._ca_subject,
        ### dns ###
        reverse_zones=options.reverse_zones,
        forward_policy=options.forward_policy,
        forwarders=options.forwarders,
        no_dnssec_validation=options.no_dnssec_validation,
        ### additional ###
        _installation_cleanup=_installation_cleanup,
        domainlevel=options.domainlevel,
        dns_ip_addresses=[str(ip) for ip in dns.ip_addresses],
        dns_reverse_zones=dns.reverse_zones)
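
# `options` in both installer modules above is used as a plain attribute
# namespace supplied by the surrounding file; a bare stand-in sketch (the
# real module presumably uses the FreeIPA installer's option container
# instead):
class _OptionsNamespace:
    """Attribute bag standing in for the installer options object."""


options = _OptionsNamespace()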
def main(): ansible_module = AnsibleModule( argument_spec = dict( ### basic ### force=dict(required=False, type='bool', default=False), dm_password=dict(required=True, no_log=True), password=dict(required=True, no_log=True), master_password=dict(required=False, no_log=True), domain=dict(required=False), realm=dict(required=False), hostname=dict(required=False), ca_cert_files=dict(required=False, type='list', default=[]), no_host_dns=dict(required=False, type='bool', default=False), pki_config_override=dict(required=False), ### server ### setup_adtrust=dict(required=False, type='bool', default=False), setup_kra=dict(required=False, type='bool', default=False), setup_dns=dict(required=False, type='bool', default=False), idstart=dict(required=False, type='int'), idmax=dict(required=False, type='int'), # no_hbac_allow no_pkinit=dict(required=False, type='bool', default=False), # no_ui_redirect dirsrv_config_file=dict(required=False), ### ssl certificate ### dirsrv_cert_files=dict(required=False, type='list', default=None), http_cert_files=dict(required=False, type='list', defaullt=None), pkinit_cert_files=dict(required=False, type='list', default=None), dirsrv_pin=dict(required=False), http_pin=dict(required=False), pkinit_pin=dict(required=False), dirsrv_cert_name=dict(required=False), http_cert_name=dict(required=False), pkinit_cert_name=dict(required=False), ### client ### # mkhomedir ntp_servers=dict(required=False, type='list', default=None), ntp_pool=dict(required=False, default=None), no_ntp=dict(required=False, type='bool', default=False), # ssh_trust_dns # no_ssh # no_sshd # no_dns_sshfp ### certificate system ### external_ca=dict(required=False, type='bool', default=False), external_ca_type=dict(required=False), external_ca_profile=dict(required=False), external_cert_files=dict(required=False, type='list', default=None), subject_base=dict(required=False), ca_subject=dict(required=False), # ca_signing_algorithm ### dns ### allow_zone_overlap=dict(required=False, type='bool', default=False), reverse_zones=dict(required=False, type='list', default=[]), no_reverse=dict(required=False, type='bool', default=False), auto_reverse=dict(required=False, type='bool', default=False), zonemgr=dict(required=False), forwarders=dict(required=False, type='list', default=[]), no_forwarders=dict(required=False, type='bool', default=False), auto_forwarders=dict(required=False, type='bool', default=False), forward_policy=dict(default=None, choices=['first', 'only']), no_dnssec_validation=dict(required=False, type='bool', default=False), ### ad trust ### enable_compat=dict(required=False, type='bool', default=False), netbios_name=dict(required=False), rid_base=dict(required=False, type='int', default=1000), secondary_rid_base=dict(required=False, type='int', default=100000000), ### additional ### ), supports_check_mode = True, ) ansible_module._ansible_debug = True ansible_log = AnsibleModuleLog(ansible_module) # set values ############################################################ ### basic ### options.force = ansible_module.params.get('force') options.dm_password = ansible_module.params.get('dm_password') options.admin_password = ansible_module.params.get('password') options.master_password = ansible_module.params.get('master_password') options.domain_name = ansible_module.params.get('domain') options.realm_name = ansible_module.params.get('realm') options.host_name = ansible_module.params.get('hostname') options.ca_cert_files = ansible_module.params.get('ca_cert_files') options.no_host_dns = 
ansible_module.params.get('no_host_dns') options.pki_config_override = ansible_module.params.get( 'pki_config_override') ### server ### options.setup_adtrust = ansible_module.params.get('setup_adtrust') options.setup_dns = ansible_module.params.get('setup_dns') options.setup_kra = ansible_module.params.get('setup_kra') options.idstart = ansible_module.params.get('idstart') options.idmax = ansible_module.params.get('idmax') # no_hbac_allow options.no_pkinit = ansible_module.params.get('no_pkinit') # no_ui_redirect options.dirsrv_config_file = ansible_module.params.get('dirsrv_config_file') ### ssl certificate ### options.dirsrv_cert_files = ansible_module.params.get('dirsrv_cert_files') options.http_cert_files = ansible_module.params.get('http_cert_files') options.pkinit_cert_files = ansible_module.params.get('pkinit_cert_files') options.dirsrv_pin = ansible_module.params.get('dirsrv_pin'), options.http_pin = ansible_module.params.get('http_pin'), options.pkinit_pin = ansible_module.params.get('pkinit_pin'), options.dirsrv_cert_name = ansible_module.params.get('dirsrv_cert_name'), options.http_cert_name = ansible_module.params.get('http_cert_name'), options.pkinit_cert_name = ansible_module.params.get('pkinit_cert_name'), ### client ### # mkhomedir options.ntp_servers = ansible_module.params.get('ntp_servers') options.ntp_pool = ansible_module.params.get('ntp_pool') options.no_ntp = ansible_module.params.get('no_ntp') # ssh_trust_dns # no_ssh # no_sshd # no_dns_sshfp ### certificate system ### options.external_ca = ansible_module.params.get('external_ca') options.external_ca_type = ansible_module.params.get('external_ca_type') options.external_ca_profile = ansible_module.params.get( 'external_ca_profile') options.external_cert_files = ansible_module.params.get( 'external_cert_files') options.subject_base = ansible_module.params.get('subject_base') options.ca_subject = ansible_module.params.get('ca_subject') # ca_signing_algorithm ### dns ### options.allow_zone_overlap = ansible_module.params.get('allow_zone_overlap') options.reverse_zones = ansible_module.params.get('reverse_zones') options.no_reverse = ansible_module.params.get('no_reverse') options.auto_reverse = ansible_module.params.get('auto_reverse') options.zonemgr = ansible_module.params.get('zonemgr') options.forwarders = ansible_module.params.get('forwarders') options.no_forwarders = ansible_module.params.get('no_forwarders') options.auto_forwarders = ansible_module.params.get('auto_forwarders') options.forward_policy = ansible_module.params.get('forward_policy') options.no_dnssec_validation = ansible_module.params.get( 'no_dnssec_validation') ### ad trust ### options.enable_compat = ansible_module.params.get('enable_compat') options.netbios_name = ansible_module.params.get('netbios_name') options.rid_base = ansible_module.params.get('rid_base') options.secondary_rid_base = ansible_module.params.get('secondary_rid_base') ### additional ### options.kasp_db_file = None # version specific ###################################################### if options.setup_adtrust and not adtrust_imported: #if "adtrust" not in options._allow_missing: ansible_module.fail_json(msg="adtrust can not be imported") #else: # options.setup_adtrust = False # ansible_module.warn(msg="adtrust is not supported, disabling") if options.setup_kra and not kra_imported: #if "kra" not in options._allow_missing: ansible_module.fail_json(msg="kra can not be imported") #else: # options.setup_kra = False # ansible_module.warn(msg="kra is not supported, disabling") if 
options.pki_config_override is not None: if PKIIniLoader is None: ansible_module.warn("The use of pki_config_override is not " "supported for this IPA version") else: # From DogtagInstallInterface @pki_config_override.validator try: PKIIniLoader.verify_pki_config_override( options.pki_config_override) except ValueError as e: ansible_module.fail_json( msg="pki_config_override: %s" % str(e)) # default values ######################################################## # idstart and idmax if options.idstart is None: options.idstart = random.randint(1, 10000) * 200000 if options.idmax is None or options.idmax == 0: options.idmax = options.idstart + 199999 #class ServerInstallInterface(ServerCertificateInstallInterface, # client.ClientInstallInterface, # ca.CAInstallInterface, # kra.KRAInstallInterface, # dns.DNSInstallInterface, # adtrust.ADTrustInstallInterface, # conncheck.ConnCheckInterface, # ServerUninstallInterface): # ServerInstallInterface.__init__ ####################################### try: self = options # If any of the key file options are selected, all are required. cert_file_req = (self.dirsrv_cert_files, self.http_cert_files) cert_file_opt = (self.pkinit_cert_files,) if not self.no_pkinit: cert_file_req += cert_file_opt if self.no_pkinit and self.pkinit_cert_files: raise RuntimeError( "--no-pkinit and --pkinit-cert-file cannot be specified " "together" ) if any(cert_file_req + cert_file_opt) and not all(cert_file_req): raise RuntimeError( "--dirsrv-cert-file, --http-cert-file, and --pkinit-cert-file " "or --no-pkinit are required if any key file options are used." ) if not self.interactive: if self.dirsrv_cert_files and self.dirsrv_pin is None: raise RuntimeError( "You must specify --dirsrv-pin with --dirsrv-cert-file") if self.http_cert_files and self.http_pin is None: raise RuntimeError( "You must specify --http-pin with --http-cert-file") if self.pkinit_cert_files and self.pkinit_pin is None: raise RuntimeError( "You must specify --pkinit-pin with --pkinit-cert-file") if not self.setup_dns: if self.forwarders: raise RuntimeError( "You cannot specify a --forwarder option without the " "--setup-dns option") if self.auto_forwarders: raise RuntimeError( "You cannot specify a --auto-forwarders option without " "the --setup-dns option") if self.no_forwarders: raise RuntimeError( "You cannot specify a --no-forwarders option without the " "--setup-dns option") if self.forward_policy: raise RuntimeError( "You cannot specify a --forward-policy option without the " "--setup-dns option") if self.reverse_zones: raise RuntimeError( "You cannot specify a --reverse-zone option without the " "--setup-dns option") if self.auto_reverse: raise RuntimeError( "You cannot specify a --auto-reverse option without the " "--setup-dns option") if self.no_reverse: raise RuntimeError( "You cannot specify a --no-reverse option without the " "--setup-dns option") if self.no_dnssec_validation: raise RuntimeError( "You cannot specify a --no-dnssec-validation option " "without the --setup-dns option") elif self.forwarders and self.no_forwarders: raise RuntimeError( "You cannot specify a --forwarder option together with " "--no-forwarders") elif self.auto_forwarders and self.no_forwarders: raise RuntimeError( "You cannot specify a --auto-forwarders option together with " "--no-forwarders") elif self.reverse_zones and self.no_reverse: raise RuntimeError( "You cannot specify a --reverse-zone option together with " "--no-reverse") elif self.auto_reverse and self.no_reverse: raise RuntimeError( "You cannot specify a 
--auto-reverse option together with " "--no-reverse") if not self.setup_adtrust: if self.add_agents: raise RuntimeError( "You cannot specify an --add-agents option without the " "--setup-adtrust option") if self.enable_compat: raise RuntimeError( "You cannot specify an --enable-compat option without the " "--setup-adtrust option") if self.netbios_name: raise RuntimeError( "You cannot specify a --netbios-name option without the " "--setup-adtrust option") if self.no_msdcs: raise RuntimeError( "You cannot specify a --no-msdcs option without the " "--setup-adtrust option") if not hasattr(self, 'replica_install'): if self.external_cert_files and self.dirsrv_cert_files: raise RuntimeError( "Service certificate file options cannot be used with the " "external CA options.") if self.external_ca_type and not self.external_ca: raise RuntimeError( "You cannot specify --external-ca-type without " "--external-ca") if self.external_ca_profile and not self.external_ca: raise RuntimeError( "You cannot specify --external-ca-profile without " "--external-ca") if self.uninstalling: if (self.realm_name or self.admin_password or self.master_password): raise RuntimeError( "In uninstall mode, -a, -r and -P options are not " "allowed") elif not self.interactive: if (not self.realm_name or not self.dm_password or not self.admin_password): raise RuntimeError( "In unattended mode you need to provide at least -r, " "-p and -a options") if self.setup_dns: if (not self.forwarders and not self.no_forwarders and not self.auto_forwarders): raise RuntimeError( "You must specify at least one of --forwarder, " "--auto-forwarders, or --no-forwarders options") any_ignore_option_true = any( [self.ignore_topology_disconnect, self.ignore_last_of_role]) if any_ignore_option_true and not self.uninstalling: raise RuntimeError( "'--ignore-topology-disconnect/--ignore-last-of-role' " "options can be used only during uninstallation") if self.idmax < self.idstart: raise RuntimeError( "idmax (%s) cannot be smaller than idstart (%s)" % (self.idmax, self.idstart)) else: # replica installers if self.servers and not self.domain_name: raise RuntimeError( "The --server option cannot be used without providing " "domain via the --domain option") if self.setup_dns: if (not self.forwarders and not self.no_forwarders and not self.auto_forwarders): raise RuntimeError( "You must specify at least one of --forwarder, " "--auto-forwarders, or --no-forwarders options") except RuntimeError as e: ansible_module.fail_json(msg=e) # ####################################################################### # If any of the key file options are selected, all are required. cert_file_req = (options.dirsrv_cert_files, options.http_cert_files) cert_file_opt = (options.pkinit_cert_files,) if not options.no_pkinit: cert_file_req += cert_file_opt if options.no_pkinit and options.pkinit_cert_files: ansible_module.fail_json( msg="no-pkinit and pkinit-cert-file cannot be specified together" ) if any(cert_file_req + cert_file_opt) and not all(cert_file_req): ansible_module.fail_json( msg="dirsrv-cert-file, http-cert-file, and pkinit-cert-file " "or no-pkinit are required if any key file options are used." 
) if not options.interactive: if options.dirsrv_cert_files and options.dirsrv_pin is None: ansible_module.fail_json( msg="You must specify dirsrv-pin with dirsrv-cert-file") if options.http_cert_files and options.http_pin is None: ansible_module.fail_json( msg="You must specify http-pin with http-cert-file") if options.pkinit_cert_files and options.pkinit_pin is None: ansible_module.fail_json( msg="You must specify pkinit-pin with pkinit-cert-file") if not options.setup_dns: # lists for x in [ "forwarders", "reverse_zones" ]: if len(getattr(options, x)) > 1: ansible_module.fail_json( msg="You cannot specify %s without setting setup-dns" % x) # bool and str values for x in [ "auto_forwarders", "no_forwarders", "auto_reverse", "no_reverse", "no_dnssec_validation", "forward_policy" ]: if getattr(options, x) == True: ansible_module.fail_json( msg="You cannot specify %s without setting setup-dns" % x) elif len(options.forwarders) > 0 and options.no_forwarders: ansible_module.fail_json( msg="You cannot specify forwarders together with no-forwarders") elif options.auto_forwarders and options.no_forwarders: ansible_module.fail_json( msg="You cannot specify auto-forwarders together with no-forwarders") elif len(options.reverse_zones) > 0 and options.no_reverse: ansible_module.fail_json( msg="You cannot specify reverse-zones together with no-reverse") elif options.auto_reverse and options.no_reverse: ansible_module.fail_json( msg="You cannot specify auto-reverse together with no-reverse") if not hasattr(self, 'replica_install'): if options.external_cert_files and options.dirsrv_cert_files: ansible_module.fail_json( msg="Service certificate file options cannot be used with the " "external CA options.") if options.external_ca_type and not options.external_ca: ansible_module.fail_json( msg="You cannot specify external-ca-type without external-ca") #if options.uninstalling: # if (options.realm_name or options.admin_password or # options.master_password): # ansible_module.fail_json( # msg="In uninstall mode, -a, -r and -P options are not " # "allowed") #elif not options.interactive: # if (not options.realm_name or not options.dm_password or # not options.admin_password): # ansible_module.fail_json(msg= # "In unattended mode you need to provide at least -r, " # "-p and -a options") # if options.setup_dns: # if (not options.forwarders and # not options.no_forwarders and # not options.auto_forwarders): # ansible_module.fail_json(msg= # "You must specify at least one of --forwarder, " # "--auto-forwarders, or --no-forwarders options") if (not options.realm_name or not options.dm_password or not options.admin_password): ansible_module.fail_json( msg="You need to provide at least realm_name, dm_password " "and admin_password") if options.setup_dns: if len(options.forwarders) < 1 and not options.no_forwarders and \ not options.auto_forwarders: ansible_module.fail_json( msg="You must specify at least one of forwarders, " "auto-forwarders or no-forwarders") #any_ignore_option_true = any( # [options.ignore_topology_disconnect, options.ignore_last_of_role]) #if any_ignore_option_true and not options.uninstalling: # ansible_module.fail_json( # msg="ignore-topology-disconnect and ignore-last-of-role " # "can be used only during uninstallation") if options.idmax < options.idstart: ansible_module.fail_json( msg="idmax (%s) cannot be smaller than idstart (%s)" % (options.idmax, options.idstart)) # validation ############################################################# if options.dm_password is None: 
ansible_module.fail_json(msg="Directory Manager password required") if options.admin_password is None: ansible_module.fail_json(msg="IPA admin password required") # validation ############################################################ # domain_level if options.domain_level < MIN_DOMAIN_LEVEL: ansible_module.fail_json( msg="Domain Level cannot be lower than %d" % MIN_DOMAIN_LEVEL) elif options.domain_level > MAX_DOMAIN_LEVEL: ansible_module.fail_json( msg="Domain Level cannot be higher than %d" % MAX_DOMAIN_LEVEL) # dirsrv_config_file if options.dirsrv_config_file is not None: if not os.path.exists(options.dirsrv_config_file): ansible_module.fail_json( msg="File %s does not exist." % options.dirsrv_config_file) # domain_name if (options.setup_dns and not options.allow_zone_overlap and \ options.domain_name is not None): try: check_zone_overlap(options.domain_name, False) except ValueError as e: ansible_module.fail_json(str(e)) # dm_password with redirect_stdout(ansible_log): validate_dm_password(options.dm_password) # admin_password with redirect_stdout(ansible_log): validate_admin_password(options.admin_password) # pkinit is not supported on DL0, don't allow related options """ # replica install: if not options.replica_file is None: if (not options._replica_install and \ not options.domain_level > DOMAIN_LEVEL_0) or \ (options._replica_install and options.replica_file is not None): if (options.no_pkinit or options.pkinit_cert_files is not None or options.pkinit_pin is not None): ansible_module.fail_json( msg="pkinit on domain level 0 is not supported. Please " "don't use any pkinit-related options.") options.no_pkinit = True """ if options.setup_dns: if len(options.forwarders) < 1 and not options.no_forwarders and \ not options.auto_forwarders: ansible_module.fail_json( msg="You must specify at least one of forwarders, " "auto-forwarders or no-forwarders") if NUM_VERSION >= 40200 and options.master_password and \ not options.external_cert_files: ansible_module.warn("Specifying kerberos master-password is deprecated") options._installation_cleanup = True if not options.external_ca and not options.external_cert_files and \ is_ipa_configured(): options._installation_cleanup = False ansible_module.log( "IPA server is already configured on this system. If you want " "to reinstall the IPA server, please uninstall it first.") ansible_module.exit_json(changed=False, server_already_configured=True) client_fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE) if client_fstore.has_files(): options._installation_cleanup = False ansible_module.log( "IPA client is already configured on this system. 
" "Please uninstall it before configuring the IPA server.") ansible_module.exit_json(changed=False, client_already_configured=True) # validate reverse_zones if not options.allow_zone_overlap: for zone in options.reverse_zones: with redirect_stdout(ansible_log): check_zone_overlap(zone) # validate zonemgr if options.zonemgr: if six.PY3: with redirect_stdout(ansible_log): bindinstance.validate_zonemgr_str(options.zonemgr) else: try: # IDNA support requires unicode encoding = getattr(sys.stdin, 'encoding', None) if encoding is None: encoding = 'utf-8' if not isinstance(value, unicode): value = options.zonemgr.decode(encoding) else: value = options.zonemgr with redirect_stdout(ansible_log): bindinstance.validate_zonemgr_str(value) except ValueError as e: # FIXME we can do this in better way # https://fedorahosted.org/freeipa/ticket/4804 # decode to proper stderr encoding stderr_encoding = getattr(sys.stderr, 'encoding', None) if stderr_encoding is None: stderr_encoding = 'utf-8' error = unicode(e).encode(stderr_encoding) ansible_module.fail_json(msg=error) # external cert file paths are absolute if options.external_cert_files: for path in options.external_cert_files: if not os.path.isabs(path): ansible_module.fail_json( msg="External cert file '%s' must use an absolute path" % path) options.setup_ca = True # We only set up the CA if the PKCS#12 options are not given. if options.dirsrv_cert_files and len(options.dirsrv_cert_files) > 0: options.setup_ca = False else: options.setup_ca = True if not options.setup_ca and options.ca_subject: ansible_module.fail_json(msg= "--ca-subject cannot be used with CA-less installation") if not options.setup_ca and options.subject_base: ansible_module.fail_json(msg= "--subject-base cannot be used with CA-less installation") if not options.setup_ca and options.setup_kra: ansible_module.fail_json(msg= "--setup-kra cannot be used with CA-less installation") # This will override any settings passed in on the cmdline if os.path.isfile(paths.ROOT_IPA_CACHE): # dm_password check removed, checked already try: cache_vars = read_cache(options.dm_password) options.__dict__.update(cache_vars) if cache_vars.get('external_ca', False): options.external_ca = False options.interactive = False except Exception as e: ansible_module.fail_json(msg="Cannot process the cache file: %s" % str(e)) # ca_subject if options.ca_subject: ca.subject_validator(ca.VALID_SUBJECT_ATTRS, options.ca_subject) # IPv6 and SELinux check tasks.check_ipv6_stack_enabled() tasks.check_selinux_status() if check_ldap_conf is not None: check_ldap_conf() _installation_cleanup = True if not options.external_ca and not options.external_cert_files and \ is_ipa_configured(): _installation_cleanup = False ansible_module.fail_json(msg="IPA server is already configured on this system.") if not options.no_ntp: try: timeconf.check_timedate_services() except timeconf.NTPConflictingService as e: ansible_module.log( "WARNING: conflicting time&date synchronization service " "'%s' will be disabled in favor of chronyd" % \ e.conflicting_service) except timeconf.NTPConfigurationError: pass if hasattr(httpinstance, "httpd_443_configured"): # Check to see if httpd is already configured to listen on 443 if httpinstance.httpd_443_configured(): ansible_module.fail_json(msg="httpd is already configured to listen on 443.") if not options.external_cert_files: # Make sure the 389-ds ports are available try: check_dirsrv(True) except ScriptError as e: ansible_module.fail_json(msg=e) # check bind packages are installed if 
options.setup_dns: # Don't require an external DNS to say who we are if we are # setting up a local DNS server. options.no_host_dns = True # host name if options.host_name: host_default = options.host_name else: host_default = get_fqdn() try: verify_fqdn(host_default, options.no_host_dns) host_name = host_default except BadHostError as e: ansible_module.fail_json(msg=e) host_name = host_name.lower() if not options.domain_name: domain_name = host_name[host_name.find(".")+1:] try: validate_domain_name(domain_name) except ValueError as e: ansible_module.fail_json(msg="Invalid domain name: %s" % unicode(e)) else: domain_name = options.domain_name domain_name = domain_name.lower() if not options.realm_name: realm_name = domain_name.upper() else: realm_name = options.realm_name.upper() argspec = inspect.getargspec(validate_domain_name) if "entity" in argspec.args: # NUM_VERSION >= 40690: try: validate_domain_name(realm_name, entity="realm") except ValueError as e: raise ScriptError("Invalid realm name: {}".format(unicode(e))) if not options.setup_adtrust: # If domain name and realm does not match, IPA server will not be able # to establish trust with Active Directory. Fail. if domain_name.upper() != realm_name: ansible_module.warn( "Realm name does not match the domain name: " "You will not be able to establish trusts with Active " "Directory.") # Do not ask for time source #if not options.no_ntp and not options.unattended and not ( # options.ntp_servers or options.ntp_pool): # options.ntp_servers, options.ntp_pool = timeconf.get_time_source() ######################################################################### http_pkcs12_file = None http_pkcs12_info = None http_ca_cert = None dirsrv_pkcs12_file = None dirsrv_pkcs12_info = None dirsrv_ca_cert = None pkinit_pkcs12_file = None pkinit_pkcs12_info = None pkinit_ca_cert = None if options.http_cert_files: if options.http_pin is None: ansible_module.fail_json(msg= "Apache Server private key unlock password required") http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12( cert_files=options.http_cert_files, key_password=options.http_pin, key_nickname=options.http_cert_name, ca_cert_files=options.ca_cert_files, host_name=host_name) http_pkcs12_info = (http_pkcs12_file.name, options.http_pin) if options.dirsrv_cert_files: if options.dirsrv_pin is None: ansible_module.fail_json(msg= "Directory Server private key unlock password required") dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12( cert_files=options.dirsrv_cert_files, key_password=options.dirsrv_pin, key_nickname=options.dirsrv_cert_name, ca_cert_files=options.ca_cert_files, host_name=host_name) dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, options.dirsrv_pin) if options.pkinit_cert_files: if options.pkinit_pin is None: ansible_module.fail_json(msg= "Kerberos KDC private key unlock password required") pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12( cert_files=options.pkinit_cert_files, key_password=options.pkinit_pin, key_nickname=options.pkinit_cert_name, ca_cert_files=options.ca_cert_files, realm_name=realm_name) pkinit_pkcs12_info = (pkinit_pkcs12_file.name, options.pkinit_pin) if (options.http_cert_files and options.dirsrv_cert_files and http_ca_cert != dirsrv_ca_cert): ansible_module.fail_json(msg= "Apache Server SSL certificate and Directory Server SSL " "certificate are not signed by the same CA certificate") if (options.http_cert_files and options.pkinit_cert_files and http_ca_cert != pkinit_ca_cert): ansible_module.fail_json(msg= "Apache Server SSL 
certificate and PKINIT KDC " "certificate are not signed by the same CA certificate") # done ################################################################## ansible_module.exit_json(changed=False, ipa_python_version=IPA_PYTHON_VERSION, ### basic ### domain=options.domain_name, realm=realm_name, hostname=host_name, _hostname_overridden=bool(options.host_name), no_host_dns=options.no_host_dns, ### server ### setup_adtrust=options.setup_adtrust, setup_kra=options.setup_kra, setup_ca=options.setup_ca, idstart=options.idstart, idmax=options.idmax, no_pkinit=options.no_pkinit, ### ssl certificate ### _dirsrv_pkcs12_file=dirsrv_pkcs12_file, _dirsrv_pkcs12_info=dirsrv_pkcs12_info, _dirsrv_ca_cert=dirsrv_ca_cert, _http_pkcs12_file=http_pkcs12_file, _http_pkcs12_info=http_pkcs12_info, _http_ca_cert=http_ca_cert, _pkinit_pkcs12_file=pkinit_pkcs12_file, _pkinit_pkcs12_info=pkinit_pkcs12_info, _pkinit_ca_cert=pkinit_ca_cert, ### certificate system ### external_ca=options.external_ca, external_ca_type=options.external_ca_type, external_ca_profile=options.external_ca_profile, ### ad trust ### rid_base=options.rid_base, secondary_rid_base=options.secondary_rid_base, ### client ### ntp_servers=options.ntp_servers, ntp_pool=options.ntp_pool, ### additional ### _installation_cleanup=_installation_cleanup, domainlevel=options.domainlevel)
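# --- Illustration (not part of the installer above) ---
# A minimal sketch of the CA-less decision applied above: the CA is set up
# only when no PKCS#12 files are given, and several CA-only options must be
# rejected for a CA-less install. The function name and signature are
# illustrative, not the installer's API.
def validate_ca_options(dirsrv_cert_files, ca_subject=None,
                        subject_base=None, setup_kra=False):
    setup_ca = not dirsrv_cert_files  # PKCS#12 files supplied -> CA-less
    if not setup_ca:
        for option, value in (("--ca-subject", ca_subject),
                              ("--subject-base", subject_base),
                              ("--setup-kra", setup_kra)):
            if value:
                raise ValueError(
                    "%s cannot be used with CA-less installation" % option)
    return setup_ca

assert validate_ca_options([]) is True
assert validate_ca_options(["/tmp/dirsrv.p12"]) is False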
class ArubaAnsibleModule: ''' Aruba Ansible module wrapper class ''' def __init__(self, module_args, store_config=True): ''' module init function ''' self.module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) self.warnings = list() self.changed = False self.original_config = None self.running_config = None self.switch_current_firmware = None self.switch_platform = None self.get_switch_platform() self.get_switch_firmware_version() self.get_switch_config(store_config=store_config) for unsupported in ("10.00", "10.01", "10.02"): if unsupported in self.switch_current_firmware: self.module.fail_json(msg="Minimum supported " "firmware version is 10.03") def get_switch_platform(self): ''' Returns the switch platform ''' platform_url = '/rest/v1/system?attributes=platform_name' platform = get(self.module, platform_url) self.switch_platform = platform["platform_name"] def get_switch_firmware_version(self): ''' Returns the switch firmware ''' firmware_url = '/rest/v1/firmware' firmware_versions = get(self.module, firmware_url) self.switch_current_firmware = firmware_versions["current_version"] def get_firmware_upgrade_status(self): ''' Returns the firmware upgrade status ''' firmware_status_url = '/rest/v1/firmware/status' firmware_update_status = get(self.module, firmware_status_url) return firmware_update_status def get_switch_config(self, config_name='running-config', store_config=True): ''' Returns the switch config ''' config_url = '/rest/v1/fullconfigs/{cfg}'.format(cfg=config_name) running_config = get(self.module, config_url) if store_config: self.running_config = copy.deepcopy(running_config) self.original_config = copy.deepcopy(running_config) return running_config def copy_switch_config_to_remote_location(self, config_name, config_type, destination, vrf): ''' TFTP switch config to TFTP server ''' config_url = ('/rest/v1/fullconfigs/' '{cfg}?to={dest}&type={type}' '&vrf={vrf}'.format(cfg=config_name, dest=destination, type=config_type, vrf=vrf)) get(self.module, config_url) return def tftp_switch_config_from_remote_location(self, config_file_location, config_name, vrf): ''' TFTP switch config from TFTP server ''' config_url = ('/rest/v1/fullconfigs/' '{cfg}?from={dest}&vrf={vrf}' ''.format(cfg=config_name, dest=config_file_location, vrf=vrf)) put(self.module, config_url) return def upload_switch_config(self, config, config_name='running-config'): ''' Upload switch config ''' config_url = '/rest/v1/fullconfigs/{cfg}'.format(cfg=config_name) config_json = json.dumps(config) put(self.module, config_url, config_json) return def update_switch_config(self): ''' Update switch config ''' self.result = dict(changed=self.changed, warnings=self.warnings) if self.original_config != self.running_config: self.upload_switch_config(self.running_config) self.result["changed"] = True else: self.result["changed"] = False self.module.log("============================ No Change =======" "===========================") self.module.exit_json(**self.result) with open('/tmp/debugging_running_config.json', 'w') as to_file: json.dump(self.running_config, to_file, indent=4) to_file.write("\n") self.module.exit_json(**self.result)
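# --- Illustration (not part of the wrapper above) ---
# A hedged usage sketch of the wrapper class: read-modify-write of the
# running config. It assumes the collection's REST get/put helpers and an
# established httpapi connection are in place; the 'hostname' key and the
# function name are purely illustrative.
def run_hostname_module():
    module_args = dict(hostname=dict(type='str', required=True))
    aruba = ArubaAnsibleModule(module_args)  # fetches platform, firmware, config
    wanted = aruba.module.params['hostname']
    if aruba.running_config.get('System', {}).get('hostname') != wanted:
        aruba.running_config.setdefault('System', {})['hostname'] = wanted
    aruba.update_switch_config()  # uploads and reports changed only on a diff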
def run_module(): # module arguments module_args = dict(hostname=dict(type='str', required=True), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True), ssh_public_key=dict(type='str', required=True)) # results dictionary result = dict(changed=False, original_message='', message='') # create ansible module object module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) # MODULE TASKS TO BE PERFORMED BELOW.. # set authorized key path if module.params['username'] == 'root': base_dir = '/root/' else: base_dir = '/home/%s' % module.params['username'] auth_key = join(base_dir, '.ssh/authorized_keys') # create ssh client via paramiko ssh_con = paramiko.SSHClient() ssh_con.load_system_host_keys() ssh_con.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # connect to remote system ssh_con.connect(hostname=module.params['hostname'], username=module.params['username'], password=module.params['password']) # create sftp client sftp_con = ssh_con.open_sftp() # read ssh public key with open(module.params['ssh_public_key'], 'r') as fh: data = fh.read() # read remote system authorized key (sftp reads return bytes on Python 3) file_reader = sftp_con.open(auth_key, mode='r') authorized_key_data = file_reader.read().decode('utf-8') file_reader.close() # inject ssh public key if data in authorized_key_data: result['message'] = 'SSH public key already injected!' result['changed'] = False module.log(result['message']) else: file_handler = sftp_con.file(auth_key, mode='a') file_handler.write(data) file_handler.flush() file_handler.close() result['message'] = 'SSH public key injected!' result['changed'] = True module.log(result['message']) # close sftp connection sftp_con.close() # close ssh connection ssh_con.close() # exit with results module.exit_json(**result)
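# --- Illustration (not part of the module above) ---
# The injection is idempotent only because the key is searched for before
# appending. A minimal, dependency-free sketch of that decision (the function
# name is illustrative):
def needs_injection(public_key, authorized_keys_data):
    """Return True when public_key is absent from the authorized_keys text."""
    if isinstance(authorized_keys_data, bytes):  # sftp reads return bytes on Python 3
        authorized_keys_data = authorized_keys_data.decode('utf-8')
    return public_key.strip() not in authorized_keys_data

assert needs_injection('ssh-rsa AAAA... user@host', '') is True
assert needs_injection('ssh-rsa AAAA... user@host',
                       'ssh-rsa AAAA... user@host\n') is False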
def main(): module = AnsibleModule( argument_spec=dict( ### basic ### ntp_servers=dict(required=False, type='list', default=None), ntp_pool=dict(required=False, default=None), no_ntp=dict(required=False, type='bool', default=False), #force_ntpd=dict(required=False, type='bool', default=False), on_master=dict(required=False, type='bool', default=False), ### additional ### servers=dict(required=False, type='list', default=None), domain=dict(required=False, default=None), ), supports_check_mode=True, ) #module._ansible_debug = True options.ntp_servers = module.params.get('ntp_servers') options.ntp_pool = module.params.get('ntp_pool') options.no_ntp = module.params.get('no_ntp') #options.force_ntpd = module.params.get('force_ntpd') options.on_master = module.params.get('on_master') cli_server = module.params.get('servers') cli_domain = module.params.get('domain') options.conf_ntp = not options.no_ntp fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE) statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE) ntp_servers = [] synced_ntp = False if sync_time is not None: if options.conf_ntp: # Attempt to configure and sync time with NTP server (chrony). synced_ntp = sync_time(options, fstore, statestore) elif options.on_master: # If we're on master, skip the time sync here because it was already done # in ipa-server-install logger.info( "Skipping attempt to configure and synchronize time with" " chrony server as it has been already done on master.") else: logger.info("Skipping chrony configuration") elif not options.on_master and options.conf_ntp: # Attempt to sync time with IPA server. # If we're skipping NTP configuration, we also skip the time sync here. # We assume that NTP servers are discoverable through SRV records # in the DNS. # If that fails, we try to sync directly with IPA server, # assuming it runs NTP if not options.ntp_servers: # Detect NTP servers ds = ipadiscovery.IPADiscovery() ntp_servers = ds.ipadns_search_srv(cli_domain, '_ntp._udp', None, break_on_first=False) else: ntp_servers = options.ntp_servers # Attempt to sync time: # At first with given or discovered time servers. If no ntp # servers have been given or discovered, then with the ipa # server. module.log('Synchronizing time ...') synced_ntp = False # use user-specified NTP servers if there are any for s in ntp_servers: synced_ntp = timeconf.synconce_ntp(s, False) if synced_ntp: break if not synced_ntp and not ntp_servers: synced_ntp = timeconf.synconce_ntp(cli_server[0], False) if not synced_ntp: module.warn("Unable to sync time with NTP server") # Done module.exit_json(changed=True, synced_ntp=synced_ntp)
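# --- Illustration (not part of the module above) ---
# A minimal sketch of the fallback order used above: try every candidate NTP
# server once, then fall back to the first IPA server only when no candidates
# were given or discovered. sync_once is a hypothetical stand-in for
# timeconf.synconce_ntp.
def sync_with_fallback(ntp_servers, ipa_servers, sync_once):
    for server in ntp_servers:
        if sync_once(server):
            return True
    if not ntp_servers and ipa_servers:
        return sync_once(ipa_servers[0])
    return False

assert sync_with_fallback([], ['ipa.example.com'], lambda s: True) is True
assert sync_with_fallback(['bad1', 'bad2'], ['ipa.example.com'],
                          lambda s: False) is False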
def main(): global CHANGED global NIM_NODE global OUTPUT global DEBUG_DATA MODULE = AnsibleModule( argument_spec=dict( targets=dict(required=True, type='str'), filesets=dict(required=False, type='str'), installp_bundle=dict(required=False, type='str'), lpp_source=dict(required=False, type='str'), accept_licenses=dict(required=False, type='str'), action=dict(choices=['install', 'commit', 'reject', 'cleanup', 'remove'], required=True, type='str'), preview=dict(required=False, type='str'), time_limit=dict(required=False, type='str'), vars=dict(required=False, type='dict'), vios_status=dict(required=False, type='dict'), nim_node=dict(required=False, type='dict') ), required_if=[ ['action', 'install', ['lpp_source']], ], mutually_exclusive=[ ['filesets', 'installp_bundle'], ], ) # ========================================================================= # Get Module params # ========================================================================= targets_update_status = {} vios_status = {} targets = MODULE.params['targets'] if MODULE.params['vios_status']: vios_status = MODULE.params['vios_status'] else: vios_status = None # build a time structure for time_limit attribute, time_limit = None if MODULE.params['time_limit']: match_key = re.match(r"^\s*\d{2}/\d{2}/\d{4} \S*\d{2}:\d{2}\s*$", MODULE.params['time_limit']) if match_key: time_limit = time.strptime(MODULE.params['time_limit'], '%m/%d/%Y %H:%M') else: msg = 'Malformed time limit "{0}", please use mm/dd/yyyy hh:mm format.'\ .format(MODULE.params['time_limit']) MODULE.fail_json(msg=msg) MODULE.debug('*** START NIM UPDATE VIOS OPERATION ***') OUTPUT.append('Updateios operation for {0}'.format(MODULE.params['targets'])) MODULE.log('Action {0} for {1} targets'.format(MODULE.params['action'], targets)) # ========================================================================= # build nim node info # ========================================================================= if MODULE.params['nim_node']: NIM_NODE = MODULE.params['nim_node'] else: build_nim_node(MODULE) # ========================================================================= # Perfom checks # ========================================================================= ret = check_vios_targets(MODULE, targets) if (not ret) or (ret is None): OUTPUT.append('Empty target list') MODULE.warn('Warning: Empty target list: "{0}"'.format(targets)) else: targets_list = ret OUTPUT.append('Targets list:{0}'.format(targets_list)) MODULE.debug('Target list: {0}'.format(targets_list)) # ========================================================================= # Perfom the update # ========================================================================= ret = nim_updateios(MODULE, targets_list, vios_status, targets_update_status, time_limit) if targets_update_status: OUTPUT.append('NIM updateios operation status:') MODULE.log('NIM updateios operation status:') for vios_key in targets_update_status: OUTPUT.append(" {0} : {1}".format(vios_key, targets_update_status[vios_key])) MODULE.log(' {0} : {1}'.format(vios_key, targets_update_status[vios_key])) MODULE.log('NIM updateios operation result: {0}'.format(targets_update_status)) else: MODULE.log('NIM updateios operation: status table is empty') OUTPUT.append('NIM updateios operation: Error getting the status') targets_update_status = vios_status # ========================================================================= # Exit # ========================================================================= MODULE.exit_json( changed=CHANGED, msg="NIM 
updateios operation completed successfully", targets=MODULE.params['targets'], debug_output=DEBUG_DATA, output=OUTPUT, status=targets_update_status)
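# --- Illustration (not part of the module above) ---
# Both the updateios module above and the upgradeios module below validate
# the time_limit parameter the same way: a regex pre-check, then
# time.strptime. A self-contained sketch of that validation (the function
# name is illustrative):
import re
import time

def parse_time_limit(value):
    if re.match(r"^\s*\d{2}/\d{2}/\d{4} \S*\d{2}:\d{2}\s*$", value):
        return time.strptime(value, '%m/%d/%Y %H:%M')
    raise ValueError('Malformed time limit "{0}", please use '
                     'mm/dd/yyyy hh:mm format.'.format(value))

assert parse_time_limit('12/31/2024 23:59').tm_year == 2024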
def main(): module_args = dict(interface=dict(type='str', required=True), admin_state=dict(default='up', choices=['up', 'down']), description=dict(type='str', default=None), ipv4=dict(type='list', default=None), ipv6=dict(type='list', default=None), vrf=dict(type='str', default=None), ip_helper_address=dict(type='list', default=None), active_gateway_ip=dict(type='str', default=None), active_gateway_mac_v4=dict(type='str', default=None), state=dict(default='present', choices=['present', 'absent'])) warnings = list() result = dict(changed=False, warnings=warnings) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if module.check_mode: module.exit_json(changed=False) connection = Connection(module._socket_path) get_response = connection.get_running_config() module.log(msg=get_response) interface = module.params['interface'].lower() admin_state = module.params['admin_state'] description = module.params['description'] ipv4 = module.params['ipv4'] ipv6 = module.params['ipv6'] vrf = module.params['vrf'] ip_helper_address = module.params['ip_helper_address'] active_gateway_ip = module.params['active_gateway_ip'] active_gateway_mac_v4 = module.params['active_gateway_mac_v4'] state = module.params['state'] try: json_data = json.loads(get_response) except ValueError: module.fail_json(msg=get_response) ''' Verify if input interface string is valid ''' if (len(interface.encode('utf-8')) > 8) or (not bool( re.match('^[a-zA-Z0-9/]+$', interface))): module.fail_json(msg='Interface is not valid.') ''' Deleting interface ''' if state == 'absent': get_json = json.loads(get_response) encode_interface = interface.replace('/', '%2F') if encode_interface in json_data["Port"].keys(): json_data["Port"].pop(encode_interface) else: warnings.append("Interface " + interface + " has already been removed.") if encode_interface in json_data["System"]["bridge"][ "Bridge1"].setdefault("ports", []): json_data["System"]["bridge"]["Bridge1"]["ports"].remove( encode_interface) if "Interface" in json_data.keys(): if encode_interface in json_data["Interface"].keys(): json_data["Interface"].pop(encode_interface) for item in json_data["System"]["vrfs"].keys(): if encode_interface in json_data["System"]["vrfs"][ item].setdefault("ports", []): json_data["System"]["vrfs"][item].get("ports").remove( encode_interface) ''' Adding interface ''' if state == 'present': get_json = json.loads(get_response) if interface is not None: vid = interface.replace('vlan', '') if "vlans" not in json_data["System"]["bridge"]["Bridge1"].keys(): module.fail_json( msg='VLAN ' + str(vid) + ' should be created before creating interface ' + interface) if vid not in json_data["System"]["bridge"]["Bridge1"][ "vlans"].keys(): module.fail_json( msg='VLAN ' + str(vid) + ' should be created before creating interface ' + interface) if "Port" not in json_data: json_data["Port"] = {} json_data["Port"].setdefault(interface, {})["name"] = interface json_data["Port"][interface]["vlan_tag"] = interface.replace( 'vlan', '') if interface not in json_data["Port"][interface].setdefault( "interfaces", []): json_data["Port"][interface]["interfaces"].append(interface) if "default" in json_data["System"]["vrfs"].keys(): if interface not in json_data["System"]["vrfs"]["default"][ "ports"]: json_data["System"]["vrfs"]["default"].setdefault( "ports", []).append(interface) else: json_data["System"]["vrfs"]["default"] = { "name": "default", "ports": [interface] } if interface not in json_data["System"]["bridge"][ "Bridge1"].setdefault("ports", []): 
json_data["System"]["bridge"]["Bridge1"]["ports"].append( interface) module.log(msg='Added Interface: ' + interface) if admin_state == "up": json_data["Port"][interface]["admin"] = "up" json_data.setdefault("Interface", {})[interface] = { "name": interface, "type": "internal", "user_config": { "admin": "up" } } elif admin_state == "down": json_data["Port"][interface]["admin"] = "down" json_data.setdefault("Interface", {})[interface] = { "name": interface, "type": "internal", "user_config": { "admin": "down" } } if description is not None: json_data["Port"][interface]["description"] = description module.log(msg="Added interface with name='" + interface + "' description='" + description + "'") ''' Attaching IPv4 address to interface ''' if ipv4 is not None: json_data["Port"][interface]["ip4_address"] = ipv4[0] if len(ipv4) > 2: json_data["Port"][interface]["ip4_address_secondary"] = [] for item in ipv4[1:]: json_data["Port"][interface][ "ip4_address_secondary"].append(item) elif len(ipv4) == 2: json_data["Port"][interface]["ip4_address_secondary"] = ipv4[1] module.log(msg='Attached IPv4 address ' + ''.join(ipv4) + ' to interface ' + interface) ''' Attaching IPv6 address to interface ''' if ipv6 is not None: if "ip6_addresses" in json_data["Port"][interface].keys(): json_data["Port"][interface]["ip6_addresses"] = {} for item in ipv6: json_data["Port"][interface].setdefault("ip6_addresses", {})[item] =\ {"node_address": True, "preferred_lifetime": 604800, "ra_prefix": True, "type": "global-unicast", "valid_lifetime": 2592000} module.log(msg='Attached IPv6 address ' + ' '.join(ipv6) + ' to interface ' + interface) ''' Attaching interface to non-default VRF ''' if vrf is not None: if vrf not in json_data["System"]["vrfs"].keys(): warnings.append('VRF ' + vrf + ' does not exist on switch.') elif interface not in json_data["System"]["vrfs"][vrf].setdefault( "ports", []): json_data["System"]["vrfs"][vrf]["ports"].append(interface) if vrf != 'default' and (interface in json_data["System"]["vrfs"].get( "default", {}).get("ports", [])): json_data["System"]["vrfs"]["default"]["ports"].remove( interface) if not json_data["System"]["vrfs"]["default"]["ports"]: json_data["System"]["vrfs"].pop("default") module.log(msg='Attached interface ' + interface + ' to VRF ' + vrf) elif interface in json_data["System"]["vrfs"][vrf].get( "ports", []): if vrf != 'default' and (interface in json_data["System"]["vrfs"].get( "default", {}).get("ports", [])): json_data["System"]["vrfs"]["default"]["ports"].remove( interface) if not json_data["System"]["vrfs"]["default"]["ports"]: json_data["System"]["vrfs"].pop("default") ''' Attaching helper-address to interface ''' if ip_helper_address is not None: if (vrf is not None) and (vrf in json_data["System"]["vrfs"].keys()): vrf_dhcp = vrf else: vrf_dhcp = "default" dhcp_name = vrf_dhcp + "/" + interface if len(ip_helper_address) <= 1: json_data.setdefault("DHCP_Relay", {})[dhcp_name] = { "ipv4_ucast_server": ip_helper_address[0], "port": interface, "vrf": vrf_dhcp } else: json_data.setdefault("DHCP_Relay", {})[dhcp_name] = { "ipv4_ucast_server": [], "port": interface, "vrf": vrf_dhcp } for item in ip_helper_address: json_data["DHCP_Relay"][dhcp_name][ "ipv4_ucast_server"].append(item) json_data["DHCP_Relay"][dhcp_name][ "ipv4_ucast_server"].sort() ''' Attaching active gateway to interface ''' if (active_gateway_ip is None) and (active_gateway_mac_v4 is not None): warnings.append( "Both active_gateway_ip and active_gateway_mac_v4 are required for configure active gateway." 
) if (active_gateway_ip is not None) and (active_gateway_mac_v4 is not None): json_data["Port"][interface]["vsx_virtual_ip4"] = active_gateway_ip json_data["Port"][interface][ "vsx_virtual_gw_mac_v4"] = active_gateway_mac_v4 ''' Writing Debugging File ''' with open('/tmp/debugging_running_config.json', 'w') as to_file: json.dump(json_data, to_file, indent=4) to_file.write("\n") ''' Checking idempotency; only push the config when it differs ''' if get_json != json_data: ''' Updating running config on remote switch ''' connection.put_running_config(json.dumps(json_data)) result["changed"] = True else: module.log(msg="========Nothing Changed=========") module.exit_json(**result)
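# --- Illustration (not part of the module above) ---
# The pattern above is read-modify-write on a JSON running config: mutate a
# copy, then compare with the pristine original to decide "changed". A
# minimal dependency-free sketch (names are illustrative):
import copy
import json

def apply_if_changed(original, mutate, push):
    """mutate(config) edits in place; push(text) uploads. Returns changed."""
    desired = copy.deepcopy(original)
    mutate(desired)
    if desired == original:
        return False
    push(json.dumps(desired))
    return True

sent = []
changed = apply_if_changed(
    {'Port': {}},
    lambda cfg: cfg['Port'].update({'vlan10': {'name': 'vlan10'}}),
    sent.append)
assert changed and sent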
def main(): global module global results module = AnsibleModule( argument_spec=dict( action=dict(choices=['migrate'], required=False, type='str', default='migrate'), targets=dict(required=True, type='list', elements='str'), time_limit=dict(required=False, type='str'), vios_status=dict(required=False, type='dict'), nim_node=dict(required=False, type='dict'), # migrate operation mksysb_name=dict(type='str'), mksysb_prefix=dict(type='str'), mksysb_postfix=dict(type='str'), backup_name=dict(type='str'), backup_prefix=dict(type='str'), backup_postfix=dict(type='str'), spot_name=dict(type='str'), spot_prefix=dict(type='str'), spot_postfix=dict(type='str'), lpp_source=dict(type='str'), bosinst_data=dict(type='str'), resolv_conf=dict(type='str'), image_data=dict(type='str'), log=dict(type='str'), file_resource=dict(type='str'), group=dict(type='str'), disk=dict(type='str'), cluster=dict(type='str'), current_database=dict(type='str'), command_flags=dict(type='str'), viosbr_flags=dict(type='str'), mk_image=dict(type='bool', default=False), boot_client=dict(type='bool', default=False), set_boot_list=dict(type='bool', default=False), concurrent=dict(type='bool', default=False), manage_cluster=dict(type='bool', default=True), debug=dict(type='bool', default=False), ), ) results = dict( changed=False, msg='', targets=[], stdout='', stderr='', meta={'messages': []}, # meta structure will be updated as follows: # meta={ # 'messages': [], # target_key:{ # 'messages': [], # vios:{ # 'stdout': '', # 'stderr': '', # } # } # } nim_node={}, status={}, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') # build a time structure for the time_limit attribute time_limit = None if module.params['time_limit']: match_key = re.match(r"^\s*\d{2}/\d{2}/\d{4} \S*\d{2}:\d{2}\s*$", module.params['time_limit']) if match_key: time_limit = time.strptime(module.params['time_limit'], '%m/%d/%Y %H:%M') module.time_limit = time_limit else: results['msg'] = 'Malformed time limit "{0}", please use mm/dd/yyyy hh:mm format.'.format(module.params['time_limit']) module.fail_json(**results) module.debug('*** START NIM UPGRADE VIOS OPERATION ***') results['meta']['messages'].append('Upgradeios operation for {0}'.format(module.params['targets'])) module.log('Upgradeios operation {0} for targets: {1}'.format(module.params['action'], module.params['targets'])) # build nim_node refresh_nim_node(module, 'vios') # check targets are valid NIM clients results['targets'] = check_vios_targets(module, module.params['targets']) if not results['targets']: module.log('Warning: Empty target list, targets: \'{0}\''.format(module.params['targets'])) results['msg'] = 'Empty target list, please check their NIM states and that they are reachable.'
module.exit_json(**results) module.debug('Target list: {0}'.format(results['targets'])) # initialize the results dictionary for target tuple keys for target in results['targets']: vios_key = tuple_str(target) results['status'][vios_key] = '' results['meta'][vios_key] = {'messages': []} for vios in target: results['meta'][vios_key][vios] = {} # set default postfix if not module.params['mksysb_name'] and module.params['mksysb_postfix'] is None: module.params['mksysb_postfix'] = '_sysb' if not module.params['backup_name'] and module.params['backup_postfix'] is None: module.params['backup_postfix'] = '_iosb' # perform the operation if module.params['action'] == 'migrate': nim_migvios_all(module, results['targets'], time_limit) # set status and exit if not results['status']: module.log('NIM upgradeios operation: status table is empty') results['meta']['messages'].append('Warning: status table is empty, returning initial vios_status.') results['status'] = module.params['vios_status'] results['msg'] = "NIM upgradeios operation completed. See meta data for details." module.log(results['msg']) else: target_errored = [key for key, val in results['status'].items() if 'FAILURE' in val] if len(target_errored): results['msg'] = "NIM upgradeios operation failed for {0}. See status and meta for details.".format(target_errored) module.log(results['msg']) module.fail_json(**results) else: results['msg'] = "NIM upgradeios operation completed. See status and meta for details." module.log(results['msg']) module.exit_json(**results)
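# --- Illustration (not part of the module above) ---
# A sketch of how the per-target 'meta' skeleton above is laid out, with a
# hypothetical tuple_str stand-in that joins a VIOS tuple into one key:
def build_meta(targets):
    meta = {'messages': []}
    status = {}
    for target in targets:
        vios_key = '-'.join(target)  # stand-in for tuple_str
        status[vios_key] = ''
        meta[vios_key] = {'messages': []}
        for vios in target:
            meta[vios_key][vios] = {}
    return meta, status

meta, status = build_meta([('vios1', 'vios2')])
assert status == {'vios1-vios2': ''}
assert meta['vios1-vios2']['vios1'] == {}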
class OpenStackModule: """Openstack Module is a base class for all Openstack Module classes. The class has `run` function that should be overriden in child classes, the provided methods include: Methods: params: Dictionary of Ansible module parameters. module_name: Module name (i.e. server_action) sdk_version: Version of used OpenstackSDK. results: Dictionary for return of Ansible module, must include `changed` keyword. exit, exit_json: Exit module and return data inside, must include changed` keyword in a data. fail, fail_json: Exit module with failure, has `msg` keyword to specify a reason of failure. conn: Connection to SDK object. log: Print message to system log. debug: Print debug message to system log, prints if Ansible Debug is enabled or verbosity is more than 2. check_deprecated_names: Function that checks if module was called with a deprecated name and prints the correct name with deprecation warning. check_versioned: helper function to check that all arguments are known in the current SDK version. run: method that executes and shall be overriden in inherited classes. Args: deprecated_names: Should specify deprecated modules names for current module. argument_spec: Used for construction of Openstack common arguments. module_kwargs: Additional arguments for Ansible Module. """ deprecated_names = () argument_spec = {} module_kwargs = {} module_min_sdk_version = None def __init__(self): """Initialize Openstack base class. Set up variables, connection to SDK and check if there are deprecated names. """ self.ansible = AnsibleModule( openstack_full_argument_spec(**self.argument_spec), **self.module_kwargs) self.params = self.ansible.params self.module_name = self.ansible._name self.sdk_version = None self.results = {'changed': False} self.exit = self.exit_json = self.ansible.exit_json self.fail = self.fail_json = self.ansible.fail_json self.sdk, self.conn = self.openstack_cloud_from_module() self.check_deprecated_names() def log(self, msg): """Prints log message to system log. Arguments: msg {str} -- Log message """ self.ansible.log(msg) def debug(self, msg): """Prints debug message to system log Arguments: msg {str} -- Debug message. """ if self.ansible._debug or self.ansible._verbosity > 2: self.ansible.log(" ".join(['[DEBUG]', msg])) def check_deprecated_names(self): """Check deprecated module names if `deprecated_names` variable is set. """ new_module_name = OVERRIDES.get(self.module_name) if self.module_name in self.deprecated_names and new_module_name: self.ansible.deprecate( "The '%s' module has been renamed to '%s' in openstack " "collection: openstack.cloud.%s" % (self.module_name, new_module_name, new_module_name), version='2.10') def openstack_cloud_from_module(self): """Sets up connection to cloud using provided options. Checks if all provided variables are supported for the used SDK version. 
""" try: # Due to the name shadowing we should import other way sdk = importlib.import_module('openstack') sdk_version_lib = importlib.import_module('openstack.version') self.sdk_version = sdk_version_lib.__version__ except ImportError: self.fail_json(msg='openstacksdk is required for this module') # Fail if the available SDK version doesn't meet the minimum version # requirements if self.module_min_sdk_version: min_version = max(StrictVersion(MINIMUM_SDK_VERSION), StrictVersion(self.module_min_sdk_version)) else: min_version = StrictVersion(MINIMUM_SDK_VERSION) if StrictVersion(self.sdk_version) < min_version: self.fail( msg="To utilize this module, the installed version of " "the openstacksdk library MUST be >={min_version}.".format( min_version=min_version)) # Fail if there are set unsupported for this version parameters # New parameters should NOT use 'default' but rely on SDK defaults for param in self.argument_spec: if (self.params[param] is not None and 'min_ver' in self.argument_spec[param] and StrictVersion(self.sdk_version) < self.argument_spec[param]['min_ver']): self.fail_json( msg= "To use parameter '{param}' with module '{module}', the installed version of " "the openstacksdk library MUST be >={min_version}.".format( min_version=self.argument_spec[param]['min_ver'], param=param, module=self.module_name)) if (self.params[param] is not None and 'max_ver' in self.argument_spec[param] and StrictVersion(self.sdk_version) > self.argument_spec[param]['max_ver']): self.fail_json( msg= "To use parameter '{param}' with module '{module}', the installed version of " "the openstacksdk library MUST be <={max_version}.".format( max_version=self.argument_spec[param]['max_ver'], param=param, module=self.module_name)) cloud_config = self.params.pop('cloud', None) if isinstance(cloud_config, dict): fail_message = ( "A cloud config dict was provided to the cloud parameter" " but also a value was provided for {param}. If a cloud" " config dict is provided, {param} should be" " excluded.") for param in ('auth', 'region_name', 'validate_certs', 'ca_cert', 'client_key', 'api_timeout', 'auth_type'): if self.params[param] is not None: self.fail_json(msg=fail_message.format(param=param)) # For 'interface' parameter, fail if we receive a non-default value if self.params['interface'] != 'public': self.fail_json(msg=fail_message.format(param='interface')) else: cloud_config = dict( cloud=cloud_config, auth_type=self.params['auth_type'], auth=self.params['auth'], region_name=self.params['region_name'], verify=self.params['validate_certs'], cacert=self.params['ca_cert'], key=self.params['client_key'], api_timeout=self.params['api_timeout'], interface=self.params['interface'], ) try: return sdk, sdk.connect(**cloud_config) except sdk.exceptions.SDKException as e: # Probably a cloud configuration/login error self.fail_json(msg=str(e)) # Filter out all arguments that are not from current SDK version def check_versioned(self, **kwargs): """Check that provided arguments are supported by current SDK version Returns: versioned_result {dict} dictionary of only arguments that are supported by current SDK version. All others are dropped. 
""" versioned_result = {} for var_name in kwargs: if ('min_ver' in self.argument_spec[var_name] and StrictVersion(self.sdk_version) < self.argument_spec[var_name]['min_ver']): continue if ('max_ver' in self.argument_spec[var_name] and StrictVersion(self.sdk_version) > self.argument_spec[var_name]['max_ver']): continue versioned_result.update({var_name: kwargs[var_name]}) return versioned_result @abc.abstractmethod def run(self): """Function for overriding in inhetired classes, it's executed by default. """ pass def __call__(self): """Execute `run` function when calling the class. """ try: results = self.run() if results and isinstance(results, dict): self.ansible.exit_json(**results) except self.sdk.exceptions.OpenStackCloudException as e: params = { 'msg': str(e), 'extra_data': { 'data': getattr(e, 'extra_data', 'None'), 'details': getattr(e, 'details', 'None'), 'response': getattr(getattr(e, 'response', ''), 'text', 'None') } } self.ansible.fail_json(**params) # if we got to this place, modules didn't exit self.ansible.exit_json(**self.results)
def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( nodes=dict(type='json', required=True), amount=dict(type='int', required=True), duration=dict(type='int', required=True), ) global module module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) rc = 0 stderr = "err" stderr_lines = ["errl1", "errl2"] stdout = "out" stdout_lines = ["outl1", "outl1"] nodes = module.params['nodes'] amount = module.params['amount'] duration = module.params['duration'] result = dict( changed=True, stdout=stdout, stdout_lines=stdout_lines, stderr=stderr, stderr_lines=stderr_lines, rc=rc, ) # Load the Kubernetes configuration load_kubernetes_config() configuration = client.Configuration() configuration.assert_hostname = False client.api_client.ApiClient(configuration=configuration) nodes_list = json.loads(nodes) module.log(msg='Nodes list size:' + str(len(nodes_list))) # We get the final list of nodes to drain all_workers = get_worker_nodes() if len(nodes_list) == 0: # This means that there are no specific nodes, get random nodes module.log(msg='Nodes list is empty, we get random nodes') if amount >= len(all_workers): amount = len(all_workers) nodes_list = sample(all_workers, amount) else: aux_nodes = [] for node in nodes_list: if node in all_workers: aux_nodes.append(node) nodes_list = aux_nodes module.log(msg='Nodes to drain: ' + str(nodes_list)) # We cordon the nodes and drain them for node_name in nodes_list: module.log(msg='Node to cordon:' + node_name) if cordon_node(node_name): module.log(msg='Cordon OK - Node to drain:' + node_name) drain_node(node_name) else: module.log(msg='Cordon NOT OK') # We wait until the duration passes module.log(msg='Waiting for: ' + str(duration)) time.sleep(duration) # We restore the nodes for node_name in nodes_list: module.log(msg='Node to uncordon:' + node_name) if uncordon_node(node_name): module.log(msg='Uncordon OK') else: module.log(msg='Uncordon NOT OK') if module.check_mode: return result module.exit_json(**result)
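# --- Illustration (not part of the module above) ---
# The node-selection rule above, distilled: an empty request means "sample
# randomly from all workers", otherwise keep only requested nodes that
# actually exist. A dependency-free sketch (the function name is
# illustrative):
from random import sample

def select_nodes(requested, all_workers, amount):
    if not requested:
        return sample(all_workers, min(amount, len(all_workers)))
    return [n for n in requested if n in all_workers]

assert select_nodes(['w1', 'ghost'], ['w1', 'w2'], 2) == ['w1']
assert len(select_nodes([], ['w1', 'w2', 'w3'], 5)) == 3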
def main(): global module global results global suma_params module = AnsibleModule( argument_spec=dict( action=dict(required=False, choices=['download', 'preview', 'list', 'edit', 'run', 'unschedule', 'delete', 'config', 'default'], type='str', default='preview'), oslevel=dict(required=False, type='str', default='Latest'), last_sp=dict(required=False, type='bool', default=False), extend_fs=dict(required=False, type='bool', default=True), download_dir=dict(required=False, type='path', default='/usr/sys/inst.images'), download_only=dict(required=False, type='bool', default=False), save_task=dict(required=False, type='bool', default=False), task_id=dict(required=False, type='str'), sched_time=dict(required=False, type='str'), description=dict(required=False, type='str'), metadata_dir=dict(required=False, type='path', default='/var/adm/ansible/metadata'), ), required_if=[ ['action', 'edit', ['task_id']], ['action', 'delete', ['task_id']], ['action', 'run', ['task_id']], ['action', 'download', ['oslevel']], ['action', 'preview', ['oslevel']], ['action', 'unschedule', ['task_id']], ], supports_check_mode=True ) results = dict( changed=False, msg='', stdout='', stderr='', meta={'messages': []}, ) module.debug('*** START ***') module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') action = module.params['action'] # switch action if action == 'list': suma_params['task_id'] = module.params['task_id'] suma_list() elif action == 'edit': suma_params['task_id'] = module.params['task_id'] suma_params['sched_time'] = module.params['sched_time'] suma_edit() elif action == 'unschedule': suma_params['task_id'] = module.params['task_id'] suma_unschedule() elif action == 'delete': suma_params['task_id'] = module.params['task_id'] suma_delete() elif action == 'run': suma_params['task_id'] = module.params['task_id'] suma_run() elif action == 'config': suma_config() elif action == 'default': suma_default() elif action == 'download' or action == 'preview': suma_params['oslevel'] = module.params['oslevel'] suma_params['download_dir'] = module.params['download_dir'] suma_params['metadata_dir'] = module.params['metadata_dir'] suma_params['download_only'] = module.params['download_only'] suma_params['save_task'] = module.params['save_task'] suma_params['last_sp'] = module.params['last_sp'] suma_params['extend_fs'] = module.params['extend_fs'] if module.params['description']: suma_params['description'] = module.params['description'] else: suma_params['description'] = "{0} request for oslevel {1}".format(action, module.params['oslevel']) suma_params['action'] = action suma_download() # Exit msg = 'Suma {0} completed successfully'.format(action) module.log(msg) results['msg'] = msg module.exit_json(**results)
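# --- Illustration (not part of the module above) ---
# The if/elif action switch above could equally be table-driven; a sketch of
# that design alternative (handler names here are stubs, not the suma_*
# functions above):
def dispatch(action, handlers, params):
    try:
        handler = handlers[action]
    except KeyError:
        raise ValueError('Unsupported action: {0}'.format(action))
    return handler(params)

handlers = {'list': lambda p: 'list:' + p['task_id']}
assert dispatch('list', handlers, {'task_id': '42'}) == 'list:42'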
def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( namespace=dict(type='str', required=True), distribution=dict(type='str', required=True), amount=dict(type='int', required=True), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) out = "" err = "" rc = 0 module.log(msg='test!!!!!!!!!!!!!!!!!') namespace = module.params['namespace'] amount = module.params['amount'] result = dict( changed=True, stdout=out, stderr=err, rc=rc, ) result['fact'] = random.choice(FACTS).format( name=module.params['namespace']) # random numbers from poisson distribution n = amount a = 0 data_poisson = poisson.rvs(mu=10, size=n, loc=a) counts, bins, bars = plt.hist(data_poisson) plt.close() load_kubernetes_config() configuration = client.Configuration() configuration.assert_hostname = False client.api_client.ApiClient(configuration=configuration) for experiment in counts: pod_list = get_pods(namespace=namespace) aux_li = [] for fil in pod_list.items: if fil.status.phase == "Running": aux_li.append(fil) pod_list = aux_li # From the Running pods I randomly choose those to die # based on the histogram length print("-------") print("Pod list length: " + str(len(pod_list))) print("Number of pods to get: " + str(int(experiment))) print("-------") # If the experiment count is larger than the pod list, # the maximum will be the length of the pod list if (int(experiment) > len(pod_list)): to_be_killed = random.sample(pod_list, len(pod_list)) else: to_be_killed = random.sample(pod_list, int(experiment)) for pod in to_be_killed: delete_pod(pod.metadata.name, pod.metadata.namespace) print("To be killed: " + str(experiment)) global_kill.append((datetime.datetime.now(), int(experiment))) print(datetime.datetime.now()) print("Ending histogram execution") if module.check_mode: return result module.exit_json(**result)
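# --- Illustration (not part of the module above) ---
# A sketch of the kill-schedule above: draw Poisson samples, histogram them,
# and cap each bucket's kill count by the number of running pods. It uses
# scipy and matplotlib as the module does; function names are illustrative.
import random
from scipy.stats import poisson
import matplotlib
matplotlib.use('Agg')  # no display needed for the histogram
import matplotlib.pyplot as plt

def kill_counts(n, mu=10):
    data = poisson.rvs(mu=mu, size=n)
    counts, _bins, _patches = plt.hist(data)
    plt.close()
    return [int(c) for c in counts]

def choose_victims(pods, count):
    return random.sample(pods, min(int(count), len(pods)))

print(kill_counts(100))  # e.g. ten bucket sizes summing to 100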
class OneViewModuleBase(object): MSG_CREATED = 'Resource created successfully.' MSG_UPDATED = 'Resource updated successfully.' MSG_DELETED = 'Resource deleted successfully.' MSG_ALREADY_PRESENT = 'Resource is already present.' MSG_ALREADY_ABSENT = 'Resource is already absent.' MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. ' HPE_ONEVIEW_SDK_REQUIRED = 'HPE OneView Python SDK is required for this module.' ONEVIEW_COMMON_ARGS = dict( config=dict(type='path'), hostname=dict(type='str'), username=dict(type='str'), password=dict(type='str'), api_version=dict(type='int'), image_streamer_hostname=dict(type='str') ) ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True)) resource_client = None def __init__(self, additional_arg_spec=None, validate_etag_support=False): """ OneViewModuleBase constructor. :arg dict additional_arg_spec: Additional argument spec definition. :arg bool validate_etag_support: Enables support to eTag validation. """ argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support) self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) self._check_hpe_oneview_sdk() self._create_oneview_client() self.state = self.module.params.get('state') self.data = self.module.params.get('data') # Preload params for get_all - used by facts self.facts_params = self.module.params.get('params') or {} # Preload options as dict - used by facts self.options = transform_list_to_dict(self.module.params.get('options')) self.validate_etag_support = validate_etag_support def _build_argument_spec(self, additional_arg_spec, validate_etag_support): merged_arg_spec = dict() merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS) if validate_etag_support: merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS) if additional_arg_spec: merged_arg_spec.update(additional_arg_spec) return merged_arg_spec def _check_hpe_oneview_sdk(self): if not HAS_HPE_ONEVIEW: self.module.fail_json(msg=self.HPE_ONEVIEW_SDK_REQUIRED) def _create_oneview_client(self): if self.module.params.get('hostname'): config = dict(ip=self.module.params['hostname'], credentials=dict(userName=self.module.params['username'], password=self.module.params['password']), api_version=self.module.params['api_version'], image_streamer_ip=self.module.params['image_streamer_hostname']) self.oneview_client = OneViewClient(config) elif not self.module.params['config']: self.oneview_client = OneViewClient.from_environment_variables() else: self.oneview_client = OneViewClient.from_json_file(self.module.params['config']) @abc.abstractmethod def execute_module(self): """ Abstract method, must be implemented by the inheritor. This method is called from the run method. It should contains the module logic :return: dict: It must return a dictionary with the attributes for the module result, such as ansible_facts, msg and changed. """ pass def run(self): """ Common implementation of the OneView run modules. It calls the inheritor 'execute_module' function and sends the return to the Ansible. It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message. 
""" try: if self.validate_etag_support: if not self.module.params.get('validate_etag'): self.oneview_client.connection.disable_etag_validation() result = self.execute_module() if "changed" not in result: result['changed'] = False self.module.exit_json(**result) except OneViewModuleException as exception: error_msg = '; '.join(to_native(e) for e in exception.args) self.module.fail_json(msg=error_msg, exception=traceback.format_exc()) def resource_absent(self, resource, method='delete'): """ Generic implementation of the absent state for the OneView resources. It checks if the resource needs to be removed. :arg dict resource: Resource to delete. :arg str method: Function of the OneView client that will be called for resource deletion. Usually delete or remove. :return: A dictionary with the expected arguments for the AnsibleModule.exit_json """ if resource: getattr(self.resource_client, method)(resource) return {"changed": True, "msg": self.MSG_DELETED} else: return {"changed": False, "msg": self.MSG_ALREADY_ABSENT} def get_by_name(self, name): """ Generic get by name implementation. :arg str name: Resource name to search for. :return: The resource found or None. """ result = self.resource_client.get_by('name', name) return result[0] if result else None def resource_present(self, resource, fact_name, create_method='create'): """ Generic implementation of the present state for the OneView resources. It checks if the resource needs to be created or updated. :arg dict resource: Resource to create or update. :arg str fact_name: Name of the fact returned to the Ansible. :arg str create_method: Function of the OneView client that will be called for resource creation. Usually create or add. :return: A dictionary with the expected arguments for the AnsibleModule.exit_json """ changed = False if "newName" in self.data: self.data["name"] = self.data.pop("newName") if not resource: resource = getattr(self.resource_client, create_method)(self.data) msg = self.MSG_CREATED changed = True else: merged_data = resource.copy() merged_data.update(self.data) if self.compare(resource, merged_data): msg = self.MSG_ALREADY_PRESENT else: resource = self.resource_client.update(merged_data) changed = True msg = self.MSG_UPDATED return dict( msg=msg, changed=changed, ansible_facts={fact_name: resource} ) def resource_scopes_set(self, state, fact_name, scope_uris): """ Generic implementation of the scopes update PATCH for the OneView resources. It checks if the resource needs to be updated with the current scopes. This method is meant to be run after ensuring the present state. :arg dict state: Dict containing the data from the last state results in the resource. It needs to have the 'msg', 'changed', and 'ansible_facts' entries. :arg str fact_name: Name of the fact returned to the Ansible. :arg list scope_uris: List with all the scope URIs to be added to the resource. :return: A dictionary with the expected arguments for the AnsibleModule.exit_json """ if scope_uris is None: scope_uris = [] resource = state['ansible_facts'][fact_name] operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris) if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris): state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data) state['changed'] = True state['msg'] = self.MSG_UPDATED return state def compare(self, first_resource, second_resource): """ Recursively compares dictionary contents equivalence, ignoring types and elements order. 
Particularities of the comparison: - Inexistent key = None - These values are considered equal: None, empty, False - Lists are compared value by value after a sort, if they have same size. - Each element is converted to str before the comparison. :arg dict first_resource: first dictionary :arg dict second_resource: second dictionary :return: bool: True when equal, False when different. """ resource1 = first_resource resource2 = second_resource debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) # The first resource is True / Not Null and the second resource is False / Null if resource1 and not resource2: self.module.log("resource1 and not resource2. " + debug_resources) return False # Checks all keys in first dict against the second dict for key in resource1: if key not in resource2: if resource1[key] is not None: # Inexistent key is equivalent to exist with value None self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) return False # If both values are null, empty or False it will be considered equal. elif not resource1[key] and not resource2[key]: continue elif isinstance(resource1[key], collections.Mapping): # recursive call if not self.compare(resource1[key], resource2[key]): self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) return False elif isinstance(resource1[key], list): # change comparison function to compare_list if not self.compare_list(resource1[key], resource2[key]): self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) return False elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]): self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) return False # Checks all keys in the second dict, looking for missing elements for key in resource2.keys(): if key not in resource1: if resource2[key] is not None: # Inexistent key is equivalent to exist with value None self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources) return False return True def compare_list(self, first_resource, second_resource): """ Recursively compares lists contents equivalence, ignoring types and element orders. Lists with same size are compared value by value after a sort, each element is converted to str before the comparison. :arg list first_resource: first list :arg list second_resource: second list :return: True when equal; False when different. """ resource1 = first_resource resource2 = second_resource debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) # The second list is null / empty / False if not resource2: self.module.log("resource 2 is null. " + debug_resources) return False if len(resource1) != len(resource2): self.module.log("resources have different length. " + debug_resources) return False resource1 = sorted(resource1, key=_str_sorted) resource2 = sorted(resource2, key=_str_sorted) for i, val in enumerate(resource1): if isinstance(val, collections.Mapping): # change comparison function to compare dictionaries if not self.compare(val, resource2[i]): self.module.log("resources are different. " + debug_resources) return False elif isinstance(val, list): # recursive call if not self.compare_list(val, resource2[i]): self.module.log("lists are different. " + debug_resources) return False elif _standardize_value(val) != _standardize_value(resource2[i]): self.module.log("values are different. " + debug_resources) return False # no differences found return True
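# --- Illustration (not part of the base class above) ---
# The "None == empty == False" convention documented above, shown standalone;
# this helper is a hypothetical reduction of the per-key rules compare()
# applies, including the convert-to-str fallback:
def values_equivalent(a, b):
    if not a and not b:  # None, '', [], {}, 0 and False all match
        return True
    return str(a) == str(b)  # elements compared as strings

assert values_equivalent(None, []) is True
assert values_equivalent('1', 1) is True
assert values_equivalent('x', 'y') is False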
def main(): module = AnsibleModule( argument_spec=dict( servers=dict(required=True, type='list'), domain=dict(required=True), ntp=dict(required=False, type='bool', default='no'), force_ntpd=dict(required=False, type='bool', default='no'), ntp_servers=dict(required=False, type='list'), ssh=dict(required=False, type='bool', default='yes'), sssd=dict(required=False, type='bool', default='yes'), trust_sshfp=dict(required=False, type='bool', default='yes'), sshd=dict(required=False, type='bool', default='yes'), automount_location=dict(required=False), firefox=dict(required=False, type='bool', default='no'), firefox_dir=dict(required=False), no_nisdomain=dict(required=False, type='bool', default='no'), nisdomain=dict(required=False), on_master=dict(required=False, type='bool', default='no'), ), supports_check_mode=True, ) module._ansible_debug = True servers = module.params.get('servers') domain = module.params.get('domain') ntp = module.params.get('ntp') force_ntpd = module.params.get('force_ntpd') ntp_servers = module.params.get('ntp_servers') ssh = module.params.get('ssh') sssd = module.params.get('sssd') trust_sshfp = module.params.get('trust_sshfp') sshd = module.params.get('sshd') automount_location = module.params.get('automount_location') firefox = module.params.get('firefox') firefox_dir = module.params.get('firefox_dir') no_nisdomain = module.params.get('no_nisdomain') nisdomain = module.params.get('nisdomain') on_master = module.params.get('on_master') fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE) statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE) logger = logging.getLogger("ipa-client-install") os.environ['KRB5CCNAME'] = CCACHE_FILE class Object(object): pass options = Object() options.sssd = sssd options.trust_sshfp = trust_sshfp options.location = automount_location options.server = servers options.firefox_dir = firefox_dir options.nisdomain = nisdomain if ntp and not on_master: # disable other time&date services first if force_ntpd: ntpconf.force_ntpd(statestore) ntpconf.config_ntp(ntp_servers, fstore, statestore) module.log("NTP enabled") if ssh: configure_ssh_config(fstore, options) if sshd: configure_sshd_config(fstore, options) if automount_location: configure_automount(options) if firefox: configure_firefox(options, statestore, domain) if not no_nisdomain: configure_nisdomain(options=options, domain=domain, statestore=statestore) # Cleanup: Remove CCACHE_FILE try: os.remove(CCACHE_FILE) except Exception: pass module.exit_json(changed=True)
def main(): module = AnsibleModule( argument_spec = dict( servers=dict(required=True, type='list'), domain=dict(required=True), realm=dict(required=True), hostname=dict(required=True), basedn=dict(required=True), principal=dict(required=False), subject_base=dict(required=True), ca_enabled=dict(required=True, type='bool'), mkhomedir=dict(required=False, type='bool'), on_master=dict(required=False, type='bool'), ), supports_check_mode = True, ) module._ansible_debug = True servers = module.params.get('servers') realm = module.params.get('realm') hostname = module.params.get('hostname') basedn = module.params.get('basedn') domain = module.params.get('domain') principal = module.params.get('principal') subject_base = module.params.get('subject_base') ca_enabled = module.params.get('ca_enabled') mkhomedir = module.params.get('mkhomedir') on_master = module.params.get('on_master') fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE) statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE) ########################################################################### os.environ['KRB5CCNAME'] = CCACHE_FILE class Object(object): pass options = Object() options.dns_updates = False options.all_ip_addresses = False options.ip_addresses = None options.request_cert = False options.hostname = hostname options.preserve_sssd = False options.on_master = False options.conf_ssh = True options.conf_sshd = True options.conf_sudo = True options.primary = False options.permit = False options.krb5_offline_passwords = False options.create_sshfp = True ########################################################################## # Create IPA NSS database try: create_ipa_nssdb() except ipautil.CalledProcessError as e: module.fail_json(msg="Failed to create IPA NSS database: %s" % e) # Get CA certificates from the certificate store try: ca_certs = get_certs_from_ldap(servers[0], basedn, realm, ca_enabled) except errors.NoCertificateError: if ca_enabled: ca_subject = DN(('CN', 'Certificate Authority'), subject_base) else: ca_subject = None ca_certs = certstore.make_compat_ca_certs(ca_certs, realm, ca_subject) ca_certs_trust = [(c, n, certstore.key_policy_to_trust_flags(t, True, u)) for (c, n, t, u) in ca_certs] if hasattr(paths, "KDC_CA_BUNDLE_PEM"): x509.write_certificate_list( [c for c, n, t, u in ca_certs if t is not False], paths.KDC_CA_BUNDLE_PEM) if hasattr(paths, "CA_BUNDLE_PEM"): x509.write_certificate_list( [c for c, n, t, u in ca_certs if t is not False], paths.CA_BUNDLE_PEM) # Add the CA certificates to the IPA NSS database module.debug("Adding CA certificates to the IPA NSS database.") ipa_db = certdb.NSSDatabase(paths.IPA_NSSDB_DIR) for cert, nickname, trust_flags in ca_certs_trust: try: ipa_db.add_cert(cert, nickname, trust_flags) except CalledProcessError as e: module.fail_json(msg="Failed to add %s to the IPA NSS database." % nickname) # Add the CA certificates to the platform-dependant systemwide CA store tasks.insert_ca_certs_into_systemwide_ca_store(ca_certs) if not on_master: client_dns(servers[0], hostname, options) configure_certmonger(fstore, subject_base, realm, hostname, options, ca_enabled) if hasattr(paths, "SSH_CONFIG_DIR"): ssh_config_dir = paths.SSH_CONFIG_DIR else: ssh_config_dir = services.knownservices.sshd.get_config_dir() update_ssh_keys(hostname, ssh_config_dir, options.create_sshfp) try: os.remove(CCACHE_FILE) except Exception: pass ########################################################################## # Name Server Caching Daemon. 
Disable for SSSD, use otherwise # (if installed) nscd = services.knownservices.nscd if nscd.is_installed(): save_state(nscd, statestore) try: nscd_service_action = 'stop' nscd.stop() except Exception: module.warn("Failed to %s the %s daemon" % (nscd_service_action, nscd.service_name)) try: nscd.disable() except Exception: module.warn("Failed to disable %s daemon. Disable it manually." % nscd.service_name) nslcd = services.knownservices.nslcd if nslcd.is_installed(): save_state(nslcd, statestore) retcode, conf = (0, None) ########################################################################## # Modify nsswitch/pam stack tasks.modify_nsswitch_pam_stack(sssd=True, mkhomedir=mkhomedir, statestore=statestore) module.log("SSSD enabled") argspec = inspect.getargspec(services.service) if len(argspec.args) > 1: sssd = services.service('sssd', api) else: sssd = services.service('sssd') try: sssd.restart() except CalledProcessError: module.warn("SSSD service restart was unsuccessful.") try: sssd.enable() except CalledProcessError as e: module.warn( "Failed to enable automatic startup of the SSSD daemon: " "%s" % e) if configure_openldap_conf(fstore, basedn, servers): module.log("Configured /etc/openldap/ldap.conf") else: module.log("Failed to configure /etc/openldap/ldap.conf") # Check that nss is working properly if not on_master: user = principal if user is None or user == "": user = "******" % domain module.log("Principal is not set when enrolling with OTP" "; using principal '%s' for 'getent passwd'" % user) elif '@' not in user: user = "******" % (user, domain) n = 0 found = False # Loop for up to 10 seconds to see if nss is working properly. # It can sometimes take a few seconds to connect to the remote # provider. # Particularly, SSSD might take longer than 6-8 seconds. while n < 10 and not found: try: ipautil.run(["getent", "passwd", user]) found = True except Exception as e: time.sleep(1) n = n + 1 if not found: module.fail_json(msg="Unable to find '%s' user with 'getent " "passwd %s'!" % (user.split("@")[0], user)) if conf: module.log("Recognized configuration: %s" % conf) else: module.fail_json(msg= "Unable to reliably detect " "configuration. Check NSS setup manually.") try: hardcode_ldap_server(servers) except Exception as e: module.fail_json(msg="Adding hardcoded server name to " "/etc/ldap.conf failed: %s" % str(e)) ########################################################################## module.exit_json(changed=True, ca_enabled_ra=ca_enabled)
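# --- Illustration (not part of the module above) ---
# The NSS verification above is a bounded retry loop; the same pattern
# stand-alone, where check() is any callable returning True on success
# (the helper name is illustrative):
import time

def wait_for(check, attempts=10, delay=1):
    for _ in range(attempts):
        if check():
            return True
        time.sleep(delay)
    return False

assert wait_for(lambda: True) is True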
def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int', default=8443),
            definition=dict(aliases=['def', 'inline'], type='dict'),
            kind=dict(type='str'),
            name=dict(type='str'),
            namespace=dict(type='str'),
            token=dict(required=True, type='str', no_log=True),
            state=dict(required=True, choices=['present', 'absent']),
            validate_certs=dict(type='bool', default=True)
        ),
        mutually_exclusive=(['kind', 'definition'],
                            ['name', 'definition'],
                            ['namespace', 'definition']),
        required_if=([['state', 'absent', ['kind']]]),
        required_one_of=([['kind', 'definition']]),
        no_log=False,
        supports_check_mode=True
    )

    host = module.params['host']
    port = module.params['port']
    definition = module.params['definition']
    state = module.params['state']
    kind = module.params['kind']
    name = module.params['name']
    namespace = module.params['namespace']
    token = module.params['token']

    if definition is None:
        definition = {}
        definition['metadata'] = {}
        definition['metadata']['name'] = name
        definition['metadata']['namespace'] = namespace

    if "apiVersion" not in definition.keys():
        definition['apiVersion'] = 'v1'
    if "kind" not in definition.keys():
        definition['kind'] = kind

    result = None
    oc = OC(module, token, host, port)
    resource = NamedResource(module,
                             definition,
                             oc.get_resource_endpoint(definition['kind']))

    changed = False
    method = ''
    exists = oc.exists(resource)
    module.log(msg="URL %s" % resource.url())

    if state == 'present' and exists:
        method = 'put'
        result, changed = oc.replace(resource, module.check_mode)
    elif state == 'present' and not exists and definition is not None:
        method = 'create'
        if not module.check_mode:
            result, changed = oc.create(resource)
        else:
            changed = True
            result = definition
    elif state == 'absent' and exists:
        method = 'delete'
        if not module.check_mode:
            result, changed = oc.delete(resource)
        else:
            changed = True
            result = definition

    facts = {}

    if result is not None and "items" in result:
        result['item_list'] = result.pop('items')
    elif result is None and state == 'present':
        result = 'Resource not present and no inline provided.'
    facts['oc'] = {'definition': result,
                   'url': resource.url(),
                   'method': method}

    module.exit_json(changed=changed, ansible_facts=facts)
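
# When only kind/name/namespace are given, the module above synthesizes a
# minimal resource definition. An illustrative sketch of that fallback
# (the helper name is hypothetical; the module inlines this logic):
def build_definition(kind, name, namespace, definition=None):
    definition = dict(definition) if definition else {}
    definition.setdefault('apiVersion', 'v1')
    definition.setdefault('kind', kind)
    definition.setdefault('metadata', {'name': name, 'namespace': namespace})
    return definition

# build_definition('Service', 'web', 'default') returns a dict ready for
# OC.create()/OC.replace() above.
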
class OneViewModuleBase(object):
    MSG_CREATED = 'Resource created successfully.'
    MSG_UPDATED = 'Resource updated successfully.'
    MSG_DELETED = 'Resource deleted successfully.'
    MSG_ALREADY_PRESENT = 'Resource is already present.'
    MSG_ALREADY_ABSENT = 'Resource is already absent.'
    MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
    HPE_ONEVIEW_SDK_REQUIRED = 'HPE OneView Python SDK is required for this module.'

    ONEVIEW_COMMON_ARGS = dict(
        config=dict(required=False, type='str')
    )

    ONEVIEW_VALIDATE_ETAG_ARGS = dict(
        validate_etag=dict(
            required=False,
            type='bool',
            default=True)
    )

    resource_client = None

    def __init__(self, additional_arg_spec=None, validate_etag_support=False):
        """
        OneViewModuleBase constructor.

        :arg dict additional_arg_spec: Additional argument spec definition.
        :arg bool validate_etag_support: Enables support to eTag validation.
        """
        argument_spec = self._build_argument_spec(additional_arg_spec,
                                                  validate_etag_support)

        self.module = AnsibleModule(argument_spec=argument_spec,
                                    supports_check_mode=False)

        self._check_hpe_oneview_sdk()
        self._create_oneview_client()

        self.state = self.module.params.get('state')
        self.data = self.module.params.get('data')

        # Preload params for get_all - used by facts
        self.facts_params = self.module.params.get('params') or {}

        # Preload options as dict - used by facts
        self.options = transform_list_to_dict(
            self.module.params.get('options'))

        self.validate_etag_support = validate_etag_support

    def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
        merged_arg_spec = dict()
        merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)

        if validate_etag_support:
            merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)

        if additional_arg_spec:
            merged_arg_spec.update(additional_arg_spec)

        return merged_arg_spec

    def _check_hpe_oneview_sdk(self):
        if not HAS_HPE_ONEVIEW:
            self.module.fail_json(msg=self.HPE_ONEVIEW_SDK_REQUIRED)

    def _create_oneview_client(self):
        if not self.module.params['config']:
            self.oneview_client = OneViewClient.from_environment_variables()
        else:
            self.oneview_client = OneViewClient.from_json_file(
                self.module.params['config'])

    @abc.abstractmethod
    def execute_module(self):
        """
        Abstract method, must be implemented by the inheritor.

        This method is called from the run method. It should contain the
        module logic.

        :return: dict: It must return a dictionary with the attributes for
            the module result, such as ansible_facts, msg and changed.
        """
        pass

    def run(self):
        """
        Common implementation of the OneView run modules.

        It calls the inheritor's 'execute_module' function and returns the
        result to Ansible.

        It handles any HPOneViewException in order to signal a failure to
        Ansible, with a descriptive error message.
        """
        try:
            if self.validate_etag_support:
                if not self.module.params.get('validate_etag'):
                    self.oneview_client.connection.disable_etag_validation()

            result = self.execute_module()

            if "changed" not in result:
                result['changed'] = False

            self.module.exit_json(**result)

        except HPOneViewException as exception:
            error_msg = '; '.join(to_native(e) for e in exception.args)
            self.module.fail_json(msg=error_msg,
                                  exception=traceback.format_exc())

    def resource_absent(self, resource, method='delete'):
        """
        Generic implementation of the absent state for the OneView resources.

        It checks if the resource needs to be removed.

        :arg dict resource: Resource to delete.
        :arg str method: Function of the OneView client that will be called
            for resource deletion. Usually delete or remove.
        :return: A dictionary with the expected arguments for the
            AnsibleModule.exit_json
        """
        if resource:
            getattr(self.resource_client, method)(resource)
            return {"changed": True, "msg": self.MSG_DELETED}
        else:
            return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}

    def get_by_name(self, name):
        """
        Generic get by name implementation.

        :arg str name: Resource name to search for.
        :return: The resource found or None.
        """
        result = self.resource_client.get_by('name', name)
        return result[0] if result else None

    def resource_present(self, resource, fact_name, create_method='create'):
        """
        Generic implementation of the present state for the OneView resources.

        It checks if the resource needs to be created or updated.

        :arg dict resource: Resource to create or update.
        :arg str fact_name: Name of the fact returned to Ansible.
        :arg str create_method: Function of the OneView client that will be
            called for resource creation. Usually create or add.
        :return: A dictionary with the expected arguments for the
            AnsibleModule.exit_json
        """
        changed = False
        if "newName" in self.data:
            self.data["name"] = self.data.pop("newName")

        if not resource:
            resource = getattr(self.resource_client, create_method)(self.data)
            msg = self.MSG_CREATED
            changed = True
        else:
            merged_data = resource.copy()
            merged_data.update(self.data)

            if self.compare(resource, merged_data):
                msg = self.MSG_ALREADY_PRESENT
            else:
                resource = self.resource_client.update(merged_data)
                changed = True
                msg = self.MSG_UPDATED

        return dict(
            msg=msg,
            changed=changed,
            ansible_facts={fact_name: resource}
        )

    def resource_scopes_set(self, state, fact_name, scope_uris):
        """
        Generic implementation of the scopes update PATCH for the OneView
        resources.

        It checks if the resource needs to be updated with the current
        scopes. This method is meant to be run after ensuring the present
        state.

        :arg dict state: Dict containing the data from the last state
            results in the resource. It needs to have the 'msg', 'changed',
            and 'ansible_facts' entries.
        :arg str fact_name: Name of the fact returned to Ansible.
        :arg list scope_uris: List with all the scope URIs to be added to
            the resource.
        :return: A dictionary with the expected arguments for the
            AnsibleModule.exit_json
        """
        if scope_uris is None:
            scope_uris = []
        resource = state['ansible_facts'][fact_name]
        operation_data = dict(operation='replace', path='/scopeUris',
                              value=scope_uris)

        if resource['scopeUris'] is None \
                or set(resource['scopeUris']) != set(scope_uris):
            state['ansible_facts'][fact_name] = self.resource_client.patch(
                resource['uri'], **operation_data)
            state['changed'] = True
            state['msg'] = self.MSG_UPDATED

        return state

    def compare(self, first_resource, second_resource):
        """
        Recursively compares dictionary contents equivalence, ignoring types
        and element order.

        Particularities of the comparison:
            - A nonexistent key is equivalent to a key with value None.
            - These values are considered equal: None, empty, False.
            - Lists are compared value by value after a sort, if they have
              the same size.
            - Each element is converted to str before the comparison.

        :arg dict first_resource: first dictionary
        :arg dict second_resource: second dictionary
        :return: bool: True when equal, False when different.
        """
        resource1 = first_resource
        resource2 = second_resource

        debug_resources = "resource1 = {0}, resource2 = {1}".format(
            resource1, resource2)

        # The first resource is True / Not Null and the second resource
        # is False / Null
        if resource1 and not resource2:
            self.module.log("resource1 and not resource2. " + debug_resources)
            return False

        # Checks all keys in first dict against the second dict
        for key in resource1:
            if key not in resource2:
                if resource1[key] is not None:
                    # A nonexistent key is equivalent to a key with value
                    # None
                    self.module.log(self.MSG_DIFF_AT_KEY.format(key)
                                    + debug_resources)
                    return False
            # If both values are null, empty or False they are considered
            # equal.
            elif not resource1[key] and not resource2[key]:
                continue
            elif isinstance(resource1[key], collections.Mapping):
                # recursive call
                if not self.compare(resource1[key], resource2[key]):
                    self.module.log(self.MSG_DIFF_AT_KEY.format(key)
                                    + debug_resources)
                    return False
            elif isinstance(resource1[key], list):
                # change comparison function to compare_list
                if not self.compare_list(resource1[key], resource2[key]):
                    self.module.log(self.MSG_DIFF_AT_KEY.format(key)
                                    + debug_resources)
                    return False
            elif _standardize_value(resource1[key]) \
                    != _standardize_value(resource2[key]):
                self.module.log(self.MSG_DIFF_AT_KEY.format(key)
                                + debug_resources)
                return False

        # Checks all keys in the second dict, looking for missing elements
        for key in resource2.keys():
            if key not in resource1:
                if resource2[key] is not None:
                    # A nonexistent key is equivalent to a key with value
                    # None
                    self.module.log(self.MSG_DIFF_AT_KEY.format(key)
                                    + debug_resources)
                    return False

        return True

    def compare_list(self, first_resource, second_resource):
        """
        Recursively compares lists contents equivalence, ignoring types and
        element order.

        Lists with the same size are compared value by value after a sort;
        each element is converted to str before the comparison.

        :arg list first_resource: first list
        :arg list second_resource: second list
        :return: True when equal; False when different.
        """
        resource1 = first_resource
        resource2 = second_resource

        debug_resources = "resource1 = {0}, resource2 = {1}".format(
            resource1, resource2)

        # The second list is null / empty / False
        if not resource2:
            self.module.log("resource 2 is null. " + debug_resources)
            return False

        if len(resource1) != len(resource2):
            self.module.log("resources have different length. "
                            + debug_resources)
            return False

        resource1 = sorted(resource1, key=_str_sorted)
        resource2 = sorted(resource2, key=_str_sorted)

        for i, val in enumerate(resource1):
            if isinstance(val, collections.Mapping):
                # change comparison function to compare dictionaries
                if not self.compare(val, resource2[i]):
                    self.module.log("resources are different. "
                                    + debug_resources)
                    return False
            elif isinstance(val, list):
                # recursive call
                if not self.compare_list(val, resource2[i]):
                    self.module.log("lists are different. " + debug_resources)
                    return False
            elif _standardize_value(val) != _standardize_value(resource2[i]):
                self.module.log("values are different. " + debug_resources)
                return False

        # no differences found
        return True
def main():
    module = AnsibleModule(
        argument_spec=dict(
            product_id=dict(type='list', elements='str', default=['*ALL']),
            virtual_image_name_list=dict(type='list', elements='str',
                                         default=['*ALL']),
            fix_omit_list=dict(type='list', elements='dict'),
            use_temp_path=dict(type='bool', default=True),
            src=dict(type='str', required=True),
            apply_type=dict(type='str', default='*DLYALL',
                            choices=['*DLYALL', '*IMMDLY', '*IMMONLY']),
            hiper_only=dict(type='bool', default=False),
            rollback=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    if HAS_ITOOLKIT is False:
        module.fail_json(msg="itoolkit package is required")
    if HAS_IBM_DB is False:
        module.fail_json(msg="ibm_db package is required")

    product_id = module.params['product_id']
    fix_file_name_list = module.params['virtual_image_name_list']
    fix_omit_list = module.params['fix_omit_list']
    delayed_option = module.params['apply_type']
    path = module.params['src']
    use_temp_path = module.params['use_temp_path']
    hiper_only = module.params['hiper_only']
    rollback = module.params['rollback']

    if not os.path.exists(path):
        return module.fail_json(
            msg="The path specified in src does not exist. "
                "The value is: " + path)
    if not os.path.isdir(path):
        return module.fail_json(
            msg="The value specified in src is not a valid directory. "
                "The value is: " + path)

    startd = datetime.datetime.now()

    connection_id = None
    try:
        connection_id = dbi.connect()
    except Exception as e_db_connect:
        module.fail_json(msg="Exception when connecting to IBM i Db2. "
                             + str(e_db_connect))

    catalog_name = generate_object_name(connection_id, "QUSRSYS", "*IMGCLG",
                                        "ANSIBCLG")
    dev_name = generate_object_name(connection_id, "QSYS", "*DEVD",
                                    "ANSIBOPT")

    if use_temp_path:
        with TemporaryDirectory() as tmp_dir:
            module.log("Creating temp dir: " + tmp_dir)
            if os.path.isdir(tmp_dir):
                if (fix_file_name_list == ["*ALL"]) or \
                        (fix_file_name_list is None):
                    # copy all the objects to the target folder
                    for f in os.listdir(path):
                        source_file = os.path.join(path, f)
                        if os.path.isfile(source_file):
                            shutil.copy(source_file, tmp_dir)
                else:
                    # copy the specific files to the target folder
                    for fix_file_name in fix_file_name_list:
                        source_file = os.path.join(path, fix_file_name)
                        if os.path.exists(source_file):
                            if os.path.isfile(source_file):
                                shutil.copy(source_file, tmp_dir)
                            else:
                                return module.fail_json(
                                    msg=source_file + " is not a file.")
                        else:
                            return module.fail_json(
                                msg="Image file " + source_file
                                    + " does not exist.")

                rc, out, err, command_log = install_by_image_catalog(
                    module, product_id, None, tmp_dir, str(dev_name),
                    str(catalog_name), fix_omit_list, is_rollback=rollback,
                    delayed_option=delayed_option, hiper_only=hiper_only)
            else:
                module.fail_json(msg="Failed creating temp dir.")
    else:
        rc, out, err, command_log = install_by_image_catalog(
            module, product_id, fix_file_name_list, path, dev_name,
            catalog_name, fix_omit_list, is_rollback=rollback,
            delayed_option=delayed_option, hiper_only=hiper_only)

    endd = datetime.datetime.now()
    delta = endd - startd

    out_ptf_list, query_err = return_fix_information(connection_id,
                                                     product_id,
                                                     str(startd), str(endd))

    if connection_id is not None:
        try:
            connection_id.close()
        except Exception as e_disconnect:
            err = "ERROR: Unable to disconnect from the database. " \
                  + str(e_disconnect)

    if rc > 0:
        result_failed = dict(
            stderr=err,
            stdout=command_log,
            rc=rc,
            # changed=True,
        )
        module.fail_json(msg='Install from image catalog failed.',
                         **result_failed)
    else:
        result_success = dict(
            start=str(startd),
            end=str(endd),
            delta=str(delta),
            rc=rc,
            changed=True,
            need_action_ptf_list=out_ptf_list,
        )
        module.exit_json(**result_success)
class NetAppESeriesModule(object):
    """Base class for all NetApp E-Series modules.

    Provides a set of common methods for NetApp E-Series modules, including
    version checking, mode (proxy, embedded) verification, http requests,
    secure http redirection for embedded web services, and logging setup.

    Be sure to add the following lines in the module's documentation
    section:
    extends_documentation_fragment:
        - netapp.eseries

    :param dict(dict) ansible_options: dictionary of ansible option
        definitions
    :param str web_services_version: minimally required web services rest
        api version (default value: "02.00.0000.0000")
    :param bool supports_check_mode: whether the module will support the
        check_mode capabilities (default=False)
    :param list(list) mutually_exclusive: list containing list(s) of
        mutually exclusive options (optional)
    :param list(list) required_if: list containing list(s) containing the
        option, the option value, and then a list of required options.
        (optional)
    :param list(list) required_one_of: list containing list(s) of options
        for which at least one is required. (optional)
    :param list(list) required_together: list containing list(s) of options
        that are required together. (optional)
    :param bool log_requests: controls whether to log each request
        (default: True)
    """
    DEFAULT_TIMEOUT = 60
    DEFAULT_SECURE_PORT = "8443"
    DEFAULT_REST_API_PATH = "devmgr/v2/"
    DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
    DEFAULT_HEADERS = {"Content-Type": "application/json",
                       "Accept": "application/json",
                       "netapp-client-type": "Ansible-%s" % ansible_version}
    HTTP_AGENT = "Ansible / %s" % ansible_version
    SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3,
                         tb=1024**4, pb=1024**5, eb=1024**6, zb=1024**7,
                         yb=1024**8)

    def __init__(self, ansible_options, web_services_version=None,
                 supports_check_mode=False, mutually_exclusive=None,
                 required_if=None, required_one_of=None,
                 required_together=None, log_requests=True):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(ansible_options)

        self.module = AnsibleModule(argument_spec=argument_spec,
                                    supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive,
                                    required_if=required_if,
                                    required_one_of=required_one_of,
                                    required_together=required_together)

        args = self.module.params
        self.web_services_version = (web_services_version
                                     if web_services_version
                                     else "02.00.0000.0000")
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.log_requests = log_requests
        self.creds = dict(url_username=args["api_username"],
                          url_password=args["api_password"],
                          validate_certs=args["validate_certs"])

        if not self.url.endswith("/"):
            self.url += "/"

        self.is_embedded_mode = None
        self.is_web_services_valid_cache = None

    def _check_web_services_version(self):
        """Verify proxy or embedded web services meets the minimum version
        required for the module.

        The minimum required web services version is evaluated against the
        version supplied through the web services rest api. An
        AnsibleFailJson exception will be raised when the minimum is not
        met or exceeded.

        This helper function will update the supplied api url if secure
        http is not used for embedded web services.

        :raise AnsibleFailJson: raised when the contacted api service does
            not meet the minimum required version.
        """
        if not self.is_web_services_valid_cache:

            url_parts = list(urlparse(self.url))
            if not url_parts[0] or not url_parts[1]:
                self.module.fail_json(
                    msg="Failed to provide valid API URL. "
                        "Example: https://192.168.1.100:8443/devmgr/v2. "
                        "URL [%s]." % self.url)

            if url_parts[0] not in ["http", "https"]:
                self.module.fail_json(
                    msg="Protocol must be http or https. URL [%s]."
                        % self.url)

            self.url = "%s://%s/" % (url_parts[0], url_parts[1])
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT,
                               headers=self.DEFAULT_HEADERS,
                               ignore_errors=True, **self.creds)

            if rc != 200:
                self.module.warn(
                    "Failed to retrieve web services about information! "
                    "Retrying with secure ports. Array Id [%s]." % self.ssid)
                self.url = "https://%s:%s/" % (url_parts[1].split(":")[0],
                                               self.DEFAULT_SECURE_PORT)
                about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
                try:
                    rc, data = request(about_url,
                                       timeout=self.DEFAULT_TIMEOUT,
                                       headers=self.DEFAULT_HEADERS,
                                       **self.creds)
                except Exception as error:
                    self.module.fail_json(
                        msg="Failed to retrieve the web services about "
                            "information! Array Id [%s]. Error [%s]."
                            % (self.ssid, to_native(error)))

            # Compare the numeric version fields; comparing the raw strings
            # would misorder versions such as "10" and "9".
            major, minor, other, revision = \
                [int(part) for part in data["version"].split(".")]
            minimum_major, minimum_minor, other, minimum_revision = \
                [int(part) for part in self.web_services_version.split(".")]

            if not (major > minimum_major
                    or (major == minimum_major and minor > minimum_minor)
                    or (major == minimum_major and minor == minimum_minor
                        and revision >= minimum_revision)):
                self.module.fail_json(
                    msg="Web services version does not meet minimum version"
                        " required. Current version: [%s]."
                        " Version required: [%s]."
                        % (data["version"], self.web_services_version))

            self.module.log("Web services rest api version met the minimum "
                            "required version.")
            self.is_web_services_valid_cache = True

    def is_embedded(self):
        """Determine whether the web services server is the embedded web
        services.

        If the web services about endpoint fails based on an URLError then
        the request will be attempted again using secure http.

        :raise AnsibleFailJson: raised when the web services about endpoint
            failed to be contacted.
        :return bool: whether the contacted web services is running from
            the storage array (embedded) or from a proxy.
        """
        self._check_web_services_version()

        if self.is_embedded_mode is None:
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            try:
                rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT,
                                   headers=self.DEFAULT_HEADERS, **self.creds)
                self.is_embedded_mode = not data["runningAsProxy"]
            except Exception as error:
                self.module.fail_json(
                    msg="Failed to retrieve the web services about "
                        "information! Array Id [%s]. Error [%s]."
                        % (self.ssid, to_native(error)))

        return self.is_embedded_mode

    def request(self, path, data=None, method='GET', headers=None,
                ignore_errors=False):
        """Issue an HTTP request to a url, retrieving an optional JSON
        response.

        :param str path: web services rest api endpoint path (Example:
            storage-systems/1/graph). Note that when the full url path is
            specified then that will be used without supplying the
            protocol, hostname, port and rest path.
        :param data: data required for the request (data may be json or
            any python structured data)
        :param str method: request method such as GET, POST, DELETE.
        :param dict headers: dictionary containing request headers.
        :param bool ignore_errors: forces the request to ignore any raised
            exceptions.
        """
        self._check_web_services_version()

        if headers is None:
            headers = self.DEFAULT_HEADERS

        if (not isinstance(data, str)
                and headers["Content-Type"] == "application/json"):
            data = json.dumps(data)

        if path.startswith("/"):
            path = path[1:]
        request_url = self.url + self.DEFAULT_REST_API_PATH + path

        if self.log_requests:
            self.module.log(pformat(dict(url=request_url, data=data,
                                         method=method)))

        return request(url=request_url, data=data, method=method,
                       headers=headers, use_proxy=True, force=False,
                       last_mod_time=None, timeout=self.DEFAULT_TIMEOUT,
                       http_agent=self.HTTP_AGENT, force_basic_auth=True,
                       ignore_errors=ignore_errors, **self.creds)
def run_module():
    # Define the available arguments/parameters that a user can pass to
    # the module.
    # Defaults for VDO parameters are None, in order to facilitate
    # the detection of parameters passed from the playbook.
    # Creation param defaults are determined by the creation section.
    module_args = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present',
                   choices=['absent', 'present']),
        activated=dict(type='bool'),
        running=dict(type='bool'),
        growphysical=dict(type='bool', default=False),
        device=dict(type='str'),
        logicalsize=dict(type='str'),
        deduplication=dict(type='str', choices=['disabled', 'enabled']),
        compression=dict(type='str', choices=['disabled', 'enabled']),
        blockmapcachesize=dict(type='str'),
        readcache=dict(type='str', choices=['disabled', 'enabled']),
        readcachesize=dict(type='str'),
        emulate512=dict(type='bool', default=False),
        slabsize=dict(type='str'),
        writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
        indexmem=dict(type='str'),
        indexmode=dict(type='str', choices=['dense', 'sparse']),
        ackthreads=dict(type='str'),
        biothreads=dict(type='str'),
        cputhreads=dict(type='str'),
        logicalthreads=dict(type='str'),
        physicalthreads=dict(type='str'))

    # Seed the result dictionary in the object. There will be an
    # 'invocation' dictionary added with 'module_args' (arguments given).
    result = dict(changed=False)

    # The AnsibleModule object is our abstraction for working with Ansible.
    # It carries the args/params passed to the execution, as well as
    # whether the module supports check mode.
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
    )

    if not HAS_YAML:
        module.fail_json(msg=missing_required_lib('PyYAML'),
                         exception=YAML_IMP_ERR)

    vdocmd = module.get_bin_path("vdo", required=True)
    if not vdocmd:
        module.fail_json(msg='VDO is not installed.', **result)

    # Print a pre-run list of VDO volumes in the result object.
    vdolist = inventory_vdos(module, vdocmd)
    runningvdolist = list_running_vdos(module, vdocmd)

    # Collect the name of the desired VDO volume, and its state. These
    # will determine what to do.
    desiredvdo = module.params['name']
    state = module.params['state']

    # Create a desired VDO volume that doesn't exist yet.
    if (desiredvdo not in vdolist) and (state == 'present'):
        device = module.params['device']
        if device is None:
            module.fail_json(msg="Creating a VDO volume requires specifying "
                                 "a 'device' in the playbook.")

        # Create a dictionary of the options from the AnsibleModule
        # parameters, compile the vdo command options, and run
        # "vdo create" with those options.
        # Since this is a creation of a new VDO volume, it will contain
        # all of the parameters given by the playbook; the rest will
        # assume default values.
        options = module.params
        vdocmdoptions = add_vdooptions(options)
        rc, out, err = module.run_command(
            "%s create --name=%s --device=%s %s"
            % (vdocmd, desiredvdo, device, vdocmdoptions))
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Creating VDO %s failed." % desiredvdo,
                             rc=rc, err=err)

        if module.params['compression'] == 'disabled':
            rc, out, err = module.run_command(
                "%s disableCompression --name=%s" % (vdocmd, desiredvdo))

        if ((module.params['deduplication'] is not None)
                and module.params['deduplication'] == 'disabled'):
            rc, out, err = module.run_command(
                "%s disableDeduplication --name=%s" % (vdocmd, desiredvdo))

        if module.params['activated'] is False:
            deactivate_vdo(module, desiredvdo, vdocmd)

        if module.params['running'] is False:
            stop_vdo(module, desiredvdo, vdocmd)

        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        module.log("created VDO volume %s" % desiredvdo)
        module.exit_json(**result)

    # Modify the current parameters of a VDO that exists.
    if (desiredvdo in vdolist) and (state == 'present'):
        rc, vdostatusoutput, err = module.run_command("%s status" % vdocmd)
        vdostatusyaml = yaml.safe_load(vdostatusoutput)

        # An empty dictionary to contain dictionaries of VDO statistics
        processedvdos = {}

        vdoyamls = vdostatusyaml['VDOs']
        if vdoyamls is not None:
            processedvdos = vdoyamls

        # The 'vdo status' keys that are currently modifiable.
        statusparamkeys = ['Acknowledgement threads',
                           'Bio submission threads',
                           'Block map cache size',
                           'CPU-work threads',
                           'Logical threads',
                           'Physical threads',
                           'Read cache',
                           'Read cache size',
                           'Configured write policy',
                           'Compression',
                           'Deduplication']

        # A key translation table from 'vdo status' output to Ansible
        # module parameters. This covers all of the 'vdo status'
        # parameter keys that could be modified with the 'vdo' command.
        vdokeytrans = {
            'Logical size': 'logicalsize',
            'Compression': 'compression',
            'Deduplication': 'deduplication',
            'Block map cache size': 'blockmapcachesize',
            'Read cache': 'readcache',
            'Read cache size': 'readcachesize',
            'Configured write policy': 'writepolicy',
            'Acknowledgement threads': 'ackthreads',
            'Bio submission threads': 'biothreads',
            'CPU-work threads': 'cputhreads',
            'Logical threads': 'logicalthreads',
            'Physical threads': 'physicalthreads'
        }

        # Build a dictionary of the current VDO status parameters, with
        # the keys used by VDO. (These keys will be converted later.)
        currentvdoparams = {}

        # Build a "lookup table" dictionary containing a translation table
        # of the parameters that can be modified
        modtrans = {}

        for statfield in statusparamkeys:
            if statfield in processedvdos[desiredvdo]:
                currentvdoparams[statfield] = \
                    processedvdos[desiredvdo][statfield]
                modtrans[statfield] = vdokeytrans[statfield]

        # Build a dictionary of current parameters formatted with the
        # same keys as the AnsibleModule parameters.
        currentparams = {}
        for paramkey in modtrans.keys():
            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]

        diffparams = {}

        # Check for differences between the playbook parameters and the
        # current parameters. This will need a comparison function;
        # since AnsibleModule params are all strings, compare them as
        # strings (but if it's None, skip).
        for key in currentparams.keys():
            if module.params[key] is not None:
                if str(currentparams[key]) != module.params[key]:
                    diffparams[key] = module.params[key]

        if diffparams:
            vdocmdoptions = add_vdooptions(diffparams)
            if vdocmdoptions:
                rc, out, err = module.run_command(
                    "%s modify --name=%s %s"
                    % (vdocmd, desiredvdo, vdocmdoptions))
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Modifying VDO %s failed."
                                         % desiredvdo, rc=rc, err=err)

            if 'deduplication' in diffparams.keys():
                dedupemod = diffparams['deduplication']
                dedupe_subcmd = ('disableDeduplication'
                                 if dedupemod == 'disabled'
                                 else 'enableDeduplication')
                rc, out, err = module.run_command(
                    "%s %s --name=%s" % (vdocmd, dedupe_subcmd, desiredvdo))
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Changing deduplication on "
                                         "VDO volume %s failed." % desiredvdo,
                                     rc=rc, err=err)

            if 'compression' in diffparams.keys():
                compressmod = diffparams['compression']
                compress_subcmd = ('disableCompression'
                                   if compressmod == 'disabled'
                                   else 'enableCompression')
                rc, out, err = module.run_command(
                    "%s %s --name=%s" % (vdocmd, compress_subcmd, desiredvdo))
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Changing compression on "
                                         "VDO volume %s failed." % desiredvdo,
                                     rc=rc, err=err)

            if 'writepolicy' in diffparams.keys():
                # 'auto', 'sync' and 'async' are all handled by the same
                # changeWritePolicy invocation.
                writepolmod = diffparams['writepolicy']
                rc, out, err = module.run_command(
                    "%s changeWritePolicy --name=%s --writePolicy=%s"
                    % (vdocmd, desiredvdo, writepolmod))
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Changing write policy on "
                                         "VDO volume %s failed." % desiredvdo,
                                     rc=rc, err=err)

        # Process the size parameters, to determine if a growPhysical or
        # growLogical operation needs to occur.
        sizeparamkeys = ['Logical size']

        currentsizeparams = {}
        sizetrans = {}
        for statfield in sizeparamkeys:
            currentsizeparams[statfield] = \
                processedvdos[desiredvdo][statfield]
            sizetrans[statfield] = vdokeytrans[statfield]

        sizeparams = {}
        for paramkey in currentsizeparams.keys():
            sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]

        diffsizeparams = {}
        for key in sizeparams.keys():
            if module.params[key] is not None:
                if str(sizeparams[key]) != module.params[key]:
                    diffsizeparams[key] = module.params[key]

        if module.params['growphysical']:
            physdevice = module.params['device']
            rc, devsectors, err = module.run_command("blockdev --getsz %s"
                                                     % physdevice)
            devblocks = int(devsectors) / 8
            dmvdoname = '/dev/mapper/' + desiredvdo
            currentvdostats = \
                processedvdos[desiredvdo]['VDO statistics'][dmvdoname]
            currentphysblocks = currentvdostats['physical blocks']

            # Set a growPhysical threshold to grow only when there is
            # guaranteed to be more than 2 slabs worth of unallocated
            # space on the device to use. For now, set to device
            # size + 64 GB, since 32 GB is the largest possible
            # slab size.
            growthresh = devblocks + 16777216

            if currentphysblocks > growthresh:
                result['changed'] = True
                rc, out, err = module.run_command("%s growPhysical --name=%s"
                                                  % (vdocmd, desiredvdo))

        if 'logicalsize' in diffsizeparams.keys():
            result['changed'] = True
            vdocmdoptions = ("--vdoLogicalSize="
                             + diffsizeparams['logicalsize'])
            rc, out, err = module.run_command(
                "%s growLogical --name=%s %s"
                % (vdocmd, desiredvdo, vdocmdoptions))

        vdoactivatestatus = processedvdos[desiredvdo]['Activate']

        if ((module.params['activated'] is False)
                and (vdoactivatestatus == 'enabled')):
            deactivate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        if ((module.params['activated'] is True)
                and (vdoactivatestatus == 'disabled')):
            activate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        if ((module.params['running'] is False)
                and (desiredvdo in runningvdolist)):
            stop_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        # Note that a disabled VDO volume cannot be started by the
        # 'vdo start' command, by design. To accurately track changed
        # status, don't try to start a disabled VDO volume.
        # If the playbook contains 'activated: yes', assume that
        # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
        # will have the activated status prior to the activate_vdo()
        # call.
        if (((vdoactivatestatus == 'enabled')
                or (module.params['activated'] is True))
                and (module.params['running'] is True)
                and (desiredvdo not in runningvdolist)):
            start_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        if diffparams:
            module.log("modified parameters of VDO volume %s" % desiredvdo)
        module.exit_json(**result)

    # Remove a desired VDO that currently exists.
    if (desiredvdo in vdolist) and (state == 'absent'):
        rc, out, err = module.run_command("%s remove --name=%s"
                                          % (vdocmd, desiredvdo))
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Removing VDO %s failed." % desiredvdo,
                             rc=rc, err=err)

        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        module.log("removed VDO volume %s" % desiredvdo)
        module.exit_json(**result)

    # fall through
    # The state for the desired VDO volume was absent, and it does
    # not exist. Print a post-run list of VDO volumes in the result
    # object.
    vdolist = inventory_vdos(module, vdocmd)
    module.log("received request to remove non-existent VDO volume %s"
               % desiredvdo)
    module.exit_json(**result)
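
# The modify path above is, at heart, a dictionary diff: keep only the
# playbook values that are set and differ (as strings) from the current
# 'vdo status' values. In isolation (the helper name is illustrative):
def diff_params(current, desired):
    return {key: value for key, value in desired.items()
            if value is not None and str(current.get(key)) != value}

# diff_params({'writepolicy': 'sync'}, {'writepolicy': 'async'})
#     -> {'writepolicy': 'async'}
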
def main():
    global module
    global results
    global workdir

    module = AnsibleModule(
        argument_spec=dict(
            apar=dict(required=False, type='str',
                      choices=['sec', 'hiper', 'all', None], default=None),
            filesets=dict(required=False, type='str'),
            csv=dict(required=False, type='str'),
            path=dict(required=False, type='str', default='/var/adm/ansible'),
            save_report=dict(required=False, type='bool', default=False),
            verbose=dict(required=False, type='bool', default=False),
            force=dict(required=False, type='bool', default=False),
            clean=dict(required=False, type='bool', default=False),
            check_only=dict(required=False, type='bool', default=False),
            download_only=dict(required=False, type='bool', default=False),
            extend_fs=dict(required=False, type='bool', default=True),
        ),
        supports_check_mode=True
    )

    results = dict(
        changed=False,
        msg='',
        meta={'messages': []}
        # meta structure will be updated as follows:
        # meta={'messages': [],     detailed execution messages
        #       '0.report': [],     run_flrtvc reports the vulnerabilities
        #       '1.parse': [],      run_parser builds the list of URLs
        #       '2.discover': [],   run_downloader builds the list of epkgs
        #                           found in URLs
        #       '3.download': [],   run_downloader builds the list of
        #                           downloaded epkgs
        #       '4.1.reject': [],   check_epkgs builds the list of rejected
        #                           epkgs
        #       '4.2.check': [],    check_epkgs builds the list of epkgs
        #                           checking prerequisites
        #       '5.install': []}    run_installer builds the list of
        #                           installed epkgs
    )

    module.debug('*** START ***')
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C',
                                             LC_MESSAGES='C', LC_CTYPE='C')

    # ===========================================
    # Get module params
    # ===========================================
    module.debug('*** INIT ***')

    # Used for independence vs Ansible options
    flrtvc_params = {'apar_type': module.params['apar'],
                     'apar_csv': module.params['csv'],
                     'filesets': module.params['filesets'],
                     'dst_path': module.params['path'],
                     'save_report': module.params['save_report'],
                     'verbose': module.params['verbose']}
    force = module.params['force']
    clean = module.params['clean']
    check_only = module.params['check_only']
    download_only = module.params['download_only']
    resize_fs = module.params['extend_fs']

    # Create working directory if needed
    workdir = os.path.abspath(os.path.join(flrtvc_params['dst_path'],
                                           'work'))
    if not os.path.exists(workdir):
        os.makedirs(workdir, mode=0o744)

    # ===========================================
    # Install flrtvc script
    # ===========================================
    module.debug('*** INSTALL ***')
    flrtvc_dir = os.path.abspath(os.path.join('usr', 'bin'))
    flrtvc_path = os.path.abspath(os.path.join(flrtvc_dir, 'flrtvc.ksh'))

    if os.path.exists(flrtvc_path):
        try:
            os.remove(flrtvc_path)
        except OSError as exc:
            msg = 'Exception removing {0}, exception={1}'.format(flrtvc_path,
                                                                 exc)
            module.log(msg)
            results['meta']['messages'].append(msg)

    flrtvc_dst = os.path.abspath(os.path.join(workdir, 'FLRTVC-latest.zip'))
    if not download('https://www-304.ibm.com/webapp/set2/sas/f/flrt3/FLRTVC-latest.zip',
                    flrtvc_dst, resize_fs):
        if clean and os.path.exists(workdir):
            shutil.rmtree(workdir, ignore_errors=True)
        results['msg'] = 'Failed to download FLRTVC-latest.zip'
        module.fail_json(**results)

    if not unzip(flrtvc_dst, flrtvc_dir, resize_fs):
        if clean and os.path.exists(workdir):
            shutil.rmtree(workdir, ignore_errors=True)
        results['msg'] = 'Failed to unzip FLRTVC-latest.zip'
        module.fail_json(**results)

    flrtvc_stat = os.stat(flrtvc_path)
    if not flrtvc_stat.st_mode & stat.S_IEXEC:
        os.chmod(flrtvc_path, flrtvc_stat.st_mode | stat.S_IEXEC)

    # ===========================================
    # Run flrtvc script
    # ===========================================
    module.debug('*** REPORT ***')
    if not run_flrtvc(flrtvc_path, flrtvc_params, force):
        msg = 'Failed to get vulnerabilities report, system will not be updated'
        results['msg'] = msg
        if clean and os.path.exists(workdir):
            shutil.rmtree(workdir, ignore_errors=True)
        module.fail_json(**results)

    if check_only:
        if clean and os.path.exists(workdir):
            shutil.rmtree(workdir, ignore_errors=True)
        results['msg'] = 'exit on check only'
        module.exit_json(**results)

    # ===========================================
    # Parse flrtvc report
    # ===========================================
    module.debug('*** PARSE ***')
    run_parser(results['meta']['0.report'])

    # ===========================================
    # Download and check efixes
    # ===========================================
    module.debug('*** DOWNLOAD ***')
    run_downloader(results['meta']['1.parse'], workdir, resize_fs)

    if download_only:
        if clean and os.path.exists(workdir):
            shutil.rmtree(workdir, ignore_errors=True)
        results['msg'] = 'exit on download only'
        module.exit_json(**results)

    # ===========================================
    # Install efixes
    # ===========================================
    module.debug('*** UPDATE ***')
    if not run_installer(results['meta']['4.2.check'], workdir, resize_fs):
        msg = 'Failed to install fixes, please check meta and log data.'
        results['msg'] = msg
        if clean and os.path.exists(workdir):
            shutil.rmtree(workdir, ignore_errors=True)
        module.fail_json(**results)

    if clean and os.path.exists(workdir):
        shutil.rmtree(workdir, ignore_errors=True)

    results['msg'] = 'FLRTVC completed successfully'
    module.log(results['msg'])
    module.exit_json(**results)
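
# The post-unzip fixup above (ensure the downloaded script is executable)
# is a generic pattern worth keeping at hand; a minimal sketch:
import os
import stat

def ensure_executable(path):
    mode = os.stat(path).st_mode
    if not mode & stat.S_IEXEC:
        os.chmod(path, mode | stat.S_IEXEC)
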
def main():
    module = AnsibleModule(
        argument_spec=dict(
            servers=dict(required=True, type='list'),
            domain=dict(required=True),
            realm=dict(required=True),
            hostname=dict(required=True),
            services=dict(required=True, type='list'),
            krb5_offline_passwords=dict(required=False, type='bool'),
            on_master=dict(required=False, type='bool'),
            primary=dict(required=False, type='bool'),
            preserve_sssd=dict(required=False, type='bool'),
            permit=dict(required=False, type='bool'),
            dns_updates=dict(required=False, type='bool'),
            all_ip_addresses=dict(required=False, type='bool'),
        ),
        supports_check_mode=True,
    )

    module._ansible_debug = True
    cli_servers = module.params.get('servers')
    cli_domain = module.params.get('domain')
    cli_realm = module.params.get('realm')
    client_hostname = module.params.get('hostname')
    services = module.params.get('services')
    krb5_offline_passwords = module.params.get('krb5_offline_passwords')
    on_master = module.params.get('on_master')
    primary = module.params.get('primary')
    preserve_sssd = module.params.get('preserve_sssd')
    permit = module.params.get('permit')
    dns_updates = module.params.get('dns_updates')
    all_ip_addresses = module.params.get('all_ip_addresses')

    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    client_domain = client_hostname[client_hostname.find(".") + 1:]

    try:
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.import_config()
    except Exception as e:
        if os.path.exists(paths.SSSD_CONF) and preserve_sssd:
            # SSSD config is in place but we are unable to read it.
            # In addition, we are instructed to preserve it.
            # This all means we can't use it and have to bail out.
            module.fail_json(
                msg="SSSD config exists but cannot be parsed: %s" % str(e))

        # SSSD configuration does not exist or we are not asked to
        # preserve it, so create a new one.
        # We do make a new SSSDConfig instance because IPAChangeConf-derived
        # classes have no means to reset their state, and a ParseError
        # exception could come from a parsing error in an older version
        # which cannot be upgraded anymore, leaving the sssdconfig instance
        # practically unusable.
        # Note that we already backed up sssd.conf before going into this
        # routine.
        if isinstance(e, IOError):
            pass
        else:
            # It was not IOError so it must have been a parsing error
            module.fail_json(msg="Unable to parse existing SSSD config.")

        module.log("New SSSD config will be created")
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.new_config()

    try:
        domain = sssdconfig.new_domain(cli_domain)
    except SSSDConfig.DomainAlreadyExistsError:
        module.log("Domain %s is already configured in existing SSSD "
                   "config, creating a new one." % cli_domain)
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.new_config()
        domain = sssdconfig.new_domain(cli_domain)

    if on_master:
        sssd_enable_service(module, sssdconfig, 'ifp')

    if (("ssh" in services and file_exists(paths.SSH_CONFIG)) or
            ("sshd" in services and file_exists(paths.SSHD_CONFIG))):
        sssd_enable_service(module, sssdconfig, 'ssh')

    if "sudo" in services:
        sssd_enable_service(module, sssdconfig, 'sudo')
        configure_nsswitch_database(fstore, 'sudoers', ['sss'],
                                    default_value=['files'])

    domain.add_provider('ipa', 'id')

    # add discovery domain if client domain is different from server domain
    # do not set this config in server mode (#3947)
    if not on_master and cli_domain != client_domain:
        domain.set_option('dns_discovery_domain', cli_domain)

    if not on_master:
        if primary:
            domain.set_option('ipa_server', ', '.join(cli_servers))
        else:
            domain.set_option('ipa_server',
                              '_srv_, %s' % ', '.join(cli_servers))
    else:
        domain.set_option('ipa_server_mode', 'True')
        # the master should only use itself for Kerberos
        domain.set_option('ipa_server', cli_servers[0])

        # increase memcache timeout to 10 minutes when in server mode
        try:
            nss_service = sssdconfig.get_service('nss')
        except SSSDConfig.NoServiceError:
            nss_service = sssdconfig.new_service('nss')

        nss_service.set_option('memcache_timeout', 600)
        sssdconfig.save_service(nss_service)

    domain.set_option('ipa_domain', cli_domain)
    domain.set_option('ipa_hostname', client_hostname)
    if cli_domain.lower() != cli_realm.lower():
        domain.set_option('krb5_realm', cli_realm)

    # Might need this if /bin/hostname doesn't return a FQDN
    # domain.set_option('ipa_hostname', 'client.example.com')

    domain.add_provider('ipa', 'auth')
    domain.add_provider('ipa', 'chpass')
    if not permit:
        domain.add_provider('ipa', 'access')
    else:
        domain.add_provider('permit', 'access')

    domain.set_option('cache_credentials', True)

    # SSSD will need TLS for checking if the ipaMigrationEnabled attribute
    # is set. Note that SSSD will force StartTLS because the channel is
    # later used for authentication as well if password migration is
    # enabled. Thus set the option unconditionally.
    domain.set_option('ldap_tls_cacert', paths.IPA_CA_CRT)

    if dns_updates:
        domain.set_option('dyndns_update', True)
        if all_ip_addresses:
            domain.set_option('dyndns_iface', '*')
        else:
            iface = get_server_connection_interface(cli_servers[0])
            domain.set_option('dyndns_iface', iface)
    if krb5_offline_passwords:
        domain.set_option('krb5_store_password_if_offline', True)

    domain.set_active(True)

    sssdconfig.save_domain(domain)
    sssdconfig.write(paths.SSSD_CONF)

    module.exit_json(changed=True)
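
# Two details of the SSSD setup above, shown in isolation: the client
# domain is everything after the first dot of the hostname, and the
# ipa_server option gains a '_srv_' (DNS SRV discovery) prefix unless a
# fixed primary server was requested. The helper names are illustrative:
def client_domain_of(hostname):
    return hostname[hostname.find(".") + 1:]

def ipa_server_value(servers, primary=False):
    joined = ', '.join(servers)
    return joined if primary else '_srv_, %s' % joined

assert client_domain_of('host1.example.com') == 'example.com'
assert ipa_server_value(['ipa1.example.com']) == '_srv_, ipa1.example.com'
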
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str'),
            conf_path=dict(required=False, default='/etc/aide.conf',
                           type='path'),
            options=dict(required=False, type='list'),
            options_string=dict(required=False, type='str'),
            state=dict(required=False, default="updated",
                       choices=['absent', 'present', 'updated']),
            backup=dict(default=False, type='bool'),
            add_if_not_present=dict(default=False, required=False,
                                    type='bool')),
        supports_check_mode=True,
        mutually_exclusive=[['options', 'options_string']],
        required_one_of=[['options', 'options_string']])

    # For existing rule sets
    rulesets = dict()

    content = ''
    options = []
    options_changed = []
    existing_options = []
    backupdest = ""
    changed = False

    fname = module.params['conf_path']
    action = module.params['state']
    add_if_not_present = module.params['add_if_not_present']
    ruleset_name = module.params['name'].strip()

    # Get the options from either the 'options' or 'options_string'
    # module arguments
    if module.params['options']:
        options = module.params['options']
    else:
        options = module.params['options_string'].split('+')

    # Open the file and read the content or fail
    try:
        with open(fname, 'r') as aide_file_obj:
            content = aide_file_obj.read()
    except IOError as e:
        # If unable to read the file, fail out
        module.fail_json(msg='Unable to open/read AIDE configuration '
                             'file %s with error %s.' % (fname, str(e)))

    matches = RULESET_REGEX.findall(content)

    # Load existing rule sets
    for match in matches:
        existing_options = match[1].split('+')
        rulesets[match[0]] = existing_options

    module.log(msg="EXISTING RULESET KEYS: " + str(rulesets.keys()))
    module.log(msg="EXISTING RULESET: " + str(rulesets))

    # Take action
    if action == 'absent':
        changed, rulesets[ruleset_name], options_changed = remove_options(
            rulesets[ruleset_name], options)
    elif action == 'present':
        changed, rulesets[ruleset_name], options_changed = add_options(
            rulesets[ruleset_name], options)
    elif action == 'updated':
        if ruleset_name in rulesets.keys() or add_if_not_present:
            rulesets[ruleset_name] = options
            options_changed = options
            changed = True

    # Write file
    if not module.check_mode and changed:
        module.log(msg="WRITING")

        # Update the content
        pattern = r"^" + ruleset_name + r"\s?=\s?.*$"
        module.log(msg="PATTERN: " + pattern)
        replacement = ruleset_name + " = " + "+".join(rulesets[ruleset_name])
        module.log(msg="REPLACEMENT: " + replacement)
        new_content = re.sub(pattern, replacement, content,
                             flags=re.MULTILINE)
        module.log(msg="OLD CONTENT equals NEW CONTENT: "
                       + str(content == new_content))

        # First, create a backup if desired.
        if module.params['backup']:
            backupdest = module.backup_local(fname)

        # Write the file; delete=False keeps the temporary file in place
        # for the atomic move below.
        try:
            temp_file = NamedTemporaryFile(mode='w', delete=False)
            module.log(msg="TEMP FILE NAME: " + temp_file.name)
            with open(temp_file.name, 'w') as fd:
                fd.write(new_content)
        except IOError:
            module.fail_json(msg='Unable to create temporary '
                                 'file %s' % temp_file)

        module.atomic_move(temp_file.name, fname)

    facts = {}
    facts['aide_ruleset'] = {
        'action': action,
        'name': ruleset_name,
        'existing_options': existing_options,
        'options_changed': options_changed,
        'backupdest': backupdest
    }

    module.exit_json(changed=changed, ansible_facts=facts)
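
# The in-place update above is a single anchored regex substitution over
# the whole config. A sketch of the round trip, with re.escape() added as
# a defensive touch the module itself does not apply:
import re

def replace_ruleset(content, name, options):
    pattern = r"^" + re.escape(name) + r"\s?=\s?.*$"
    replacement = "%s = %s" % (name, "+".join(options))
    return re.sub(pattern, replacement, content, flags=re.MULTILINE)

# replace_ruleset("NORMAL = p+n\n", "NORMAL", ["p", "n", "sha512"])
#     -> "NORMAL = p+n+sha512\n"
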
def main():
    module = AnsibleModule(
        argument_spec=dict(
            servers=dict(required=True, type='list'),
            domain=dict(required=True),
            realm=dict(required=True),
            hostname=dict(required=True),
            kdc=dict(required=True),
            basedn=dict(required=True),
            principal=dict(required=False),
            password=dict(required=False, no_log=True),
            keytab=dict(required=False),
            admin_keytab=dict(required=False),
            ca_cert_file=dict(required=False),
            force_join=dict(required=False, type='bool'),
            kinit_attempts=dict(required=False, type='int', default=5),
            debug=dict(required=False, type='bool'),
        ),
        supports_check_mode=True,
    )

    module._ansible_debug = True
    servers = module.params.get('servers')
    domain = module.params.get('domain')
    realm = module.params.get('realm')
    hostname = module.params.get('hostname')
    basedn = module.params.get('basedn')
    kdc = module.params.get('kdc')
    force_join = module.params.get('force_join')
    principal = module.params.get('principal')
    password = module.params.get('password')
    keytab = module.params.get('keytab')
    admin_keytab = module.params.get('admin_keytab')
    ca_cert_file = module.params.get('ca_cert_file')
    kinit_attempts = module.params.get('kinit_attempts')
    debug = module.params.get('debug')

    if password is not None and keytab is not None:
        module.fail_json(msg="Password and keytab cannot be used together")

    if password is None and admin_keytab is None:
        module.fail_json(msg="Password or admin_keytab is needed")

    client_domain = hostname[hostname.find(".") + 1:]
    nolog = tuple()
    env = {'PATH': SECURE_PATH}
    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    host_principal = 'host/%s@%s' % (hostname, realm)
    sssd = True

    options.ca_cert_file = ca_cert_file
    options.unattended = True
    options.principal = principal
    options.force = False
    options.password = password

    ccache_dir = None
    changed = False
    already_joined = False
    try:
        (krb_fd, krb_name) = tempfile.mkstemp()
        os.close(krb_fd)
        configure_krb5_conf(
            cli_realm=realm,
            cli_domain=domain,
            cli_server=servers,
            cli_kdc=kdc,
            dnsok=False,
            filename=krb_name,
            client_domain=client_domain,
            client_hostname=hostname,
            configure_sssd=sssd,
            force=False)
        env['KRB5_CONFIG'] = krb_name
        ccache_dir = tempfile.mkdtemp(prefix='krbcc')
        ccache_name = os.path.join(ccache_dir, 'ccache')
        join_args = [paths.SBIN_IPA_JOIN,
                     "-s", servers[0],
                     "-b", str(realm_to_suffix(realm)),
                     "-h", hostname]
        if debug:
            join_args.append("-d")
            env['XMLRPC_TRACE_CURL'] = 'yes'
        if force_join:
            join_args.append("-f")
        if principal is not None:
            if principal.find('@') == -1:
                principal = '%s@%s' % (principal, realm)
            if admin_keytab:
                join_args.append("-f")
                if not os.path.exists(admin_keytab):
                    module.fail_json(
                        msg="Keytab file could not be found: %s"
                            % admin_keytab)
                try:
                    kinit_keytab(principal,
                                 admin_keytab,
                                 ccache_name,
                                 config=krb_name,
                                 attempts=kinit_attempts)
                except GSSError as e:
                    module.fail_json(
                        msg="Kerberos authentication failed: %s" % str(e))
            else:
                try:
                    kinit_password(principal, password, ccache_name,
                                   config=krb_name)
                except RuntimeError as e:
                    module.fail_json(
                        msg="Kerberos authentication failed: {}".format(e))
        elif keytab:
            join_args.append("-f")
            if os.path.exists(keytab):
                try:
                    kinit_keytab(host_principal,
                                 keytab,
                                 ccache_name,
                                 config=krb_name,
                                 attempts=kinit_attempts)
                except GSSError as e:
                    module.fail_json(
                        msg="Kerberos authentication failed: {}".format(e))
            else:
                module.fail_json(
                    msg="Keytab file could not be found: {}".format(keytab))
        elif password:
            join_args.append("-w")
            join_args.append(password)
            nolog = (password,)

        env['KRB5CCNAME'] = os.environ['KRB5CCNAME'] = ccache_name

        # Get the CA certificate
        try:
            os.environ['KRB5_CONFIG'] = env['KRB5_CONFIG']
            if NUM_VERSION < 40100:
                get_ca_cert(fstore, options, servers[0], basedn)
            else:
                get_ca_certs(fstore, options, servers[0], basedn, realm)
            del os.environ['KRB5_CONFIG']
        except errors.FileError as e:
            module.fail_json(msg='%s' % e)
        except Exception as e:
            module.fail_json(msg="Cannot obtain CA certificate\n%s" % e)

        # Now join the domain
        result = run(
            join_args, raiseonerr=False, env=env, nolog=nolog,
            capture_error=True)
        stderr = result.error_output

        if result.returncode != 0:
            if result.returncode == 13:
                already_joined = True
                module.log("Host is already joined")
            else:
                if principal:
                    run([paths.KDESTROY], raiseonerr=False, env=env)
                module.fail_json(msg="Joining realm failed: %s" % stderr)
        else:
            changed = True
            module.log("Enrolled in IPA realm %s" % realm)

        # Fail for missing krb5.keytab on an already joined host
        if already_joined and not os.path.exists(paths.KRB5_KEYTAB):
            module.fail_json(msg="krb5.keytab missing! Retry with "
                                 "ipaclient_force_join=yes to generate "
                                 "a new one.")

        if principal:
            run([paths.KDESTROY], raiseonerr=False, env=env)

        # Obtain the TGT. We do it with the temporary krb5.conf, so that
        # only the KDC we're installing under is contacted.
        # Other KDCs might not have replicated the principal yet.
        # Once we have the TGT, it's usable on any server.
        try:
            kinit_keytab(host_principal, paths.KRB5_KEYTAB,
                         paths.IPA_DNS_CCACHE,
                         config=krb_name,
                         attempts=kinit_attempts)
            env['KRB5CCNAME'] = os.environ['KRB5CCNAME'] = \
                paths.IPA_DNS_CCACHE
        except GSSError as e:
            # failure to get the ticket makes it impossible to login and
            # bind from sssd to LDAP, abort installation
            module.fail_json(msg="Failed to obtain host TGT: %s" % e)

    finally:
        try:
            os.remove(krb_name)
        except OSError:
            module.fail_json(msg="Could not remove %s" % krb_name)
        if ccache_dir is not None:
            try:
                os.rmdir(ccache_dir)
            except OSError:
                pass
        if os.path.exists(krb_name + ".ipabkp"):
            try:
                os.remove(krb_name + ".ipabkp")
            except OSError:
                module.fail_json(msg="Could not remove %s.ipabkp" % krb_name)

    module.exit_json(changed=changed, already_joined=already_joined)
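
# The join above brackets its work with a throwaway krb5.conf and a
# private credential cache directory; reduced to its skeleton, the
# lifecycle looks like this sketch:
import os
import tempfile

krb_fd, krb_name = tempfile.mkstemp()
os.close(krb_fd)
ccache_dir = tempfile.mkdtemp(prefix='krbcc')
try:
    env = {'KRB5_CONFIG': krb_name,
           'KRB5CCNAME': os.path.join(ccache_dir, 'ccache')}
    # ... write the temporary config, kinit, and run ipa-join with env ...
finally:
    os.remove(krb_name)
    os.rmdir(ccache_dir)
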
def main():
    global CHANGED
    DEBUG_DATA = []
    OUTPUT = []

    MODULE = AnsibleModule(
        # TODO: remove not needed attributes
        argument_spec=dict(
            # description=dict(required=False, type='str'),

            # IBM automation generic attributes
            action=dict(required=True, type='str',
                        choices=['altdisk_install', 'bos_install',
                                 'get_status']),
            vars=dict(required=False, type='dict'),
            vios_status=dict(required=False, type='dict'),
            # not used so far, can be used to get if1 for hostname resolution
            # nim_node=dict(required=False, type='dict'),

            # nim_migvios_setup not supported yet?
            # nim_migvios_setup [ -a [ mk_resource={yes|no} ]
            #                        [ file_system=fs_name ]
            #                        [ volume_group=vg_name ]
            #                        [ disk=disk_name ]
            #                        [ device=device ]
            #                   ] [ -B ] [ -F ] [ -S ] [ -v ]

            # mutually exclusive
            targets=dict(required=False, type='list', elements='str'),
            target_file_name=dict(required=False, type='str'),

            # The following attributes are dictionaries with
            # key: 'all' or hostname and value: a string
            # example:
            #   mksysb_name={"tgt1": "hdisk1", "tgt2": "hdisk1"}
            #   mksysb_name={"all": "hdisk1"}
            mksysb_name=dict(required=False, type='dict'),
            spot_name=dict(required=False, type='dict'),
            backup_file=dict(required=False, type='dict'),
            rootvg_clone_disk=dict(required=False, type='dict'),
            rootvg_install_disk=dict(required=False, type='dict'),

            # resources (-e option):
            res_resolv_conf=dict(required=False, type='dict'),
            res_script=dict(required=False, type='dict'),
            res_fb_script=dict(required=False, type='dict'),
            res_file_res=dict(required=False, type='dict'),
            res_image_data=dict(required=False, type='dict'),
            res_log=dict(required=False, type='dict'),

            # dictionaries with key: 'all' or hostname and value: bool
            cluster_exists=dict(required=False, type='dict'),
            validate_input_data=dict(required=False, type='dict'),
            skip_rootvg_cloning=dict(required=False, type='dict'),
        ),
        mutually_exclusive=[['targets', 'target_file_name']],
        required_one_of=[['targets', 'target_file_name']],
        # TODO: determine mandatory attributes
        required_if=[],
    )

    # =========================================================================
    # Get module params
    # =========================================================================
    MODULE.status = {}
    MODULE.targets = []
    MODULE.nim_node = {}

    MODULE.debug('*** START NIM VIOSUPGRADE OPERATION ***')
    OUTPUT.append('VIOSUpgrade operation for {0}'.format(
        MODULE.params['targets']))
    MODULE.log('Action {0} for {1} targets'.format(MODULE.params['action'],
                                                   MODULE.params['targets']))

    # build NIM node info (if needed); use .get() because nim_node is
    # not currently declared in the argument spec
    if MODULE.params.get('nim_node'):
        MODULE.nim_node = MODULE.params['nim_node']
    # TODO: remove this, not needed, except maybe for hostname
    # if 'nim_vios' not in MODULE.nim_node:
    #     MODULE.nim_node['nim_vios'] = get_nim_clients_info(MODULE, 'vios')
    # MODULE.debug('NIM VIOS: {0}'.format(MODULE.nim_node['nim_vios']))

    if MODULE.params['target_file_name']:
        try:
            with open(MODULE.params['target_file_name'], 'r') as myfile:
                csvreader = csv.reader(myfile, delimiter=':')
                for line in csvreader:
                    MODULE.targets.append(line[0].strip())
        except IOError as e:
            msg = 'Failed to parse file {0}: {1}.'.format(
                e.filename, e.strerror)
            MODULE.log(msg)
            MODULE.fail_json(changed=CHANGED, msg=msg, output=OUTPUT,
                             debug_output=DEBUG_DATA, status=MODULE.status)
    else:
        MODULE.params['target_file_name'] = ""
        MODULE.targets = MODULE.params['targets']

    if not MODULE.targets:
        msg = 'Empty target list'
        OUTPUT.append(msg)
        MODULE.warn(msg + ': {0}'.format(MODULE.params['targets']))
        MODULE.exit_json(changed=False, msg=msg, nim_node=MODULE.nim_node,
                         debug_output=DEBUG_DATA, output=OUTPUT,
                         status=MODULE.status)

    OUTPUT.append('Targets list: {0}'.format(MODULE.targets))
    MODULE.debug('Targets list: {0}'.format(MODULE.targets))

    if MODULE.params['target_file_name']:
        if MODULE.params['action'] != 'get_status':
            viosupgrade_file(MODULE, MODULE.params['target_file_name'])
        if 'get_status' in MODULE.params['action']:
            viosupgrade_query(MODULE)
    elif MODULE.params['targets']:
        if MODULE.params['action'] != 'get_status':
            viosupgrade_list(MODULE, MODULE.params['targets'])
        if 'get_status' in MODULE.params['action']:
            viosupgrade_query(MODULE)
    else:
        # should not happen
        msg = 'Please specify one of the "targets" or "target_file_name" ' \
              'parameters.'
        MODULE.log(msg)
        MODULE.fail_json(changed=CHANGED, msg=msg, output=OUTPUT,
                         debug_output=DEBUG_DATA, status=MODULE.status)

    # Print the status of each target
    nb_error = 0
    msg = 'VIOSUpgrade {0} operation status:'.format(MODULE.params['action'])
    if MODULE.status:
        OUTPUT.append(msg)
        MODULE.log(msg)
        for vios_key in MODULE.status:
            OUTPUT.append('    {0} : {1}'.format(vios_key,
                                                 MODULE.status[vios_key]))
            MODULE.log('    {0} : {1}'.format(vios_key,
                                              MODULE.status[vios_key]))
            if not re.match(r"^SUCCESS", MODULE.status[vios_key]):
                nb_error += 1
    else:
        MODULE.log(msg + ' MODULE.status table is empty')
        OUTPUT.append(msg + ' Error getting the status')
        MODULE.status = MODULE.params['vios_status']  # can be None

    # Print a global result statement
    if nb_error == 0:
        msg = 'VIOSUpgrade {0} operation succeeded'\
              .format(MODULE.params['action'])
    else:
        msg = 'VIOSUpgrade {0} operation failed: {1} errors'\
              .format(MODULE.params['action'], nb_error)
    OUTPUT.append(msg)
    MODULE.log(msg)

    # =========================================================================
    # Exit
    # =========================================================================
    if nb_error == 0:
        MODULE.exit_json(
            changed=CHANGED,
            msg=msg,
            targets=MODULE.targets,
            nim_node=MODULE.nim_node,
            debug_output=DEBUG_DATA,
            output=OUTPUT,
            status=MODULE.status)

    MODULE.fail_json(
        changed=CHANGED,
        msg=msg,
        targets=MODULE.targets,
        nim_node=MODULE.nim_node,
        debug_output=DEBUG_DATA,
        output=OUTPUT,
        status=MODULE.status)
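
# The exit path above counts targets whose status string does not start
# with "SUCCESS" to decide between exit_json and fail_json. Below is a
# minimal standalone sketch of that status-summary pattern; the helper
# name summarize_status is hypothetical and the status-string format is
# an assumption taken from the module above.
import re


def summarize_status(status, action):
    """Return (nb_error, msg) for a {target: status_string} dict."""
    nb_error = sum(1 for s in status.values()
                   if not re.match(r"^SUCCESS", s))
    if nb_error == 0:
        msg = 'VIOSUpgrade {0} operation succeeded'.format(action)
    else:
        msg = 'VIOSUpgrade {0} operation failed: {1} errors'.format(
            action, nb_error)
    return nb_error, msg


# Example:
#   summarize_status({'vios1': 'SUCCESS-UPGR', 'vios2': 'FAILURE-ALTDC'},
#                    'altdisk_install')
#   -> (1, 'VIOSUpgrade altdisk_install operation failed: 1 errors')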
def main():
    module = AnsibleModule(
        argument_spec=dict(
            product_id=dict(type='str'),
            fix_list=dict(type='list', elements='str', default=['*ALL']),
            fix_omit_list=dict(type='list', elements='str'),
            save_file_object=dict(type='str'),
            save_file_lib=dict(type='str', default='QGPL'),
            delayed_option=dict(type='str', default='*NO',
                                choices=['*YES', '*NO']),
            temp_or_perm=dict(type='str', default='*TEMP',
                              choices=['*TEMP', '*PERM']),
            operation=dict(type='str', default='load_and_apply',
                           choices=['load_and_apply', 'load_only',
                                    'apply_only', 'remove', 'query']),
        ),
        required_if=[
            ["operation", "apply_only", ["product_id"]],
            ["operation", "remove", ["product_id"]],
            ["operation", "load_and_apply",
             ["product_id", "save_file_object"]],
            ["operation", "load_only",
             ["product_id", "save_file_object"]],
        ],
        supports_check_mode=True,
    )

    if not HAS_ITOOLKIT:
        module.fail_json(msg="itoolkit package is required")

    if not HAS_IBM_DB:
        module.fail_json(msg="ibm_db package is required")

    product_id = module.params['product_id']
    ptf_list_to_select = module.params['fix_list']
    ptf_list_to_omit = module.params['fix_omit_list']
    save_file_object = module.params['save_file_object']
    save_file_lib = module.params['save_file_lib']
    delayed_option = module.params['delayed_option']
    temp_or_perm = module.params['temp_or_perm']
    operation = module.params['operation']

    if operation in ['load_and_apply', 'load_only', 'remove']:
        if product_id == '*ALL':
            module.fail_json(
                msg="product_id cannot be *ALL when operation is remove, "
                    "load_and_apply or load_only.")

    startd = datetime.datetime.now()

    connection_id = None
    try:
        connection_id = dbi.connect()
    except Exception as e_db_connect:
        module.fail_json(msg="Exception when connecting to IBM i Db2. "
                             + str(e_db_connect))

    if operation in ['load_and_apply', 'load_only', 'apply_only']:
        operation_bool_map = {
            'load_and_apply': [False, False],
            'load_only': [True, False],
            'apply_only': [False, True],
        }
        # install a single PTF or a list of PTFs
        savf_obj = save_file_lib + "/" + save_file_object
        rc, out, err = install_ptf(connection_id, module, product_id,
                                   ptf_list_to_select, ptf_list_to_omit,
                                   "*SAVF", savf_obj, delayed_option,
                                   temp_or_perm,
                                   operation_bool_map[operation][0],
                                   operation_bool_map[operation][1])
        # Need to query the status of the PTF
    elif operation in ['remove']:
        rc, out, err = remove_ptf(connection_id, module, product_id,
                                  ptf_list_to_select, ptf_list_to_omit,
                                  temp_or_perm=temp_or_perm,
                                  delayed_option=delayed_option)
        # Need to query the status of the PTF

    # return the status of the PTF
    if ptf_list_to_select is not None:
        ptf_list, query_err = return_fix_information(connection_id,
                                                     product_id,
                                                     ptf_list_to_select)
    else:
        module.fail_json(msg="PTF list contains no PTF.")

    if operation == "query":
        if query_err is not None:
            rc = IBMi_COMMAND_RC_ERROR
            err = query_err
        else:
            rc = IBMi_COMMAND_RC_SUCCESS

    # job_log, get_joblog_err = db2i_tools.get_job_log(connection_id, "*")

    if connection_id is not None:
        try:
            connection_id.close()
        except Exception as e_disconnect:
            module.log("ERROR: Unable to disconnect from the database. "
                       + str(e_disconnect))

    endd = datetime.datetime.now()
    delta = endd - startd

    if rc > 0:
        result_failed = dict(
            start=str(startd),
            end=str(endd),
            delta=str(delta),
            stdout=out,
            stderr=err,
            rc=rc,
            # changed=True,
        )
        module.fail_json(msg='non-zero return code', **result_failed)
    else:
        result_success = dict(
            start=str(startd),
            end=str(endd),
            delta=str(delta),
            ptf_list=ptf_list,
            rc=rc,
            # job_log=job_log,
            # changed=True,
        )
        module.exit_json(**result_success)
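
# The install branch above dispatches each operation to a pair of
# booleans, (load_only, apply_only), that install_ptf() receives. Below
# is a minimal standalone sketch of that dispatch; OPERATION_FLAGS and
# flags_for are hypothetical names mirroring the module's
# operation_bool_map, and install_ptf itself is not reproduced here.
OPERATION_FLAGS = {
    'load_and_apply': (False, False),  # load the PTFs, then apply them
    'load_only': (True, False),        # load only; apply in a later run
    'apply_only': (False, True),       # apply PTFs that were loaded earlier
}


def flags_for(operation):
    """Return the (load_only, apply_only) booleans for an operation."""
    return OPERATION_FLAGS[operation]


# Example: flags_for('load_only') -> (True, False)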
def main():
    module = AnsibleModule(
        argument_spec=dict(
            servers=dict(required=True, type='list'),
            realm=dict(required=True),
            hostname=dict(required=True),
            debug=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    module._ansible_debug = True
    setup_logging()

    realm = module.params.get('realm')
    hostname = module.params.get('hostname')
    debug = module.params.get('debug')

    host_principal = 'host/%s@%s' % (hostname, realm)
    os.environ['KRB5CCNAME'] = paths.IPA_DNS_CCACHE

    ca_certs = x509.load_certificate_list_from_file(paths.IPA_CA_CRT)
    if 40500 <= NUM_VERSION < 40590:
        ca_certs = [cert.public_bytes(serialization.Encoding.DER)
                    for cert in ca_certs]
    elif NUM_VERSION < 40500:
        ca_certs = [cert.der_data for cert in ca_certs]

    with certdb.NSSDatabase() as tmp_db:
        api.bootstrap(context='cli_installer',
                      confdir=paths.ETC_IPA,
                      debug=debug,
                      delegate=False,
                      nss_dir=tmp_db.secdir)

        if 'config_loaded' not in api.env:
            module.fail_json(msg="Failed to initialize IPA API.")

        # Clear out any current session keyring information
        try:
            delete_persistent_client_session_data(host_principal)
        except ValueError:
            pass

        # Add CA certs to a temporary NSS database
        try:
            # pylint: disable=deprecated-method
            argspec = inspect.getargspec(tmp_db.create_db)
            # pylint: enable=deprecated-method
            if "password_filename" not in argspec.args:
                tmp_db.create_db()
            else:
                pwd_file = write_tmp_file(ipa_generate_password())
                tmp_db.create_db(pwd_file.name)
            for i, cert in enumerate(ca_certs):
                if hasattr(certdb, "EXTERNAL_CA_TRUST_FLAGS"):
                    tmp_db.add_cert(cert,
                                    'CA certificate %d' % (i + 1),
                                    certdb.EXTERNAL_CA_TRUST_FLAGS)
                else:
                    tmp_db.add_cert(cert, 'CA certificate %d' % (i + 1),
                                    'C,,')
        except CalledProcessError:
            module.fail_json(msg="Failed to add CA to temporary "
                                 "NSS database.")

        api.finalize()

        # Now, let's try to connect to the server's RPC interface
        connected = False
        try:
            api.Backend.rpcclient.connect()
            connected = True
            module.debug("Try RPC connection")
            api.Backend.rpcclient.forward('ping')
        except errors.KerberosError as e:
            if connected:
                api.Backend.rpcclient.disconnect()
            module.log(
                "Cannot connect to the server due to Kerberos error: %s. "
                "Trying with delegate=True" % e)
            try:
                api.Backend.rpcclient.connect(delegate=True)
                module.debug("Try RPC connection")
                api.Backend.rpcclient.forward('ping')
                module.log("Connection with delegate=True successful")

                # The remote server is not capable of Kerberos S4U2Proxy
                # delegation. This feature is implemented in IPA server
                # version 2.2 and higher.
                module.warn(
                    "Target IPA server has a lower version than the "
                    "enrolled client")
                module.warn(
                    "Some capabilities including the ipa command "
                    "capability may not be available")
            except errors.PublicError as e2:  # pylint: disable=invalid-name
                module.fail_json(
                    msg="Cannot connect to the IPA server RPC interface: "
                        "%s" % e2)
        except errors.PublicError as e:
            module.fail_json(
                msg="Cannot connect to the server due to generic error: "
                    "%s" % e)

        # Use the RPC directly so older servers are supported
        try:
            result = api.Backend.rpcclient.forward(
                'ca_is_enabled',
                version=u'2.107',
            )
            ca_enabled = result['result']
        except (errors.CommandError, errors.NetworkError):
            result = api.Backend.rpcclient.forward(
                'env',
                server=True,
                version=u'2.0',
            )
            ca_enabled = result['result']['enable_ra']
        if not ca_enabled:
            disable_ra()

        # Get the subject base from the IPA server
        try:
            config = api.Command['config_show']()['result']
            subject_base = str(DN(config['ipacertificatesubjectbase'][0]))
        except errors.PublicError:
            try:
                config = api.Backend.rpcclient.forward(
                    'config_show',
                    raw=True,  # so that servroles are not queried
                    version=u'2.0')['result']
            except Exception as e:
                logger.debug("config_show failed %s", e, exc_info=True)
                module.fail_json(
                    msg="Failed to retrieve CA certificate subject base: "
                        "{}".format(e),
                    rval=CLIENT_INSTALL_ERROR)
            else:
                subject_base = str(DN(config['ipacertificatesubjectbase'][0]))

        module.exit_json(changed=True,
                         ca_enabled=ca_enabled,
                         subject_base=subject_base)
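
# The CA detection above tries the newer 'ca_is_enabled' RPC and falls
# back to reading 'enable_ra' from the server environment on older
# servers. Below is a minimal standalone sketch of that
# try-new-then-fall-back pattern; detect_ca_enabled is a hypothetical
# name, and the callable plus exception classes are passed in so the
# sketch stays self-contained without importing ipalib.
def detect_ca_enabled(rpc_forward, command_error, network_error):
    """Return True if the server reports an enabled CA."""
    try:
        # Newer servers answer 'ca_is_enabled' directly.
        return rpc_forward('ca_is_enabled', version=u'2.107')['result']
    except (command_error, network_error):
        # Older servers: inspect the server environment instead.
        result = rpc_forward('env', server=True, version=u'2.0')
        return result['result']['enable_ra']


# Example (assumed wiring):
#   detect_ca_enabled(api.Backend.rpcclient.forward,
#                     errors.CommandError, errors.NetworkError)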
def main():
    module = AnsibleModule(
        argument_spec=dict(
            servers=dict(required=False, type='list', default=[]),
            domain=dict(required=False),
            realm=dict(required=False),
            hostname=dict(required=False),
            ca_cert_file=dict(required=False),
            check=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    module._ansible_debug = True

    opt_domain = module.params.get('domain')
    opt_servers = module.params.get('servers')
    opt_realm = module.params.get('realm')
    opt_hostname = module.params.get('hostname')
    opt_ca_cert_file = module.params.get('ca_cert_file')
    opt_check = module.params.get('check')

    hostname = None
    hostname_source = None
    dnsok = False
    cli_domain = None
    cli_server = None
    cli_realm = None
    cli_kdc = None
    client_domain = None
    cli_basedn = None

    if opt_hostname:
        hostname = opt_hostname
        hostname_source = 'Provided as option'
    else:
        hostname = socket.getfqdn()
        hostname_source = "Machine's FQDN"
    if hostname != hostname.lower():
        module.fail_json(msg="Invalid hostname '%s', must be lower-case."
                             % hostname)
    if hostname in ('localhost', 'localhost.localdomain'):
        module.fail_json(msg="Invalid hostname, '%s' must not be used."
                             % hostname)

    # Get the domain from the first server if the domain is not set,
    # but servers were given
    if opt_domain is None and len(opt_servers) > 0:
        opt_domain = opt_servers[0][opt_servers[0].find(".") + 1:]

    # Create the discovery instance
    ds = ipadiscovery.IPADiscovery()

    ret = ds.search(domain=opt_domain, servers=opt_servers, realm=opt_realm,
                    hostname=hostname,
                    ca_cert_path=get_cert_path(opt_ca_cert_file))

    if opt_servers and ret != 0:
        # There is no point in continuing with the installation, as the
        # server list was passed as a fixed list of servers and thus we
        # cannot discover any better result
        module.fail_json(msg="Failed to verify that %s is an IPA Server."
                             % ', '.join(opt_servers))

    if ret == ipadiscovery.BAD_HOST_CONFIG:
        module.fail_json(msg="Can't get the fully qualified name of this "
                             "host")
    if ret == ipadiscovery.NOT_FQDN:
        module.fail_json(msg="%s is not a fully-qualified hostname"
                             % hostname)

    if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
            or not ds.domain:
        if ret == ipadiscovery.NO_LDAP_SERVER:
            if ds.server:
                module.log("%s is not an LDAP server" % ds.server)
            else:
                module.log("No LDAP server found")
        elif ret == ipadiscovery.NOT_IPA_SERVER:
            if ds.server:
                module.log("%s is not an IPA server" % ds.server)
            else:
                module.log("No IPA server found")
        else:
            module.log("Domain not found")
        if opt_domain:
            cli_domain = opt_domain
            cli_domain_source = 'Provided as option'
        else:
            module.fail_json(
                msg="Unable to discover domain, not provided on command "
                    "line")

        ret = ds.search(domain=cli_domain, servers=opt_servers,
                        hostname=hostname,
                        ca_cert_path=get_cert_path(opt_ca_cert_file))

    if not cli_domain:
        if ds.domain:
            cli_domain = ds.domain
            cli_domain_source = ds.domain_source
            module.debug("will use discovered domain: %s" % cli_domain)

    client_domain = hostname[hostname.find(".") + 1:]

    if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
            or not ds.server:
        module.debug("IPA Server not found")
        if opt_servers:
            cli_server = opt_servers
            cli_server_source = 'Provided as option'
        else:
            module.fail_json(msg="Unable to find IPA Server to join")

        ret = ds.search(domain=cli_domain, servers=cli_server,
                        hostname=hostname,
                        ca_cert_path=get_cert_path(opt_ca_cert_file))
    else:
        # Only set dnsok to True if we were not passed in one or more
        # servers and if DNS discovery actually worked.
        if not opt_servers:
            (server, domain) = ds.check_domain(ds.domain, set(),
                                               "Validating DNS Discovery")
            if server and domain:
                module.debug("DNS validated, enabling discovery")
                dnsok = True
            else:
                module.debug("DNS discovery failed, disabling discovery")
        else:
            module.debug(
                "Using servers from command line, disabling DNS discovery")

    if not cli_server:
        if opt_servers:
            cli_server = ds.servers
            cli_server_source = 'Provided as option'
            module.debug("will use provided server: %s"
                         % ', '.join(opt_servers))
        elif ds.server:
            cli_server = ds.servers
            cli_server_source = ds.server_source
            module.debug("will use discovered server: %s" % cli_server[0])

    if ret == ipadiscovery.NOT_IPA_SERVER:
        module.fail_json(msg="%s is not an IPA v2 Server." % cli_server[0])

    if ret == ipadiscovery.NO_ACCESS_TO_LDAP:
        module.warn("Anonymous access to the LDAP server is disabled.")
        ret = 0

    if ret == ipadiscovery.NO_TLS_LDAP:
        module.warn(
            "The LDAP server requires TLS, but we do not have the CA.")
        ret = 0

    if ret != 0:
        module.fail_json(msg="Failed to verify that %s is an IPA Server."
                             % cli_server[0])

    cli_kdc = ds.kdc
    if dnsok and not cli_kdc:
        module.fail_json(msg="DNS domain '%s' is not configured for "
                             "automatic KDC address lookup."
                             % ds.realm.lower())

    if dnsok:
        module.log("Discovery was successful!")

    cli_realm = ds.realm
    cli_realm_source = ds.realm_source
    module.debug("will use discovered realm: %s" % cli_realm)

    if opt_realm and opt_realm != cli_realm:
        module.fail_json(
            msg="The provided realm name [%s] does not match the "
                "discovered one [%s]" % (opt_realm, cli_realm))

    cli_basedn = str(ds.basedn)
    cli_basedn_source = ds.basedn_source
    module.debug("will use discovered basedn: %s" % cli_basedn)

    module.log("Client hostname: %s" % hostname)
    module.debug("Hostname source: %s" % hostname_source)
    module.log("Realm: %s" % cli_realm)
    module.debug("Realm source: %s" % cli_realm_source)
    module.log("DNS Domain: %s" % cli_domain)
    module.debug("DNS Domain source: %s" % cli_domain_source)
    module.log("IPA Server: %s" % ', '.join(cli_server))
    module.debug("IPA Server source: %s" % cli_server_source)
    module.log("BaseDN: %s" % cli_basedn)
    module.debug("BaseDN source: %s" % cli_basedn_source)

    # ipa-join would fail with an IP address instead of an FQDN
    for srv in cli_server:
        try:
            socket.inet_pton(socket.AF_INET, srv)
            is_ipaddr = True
        except socket.error:
            try:
                socket.inet_pton(socket.AF_INET6, srv)
                is_ipaddr = True
            except socket.error:
                is_ipaddr = False
        if is_ipaddr:
            module.warn("It seems that you are using an IP address "
                        "instead of FQDN as an argument to --server. The "
                        "installation may fail.")
            break

    # Detect NTP servers
    ds = ipadiscovery.IPADiscovery()
    ntp_servers = ds.ipadns_search_srv(cli_domain, '_ntp._udp', None,
                                       break_on_first=False)

    # Check if the IPA client is already configured
    if is_client_configured():
        # Check that realm and domain match the current configuration
        current_config = get_ipa_conf()
        if cli_domain != current_config.get('domain'):
            module.fail_json(msg="IPA client already installed "
                                 "with a conflicting domain")
        if cli_realm != current_config.get('realm'):
            module.fail_json(msg="IPA client already installed "
                                 "with a conflicting realm")

    # Done
    module.exit_json(changed=True,
                     servers=cli_server,
                     domain=cli_domain,
                     realm=cli_realm,
                     kdc=cli_kdc,
                     basedn=cli_basedn,
                     hostname=hostname,
                     client_domain=client_domain,
                     dnsok=dnsok,
                     ntp_servers=ntp_servers,
                     ipa_python_version=IPA_PYTHON_VERSION)
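
# The server-address check above tries socket.inet_pton() for IPv4 and
# then IPv6 to decide whether a server entry is a bare IP address
# (which ipa-join cannot use) rather than an FQDN. A minimal standalone
# sketch of that check follows; the helper name is_ip_address is
# hypothetical.
import socket


def is_ip_address(value):
    """Return True if value parses as an IPv4 or IPv6 address."""
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, value)
            return True
        except socket.error:
            continue
    return False


# Example: is_ip_address('192.0.2.10') -> True
#          is_ip_address('ipa.example.test') -> False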
def run_module():
    # Define the available arguments/parameters that a user can pass to
    # the module.
    # Defaults for VDO parameters are None, in order to facilitate
    # the detection of parameters passed from the playbook.
    # Creation param defaults are determined by the creation section.
    module_args = dict(
        name=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        activated=dict(choices=['yes', 'no']),
        running=dict(choices=['yes', 'no']),
        growphysical=dict(type='bool', default=False),
        device=dict(),
        logicalsize=dict(),
        deduplication=dict(choices=['enabled', 'disabled']),
        compression=dict(choices=['enabled', 'disabled']),
        blockmapcachesize=dict(type='str'),
        readcache=dict(choices=['enabled', 'disabled']),
        readcachesize=dict(),
        emulate512=dict(type='bool', default=False),
        slabsize=dict(),
        writepolicy=dict(choices=['auto', 'sync', 'async']),
        indexmem=dict(),
        indexmode=dict(choices=['dense', 'sparse']),
        ackthreads=dict(),
        biothreads=dict(),
        cputhreads=dict(),
        logicalthreads=dict(),
        physicalthreads=dict()
    )

    # Seed the result dictionary in the object. There will be an
    # 'invocation' dictionary added with 'module_args' (arguments given).
    result = dict(
        changed=False
    )

    # The AnsibleModule object is our abstraction for working with
    # Ansible. This includes instantiation; a couple of common attributes
    # are the args/params passed to the execution, as well as whether the
    # module supports check mode.
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False
    )

    if not HAS_YAML:
        module.fail_json(msg='PyYAML is required for this module.')

    vdocmd = module.get_bin_path("vdo", required=True)
    if not vdocmd:
        module.fail_json(msg='VDO is not installed.', **result)

    # Print a pre-run list of VDO volumes in the result object.
    vdolist = inventory_vdos(module, vdocmd)

    runningvdolist = list_running_vdos(module, vdocmd)

    # Collect the name of the desired VDO volume, and its state. These
    # will determine what to do.
    desiredvdo = module.params['name']
    state = module.params['state']

    # Create a desired VDO volume that doesn't exist yet.
    if (desiredvdo not in vdolist) and (state == 'present'):
        device = module.params['device']
        if device is None:
            module.fail_json(msg="Creating a VDO volume requires "
                                 "specifying a 'device' in the playbook.")

        # Create a dictionary of the options from the AnsibleModule
        # parameters, compile the vdo command options, and run
        # "vdo create" with those options.
        # Since this is a creation of a new VDO volume, it will contain
        # all of the parameters given by the playbook; the rest will
        # assume default values.
        options = module.params
        vdocmdoptions = add_vdooptions(options)
        rc, out, err = module.run_command(
            "%s create --name=%s --device=%s %s"
            % (vdocmd, desiredvdo, device, vdocmdoptions))
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Creating VDO %s failed." % desiredvdo,
                             rc=rc, err=err)

        if module.params['compression'] == 'disabled':
            rc, out, err = module.run_command(
                "%s disableCompression --name=%s" % (vdocmd, desiredvdo))

        if ((module.params['deduplication'] is not None)
                and module.params['deduplication'] == 'disabled'):
            rc, out, err = module.run_command(
                "%s disableDeduplication --name=%s" % (vdocmd, desiredvdo))

        if module.params['activated'] == 'no':
            deactivate_vdo(module, desiredvdo, vdocmd)

        if module.params['running'] == 'no':
            stop_vdo(module, desiredvdo, vdocmd)

        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        module.log("created VDO volume %s" % desiredvdo)
        module.exit_json(**result)

    # Modify the current parameters of a VDO that exists.
    if (desiredvdo in vdolist) and (state == 'present'):
        rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
        # Use safe_load: the status output is plain data, and safe_load
        # avoids constructing arbitrary objects from YAML tags.
        vdostatusyaml = yaml.safe_load(vdostatusoutput)

        # An empty dictionary to contain dictionaries of VDO statistics
        processedvdos = {}

        vdoyamls = vdostatusyaml['VDOs']
        if vdoyamls is not None:
            processedvdos = vdoyamls

        # The 'vdo status' keys that are currently modifiable.
        statusparamkeys = ['Acknowledgement threads',
                           'Bio submission threads',
                           'Block map cache size',
                           'CPU-work threads',
                           'Logical threads',
                           'Physical threads',
                           'Read cache',
                           'Read cache size',
                           'Configured write policy',
                           'Compression',
                           'Deduplication']

        # A key translation table from 'vdo status' output to Ansible
        # module parameters. This covers all of the 'vdo status'
        # parameter keys that could be modified with the 'vdo' command.
        vdokeytrans = {
            'Logical size': 'logicalsize',
            'Compression': 'compression',
            'Deduplication': 'deduplication',
            'Block map cache size': 'blockmapcachesize',
            'Read cache': 'readcache',
            'Read cache size': 'readcachesize',
            'Configured write policy': 'writepolicy',
            'Acknowledgement threads': 'ackthreads',
            'Bio submission threads': 'biothreads',
            'CPU-work threads': 'cputhreads',
            'Logical threads': 'logicalthreads',
            'Physical threads': 'physicalthreads'
        }

        # Build a dictionary of the current VDO status parameters, with
        # the keys used by VDO. (These keys will be converted later.)
        currentvdoparams = {}

        # Build a "lookup table" dictionary containing a translation
        # table of the parameters that can be modified.
        modtrans = {}

        for statfield in statusparamkeys:
            currentvdoparams[statfield] = \
                processedvdos[desiredvdo][statfield]
            modtrans[statfield] = vdokeytrans[statfield]

        # Build a dictionary of current parameters formatted with the
        # same keys as the AnsibleModule parameters.
        currentparams = {}
        for paramkey in currentvdoparams.keys():
            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]

        diffparams = {}

        # Check for differences between the playbook parameters and the
        # current parameters. This will need a comparison function;
        # since AnsibleModule params are all strings, compare them as
        # strings (but if it's None, skip).
        for key in currentparams.keys():
            if module.params[key] is not None:
                if str(currentparams[key]) != module.params[key]:
                    diffparams[key] = module.params[key]

        if diffparams:
            vdocmdoptions = add_vdooptions(diffparams)
            if vdocmdoptions:
                rc, out, err = module.run_command(
                    "%s modify --name=%s %s"
                    % (vdocmd, desiredvdo, vdocmdoptions))
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Modifying VDO %s failed."
                                         % desiredvdo, rc=rc, err=err)

            if 'deduplication' in diffparams.keys():
                dedupemod = diffparams['deduplication']
                if dedupemod == 'disabled':
                    rc, out, err = module.run_command(
                        "%s disableDeduplication --name=%s"
                        % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing deduplication on "
                                             "VDO volume %s failed."
                                             % desiredvdo, rc=rc, err=err)

                if dedupemod == 'enabled':
                    rc, out, err = module.run_command(
                        "%s enableDeduplication --name=%s"
                        % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing deduplication on "
                                             "VDO volume %s failed."
                                             % desiredvdo, rc=rc, err=err)

            if 'compression' in diffparams.keys():
                compressmod = diffparams['compression']
                if compressmod == 'disabled':
                    rc, out, err = module.run_command(
                        "%s disableCompression --name=%s"
                        % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing compression on "
                                             "VDO volume %s failed."
                                             % desiredvdo, rc=rc, err=err)

                if compressmod == 'enabled':
                    rc, out, err = module.run_command(
                        "%s enableCompression --name=%s"
                        % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing compression on "
                                             "VDO volume %s failed."
                                             % desiredvdo, rc=rc, err=err)

            if 'writepolicy' in diffparams.keys():
                writepolmod = diffparams['writepolicy']
                # 'auto', 'sync' and 'async' all issue the same
                # changeWritePolicy command, so one branch covers all
                # three values.
                if writepolmod in ('auto', 'sync', 'async'):
                    rc, out, err = module.run_command(
                        "%s changeWritePolicy --name=%s --writePolicy=%s"
                        % (vdocmd, desiredvdo, writepolmod))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                             % desiredvdo, rc=rc, err=err)

        # Process the size parameters, to determine if a growPhysical or
        # growLogical operation needs to occur.
        sizeparamkeys = ['Logical size', ]

        currentsizeparams = {}
        sizetrans = {}
        for statfield in sizeparamkeys:
            currentsizeparams[statfield] = \
                processedvdos[desiredvdo][statfield]
            sizetrans[statfield] = vdokeytrans[statfield]

        sizeparams = {}
        for paramkey in currentsizeparams.keys():
            sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]

        diffsizeparams = {}
        for key in sizeparams.keys():
            if module.params[key] is not None:
                if str(sizeparams[key]) != module.params[key]:
                    diffsizeparams[key] = module.params[key]

        if module.params['growphysical']:
            physdevice = module.params['device']
            rc, devsectors, err = module.run_command("blockdev --getsz %s"
                                                     % (physdevice))
            # 512-byte sectors to 4 KB blocks; integer division keeps the
            # block count integral on Python 3 as well.
            devblocks = (int(devsectors) // 8)
            dmvdoname = ('/dev/mapper/' + desiredvdo)
            currentvdostats = (processedvdos[desiredvdo]
                               ['VDO statistics']
                               [dmvdoname])
            currentphysblocks = currentvdostats['physical blocks']

            # Set a growPhysical threshold to grow only when there is
            # guaranteed to be more than 2 slabs worth of unallocated
            # space on the device to use. For now, set to device
            # size + 64 GB, since 32 GB is the largest possible
            # slab size.
            growthresh = devblocks + 16777216

            if currentphysblocks > growthresh:
                result['changed'] = True
                rc, out, err = module.run_command(
                    "%s growPhysical --name=%s" % (vdocmd, desiredvdo))

        if 'logicalsize' in diffsizeparams.keys():
            result['changed'] = True
            vdocmdoptions = ("--vdoLogicalSize="
                             + diffsizeparams['logicalsize'])
            rc, out, err = module.run_command(
                "%s growLogical --name=%s %s"
                % (vdocmd, desiredvdo, vdocmdoptions))

        vdoactivatestatus = processedvdos[desiredvdo]['Activate']

        if ((module.params['activated'] == 'no')
                and (vdoactivatestatus == 'enabled')):
            deactivate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        if ((module.params['activated'] == 'yes')
                and (vdoactivatestatus == 'disabled')):
            activate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        if ((module.params['running'] == 'no')
                and (desiredvdo in runningvdolist)):
            stop_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        # Note that a disabled VDO volume cannot be started by the
        # 'vdo start' command, by design. To accurately track changed
        # status, don't try to start a disabled VDO volume.
        # If the playbook contains 'activated: yes', assume that
        # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
        # will have the activated status prior to the activate_vdo()
        # call.
        if (((vdoactivatestatus == 'enabled')
                or (module.params['activated'] == 'yes'))
                and (module.params['running'] == 'yes')
                and (desiredvdo not in runningvdolist)):
            start_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True

        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        if diffparams:
            module.log("modified parameters of VDO volume %s" % desiredvdo)

        module.exit_json(**result)

    # Remove a desired VDO that currently exists.
    if (desiredvdo in vdolist) and (state == 'absent'):
        rc, out, err = module.run_command("%s remove --name=%s"
                                          % (vdocmd, desiredvdo))
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Removing VDO %s failed." % desiredvdo,
                             rc=rc, err=err)

        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        module.log("removed VDO volume %s" % desiredvdo)
        module.exit_json(**result)

    # Fall through: the state for the desired VDO volume was absent,
    # and it does not exist. Print a post-run list of VDO volumes in
    # the result object.
    vdolist = inventory_vdos(module, vdocmd)
    module.log("received request to remove non-existent VDO volume %s"
               % desiredvdo)

    module.exit_json(**result)
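
# The modify branch above compares playbook parameters against the
# current 'vdo status' values as strings, skipping parameters the
# playbook did not set (None). Below is a minimal standalone sketch of
# that diff pattern; the helper name diff_params is hypothetical.
def diff_params(current, desired):
    """Return {key: desired_value} for desired values that differ.

    current maps parameter names to their current (possibly non-string)
    values; desired maps parameter names to playbook strings or None.
    """
    diff = {}
    for key, value in desired.items():
        if value is not None and key in current:
            # AnsibleModule params arrive as strings, so convert the
            # current value before comparing.
            if str(current[key]) != value:
                diff[key] = value
    return diff


# Example:
#   diff_params({'writepolicy': 'sync'},
#               {'writepolicy': 'async', 'readcache': None})
#   -> {'writepolicy': 'async'}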