def synconce_ntp(server_fqdn, debug=False):
    """Perform a one-shot time sync against *server_fqdn* using ntpd.

    Primarily designed to be used before Kerberos setup so the local
    clock follows the KDC time.

    :param server_fqdn: NTP server to synchronize against
    :param debug: when True, run ntpd with ``-d`` for verbose output
    :returns: True if the sync succeeded, False otherwise
    """
    ntpd_bin = paths.NTPD
    if not os.path.exists(ntpd_bin):
        return False

    # ntpd never exits when it cannot reach the server, so run it
    # under timeout(1) and give up after 15 seconds.
    max_wait = 15
    conf = ipautil.write_tmp_file('server %s' % server_fqdn)
    cmd = [paths.BIN_TIMEOUT, str(max_wait), ntpd_bin, '-qgc', conf.name]
    if debug:
        cmd.append('-d')

    root_logger.info('Attempting to sync time using ntpd. '
                     'Will timeout after %d seconds' % max_wait)
    try:
        ipautil.run(cmd)
    except ipautil.CalledProcessError as e:
        # timeout(1) exits with status 124 when the command timed out
        if e.returncode == 124:
            root_logger.debug('Process did not complete before timeout')
        return False
    return True
def check_zone_overlap(zone, raise_on_error=True):
    """Check that DNS zone *zone* does not already exist in the DNS tree.

    :param zone: zone name, either a str or a DNSName
    :param raise_on_error: if True, a failed DNS lookup raises ValueError;
        otherwise the failure is only logged as a warning
    :raises ValueError: when the zone already exists (always), or when the
        DNS check itself fails and raise_on_error is True
    """
    root_logger.info("Checking DNS domain %s, please wait ..." % zone)
    if not isinstance(zone, DNSName):
        zone = DNSName(zone).make_absolute()

    # automatic empty zones always exist so checking them is pointless,
    # do not report them to avoid meaningless error messages
    if is_auto_empty_zone(zone):
        return

    try:
        containing_zone = dns.resolver.zone_for_name(zone)
    except dns.exception.DNSException as e:
        msg = ("DNS check for domain %s failed: %s." % (zone, e))
        if raise_on_error:
            raise ValueError(msg)
        else:
            root_logger.warning(msg)
            return

    if containing_zone == zone:
        # Zone is already delegated; try to name the responsible
        # nameservers in the error message (best effort only).
        try:
            ns = [ans.to_text() for ans in dns.resolver.query(zone, 'NS')]
        except dns.exception.DNSException as e:
            root_logger.debug("Failed to resolve nameserver(s) for domain"
                              " {0}: {1}".format(zone, e))
            ns = []

        msg = u"DNS zone {0} already exists in DNS".format(zone)
        if ns:
            msg += u" and is handled by server(s): {0}".format(', '.join(ns))
        raise ValueError(msg)
def __request(self, name, args):
    """Perform a JSON-RPC call *name* over the underlying transport.

    :param name: remote method name
    :param args: (args, options) pair; options may carry a 'version' key
    :returns: the decoded 'result' member of the JSON-RPC response
    :raises JSONError: when the response body is not valid JSON
    :raises UnknownError: when the server reports an unrecognized code
    :raises: the mapped IPA error class for recognized server errors
    """
    payload = {"method": unicode(name), "params": args, "id": 0}
    version = args[1].get("version", VERSION_WITHOUT_CAPABILITIES)
    payload = json_encode_binary(payload, version)

    if self.__verbose >= 2:
        root_logger.info("Request: %s",
                         json.dumps(payload, sort_keys=True, indent=4))

    # request body is sent UTF-8 encoded, the reply is ASCII JSON
    response = self.__transport.request(
        self.__host,
        self.__handler,
        json.dumps(payload).encode("utf-8"),
        verbose=self.__verbose >= 3
    )

    try:
        response = json_decode_binary(json.loads(response.decode("ascii")))
    except ValueError as e:
        raise JSONError(error=str(e))

    if self.__verbose >= 2:
        root_logger.info(
            "Response: %s",
            json.dumps(json_encode_binary(response, version),
                       sort_keys=True, indent=4)
        )

    error = response.get("error")
    if error:
        try:
            # map the server-side error code to a concrete exception class
            error_class = errors_by_code[error["code"]]
        except KeyError:
            raise UnknownError(code=error.get("code"),
                               error=error.get("message"),
                               server=self.__host)
        else:
            kw = error.get("data", {})
            kw["message"] = error["message"]
            raise error_class(**kw)

    return response["result"]
def execute(self, **options):
    """Zero out ipaBaseRID on ipa-ad-trust-posix ID ranges.

    Ranges of type ipa-ad-trust-posix do not use a base RID, so any
    such range found with a non-zero ipaBaseRID is reset to 0.

    :returns: (False, []) -- never requests a restart, no update entries
    """
    ldap = self.api.Backend.ldap2

    base_dn = DN(self.api.env.container_ranges, self.api.env.basedn)
    search_filter = ("(&(objectClass=ipaTrustedADDomainRange)"
                     "(ipaRangeType=ipa-ad-trust-posix)"
                     "(!(ipaBaseRID=0)))")
    root_logger.debug(
        "update_idrange_baserid: search for ipa-ad-trust-posix ID ranges "
        "with ipaBaseRID != 0")

    try:
        (entries, _truncated) = ldap.find_entries(
            search_filter, ['ipabaserid'], base_dn,
            paged_search=True, time_limit=0, size_limit=0)
    except errors.NotFound:
        root_logger.debug("update_idrange_baserid: no AD domain "
                          "range with posix attributes found")
        return False, []
    except errors.ExecutionError as e:
        root_logger.error(
            "update_idrange_baserid: cannot retrieve "
            "list of affected ranges: %s", e)
        return False, []

    root_logger.debug(
        "update_idrange_baserid: found %d "
        "idranges possible to update", len(entries))

    error = False

    # Set the range type
    for entry in entries:
        entry['ipabaserid'] = 0
        try:
            root_logger.debug("Updating existing idrange: %s" % (entry.dn))
            ldap.update_entry(entry)
            root_logger.info("Done")
        except (errors.EmptyModlist, errors.NotFound):
            # nothing to change / entry vanished: not an error
            pass
        except errors.ExecutionError as e:
            root_logger.debug(
                "update_idrange_type: cannot "
                "update idrange: %s", e)
            error = True

    if error:
        root_logger.error("update_idrange_baserid: error(s) "
                          "detected during idrange baserid update")
    else:
        # All affected entries updated, exit the loop
        root_logger.debug("update_idrange_baserid: all affected "
                          "idranges updated")

    return False, []
def synconce_ntp(server_fqdn, debug=False):
    """One-shot time synchronization with *server_fqdn* via ntpd.

    Primarily designed to be used before Kerberos setup so the local
    clock follows the KDC time.

    :param server_fqdn: NTP server to synchronize against
    :param debug: when True, run ntpd with ``-d`` for verbose output
    :returns: True if the sync succeeded, False otherwise
    """
    ntpd_bin = paths.NTPD
    if not os.path.exists(ntpd_bin):
        return False

    conf = ipautil.write_tmp_file('server %s' % server_fqdn)
    cmd = [ntpd_bin, '-qgc', conf.name]
    if debug:
        cmd.append('-d')

    # The ntpd command will never exit if it is unable to reach the
    # server, so cap the wait at 15 seconds.
    max_wait = 15
    root_logger.info('Attempting to sync time using ntpd. '
                     'Will timeout after %d seconds' % max_wait)
    try:
        ipautil.run(cmd, timeout=max_wait)
    except ipautil.CalledProcessError:
        return False
    else:
        return True
def execute(self, **options):
    """Migrate referential-integrity plugin config to new-style attrs.

    Old configurations store settings in positional nsslapd-pluginArgN
    attributes; the plugin now expects named referint-* attributes.
    If the entry already uses the new style nothing is changed.

    :returns: (False, []) -- no restart needed, no update entries
    """
    root_logger.debug(
        "Upgrading referential integrity plugin configuration")
    ldap = self.api.Backend.ldap2
    try:
        entry = ldap.get_entry(self.referint_dn)
    except errors.NotFound:
        root_logger.error("Referential integrity configuration not found")
        return False, []

    referint_membership_attrs = []

    root_logger.debug("Initial value: %s", repr(entry))

    # nsslapd-pluginArg0 -> referint-update-delay
    update_delay = entry.get('nsslapd-pluginArg0')
    if update_delay:
        root_logger.debug("add: referint-update-delay: %s", update_delay)
        entry['referint-update-delay'] = update_delay
        entry['nsslapd-pluginArg0'] = None
    else:
        # Arg0 missing means the entry is already new-style; bail out.
        root_logger.info("Plugin already uses new style, skipping")
        return False, []

    # nsslapd-pluginArg1 -> referint-logfile
    logfile = entry.get('nsslapd-pluginArg1')
    if logfile:
        root_logger.debug("add: referint-logfile: %s", logfile)
        entry['referint-logfile'] = logfile
        entry['nsslapd-pluginArg1'] = None

    # nsslapd-pluginArg2 -> referint-logchanges
    logchanges = entry.get('nsslapd-pluginArg2')
    if logchanges:
        root_logger.debug("add: referint-logchanges: %s", logchanges)
        entry['referint-logchanges'] = logchanges
        entry['nsslapd-pluginArg2'] = None

    # nsslapd-pluginArg3..N -> referint-membership-attr [3..N]
    for key in entry.keys():
        if key.lower().startswith('nsslapd-pluginarg'):
            arg_val = entry.single_value[key]
            if arg_val:
                referint_membership_attrs.append(arg_val)
            entry[key] = None

    if referint_membership_attrs:
        # entry['referint-membership-attr'] is None, plugin doesn't allow
        # mixing old and new style
        entry['referint-membership-attr'] = referint_membership_attrs

    root_logger.debug("Final value: %s", repr(entry))
    try:
        ldap.update_entry(entry)
    except errors.EmptyModlist:
        root_logger.debug("No modifications required")
        return False, []

    return False, []
def add_ca_schema():
    """Copy IPA schema files into the CA DS instance

    Existing target files whose content differs (compared by SHA-1)
    are only replaced after interactive confirmation; identical files
    are reported and skipped.
    """
    pki_pent = pwd.getpwnam(PKI_USER)
    ds_pent = pwd.getpwnam(DS_USER)
    for schema_fname in SCHEMA_FILENAMES:
        source_fname = os.path.join(paths.USR_SHARE_IPA_DIR, schema_fname)
        target_fname = os.path.join(schema_dirname(SERVERID), schema_fname)
        if not os.path.exists(source_fname):
            root_logger.debug("File does not exist: %s", source_fname)
            continue
        if os.path.exists(target_fname):
            # compare digests and prompt before replacing changed files
            target_sha1 = _sha1_file(target_fname)
            source_sha1 = _sha1_file(source_fname)
            if target_sha1 != source_sha1:
                target_size = os.stat(target_fname).st_size
                source_size = os.stat(source_fname).st_size
                root_logger.info("Target file %s exists but the content is "
                                 "different", target_fname)
                root_logger.info("\tTarget file: sha1: %s, size: %s B",
                                 target_sha1, target_size)
                root_logger.info("\tSource file: sha1: %s, size: %s B",
                                 source_sha1, source_size)
                if not ipautil.user_input("Do you want replace %s file?" %
                                          target_fname, True):
                    continue
            else:
                root_logger.info("Target exists, not overwriting: %s",
                                 target_fname)
                continue

        try:
            shutil.copyfile(source_fname, target_fname)
        except IOError as e:
            root_logger.warning("Could not install %s: %s", target_fname, e)
        else:
            root_logger.info("Installed %s", target_fname)

        os.chmod(target_fname, 0o440)    # read access for dirsrv user/group
        os.chown(target_fname, pki_pent.pw_uid, ds_pent.pw_gid)
def install_dns_records(config, options, remote_api):
    """Add DNS records for this replica on the existing master.

    Best-effort: all failures are logged but never abort the install.
    Does nothing when the master has no DNS container in LDAP.
    """
    if not bindinstance.dns_container_exists(
            config.master_host_name,
            ipautil.realm_to_suffix(config.realm_name),
            dm_password=config.dirman_password):
        return

    try:
        bind = bindinstance.BindInstance(dm_password=config.dirman_password,
                                         api=remote_api)
        for ip in config.ips:
            reverse_zone = bindinstance.find_reverse_zone(ip, remote_api)

            bind.add_master_dns_records(config.host_name,
                                        str(ip),
                                        config.realm_name,
                                        config.domain_name,
                                        reverse_zone,
                                        not options.no_ntp,
                                        options.setup_ca)
    except errors.NotFound as e:
        root_logger.debug('Replica DNS records could not be added '
                          'on master: %s', str(e))
    # we should not fail here no matter what
    except Exception as e:
        root_logger.info('Replica DNS records could not be added '
                         'on master: %s', str(e))
def execute(self, **options):
    """Convert referint plugin configuration to new-style attributes.

    Positional nsslapd-pluginArgN attributes are mapped onto the named
    referint-* attributes the plugin now expects. Entries already in
    new style are left untouched.

    :returns: (False, []) -- no restart needed, no update entries
    """
    root_logger.debug("Upgrading referential integrity plugin configuration")
    ldap = self.api.Backend.ldap2
    try:
        entry = ldap.get_entry(self.referint_dn)
    except errors.NotFound:
        root_logger.error("Referential integrity configuration not found")
        return False, []

    referint_membership_attrs = []

    root_logger.debug("Initial value: %s", repr(entry))

    # nsslapd-pluginArg0 -> referint-update-delay
    update_delay = entry.get('nsslapd-pluginArg0')
    if update_delay:
        root_logger.debug("add: referint-update-delay: %s", update_delay)
        entry['referint-update-delay'] = update_delay
        entry['nsslapd-pluginArg0'] = None
    else:
        # missing Arg0 means the entry is already new-style
        root_logger.info("Plugin already uses new style, skipping")
        return False, []

    # nsslapd-pluginArg1 -> referint-logfile
    logfile = entry.get('nsslapd-pluginArg1')
    if logfile:
        root_logger.debug("add: referint-logfile: %s", logfile)
        entry['referint-logfile'] = logfile
        entry['nsslapd-pluginArg1'] = None

    # nsslapd-pluginArg2 -> referint-logchanges
    logchanges = entry.get('nsslapd-pluginArg2')
    if logchanges:
        root_logger.debug("add: referint-logchanges: %s", logchanges)
        entry['referint-logchanges'] = logchanges
        entry['nsslapd-pluginArg2'] = None

    # nsslapd-pluginArg3..N -> referint-membership-attr [3..N]
    for key in entry.keys():
        if key.lower().startswith('nsslapd-pluginarg'):
            arg_val = entry.single_value[key]
            if arg_val:
                referint_membership_attrs.append(arg_val)
            entry[key] = None

    if referint_membership_attrs:
        # entry['referint-membership-attr'] is None, plugin doesn't allow
        # mixing old and new style
        entry['referint-membership-attr'] = referint_membership_attrs

    root_logger.debug("Final value: %s", repr(entry))
    try:
        ldap.update_entry(entry)
    except errors.EmptyModlist:
        root_logger.debug("No modifications required")
        return False, []

    return False, []
def upgrade_instance(self):
    """Configure Custodia on first upgrade and secure its key file."""
    if not sysupgrade.get_upgrade_state("custodia", "installed"):
        root_logger.info("Custodia service is being configured")
        self.create_instance()

    # server.keys must only be readable by its owner
    keys_mode = stat.S_IMODE(os.stat(self.server_keys).st_mode)
    if keys_mode != 0o600:
        root_logger.info("Secure server.keys mode")
        os.chmod(self.server_keys, 0o600)
def check_reverse_zones(ip_addresses, reverse_zones, options, unattended,
                        search_reverse_zones=False):
    """Validate and complete the set of reverse DNS zones to manage.

    :param ip_addresses: IP addresses that need reverse zone coverage
    :param options: installer options; no_reverse, auto_reverse and
        allow_zone_overlap are consulted (no_reverse may be mutated here)
    :param unattended: True when running non-interactively
    :param search_reverse_zones: also accept reverse zones already in LDAP
    :returns: list of normalized reverse zone names to set up (may be empty)
    """
    checked_reverse_zones = []

    # decide whether reverse zones should be managed at all
    if (not options.no_reverse and not reverse_zones and
            not options.auto_reverse):
        if unattended:
            options.no_reverse = True
        else:
            options.no_reverse = not create_reverse()

    # shortcut
    if options.no_reverse:
        return []

    # verify zones passed in options
    for rz in reverse_zones:
        # isn't the zone managed by someone else
        if not options.allow_zone_overlap:
            try:
                ipautil.check_zone_overlap(rz)
            except ValueError as e:
                msg = "Reverse zone %s will not be used: %s" % (rz, e)
                if unattended:
                    sys.exit(msg)
                else:
                    root_logger.warning(msg)
                continue
        checked_reverse_zones.append(normalize_zone(rz))

    # check that there is reverse zone for every IP
    ips_missing_reverse = []
    for ip in ip_addresses:
        if search_reverse_zones and find_reverse_zone(str(ip)):
            # reverse zone is already in LDAP
            continue
        for rz in checked_reverse_zones:
            if verify_reverse_zone(rz, ip):
                # reverse zone was entered by user
                break
        else:
            ips_missing_reverse.append(ip)

    # create reverse zone for IP addresses that does not have one
    for (ip, rz) in get_auto_reverse_zones(ips_missing_reverse):
        if options.auto_reverse:
            root_logger.info("Reverse zone %s will be created" % rz)
            checked_reverse_zones.append(rz)
        elif unattended:
            root_logger.warning("Missing reverse record for IP address %s"
                                % ip)
        else:
            if ipautil.user_input("Do you want to create reverse zone for IP "
                                  "%s" % ip, True):
                rz = read_reverse_zone(rz, str(ip), options.allow_zone_overlap)
                checked_reverse_zones.append(rz)

    return checked_reverse_zones
def execute(self, **options):
    """Reset ipaBaseRID to 0 on ipa-ad-trust-posix ID ranges.

    POSIX trust ranges do not carry a base RID; any such range found
    with a non-zero ipaBaseRID is zeroed out.

    :returns: (False, []) -- never requests a restart, no update entries
    """
    ldap = self.api.Backend.ldap2

    base_dn = DN(self.api.env.container_ranges, self.api.env.basedn)
    search_filter = ("(&(objectClass=ipaTrustedADDomainRange)"
                     "(ipaRangeType=ipa-ad-trust-posix)"
                     "(!(ipaBaseRID=0)))")
    root_logger.debug(
        "update_idrange_baserid: search for ipa-ad-trust-posix ID ranges "
        "with ipaBaseRID != 0"
    )

    try:
        (entries, truncated) = ldap.find_entries(
            search_filter, ['ipabaserid'], base_dn,
            paged_search=True, time_limit=0, size_limit=0)
    except errors.NotFound:
        root_logger.debug("update_idrange_baserid: no AD domain "
                          "range with posix attributes found")
        return False, []
    except errors.ExecutionError as e:
        root_logger.error("update_idrange_baserid: cannot retrieve "
                          "list of affected ranges: %s", e)
        return False, []

    root_logger.debug("update_idrange_baserid: found %d "
                      "idranges possible to update", len(entries))

    error = False

    # Set the range type
    for entry in entries:
        entry['ipabaserid'] = 0
        try:
            root_logger.debug("Updating existing idrange: %s" % (entry.dn))
            ldap.update_entry(entry)
            root_logger.info("Done")
        except (errors.EmptyModlist, errors.NotFound):
            # nothing to change / entry vanished: not an error
            pass
        except errors.ExecutionError as e:
            root_logger.debug("update_idrange_type: cannot "
                              "update idrange: %s", e)
            error = True

    if error:
        root_logger.error("update_idrange_baserid: error(s) "
                          "detected during idrange baserid update")
    else:
        # All affected entries updated, exit the loop
        root_logger.debug("update_idrange_baserid: all affected "
                          "idranges updated")

    return False, []
def reload_systemwide_ca_store(self):
    """Refresh the systemwide CA trust store via update-ca-trust.

    :returns: True on success, False when the helper command failed
    """
    try:
        ipautil.run([paths.UPDATE_CA_TRUST])
    except CalledProcessError as e:
        root_logger.error(
            "Could not update systemwide CA trust database: %s", e)
        return False
    root_logger.info("Systemwide CA database updated.")
    return True
def __enable(self):
    """Register the ADTRUST service in LDAP so IPA init starts it.

    Records the previous enabled state for uninstall rollback, then
    creates the service startup entry; an already-existing entry is
    logged and ignored.
    """
    self.backup_state("enabled", self.is_enabled())
    # We do not let the system start IPA components on its own,
    # Instead we reply on the IPA init script to start only enabled
    # components as found in our LDAP configuration tree
    # Note that self.dm_password is None for ADTrustInstance because
    # we ensure to be called as root and using ldapi to use autobind
    try:
        self.ldap_enable('ADTRUST', self.fqdn, self.dm_password,
                         self.suffix)
    except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
        root_logger.info("ADTRUST Service startup entry already exists.")
def tune_nofile_platform(self, num=8192, fstore=None):
    """
    Increase the number of files descriptors available to directory server
    from the default 1024 to 8192. This will allow to support a greater
    number of clients out of the box.

    This is a part of the implementation that is sysV-specific.

    Returns False if the setting of the nofile limit needs to be skipped.

    :param num: nofile limit to configure
    :param fstore: optional file store used to back up limits.conf
    """
    # NOTE(review): this value appears redacted in the source; confirm
    # the expected directory-server account name before relying on it.
    DS_USER = '******'

    # check limits.conf
    need_limits = True
    with open("/etc/security/limits.conf", "r") as f:
        for line in f:
            sline = line.strip()
            if not sline.startswith(DS_USER) or sline.find('nofile') == -1:
                continue
            # ok we already have an explicit entry for user/nofile
            need_limits = False

    # check sysconfig/dirsrv
    need_sysconf = True
    with open("/etc/sysconfig/dirsrv", "r") as f:
        for line in f:
            sline = line.strip()
            if not sline.startswith('ulimit') or sline.find('-n') == -1:
                continue
            # ok we already have an explicit entry for file limits
            need_sysconf = False

    # if sysconf or limits are set avoid messing up and defer to the admin
    if need_sysconf and need_limits:
        if fstore:
            fstore.backup_file("/etc/security/limits.conf")
        with open("/etc/security/limits.conf", "a+") as f:
            f.write('%s\t\t-\tnofile\t\t%s\n' % (DS_USER, str(num)))

        with open("/etc/sysconfig/dirsrv", "a+") as f:
            f.write('ulimit -n %s\n' % str(num))

    else:
        root_logger.info("Custom file limits are already set! Skipping\n")
        return False

    return True
def main():
    """Update the CA DS schema and restart its directory server.

    Must run as root; bootstraps the API so restart code can read
    api.env, then installs the schema files and restarts pki DS.
    """
    if os.getegid() != 0:
        sys.exit("Must be root to run this script")

    standard_logging_setup(verbose=True)

    # In 3.0, restarting needs access to api.env.
    # The returned (options, argv) pair was previously bound but never
    # used, so the binding has been dropped.
    api.bootstrap_with_global_options(context='server')

    add_ca_schema()

    restart_pki_ds()

    root_logger.info('Schema updated successfully')
def insert_ca_cert_into_systemwide_ca_store(cacert_path):
    """Install *cacert_path* into the systemwide CA store and refresh it.

    Copies the certificate (prefixed 'ipa-') into the trust store
    directory and runs update-ca-trust. Failures are logged, not raised.
    """
    # Add the 'ipa-' prefix to cert name to avoid name collisions
    cacert_name = os.path.basename(cacert_path)
    new_cacert_path = os.path.join(systemwide_ca_store, 'ipa-%s' % cacert_name)

    # Add the CA to the systemwide CA trust database.
    # Catch IOError as well: on Python 2 shutil.copy raises IOError,
    # which is not a subclass of OSError, so it was previously missed.
    try:
        shutil.copy(cacert_path, new_cacert_path)
        run(['/usr/bin/update-ca-trust'])
    except (IOError, OSError) as e:
        root_logger.info("Failed to copy %s to %s: %s" % (cacert_path,
                                                          new_cacert_path, e))
def main():
    """Update the CA DS schema and restart the pki directory server."""
    egid = os.getegid()
    if egid != 0:
        sys.exit("Must be root to run this script")

    standard_logging_setup(verbose=True)

    # In 3.0, restarting needs access to api.env
    api.bootstrap_with_global_options(context="server", confdir=paths.ETC_IPA)

    add_ca_schema()
    restart_pki_ds()

    root_logger.info("Schema updated successfully")
def main():
    """Install updated schema into the CA DS instance and restart it."""
    # non-zero effective GID means we are not running as root
    if os.getegid():
        sys.exit("Must be root to run this script")

    standard_logging_setup(verbose=True)

    # In 3.0, restarting needs access to api.env
    api.bootstrap_with_global_options(context='server', confdir=paths.ETC_IPA)

    add_ca_schema()
    restart_pki_ds()

    root_logger.info('Schema updated successfully')
def __request(self, name, args):
    """Perform JSON-RPC call *name*, optionally pretty-printing traffic.

    :param name: remote method name
    :param args: (args, options) pair; options may contain 'version'
    :returns: the decoded 'result' member of the response
    :raises JSONError: when the response body is not valid JSON
    :raises UnknownError: when the server reports an unrecognized code
    :raises: the mapped IPA error class for recognized server errors
    """
    # pretty-print request/response when verbosity >= 2
    print_json = self.__verbose >= 2
    payload = {'method': unicode(name), 'params': args, 'id': 0}
    version = args[1].get('version', VERSION_WITHOUT_CAPABILITIES)
    payload = json_encode_binary(
        payload, version, pretty_print=print_json)

    if print_json:
        root_logger.info(
            'Request: %s',
            payload
        )

    response = self.__transport.request(
        self.__host,
        self.__handler,
        payload.encode('utf-8'),
        verbose=self.__verbose >= 3,
    )

    if print_json:
        root_logger.info(
            'Response: %s',
            json.dumps(json.loads(response), sort_keys=True, indent=4)
        )

    try:
        response = json_decode_binary(response)
    except ValueError as e:
        raise JSONError(error=str(e))

    error = response.get('error')
    if error:
        try:
            # map the server-side error code to a concrete exception class
            error_class = errors_by_code[error['code']]
        except KeyError:
            raise UnknownError(
                code=error.get('code'),
                error=error.get('message'),
                server=self.__host,
            )
        else:
            kw = error.get('data', {})
            kw['message'] = error['message']
            raise error_class(**kw)

    return response['result']
def get_auto_reverse_zones(ip_addresses):
    """Return (ip, zone) pairs for addresses that need a new reverse zone.

    Addresses that already have a PTR record, or whose default reverse
    zone already exists, are skipped with an informational log message.
    """
    result = []
    for address in ip_addresses:
        if ipautil.reverse_record_exists(address):
            # PTR exist there is no reason to create reverse zone
            root_logger.info("Reverse record for IP address %s already "
                             "exists" % address)
            continue
        candidate = get_reverse_zone_default(address)
        try:
            ipautil.check_zone_overlap(candidate)
        except ValueError:
            root_logger.info("Reverse zone %s for IP address %s already exists"
                             % (candidate, address))
            continue
        result.append((address, candidate))
    return result
class JSONServerProxy(object):
    """JSON-RPC counterpart of xmlrpclib.ServerProxy.

    Encodes IPA commands as JSON, sends them through the supplied
    transport and decodes the response, raising the mapped IPA error
    class when the server reports a failure.
    """

    def __init__(self, uri, transport, encoding, verbose, allow_none):
        # split off the URI scheme; only HTTP(S) endpoints are supported
        # (local renamed from 'type', which shadowed the builtin)
        scheme, uri = urllib.splittype(uri)
        if scheme not in ("http", "https"):
            raise IOError("unsupported XML-RPC protocol")
        self.__host, self.__handler = urllib.splithost(uri)
        self.__transport = transport

        assert encoding == 'UTF-8'
        assert allow_none
        self.__verbose = verbose

        # FIXME: Some of our code requires ServerProxy internals.
        # But, xmlrpclib.ServerProxy's _ServerProxy__transport can be accessed
        # by calling serverproxy('transport')
        self._ServerProxy__transport = transport

    def __request(self, name, args):
        """Send JSON-RPC request *name*; return the decoded result.

        :raises JSONError: when the response body is not valid JSON
        :raises UnknownError: for unrecognized server error codes
        """
        payload = {'method': unicode(name), 'params': args, 'id': 0}
        version = args[1].get('version', VERSION_WITHOUT_CAPABILITIES)
        payload = json_encode_binary(payload, version)

        if self.__verbose >= 2:
            root_logger.info('Request: %s',
                             json.dumps(payload, sort_keys=True, indent=4))

        response = self.__transport.request(
            self.__host,
            self.__handler,
            json.dumps(payload),
            verbose=self.__verbose >= 3,
        )

        try:
            response = json_decode_binary(json.loads(response))
        except ValueError as e:  # fixed py2-only "except ValueError, e" syntax
            raise JSONError(str(e))

        if self.__verbose >= 2:
            root_logger.info(
                'Response: %s',
                json.dumps(json_encode_binary(response, version),
                           sort_keys=True, indent=4))

        error = response.get('error')
        if error:
            try:
                # map the server error code to a concrete exception class
                error_class = errors_by_code[error['code']]
            except KeyError:
                raise UnknownError(
                    code=error.get('code'),
                    error=error.get('message'),
                    server=self.__host,
                )
            else:
                raise error_class(message=error['message'])

        return response['result']
def convert_ipa_ca_cnames(self, domain_name):
    """Convert legacy ipa-ca CNAME records into A/AAAA records.

    The CNAMEs are only converted when every one of them points at a
    known IPA master; otherwise a warning is logged and the records
    are left for manual cleanup.
    """
    # get ipa-ca CNAMEs
    cnames = get_rr(domain_name, IPA_CA_RECORD, "CNAME", api=self.api)
    if not cnames:
        return

    root_logger.info('Converting IPA CA CNAME records to A/AAAA records')

    # create CNAME to FQDN mapping
    cname_fqdn = {}
    for cname in cnames:
        if cname.endswith('.'):
            fqdn = cname[:-1]
        else:
            fqdn = '%s.%s' % (cname, domain_name)
        cname_fqdn[cname] = fqdn

    # get FQDNs of all IPA masters
    ldap = self.api.Backend.ldap2
    try:
        entries = ldap.get_entries(
            DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'),
               self.api.env.basedn),
            ldap.SCOPE_ONELEVEL, None, ['cn'])
        masters = set(e['cn'][0] for e in entries)
    except errors.NotFound:
        masters = set()

    # check if all CNAMEs point to IPA masters
    for cname in cnames:
        fqdn = cname_fqdn[cname]
        if fqdn not in masters:
            root_logger.warning(
                "Cannot convert IPA CA CNAME records to A/AAAA records, "
                "please convert them manually if necessary")
            return

    # delete all CNAMEs
    for cname in cnames:
        del_rr(domain_name, IPA_CA_RECORD, "CNAME", cname, api=self.api)

    # add A/AAAA records
    for cname in cnames:
        fqdn = cname_fqdn[cname]
        self.add_ipa_ca_dns_records(fqdn, domain_name, None)
def get_auto_reverse_zones(ip_addresses):
    """Collect (ip, default_reverse_zone) pairs that still need creating.

    IPs with an existing PTR record, and IPs whose default reverse zone
    is already present in DNS, are logged and excluded.
    """
    pairs = []
    for addr in ip_addresses:
        if ipautil.reverse_record_exists(addr):
            # PTR exist there is no reason to create reverse zone
            root_logger.info("Reverse record for IP address %s already "
                             "exists" % addr)
            continue
        zone = get_reverse_zone_default(addr)
        try:
            dnsutil.check_zone_overlap(zone)
        except ValueError:
            root_logger.info(
                "Reverse zone %s for IP address %s already exists"
                % (zone, addr))
            continue
        pairs.append((addr, zone))
    return pairs
def run_script(main_function, operation_name, log_file_name=None,
               fail_message=None):
    """Run the given function as a command-line utility

    This function:

    - Runs the given function
    - Formats any errors
    - Exits with the appropriate code

    :param main_function: Function to call
    :param log_file_name: Name of the log file (displayed on unexpected errors)
    :param operation_name: Name of the script
    :param fail_message: Optional message displayed on failure
    """

    root_logger.info('Starting script: %s', operation_name)
    try:
        try:
            return_value = main_function()
        except BaseException as e:
            if (
                isinstance(e, SystemExit) and
                (e.code is None or e.code == 0)  # pylint: disable=no-member
            ):
                # Not an error after all
                root_logger.info('The %s command was successful',
                                 operation_name)
            else:
                # Log at the DEBUG level, which is not output to the console
                # (unless in debug/verbose mode), but is written to a logfile
                # if one is open.
                tb = sys.exc_info()[2]
                root_logger.debug('\n'.join(traceback.format_tb(tb)))
                root_logger.debug('The %s command failed, exception: %s: %s',
                                  operation_name, type(e).__name__, e)
                if fail_message and not isinstance(e, SystemExit):
                    print(fail_message)
            # re-raise so the outer handler formats the exit code/message
            raise
        else:
            if return_value:
                root_logger.info('The %s command failed, return value %s',
                                 operation_name, return_value)
            else:
                root_logger.info('The %s command was successful',
                                 operation_name)
            sys.exit(return_value)

    except BaseException as error:
        # single exit point: translate any exception into message + code
        message, exitcode = handle_error(error, log_file_name)
        if message:
            print(message, file=sys.stderr)
        sys.exit(exitcode)
def remove_ipa_ca_cnames(self, domain_name):
    """Remove legacy ipa-ca CNAME records from *domain_name*.

    The records are only deleted when every CNAME points at a known
    IPA master; otherwise a warning is logged and the records are left
    for manual cleanup.
    """
    # get ipa-ca CNAMEs
    try:
        cnames = get_rr(domain_name, IPA_CA_RECORD, "CNAME", api=self.api)
    except errors.NotFound:
        # zone does not exists
        cnames = None
    if not cnames:
        return

    root_logger.info('Removing IPA CA CNAME records')

    # create CNAME to FQDN mapping
    cname_fqdn = {}
    for cname in cnames:
        if cname.endswith('.'):
            fqdn = cname[:-1]
        else:
            fqdn = '%s.%s' % (cname, domain_name)
        cname_fqdn[cname] = fqdn

    # get FQDNs of all IPA masters
    ldap = self.api.Backend.ldap2
    try:
        entries = ldap.get_entries(
            DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'),
               self.api.env.basedn),
            ldap.SCOPE_ONELEVEL, None, ['cn'])
        masters = set(e['cn'][0] for e in entries)
    except errors.NotFound:
        masters = set()

    # check if all CNAMEs point to IPA masters
    for cname in cnames:
        fqdn = cname_fqdn[cname]
        if fqdn not in masters:
            root_logger.warning(
                "Cannot remove IPA CA CNAME please remove them manually "
                "if necessary")
            return

    # delete all CNAMEs
    for cname in cnames:
        del_rr(domain_name, IPA_CA_RECORD, "CNAME", cname, api=self.api)
def run_script(main_function, operation_name, log_file_name=None,
               fail_message=None):
    """Run the given function as a command-line utility

    This function:

    - Runs the given function
    - Formats any errors
    - Exits with the appropriate code

    :param main_function: Function to call
    :param log_file_name: Name of the log file (displayed on unexpected errors)
    :param operation_name: Name of the script
    :param fail_message: Optional message displayed on failure
    """

    root_logger.info('Starting script: %s', operation_name)
    try:
        try:
            return_value = main_function()
        except BaseException as e:
            if (
                isinstance(e, SystemExit) and
                (e.code is None or e.code == 0)  # pylint: disable=no-member
            ):
                # Not an error after all
                root_logger.info('The %s command was successful',
                                 operation_name)
            else:
                # Log at the DEBUG level, which is not output to the console
                # (unless in debug/verbose mode), but is written to a logfile
                # if one is open.
                tb = sys.exc_info()[2]
                root_logger.debug('\n'.join(traceback.format_tb(tb)))
                root_logger.debug('The %s command failed, exception: %s: %s',
                                  operation_name, type(e).__name__, e)
                if fail_message and not isinstance(e, SystemExit):
                    print(fail_message)
            # re-raise for the outer handler to convert into an exit code
            raise
        else:
            if return_value:
                root_logger.info('The %s command failed, return value %s',
                                 operation_name, return_value)
            else:
                root_logger.info('The %s command was successful',
                                 operation_name)
            sys.exit(return_value)

    except BaseException as error:
        # single exit point: translate any exception into message + code
        message, exitcode = handle_error(error, log_file_name)
        if message:
            print(message, file=sys.stderr)
        sys.exit(exitcode)
def __wait_keys(self, host, timeout=300):
    """Poll LDAP on *host* until our Custodia keys become visible.

    :param host: hostname to query over ldap://
    :param timeout: maximum number of seconds to keep polling
    :returns: whatever check_host_keys returns once the keys appear
    :raises RuntimeError: when *timeout* seconds elapse without keys
    """
    ldap_uri = 'ldap://%s' % host
    deadline = int(time.time()) + timeout
    root_logger.info("Waiting up to {} seconds to see our keys "
                     "appear on host: {}".format(timeout, host))

    conn = KEMLdap(ldap_uri)
    last_error = None
    while True:
        try:
            return conn.check_host_keys(self.fqdn)
        except Exception as exc:
            # log only once for the same error
            if not isinstance(exc, type(last_error)):
                root_logger.debug(
                    "Transient error getting keys: '{err}'".format(err=exc))
                last_error = exc
            if int(time.time()) > deadline:
                raise RuntimeError("Timed out trying to obtain keys.")
            time.sleep(1)
def __request(self, name, args):
    """Encode and send JSON-RPC request *name*; decode the raw response.

    :param name: remote method name
    :param args: (args, options) pair; options may contain 'version'
    :raises JSONError: when the response body is not valid JSON

    NOTE(review): this variant ends after decoding, with no error
    handling or return of the result visible here -- confirm against
    callers / later revisions of this method.
    """
    payload = {'method': unicode(name), 'params': args, 'id': 0}
    version = args[1].get('version', VERSION_WITHOUT_CAPABILITIES)
    payload = json_encode_binary(payload, version)

    if self.__verbose >= 2:
        root_logger.info('Request: %s',
                         json.dumps(payload, sort_keys=True, indent=4))

    response = self.__transport.request(
        self.__host,
        self.__handler,
        json.dumps(payload),
        verbose=self.__verbose >= 3,
    )

    try:
        response = json_decode_binary(json.loads(response))
    except ValueError as e:  # fixed py2-only "except ValueError, e" syntax
        raise JSONError(str(e))
def add_ca_schema():
    """Copy IPA schema files into the CA DS instance

    Existing target files whose content differs (compared by SHA-1)
    are only replaced after interactive confirmation; identical files
    are reported and skipped.
    """
    pki_pent = pwd.getpwnam(PKI_USER)
    ds_pent = pwd.getpwnam(DS_USER)
    for schema_fname in SCHEMA_FILENAMES:
        source_fname = os.path.join(ipautil.SHARE_DIR, schema_fname)
        target_fname = os.path.join(schema_dirname(SERVERID), schema_fname)
        if not os.path.exists(source_fname):
            root_logger.debug('File does not exist: %s', source_fname)
            continue
        if os.path.exists(target_fname):
            # compare digests and prompt before replacing changed files
            target_sha1 = _sha1_file(target_fname)
            source_sha1 = _sha1_file(source_fname)
            if target_sha1 != source_sha1:
                target_size = os.stat(target_fname).st_size
                source_size = os.stat(source_fname).st_size
                root_logger.info(
                    'Target file %s exists but the content is '
                    'different', target_fname)
                root_logger.info('\tTarget file: sha1: %s, size: %s B',
                                 target_sha1, target_size)
                root_logger.info('\tSource file: sha1: %s, size: %s B',
                                 source_sha1, source_size)
                if not ipautil.user_input(
                        "Do you want replace %s file?" % target_fname, True):
                    continue
            else:
                root_logger.info('Target exists, not overwriting: %s',
                                 target_fname)
                continue

        try:
            shutil.copyfile(source_fname, target_fname)
        except IOError as e:
            root_logger.warning('Could not install %s: %s', target_fname, e)
        else:
            root_logger.info('Installed %s', target_fname)

        os.chmod(target_fname, 0o440)    # read access for dirsrv user/group
        os.chown(target_fname, pki_pent.pw_uid, ds_pent.pw_gid)
def add_ca_schema():
    """Copy IPA schema files into the CA DS instance

    Missing source files and already-present target files are skipped
    with a log message; installed files are made readable by the
    dirsrv group and owned by the PKI user.
    """
    pki_pent = pwd.getpwnam(PKI_USER)
    ds_pent = pwd.getpwnam(DS_USER)
    for schema_fname in SCHEMA_FILENAMES:
        source_fname = os.path.join(ipautil.SHARE_DIR, schema_fname)
        target_fname = os.path.join(schema_dirname(SERVERID), schema_fname)
        if not os.path.exists(source_fname):
            root_logger.debug('File does not exist: %s', source_fname)
            continue
        if os.path.exists(target_fname):
            root_logger.info('Target exists, not overwriting: %s',
                             target_fname)
            continue
        try:
            shutil.copyfile(source_fname, target_fname)
        except IOError as e:  # fixed py2-only "except IOError, e" syntax
            root_logger.warning('Could not install %s: %s', target_fname, e)
        else:
            root_logger.info('Installed %s', target_fname)
        # 0440 rewritten as 0o440: py2-only octal literal is a
        # SyntaxError on Python 3
        os.chmod(target_fname, 0o440)    # read access for dirsrv user/group
        os.chown(target_fname, pki_pent.pw_uid, ds_pent.pw_gid)
def synconce_ntp(server_fqdn):
    """Run a single ntpd sync pass against *server_fqdn*.

    Primarily designed to be used before Kerberos setup so the local
    clock follows the KDC time.

    :returns: True if the sync succeeded, False otherwise
    """
    ntpd_bin = paths.NTPD
    if not os.path.exists(ntpd_bin):
        return False

    conf = ipautil.write_tmp_file('server %s' % server_fqdn)

    # The ntpd command will never exit if it is unable to reach the
    # server, so timeout after 15 seconds.
    max_wait = 15
    root_logger.info('Attempting to sync time using ntpd. '
                     'Will timeout after %d seconds' % max_wait)
    try:
        ipautil.run([ntpd_bin, '-qgc', conf.name], timeout=max_wait)
    except ipautil.CalledProcessError:
        return False
    return True
def add_ca_schema():
    """Copy IPA schema files into the CA DS instance

    Missing source files and already-present target files are skipped
    with a log message; installed files are made readable by the
    dirsrv group and owned by the PKI user.
    """
    pki_pent = pwd.getpwnam(PKI_USER)
    ds_pent = pwd.getpwnam(DS_USER)
    for schema_fname in SCHEMA_FILENAMES:
        source_fname = os.path.join(ipautil.SHARE_DIR, schema_fname)
        target_fname = os.path.join(schema_dirname(SERVERID), schema_fname)
        if not os.path.exists(source_fname):
            root_logger.debug('File does not exist: %s', source_fname)
            continue
        if os.path.exists(target_fname):
            root_logger.info(
                'Target exists, not overwriting: %s', target_fname)
            continue
        try:
            shutil.copyfile(source_fname, target_fname)
        except IOError as e:  # fixed py2-only "except IOError, e" syntax
            root_logger.warning('Could not install %s: %s', target_fname, e)
        else:
            root_logger.info('Installed %s', target_fname)
        # 0440 rewritten as 0o440: py2-only octal literal is a
        # SyntaxError on Python 3
        os.chmod(target_fname, 0o440)    # read access for dirsrv user/group
        os.chown(target_fname, pki_pent.pw_uid, ds_pent.pw_gid)
def update_mod_nss_cipher_suite():
    """Enable the ECDHE-RSA ciphers in mod_nss's NSSCipherSuite directive.

    Existing entries for those ciphers are flipped from '-' (disabled)
    to '+' (enabled); ciphers not yet listed are appended as enabled.
    """
    add_ciphers = ['ecdhe_rsa_aes_128_sha', 'ecdhe_rsa_aes_256_sha']
    ciphers = installutils.get_directive(NSS_CONF, 'NSSCipherSuite')

    # Run through once to see if any of the new ciphers are there but
    # disabled. If they are then enable them.
    lciphers = ciphers.split(',')
    new_ciphers = []
    for cipher in lciphers:
        for add in add_ciphers:
            if cipher.endswith(add):
                if cipher.startswith('-'):
                    cipher = '+%s' % add
        new_ciphers.append(cipher)

    # Run through again and add remaining ciphers as enabled.
    for add in add_ciphers:
        if add not in ciphers:
            new_ciphers.append('+%s' % add)

    ciphers = ','.join(new_ciphers)
    set_directive(NSS_CONF, 'NSSCipherSuite', ciphers, False)
    root_logger.info('Updated Apache cipher list')
def upgrade_instance(self):
    """Configure or reconfigure Custodia during upgrade.

    Creates the instance on first run; otherwise regenerates the
    config file and restarts the service only when the content
    actually changed.  Always tightens server.keys to mode 0600.
    """
    if not sysupgrade.get_upgrade_state("custodia", "installed"):
        root_logger.info("Custodia service is being configured")
        self.create_instance()
    else:
        # Use context managers so both file handles are always closed
        # (the original open(...).read() calls leaked descriptors).
        with open(self.config_file) as f:
            old_config = f.read()
        self.__config_file()
        with open(self.config_file) as f:
            new_config = f.read()
        if new_config != old_config:
            root_logger.info("Restarting Custodia")
            self.restart()

    # Key material must not be group/world readable.
    mode = os.stat(self.server_keys).st_mode
    if stat.S_IMODE(mode) != 0o600:
        root_logger.info("Secure server.keys mode")
        os.chmod(self.server_keys, 0o600)
def install_check(standalone, replica_config, options):
    """Pre-flight checks for CA installation (sys.exit-on-error variant).

    Validates replica prerequisites, refuses duplicate/partial external-CA
    installs, checks port 8443 availability, and in standalone mode scans
    the CA and DS NSS databases for conflicting certificates.  Aborts the
    process with sys.exit() on any failed check.
    """
    # Loaded external cert material is published via module globals.
    global external_cert_file
    global external_ca_file

    realm_name = options.realm_name
    host_name = options.host_name
    subject_base = options.subject

    # Replica path: validate against the master, then return early.
    if replica_config is not None:
        if standalone and api.env.ra_plugin == 'selfsign':
            sys.exit('A selfsign CA can not be added')
        if ((not options.promote
             and not ipautil.file_exists(replica_config.dir + "/cacert.p12"))):
            print('CA cannot be installed in CA-less setup.')
            sys.exit(1)
        if standalone and not options.skip_conncheck:
            principal = options.principal
            replica_conn_check(
                replica_config.master_host_name, host_name, realm_name, True,
                replica_config.ca_ds_port, options.admin_password,
                principal=principal, ca_cert_file=options.ca_cert_file)
        if options.skip_schema_check or options.promote:
            root_logger.info("Skipping CA DS schema check")
        else:
            cainstance.replica_ca_install_check(replica_config)
        return

    if standalone:
        # Adding a second CA master must go through ipa-ca-install
        # with a replica file instead.
        if api.Command.ca_is_enabled()['result']:
            sys.exit(
                "One or more CA masters are already present in IPA realm "
                "'%s'.\nIf you wish to replicate CA to this host, please "
                "re-run 'ipa-ca-install'\nwith a replica file generated on "
                "an existing CA master as argument." % realm_name
            )

    # External-CA two-stage install: enforce correct stage ordering.
    if options.external_cert_files:
        if not cainstance.is_step_one_done():
            # This can happen if someone passes external_ca_file without
            # already having done the first stage of the CA install.
            print("CA is not installed yet. To install with an external CA "
                  "is a two-stage process.\nFirst run the installer with "
                  "--external-ca.")
            sys.exit(1)
        external_cert_file, external_ca_file = installutils.load_external_cert(
            options.external_cert_files, options.subject)
    elif options.external_ca:
        if cainstance.is_step_one_done():
            print("CA is already installed.\nRun the installer with "
                  "--external-cert-file.")
            sys.exit(1)
        if ipautil.file_exists(paths.ROOT_IPA_CSR):
            print(("CA CSR file %s already exists.\nIn order to continue "
                   "remove the file and run the installer again."
                   % paths.ROOT_IPA_CSR))
            sys.exit(1)

    if not options.external_cert_files:
        if not cainstance.check_port():
            print("IPA requires port 8443 for PKI but it is currently in use.")
            sys.exit("Aborting installation")

    if standalone:
        # Refuse to proceed if either NSS DB already holds IPA CA/RA
        # certificates by nickname or by subject.
        dirname = dsinstance.config_dirname(
            installutils.realm_to_serverid(realm_name))
        cadb = certs.CertDB(realm_name, subject_base=subject_base)
        dsdb = certs.CertDB(realm_name, nssdir=dirname,
                            subject_base=subject_base)
        for db in (cadb, dsdb):
            for nickname, trust_flags in db.list_certs():
                if nickname in (certdb.get_ca_nickname(realm_name),
                                'ipaCert', 'Signing-Cert'):
                    print(("Certificate with nickname %s is present in %s, "
                           "cannot continue." % (nickname, db.secdir)))
                    sys.exit(1)

                cert = db.get_cert_from_db(nickname)
                if not cert:
                    continue
                subject = DN(str(x509.get_subject(cert)))
                if subject in (DN('CN=Certificate Authority', subject_base),
                               DN('CN=IPA RA', subject_base),
                               DN('CN=Object Signing Cert', subject_base)):
                    print(("Certificate with subject %s is present in %s, "
                           "cannot continue." % (subject, db.secdir)))
                    sys.exit(1)
def restart_http():
    """Restart the IPA-managed Apache (HTTP) instance."""
    root_logger.info('Restarting HTTP')
    store = sysrestore.FileStore('/var/lib/ipa/sysrestore')
    HTTPInstance(store).restart()
def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):
    """Write IPA CA certificates into the systemwide p11-kit store.

    ca_certs is an iterable of (der_cert, nickname, trusted,
    ext_key_usage) tuples.  Writes one [p11-kit-object-v1] record per
    certificate (plus one ExtendedKeyUsage extension record per unique
    public key), then reloads the systemwide trust store.  Returns True
    on success, False on any I/O or reload failure.
    """
    # pylint: disable=ipa-forbidden-import
    from ipalib import x509  # FixMe: break import cycle
    from ipalib.errors import CertificateError
    # pylint: enable=ipa-forbidden-import
    # Remove the legacy flat-PEM bundle; certificates now go through
    # the p11-kit file below instead.
    new_cacert_path = paths.SYSTEMWIDE_IPA_CA_CRT
    if os.path.exists(new_cacert_path):
        try:
            os.remove(new_cacert_path)
        except OSError as e:
            root_logger.error("Could not remove %s: %s", new_cacert_path, e)
            return False

    new_cacert_path = paths.IPA_P11_KIT
    try:
        f = open(new_cacert_path, 'w')
    except IOError as e:
        root_logger.info("Failed to open %s: %s" % (new_cacert_path, e))
        return False

    f.write("# This file was created by IPA. Do not edit.\n"
            "\n")

    # Tracks public keys we already emitted an EKU record for, so each
    # key gets at most one extension object.
    has_eku = set()
    for cert, nickname, trusted, ext_key_usage in ca_certs:
        try:
            subject = x509.get_der_subject(cert, x509.DER)
            issuer = x509.get_der_issuer(cert, x509.DER)
            serial_number = x509.get_der_serial_number(cert, x509.DER)
            public_key_info = x509.get_der_public_key_info(cert, x509.DER)
        except (PyAsn1Error, ValueError, CertificateError) as e:
            # Skip undecodable certificates rather than aborting the
            # whole store update.
            root_logger.warning("Failed to decode certificate \"%s\": %s",
                                nickname, e)
            continue

        # p11-kit fields are percent-encoded.
        label = urllib.parse.quote(nickname)
        subject = urllib.parse.quote(subject)
        issuer = urllib.parse.quote(issuer)
        serial_number = urllib.parse.quote(serial_number)
        public_key_info = urllib.parse.quote(public_key_info)

        cert = base64.b64encode(cert)
        cert = x509.make_pem(cert)

        obj = ("[p11-kit-object-v1]\n"
               "class: certificate\n"
               "certificate-type: x-509\n"
               "certificate-category: authority\n"
               "label: \"%(label)s\"\n"
               "subject: \"%(subject)s\"\n"
               "issuer: \"%(issuer)s\"\n"
               "serial-number: \"%(serial_number)s\"\n"
               "x-public-key-info: \"%(public_key_info)s\"\n" %
               dict(label=label, subject=subject, issuer=issuer,
                    serial_number=serial_number,
                    public_key_info=public_key_info))
        # trusted may be True, False or None (no explicit trust flag).
        if trusted is True:
            obj += "trusted: true\n"
        elif trusted is False:
            obj += "x-distrusted: true\n"
        obj += "%s\n\n" % cert
        f.write(obj)

        if ext_key_usage is not None and public_key_info not in has_eku:
            if not ext_key_usage:
                # Empty set means "no usages": encode the placeholder
                # OID so the cert is effectively unusable.
                ext_key_usage = {x509.EKU_PLACEHOLDER}
            try:
                ext_key_usage = x509.encode_ext_key_usage(ext_key_usage)
            except PyAsn1Error as e:
                root_logger.warning(
                    "Failed to encode extended key usage for \"%s\": %s",
                    nickname, e)
                continue
            value = urllib.parse.quote(ext_key_usage)
            obj = ("[p11-kit-object-v1]\n"
                   "class: x-certificate-extension\n"
                   "label: \"ExtendedKeyUsage for %(label)s\"\n"
                   "x-public-key-info: \"%(public_key_info)s\"\n"
                   "object-id: 2.5.29.37\n"
                   "value: \"%(value)s\"\n\n" %
                   dict(label=label, public_key_info=public_key_info,
                        value=value))
            f.write(obj)
            has_eku.add(public_key_info)

    f.close()

    # Add the CA to the systemwide CA trust database
    if not self.reload_systemwide_ca_store():
        return False

    return True
def install_check(standalone, replica_config, options):
    """Pre-flight checks for CA installation (ScriptError variant).

    Same validation flow as the sys.exit() based variant in this file:
    replica prerequisites, external-CA stage ordering, port 8443
    availability and NSS-database conflicts — but failures raise
    ScriptError so the caller controls process exit.
    """
    # Loaded external cert material is published via module globals.
    global external_cert_file
    global external_ca_file

    realm_name = options.realm_name
    host_name = options.host_name
    subject_base = options.subject

    # Replica path: validate against the master, then return early.
    if replica_config is not None:
        if standalone and api.env.ra_plugin == 'selfsign':
            raise ScriptError('A selfsign CA can not be added')

        if ((not options.promote
             and not ipautil.file_exists(replica_config.dir + "/cacert.p12"))):
            raise ScriptError('CA cannot be installed in CA-less setup.')

        if standalone and not options.skip_conncheck:
            principal = options.principal
            replica_conn_check(replica_config.master_host_name, host_name,
                               realm_name, True, replica_config.ca_ds_port,
                               options.admin_password, principal=principal,
                               ca_cert_file=options.ca_cert_file)

        if options.skip_schema_check or options.promote:
            root_logger.info("Skipping CA DS schema check")
        else:
            cainstance.replica_ca_install_check(replica_config)

        return

    if standalone:
        # Adding a second CA master must go through ipa-ca-install
        # with a replica file instead.
        if api.Command.ca_is_enabled()['result']:
            raise ScriptError(
                "One or more CA masters are already present in IPA realm "
                "'%s'.\nIf you wish to replicate CA to this host, please "
                "re-run 'ipa-ca-install'\nwith a replica file generated on "
                "an existing CA master as argument." % realm_name)

    # External-CA two-stage install: enforce correct stage ordering.
    if options.external_cert_files:
        if not cainstance.is_step_one_done():
            # This can happen if someone passes external_ca_file without
            # already having done the first stage of the CA install.
            raise ScriptError(
                "CA is not installed yet. To install with an external CA "
                "is a two-stage process.\nFirst run the installer with "
                "--external-ca.")

        external_cert_file, external_ca_file = installutils.load_external_cert(
            options.external_cert_files, options.subject)
    elif options.external_ca:
        if cainstance.is_step_one_done():
            raise ScriptError(
                "CA is already installed.\nRun the installer with "
                "--external-cert-file.")
        if ipautil.file_exists(paths.ROOT_IPA_CSR):
            raise ScriptError(
                "CA CSR file %s already exists.\nIn order to continue "
                "remove the file and run the installer again."
                % paths.ROOT_IPA_CSR)

    if not options.external_cert_files:
        if not cainstance.check_port():
            print("IPA requires port 8443 for PKI but it is currently in use.")
            raise ScriptError("Aborting installation")

    if standalone:
        # Refuse to proceed if either NSS DB already holds IPA CA/RA
        # certificates by nickname or by subject.
        dirname = dsinstance.config_dirname(
            installutils.realm_to_serverid(realm_name))
        cadb = certs.CertDB(realm_name, subject_base=subject_base)
        dsdb = certs.CertDB(realm_name, nssdir=dirname,
                            subject_base=subject_base)
        for db in (cadb, dsdb):
            for nickname, _trust_flags in db.list_certs():
                if nickname in (certdb.get_ca_nickname(realm_name),
                                'ipaCert', 'Signing-Cert'):
                    raise ScriptError(
                        "Certificate with nickname %s is present in %s, "
                        "cannot continue." % (nickname, db.secdir))

                cert = db.get_cert_from_db(nickname)
                if not cert:
                    continue
                subject = DN(str(x509.get_subject(cert)))
                if subject in (DN('CN=Certificate Authority', subject_base),
                               DN('CN=IPA RA', subject_base),
                               DN('CN=Object Signing Cert', subject_base)):
                    raise ScriptError(
                        "Certificate with subject %s is present in %s, "
                        "cannot continue." % (subject, db.secdir))
def uninstall(self):
    """Unconfigure the DNSSEC (OpenDNSSEC) component of this server.

    Stops the DNSSEC services, exports and backs up the OpenDNSSEC
    database, restores all backed-up config files, and re-enables or
    restarts services according to their saved pre-install state.
    """
    if not self.is_configured():
        return

    self.print_msg("Unconfiguring %s" % self.service_name)

    running = self.restore_state("running")
    enabled = self.restore_state("enabled")

    # stop DNSSEC services before backing up kasp.db; best-effort,
    # a stop failure must not abort uninstallation
    try:
        self.stop()
    except Exception:
        pass

    ods_exporter = services.service('ipa-ods-exporter')
    try:
        ods_exporter.stop()
    except Exception:
        pass

    # remove directive from ipa-dnskeysyncd, this server is not DNSSEC
    # master anymore
    installutils.set_directive(paths.SYSCONFIG_IPA_DNSKEYSYNCD,
                               'ISMASTER', None,
                               quotes=False, separator='=')

    restore_list = [paths.OPENDNSSEC_CONF_FILE, paths.OPENDNSSEC_KASP_FILE,
                    paths.SYSCONFIG_ODS, paths.OPENDNSSEC_ZONELIST_FILE]

    if ipautil.file_exists(paths.OPENDNSSEC_KASP_DB):
        # force to export data
        # (dropped the unused "ods_enforcerd" local the original
        # assigned here and never referenced)
        cmd = [paths.IPA_ODS_EXPORTER, 'ipa-full-update']
        try:
            self.print_msg("Exporting DNSSEC data before uninstallation")
            ipautil.run(cmd, runas=constants.ODS_USER)
        except CalledProcessError:
            root_logger.error("DNSSEC data export failed")

        try:
            shutil.copy(paths.OPENDNSSEC_KASP_DB, paths.IPA_KASP_DB_BACKUP)
        except IOError as e:
            root_logger.error(
                "Unable to backup OpenDNSSEC database %s, "
                "restore will be skipped: %s", paths.OPENDNSSEC_KASP_DB, e)
        else:
            root_logger.info("OpenDNSSEC database backed up in %s",
                             paths.IPA_KASP_DB_BACKUP)
            # restore OpenDNSSEC's KASP DB only if backup succeeded
            # removing the file without backup could totally break DNSSEC
            restore_list.append(paths.OPENDNSSEC_KASP_DB)

    for f in restore_list:
        try:
            self.fstore.restore_file(f)
        except ValueError as error:
            root_logger.debug(error)

    self.restore_state("kasp_db_configured")  # just eat state

    # disabled by default, by ldap_enable()
    if enabled:
        self.enable()

    if running:
        self.restart()
def check_reverse_zones(ip_addresses, reverse_zones, options, unattended,
                        search_reverse_zones=False):
    """Validate requested reverse DNS zones and propose missing ones.

    Returns the list of normalized reverse zone names to use, or []
    when reverse zone management is disabled.  In unattended mode an
    overlapping zone raises ScriptError; interactively the user is
    asked whether missing zones should be created.
    """
    checked_reverse_zones = []

    # No zones given and no explicit choice made: decide (or ask)
    # whether reverse zones should be managed at all.
    if (not options.no_reverse and not reverse_zones and
            not options.auto_reverse):
        if unattended:
            options.no_reverse = True
        else:
            options.no_reverse = not create_reverse()

    # shortcut
    if options.no_reverse:
        return []

    # verify zones passed in options
    for rz in reverse_zones:
        # isn't the zone managed by someone else
        if not options.allow_zone_overlap:
            try:
                dnsutil.check_zone_overlap(rz)
            except ValueError as e:
                msg = "Reverse zone %s will not be used: %s" % (rz, e)
                if unattended:
                    raise ScriptError(msg)
                else:
                    root_logger.warning(msg)
                # zone rejected: skip the append below
                continue
        checked_reverse_zones.append(normalize_zone(rz))

    # check that there is reverse zone for every IP
    ips_missing_reverse = []
    for ip in ip_addresses:
        if search_reverse_zones and find_reverse_zone(str(ip)):
            # reverse zone is already in LDAP
            continue
        for rz in checked_reverse_zones:
            if verify_reverse_zone(rz, ip):
                # reverse zone was entered by user
                break
        else:
            # no accepted zone covers this address
            ips_missing_reverse.append(ip)

    # create reverse zone for IP addresses that does not have one
    for (ip, rz) in get_auto_reverse_zones(ips_missing_reverse):
        if options.auto_reverse:
            root_logger.info("Reverse zone %s will be created" % rz)
            checked_reverse_zones.append(rz)
        elif unattended:
            root_logger.warning("Missing reverse record for IP address %s"
                                % ip)
        else:
            if ipautil.user_input("Do you want to create reverse zone for IP "
                                  "%s" % ip, True):
                rz = read_reverse_zone(rz, str(ip), options.allow_zone_overlap)
                checked_reverse_zones.append(rz)

    return checked_reverse_zones
def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):
    """Write IPA CA certificates into the systemwide p11-kit store.

    ca_certs is an iterable of (der_cert, nickname, trusted,
    ext_key_usage) tuples.  Emits one [p11-kit-object-v1] record per
    certificate plus one ExtendedKeyUsage extension record per unique
    public key, then reloads the systemwide trust store.  Returns True
    on success, False on any I/O or reload failure.
    """
    # Remove the legacy flat-PEM bundle; certificates now go through
    # the p11-kit file below instead.
    new_cacert_path = paths.SYSTEMWIDE_IPA_CA_CRT

    if os.path.exists(new_cacert_path):
        try:
            os.remove(new_cacert_path)
        except OSError as e:
            root_logger.error(
                "Could not remove %s: %s", new_cacert_path, e)
            return False

    new_cacert_path = paths.IPA_P11_KIT

    try:
        f = open(new_cacert_path, 'w')
    except IOError as e:
        root_logger.info("Failed to open %s: %s" % (new_cacert_path, e))
        return False

    f.write("# This file was created by IPA. Do not edit.\n"
            "\n")

    # Tracks public keys we already emitted an EKU record for, so each
    # key gets at most one extension object.
    has_eku = set()
    for cert, nickname, trusted, ext_key_usage in ca_certs:
        try:
            subject = x509.get_der_subject(cert, x509.DER)
            issuer = x509.get_der_issuer(cert, x509.DER)
            serial_number = x509.get_der_serial_number(cert, x509.DER)
            public_key_info = x509.get_der_public_key_info(cert, x509.DER)
        except (NSPRError, PyAsn1Error, ValueError) as e:
            # Skip undecodable certificates rather than aborting the
            # whole store update.
            root_logger.warning(
                "Failed to decode certificate \"%s\": %s", nickname, e)
            continue

        # p11-kit fields are percent-encoded.
        label = urllib.parse.quote(nickname)
        subject = urllib.parse.quote(subject)
        issuer = urllib.parse.quote(issuer)
        serial_number = urllib.parse.quote(serial_number)
        public_key_info = urllib.parse.quote(public_key_info)

        cert = base64.b64encode(cert)
        cert = x509.make_pem(cert)

        obj = ("[p11-kit-object-v1]\n"
               "class: certificate\n"
               "certificate-type: x-509\n"
               "certificate-category: authority\n"
               "label: \"%(label)s\"\n"
               "subject: \"%(subject)s\"\n"
               "issuer: \"%(issuer)s\"\n"
               "serial-number: \"%(serial_number)s\"\n"
               "x-public-key-info: \"%(public_key_info)s\"\n" %
               dict(label=label, subject=subject, issuer=issuer,
                    serial_number=serial_number,
                    public_key_info=public_key_info))
        # trusted may be True, False or None (no explicit trust flag).
        if trusted is True:
            obj += "trusted: true\n"
        elif trusted is False:
            obj += "x-distrusted: true\n"
        obj += "%s\n\n" % cert
        f.write(obj)

        if ext_key_usage is not None and public_key_info not in has_eku:
            if not ext_key_usage:
                # Empty set means "no usages": encode the placeholder
                # OID so the cert is effectively unusable.
                ext_key_usage = {x509.EKU_PLACEHOLDER}
            try:
                ext_key_usage = x509.encode_ext_key_usage(ext_key_usage)
            except PyAsn1Error as e:
                root_logger.warning(
                    "Failed to encode extended key usage for \"%s\": %s",
                    nickname, e)
                continue
            value = urllib.parse.quote(ext_key_usage)
            obj = ("[p11-kit-object-v1]\n"
                   "class: x-certificate-extension\n"
                   "label: \"ExtendedKeyUsage for %(label)s\"\n"
                   "x-public-key-info: \"%(public_key_info)s\"\n"
                   "object-id: 2.5.29.37\n"
                   "value: \"%(value)s\"\n\n" %
                   dict(label=label, public_key_info=public_key_info,
                        value=value))
            f.write(obj)
            has_eku.add(public_key_info)

    f.close()

    # Add the CA to the systemwide CA trust database
    if not self.reload_systemwide_ca_store():
        return False

    return True
def restart_pki_ds():
    """Restart the CA DS instance so newly installed schema is loaded."""
    root_logger.info('Restarting CA DS')
    dirsrv = services.service('dirsrv')
    dirsrv.restart(SERVERID)
def check_creds(options, realm_name):
    """Ensure usable Kerberos credentials exist for the installer.

    Uses the default ccache when it holds the requested principal;
    otherwise obtains a password (option, prompt, or stdin), kinits
    into a fresh temporary ccache and points KRB5CCNAME at it.
    Raises ScriptError when no password can be obtained or kinit fails.
    """
    # Check if ccache is available
    default_cred = None
    try:
        root_logger.debug('KRB5CCNAME set to %s' %
                          os.environ.get('KRB5CCNAME', None))
        # get default creds, will raise if none found
        default_cred = gssapi.creds.Credentials()
        principal = str(default_cred.name)
    except gssapi.raw.misc.GSSError as e:
        root_logger.debug('Failed to find default ccache: %s' % e)
        principal = None

    # Check if the principal matches the requested one (if any)
    if principal is not None and options.principal is not None:
        op = options.principal
        if op.find('@') == -1:
            # qualify a bare principal with the realm before comparing
            op = '%s@%s' % (op, realm_name)
        if principal != op:
            root_logger.debug('Specified principal %s does not match '
                              'available credentials (%s)' %
                              (options.principal, principal))
            principal = None

    if principal is None:
        # No usable ccache: create a private one and kinit into it.
        (ccache_fd, ccache_name) = tempfile.mkstemp()
        os.close(ccache_fd)
        options.created_ccache_file = ccache_name

        if options.principal is not None:
            principal = options.principal
        else:
            principal = 'admin'
        stdin = None
        if principal.find('@') == -1:
            principal = '%s@%s' % (principal, realm_name)
        if options.admin_password is not None:
            stdin = options.admin_password
        else:
            if not options.unattended:
                try:
                    stdin = getpass.getpass("Password for %s: " % principal)
                except EOFError:
                    stdin = None
                if not stdin:
                    root_logger.error(
                        "Password must be provided for %s.", principal)
                    raise ScriptError("Missing password for %s" % principal)
            else:
                # unattended: password may only come from stdin, and
                # only when stdin is not a terminal
                if sys.stdin.isatty():
                    root_logger.error("Password must be provided in " +
                                      "non-interactive mode.")
                    root_logger.info("This can be done via " +
                                     "echo password | ipa-client-install " +
                                     "... or with the -w option.")
                    raise ScriptError("Missing password for %s" % principal)
                else:
                    stdin = sys.stdin.readline()

        # set options.admin_password for future use
        options.admin_password = stdin

        try:
            ipautil.kinit_password(principal, stdin, ccache_name)
        except RuntimeError as e:
            root_logger.error("Kerberos authentication failed: %s" % e)
            raise ScriptError("Invalid credentials: %s" % e)

        # make the fresh ccache the process default
        os.environ['KRB5CCNAME'] = ccache_name
def uninstall(self):
    """Unconfigure the DNSSEC (OpenDNSSEC) component of this server.

    Stops the DNSSEC services, exports and backs up the OpenDNSSEC
    database, restores all backed-up config files, and re-enables or
    restarts services according to their saved pre-install state.
    """
    if not self.is_configured():
        return

    self.print_msg("Unconfiguring %s" % self.service_name)

    running = self.restore_state("running")
    enabled = self.restore_state("enabled")

    # stop DNSSEC services before backing up kasp.db; best-effort,
    # a stop failure must not abort uninstallation
    try:
        self.stop()
    except Exception:
        pass

    ods_exporter = services.service('ipa-ods-exporter', api)
    try:
        ods_exporter.stop()
    except Exception:
        pass

    # remove directive from ipa-dnskeysyncd, this server is not DNSSEC
    # master anymore
    installutils.set_directive(paths.SYSCONFIG_IPA_DNSKEYSYNCD,
                               'ISMASTER', None,
                               quotes=False, separator='=')

    restore_list = [
        paths.OPENDNSSEC_CONF_FILE,
        paths.OPENDNSSEC_KASP_FILE,
        paths.SYSCONFIG_ODS,
        paths.OPENDNSSEC_ZONELIST_FILE
    ]

    if ipautil.file_exists(paths.OPENDNSSEC_KASP_DB):
        # force to export data
        cmd = [paths.IPA_ODS_EXPORTER, 'ipa-full-update']
        try:
            self.print_msg("Exporting DNSSEC data before uninstallation")
            ipautil.run(cmd, runas=constants.ODS_USER)
        except CalledProcessError:
            root_logger.error("DNSSEC data export failed")

        try:
            shutil.copy(paths.OPENDNSSEC_KASP_DB, paths.IPA_KASP_DB_BACKUP)
        except IOError as e:
            root_logger.error(
                "Unable to backup OpenDNSSEC database %s, "
                "restore will be skipped: %s", paths.OPENDNSSEC_KASP_DB, e)
        else:
            root_logger.info("OpenDNSSEC database backed up in %s",
                             paths.IPA_KASP_DB_BACKUP)
            # restore OpenDNSSEC's KASP DB only if backup succeeded
            # removing the file without backup could totally break DNSSEC
            restore_list.append(paths.OPENDNSSEC_KASP_DB)

    for f in restore_list:
        try:
            self.fstore.restore_file(f)
        except ValueError as error:
            root_logger.debug(error)

    self.restore_state("kasp_db_configured")  # just eat state

    # disabled by default, by ldap_enable()
    if enabled:
        self.enable()

    if running:
        self.restart()
def modify_pam_to_use_krb5(self, statestore):
    """Enable krb5 in the PAM stack via authconfig (backs up prior state)."""
    auth_config = RedHatAuthConfig()
    statestore.backup_state('authconfig', 'krb5', True)
    auth_config.enable("krb5")
    auth_config.add_option("nostart")
    auth_config.execute()

def reload_systemwide_ca_store(self):
    """Run update-ca-trust to reload the systemwide CA trust database.

    Returns True on success, False when the command fails.
    """
    try:
        ipautil.run([paths.UPDATE_CA_TRUST])
    except CalledProcessError, e:
        root_logger.error(
            "Could not update systemwide CA trust database: %s", e)
        return False
    else:
        root_logger.info("Systemwide CA database updated.")
        return True

def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):
    # NOTE(review): this definition continues beyond this excerpt;
    # only the removal of the legacy bundle is visible here.
    new_cacert_path = paths.SYSTEMWIDE_IPA_CA_CRT

    if os.path.exists(new_cacert_path):
        try:
            os.remove(new_cacert_path)
        except OSError, e:
            root_logger.error("Could not remove %s: %s", new_cacert_path, e)
            return False

    new_cacert_path = paths.IPA_P11_KIT
# We do not let the system start IPA components on its own, # Instead we reply on the IPA init script to start only enabled # components as found in our LDAP configuration tree # Note that self.dm_password is None for ADTrustInstance because # we ensure to be called as root and using ldapi to use autobind try: self.ldap_enable('ADTRUST', self.fqdn, self.dm_password, \ self.suffix) except (ldap.ALREADY_EXISTS, errors.DuplicateEntry), e: root_logger.info("ADTRUST Service startup entry already exists.") try: self.ldap_enable('EXTID', self.fqdn, self.dm_password, \ self.suffix) except (ldap.ALREADY_EXISTS, errors.DuplicateEntry), e: root_logger.info("EXTID Service startup entry already exists.") def __setup_sub_dict(self): self.sub_dict = dict(REALM=self.realm, SUFFIX=self.suffix, NETBIOS_NAME=self.netbios_name, HOST_NETBIOS_NAME=self.host_netbios_name, SMB_DN=self.smb_dn, LDAPI_SOCKET=self.ldapi_socket, FQDN=self.fqdn) def setup(self, fqdn, realm_name, domain_name, netbios_name,
def install(filename, options):
    """Install an IPA replica from the prepared replica info file.

    Writes /etc/ipa/default.conf, validates against the master over
    LDAP (password, existing agreement/host entry, domain level, DNS
    resolution), then installs and starts the individual components:
    NTP, DS, DNS records, CA, KRB, HTTP, OTPD, optional KRA/DNS, and
    finally runs ipa-client-install on this host.
    """
    global config

    dogtag_constants = dogtag.install_constants

    # Create the management framework config file
    # Note: We must do this before bootstraping and finalizing ipalib.api
    old_umask = os.umask(022)  # must be readable for httpd
    try:
        fd = open(paths.IPA_DEFAULT_CONF, "w")
        fd.write("[global]\n")
        fd.write("host=%s\n" % config.host_name)
        fd.write("basedn=%s\n" %
                 str(ipautil.realm_to_suffix(config.realm_name)))
        fd.write("realm=%s\n" % config.realm_name)
        fd.write("domain=%s\n" % config.domain_name)
        fd.write("xmlrpc_uri=https://%s/ipa/xml\n" %
                 ipautil.format_netloc(config.host_name))
        fd.write("ldap_uri=ldapi://%%2fvar%%2frun%%2fslapd-%s.socket\n" %
                 installutils.realm_to_serverid(config.realm_name))
        # RA (certificate agent) is only enabled when the replica file
        # carries CA material.
        if ipautil.file_exists(config.dir + "/cacert.p12"):
            fd.write("enable_ra=True\n")
            fd.write("ra_plugin=dogtag\n")
            fd.write("dogtag_version=%s\n" %
                     dogtag_constants.DOGTAG_VERSION)
        else:
            fd.write("enable_ra=False\n")
            fd.write("ra_plugin=none\n")
        fd.write("enable_kra=%s\n" % config.setup_kra)
        fd.write("mode=production\n")
        fd.close()
    finally:
        os.umask(old_umask)

    api.bootstrap(in_server=True, context='installer')
    api.finalize()

    # Create DS user/group if it doesn't exist yet
    dsinstance.create_ds_user()

    cafile = config.dir + "/ca.crt"
    ldapuri = 'ldaps://%s' % ipautil.format_netloc(config.master_host_name)
    remote_api = create_api(mode=None)
    remote_api.bootstrap(in_server=True, context='installer',
                         ldap_uri=ldapuri, basedn=DN())
    remote_api.finalize()
    conn = remote_api.Backend.ldap2
    replman = None
    try:
        try:
            # Try out the password
            conn.connect(bind_dn=DIRMAN_DN, bind_pw=config.dirman_password,
                         tls_cacertfile=cafile)
            replman = ReplicationManager(config.realm_name,
                                         config.master_host_name,
                                         config.dirman_password)

            # Check that we don't already have a replication agreement
            try:
                (agreement_cn, agreement_dn) = replman.agreement_dn(
                    config.host_name)
                entry = conn.get_entry(agreement_dn, ['*'])
            except errors.NotFound:
                pass
            else:
                # NotFound is the good case; reaching here means an
                # agreement already exists and install must stop.
                root_logger.info('Error: A replication agreement for this '
                                 'host already exists.')
                print('A replication agreement for this host already exists. '
                      'It needs to be removed.')
                print "Run this on the master that generated the info file:"
                print(" %% ipa-replica-manage del %s --force"
                      % config.host_name)
                sys.exit(3)

            # Detect the current domain level
            try:
                current = remote_api.Command['domainlevel_get']()['result']
            except errors.NotFound:
                # If we're joining an older master, domain entry is not
                # available
                current = 0

            # Detect if current level is out of supported range
            # for this IPA version
            under_lower_bound = current < constants.MIN_DOMAIN_LEVEL
            above_upper_bound = current > constants.MAX_DOMAIN_LEVEL

            if under_lower_bound or above_upper_bound:
                message = ("This version of FreeIPA does not support "
                           "the Domain Level which is currently set for "
                           "this domain. The Domain Level needs to be "
                           "raised before installing a replica with "
                           "this version is allowed to be installed "
                           "within this domain.")
                root_logger.error(message)
                print(message)
                sys.exit(3)

            # Check pre-existing host entry
            try:
                entry = conn.find_entries(u'fqdn=%s' % config.host_name,
                                          ['fqdn'],
                                          DN(api.env.container_host,
                                             api.env.basedn))
            except errors.NotFound:
                pass
            else:
                root_logger.info('Error: Host %s already exists on the master '
                                 'server.' % config.host_name)
                print('The host %s already exists on the master server.'
                      % config.host_name)
                print "You should remove it before proceeding:"
                print " %% ipa host-del %s" % config.host_name
                sys.exit(3)

            # Install CA cert so that we can do SSL connections with ldap
            install_ca_cert(conn, api.env.basedn, api.env.realm, cafile)

            dns_masters = remote_api.Object['dnsrecord'].get_dns_masters()
            if dns_masters:
                if not options.no_host_dns:
                    master = config.master_host_name
                    root_logger.debug('Check forward/reverse DNS resolution')
                    resolution_ok = (
                        check_dns_resolution(master, dns_masters) and
                        check_dns_resolution(config.host_name, dns_masters))
                    if not resolution_ok and not options.unattended:
                        if not ipautil.user_input("Continue?", False):
                            sys.exit(0)
            else:
                root_logger.debug('No IPA DNS servers, '
                                  'skipping forward/reverse resolution check')
        except errors.ACIError:
            sys.exit("\nThe password provided is incorrect for LDAP server "
                     "%s" % config.master_host_name)
        except errors.LDAPError:
            sys.exit("\nUnable to connect to LDAP server %s"
                     % config.master_host_name)
        finally:
            if replman and replman.conn:
                replman.conn.unbind()

        # Configure ntpd
        if options.conf_ntp:
            ipaclient.ntpconf.force_ntpd(sstore)
            ntp = ntpinstance.NTPInstance()
            ntp.create_instance()

        # Configure dirsrv
        ds = install_replica_ds(config)

        # Always try to install DNS records
        install_dns_records(config, options, remote_api)
    finally:
        if conn.isconnected():
            conn.disconnect()

    if config.setup_ca:
        options.realm_name = config.realm_name
        options.domain_name = config.domain_name
        options.dm_password = config.dirman_password
        options.host_name = config.host_name
        ca.install(False, config, options)

    krb = install_krb(config, setup_pkinit=options.setup_pkinit)
    http = install_http(config, auto_redirect=options.ui_redirect)

    otpd = otpdinstance.OtpdInstance()
    otpd.create_instance('OTPD', config.host_name, config.dirman_password,
                         ipautil.realm_to_suffix(config.realm_name))

    # The DS instance is created before the keytab, add the SSL cert we
    # generated
    ds.add_cert_to_service()

    # Apply any LDAP updates. Needs to be done after the replica is
    # synced-up
    service.print_msg("Applying LDAP updates")
    ds.apply_updates()

    if options.setup_kra:
        kra.install(config, options, config.dirman_password)
    else:
        service.print_msg("Restarting the directory server")
        ds.restart()

    service.print_msg("Restarting the KDC")
    krb.restart()

    if config.setup_ca:
        dogtag_service = services.knownservices[
            dogtag_constants.SERVICE_NAME]
        dogtag_service.restart(dogtag_constants.PKI_INSTANCE_NAME)

    if options.setup_dns:
        api.Backend.ldap2.connect(autobind=True)
        dns.install(False, True, options)

    # Restart httpd to pick up the new IPA configuration
    service.print_msg("Restarting the web server")
    http.restart()

    # Call client install script
    try:
        args = [paths.IPA_CLIENT_INSTALL, "--on-master", "--unattended",
                "--domain", config.domain_name, "--server", config.host_name,
                "--realm", config.realm_name]
        if not options.create_sshfp:
            args.append("--no-dns-sshfp")
        if options.trust_sshfp:
            args.append("--ssh-trust-dns")
        if not options.conf_ssh:
            args.append("--no-ssh")
        if not options.conf_sshd:
            args.append("--no-sshd")
        if options.mkhomedir:
            args.append("--mkhomedir")
        ipautil.run(args)
    except Exception, e:
        print "Configuration of client side components failed!"
        print "ipa-client-install returned: " + str(e)
        raise RuntimeError("Failed to configure the client")
operation_name) else: # Log at the DEBUG level, which is not output to the console # (unless in debug/verbose mode), but is written to a logfile # if one is open. tb = sys.exc_info()[2] root_logger.debug('\n'.join(traceback.format_tb(tb))) root_logger.debug('The %s command failed, exception: %s: %s', operation_name, type(e).__name__, e) if fail_message and not isinstance(e, SystemExit): print fail_message raise else: if return_value: root_logger.info('The %s command failed, return value %s', operation_name, return_value) else: root_logger.info('The %s command was successful', operation_name) sys.exit(return_value) except BaseException, error: message, exitcode = handle_error(error, log_file_name) if message: print >> sys.stderr, message sys.exit(exitcode) def handle_error(error, log_file_name=None): """Handle specific errors. Returns a message and return code"""
reverse_zone = bindinstance.find_reverse_zone(ip, remote_api) bind.add_master_dns_records(config.host_name, str(ip), config.realm_name, config.domain_name, reverse_zone, options.conf_ntp, options.setup_ca) except errors.NotFound, e: root_logger.debug('Replica DNS records could not be added ' 'on master: %s', str(e)) # we should not fail here no matter what except Exception, e: root_logger.info('Replica DNS records could not be added ' 'on master: %s', str(e)) def check_dirsrv(): (ds_unsecure, ds_secure) = dsinstance.check_ports() if not ds_unsecure or not ds_secure: print "IPA requires ports 389 and 636 for the Directory Server." print "These are currently in use:" if not ds_unsecure: print "\t389" if not ds_secure: print "\t636" sys.exit(1) def check_dns_resolution(host_name, dns_servers):
def install_check(standalone, replica_config, options):
    """Pre-flight checks for CA installation (ca_subject-aware variant).

    Resolves the effective subject base / CA subject into private
    options knobs, validates replica prerequisites, external-CA stage
    ordering, port 8443 availability and NSS-database conflicts.
    Raises ScriptError on any failed check.
    """
    # Loaded external cert material is published via module globals.
    global external_cert_file
    global external_ca_file

    realm_name = options.realm_name
    host_name = options.host_name

    if replica_config is None:
        options._subject_base = options.subject_base
        options._ca_subject = options.ca_subject
    else:
        # during replica install, this gets invoked before local DS is
        # available, so use the remote api.
        _api = api if standalone else options._remote_api

        # for replica-install the knobs cannot be written, hence leading '_'
        options._subject_base = six.text_type(replica_config.subject_base)
        options._ca_subject = lookup_ca_subject(_api, options._subject_base)

    # Nothing more to validate when the replica is CA-less.
    if replica_config is not None and not replica_config.setup_ca:
        return

    # Replica path: validate against the master, then return early.
    if replica_config is not None:
        if standalone and api.env.ra_plugin == 'selfsign':
            raise ScriptError('A selfsign CA can not be added')

        cafile = os.path.join(replica_config.dir, 'cacert.p12')
        if not options.promote and not ipautil.file_exists(cafile):
            raise ScriptError('CA cannot be installed in CA-less setup.')

        if standalone and not options.skip_conncheck:
            principal = options.principal
            replica_conn_check(
                replica_config.ca_host_name, host_name, realm_name, True,
                replica_config.ca_ds_port, options.admin_password,
                principal=principal, ca_cert_file=options.ca_cert_file)

        if options.skip_schema_check:
            root_logger.info("Skipping CA DS schema check")
        else:
            cainstance.replica_ca_install_check(replica_config,
                                                options.promote)

        return

    if standalone:
        # Adding a second CA master must go through ipa-ca-install
        # with a replica file instead.
        if api.Command.ca_is_enabled()['result']:
            raise ScriptError(
                "One or more CA masters are already present in IPA realm "
                "'%s'.\nIf you wish to replicate CA to this host, please "
                "re-run 'ipa-ca-install'\nwith a replica file generated on "
                "an existing CA master as argument." % realm_name
            )

    # External-CA two-stage install: enforce correct stage ordering.
    if options.external_cert_files:
        if not cainstance.is_step_one_done():
            # This can happen if someone passes external_ca_file without
            # already having done the first stage of the CA install.
            raise ScriptError(
                "CA is not installed yet. To install with an external CA "
                "is a two-stage process.\nFirst run the installer with "
                "--external-ca.")

        external_cert_file, external_ca_file = installutils.load_external_cert(
            options.external_cert_files, options._ca_subject)
    elif options.external_ca:
        if cainstance.is_step_one_done():
            raise ScriptError(
                "CA is already installed.\nRun the installer with "
                "--external-cert-file.")
        if ipautil.file_exists(paths.ROOT_IPA_CSR):
            raise ScriptError(
                "CA CSR file %s already exists.\nIn order to continue "
                "remove the file and run the installer again."
                % paths.ROOT_IPA_CSR)

    if not options.external_cert_files:
        if not cainstance.check_port():
            print("IPA requires port 8443 for PKI but it is currently in use.")
            raise ScriptError("Aborting installation")

    if standalone:
        # Refuse to proceed if either NSS DB already holds the IPA CA
        # nickname or a certificate matching the CA subject.
        dirname = dsinstance.config_dirname(
            installutils.realm_to_serverid(realm_name))
        cadb = certs.CertDB(realm_name, nssdir=paths.PKI_TOMCAT_ALIAS_DIR,
                            subject_base=options._subject_base)
        dsdb = certs.CertDB(
            realm_name, nssdir=dirname, subject_base=options._subject_base)
        for db in (cadb, dsdb):
            if not db.exists():
                continue
            for nickname, _trust_flags in db.list_certs():
                if nickname == certdb.get_ca_nickname(realm_name):
                    raise ScriptError(
                        "Certificate with nickname %s is present in %s, "
                        "cannot continue." % (nickname, db.secdir))

                cert = db.get_cert_from_db(nickname)
                if not cert:
                    continue
                subject = DN(x509.load_certificate(cert).subject)
                if subject == DN(options._ca_subject):
                    raise ScriptError(
                        "Certificate with subject %s is present in %s, "
                        "cannot continue." % (subject, db.secdir))