def synconce_ntp(server_fqdn, debug=False):
    """Attempt a one-shot time sync against ``server_fqdn`` using ntpd.

    Primarily designed to run before Kerberos setup so the local clock
    follows the KDC.

    :returns: True when the sync succeeded, False otherwise
    """
    ntpd = paths.NTPD
    if not os.path.exists(ntpd):
        return False

    # ntpd never exits when the server is unreachable, so bound the
    # invocation with timeout(1) at 15 seconds.
    timeout = 15
    tmp_conf = ipautil.write_tmp_file('server %s' % server_fqdn)
    cmd = [paths.BIN_TIMEOUT, str(timeout), ntpd, '-qgc', tmp_conf.name]
    if debug:
        cmd.append('-d')

    try:
        root_logger.info('Attempting to sync time using ntpd. '
                         'Will timeout after %d seconds' % timeout)
        ipautil.run(cmd)
    except ipautil.CalledProcessError as e:
        # timeout(1) exits with status 124 when the budget expired
        if e.returncode == 124:
            root_logger.debug('Process did not complete before timeout')
        return False
    return True
def uninstall(self):
    """Unconfigure the KDC: restore config files and prior service state."""
    if self.is_configured():
        self.print_msg("Unconfiguring %s" % self.service_name)

    running = self.restore_state("running")
    enabled = self.restore_state("enabled")

    try:
        self.stop()
    # narrowed from a bare "except:" which would also swallow
    # KeyboardInterrupt/SystemExit; stop() failing is best-effort
    except Exception:
        pass

    for f in [paths.KRB5KDC_KDC_CONF, paths.KRB5_CONF]:
        try:
            self.fstore.restore_file(f)
        except ValueError as error:
            root_logger.debug(error)

    # disabled by default, by ldap_enable()
    if enabled:
        self.enable()

    if running:
        self.restart()

    self.kpasswd = KpasswdInstance()
    self.kpasswd.uninstall()
def release_ipa_ccache(ccache_name):
    '''
    Stop using the current request's ccache.

    * Remove KRB5CCNAME from the environment
    * Remove the ccache file from the file system

    Note, we do not demand any of these elements exist, but if they
    do we'll remove them.
    '''
    if 'KRB5CCNAME' in os.environ:
        if ccache_name != os.environ['KRB5CCNAME']:
            root_logger.error('release_ipa_ccache: ccache_name (%s) != KRB5CCNAME environment variable (%s)',
                              ccache_name, os.environ['KRB5CCNAME'])
        del os.environ['KRB5CCNAME']
    else:
        root_logger.debug('release_ipa_ccache: KRB5CCNAME environment variable not set')

    scheme, name = krb5_parse_ccache(ccache_name)
    if scheme == 'FILE':
        if os.path.exists(name):
            try:
                os.unlink(name)
            except Exception as e:
                root_logger.error('unable to delete session ccache file "%s", %s', name, e)
    else:
        # ValueError does not %-interpolate extra arguments the way
        # loggers do; format explicitly so scheme/ccache_name actually
        # appear in the raised message.
        raise ValueError('ccache scheme "%s" unsupported (%s)'
                         % (scheme, ccache_name))
def store_session_cookie(self, cookie_header):
    '''
    Given the contents of a Set-Cookie header scan the header and
    extract each cookie contained within until the session cookie
    is located. Examine the session cookie if the domain and path
    are specified, if not update the cookie with those values from
    the request URL. Then write the session cookie into the key
    store for the principal. If the cookie header is None or the
    session cookie is not present in the header no action is taken.

    Context Dependencies:

    The per thread context is expected to contain:
        principal
            The current principal the HTTP request was issued for.
        request_url
            The URL of the HTTP request.
    '''
    if cookie_header is None:
        return

    principal = getattr(context, 'principal', None)
    request_url = getattr(context, 'request_url', None)
    root_logger.debug("received Set-Cookie '%s'", cookie_header)

    # Search for the session cookie
    try:
        session_cookie = Cookie.get_named_cookie_from_string(
            cookie_header, COOKIE_NAME, request_url)
    # "except Exception as e" replaces the Python-2-only
    # "except Exception, e" comma syntax used elsewhere in this file
    except Exception as e:
        root_logger.error("unable to parse cookie header '%s': %s",
                          cookie_header, e)
        return
def save(self):
    """Write the tracked modules out to @_path.

    Modules that carry no state are pruned first; when nothing
    remains, @_path is removed rather than rewritten.
    """
    root_logger.debug("Saving StateFile to '%s'", self._path)

    # drop modules with no entries
    empty_modules = [m for m in self.modules if len(self.modules[m]) == 0]
    for m in empty_modules:
        del self.modules[m]

    if not self.modules:
        root_logger.debug(" -> no modules, removing file")
        if os.path.exists(self._path):
            os.remove(self._path)
        return

    parser = SafeConfigParser()
    parser.optionxform = str
    for module, state in self.modules.items():
        parser.add_section(module)
        for key, value in state.items():
            parser.set(module, key, str(value))

    with open(self._path, "w") as f:
        parser.write(f)
def uninstall(self):
    """Unconfigure DNS: restore config files, named state, keytab, ccache."""
    if self.is_configured():
        self.print_msg("Unconfiguring %s" % self.service_name)

    running = self.restore_state("running")
    enabled = self.restore_state("enabled")
    named_regular_running = self.restore_state("named-regular-running")
    named_regular_enabled = self.restore_state("named-regular-enabled")

    self.dns_backup.clear_records(self.api.Backend.ldap2.isconnected())

    for conf_file in (NAMED_CONF, RESOLV_CONF):
        try:
            self.fstore.restore_file(conf_file)
        except ValueError as error:
            root_logger.debug(error)

    # disabled by default, by ldap_enable()
    if enabled:
        self.enable()
    if running:
        self.restart()

    # hand control back to the regular named service
    self.named_regular.unmask()
    if named_regular_enabled:
        self.named_regular.enable()
    if named_regular_running:
        self.named_regular.start()

    installutils.remove_keytab(paths.NAMED_KEYTAB)
    installutils.remove_ccache(run_as='named')
def __generate_master_key(self):
    """Generate the DNSSEC master key in SoftHSM and fix token ownership."""
    with open(paths.DNSSEC_SOFTHSM_PIN, "r") as f:
        pin = f.read()

    os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
    p11 = p11helper.P11_Helper(softhsm_slot, pin, paths.LIBSOFTHSM2_SO)
    try:
        # generate master key
        root_logger.debug("Creating master key")
        p11helper.generate_master_key(p11)

        # change tokens mod/owner
        root_logger.debug("Changing ownership of token files")
        token_mode = 0o770 | stat.S_ISGID
        for root, dirs, files in os.walk(paths.DNSSEC_TOKENS_DIR):
            # directories first, then files, matching os.walk ordering
            for entry in dirs + files:
                entry_path = os.path.join(root, entry)
                os.chmod(entry_path, token_mode)
                # chown to ods:named
                os.chown(entry_path, self.ods_uid, self.named_gid)
    finally:
        p11.finalize()
def __enable(self):
    """Register the DNS service in LDAP and take over from regular named."""
    if self.get_state("enabled") is None:
        self.backup_state("enabled", self.is_running())
        self.backup_state("named-regular-enabled",
                          self.named_regular.is_running())

    # We do not let the system start IPA components on its own,
    # Instead we reply on the IPA init script to start only enabled
    # components as found in our LDAP configuration tree
    try:
        self.ldap_enable('DNS', self.fqdn, None, self.suffix)
    except errors.DuplicateEntry:
        # service already exists (forced DNS reinstall)
        # don't crash, just report error
        root_logger.error("DNS service already exists")

    # disable named, we need to run named-pkcs11 only
    if self.get_state("named-regular-running") is None:
        # first time store status
        self.backup_state("named-regular-running",
                          self.named_regular.is_running())

    # stop and mask the regular named, best effort
    for action, verb in ((self.named_regular.stop, "stop"),
                         (self.named_regular.mask, "mask")):
        try:
            action()
        except Exception as e:
            root_logger.debug("Unable to " + verb + " named (%s)", e)
def check_domain(self, domain, tried, reason):
    """
    Given a domain search it for SRV records, breaking it down to
    search all subdomains too.

    Returns a tuple (servers, domain) or (None,None) if a SRV record
    isn't found. servers is a list of servers found. domain is a
    string.

    :param tried: A set of domains that were tried already
    :param reason: Reason this domain is searched (included in the log)
    """
    root_logger.debug('Start searching for LDAP SRV record in "%s" (%s) '
                      'and its sub-domains', domain, reason)
    while True:
        if domain in tried:
            root_logger.debug("Already searched %s; skipping", domain)
            break
        tried.add(domain)

        servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389,
                                         break_on_first=False)
        if servers:
            return (servers, domain)

        dot = domain.find(".")
        if dot == -1:
            # no ldap server found and last component of the domain
            # already tested
            return (None, None)
        domain = domain[dot + 1:]

    return (None, None)
def run_step(message, method):
    """Print *message*, invoke *method* and log its wall-clock duration."""
    self.print_msg(message)
    start = datetime.datetime.now()
    method()
    end = datetime.datetime.now()
    duration = end - start
    # total_seconds() is correct for runs longer than a day, unlike
    # timedelta.seconds which only holds the sub-day remainder
    root_logger.debug(" duration: %d seconds" % duration.total_seconds())
def release_ipa_ccache(ccache_name):
    '''
    Stop using the current request's ccache.

    * Remove KRB5CCNAME from the environment
    * Remove the ccache file from the file system

    Note, we do not demand any of these elements exist, but if they
    do we'll remove them.
    '''
    if 'KRB5CCNAME' in os.environ:
        if ccache_name != os.environ['KRB5CCNAME']:
            root_logger.error(
                'release_ipa_ccache: ccache_name (%s) != KRB5CCNAME environment variable (%s)',
                ccache_name, os.environ['KRB5CCNAME'])
        del os.environ['KRB5CCNAME']
    else:
        root_logger.debug(
            'release_ipa_ccache: KRB5CCNAME environment variable not set')

    scheme, name = krb5_parse_ccache(ccache_name)
    if scheme == 'FILE':
        if os.path.exists(name):
            try:
                os.unlink(name)
            except Exception as e:
                root_logger.error(
                    'unable to delete session ccache file "%s", %s', name, e)
    else:
        # ValueError does not %-interpolate extra arguments the way
        # loggers do; format explicitly so the message carries the data.
        raise ValueError('ccache scheme "%s" unsupported (%s)'
                         % (scheme, ccache_name))
def uninstall(self):
    """Unconfigure the KDC and remove its certificates.

    Restores backed-up config files, stops certificate tracking and
    returns the service to its pre-install enabled/running state.
    """
    if self.is_configured():
        self.print_msg("Unconfiguring %s" % self.service_name)

    was_running = self.restore_state("running")
    was_enabled = self.restore_state("enabled")

    try:
        self.stop()
    except Exception:
        pass

    for backed_up in (paths.KRB5KDC_KDC_CONF, paths.KRB5_CONF):
        try:
            self.fstore.restore_file(backed_up)
        except ValueError as error:
            root_logger.debug(error)

    # disabled by default, by ldap_enable()
    if was_enabled:
        self.enable()

    # stop tracking and remove certificates
    self.stop_tracking_certs()
    installutils.remove_file(paths.CACERT_PEM)
    installutils.remove_file(paths.KDC_CERT)
    installutils.remove_file(paths.KDC_KEY)

    if was_running:
        self.restart()

    self.kpasswd = KpasswdInstance()
    self.kpasswd.uninstall()
def retrieve_and_ask_about_sids(api, options):
    """Warn about SID-less users/groups and optionally schedule ipa-sidgen.

    Sets ``options.add_sids = True`` when the user agrees to run the
    task; in unattended mode the task is never scheduled.
    """
    entries = []
    if api.Backend.ldap2.isconnected():
        entries = retrieve_entries_without_sid(api)
    else:
        root_logger.debug(
            "LDAP backend not connected, can not retrieve entries "
            "with missing SID")

    object_count = len(entries)
    if object_count > 0:
        print("")
        # use the already-computed object_count rather than
        # recomputing len(entries)
        print("WARNING: %d existing users or groups do not have "
              "a SID identifier assigned." % object_count)
        print("Installer can run a task to have ipa-sidgen "
              "Directory Server plugin generate")
        print("the SID identifier for all these users. Please note, "
              "in case of a high")
        print("number of users and groups, the operation might "
              "lead to high replication")
        print("traffic and performance degradation. Refer to "
              "ipa-adtrust-install(1) man page")
        print("for details.")
        print("")
        if options.unattended:
            print("Unattended mode was selected, installer will "
                  "NOT run ipa-sidgen task!")
        else:
            if ipautil.user_input("Do you want to run the ipa-sidgen task?",
                                  default=False,
                                  allow_empty=False):
                options.add_sids = True
def ipadns_search_srv(self, domain, srv_record_name, default_port,
                      break_on_first=True):
    """
    Search for SRV records in given domain. When no record is found,
    an empty list is returned

    :param domain: Search domain name
    :param srv_record_name: SRV record name, e.g. "_ldap._tcp"
    :param default_port: When default_port is not None, it is being
        checked with the port in SRV record and if they don't match,
        the port from SRV record is appended to found hostname in
        this format: "hostname:port"
    :param break_on_first: break on the first find and return just
        one entry
    """
    servers = []

    qname = '%s.%s' % (srv_record_name, domain)

    root_logger.debug("Search DNS for SRV record of %s", qname)

    try:
        answers = resolver.query(qname, rdatatype.SRV)
    # "except ... as e" replaces the Python-2-only comma syntax
    except DNSException as e:
        root_logger.debug("DNS record not found: %s", e.__class__.__name__)
        answers = []
def retrieve_entries_without_sid(api):
    """
    Retrieve a list of entries without assigned SIDs.

    :returns: a list of entries or an empty list if an error occurs
    """
    # The filter corresponds to ipa_sidgen_task.c LDAP search filter.
    # Named search_filter to avoid shadowing the filter() builtin.
    search_filter = \
        '(&(objectclass=ipaobject)(!(objectclass=mepmanagedentry))' \
        '(|(objectclass=posixaccount)(objectclass=posixgroup)' \
        '(objectclass=ipaidobject))(!(ipantsecurityidentifier=*)))'
    base_dn = api.env.basedn
    try:
        root_logger.debug(
            "Searching for objects with missing SID with "
            "filter=%s, base_dn=%s", search_filter, base_dn)
        entries, _truncated = api.Backend.ldap2.find_entries(
            filter=search_filter, base_dn=base_dn, attrs_list=[''])
        return entries
    except errors.NotFound:
        # All objects have SIDs assigned
        pass
    except (errors.DatabaseError, errors.NetworkError) as e:
        root_logger.error(
            "Could not retrieve a list of objects that need a SID "
            "identifier assigned: %s", e)

    return []
def retrieve_and_ask_about_sids(api, options):
    """Warn about SID-less users/groups and optionally schedule ipa-sidgen.

    Sets ``options.add_sids = True`` when the user agrees to run the
    task; in unattended mode the task is never scheduled.
    """
    entries = []
    if api.Backend.ldap2.isconnected():
        entries = retrieve_entries_without_sid(api)
    else:
        root_logger.debug(
            "LDAP backend not connected, can not retrieve entries "
            "with missing SID")

    object_count = len(entries)
    if object_count > 0:
        print("")
        # use the already-computed object_count rather than
        # recomputing len(entries)
        print("WARNING: %d existing users or groups do not have "
              "a SID identifier assigned." % object_count)
        print("Installer can run a task to have ipa-sidgen "
              "Directory Server plugin generate")
        print("the SID identifier for all these users. Please note, "
              "in case of a high")
        print("number of users and groups, the operation might "
              "lead to high replication")
        print("traffic and performance degradation. Refer to "
              "ipa-adtrust-install(1) man page")
        print("for details.")
        print("")
        if options.unattended:
            print("Unattended mode was selected, installer will "
                  "NOT run ipa-sidgen task!")
        else:
            if ipautil.user_input(
                    "Do you want to run the ipa-sidgen task?",
                    default=False,
                    allow_empty=False):
                options.add_sids = True
def retrieve_entries_without_sid(api):
    """
    Retrieve a list of entries without assigned SIDs.

    :returns: a list of entries or an empty list if an error occurs
    """
    # The filter corresponds to ipa_sidgen_task.c LDAP search filter.
    # Named search_filter to avoid shadowing the filter() builtin.
    search_filter = \
        '(&(objectclass=ipaobject)(!(objectclass=mepmanagedentry))' \
        '(|(objectclass=posixaccount)(objectclass=posixgroup)' \
        '(objectclass=ipaidobject))(!(ipantsecurityidentifier=*)))'
    base_dn = api.env.basedn
    try:
        root_logger.debug(
            "Searching for objects with missing SID with "
            "filter=%s, base_dn=%s", search_filter, base_dn)
        entries, _truncated = api.Backend.ldap2.find_entries(
            filter=search_filter, base_dn=base_dn, attrs_list=[''])
        return entries
    except errors.NotFound:
        # All objects have SIDs assigned
        pass
    except (errors.DatabaseError, errors.NetworkError) as e:
        root_logger.error(
            "Could not retrieve a list of objects that need a SID "
            "identifier assigned: %s", e)

    return []
def restore_all_files(self):
    """Restore every file in the index to its original location and
    delete the backup copy.

    Returns #True if the file was restored, #False if there was no
    backup file to restore
    """
    if not self.files:
        return False

    for filename, value in self.files.items():
        mode, uid, gid, path = value.split(',', 3)

        backup_path = os.path.join(self._path, filename)
        if not os.path.exists(backup_path):
            root_logger.debug(" -> Not restoring - '%s' doesn't exist",
                              backup_path)
            continue

        shutil.copy(backup_path, path)  # SELinux needs copy
        os.remove(backup_path)

        os.chown(path, int(uid), int(gid))
        os.chmod(path, int(mode))

        tasks.restore_context(path)

    # force file to be deleted
    self.files = {}
    self.save()

    return True
def check_domain(self, domain, tried, reason):
    """
    Given a domain search it for SRV records, breaking it down to
    search all subdomains too.

    Returns a tuple (servers, domain) or (None, None) if a SRV record
    isn't found. servers is a list of servers found. domain is a
    string.

    :param tried: A set of domains that were tried already
    :param reason: Reason this domain is searched (included in the log)
    """
    root_logger.debug(
        'Start searching for LDAP SRV record in "%s" (%s) '
        'and its sub-domains', domain, reason)

    current = domain
    while current not in tried:
        tried.add(current)

        servers = self.ipadns_search_srv(current, '_ldap._tcp', 389,
                                         break_on_first=False)
        if servers:
            return (servers, current)

        if "." not in current:
            # no ldap server found and last component of the domain
            # already tested
            return (None, None)
        # strip the leftmost label and try the parent domain
        current = current.split(".", 1)[1]

    root_logger.debug("Already searched %s; skipping", current)
    return (None, None)
def check_zone_overlap(zone, raise_on_error=True):
    """Check whether *zone* already exists in the DNS tree.

    Raises ValueError when the zone exists (listing its nameservers
    when resolvable) or when the check itself fails; with
    raise_on_error=False a failed check is only logged.
    """
    root_logger.info("Checking DNS domain %s, please wait ..." % zone)
    if not isinstance(zone, DNSName):
        zone = DNSName(zone).make_absolute()

    # automatic empty zones always exist so checking them is pointless,
    # do not report them to avoid meaningless error messages
    if is_auto_empty_zone(zone):
        return

    try:
        containing_zone = dns.resolver.zone_for_name(zone)
    except dns.exception.DNSException as e:
        msg = "DNS check for domain %s failed: %s." % (zone, e)
        if raise_on_error:
            raise ValueError(msg)
        root_logger.warning(msg)
        return

    if containing_zone != zone:
        return

    # the zone itself exists; collect its nameservers for the message
    try:
        ns = [ans.to_text() for ans in dns.resolver.query(zone, 'NS')]
    except dns.exception.DNSException as e:
        root_logger.debug("Failed to resolve nameserver(s) for domain"
                          " {0}: {1}".format(zone, e))
        ns = []

    msg = u"DNS zone {0} already exists in DNS".format(zone)
    if ns:
        msg += u" and is handled by server(s): {0}".format(', '.join(ns))
    raise ValueError(msg)
def add_ca_schema(): """Copy IPA schema files into the CA DS instance """ pki_pent = pwd.getpwnam(PKI_USER) ds_pent = pwd.getpwnam(DS_USER) for schema_fname in SCHEMA_FILENAMES: source_fname = os.path.join(paths.USR_SHARE_IPA_DIR, schema_fname) target_fname = os.path.join(schema_dirname(SERVERID), schema_fname) if not os.path.exists(source_fname): root_logger.debug("File does not exist: %s", source_fname) continue if os.path.exists(target_fname): target_sha1 = _sha1_file(target_fname) source_sha1 = _sha1_file(source_fname) if target_sha1 != source_sha1: target_size = os.stat(target_fname).st_size source_size = os.stat(source_fname).st_size root_logger.info("Target file %s exists but the content is " "different", target_fname) root_logger.info("\tTarget file: sha1: %s, size: %s B", target_sha1, target_size) root_logger.info("\tSource file: sha1: %s, size: %s B", source_sha1, source_size) if not ipautil.user_input("Do you want replace %s file?" % target_fname, True): continue else: root_logger.info("Target exists, not overwriting: %s", target_fname) continue try: shutil.copyfile(source_fname, target_fname) except IOError as e: root_logger.warning("Could not install %s: %s", target_fname, e) else: root_logger.info("Installed %s", target_fname) os.chmod(target_fname, 0o440) # read access for dirsrv user/group os.chown(target_fname, pki_pent.pw_uid, ds_pent.pw_gid)
def uninstall(self):
    """Revert KDC configuration and restore the original service state."""
    if self.is_configured():
        self.print_msg("Unconfiguring %s" % self.service_name)

    run_after = self.restore_state("running")
    enable_after = self.restore_state("enabled")

    try:
        self.stop()
    except Exception:
        pass

    for conf_path in (paths.KRB5KDC_KDC_CONF, paths.KRB5_CONF):
        try:
            self.fstore.restore_file(conf_path)
        except ValueError as error:
            root_logger.debug(error)

    # disabled by default, by ldap_enable()
    if enable_after:
        self.enable()
    if run_after:
        self.restart()

    self.kpasswd = KpasswdInstance()
    self.kpasswd.uninstall()
def install_dns_records(config, options, remote_api):
    """Add this replica's DNS records on the master (best effort)."""
    if not bindinstance.dns_container_exists(
            config.master_host_name,
            ipautil.realm_to_suffix(config.realm_name),
            dm_password=config.dirman_password):
        return

    try:
        bind = bindinstance.BindInstance(
            dm_password=config.dirman_password, api=remote_api)
        for ip_address in config.ips:
            reverse_zone = bindinstance.find_reverse_zone(ip_address,
                                                          remote_api)
            bind.add_master_dns_records(config.host_name,
                                        str(ip_address),
                                        config.realm_name,
                                        config.domain_name,
                                        reverse_zone,
                                        not options.no_ntp,
                                        options.setup_ca)
    except errors.NotFound as e:
        root_logger.debug('Replica DNS records could not be added '
                          'on master: %s', str(e))
    # we should not fail here no matter what
    except Exception as e:
        root_logger.info('Replica DNS records could not be added '
                         'on master: %s', str(e))
def uninstall(self):
    """Unconfigure the web server: restore certmonger helper and configs."""
    if self.is_configured():
        self.print_msg("Unconfiguring web server")

    running = self.restore_state("running")
    enabled = self.restore_state("enabled")

    self.stop_tracking_certificates()

    helper = self.restore_state('certmonger_ipa_helper')
    if helper:
        # put the saved external helper back on certmonger's IPA CA
        bus = dbus.SystemBus()
        obj = bus.get_object('org.fedorahosted.certmonger',
                             '/org/fedorahosted/certmonger')
        iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
        path = iface.find_ca_by_nickname('IPA')
        if path:
            ca_obj = bus.get_object('org.fedorahosted.certmonger', path)
            ca_iface = dbus.Interface(ca_obj,
                                      'org.freedesktop.DBus.Properties')
            ca_iface.Set('org.fedorahosted.certmonger.ca',
                         'external-helper', helper)

    for f in [paths.HTTPD_IPA_CONF, paths.HTTPD_SSL_CONF,
              paths.HTTPD_NSS_CONF]:
        try:
            self.fstore.restore_file(f)
        # "except ... as error" replaces the Python-2-only comma
        # syntax; the trailing redundant "pass" was dropped
        except ValueError as error:
            root_logger.debug(error)
def __update_dse_ldif(self):
    """
    Update dse.ldif right after instance creation.

    Lets the admin adjust DS configuration that has to be in place
    before IPA is fully installed (for example: settings for
    replication on replicas).

    DS must be turned off.
    """
    instance_dir = paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % self.serverid
    dse_filename = os.path.join(instance_dir, 'dse.ldif')

    with tempfile.NamedTemporaryFile(delete=False) as out_file:
        temp_filename = out_file.name
        with open(dse_filename, "r") as in_file:
            ldif_parser = installutils.ModifyLDIF(in_file, out_file)
            # set nsslapd-db-locks to 50000 before the first start
            ldif_parser.replace_value(
                'cn=config,cn=ldbm database,cn=plugins,cn=config',
                'nsslapd-db-locks',
                ['50000'])
            if self.config_ldif:
                # parse modifications from ldif file supplied by the admin
                with open(self.config_ldif, "r") as admin_ldif:
                    ldif_parser.modifications_from_ldif(admin_ldif)
            ldif_parser.parse()
        out_file.flush()

    # overwrite dse.ldif with the modified copy, then drop the temp file
    shutil.copy2(temp_filename, dse_filename)
    try:
        os.remove(temp_filename)
    except OSError as e:
        root_logger.debug("Failed to clean temporary file: %s" % e)
def __setup_dnssec(self):
    """Initialize the OpenDNSSEC kasp.db, either from a user-supplied
    file (self.kasp_db_file) or by running 'ods-ksmutil setup'."""
    # run once only — but a user-supplied kasp.db always forces a re-run
    if self.get_state("kasp_db_configured") and not self.kasp_db_file:
        root_logger.debug("Already configured, skipping step")
        return

    self.backup_state("kasp_db_configured", True)

    if not self.fstore.has_file(paths.OPENDNSSEC_KASP_DB):
        self.fstore.backup_file(paths.OPENDNSSEC_KASP_DB)

    if self.kasp_db_file:
        # copy user specified kasp.db to proper location and set proper
        # privileges
        shutil.copy(self.kasp_db_file, paths.OPENDNSSEC_KASP_DB)
        os.chown(paths.OPENDNSSEC_KASP_DB, self.ods_uid, self.ods_gid)
        os.chmod(paths.OPENDNSSEC_KASP_DB, 0o660)

        # regenerate zonelist.xml
        cmd = [paths.ODS_KSMUTIL, 'zonelist', 'export']
        result = ipautil.run(cmd,
                             runas=constants.ODS_USER,
                             capture_output=True)
        with open(paths.OPENDNSSEC_ZONELIST_FILE, 'w') as zonelistf:
            zonelistf.write(result.output)
            # make the exported zonelist readable/writable by ods
            os.chown(paths.OPENDNSSEC_ZONELIST_FILE,
                     self.ods_uid, self.ods_gid)
            os.chmod(paths.OPENDNSSEC_ZONELIST_FILE, 0o660)
    else:
        # initialize new kasp.db
        command = [paths.ODS_KSMUTIL, 'setup']
        ipautil.run(command, stdin="y", runas=constants.ODS_USER)
def uninstall(self):
    """Unconfigure DNS: restore configs, named state, keytab and ccache."""
    if self.is_configured():
        self.print_msg("Unconfiguring %s" % self.service_name)

    # previously recorded service state, in original backup order
    states = {}
    for state_name in ("running", "enabled",
                       "named-regular-running", "named-regular-enabled"):
        states[state_name] = self.restore_state(state_name)

    self.dns_backup.clear_records(self.api.Backend.ldap2.isconnected())

    for conf in [NAMED_CONF, RESOLV_CONF]:
        try:
            self.fstore.restore_file(conf)
        except ValueError as error:
            root_logger.debug(error)

    # disabled by default, by ldap_enable()
    if states["enabled"]:
        self.enable()
    if states["running"]:
        self.restart()

    self.named_regular.unmask()
    if states["named-regular-enabled"]:
        self.named_regular.enable()
    if states["named-regular-running"]:
        self.named_regular.start()

    installutils.remove_keytab(self.keytab)
    installutils.remove_ccache(run_as=self.service_user)
def __update_dse_ldif(self):
    """
    Update dse.ldif immediately after instance creation.

    Allows the admin to modify DS configuration that must exist
    before IPA is fully installed (for example: settings for
    replication on replicas).

    DS must be turned off.
    """
    dse_filename = os.path.join(
        paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % self.serverid,
        "dse.ldif")

    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        tmp_name = tmp_file.name
        with open(dse_filename, "r") as original:
            modifier = installutils.ModifyLDIF(original, tmp_file)
            modifier.replace_value(
                "cn=config,cn=ldbm database,cn=plugins,cn=config",
                "nsslapd-db-locks",
                ["50000"])
            if self.config_ldif:
                # parse modifications from ldif file supplied by the admin
                with open(self.config_ldif, "r") as extra_mods:
                    modifier.modifications_from_ldif(extra_mods)
            modifier.parse()
        tmp_file.flush()

    # replace dse.ldif with the modified copy and clean up
    shutil.copy2(tmp_name, dse_filename)
    try:
        os.remove(tmp_name)
    except OSError as e:
        root_logger.debug("Failed to clean temporary file: %s" % e)
def restore_all_files(self):
    """Restore every indexed file to its original location and delete
    the backup copy.

    Returns #True if the file was restored, #False if there was no
    backup file to restore
    """
    if len(self.files) == 0:
        return False

    for filename, value in self.files.items():
        # index value format: "mode,uid,gid,path"
        mode, uid, gid, path = value.split(',', 3)
        backup_path = os.path.join(self._path, filename)

        if not os.path.exists(backup_path):
            root_logger.debug(" -> Not restoring - '%s' doesn't exist",
                              backup_path)
            continue

        shutil.copy(backup_path, path)  # SELinux needs copy
        os.remove(backup_path)
        os.chown(path, int(uid), int(gid))
        os.chmod(path, int(mode))
        tasks.restore_context(path)

    # force file to be deleted
    self.files = {}
    self.save()
    return True
def __enable(self):
    """Enable the IPA DNS service in LDAP and mask the regular named."""
    if self.get_state("enabled") is None:
        self.backup_state("enabled", self.is_running())
        self.backup_state("named-regular-enabled",
                          self.named_regular.is_running())

    # We do not let the system start IPA components on its own,
    # Instead we reply on the IPA init script to start only enabled
    # components as found in our LDAP configuration tree
    try:
        self.ldap_enable('DNS', self.fqdn, self.dm_password, self.suffix)
    except errors.DuplicateEntry:
        # service already exists (forced DNS reinstall)
        # don't crash, just report error
        root_logger.error("DNS service already exists")

    # disable named, we need to run named-pkcs11 only
    if self.get_state("named-regular-running") is None:
        # first time store status
        self.backup_state("named-regular-running",
                          self.named_regular.is_running())

    try:
        self.named_regular.stop()
    except Exception as exc:
        root_logger.debug("Unable to stop named (%s)", exc)

    try:
        self.named_regular.mask()
    except Exception as exc:
        root_logger.debug("Unable to mask named (%s)", exc)
def save(self):
    """Save the modules to @_path; when no module state remains the
    file is removed instead of written."""
    root_logger.debug("Saving StateFile to '%s'", self._path)

    # prune modules without any state
    for name in [m for m in self.modules if not self.modules[m]]:
        del self.modules[name]

    if not self.modules:
        root_logger.debug(" -> no modules, removing file")
        if os.path.exists(self._path):
            os.remove(self._path)
        return

    writer = SafeConfigParser()
    writer.optionxform = str
    for name in self.modules:
        writer.add_section(name)
        for key, value in self.modules[name].items():
            writer.set(name, key, str(value))

    with open(self._path, "w") as fobj:
        writer.write(fobj)
def __add_ipa_ca_record(self):
    """Add ipa-ca records for this server and, on the first instance,
    for every other CA-enabled master found in LDAP."""
    self.__add_ipa_ca_records(self.fqdn,
                              self.ip_addresses,
                              self.ca_configured)

    if self.first_instance:
        ldap = self.api.Backend.ldap2
        try:
            # find all cn=CA sub-entries under cn=masters
            entries = ldap.get_entries(
                DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'),
                   self.api.env.basedn),
                ldap.SCOPE_SUBTREE,
                '(&(objectClass=ipaConfigObject)(cn=CA))',
                ['dn'])
        except errors.NotFound:
            root_logger.debug('No server with CA found')
            entries = []

        for entry in entries:
            # master FQDN is the cn value of the second RDN of the
            # entry DN (cn=CA,cn=<fqdn>,cn=masters,...)
            fqdn = entry.dn[1]['cn']
            if fqdn == self.fqdn:
                continue

            host, zone = fqdn.split('.', 1)
            if dns_zone_exists(zone, self.api):
                addrs = get_fwd_rr(zone, host, api=self.api)
            else:
                # zone not managed by IPA — fall back to host resolution
                addrs = installutils.resolve_host(fqdn)

            self.__add_ipa_ca_records(fqdn, addrs, True)
def resolve_ip_addresses_nss(fqdn):
    """Get IP addresses for given host (using NSS/getaddrinfo).

    :returns: set of IP addresses as UnsafeIPAddress objects
        (docstring fixed: the function returns a set, not a list)
    """
    # make sure the name is fully qualified
    # so search path from resolv.conf does not apply
    fqdn = str(dnsutil.DNSName(fqdn).make_absolute())
    try:
        addrinfos = socket.getaddrinfo(fqdn, None,
                                       socket.AF_UNSPEC, socket.SOCK_STREAM)
    except socket.error as ex:
        if ex.errno in (socket.EAI_NODATA, socket.EAI_NONAME):
            root_logger.debug('Name %s does not have any address: %s',
                              fqdn, ex)
            return set()
        raise

    # accept whatever we got from NSS
    ip_addresses = set()
    for ai in addrinfos:
        try:
            ip = ipautil.UnsafeIPAddress(ai[4][0])
        except ValueError as ex:
            # getaddinfo may return link-local address other similar
            # oddities which are not accepted by CheckedIPAddress -
            # skip these
            root_logger.warning('Name %s resolved to an unacceptable IP '
                                'address %s: %s', fqdn, ai[4][0], ex)
        else:
            ip_addresses.add(ip)
    root_logger.debug('Name %s resolved to %s', fqdn, ip_addresses)
    return ip_addresses
def store_session_cookie(self, cookie_header):
    '''
    Given the contents of a Set-Cookie header scan the header and
    extract each cookie contained within until the session cookie
    is located. Examine the session cookie if the domain and path
    are specified, if not update the cookie with those values from
    the request URL. Then write the session cookie into the key
    store for the principal. If the cookie header is None or the
    session cookie is not present in the header no action is taken.

    Context Dependencies:

    The per thread context is expected to contain:
        principal
            The current principal the HTTP request was issued for.
        request_url
            The URL of the HTTP request.
    '''
    if cookie_header is None:
        return

    principal = getattr(context, 'principal', None)
    request_url = getattr(context, 'request_url', None)
    root_logger.debug("received Set-Cookie (%s)'%s'",
                      type(cookie_header), cookie_header)

    # normalize to a list of header strings
    if not isinstance(cookie_header, list):
        cookie_header = [cookie_header]

    # scan the received headers for the session cookie
    session_cookie = None
    try:
        for raw_header in cookie_header:
            session_cookie = Cookie.get_named_cookie_from_string(
                raw_header, COOKIE_NAME, request_url,
                timestamp=datetime.datetime.utcnow())
            if session_cookie is not None:
                break
    except Exception as e:
        root_logger.error("unable to parse cookie header '%s': %s",
                          cookie_header, e)
        return

    if session_cookie is None:
        return

    cookie_string = self._slice_session_cookie(session_cookie)
    root_logger.debug("storing cookie '%s' for principal %s",
                      cookie_string, principal)
    try:
        update_persistent_client_session_data(principal, cookie_string)
    except Exception:
        # Not fatal, we just can't use the session cookie we were sent.
        pass
def __add_self_ns(self):
    """Add an NS record pointing at this host to the apex of every zone."""
    ns_hostname = normalize_zone(self.api.env.host)
    result = self.api.Command.dnszone_find()
    for zone_entry in result['result']:
        # we need unicode due to backup
        zone_name = unicode(zone_entry['idnsname'][0])
        root_logger.debug("adding self NS to zone %s apex", zone_name)
        add_ns_rr(zone_name, ns_hostname, self.dns_backup, force=True,
                  api=self.api)
def trust_root_cert(self, root_nickname, trust_flags=None):
    """Mark the root certificate as trusted in the NSS database.

    Logs and returns when no root nickname was identified; failures
    from the underlying NSS database call are deliberately ignored.
    """
    if root_nickname is None:
        root_logger.debug(
            "Unable to identify root certificate to trust. "
            "Continuing but things are likely to fail.")
        return

    try:
        self.nssdb.trust_root_cert(root_nickname, trust_flags)
    except RuntimeError:
        # best effort — the cert may already carry the right trust
        pass
def trust_root_cert(self, root_nickname, trust_flags):
    """Set trust on the named root certificate in the NSS database.

    A missing nickname is only logged; RuntimeError from the NSS
    database layer is swallowed on purpose.
    """
    if root_nickname is None:
        root_logger.debug(
            "Unable to identify root certificate to trust. "
            "Continuing but things are likely to fail.")
        return

    try:
        self.nssdb.trust_root_cert(root_nickname, trust_flags)
    except RuntimeError:
        pass
def store_session_cookie(self, cookie_header):
    '''
    Given the contents of a Set-Cookie header scan the header and
    extract each cookie contained within until the session cookie
    is located. Examine the session cookie if the domain and path
    are specified, if not update the cookie with those values from
    the request URL. Then write the session cookie into the key
    store for the principal. If the cookie header is None or the
    session cookie is not present in the header no action is taken.

    Context Dependencies:

    The per thread context is expected to contain:
        principal
            The current principal the HTTP request was issued for.
        request_url
            The URL of the HTTP request.
    '''
    if cookie_header is None:
        return

    principal = getattr(context, 'principal', None)
    request_url = getattr(context, 'request_url', None)
    root_logger.debug("received Set-Cookie (%s)'%s'",
                      type(cookie_header), cookie_header)

    if not isinstance(cookie_header, list):
        cookie_header = [cookie_header]

    # Search for the session cookie among all received headers
    session_cookie = None
    try:
        for hdr in cookie_header:
            candidate = Cookie.get_named_cookie_from_string(
                hdr, COOKIE_NAME, request_url,
                timestamp=datetime.datetime.utcnow())
            if candidate is not None:
                session_cookie = candidate
                break
    except Exception as e:
        root_logger.error("unable to parse cookie header '%s': %s",
                          cookie_header, e)
        return

    if session_cookie is None:
        return

    cookie_string = self._slice_session_cookie(session_cookie)
    root_logger.debug("storing cookie '%s' for principal %s",
                      cookie_string, principal)
    try:
        update_persistent_client_session_data(principal, cookie_string)
    except Exception:
        # Not fatal, we just can't use the session cookie we were sent.
        pass
def ldap_remove_service_container(self, name, fqdn, ldap_suffix):
    """Delete the per-master LDAP startup container entry for a service.

    :param name: service name (cn of the container entry)
    :param fqdn: FQDN of the master the entry belongs to
    :param ldap_suffix: base DN under which the masters tree lives

    A missing entry is not an error -- it is logged and ignored.
    """
    entry_dn = DN(('cn', name), ('cn', fqdn), ('cn', 'masters'),
                  ('cn', 'ipa'), ('cn', 'etc'), ldap_suffix)
    try:
        api.Backend.ldap2.delete_entry(entry_dn)
    except errors.NotFound:
        root_logger.debug("service %s container already removed", name)
    else:
        # fixed typo in the original log message ("sucessfully")
        root_logger.debug("service %s container successfully removed",
                          name)
def execute(self, **options):
    """Migrate old-style NSUniqueAttr plugin entries to new-style config.

    Searches cn=plugins,cn=config for 389-ds attribute-uniqueness
    plugin entries still configured via positional nsslapd-pluginarg*
    attributes and builds LDAP updates converting each one to the
    named uniqueness-* attributes.  Entries that mix old and new
    style attributes are skipped with a critical log message.

    :return: tuple (False, update_list) -- False means no DS restart
        is requested; update_list holds the generated updates.
    """
    ldap = self.api.Backend.ldap2

    # Only match entries that still carry the positional-argument
    # (old style) configuration.
    old_style_plugin_search_filter = (
        "(&"
        "(objectclass=nsSlapdPlugin)"
        "(nsslapd-pluginId=NSUniqueAttr)"
        "(nsslapd-pluginPath=libattr-unique-plugin)"
        "(nsslapd-pluginarg0=*)"  # only entries with old configuration
        ")")

    try:
        entries, _truncated = ldap.find_entries(
            filter=old_style_plugin_search_filter,
            base_dn=self.plugins_dn,
        )
    except errors.NotFound:
        root_logger.debug("No uniqueness plugin entries with old style "
                          "configuration found")
        return False, []

    update_list = []
    # Presence of any of these marks an entry as (at least partly)
    # migrated already.
    new_attributes = [
        'uniqueness-subtree-entries-oc',
        'uniqueness-top-entry-oc',
        'uniqueness-attribute-name',
        'uniqueness-subtrees',
        'uniqueness-across-all-subtrees',
    ]

    for entry in entries:
        # test for mixed configuration
        if any(attr in entry for attr in new_attributes):
            root_logger.critical(
                "Mixed old and new style configuration "
                "for plugin %s. Plugin will not work. "
                "Skipping plugin migration, please fix it "
                "manually", entry.dn)
            continue
        root_logger.debug(
            "Configuration of plugin %s will be migrated "
            "to new style", entry.dn)

        try:
            # detect which configuration was used: arg0 containing '='
            # means objectclass-style, otherwise subtree-style
            arg0 = entry.get('nsslapd-pluginarg0')
            if '=' in arg0:
                update = self.__objectclass_style(entry)
            else:
                update = self.__subtree_style(entry)
        except ValueError as e:
            root_logger.error(
                "Unable to migrate configuration of "
                "plugin %s (%s)", entry.dn, e)
        else:
            update_list.append(update)

    return False, update_list
def issue_server_cert(self, certreq_fname, cert_fname):
    """Submit a CSR to the dogtag CA and write the issued cert to a file.

    :param certreq_fname: path to a PEM PKCS#10 certificate request
    :param cert_fname: path the issued certificate (base64-decoded)
        is written to; it is imported in a later step
    :raises RuntimeError: when the CA host is unset or issuance failed
    :raises CertificateOperationError: on a non-200 HTTP status from CMS
    """
    self.setup_cert_request()

    if self.host_name is None:
        raise RuntimeError("CA Host is not set.")

    # Read the CSR; we just want the CSR bits, make sure there is
    # nothing else (strip the PEM header/footer).
    with open(certreq_fname, "r") as f:
        csr = f.read()
    csr = pkcs10.strip_header(csr)

    params = {'profileId': dogtag.DEFAULT_PROFILE,
              'cert_request_type': 'pkcs10',
              'requestor_name': 'IPA Installer',
              'cert_request': csr,
              'xmlOutput': 'true'}

    # Send the request to the CA; the agent cert password comes from
    # the first line of the password file.
    with open(self.passwd_fname, "r") as f:
        password = f.readline()

    result = dogtag.https_request(
        self.host_name, 8443,
        "/ca/ee/ca/profileSubmitSSLClient",
        self.secdir, password, "ipaCert", **params)
    http_status, _http_headers, http_body = result
    root_logger.debug("CA answer: %s", http_body)

    if http_status != 200:
        raise CertificateOperationError(
            error=_('Unable to communicate with CMS (status %d)')
            % http_status)

    # The result is an XML blob. Pull the certificate out of that.
    doc = xml.dom.minidom.parseString(http_body)
    item_node = doc.getElementsByTagName("b64")
    try:
        try:
            cert = item_node[0].childNodes[0].data
        except IndexError:
            # no <b64> element means the CA rejected the request
            raise RuntimeError("Certificate issuance failed")
    finally:
        doc.unlink()

    # base64-decode the result for uniformity
    cert = base64.b64decode(cert)

    # Write the certificate to a file. It will be imported in a later
    # step. This file will be read later to be imported.
    with open(cert_fname, "w") as f:
        f.write(cert)
def execute(self, **options):
    """Migrate old-style NSUniqueAttr plugin entries to new-style config.

    Finds 389-ds attribute-uniqueness plugin entries still configured
    via positional nsslapd-pluginarg* attributes and builds LDAP
    updates converting them to the named uniqueness-* attributes.
    Entries mixing old and new style attributes are skipped with a
    critical log message.

    :return: tuple (False, update_list) -- False means no DS restart
        is requested; update_list holds the generated updates.
    """
    ldap = self.api.Backend.ldap2

    old_style_plugin_search_filter = (
        "(&"
        "(objectclass=nsSlapdPlugin)"
        "(nsslapd-pluginId=NSUniqueAttr)"
        "(nsslapd-pluginPath=libattr-unique-plugin)"
        "(nsslapd-pluginarg0=*)"  # only entries with old configuration
        ")"
    )

    try:
        # truncation flag is unused; name it _truncated for consistency
        # with the sibling implementation of this migration
        entries, _truncated = ldap.find_entries(
            filter=old_style_plugin_search_filter,
            base_dn=self.plugins_dn,
        )
    except errors.NotFound:
        root_logger.debug("No uniqueness plugin entries with old style "
                          "configuration found")
        return False, []

    update_list = []
    # Any of these attributes marks an entry as already (partly) migrated.
    new_attributes = [
        'uniqueness-subtree-entries-oc',
        'uniqueness-top-entry-oc',
        'uniqueness-attribute-name',
        'uniqueness-subtrees',
        'uniqueness-across-all-subtrees',
    ]

    for entry in entries:
        # test for mixed configuration
        if any(attr in entry for attr in new_attributes):
            root_logger.critical("Mixed old and new style configuration "
                                 "for plugin %s. Plugin will not work. "
                                 "Skipping plugin migration, please fix it "
                                 "manually", entry.dn)
            continue
        root_logger.debug("Configuration of plugin %s will be migrated "
                          "to new style", entry.dn)

        try:
            # detect which configuration was used: arg0 containing '='
            # means objectclass-style, otherwise subtree-style
            arg0 = entry.get('nsslapd-pluginarg0')
            if '=' in arg0:
                update = self.__objectclass_style(entry)
            else:
                update = self.__subtree_style(entry)
        except ValueError as e:
            root_logger.error("Unable to migrate configuration of "
                              "plugin %s (%s)", entry.dn, e)
        else:
            update_list.append(update)

    return False, update_list
def uninstall(self):
    """Revert the IPA web server (httpd) configuration.

    Restores saved service state and configuration files, puts the
    previously recorded certmonger CA helper back, removes IPA-created
    config files and the HTTPD ccache, restores SELinux booleans, and
    finally restarts/enables httpd according to the saved state.
    """
    if self.is_configured():
        self.print_msg("Unconfiguring web server")

    # Saved state recorded at install time.
    running = self.restore_state("running")
    enabled = self.restore_state("enabled")

    self.stop_tracking_certificates()

    # Restore the certmonger 'IPA' CA external helper if we saved one.
    helper = self.restore_state('certmonger_ipa_helper')
    if helper:
        bus = dbus.SystemBus()
        obj = bus.get_object('org.fedorahosted.certmonger',
                             '/org/fedorahosted/certmonger')
        iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
        path = iface.find_ca_by_nickname('IPA')
        if path:
            ca_obj = bus.get_object('org.fedorahosted.certmonger', path)
            ca_iface = dbus.Interface(ca_obj,
                                      'org.freedesktop.DBus.Properties')
            ca_iface.Set('org.fedorahosted.certmonger.ca',
                         'external-helper', helper)

    # Restore configuration files from the file store; a ValueError
    # (file was never backed up) is logged and ignored.
    for f in [paths.HTTPD_IPA_CONF, paths.HTTPD_SSL_CONF,
              paths.HTTPD_NSS_CONF]:
        try:
            self.fstore.restore_file(f)
        except ValueError as error:
            root_logger.debug(error)

    # Remove the ccache file for the HTTPD service
    ipautil.run([paths.KDESTROY, '-c', paths.KRB5CC_HTTPD],
                runas='apache', raiseonerr=False)

    # Remove the configuration files we create
    installutils.remove_file(paths.HTTPD_IPA_REWRITE_CONF)
    installutils.remove_file(paths.HTTPD_IPA_CONF)
    installutils.remove_file(paths.HTTPD_IPA_PKI_PROXY_CONF)
    installutils.remove_file(paths.HTTPD_IPA_KDCPROXY_CONF_SYMLINK)
    installutils.remove_file(paths.HTTPD_IPA_KDCPROXY_CONF)

    # Restore SELinux boolean states
    boolean_states = {name: self.restore_state(name)
                      for name in SELINUX_BOOLEAN_SETTINGS}
    try:
        tasks.set_selinux_booleans(boolean_states)
    except ipapython.errors.SetseboolError as e:
        self.print_msg('WARNING: ' + str(e))

    if running:
        self.restart()

    # disabled by default, by ldap_enable()
    if enabled:
        self.enable()
def load_ccache_data(ccache_name):
    """Return the raw contents of a FILE-scheme Kerberos ccache.

    :param ccache_name: ccache specifier, e.g. 'FILE:/tmp/krb5cc_0'
    :return: the ccache file contents
    :raises ValueError: when the ccache scheme is not FILE
    """
    scheme, name = krb5_parse_ccache(ccache_name)
    if scheme == 'FILE':
        root_logger.debug('reading ccache data from file "%s"', name)
        # context manager guarantees the file is closed even if read fails
        with open(name) as src:
            return src.read()
    # use %-formatting so the message is actually interpolated; the
    # original passed the values as extra ValueError args
    raise ValueError('ccache scheme "%s" unsupported (%s)'
                     % (scheme, ccache_name))
def ldap_enable(self, name, fqdn, dm_password=None, ldap_suffix='',
                config=None):
    """Enable a service's startup entry in LDAP.

    If the per-master container entry already exists it is marked
    enabled by adding 'enabledService' to ipaConfigString; otherwise a
    new container entry is created with the service's start order.

    :param name: service name, key into SERVICE_LIST
    :param fqdn: FQDN of this master
    :param dm_password: unused, kept for interface compatibility
    :param ldap_suffix: base DN, must be a DN instance
    :param config: optional extra ipaConfigString values for a new entry
    """
    assert isinstance(ldap_suffix, DN)
    if config is None:
        # avoid the mutable-default-argument pitfall of 'config=[]'
        config = []

    self.disable()
    entry_name = DN(('cn', name), ('cn', fqdn), ('cn', 'masters'),
                    ('cn', 'ipa'), ('cn', 'etc'), ldap_suffix)

    # enable disabled service
    try:
        entry = api.Backend.ldap2.get_entry(entry_name,
                                            ['ipaConfigString'])
    except errors.NotFound:
        pass
    else:
        if any(u'enabledservice' == val.lower()
               for val in entry.get('ipaConfigString', [])):
            root_logger.debug("service %s startup entry already enabled",
                              name)
            return

        entry.setdefault('ipaConfigString', []).append(u'enabledService')

        try:
            api.Backend.ldap2.update_entry(entry)
        except errors.EmptyModlist:
            root_logger.debug("service %s startup entry already enabled",
                              name)
            return
        except Exception:
            # narrowed from a bare 'except:'; still log and re-raise
            root_logger.debug("failed to enable service %s startup entry",
                              name)
            raise

        root_logger.debug("service %s startup entry enabled", name)
        return

    # Entry did not exist -- create it with the configured start order.
    order = SERVICE_LIST[name][1]
    entry = api.Backend.ldap2.make_entry(
        entry_name,
        objectclass=["nsContainer", "ipaConfigObject"],
        cn=[name],
        ipaconfigstring=[
            "enabledService", "startOrder " + str(order)] + config,
    )

    try:
        api.Backend.ldap2.add_entry(entry)
    except (errors.DuplicateEntry) as e:
        root_logger.debug("failed to add service %s startup entry", name)
        raise e
def restart(self, instance_name="", capture_output=True, wait=True):
    """Restart httpd, falling back to an explicit stop/start cycle.

    httpd may fail to rebind its ports on a plain restart
    (https://bugzilla.redhat.com/show_bug.cgi?id=845405), so on a
    CalledProcessError we wait, stop, wait again, and start.
    """
    parent = super(RedHatHTTPDService, self)
    try:
        parent.restart(instance_name, capture_output, wait)
    except ipautil.CalledProcessError:
        root_logger.debug("%s restart failed, try to stop&start again",
                          self.service_name)
        time.sleep(5)
        self.stop(instance_name, capture_output)
        time.sleep(5)
        self.start(instance_name, capture_output, wait)
def single_request(self, host, handler, request_body, verbose=0):
    """Send one XML-RPC request and return the parsed response.

    Loops while GSSAPI/Kerberos authentication negotiation
    (_auth_complete) is still in progress, re-sending the request.
    On a dropped keep-alive connection or any unexpected error the
    connection is closed so the transport can reconnect cleanly.
    """
    # Based on Python 2.7's xmllib.Transport.single_request
    try:
        h = self.make_connection(host)

        if verbose:
            h.set_debuglevel(1)

        self.get_auth_info()

        while True:
            if six.PY2:
                # Python 2 httplib API: send the request piecewise.
                # pylint: disable=no-value-for-parameter
                self.send_request(h, handler, request_body)
                # pylint: enable=no-value-for-parameter
                self.send_host(h, host)
                self.send_user_agent(h)
                self.send_content(h, request_body)
                response = h.getresponse(buffering=True)
            else:
                self.__send_request(h, host, handler, request_body,
                                    verbose)
                response = h.getresponse()

            if response.status != 200:
                # drain the body so the connection can be reused
                if (response.getheader("content-length", 0)):
                    response.read()

                if response.status == 401:
                    # auth may need another round trip; retry if so
                    if not self._auth_complete(response):
                        continue

                raise ProtocolError(host + handler,
                                    response.status, response.reason,
                                    response.msg)

            self.verbose = verbose

            if not self._auth_complete(response):
                continue

            return self.parse_response(response)
    except gssapi.exceptions.GSSError as e:
        self._handle_exception(e)
    except RemoteDisconnected:
        # keep-alive connection was terminated by remote peer, close
        # connection and let transport handle reconnect for us.
        self.close()
        root_logger.debug("HTTP server has closed connection (%s)", host)
        raise
    except BaseException as e:
        # Unexpected exception may leave connections in a bad state.
        self.close()
        root_logger.debug("HTTP connection destroyed (%s)",
                          host, exc_info=True)
        raise
def add_sidgen_plugin(self, suffix):
    """Configure the IPA SIDGEN plugin unless it already exists.

    Checks for the plugin's config entry; when it is absent, the
    configuration is loaded from the ipa-sidgen-conf.ldif template.
    """
    plugin_dn = DN('cn=IPA SIDGEN,cn=plugins,cn=config')
    try:
        api.Backend.ldap2.get_entry(plugin_dn)
    except errors.NotFound:
        # entry missing -- create it from the LDIF template
        self._ldap_mod('ipa-sidgen-conf.ldif', {'SUFFIX': suffix})
    else:
        root_logger.debug("sidgen plugin is already configured")
def add_extdom_plugin(self, suffix):
    """Configure the extdom extended-op plugin unless it already exists.

    Checks for the plugin's config entry; when it is absent, the
    configuration is loaded from the ipa-extdom-extop-conf.ldif
    template.
    """
    plugin_dn = DN('cn=ipa_extdom_extop,cn=plugins,cn=config')
    try:
        api.Backend.ldap2.get_entry(plugin_dn)
    except errors.NotFound:
        # entry missing -- create it from the LDIF template
        self._ldap_mod('ipa-extdom-extop-conf.ldif', {'SUFFIX': suffix})
    else:
        root_logger.debug("extdom plugin is already configured")
def run_script(main_function, operation_name, log_file_name=None,
               fail_message=None):
    """Run the given function as a command-line utility

    This function:

    - Runs the given function
    - Formats any errors
    - Exits with the appropriate code

    A SystemExit with code None/0 raised by main_function is treated
    as success; any other exception is logged at DEBUG (with its
    traceback) and re-raised to the outer handler, which converts it
    to a user-facing message and exit code via handle_error().

    :param main_function: Function to call
    :param log_file_name: Name of the log file (displayed on unexpected
        errors)
    :param operation_name: Name of the script
    :param fail_message: Optional message displayed on failure
    """

    root_logger.info('Starting script: %s', operation_name)
    try:
        try:
            return_value = main_function()
        except BaseException as e:
            if (isinstance(e, SystemExit) and
                    (e.code is None or
                     e.code == 0)):  # pylint: disable=no-member
                # Not an error after all
                root_logger.info('The %s command was successful',
                                 operation_name)
            else:
                # Log at the DEBUG level, which is not output to the console
                # (unless in debug/verbose mode), but is written to a logfile
                # if one is open.
                tb = sys.exc_info()[2]
                root_logger.debug('\n'.join(traceback.format_tb(tb)))
                root_logger.debug('The %s command failed, exception: %s: %s',
                                  operation_name, type(e).__name__, e)
                if fail_message and not isinstance(e, SystemExit):
                    print(fail_message)
            # re-raise for the outer handler to turn into an exit code
            raise
        else:
            # main_function returned normally; a truthy return value is
            # treated as a failure exit code
            if return_value:
                root_logger.info('The %s command failed, return value %s',
                                 operation_name, return_value)
            else:
                root_logger.info('The %s command was successful',
                                 operation_name)
            sys.exit(return_value)

    except BaseException as error:
        message, exitcode = handle_error(error, log_file_name)
        if message:
            print(message, file=sys.stderr)
        sys.exit(exitcode)
def restore(self):
    """Put the original NSS database files back into place.

    For each NSS file under secdir: the current file (if any) is kept
    aside as '<path>.ipasave', then the '.orig' backup (if any) is
    renamed back to the live path.  OSErrors are logged and skipped.
    """
    for fname in NSS_FILES:
        current = os.path.join(self.secdir, fname)
        original = current + '.orig'
        saved = current + '.ipasave'
        try:
            if os.path.exists(current):
                os.rename(current, saved)
            if os.path.exists(original):
                os.rename(original, current)
        except OSError as e:
            root_logger.debug(e)