def __init__(self, path, name, log):
    """Parse a single OpenLDAP overlay (olcOverlay) config entry from LDIF.

    :param path: Directory containing the overlay LDIF file
    :param name: File name of the overlay LDIF file
    :param log: Logger used for debug output
    """
    self.log = log
    self.log.debug(f"olOverlay path -> {path}/{name}")
    entries = ldif_parse(path, name)
    # An overlay config file must contain exactly one LDIF entry.
    assert len(entries) == 1
    self.config = entries.pop()
    self.log.debug(f"{self.config}")

    # olcOverlay
    # Values look like "{0}memberof" - strip the "{n}" ordering prefix.
    self.name = ensure_str(self.config[1]['olcOverlay'][0]).split('}', 1)[1]
    self.classes = ensure_list_str(self.config[1]['objectClass'])
    self.log.debug(f"{self.name} {self.classes}")

    # Classify the overlay by its objectClass; for refint/unique also
    # capture the attributes the overlay operates on.
    if 'olcMemberOf' in self.classes:
        self.otype = olOverlayType.MEMBEROF
        #
    elif 'olcRefintConfig' in self.classes:
        self.otype = olOverlayType.REFINT
        # olcRefintAttribute
        self.attrs = ensure_list_str(self.config[1]['olcRefintAttribute'])
    elif 'olcUniqueConfig' in self.classes:
        self.otype = olOverlayType.UNIQUE
        # olcUniqueURI
        self.attrs = ensure_list_str([
            # This is a ldap:///?uid?sub, so split ? [1] will give uid.
            attr.split('?')[1]
            for attr in ensure_list_str(self.config[1]['olcUniqueURI'])
        ])
    else:
        self.otype = olOverlayType.UNKNOWN
def get_attr_vals_utf8_l(self, key, use_json=False):
    """Get all values of an attribute as lowercased utf8 strings.

    :param key: An attribute name
    :type key: str
    :returns: A list of str values, each lowercased
    :raises: ValueError - if instance is offline
    """
    values = ensure_list_str(self.get_attr_vals(key))
    return [value.lower() for value in values]
def get_attr_vals_utf8(self, key):
    """Get all values of an attribute as utf8 strings.

    :param key: An attribute name
    :type key: str
    :returns: A list of str values
    :raises: ValueError - if instance is offline
    """
    raw_values = self.get_attr_vals(key)
    return ensure_list_str(raw_values)
def get_attrs_vals_utf8(self, keys, use_json=False):
    """Get the values of several attributes at once, as utf8 strings.

    :param keys: Attribute names to fetch
    :type keys: list of str
    :returns: Dict mapping each attribute name to a list of str values
    :raises: ValueError - if instance is offline
    """
    self._log.debug("%s get_attrs_vals_utf8(%r)" % (self._dn, keys))
    if self._instance.state != DIRSRV_STATE_ONLINE:
        raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
    # Single BASE search fetching exactly the requested attributes.
    results = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE,
                                          self._object_filter,
                                          attrlist=keys,
                                          serverctrls=self._server_controls,
                                          clientctrls=self._client_controls,
                                          escapehatch='i am sure')
    value_set = results[0].getValuesSet(keys)
    return {attr: ensure_list_str(raw) for (attr, raw) in value_set.items()}
def _jsonify(self, fn, *args, **kwargs):
    """Call *fn* and wrap its result in a json-serialisable dict.

    :param fn: A callable returning a mapping of attribute -> raw values
    :returns: Dict with the entry "dn" and its "attrs", all values as str
    """
    # This needs to map all the values to ensure_str
    attrs = fn(*args, **kwargs)
    str_attrs = {}
    for k in attrs:
        str_attrs[ensure_str(k)] = ensure_list_str(attrs[k])
    # NOTE(review): removed two stray debug print() calls that wrote
    # 'json response' and the entire response dict to stdout on every call.
    return {"dn": ensure_str(self._dn), "attrs": str_attrs}
def __init__(self, schemas, log):
    """Parse a set of OpenLDAP (cn=schema) LDIF files into attribute and
    objectClass definitions.

    :param schemas: Paths of the schema LDIF files to parse
    :param log: Logger used for debug output
    """
    self.log = log
    self.log.debug(f"olSchemas -> {schemas}")
    self.raw_schema = []
    for schema_path in schemas:
        parsed = ldif_parse(schema_path)
        # Each schema file must contain exactly one LDIF entry.
        assert len(parsed) == 1
        self.raw_schema.append(parsed.pop())
    # Flatten the per-file definitions into single attribute/class lists.
    self.raw_attrs = []
    self.raw_classes = []
    for (_cn, entry) in self.raw_schema:
        self.raw_attrs.extend(ensure_list_str(entry['olcAttributeTypes']))
        self.raw_classes.extend(ensure_list_str(entry['olcObjectClasses']))
    self.attrs = [olAttribute(raw, self.log) for raw in self.raw_attrs]
    self.classes = [olClass(raw, self.log) for raw in self.raw_classes]
def _jsonify(self, fn, *args, **kwargs):
    """Call *fn* with use_json=True and return the result as a JSON string.

    :param fn: A callable returning a mapping of attribute -> raw values
    :returns: JSON string with "type", "dn" and "attrs" keys
    """
    # Force the flag through kwargs: the previous form
    # fn(use_json=True, *args, **kwargs) raised
    # "TypeError: got multiple values for keyword argument" whenever the
    # caller had already supplied use_json.
    kwargs['use_json'] = True
    # This needs to map all the values to ensure_str
    attrs = fn(*args, **kwargs)
    str_attrs = {}
    for k in attrs:
        # Lowercase the keys so lookups against the JSON output are
        # predictable regardless of the server's attribute-name casing.
        str_attrs[ensure_str(k).lower()] = ensure_list_str(attrs[k])
    return json.dumps({"type": "entry",
                       "dn": ensure_str(self._dn),
                       "attrs": str_attrs}, indent=4)
def _jsonify(self, fn, *args, **kwargs):
    """Call *fn* and serialise its result to a JSON entry string.

    :param fn: A callable returning a mapping of attribute -> raw values
    :returns: JSON string with "type", "dn" and "attrs" keys
    """
    # Every key and value is passed through ensure_str/ensure_list_str so
    # the payload is json-serialisable.
    attrs = fn(*args, **kwargs)
    str_attrs = {ensure_str(key): ensure_list_str(attrs[key]) for key in attrs}
    payload = {"type": "entry", "dn": ensure_str(self._dn), "attrs": str_attrs}
    return json.dumps(payload)
def get_all_attrs_utf8(self, use_json=False):
    """Get a dictionary of every attribute of the entry, as utf8 strings.

    :returns: Dict with real attributes and operational attributes
    :raises: ValueError - if instance is offline
    """
    self._log.debug("%s get_all_attrs" % (self._dn))
    if self._instance.state != DIRSRV_STATE_ONLINE:
        raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
    # retrieving real(*) and operational attributes(+)
    results = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE,
                                          self._object_filter,
                                          attrlist=["*", "+"],
                                          serverctrls=self._server_controls,
                                          clientctrls=self._client_controls,
                                          escapehatch='i am sure')
    # getting dict from 'entry' object
    entry_data = results[0].data
    return {attr: ensure_list_str(raw) for (attr, raw) in entry_data.items()}
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: 'general' section of the install options (fqdn, systemd,
                    selinux, defaults, start ...)
    :param slapd: 'slapd' section of the install options (paths, ports,
                  root dn/pw, instance name ...)
    :param backends: List of backend property dicts, each with at least
                     'nsslapd-suffix'
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the present of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Rewrite the template's %KEY% markers to {key} so str.format can fill them.
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                           'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    # Check if we are in a container, if so don't use /dev/shm for the db home dir
    # as containers typically don't allocate enough space for dev/shm and we don't
    # want to unexpectedly break the server after an upgrade
    #
    # If we know we are are in a container, we don't need to re-detect on systemd.
    # It actually turns out if you add systemd-detect-virt, that pulls in system
    # which subsequently breaks containers starting as instance.start then believes
    # it COULD check the ds status. The times we need to check for systemd are mainly
    # in other environments that use systemd natively in their containers.
    container_result = 1
    if not self.containerised:
        container_result = subprocess.run(["systemd-detect-virt", "-c"],
                                          stdout=subprocess.PIPE)
    if self.containerised or container_result.returncode == 0:
        # In a container, set the db_home_dir to the db path
        self.log.debug("Container detected setting db home directory to db directory.")
        slapd['db_home_dir'] = slapd['db_dir']

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        dse_fmt = dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            instance_name=slapd['instance_name'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as they can validly give
            # us a *hashed* input.
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
            db_home_dir=slapd['db_home_dir'],
            db_lib=slapd['db_lib'],
            ldapi_enabled="on",
            ldapi=slapd['ldapi'],
            ldapi_autobind="on",
        )
        file_dse.write(dse_fmt)

    self.log.info("Create file system structures ...")
    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir', 'ldif_dir',
                 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to down the directory under db too for .restore to work.
    # During a restore, the db dir is deleted and recreated, which is why we need
    # to own it for a restore.
    #
    # However, in a container, we can't always guarantee this due to how the volumes
    # work and are mounted. Specifically, if we have an anonymous volume we will
    # NEVER be able to own it, but in a true deployment it is reasonable to expect
    # we DO own it. Thus why we skip it in this specific context
    if not self.containerised:
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
                        slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        result = subprocess.run(["systemctl", "enable",
                                 "dirsrv@%s" % slapd['instance_name']],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        args = ' '.join(ensure_list_str(result.args))
        stdout = ensure_str(result.stdout)
        stderr = ensure_str(result.stderr)
        # Systemd encodes some odd charecters into it's symlink output on newer versions which
        # can trip up the logger.
        self.log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode("utf-8"))

        # Setup tmpfiles_d
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['run_dir'],
                                                       slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'].replace("slapd-" + slapd['instance_name'], ""),
                slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['lock_dir'],
                                                       slapd['user'], slapd['group']))
    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s", slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if it all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose, containerised=self.containerised)
    if self.containerised:
        ds_instance.systemd_override = general['systemd']

    # By default SUSE does something extremely silly - it creates a hostname
    # that CANT be resolved by DNS. As a result this causes all installs to
    # fail. We need to guarantee that we only connect to localhost here, as
    # it's the only stable and guaranteed way to connect to the instance
    # at this point.
    #
    # Use ldapi which would prevent the need
    # to configure a temp root pw in the setup phase.
    args = {
        SER_HOST: "localhost",
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix'],
        SER_LDAPI_ENABLED: 'on',
        SER_LDAPI_SOCKET: slapd['ldapi'],
        SER_LDAPI_AUTOBIND: 'on'
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(),
             "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        self.log.info("Create self-signed certificate database ...")
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)
        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        self.log.info("Perform SELinux labeling ...")
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                         'run_dir', 'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])

        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
    self.log.debug(f"libfaketime installed ={'libfaketime' in sys.modules}")
    assert_c(not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
             "libfaketime python module is incompatible with ASAN build.")
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.

    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security; on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Before we create any backends, create any extra default indexes that may be
    # dynamically provisioned, rather than from template-dse.ldif. Looking at you
    # entryUUID (requires rust enabled).
    #
    # Indexes defaults to default_index_dn
    indexes = Indexes(ds_instance)
    if ds_instance.ds_paths.rust_enabled:
        indexes.create(properties={
            'cn': 'entryUUID',
            'nsSystemIndex': 'false',
            'nsIndexType': ['eq', 'pres'],
        })

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        self.log.info(f"Create database backend: {backend['nsslapd-suffix']} ...")
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry', False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            # Set basic ACIs
            c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            # Create the suffix entry whose shape matches the RDN attribute type.
            suffix_rdn_attr = backend['nsslapd-suffix'].split('=')[0].lower()
            if suffix_rdn_attr == 'dc':
                domain = create_base_domain(ds_instance, backend['nsslapd-suffix'])
                domain.add('aci', dc_aci)
            elif suffix_rdn_attr == 'o':
                org = create_base_org(ds_instance, backend['nsslapd-suffix'])
                org.add('aci', o_aci)
            elif suffix_rdn_attr == 'ou':
                orgunit = create_base_orgunit(ds_instance, backend['nsslapd-suffix'])
                orgunit.add('aci', ou_aci)
            elif suffix_rdn_attr == 'cn':
                cn = create_base_cn(ds_instance, backend['nsslapd-suffix'])
                cn.add('aci', cn_aci)
            elif suffix_rdn_attr == 'c':
                c = create_base_c(ds_instance, backend['nsslapd-suffix'])
                c.add('aci', c_aci)
            else:
                # Unsupported rdn
                raise ValueError(
                    "Suffix RDN '{}' in '{}' is not supported. Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'".format(
                        suffix_rdn_attr, backend['nsslapd-suffix']))

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    if len(backends) > 0:
        self.log.debug("Adding sasl maps for suffix %s" % backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(properties={
            'cn': 'rfc 2829 u syntax',
            'nsSaslMapRegexString': '^u:\\(.*\\)',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=\\1)'
        })
        # I think this is for LDAPI
        saslmappings.create(properties={
            'cn': 'uid mapping',
            'nsSaslMapRegexString': '^[^:@]+$',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=&)'
        })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    self.log.info("Perform post-installation tasks ...")
    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()

    self.log.debug(" 🎉 Instance setup complete")
def get_consumer_maxcsn(self, binddn=None, bindpw=None):
    """Attempt to get the consumer's maxcsn from its database RUV entry

    :param binddn: Specifies a specific bind DN to use when contacting the remote consumer
    :type binddn: str
    :param bindpw: Password for the bind DN
    :type bindpw: str
    :returns: CSN string if found, otherwise "Unavailable" is returned
    """
    host = self.get_attr_val_utf8(AGMT_HOST)
    port = self.get_attr_val_utf8(AGMT_PORT)
    suffix = self.get_attr_val_utf8(REPL_ROOT)
    protocol = self.get_attr_val_utf8('nsds5replicatransportinfo').lower()
    result_msg = "Unavailable"

    # If we are using LDAPI we need to provide the credentials, otherwise
    # use the existing credentials
    if binddn is None:
        binddn = self._instance.binddn
    if bindpw is None:
        bindpw = self._instance.bindpw

    # Get the replica id from supplier to compare to the consumer's rid
    from lib389.replica import Replicas
    replicas = Replicas(self._instance)
    replica = replicas.get(suffix)
    rid = replica.get_attr_val_utf8(REPL_ID)

    # Open a connection to the consumer.
    # NOTE(review): args_instance is a module-level dict that is mutated
    # here before being copied - presumably shared legacy state; verify
    # concurrent callers cannot interleave these writes.
    consumer = DirSrv(verbose=self._instance.verbose)
    args_instance[SER_HOST] = host
    if protocol == "ssl" or protocol == "ldaps":
        args_instance[SER_SECURE_PORT] = int(port)
    else:
        args_instance[SER_PORT] = int(port)
    args_instance[SER_ROOT_DN] = binddn
    args_instance[SER_ROOT_PW] = bindpw
    args_standalone = args_instance.copy()
    consumer.allocate(args_standalone)
    try:
        consumer.open()
    except ldap.INVALID_CREDENTIALS as e:
        # Bad credentials are fatal - propagate to the caller.
        raise (e)
    except ldap.LDAPError as e:
        # Any other connection failure is non-fatal: report "Unavailable".
        self._log.debug(
            'Connection to consumer ({}:{}) failed, error: {}'.format(
                host, port, e))
        return result_msg

    # Search for the tombstone RUV entry
    try:
        entry = consumer.search_s(suffix, ldap.SCOPE_SUBTREE,
                                  REPLICA_RUV_FILTER, ['nsds50ruv'])
        if not entry:
            self._log.debug(
                "Failed to retrieve database RUV entry from consumer")
        else:
            elements = ensure_list_str(entry[0].getValues('nsds50ruv'))
            for ruv in elements:
                # Match only the RUV element for our replica id; a complete
                # element has 5 whitespace-separated parts, the 5th being
                # the maxcsn.
                if ('replica %s ' % rid) in ruv:
                    ruv_parts = ruv.split()
                    if len(ruv_parts) == 5:
                        result_msg = ruv_parts[4]
                    break
    except ldap.INVALID_CREDENTIALS as e:
        raise (e)
    except ldap.LDAPError as e:
        self._log.debug('Failed to search for the suffix ' +
                        '({}) consumer ({}:{}) failed, error: {}'.format(
                            suffix, host, port, e))
    consumer.close()
    return result_msg
def remove_ds_instance(dirsrv, force=False):
    """
    This will delete the instance as it is defined. This must be a local instance. This
    is designed to raise exceptions quickly and often if *any* error is hit. However,
    this can be run repeatedly, and only when the instance is truly removed, will this
    program fail to run further.

    :param dirsrv: A directory server instance
    :type dirsrv: DirSrv
    :param force: A psychological aid, for people who think force means do something, harder. Does
                  literally nothing in this program because state machines are a thing.
    :type force: bool
    """
    _log = dirsrv.log.getChild('remove_ds')
    _log.debug("Removing instance %s" % dirsrv.serverid)

    # Copy all the paths we are about to tamper with
    remove_paths = {}
    remove_paths['backup_dir'] = dirsrv.ds_paths.backup_dir
    remove_paths['cert_dir'] = dirsrv.ds_paths.cert_dir
    remove_paths['config_dir'] = dirsrv.ds_paths.config_dir
    remove_paths['db_dir'] = dirsrv.ds_paths.db_dir
    remove_paths['db_home_dir'] = dirsrv.ds_paths.db_home_dir
    remove_paths['db_dir_parent'] = dirsrv.ds_paths.db_dir + "/../"
    ### WARNING: The changelogdb isn't removed. we assume it's in:
    # db_dir ../changelogdb. So remove that too!
    # abspath will resolve the ".." down.
    remove_paths['changelogdb_dir'] = dirsrv.get_changelog_dir()
    remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir
    remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir
    remove_paths['log_dir'] = dirsrv.ds_paths.log_dir
    # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir
    remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir
    remove_paths['etc_sysconfig'] = "%s/sysconfig/dirsrv-%s" % (
        dirsrv.ds_paths.sysconf_dir, dirsrv.serverid)

    tmpfiles_d_path = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf"

    # These are handled in a special way.
    dse_ldif_path = os.path.join(dirsrv.ds_paths.config_dir, 'dse.ldif')

    # Check the marker exists. If it *does not* warn about this, and say that to
    # force removal you should touch this file.
    _log.debug("Checking for instance marker at %s" % dse_ldif_path)
    if not os.path.exists(dse_ldif_path):
        _log.info("Instance configuration not found, no action will be taken")
        _log.info("If you want us to cleanup anyway, recreate '%s'" % dse_ldif_path)
        return
    _log.debug("Found instance marker at %s! Proceeding to remove ..." % dse_ldif_path)

    # Stop the instance (if running) and now we know it really does exist
    # and hopefully have permission to access it ...
    # NOTE(review): this log/stop sequence appeared twice back to back;
    # the duplicate has been removed so the instance is only stopped once.
    _log.debug("Stopping instance %s" % dirsrv.serverid)
    dirsrv.stop()

    ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!

    # Remove these paths:
    # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
    #             'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
    for path_k in remove_paths:
        _log.debug("Removing %s" % remove_paths[path_k])
        shutil.rmtree(remove_paths[path_k], ignore_errors=True)

    # Remove parent (/var/lib/dirsrv/slapd-INST)
    shutil.rmtree(remove_paths['db_dir'].replace('db', ''), ignore_errors=True)

    # We can not assume we have systemd ...
    if dirsrv.ds_paths.with_systemd:
        # Remove the systemd symlink
        _log.debug("Removing the systemd symlink")
        result = subprocess.run(
            ["systemctl", "disable", "dirsrv@{}".format(dirsrv.serverid)],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        args = ' '.join(ensure_list_str(result.args))
        stdout = ensure_str(result.stdout)
        stderr = ensure_str(result.stderr)
        _log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}")

        _log.debug("Removing %s" % tmpfiles_d_path)
        try:
            os.remove(tmpfiles_d_path)
        except OSError as e:
            _log.debug("Failed to remove tmpfile: " + str(e))

    # Nor can we assume we have selinux. Try docker sometime ;)
    if dirsrv.ds_paths.with_selinux:
        # Remove selinux port label
        _log.debug("Removing the port labels")
        selinux_label_port(dirsrv.port, remove_label=True)

        # This is a compatability with ancient installs, all modern install have tls port
        if dirsrv.sslport is not None:
            selinux_label_port(dirsrv.sslport, remove_label=True)

    # If this was the last instance, remove the ssca instance
    insts = dirsrv.list(all=True)
    if len(insts) == 0:
        ssca = NssSsl(dbpath=dirsrv.get_ssca_dir())
        ssca.remove_db()

    ### ANY NEW REMOVAL ACTIONS MUST BE ABOVE THIS LINE!!!

    # Finally means FINALLY, the last thing, the LAST LAST thing. By doing this absolutely
    # last, it means that we can have any failure above, and continue to re-run until resolved
    # because this instance marker (dse.ldif) continues to exist!
    # Move the config_dir to config_dir.removed
    config_dir = dirsrv.ds_paths.config_dir
    config_dir_rm = "{}.removed".format(config_dir)

    if os.path.exists(config_dir_rm):
        _log.debug("Removing previously existed %s" % config_dir_rm)
        shutil.rmtree(config_dir_rm)

    assert_c(not os.path.exists(config_dir_rm))

    # That's it, everything before this MUST have suceeded, so now we can move the
    # config dir (containing dse.ldif, the marker) out of the way.
    _log.debug("Moving %s to %s" % (config_dir, config_dir_rm))
    try:
        shutil.move(config_dir, config_dir_rm)
    except FileNotFoundError:
        pass

    # DO NOT PUT ANY CODE BELOW THIS COMMENT BECAUSE THAT WOULD VIOLATE THE ASSERTIONS OF THE
    # ABOVE CODE.

    # Done!
    _log.debug("Complete")