def test_get_indexes(topology_st, backend):
    """Test basic get_indexes method functionality

    :id: 4e01d9e8-c355-4dd4-b7d9-a1d26afed768
    :setup: Standalone instance
    :steps:
        1. Create a backend
        2. Get an Indexes object instance using get_indexes function
        3. Directly define an Indexes instance
        4. Assert that the objects are the same
    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. Operation should be successful
        4. The entries should match
    """
    log.info('Use get_indexes method to get Indexes object')
    backend_indexes = backend.get_indexes()

    log.info('Directly define an Indexes instance')
    indexes = Indexes(topology_st.standalone, "cn=index,{}".format(backend.dn))

    log.info('Check the objects are the same')
    # Both collections must expose at least one index at the same DN
    assert any(lhs.dn == rhs.dn
               for lhs, rhs in zip(indexes.list(), backend_indexes.list()))
def test_import_with_index(topo, _import_clean):
    """Add an index, then import via cn=tasks

    :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
    :setup: Standalone Instance
    :steps:
        1. Creating the room number index
        2. Importing online
        3. Import is done -- verifying that it worked
    :expected results:
        1. Operation successful
        2. Operation successful
        3. Operation successful
    """
    place = topo.standalone.dbdir

    def _room_index_on_disk():
        # The index database file only appears once the index is built
        return f'{place}/userRoot/roomNumber.db' in glob.glob(
            f'{place}/userRoot/*.db', recursive=True)

    assert not _room_index_on_disk()

    # Creating the room number index
    Indexes(topo.standalone).create(properties={
        'cn': 'roomNumber',
        'nsSystemIndex': 'false',
        'nsIndexType': 'eq'
    })
    topo.standalone.restart()

    # Importing online
    _import_online(topo, 5)

    # Import is done -- verifying that it worked
    assert _room_index_on_disk()
def test_delete(topology_st, backend):
    """Delete the backend and check that mapping tree and index were deleted too

    :id: d44dac3a-dae8-48e8-bd43-5be15237d093
    :setup: Standalone instance
    :steps:
        1. Create a backend
        2. Delete the backend
        3. Check all backend indexes were deleted
        4. Check backend mapping tree was deleted
    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. There should be no backend indexes
        4. There should be no backend mapping tree
    """
    log.info('Delete a backend')
    backend.delete()

    log.info("Check that all indices are deleted")
    index_basedn = "cn=index,{}".format(backend.dn)
    remaining = Indexes(topology_st.standalone, index_basedn).list()
    assert not remaining

    # The mapping tree entry must be gone as well
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        MappingTrees(topology_st.standalone).get(BACKEND_NAME_1)
def setup_attruniq_index_be_import(topology_st_fn):
    """Enable Attribute Uniqueness, disable indexes and
    import 120000 entries to the default backend
    """
    inst = topology_st_fn.standalone
    inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
    inst.config.set('nsslapd-plugin-logging', 'on')
    inst.restart()

    # Attributes that are both enforced unique and explicitly un-indexed below
    managed_attrs = ('uid', 'cn', 'sn', 'uidNumber', 'gidNumber',
                     'homeDirectory', 'givenName', 'description')

    attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
    attruniq.create(properties={'cn': 'attruniq'})
    for attr in managed_attrs:
        attruniq.add_unique_attribute(attr)
    attruniq.add_unique_subtree(DEFAULT_SUFFIX)
    attruniq.enable_all_subtrees()
    attruniq.enable()

    # Disable indexing for the same attributes
    indexes = Indexes(inst)
    for attr in managed_attrs:
        indexes.ensure_state(properties={
            'cn': attr,
            'nsSystemIndex': 'false',
            'nsIndexType': 'none'})

    # Raise the BDB lock budget so the large import succeeds
    bdb_config = BDB_LDBMConfig(inst)
    bdb_config.replace("nsslapd-db-locks", "130000")
    inst.restart()

    import_ldif = inst.get_ldif_dir() + '/perf_import.ldif'

    # Valid online import
    import_task = ImportTask(inst)
    dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
    import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
    import_task.wait()
    assert import_task.is_complete()
def _create_index_entry(topology_st):
    """Create index entries.

    :id: 9c93aec8-b87d-11e9-93b0-8c16451d917b
    :setup: Standalone
    :steps:
        1. Test index entries can be created.
    :expected results:
        1. Pass
    """
    index_factory = Indexes(topology_st.standalone)
    # Create one system index per (attribute, type) pair in the fixture list
    for attr_name, idx_type in LIST_CN_INDEX:
        index_factory.create(properties={
            'cn': attr_name,
            'nsSystemIndex': 'true',
            'nsIndexType': idx_type,
        })
def test_default_index_list(topology_st):
    """Create a default index and verify Indexes.list() reflects both its
    creation and its deletion.

    :param topology_st: Standalone topology fixture providing the instance.
    """
    indexes = Indexes(topology_st.standalone)

    def _modifytimestamp_listed():
        # Match on the dn prefix: the full dn also carries the index subtree
        return any(i.dn.startswith('cn=modifytimestamp') for i in indexes.list())

    # create and delete a default index.
    index = indexes.create(properties={
        'cn': 'modifytimestamp',
        'nsSystemIndex': 'false',
        'nsIndexType': 'eq'
        })

    assert _modifytimestamp_listed()

    index.delete()
    assert not _modifytimestamp_listed()
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: dict of general (host-level) settings, e.g.
                    'full_machine_name', 'systemd', 'selinux', 'start',
                    'defaults'.
    :param slapd: dict of instance-level settings (paths, ports, root DN,
                  credentials, instance name, ...).
    :param backends: list of backend property dicts; backends[0]['nsslapd-suffix']
                     seeds plugin defaults and the default SASL maps.
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the present of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        # Directory may already exist; that is fine here.
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Read the template and turn %KEY% placeholders into {key} format fields.
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                           'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    # Check if we are in a container, if so don't use /dev/shm for the db home dir
    # as containers typically don't allocate enough space for dev/shm and we don't
    # want to unexpectedly break the server after an upgrade
    #
    # If we know we are are in a container, we don't need to re-detect on systemd.
    # It actually turns out if you add systemd-detect-virt, that pulls in system
    # which subsequently breaks containers starting as instance.start then believes
    # it COULD check the ds status. The times we need to check for systemd are mainly
    # in other environments that use systemd natively in their containers.
    container_result = 1
    if not self.containerised:
        container_result = subprocess.run(["systemd-detect-virt", "-c"],
                                          stdout=subprocess.PIPE)
    # NOTE: when self.containerised is True the `or` short-circuits, so
    # container_result is only inspected when it is a CompletedProcess.
    if self.containerised or container_result.returncode == 0:
        # In a container, set the db_home_dir to the db path
        self.log.debug(
            "Container detected setting db home directory to db directory."
        )
        slapd['db_home_dir'] = slapd['db_dir']

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        dse_fmt = dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            instance_name=slapd['instance_name'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as they can validly give
            # us a *hashed* input.
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
            db_home_dir=slapd['db_home_dir'],
            db_lib=slapd['db_lib'],
            ldapi_enabled="on",
            ldapi=slapd['ldapi'],
            ldapi_autobind="on",
        )
        file_dse.write(dse_fmt)

    self.log.info("Create file system structures ...")
    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir', 'ldif_dir',
                 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to down the directory under db too for .restore to work.
    # During a restore, the db dir is deleted and recreated, which is why we need
    # to own it for a restore.
    #
    # However, in a container, we can't always guarantee this due to how the volumes
    # work and are mounted. Specifically, if we have an anonymous volume we will
    # NEVER be able to own it, but in a true deployment it is reasonable to expect
    # we DO own it. Thus why we skip it in this specific context
    if not self.containerised:
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(
        os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
        slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'],
                           'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'],
                           'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        result = subprocess.run(
            ["systemctl", "enable", "dirsrv@%s" % slapd['instance_name']],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        args = ' '.join(ensure_list_str(result.args))
        stdout = ensure_str(result.stdout)
        stderr = ensure_str(result.stderr)
        # Systemd encodes some odd characters into its symlink output on newer versions which
        # can trip up the logger, hence the explicit utf-8 encode here.
        self.log.debug(
            f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode(
                "utf-8"))

        # Setup tmpfiles_d so the run/lock dirs are recreated after reboot
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd[
            'instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['run_dir'], slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'].replace(
                    "slapd-" + slapd['instance_name'], ""),
                slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'], slapd['user'], slapd['group']))
    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s",
                   slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if it all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose, containerised=self.containerised)
    if self.containerised:
        ds_instance.systemd_override = general['systemd']

    # By default SUSE does something extremely silly - it creates a hostname
    # that CANT be resolved by DNS. As a result this causes all installs to
    # fail. We need to guarantee that we only connect to localhost here, as
    # it's the only stable and guaranteed way to connect to the instance
    # at this point.
    #
    # Use ldapi which would prevent the need
    # to configure a temp root pw in the setup phase.
    args = {
        SER_HOST: "localhost",
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix'],
        SER_LDAPI_ENABLED: 'on',
        SER_LDAPI_SOCKET: slapd['ldapi'],
        SER_LDAPI_AUTOBIND: 'on'
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(),
             "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        self.log.info("Create self-signed certificate database ...")
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(
                months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(
                        dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        # Issue this instance's server cert, signed by the local CA.
        csr = tlsdb.create_rsa_key_and_csr(
            alt_names=[general['full_machine_name']])
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)
        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        self.log.info("Perform SELinux labeling ...")
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                         'run_dir', 'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])

        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
    self.log.debug(
        f"libfaketime installed ={'libfaketime' in sys.modules}")
    assert_c(
        not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
        "libfaketime python module is incompatible with ASAN build.")
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.

    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security; on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport',
                           '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Before we create any backends, create any extra default indexes that may be
    # dynamically provisioned, rather than from template-dse.ldif. Looking at you
    # entryUUID (requires rust enabled).
    #
    # Indexes defaults to default_index_dn
    indexes = Indexes(ds_instance)
    if ds_instance.ds_paths.rust_enabled:
        indexes.create(
            properties={
                'cn': 'entryUUID',
                'nsSystemIndex': 'false',
                'nsIndexType': ['eq', 'pres'],
            })

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        self.log.info(
            f"Create database backend: {backend['nsslapd-suffix']} ...")
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry',
                                                   False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            # Set basic ACIs
            c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            # Dispatch on the RDN attribute of the suffix to build the base entry
            suffix_rdn_attr = backend['nsslapd-suffix'].split(
                '=')[0].lower()
            if suffix_rdn_attr == 'dc':
                domain = create_base_domain(ds_instance,
                                            backend['nsslapd-suffix'])
                domain.add('aci', dc_aci)
            elif suffix_rdn_attr == 'o':
                org = create_base_org(ds_instance,
                                      backend['nsslapd-suffix'])
                org.add('aci', o_aci)
            elif suffix_rdn_attr == 'ou':
                orgunit = create_base_orgunit(ds_instance,
                                              backend['nsslapd-suffix'])
                orgunit.add('aci', ou_aci)
            elif suffix_rdn_attr == 'cn':
                cn = create_base_cn(ds_instance,
                                    backend['nsslapd-suffix'])
                cn.add('aci', cn_aci)
            elif suffix_rdn_attr == 'c':
                c = create_base_c(ds_instance,
                                  backend['nsslapd-suffix'])
                c.add('aci', c_aci)
            else:
                # Unsupported rdn
                raise ValueError(
                    "Suffix RDN '{}' in '{}' is not supported. Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'"
                    .format(suffix_rdn_attr, backend['nsslapd-suffix']))

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    if len(backends) > 0:
        # NOTE: at this point `backend` still holds the last loop value;
        # it is immediately rebound to backends[0] below.
        self.log.debug("Adding sasl maps for suffix %s" %
                       backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(
            properties={
                'cn': 'rfc 2829 u syntax',
                'nsSaslMapRegexString': '^u:\\(.*\\)',
                'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                'nsSaslMapFilterTemplate': '(uid=\\1)'
            })
        # I think this is for LDAPI
        saslmappings.create(
            properties={
                'cn': 'uid mapping',
                'nsSaslMapRegexString': '^[^:@]+$',
                'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
                'nsSaslMapFilterTemplate': '(uid=&)'
            })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    self.log.info("Perform post-installation tasks ...")
    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(
            slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()

    self.log.debug(" 🎉 Instance setup complete")
def test_critical_msg_on_empty_range_idl(topology_st):
    """Doing a range index lookup should not report a critical message even if IDL is empty

    :id: a07a2222-0551-44a6-b113-401d23799364
    :setup: Standalone instance
    :steps:
        1. Create an index for internationalISDNNumber. (attribute chosen because
           it is unlikely that previous tests used it)
        2. telephoneNumber being indexed by default create 20 users without
           telephoneNumber
        3. add a telephoneNumber value and delete it to trigger an empty index
           database
        4. Do a search that triggers a range lookup on empty telephoneNumber
        5. Check that the critical message is not logged in error logs
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass on normal build but could abort a debug build
        5. This should pass
    """
    inst = topology_st.standalone
    indexedAttr = 'internationalISDNNumber'

    # Step 1: build an equality index on an attribute no other test touches
    from lib389.index import Indexes
    Indexes(inst).create(properties={
        'cn': indexedAttr,
        'nsSystemIndex': 'false',
        'nsIndexType': 'eq'
    })
    inst.restart()

    # Step 2: populate users that do NOT carry the indexed attribute
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    log.info('Adding 20 users without "%s"' % indexedAttr)
    last_user = None
    for idx in range(20):
        name = 'user_%d' % idx
        last_user = users.create(properties={
            'uid': name,
            'sn': name,
            'cn': name,
            'uidNumber': '1000',
            'gidNumber': '1000',
            'homeDirectory': '/home/%s' % name,
            'mail': '*****@*****.**' % name,
            'userpassword': '******' % name,
        })

    # Step 3: add then remove a value so the index database for the attribute
    # exists but is empty
    def _refetch_last_user():
        return inst.getEntry(
            last_user.dn,
            ldap.SCOPE_BASE,
        )

    inst.modify_s(last_user.dn, [(ldap.MOD_ADD, indexedAttr, b'1234')])
    ent = _refetch_last_user()
    assert ent
    assert ent.hasAttr(indexedAttr)

    inst.modify_s(last_user.dn, [(ldap.MOD_DELETE, indexedAttr, None)])
    ent = _refetch_last_user()
    assert ent
    assert not ent.hasAttr(indexedAttr)

    # Step 4: the first component is unindexed, so the range on the second
    # component gets evaluated against the empty IDL
    try:
        ents = inst.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(sudoNotAfter=*)(%s>=111))' % indexedAttr)
        assert len(ents) == 0
    except ldap.SERVER_DOWN:
        log.error('Likely testing against a debug version that asserted')
        pass

    # Step 5: the critical message must not appear in the error log
    assert not inst.searchErrorsLog(
        'CRIT - list_candidates - NULL idl was recieved from filter_candidates_ext.'
    )
def get_indexes(self):
    """Get an Indexes(DSLdapObject) for the backend"""
    # Indexes live under the cn=index container beneath this backend entry
    return Indexes(self._instance, basedn="cn=index,%s" % self._dn)