def test_config_deadlock_policy(topology_m2):
    """Check that nsslapd-db-deadlock-policy acted as expected

    :id: a24e25fd-bc15-47fa-b018-372f6a2ec59c
    :setup: MMR with two suppliers
    :steps:
        1. Search for nsslapd-db-deadlock-policy and check if it contains a default value
        2. Set nsslapd-db-deadlock-policy to a positive value
        3. Set nsslapd-db-deadlock-policy to a negative value
        4. Set nsslapd-db-deadlock-policy to an invalid value
        5. Set nsslapd-db-deadlock-policy back to a default value
    :expectedresults:
        1. Search should be a successful and should contain a default value
        2. nsslapd-db-deadlock-policy should be successfully set
        3. nsslapd-db-deadlock-policy should be successfully set
        4. Modification with an invalid value should throw an error
        5. nsslapd-db-deadlock-policy should be successfully set
    """
    default_val = b'9'
    ldbmconfig = LDBMConfig(topology_m2.ms["supplier1"])
    bdbconfig = BDB_LDBMConfig(topology_m2.ms["supplier1"])

    def _get_policy():
        # Before 1.4.2 the attribute lives on the generic ldbm config entry;
        # newer versions expose it on the bdb-specific config entry.
        if ds_is_older('1.4.2'):
            return ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
        return bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')

    assert _get_policy() == default_val

    # Try a range of valid values
    for val in (b'0', b'5', b'9'):
        ldbmconfig.replace('nsslapd-db-deadlock-policy', val)
        assert _get_policy() == val

    # Try a range of invalid values
    for val in ('-1', '10'):
        with pytest.raises(ldap.LDAPError):
            ldbmconfig.replace('nsslapd-db-deadlock-policy', val)

    # Cleanup - restore the default explicitly instead of relying on the
    # value that happened to be left behind by the valid-values loop
    ldbmconfig.replace('nsslapd-db-deadlock-policy', default_val)
def setup_attruniq_index_be_import(topology_st_fn):
    """Enable Attribute Uniqueness, disable indexes and
    import 120000 entries to the default backend
    """
    inst = topology_st_fn.standalone

    # The same attribute set gets a uniqueness constraint AND a disabled index;
    # keep the list in one place instead of duplicating it in both loops.
    attrs = ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber',
             'homeDirectory', 'givenName', 'description']

    inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
    inst.config.set('nsslapd-plugin-logging', 'on')
    inst.restart()

    # Enable the Attribute Uniqueness plugin for all listed attributes
    attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
    attruniq.create(properties={'cn': 'attruniq'})
    for cn in attrs:
        attruniq.add_unique_attribute(cn)
    attruniq.add_unique_subtree(DEFAULT_SUFFIX)
    attruniq.enable_all_subtrees()
    attruniq.enable()

    # Disable the user indexes so uniqueness checks trigger unindexed searches
    indexes = Indexes(inst)
    for cn in attrs:
        indexes.ensure_state(properties={
            'cn': cn,
            'nsSystemIndex': 'false',
            'nsIndexType': 'none'})

    # Make sure the import itself has enough DB locks to succeed
    bdb_config = BDB_LDBMConfig(inst)
    bdb_config.replace("nsslapd-db-locks", "130000")
    inst.restart()

    ldif_dir = inst.get_ldif_dir()
    import_ldif = ldif_dir + '/perf_import.ldif'

    # Valid online import
    import_task = ImportTask(inst)
    dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
    import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
    import_task.wait()
    assert import_task.is_complete()
def test_db_home_dir_online_backup(topo):
    """Test that if the dbhome directory is set causing an online backup to fail,
    the dblayer_backup function should go to error processing section.

    :id: cfc495d6-2a58-4e4e-aa40-39a15c71f973
    :setup: Standalone Instance
    :steps:
        1. Change the dbhome to directory to eg-/tmp/test
        2. Perform an online back-up
        3. Check for the correct errors in the log
    :expectedresults:
        1. Success
        2. Failure
        3. Success
    """
    inst = topo.standalone
    bdb_ldbmconfig = BDB_LDBMConfig(inst)
    dseldif = DSEldif(inst)

    # The db home must be changed offline, directly in dse.ldif
    inst.stop()
    with tempfile.TemporaryDirectory() as backup_dir:
        dseldif.replace(bdb_ldbmconfig.dn, 'nsslapd-db-home-directory', f'{backup_dir}')
        inst.start()
        # Backing up into the directory that is also the db home is expected
        # to fail during the rename step of the backup
        inst.tasks.db2bak(backup_dir=f'{backup_dir}', args={TASK_WAIT: True})
        assert inst.ds_error_log.match(
            f".*Failed renaming {backup_dir}.bak back to {backup_dir}")
def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
    """Test that DB lock pause setting increases the wait interval value for the monitoring thread

    :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
    :customerscenario: True
    :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
    :steps:
        1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
        2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
        3. Make sure that the pause is successfully increased a few times in a row
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    inst = topology_st_fn.standalone
    bdb_config = BDB_LDBMConfig(inst)
    bdb_config.replace("nsslapd-db-locks", "20000")

    # Confirm the default pause, then raise it to 10 seconds
    lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
    assert lock_pause == 500
    lock_pause = "10000"
    bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)

    # Restart server
    inst.restart()

    lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
    lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
    assert lock_enabled == "on"
    assert lock_threshold == 90

    users = UserAccounts(inst, DEFAULT_SUFFIX)
    start = datetime.datetime.now()
    with pytest.raises(ldap.OPERATIONS_ERROR):
        spawn_worker_thread(
            create_user_wrapper, users, log, 30,
            f"Adding user with monitoring enabled='{lock_enabled}'; "
            f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
    end = datetime.datetime.now()

    # With a 10 s pause the monitoring thread should have kept the operation
    # waiting; finishing in under ~9 s means the pause was not applied
    time_delta = end - start
    if time_delta.seconds < 9:
        raise RuntimeError(
            "nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
            f"Finished the execution in {time_delta.seconds} seconds")

    # In case something has failed - restart for the clean up
    inst.restart()
def test_check_db_home_dir_in_config(topo):
    """Test to check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in cn=config

    :id: 9a1d0fcf-ca31-4f60-8b31-4de495b0b3ce
    :customerscenario: True
    :setup: Standalone Instance
    :steps:
        1. Create instance
        2. Check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in cn=config
    :expectedresults:
        1. Success
        2. Success
    """
    standalone = topo.standalone
    bdb_ldbmconfig = BDB_LDBMConfig(standalone)
    expected_dbhome = f'/dev/shm/slapd-{standalone.serverid}'

    log.info('Check the config value of nsslapd-db-home-directory')
    actual_dbhome = bdb_ldbmconfig.get_attr_val_utf8('nsslapd-db-home-directory')
    assert actual_dbhome == expected_dbhome
def test_invalid_db_locks_value(topology_st_fn, locks_invalid):
    """Test that setting nsslapd-db-locks to 0 is rejected

    :id: bbb40279-d622-4f36-a129-c54f963f494a
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone instance
    :steps:
        1. Set nsslapd-db-locks to 0
        2. Check if exception message contains info about invalid value
    :expectedresults:
        1. Exception is raised
        2. Success
    """
    inst = topology_st_fn.standalone
    bdb_config = BDB_LDBMConfig(inst)
    msg = 'Invalid value for nsslapd-db-locks ({}). Must be greater than 10000'.format(
        locks_invalid)

    # pytest.raises makes the test FAIL if the server unexpectedly accepts the
    # value (the previous try/except construct passed silently in that case)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM) as excinfo:
        bdb_config.replace("nsslapd-db-locks", locks_invalid)
    log.info('Got expected error: {}'.format(str(excinfo.value)))
    assert msg in str(excinfo.value)
def test_invalid_threshold_range(topology_st_fn, invalid_value):
    """Test that setting nsslapd-db-locks-monitoring-threshold to 60 % is rejected

    :id: e4551de1-8582-4c13-b59d-3d5ec4701457
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone instance
    :steps:
        1. Set nsslapd-db-locks-monitoring-threshold to 60 %
        2. Check if exception message contains info about invalid value range
    :expectedresults:
        1. Exception is raised
        2. Success
    """
    inst = topology_st_fn.standalone
    bdb_config = BDB_LDBMConfig(inst)
    msg = 'threshold is indicated as a percentage and it must lie in range of 70 and 95'

    # pytest.raises makes the test FAIL if the server unexpectedly accepts the
    # value (the previous try/except construct passed silently in that case)
    with pytest.raises(ldap.OPERATIONS_ERROR) as excinfo:
        bdb_config.replace("nsslapd-db-locks-monitoring-threshold", invalid_value)
    log.info('Got expected error: {}'.format(str(excinfo.value)))
    assert msg in str(excinfo.value)
def test_check_db_home_dir_in_dse(topo):
    """Test to check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in dse.ldif

    :id: f25befd2-a57c-4365-8eaf-70ea5fb987ea
    :customerscenario: True
    :setup: Standalone Instance
    :steps:
        1. Create instance
        2. Check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in dse.ldif
    :expectedresults:
        1. Success
        2. Success
    """
    standalone = topo.standalone
    bdb_ldbmconfig = BDB_LDBMConfig(standalone)
    dse_ldif = DSEldif(standalone)
    expected_dbhome = f'/dev/shm/slapd-{standalone.serverid}'

    log.info('Check value of nsslapd-db-home-directory in dse.ldif')
    dse_value = dse_ldif.get(bdb_ldbmconfig.dn, 'nsslapd-db-home-directory', True)
    assert dse_value == expected_dbhome
def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
    """Test that when all of the locks are exhausted the instance still working
    and database is not corrupted

    :id: 299108cc-04d8-4ddc-b58e-99157fccd643
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
    :steps:
        1. Set nsslapd-db-locks to 11000
        2. Check that we stop acquiring new locks when the threshold is reached
        3. Check that we can regulate a pause interval for DB locks monitoring thread
        4. Make sure the feature works for different backends on the same suffix
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topology_st_fn.standalone

    # Second backend under the same suffix so the feature is exercised on
    # more than one backend (step 4)
    ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
    backends = Backends(inst)
    backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
                                'name': ADDITIONAL_SUFFIX[-3:]})
    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
    ous.create(properties={'ou': 'newpeople'})

    # Deliberately small lock table so the worker threads can exhaust it
    bdb_config = BDB_LDBMConfig(inst)
    bdb_config.replace("nsslapd-db-locks", "11000")

    # Restart server
    inst.restart()

    for lock_enabled in ["on", "off"]:
        for lock_pause in ["100", "500", "1000"]:
            # Each monitoring config change requires a restart to take effect
            bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
            bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
            bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
            inst.restart()

            # With monitoring on, the server rejects the operation cleanly
            # (OPERATIONS_ERROR); with monitoring off, lock exhaustion brings
            # the server down (worker RuntimeError or SERVER_DOWN)
            if lock_enabled == "off":
                raised_exception = (RuntimeError, ldap.SERVER_DOWN)
            else:
                raised_exception = ldap.OPERATIONS_ERROR

            users = UserAccounts(inst, DEFAULT_SUFFIX)
            with pytest.raises(raised_exception):
                spawn_worker_thread(
                    create_user_wrapper, users, log, 30,
                    f"Adding user with monitoring enabled='{lock_enabled}'; "
                    f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
            # Restart because we already run out of locks and the next unindexed searches will fail eventually
            if lock_enabled == "off":
                # Server may be wedged after exhausting locks; kill before restart
                _kill_ns_slapd(inst)
            inst.restart()

            # Repeat the same check against the second backend
            users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
            with pytest.raises(raised_exception):
                spawn_worker_thread(
                    create_user_wrapper, users, log, 30,
                    f"Adding user with monitoring enabled='{lock_enabled}'; "
                    f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
            # In case feature is disabled - restart for the clean up
            if lock_enabled == "off":
                _kill_ns_slapd(inst)
            inst.restart()