def test_ldbm_modification_audit_log(topology_st):
    """When updating LDBM config attributes, those attributes/values are not listed
    in the audit log

    :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
    :setup: Standalone Instance
    :steps:
        1. Bind as DM
        2. Enable audit log
        3. Update a set of config attrs in LDBM config
        4. Restart the server
        5. Check that config attrs are listed in the audit log
    :expectedresults:
        1. Operation successful
        2. Operation successful
        3. Operation successful
        4. Operation successful
        5. Audit log should contain modification of attrs
    """

    VALUE = '10001'

    d_manager = DirectoryManager(topology_st.standalone)
    conn = d_manager.bind()
    config_ldbm = LDBMConfig(conn)

    log.info("Enable audit logging")
    conn.config.enable_log('audit')

    attrs = ['nsslapd-lookthroughlimit', 'nsslapd-pagedidlistscanlimit',
             'nsslapd-idlistscanlimit', 'nsslapd-db-locks']

    for attr in attrs:
        log.info("Set attribute %s to value %s" % (attr, VALUE))
        config_ldbm.set(attr, VALUE)

    log.info('Restart the server to flush the logs')
    conn.restart()

    for attr in attrs:
        log.info("Check if attribute %s is replaced in the audit log" % attr)
        assert conn.searchAuditLog('replace: %s' % attr)
        assert conn.searchAuditLog('%s: %s' % (attr, VALUE))
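# Note: the restart above is only needed to flush buffered audit records to
# disk before searchAuditLog() runs. As an alternative (a sketch, not part of
# the original test), audit log buffering could be switched off up front via
# the standard cn=config attribute 'nsslapd-auditlog-logbuffering', so entries
# are written immediately; whether that is preferable here is an assumption:
#
#     conn.config.set('nsslapd-auditlog-logbuffering', 'off')
#
# With buffering disabled, the audit log checks can run without restarting
# the instance.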
def test_set_cachememsize_to_custom_value(topo):
    """Test that nsslapd-cachememsize remains set to a custom value above
    3805132804 bytes after changing the value to 9100100100 bytes

    :id: 8a3efc00-65a9-4ee7-b8ee-e35840991ea9
    :setup: Standalone Instance
    :steps:
        1. Disable nsslapd-cache-autosize by setting it to 0 in
           cn=config,cn=ldbm database,cn=plugins,cn=config
        2. Disable nsslapd-cache-autosize-split by setting it to 0 in
           cn=config,cn=ldbm database,cn=plugins,cn=config
        3. Restart the instance
        4. Set nsslapd-cachememsize: CUSTOM_MEM in
           cn=UserRoot,cn=ldbm database,cn=plugins,cn=config
    :expectedresults:
        1. nsslapd-cache-autosize is successfully disabled
        2. nsslapd-cache-autosize-split is successfully disabled
        3. The instance should be successfully restarted
        4. nsslapd-cachememsize is successfully set
    """

    config_ldbm = LDBMConfig(topo.standalone)
    backends = Backends(topo.standalone)
    userroot_ldbm = backends.get("userroot")

    log.info("Disabling nsslapd-cache-autosize by setting it to 0")
    assert config_ldbm.set('nsslapd-cache-autosize', '0')

    log.info("Disabling nsslapd-cache-autosize-split by setting it to 0")
    assert config_ldbm.set('nsslapd-cache-autosize-split', '0')

    log.info("Restarting instance")
    topo.standalone.restart()
    log.info("Instance restarted successfully")

    log.info("Set nsslapd-cachememsize to value {}".format(CUSTOM_MEM))
    assert userroot_ldbm.set('nsslapd-cachememsize', CUSTOM_MEM)
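# A minimal sketch (not part of the original suite) of how persistence of the
# custom value could be verified after a further restart, assuming the same
# `topo` fixture, that the module-level CUSTOM_MEM constant is the string
# '9100100100', and lib389's get_attr_val_utf8() accessor:
def _check_cachememsize_persists(topo):
    userroot_ldbm = Backends(topo.standalone).get("userroot")
    # Restart and confirm the custom value survived the restart
    topo.standalone.restart()
    assert userroot_ldbm.get_attr_val_utf8('nsslapd-cachememsize') == CUSTOM_MEM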
def test_stress_clean(topology_m4, m4rid):
    """Put each server (m1 - m4) under stress, and perform the entire clean process

    :id: a8263cd6-f068-4357-86e0-e7c34504c8c5
    :setup: Replication setup with four masters
    :steps:
        1. Add a bunch of updates to all masters
        2. Put master 4 into read-only mode
        3. Disable replication on master 4
        4. Remove agreements to master 4 from the other masters
        5. Run a cleanallruv task on master 1
        6. Check that everything was cleaned
    :expectedresults:
        1. Operation should be successful
        2. Master 4 should be put into read-only mode
        3. Replication on master 4 should be disabled
        4. Agreements to master 4 should be removed
        5. Operation should be successful
        6. Everything should be cleaned
    """

    log.info('Running test_stress_clean...')
    log.info('test_stress_clean: put all the masters under load...')
    ldbm_config = LDBMConfig(topology_m4.ms["master4"])

    # Put all the masters under load
    m1_add_users = AddUsers(topology_m4.ms["master1"], 2000)
    m1_add_users.start()
    m2_add_users = AddUsers(topology_m4.ms["master2"], 2000)
    m2_add_users.start()
    m3_add_users = AddUsers(topology_m4.ms["master3"], 2000)
    m3_add_users.start()
    m4_add_users = AddUsers(topology_m4.ms["master4"], 2000)
    m4_add_users.start()

    # Allow some time for replication to get flowing in all directions
    log.info('test_stress_clean: allow some time for replication to get flowing...')
    time.sleep(5)

    # Put master 4 into read-only mode
    ldbm_config.set('nsslapd-readonly', 'on')

    # We need to wait for master 4 to push its changes out
    log.info('test_stress_clean: allow some time for master 4 to push changes out (30 seconds)...')
    time.sleep(30)

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_stress_clean", topology_m4)

    # Run the task
    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
    cruv_task.create(properties={
        'replica-id': m4rid,
        'replica-base-dn': DEFAULT_SUFFIX,
        'replica-force-cleaning': 'no'
        })
    cruv_task.wait()

    # Wait for the updates to finish
    log.info('test_stress_clean: wait for all the updates to finish...')
    m1_add_users.join()
    m2_add_users.join()
    m3_add_users.join()
    m4_add_users.join()

    # Check the other masters' RUVs for 'replica 4'
    log.info('test_stress_clean: check if all the replicas have been cleaned...')
    clean = check_ruvs("test_stress_clean", topology_m4, m4rid)
    assert clean

    log.info('test_stress_clean: PASSED, restoring master 4...')

    # Allow up to 120 seconds for replication to converge before restoring master 4
    log.info("Allow up to 120 seconds for replication to complete...")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology([
        topology_m4.ms["master1"],
        topology_m4.ms["master2"],
        topology_m4.ms["master3"],
        ], timeout=120)

    # Turn off read-only mode
    ldbm_config.set('nsslapd-readonly', 'off')
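# AddUsers, remove_master4_agmts and check_ruvs are helpers defined elsewhere in
# this module/conftest and are not shown in this excerpt. For illustration only,
# below is a minimal sketch of what an AddUsers-style load thread might look
# like; the class name, constructor arguments and error handling are assumptions,
# not the actual helper. It assumes lib389's UserAccounts and the module-level
# DEFAULT_SUFFIX import already used above.
import threading

import ldap
from lib389.idm.user import UserAccounts


class AddUsersSketch(threading.Thread):
    """Illustrative load generator: adds test users against one instance."""

    def __init__(self, inst, num_users):
        super().__init__()
        self.inst = inst
        self.num_users = num_users

    def run(self):
        # Add num_users test entries under the default suffix
        users = UserAccounts(self.inst, DEFAULT_SUFFIX)
        for idx in range(self.num_users):
            try:
                users.create_test_user(uid=idx)
            except ldap.LDAPError:
                # Entries may collide across masters while replication churns;
                # for a stress helper it is enough to keep generating load.
                pass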