def test_indexing_is_online(topo):
    """Test that the changenumber index is online right after enabling the plugin

    :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f
    :setup: Standalone Instance
    :steps:
        1. Enable retro cl
        2. Perform some updates
        3. Search for "(changenumber>=-1)", and check it is not partially unindexed
        4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and check it is not partially unindexed
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    # Enable plugin
    topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off')
    plugin = RetroChangelogPlugin(topo.standalone)
    plugin.enable()
    topo.standalone.restart()

    # Do a bunch of updates
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_entry = users.create(properties={
        'sn': '1',
        'cn': 'user 1',
        'uid': 'user1',
        'uidNumber': '11',
        'gidNumber': '111',
        'givenname': 'user1',
        'homePhone': '0861234567',
        'carLicense': '131D16674',
        'mail': '*****@*****.**',
        'homeDirectory': '/home',
    })
    for count in range(0, 10):
        user_entry.replace('mail', f'test{count}@test.com')

    # Search the retro cl, and check for error messages
    filter_simple = '(changenumber>=-1)'
    filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))'
    retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX)
    retro_changelog_suffix.filter(filter_simple)
    assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')

    # Search the retro cl again with the compound filter
    retro_changelog_suffix.filter(filter_compound)
    assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
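
# A small sketch of a reusable check for the pattern above (hypothetical helper,
# not part of the original test): run a search against a suffix and assert that the
# access log did not record a partially unindexed filter. It assumes access-log
# buffering has already been turned off, as done at the top of the test.
def assert_filter_indexed(inst, basedn, ldap_filter):
    """Search `ldap_filter` under `basedn` and assert it was fully indexed."""
    DSLdapObjects(inst, basedn=basedn).filter(ldap_filter)
    assert not inst.searchAccessLog('Partially Unindexed Filter')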
def test_syncrepl_openldap(topology):
    """Test basic functionality of the openldap syncrepl compatibility handler.

    :id: 03039178-2cc6-40bd-b32c-7d6de108828b
    :setup: Standalone instance
    :steps:
        1. Enable Retro Changelog
        2. Enable Syncrepl
        3. Run the syncstate test to check refresh, add, delete, mod
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st = topology.standalone
    # Enable RetroChangelog.
    rcl = RetroChangelogPlugin(st)
    rcl.enable()
    # Set the default targetid
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
    # Enable sync repl
    csp = ContentSyncPlugin(st)
    csp.enable()
    # Restart DS
    st.restart()
    # Setup the syncer
    sync = ISyncRepl(st, openldap=True)
    # Run the checks
    syncstate_assert(st, sync)
def test_syncrepl_basic(topology):
    """Test basic functionality of the SyncRepl interface

    :id: f9fea826-8ae2-412a-8e88-b8e0ba939b06
    :setup: Standalone instance
    :steps:
        1. Enable Retro Changelog
        2. Enable Syncrepl
        3. Run the syncstate test to check refresh, add, delete, mod
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st = topology.standalone
    # Enable RetroChangelog.
    rcl = RetroChangelogPlugin(st)
    rcl.enable()
    # Set the default targetid
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
    # Enable sync repl
    csp = ContentSyncPlugin(st)
    csp.enable()
    # Restart DS
    st.restart()
    # Setup the syncer
    sync = ISyncRepl(st)
    # Run the checks
    syncstate_assert(st, sync)
def test_retrochangelog_trimming_crash(topo, changelog_init):
    """Check that when the retroCL nsslapd-changelogmaxage attribute contains an
    invalid value, the instance does not crash at shutdown

    :id: 5d9bd7ca-e9bf-4be9-8fc8-902aa5513052
    :customerscenario: True
    :setup: Replication with two suppliers, change nsslapd-changelogdir to
            '/var/lib/dirsrv/slapd-supplier1/changelog' and set
            cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
    :steps:
        1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to '-1'
           This value is invalid. To disable retroCL trimming it should be set to 0
        2. Do several restarts
        3. Check there is no 'Detected Disorderly Shutdown' message (crash)
        4. Restore a valid value for nsslapd-changelogmaxage ('1w')
    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. Operation should be successful
        4. Operation should be successful
    """
    log.info('1. Test retroCL trimming crash in cn=Retro Changelog Plugin,cn=plugins,cn=config')

    # Set nsslapd-changelogmaxage directly in dse.ldif
    # because the value being set is invalid
    topo.ms["supplier1"].log.info("ticket50736 start verification")
    topo.ms["supplier1"].stop()
    retroPlugin = RetroChangelogPlugin(topo.ms["supplier1"])
    dse_ldif = DSEldif(topo.ms["supplier1"])
    dse_ldif.replace(retroPlugin.dn, 'nsslapd-changelogmaxage', '-1')
    topo.ms["supplier1"].start()

    # The crash should be systematic, but just in case do several restarts
    # with a delay to let all plugins init
    for i in range(5):
        time.sleep(1)
        topo.ms["supplier1"].stop()
        topo.ms["supplier1"].start()

    assert not topo.ms["supplier1"].detectDisorderlyShutdown()

    topo.ms["supplier1"].log.info("ticket 50736 was successfully verified.")
def test_retrocl_trimming(topology_st):
    """Test retrocl trimming works

    :id: 54c6747f-6772-43b7-8b03-09e13fa0c205
    :setup: Standalone Instance
    :steps:
        1. Enable Retro changelog
        2. Add a bunch of entries
        3. Configure trimming
        4. Verify trimming occurred
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topology_st.standalone

    # Configure plugin
    log.info('Configure retrocl plugin')
    rcl = RetroChangelogPlugin(inst)
    rcl.enable()
    inst.restart()

    # Do some updates
    suffix = Domain(inst, DEFAULT_SUFFIX)
    for idx in range(0, 10):
        suffix.replace('description', str(idx))

    # Setup trimming
    rcl.replace('nsslapd-changelog-trim-interval', '2')
    rcl.replace('nsslapd-changelogmaxage', '5s')
    inst.config.set('nsslapd-errorlog-level', '65536')  # plugin logging
    inst.restart()

    # Verify trimming occurs
    time.sleep(5)
    assert inst.searchErrorsLog("trim_changelog: removed ")

    # Clean up
    inst.config.set('nsslapd-errorlog-level', '0')
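
# A minimal polling sketch (hypothetical helper, not part of the test above): rather
# than a fixed time.sleep(5), retry the error-log search for a bounded time so the
# trimming assertion is less sensitive to timing. It only uses searchErrorsLog(),
# which the test already relies on.
def wait_for_errorslog(inst, pattern, timeout=30, interval=1):
    """Return True as soon as `pattern` appears in the errors log, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if inst.searchErrorsLog(pattern):
            return True
        time.sleep(interval)
    return False
# e.g. assert wait_for_errorslog(inst, "trim_changelog: removed ")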
def test_syncrepl_openldap(topology):
    """Test basic functionality of the openldap syncrepl compatibility handler.

    :id: 03039178-2cc6-40bd-b32c-7d6de108828b
    :setup: Standalone instance
    :steps:
        1. Enable Retro Changelog
        2. Enable Syncrepl
        3. Run the syncstate test to check refresh, add, delete, mod
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st = topology.standalone

    # Ensure entryuuid is set up
    plug = EntryUUIDPlugin(st)
    task = plug.fixup(DEFAULT_SUFFIX)
    task.wait()
    st.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.PLUGIN))
    assert task.is_complete() and task.get_exit_code() == 0

    # Enable RetroChangelog.
    rcl = RetroChangelogPlugin(st)
    rcl.enable()
    # Set the default targetid
    rcl.add('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
    rcl.add('nsslapd-attribute', 'entryuuid:targetEntryUUID')
    # Enable sync repl
    csp = ContentSyncPlugin(st)
    csp.add('syncrepl-allow-openldap', 'on')
    csp.enable()
    # Restart DS
    st.restart()
    # Setup the syncer
    sync = ISyncRepl(st, openldap=True)
    # Run the checks
    syncstate_assert(st, sync)
def test_sync_repl_cookie_with_failure(topology, request):
    """Test that sync_repl cookies are progressing in the right order
    when there is a failure in nested updates

    :id: e0103448-170e-4080-8f22-c34606447ce2
    :setup: Standalone Instance
    :steps:
      1.: enable retroCL
      2.: configure retroCL to log nsuniqueid as targetUniqueId
      3.: enable content_sync plugin
      4.: enable automember
      5.: create (4) groups; make group2 a groupOfUniqueNames so automember
          will fail to add 'member' (uniqueMember expected)
      6.: configure automember to provision those groups with 'member'
      7.: enable and configure memberof plugin
      8.: enable plugin log level
      9.: restart the server
      10.: create a thread dedicated to running a sync repl client
      11.: create a group that will be the only update received by the sync repl client
      12.: create (9) users that will generate nested updates (automember/memberof)
      13.: stop the sync repl client and collect the list of cookie.change_no
      14.: check that the list of cookie.change_no contains only the group from step 11
    :expectedresults:
      1.: succeeds
      2.: succeeds
      3.: succeeds
      4.: succeeds
      5.: succeeds
      6.: succeeds
      7.: succeeds
      8.: succeeds
      9.: succeeds
      10.: succeeds
      11.: succeeds
      12.: Fails (expected)
      13.: succeeds
      14.: succeeds
    """
    inst = topology[0]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable automember
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    # Add the automember groups
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = []
    for i in range(1, 5):
        group.append(groups.create(properties={'cn': 'group%d' % i}))

    # Set group2 as a groupOfUniqueNames so that automember will fail to update that group.
    # This will trigger a failure in the internal MOD and a failure to add 'member'.
    group[1].replace('objectclass', 'groupOfUniqueNames')

    # Add the automember config entries
    am_configs = AutoMembershipDefinitions(inst)
    for g in group:
        am_config = am_configs.create(properties={'cn': 'config %s' % g.get_attr_val_utf8('cn'),
                                                  'autoMemberScope': DEFAULT_SUFFIX,
                                                  'autoMemberFilter': 'uid=*',
                                                  'autoMemberDefaultGroup': g.dn,
                                                  'autoMemberGroupingAttr': 'member:dn'})

    # Enable and configure memberof plugin
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.replace_groupattr('member')

    memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    memberof_config.create(properties={'cn': 'memberOf config',
                                       'memberOfGroupAttr': 'member',
                                       'memberOfAttr': 'memberof'})

    # Enable plugin log level (useful for debugging)
    inst.setLogLevel(65536)
    inst.restart()

    # Create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(inst)
    sync_repl.start()
    time.sleep(5)

    # Add a test group just to check that sync_repl receives only one update
    group.append(groups.create(properties={'cn': 'group%d' % 10}))

    # Create users; automember/memberof will generate nested updates
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    users_set = []
    for i in range(1000, 1010):
        try:
            users_set.append(users.create_test_user(uid=i))
            # Automember should fail to add the user to group2 (groupOfUniqueNames)
            assert False
        except ldap.UNWILLING_TO_PERFORM:
            pass

    # Stop the server to get the sync_repl result set (exit from the while loop),
    # then wait a bit to let the sync_repl thread set its result before fetching it.
    inst.stop()
    time.sleep(10)
    cookies = sync_repl.get_result()

    # Check that the cookie list contains only one entry
    assert len(cookies) == 1
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)

        assert int(cookie) > 0
        assert int(cookie) < 1000
        assert int(cookie) > prev
        prev = int(cookie)
    sync_repl.join()
    log.info('test_sync_repl_cookie_with_failure: PASS\n')

    def fin():
        inst.restart()
        for user in users_set:
            try:
                user.delete()
            except:
                pass
        for g in group:
            try:
                g.delete()
            except:
                pass

    request.addfinalizer(fin)
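
# The cookie-progression check above is repeated in the following tests. A minimal
# helper sketch (hypothetical name, not part of the original tests) that factors it
# out; the (0, 1000) bound mirrors the assertions used above.
def assert_cookies_increasing(cookies, upper=1000):
    """Assert that the sync repl cookies are strictly increasing and within (0, upper)."""
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)
        assert 0 < int(cookie) < upper
        assert int(cookie) > prev
        prev = int(cookie)
# e.g. assert_cookies_increasing(sync_repl.get_result())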
def test_sync_repl_cookie_add_del(topology, request):
    """Test that sync_repl cookies are progressing in an increasing order
    when there are add and del operations

    :id: 83e11038-6ed0-4a5b-ac77-e44887ab11e3
    :setup: Standalone Instance
    :steps:
      1.: enable retroCL
      2.: configure retroCL to log nsuniqueid as targetUniqueId
      3.: enable content_sync plugin
      4.: enable automember
      5.: create (2) groups. A few groups can help to reproduce the concurrent updates problem.
      6.: configure automember to provision those groups with 'member'
      7.: enable and configure memberof plugin
      8.: enable plugin log level
      9.: restart the server
      10.: create a thread dedicated to running a sync repl client
      11.: Create (3) users that will generate nested updates (automember/memberof)
      12.: Delete the (3) users
      13.: stop the sync repl client and collect the list of cookie.change_no
      14.: check that cookies.change_no are in increasing order
    :expectedresults:
      1.: succeeds
      2.: succeeds
      3.: succeeds
      4.: succeeds
      5.: succeeds
      6.: succeeds
      7.: succeeds
      8.: succeeds
      9.: succeeds
      10.: succeeds
      11.: succeeds
      12.: succeeds
      13.: succeeds
      14.: succeeds
    """
    inst = topology[0]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable automember
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    # Add the automember groups
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = []
    for i in range(1, 3):
        group.append(groups.create(properties={'cn': 'group%d' % i}))

    # Add the automember config entries
    am_configs = AutoMembershipDefinitions(inst)
    for g in group:
        am_config = am_configs.create(properties={'cn': 'config %s' % g.get_attr_val_utf8('cn'),
                                                  'autoMemberScope': DEFAULT_SUFFIX,
                                                  'autoMemberFilter': 'uid=*',
                                                  'autoMemberDefaultGroup': g.dn,
                                                  'autoMemberGroupingAttr': 'member:dn'})

    # Enable and configure memberof plugin
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.replace_groupattr('member')

    memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    memberof_config.create(properties={'cn': 'memberOf config',
                                       'memberOfGroupAttr': 'member',
                                       'memberOfAttr': 'memberof'})

    # Enable plugin log level (useful for debugging)
    inst.setLogLevel(65536)
    inst.restart()

    # Create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(inst)
    sync_repl.start()
    time.sleep(5)

    # Create users; automember/memberof will generate nested updates
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    users_set = []
    for i in range(10001, 10004):
        users_set.append(users.create_test_user(uid=i))

    time.sleep(10)

    # Delete users; automember/memberof will generate nested updates
    for user in users_set:
        user.delete()

    # Stop the server to get the sync_repl result set (exit from the while loop)
    inst.stop()
    cookies = sync_repl.get_result()

    # Check that the cookies are increasing and in an acceptable range (0..1000)
    assert len(cookies) > 0
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)

        assert int(cookie) > 0
        assert int(cookie) < 1000
        assert int(cookie) > prev
        prev = int(cookie)
    sync_repl.join()
    log.info('test_sync_repl_cookie_add_del: PASS\n')

    def fin():
        inst.restart()
        for g in group:
            try:
                g.delete()
            except:
                pass

    request.addfinalizer(fin)
def test_sync_repl_mep(topology, request):
    """Test sync repl with the MEP plugin, which triggers several updates on the same entry

    :id: d9515930-293e-42da-9835-9f255fa6111b
    :setup: Standalone Instance
    :steps:
        1. Enable retro/sync_repl/mep
        2. Add the mep template and definition entries
        3. Start the sync_repl client
        4. Add users with the PosixAccount objectclass (mep will update them several times)
        5. Check that the received cookies are progressing
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    inst = topology[0]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable the MEP plugin
    mana = ManagedEntriesPlugin(inst)
    mana.enable()

    # Add the template and definition entries
    org1 = OrganizationalUnits(inst, DEFAULT_SUFFIX).create(properties={'ou': 'Users'})
    org2 = OrganizationalUnit(inst, f'ou=Groups,{DEFAULT_SUFFIX}')
    meps = MEPTemplates(inst, DEFAULT_SUFFIX)
    mep_template1 = meps.create(properties={
        'cn': 'UPG Template1',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: posixGroup',
        'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')})
    conf_mep = MEPConfigs(inst)
    mep_config = conf_mep.create(properties={
        'cn': 'UPG Definition2',
        'originScope': org1.dn,
        'originFilter': 'objectclass=posixaccount',
        'managedBase': org2.dn,
        'managedTemplate': mep_template1.dn})

    # Enable plugin log level (useful for debugging)
    inst.setLogLevel(65536)
    inst.restart()

    # Create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(inst)
    sync_repl.start()
    time.sleep(5)

    # Add a user with the PosixAccount objectclass and verify creation of the User Private Group
    user = UserAccounts(inst, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}'

    # Stop the server to get the sync_repl result set (exit from the while loop),
    # then wait a bit to let the sync_repl thread set its result before fetching it.
    inst.stop()
    time.sleep(10)
    cookies = sync_repl.get_result()

    # Check that the cookies are increasing and in an acceptable range (0..1000)
    assert len(cookies) > 0
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)

        assert int(cookie) > 0
        assert int(cookie) < 1000
        assert int(cookie) > prev
        prev = int(cookie)
    sync_repl.join()
    log.info('test_sync_repl_mep: PASS\n')
def test_retrocl_exclude_attr_add(topology_st):
    """Test the exclude attribute feature of the retrocl plugin for add operations

    :id: 3481650f-2070-45ef-9600-2500cfc51559
    :setup: Standalone instance
    :steps:
        1. Enable dynamic plugins
        2. Configure the retro changelog plugin
        3. Add an entry
        4. Ensure entry attrs are in the changelog
        5. Exclude an attr
        6. Add another entry
        7. Ensure the excluded attr is not in the changelog
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """
    st = topology_st.standalone

    log.info('Configure retrocl plugin')
    rcl = RetroChangelogPlugin(st)
    rcl.disable()
    rcl.enable()
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance: ' + e.args[0]['desc'])
        assert False

    users = UserAccounts(st, DEFAULT_SUFFIX)

    log.info('Adding user1')
    try:
        users.create(properties={
            'sn': '1',
            'cn': 'user 1',
            'uid': 'user1',
            'uidNumber': '11',
            'gidNumber': '111',
            'givenname': 'user1',
            'homePhone': '0861234567',
            'carLicense': '131D16674',
            'mail': '*****@*****.**',
            'homeDirectory': '/home/user1',
            'userpassword': USER_PW})
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.error("Failed to add user1: " + str(e))

    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
    try:
        retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
    assert len(cllist) > 0
    if cllist[0].present('changes'):
        clstr = str(cllist[0].get_attr_vals_utf8('changes'))
        assert ATTR_HOMEPHONE in clstr
        assert ATTR_CARLICENSE in clstr

    log.info('Excluding attribute ' + ATTR_HOMEPHONE)
    args = FakeArgs()
    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
    args.instance = 'standalone1'
    args.basedn = None
    args.binddn = None
    args.starttls = False
    args.pwdfile = None
    args.bindpw = None
    args.prompt = False
    args.exclude_attrs = ATTR_HOMEPHONE
    args.func = retrochangelog_add
    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, False, args)
    result = args.func(inst, None, log, args)
    disconnect_instance(inst)
    assert result is None

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance: ' + e.args[0]['desc'])
        assert False

    log.info('Adding user2')
    try:
        users.create(properties={
            'sn': '2',
            'cn': 'user 2',
            'uid': 'user2',
            'uidNumber': '22',
            'gidNumber': '222',
            'givenname': 'user2',
            'homePhone': '0879088363',
            'carLicense': '04WX11038',
            'mail': '*****@*****.**',
            'homeDirectory': '/home/user2',
            'userpassword': USER_PW})
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.error("Failed to add user2: " + str(e))

    log.info('Verify homePhone attr is not in the changelog changestring')
    try:
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
        assert len(cllist) > 0
        if cllist[0].present('changes'):
            clstr = str(cllist[0].get_attr_vals_utf8('changes'))
            assert ATTR_HOMEPHONE not in clstr
            assert ATTR_CARLICENSE in clstr
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
def test_retrocl_exclude_attr_mod(topology_st):
    """Test the exclude attribute feature of the retrocl plugin for mod operations

    :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
    :setup: Standalone instance
    :steps:
        1. Enable dynamic plugins
        2. Configure the retro changelog plugin
        3. Add the user1 entry
        4. Ensure entry attrs are in the changelog
        5. Exclude an attr
        6. Modify the user1 entry
        7. Ensure the excluded attr is not in the changelog
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """
    st = topology_st.standalone

    log.info('Configure retrocl plugin')
    rcl = RetroChangelogPlugin(st)
    rcl.disable()
    rcl.enable()
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance: ' + e.args[0]['desc'])
        assert False

    users = UserAccounts(st, DEFAULT_SUFFIX)

    log.info('Adding user1')
    try:
        user1 = users.create(properties={
            'sn': '1',
            'cn': 'user 1',
            'uid': 'user1',
            'uidNumber': '11',
            'gidNumber': '111',
            'givenname': 'user1',
            'homePhone': '0861234567',
            'carLicense': '131D16674',
            'mail': '*****@*****.**',
            'homeDirectory': '/home/user1',
            'userpassword': USER_PW})
    except ldap.ALREADY_EXISTS:
        user1 = UserAccount(st, dn=USER1_DN)
    except ldap.LDAPError as e:
        log.error("Failed to add user1: " + str(e))

    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
    try:
        retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
    assert len(cllist) > 0
    if cllist[0].present('changes'):
        clstr = str(cllist[0].get_attr_vals_utf8('changes'))
        assert ATTR_HOMEPHONE in clstr
        assert ATTR_CARLICENSE in clstr

    log.info('Excluding attribute ' + ATTR_CARLICENSE)
    args = FakeArgs()
    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
    args.instance = 'standalone1'
    args.basedn = None
    args.binddn = None
    args.starttls = False
    args.pwdfile = None
    args.bindpw = None
    args.prompt = False
    args.exclude_attrs = ATTR_CARLICENSE
    args.func = retrochangelog_add
    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, False, args)
    result = args.func(inst, None, log, args)
    disconnect_instance(inst)
    assert result is None

    log.info('Restarting instance')
    try:
        st.restart()
    except ldap.LDAPError as e:
        log.error('Failed to restart instance: ' + e.args[0]['desc'])
        assert False

    log.info('Modify user1 carLicense attribute')
    try:
        user1.replace(ATTR_CARLICENSE, "123WX321")
    except ldap.LDAPError as e:
        log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + str(e))
        assert False

    log.info('Verify carLicense attr is not in the changelog changestring')
    try:
        cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
        assert len(cllist) > 0
        # There will be two entries in the changelog for this user; we are only
        # interested in the second one, the modify operation.
        if cllist[1].present('changes'):
            clstr = str(cllist[1].get_attr_vals_utf8('changes'))
            assert ATTR_CARLICENSE not in clstr
    except ldap.LDAPError as e:
        log.fatal("Changelog search failed, error: " + str(e))
        assert False
def retrochangelog_edit(inst, basedn, log, args):
    log = log.getChild('retrochangelog_edit')
    plugin = RetroChangelogPlugin(inst)
    generic_object_edit(plugin, log, args, arg_to_attr)
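
# retrochangelog_edit() relies on an arg_to_attr mapping defined in the same CLI
# module. The sketch below is only illustrative (hypothetical name, contents inferred
# from the retro changelog attributes exercised in the tests above), not the
# authoritative table shipped with the CLI.
example_arg_to_attr = {
    'attribute': 'nsslapd-attribute',
    'max_age': 'nsslapd-changelogmaxage',
    'trim_interval': 'nsslapd-changelog-trim-interval',
    'exclude_attrs': 'nsslapd-exclude-attrs',
}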
def test_sync_repl_cenotaph(topo_m2, request):
    """Test the creation of a cenotaph while a sync repl client is running

    :id: 8ca1724a-cf42-4880-bf0f-be451f9bd3b4
    :setup: MMR with 2 suppliers
    :steps:
        1. Enable retroCL/content_sync
        2. Run a sync repl client
        3. Create users
        4. Do a MODRDN of a user entry => creation of a cenotaph
        5. Stop the sync repl client
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
        5. Should succeed
    """
    m1 = topo_m2.ms["supplier1"]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(m1)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(m1)
    plugin.enable()

    # Restart DS
    m1.restart()

    # Create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(m1)
    sync_repl.start()
    time.sleep(5)

    # Create users
    users = UserAccounts(m1, DEFAULT_SUFFIX)
    users_set = []
    for i in range(10001, 10003):
        users_set.append(users.create_test_user(uid=i))

    # Rename an entry to trigger the creation of a cenotaph
    users_set[0].rename("uid=foo")

    # Stop the server to get the sync_repl result set (exit from the while loop),
    # then wait a bit to let the sync_repl thread set its result before fetching it.
    m1.stop()
    time.sleep(2)

    def fin():
        m1.restart()
        for user in users_set:
            try:
                user.delete()
            except:
                pass

    request.addfinalizer(fin)
def init_sync_repl_plugins(topology, request):
    """Prepare the test environment (retroCL/sync_repl/automember/memberof)
    and clean up at the end of the test

    1.: enable retroCL
    2.: configure retroCL to log nsuniqueid as targetUniqueId
    3.: enable content_sync plugin
    4.: enable automember
    5.: create (4) groups. A few groups can help to reproduce the concurrent updates problem.
    6.: configure automember to provision those groups with 'member'
    7.: enable and configure memberof plugin
    8.: enable plugin log level
    9.: restart the server
    """
    inst = topology[0]

    inst.restart()

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable automember
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    # Add the automember groups
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = []
    for i in range(1, 5):
        group.append(groups.create(properties={'cn': 'group%d' % i}))

    # Add the automember config entries
    am_configs = AutoMembershipDefinitions(inst)
    am_configs_cleanup = []
    for g in group:
        am_config = am_configs.create(properties={'cn': 'config %s' % g.get_attr_val_utf8('cn'),
                                                  'autoMemberScope': DEFAULT_SUFFIX,
                                                  'autoMemberFilter': 'uid=*',
                                                  'autoMemberDefaultGroup': g.dn,
                                                  'autoMemberGroupingAttr': 'member:dn'})
        am_configs_cleanup.append(am_config)

    # Enable and configure memberof plugin
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.replace_groupattr('member')

    memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    try:
        memberof_config.create(properties={'cn': 'memberOf config',
                                           'memberOfGroupAttr': 'member',
                                           'memberOfAttr': 'memberof'})
    except ldap.ALREADY_EXISTS:
        pass

    # Enable plugin log level (useful for debugging)
    inst.setLogLevel(65536)
    inst.restart()

    def fin():
        inst.restart()
        for am_config in am_configs_cleanup:
            am_config.delete()
        for g in group:
            try:
                g.delete()
            except:
                pass

    request.addfinalizer(fin)