def test_syncrepl_basic(topology):
    """ Test basic functionality of the SyncRepl interface

    :id: f9fea826-8ae2-412a-8e88-b8e0ba939b06
    :setup: Standalone instance
    :steps:
        1. Enable Retro Changelog
        2. Enable Syncrepl
        3. Run the syncstate test to check refresh, add, delete, mod.
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st = topology.standalone
    # Enable RetroChangelog - the sync repl plugin reads changes from it.
    rcl = RetroChangelogPlugin(st)
    rcl.enable()
    # Set the default targetid attribute mapping used by sync repl cookies.
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
    # Enable sync repl (content sync plugin, RFC 4533).
    csp = ContentSyncPlugin(st)
    csp.enable()
    # Restart DS so both plugin changes take effect.
    st.restart()
    # Setup the syncer (native lib389 sync repl client).
    sync = ISyncRepl(st)
    # Run the checks: refresh, add, delete, mod.
    syncstate_assert(st, sync)
def test_syncrepl_openldap(topology):
    """ Test basic functionality of the openldap syncrepl compatability handler.

    :id: 03039178-2cc6-40bd-b32c-7d6de108828b
    :setup: Standalone instance
    :steps:
        1. Enable Retro Changelog
        2. Enable Syncrepl
        3. Run the syncstate test to check refresh, add, delete, mod.
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st = topology.standalone
    # Enable RetroChangelog - the sync repl plugin reads changes from it.
    rcl = RetroChangelogPlugin(st)
    rcl.enable()
    # Set the default targetid attribute mapping used by sync repl cookies.
    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
    # Enable sync repl (content sync plugin, RFC 4533).
    csp = ContentSyncPlugin(st)
    csp.enable()
    # Restart DS so both plugin changes take effect.
    st.restart()
    # Setup the syncer using the openldap compatibility handler.
    sync = ISyncRepl(st, openldap=True)
    # Run the checks: refresh, add, delete, mod.
    syncstate_assert(st, sync)
# NOTE(review): this function has the SAME NAME as the previous
# test_syncrepl_openldap, so at import time this definition shadows the
# earlier one and pytest will only collect/run this version. It also reuses
# the same :id:. One of the two should be renamed (e.g.
# test_syncrepl_openldap_entryuuid) and given a fresh :id:.
def test_syncrepl_openldap(topology):
    """ Test basic functionality of the openldap syncrepl compatability handler.

    :id: 03039178-2cc6-40bd-b32c-7d6de108828b
    :setup: Standalone instance
    :steps:
        1. Enable Retro Changelog
        2. Enable Syncrepl
        3. Run the syncstate test to check refresh, add, delete, mod.
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    st = topology.standalone
    # Ensure entryuuid is setup: run the fixup task so existing entries get
    # an entryUUID before sync repl maps it into cookies.
    plug = EntryUUIDPlugin(st)
    task = plug.fixup(DEFAULT_SUFFIX)
    task.wait()
    st.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.PLUGIN))
    assert (task.is_complete() and task.get_exit_code() == 0)
    # Enable RetroChangelog - the sync repl plugin reads changes from it.
    rcl = RetroChangelogPlugin(st)
    rcl.enable()
    # Set the default targetid mappings: both nsuniqueid and entryuuid are
    # recorded on retro changelog entries.
    rcl.add('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
    rcl.add('nsslapd-attribute', 'entryuuid:targetEntryUUID')
    # Enable sync repl with the openldap compatibility mode turned on.
    csp = ContentSyncPlugin(st)
    csp.add('syncrepl-allow-openldap', 'on')
    csp.enable()
    # Restart DS so the plugin changes take effect.
    st.restart()
    # Setup the syncer using the openldap compatibility handler.
    sync = ISyncRepl(st, openldap=True)
    # Run the checks: refresh, add, delete, mod.
    syncstate_assert(st, sync)
def test_sync_repl_cookie_with_failure(topology, request):
    """Test sync_repl cookie are progressing is the right order
       when there is a failure in nested updates

    :id: e0103448-170e-4080-8f22-c34606447ce2
    :setup: Standalone Instance
    :steps:
      1.: enable retroCL
      2.: configure retroCL to log nsuniqueid as targetUniqueId
      3.: enable content_sync plugin
      4.: enable automember
      5.: create (4) groups.
          make group2 groupOfUniqueNames so the automember
          will fail to add 'member' (uniqueMember expected)
      6.: configure automember to provision those groups with 'member'
      7.: enable and configure memberof plugin
      8.: enable plugin log level
      9.: restart the server
      10.: create a thread dedicated to run a sync repl client
      11.: Create a group that will be the only update received by sync repl client
      12.: Create (9) users that will generate nested updates (automember/memberof)
      13.: stop sync repl client and collect the list of cookie.change_no
      14.: check that the list of cookie.change_no contains only the group 'step 11'
    :expectedresults:
      1.: succeeds
      2.: succeeds
      3.: succeeds
      4.: succeeds
      5.: succeeds
      6.: succeeds
      7.: succeeds
      8.: succeeds
      9.: succeeds
      10.: succeeds
      11.: succeeds
      12.: Fails (expected)
      13.: succeeds
      14.: succeeds
    """
    inst = topology[0]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable automember
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    # Add the automember group
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = []
    for i in range(1, 5):
        group.append(groups.create(properties={'cn': 'group%d' % i}))

    # Set group2 as a groupOfUniqueNames so that automember will fail to update that group
    # This will trigger a failure in internal MOD and a failure to add member
    group[1].replace('objectclass', 'groupOfUniqueNames')

    # Add the automember config entry
    am_configs = AutoMembershipDefinitions(inst)
    for g in group:
        am_config = am_configs.create(
            properties={
                'cn': 'config %s' % g.get_attr_val_utf8('cn'),
                'autoMemberScope': DEFAULT_SUFFIX,
                'autoMemberFilter': 'uid=*',
                'autoMemberDefaultGroup': g.dn,
                'autoMemberGroupingAttr': 'member:dn'
            })

    # Enable and configure memberof plugin
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.replace_groupattr('member')

    memberof_config = MemberOfSharedConfig(
        inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    memberof_config.create(
        properties={
            'cn': 'memberOf config',
            'memberOfGroupAttr': 'member',
            'memberOfAttr': 'memberof'
        })

    # Enable plugin log level (useful for debug)
    inst.setLogLevel(65536)
    inst.restart()

    # create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(inst)
    sync_repl.start()
    time.sleep(5)

    # Add a test group just to check that sync_repl receives only one update
    group.append(groups.create(properties={'cn': 'group%d' % 10}))

    # create users, that automember/memberof will generate nested updates
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    users_set = []
    for i in range(1000, 1010):
        try:
            users_set.append(users.create_test_user(uid=i))
            # Automember should fail to add the user to group2
            # (groupOfUniqueNames rejects 'member'), so the ADD must be refused.
            assert (False)
        except ldap.UNWILLING_TO_PERFORM:
            pass

    # stop the server to get the sync_repl result set (exit from while loop).
    # Only way I found to acheive that.
    # and wait a bit to let sync_repl thread time to set its result before fetching it.
    inst.stop()
    time.sleep(10)
    cookies = sync_repl.get_result()

    # checking that the cookie list contains only one entry (the group from step 11)
    assert len(cookies) == 1
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)

        assert int(cookie) > 0
        assert int(cookie) < 1000
        assert int(cookie) > prev
        prev = int(cookie)
    sync_repl.join()
    log.info('test_sync_repl_cookie_with_failure: PASS\n')

    def fin():
        # Best-effort cleanup: catch only LDAP errors (e.g. NO_SUCH_OBJECT for
        # users whose ADD was refused), not a blanket bare except.
        inst.restart()
        for user in users_set:
            try:
                user.delete()
            except ldap.LDAPError:
                pass
        for g in group:
            try:
                g.delete()
            except ldap.LDAPError:
                pass

    request.addfinalizer(fin)
def test_sync_repl_cookie_add_del(topology, request):
    """Test sync_repl cookie are progressing is an increasing order
       when there add and del

    :id: 83e11038-6ed0-4a5b-ac77-e44887ab11e3
    :setup: Standalone Instance
    :steps:
      1.: enable retroCL
      2.: configure retroCL to log nsuniqueid as targetUniqueId
      3.: enable content_sync plugin
      4.: enable automember
      5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem.
      6.: configure automember to provision those groups with 'member'
      7.: enable and configure memberof plugin
      8.: enable plugin log level
      9.: restart the server
      10.: create a thread dedicated to run a sync repl client
      11.: Create (3) users that will generate nested updates (automember/memberof)
      12.: Delete (3) users
      13.: stop sync repl client and collect the list of cookie.change_no
      14.: check that cookies.change_no are in increasing order
    :expectedresults:
      1.: succeeds
      2.: succeeds
      3.: succeeds
      4.: succeeds
      5.: succeeds
      6.: succeeds
      7.: succeeds
      8.: succeeds
      9.: succeeds
      10.: succeeds
      11.: succeeds
      12.: succeeds
      13.: succeeds
      14.: succeeds
    """
    inst = topology[0]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable automember
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    # Add the automember group
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = []
    for i in range(1, 3):
        group.append(groups.create(properties={'cn': 'group%d' % i}))

    # Add the automember config entry
    am_configs = AutoMembershipDefinitions(inst)
    for g in group:
        am_config = am_configs.create(
            properties={
                'cn': 'config %s' % g.get_attr_val_utf8('cn'),
                'autoMemberScope': DEFAULT_SUFFIX,
                'autoMemberFilter': 'uid=*',
                'autoMemberDefaultGroup': g.dn,
                'autoMemberGroupingAttr': 'member:dn'
            })

    # Enable and configure memberof plugin
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.replace_groupattr('member')

    memberof_config = MemberOfSharedConfig(
        inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    memberof_config.create(
        properties={
            'cn': 'memberOf config',
            'memberOfGroupAttr': 'member',
            'memberOfAttr': 'memberof'
        })

    # Enable plugin log level (useful for debug)
    inst.setLogLevel(65536)
    inst.restart()

    # create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(inst)
    sync_repl.start()
    time.sleep(5)

    # create users, that automember/memberof will generate nested updates
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    users_set = []
    for i in range(10001, 10004):
        users_set.append(users.create_test_user(uid=i))

    time.sleep(10)
    # delete users, that automember/memberof will generate nested updates
    for user in users_set:
        user.delete()

    # stop the server to get the sync_repl result set (exit from while loop).
    # Only way I found to acheive that.
    # and wait a bit to let sync_repl thread time to set its result before fetching it.
    inst.stop()
    time.sleep(10)
    cookies = sync_repl.get_result()

    # checking that the cookie are in increasing and in an acceptable range (0..1000)
    assert len(cookies) > 0
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)

        assert int(cookie) > 0
        assert int(cookie) < 1000
        assert int(cookie) > prev
        prev = int(cookie)
    sync_repl.join()
    log.info('test_sync_repl_cookie_add_del: PASS\n')

    def fin():
        # Best-effort cleanup: catch only LDAP errors, not a blanket bare except.
        inst.restart()
        for g in group:
            try:
                g.delete()
            except ldap.LDAPError:
                pass

    request.addfinalizer(fin)
def test_sync_repl_mep(topology, request):
    """Test sync repl with MEP plugin that triggers several
    updates on the same entry

    :id: d9515930-293e-42da-9835-9f255fa6111b
    :setup: Standalone Instance
    :steps:
        1. enable retro/sync_repl/mep
        2. Add mep Template and definition entry
        3. start sync_repl client
        4. Add users with PosixAccount ObjectClass (mep will update it several times)
        5. Check that the received cookie are progressing
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    inst = topology[0]

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable the Managed Entries plugin.
    # (Bug fix: the original called plugin.enable(), which re-enabled the
    # ContentSync handle and left MEP untouched - 'mana' was never used.)
    mana = ManagedEntriesPlugin(inst)
    mana.enable()

    # Add Template and definition entry
    org1 = OrganizationalUnits(
        inst, DEFAULT_SUFFIX).create(properties={'ou': 'Users'})
    org2 = OrganizationalUnit(inst, f'ou=Groups,{DEFAULT_SUFFIX}')
    meps = MEPTemplates(inst, DEFAULT_SUFFIX)
    mep_template1 = meps.create(
        properties={
            'cn': 'UPG Template1',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': 'objectclass: posixGroup',
            'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'
            .split('|')
        })
    conf_mep = MEPConfigs(inst)
    mep_config = conf_mep.create(
        properties={
            'cn': 'UPG Definition2',
            'originScope': org1.dn,
            'originFilter': 'objectclass=posixaccount',
            'managedBase': org2.dn,
            'managedTemplate': mep_template1.dn
        })

    # Enable plugin log level (useful for debug)
    inst.setLogLevel(65536)
    inst.restart()

    # create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(inst)
    sync_repl.start()
    time.sleep(5)

    # Add users with PosixAccount ObjectClass and verify creation of User Private Group
    user = UserAccounts(inst, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
    assert user.get_attr_val_utf8(
        'mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}'

    # stop the server to get the sync_repl result set (exit from while loop).
    # Only way I found to acheive that.
    # and wait a bit to let sync_repl thread time to set its result before fetching it.
    inst.stop()
    time.sleep(10)
    cookies = sync_repl.get_result()

    # checking that the cookie are in increasing and in an acceptable range (0..1000)
    assert len(cookies) > 0
    prev = 0
    for cookie in cookies:
        log.info('Check cookie %s' % cookie)

        assert int(cookie) > 0
        assert int(cookie) < 1000
        assert int(cookie) > prev
        prev = int(cookie)
    sync_repl.join()
    # Bug fix: the message said 'test_sync_repl_map' (wrong test name).
    log.info('test_sync_repl_mep: PASS\n')
def test_sync_repl_cenotaph(topo_m2, request):
    """Test the creation of a cenotaph while a
    sync repl client is running

    :id: 8ca1724a-cf42-4880-bf0f-be451f9bd3b4
    :setup: MMR with 2 suppliers
    :steps:
        1. Enable retroCL/content_sync
        2. Run a sync repl client
        3. create users
        4. do a MODRDN of a user entry => creation of cenotaph
        5. stop sync repl client
    :expectedresults:
        1. Should succeeds
        2. Should succeeds
        3. Should succeeds
        4. Should succeeds
        5. Should succeeds
    """
    m1 = topo_m2.ms["supplier1"]
    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(m1)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(m1)
    plugin.enable()

    # Restart DS so plugin changes take effect
    m1.restart()

    # create a sync repl client and wait 5 seconds to be sure it is running
    sync_repl = Sync_persist(m1)
    sync_repl.start()
    time.sleep(5)

    # create users
    users = UserAccounts(m1, DEFAULT_SUFFIX)
    users_set = []
    for i in range(10001, 10003):
        users_set.append(users.create_test_user(uid=i))

    # rename the entry that would trigger the creation of a cenotaph
    users_set[0].rename("uid=foo")

    # stop the server to get the sync_repl result set (exit from while loop).
    # Only way I found to acheive that.
    # and wait a bit to let sync_repl thread time to set its result before fetching it.
    # NOTE(review): unlike the sibling tests, this test never calls
    # sync_repl.get_result() or sync_repl.join(), so the client thread is
    # left unjoined and no cookie assertions are made - presumably the test
    # only checks the server survives the cenotaph creation; confirm intent.
    m1.stop()
    time.sleep(2)

    def fin():
        # Cleanup: restart the (stopped) supplier and best-effort delete users.
        # NOTE(review): users_set[0] was renamed to uid=foo, so its delete()
        # may fail here and be swallowed by the bare except - verify cleanup.
        m1.restart()
        for user in users_set:
            try:
                user.delete()
            except:
                pass

    request.addfinalizer(fin)
def init_sync_repl_plugins(topology, request):
    """Prepare test environment (retroCL/sync_repl/automember/memberof)
    and cleanup at the end of the test

    1.: enable retroCL
    2.: configure retroCL to log nsuniqueid as targetUniqueId
    3.: enable content_sync plugin
    4.: enable automember
    5.: create (4) groups. Few groups can help to reproduce the concurrent updates problem.
    6.: configure automember to provision those groups with 'member'
    7.: enable and configure memberof plugin
    8.: enable plugin log level
    9.: restart the server
    """
    inst = topology[0]
    inst.restart()

    # Enable/configure retroCL
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid')

    # Enable sync plugin
    plugin = ContentSyncPlugin(inst)
    plugin.enable()

    # Enable automember
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    # Add the automember groups
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = []
    for i in range(1, 5):
        group.append(groups.create(properties={'cn': 'group%d' % i}))

    # Add the automember config entry (one per group), remembered for cleanup
    am_configs = AutoMembershipDefinitions(inst)
    am_configs_cleanup = []
    for g in group:
        am_config = am_configs.create(
            properties={
                'cn': 'config %s' % g.get_attr_val_utf8('cn'),
                'autoMemberScope': DEFAULT_SUFFIX,
                'autoMemberFilter': 'uid=*',
                'autoMemberDefaultGroup': g.dn,
                'autoMemberGroupingAttr': 'member:dn'
            })
        am_configs_cleanup.append(am_config)

    # Enable and configure memberof plugin
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()
    plugin.replace_groupattr('member')

    memberof_config = MemberOfSharedConfig(
        inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    try:
        memberof_config.create(
            properties={
                'cn': 'memberOf config',
                'memberOfGroupAttr': 'member',
                'memberOfAttr': 'memberof'
            })
    except ldap.ALREADY_EXISTS:
        # The shared config may have been left behind by a previous test run.
        pass

    # Enable plugin log level (useful for debug)
    inst.setLogLevel(65536)
    inst.restart()

    def fin():
        # Best-effort cleanup. Each delete is guarded individually so one
        # failure (e.g. an entry already gone) cannot abort the remaining
        # cleanup; only LDAP errors are swallowed, not a blanket bare except.
        inst.restart()
        for am_config in am_configs_cleanup:
            try:
                am_config.delete()
            except ldap.LDAPError:
                pass
        for g in group:
            try:
                g.delete()
            except ldap.LDAPError:
                pass

    request.addfinalizer(fin)
def contentsync_add(inst, basedn, log, args):
    """CLI handler: add an attribute to the Content Sync plugin config entry.

    Delegates to generic_object_add_attr, mapping CLI args to plugin
    attributes via arg_to_attr.
    """
    child_log = log.getChild('contentsync_add')
    content_sync = ContentSyncPlugin(inst)
    generic_object_add_attr(content_sync, child_log, args, arg_to_attr)