def m4rid(request, topology_m4):
    """Fixture: verify the four-master topology replicates, return master4's
    replica id, and register a finalizer that cleans rid4 and restores m4."""
    # Slow machines need a short settle period before the reset.
    log.debug("Wait a bit before the reset - it is required for the slow machines")
    time.sleep(5)
    log.debug("-------------- BEGIN RESET of m4 -----------------")
    replmgr = ReplicationManager(DEFAULT_SUFFIX)
    replmgr.test_replication_topology(topology_m4.ms.values())
    # Record master4's replica id so the tests (and the finalizer) can clean it.
    rid = replmgr.get_rid(topology_m4.ms["master4"])

    def _teardown():
        try:
            # Restart every master, then rerun cleanallruv for rid4.
            for inst in topology_m4.ms.values():
                inst.restart()
            task = CleanAllRUVTask(topology_m4.ms["master1"])
            task.create(properties={'replica-id': rid,
                                    'replica-base-dn': DEFAULT_SUFFIX,
                                    'replica-force-cleaning': 'no'})
            task.wait()
        except ldap.UNWILLING_TO_PERFORM:
            # rid4 may already have been cleaned by the test itself - that is fine.
            pass
        restore_master4(topology_m4)
        # Confirm the restored topology still replicates.
        replmgr.test_replication_topology(topology_m4.ms.values())

    request.addfinalizer(_teardown)
    log.debug("-------------- FINISH RESET of m4 -----------------")
    return rid
def test_extract_pemfiles(tls_client_auth):
    """Test TLS client authentication between two masters operates
    as expected with 'on' and 'off' options of nsslapd-extract-pemfiles

    :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e1
    :setup: Two master replication, enabled TLS client auth
    :steps: 1. Check that nsslapd-extract-pemfiles default value is right
            2. Check that replication works with both 'on' and 'off' values
    :expectedresults:
            1. Success
            2. Replication works
    """
    m1 = tls_client_auth.ms['master1']
    m2 = tls_client_auth.ms['master2']
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # The default value flipped from 'off' to 'on' in 1.3.7.
    if ds_is_older('1.3.7'):
        default_val = 'off'
    else:
        default_val = 'on'
    attr_val = m1.config.get_attr_val_utf8('nsslapd-extract-pemfiles')
    log.info("Check that nsslapd-extract-pemfiles is {}".format(default_val))
    assert attr_val == default_val

    for extract_pemfiles in ('on', 'off'):
        # Bug fix: the original message never filled in its '{}' placeholder
        # (missing .format(extract_pemfiles)) and carried a stray ')'.
        log.info("Set nsslapd-extract-pemfiles = '{}' and check replication works".format(extract_pemfiles))
        m1.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
        m2.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
        repl.test_replication_topology(tls_client_auth)
def m4rid(request, topology_m4):
    """Fixture: verify replication across the four masters and return
    master4's replica id; a finalizer cleans rid4 and restores master4."""
    log.debug("-------------- BEGIN RESET of m4 -----------------")
    replmgr = ReplicationManager(DEFAULT_SUFFIX)
    replmgr.test_replication_topology(topology_m4.ms.values())
    # Capture master4's replica id for the tests and the teardown below.
    rid = replmgr.get_rid(topology_m4.ms["master4"])

    def _teardown():
        try:
            task = CleanAllRUVTask(topology_m4.ms["master1"])
            task.create(properties={'replica-id': rid,
                                    'replica-base-dn': DEFAULT_SUFFIX})
            task.wait()
        except ldap.UNWILLING_TO_PERFORM:
            # rid4 may already have been cleaned; failing here is acceptable.
            pass
        restore_master4(topology_m4)
        # Confirm the restored topology still replicates.
        replmgr.test_replication_topology(topology_m4.ms.values())

    request.addfinalizer(_teardown)
    log.debug("-------------- FINISH RESET of m4 -----------------")
    return rid
def test_ssl_transport(tls_client_auth):
    """Test different combinations for nsDS5ReplicaTransportInfo values

    :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2
    :setup: Two master replication, enabled TLS client auth
    :steps: 1. Set nsDS5ReplicaTransportInfoCheck: SSL or StartTLS or TLS
            2. Restart the instance
            3. Check that replication works
            4. Set nsDS5ReplicaTransportInfoCheck: LDAPS back
    :expectedresults:
            1. Success
            2. Success
            3. Replication works
            4. Success
    """
    m1 = tls_client_auth.ms['master1']
    m2 = tls_client_auth.ms['master2']
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # One agreement per direction: m1 -> m2 and m2 -> m1.
    agmt_m1 = Replicas(m1).get(DEFAULT_SUFFIX).get_agreements().list()[0]
    agmt_m2 = Replicas(m2).get(DEFAULT_SUFFIX).get_agreements().list()[0]

    def _retarget(agmt, peer, transport, secure_port):
        """Point the agreement at *peer* using *transport* and the matching port."""
        port = peer.sslport if secure_port else peer.port
        agmt.replace_many(('nsDS5ReplicaTransportInfo', transport),
                          ('nsDS5ReplicaPort', '{}'.format(port)))

    # Older servers only understand the legacy transport keywords.
    if ds_is_older('1.4.0.6'):
        check_list = (('TLS', False),)
    else:
        check_list = (('SSL', True), ('StartTLS', False), ('TLS', False))

    for transport, secure_port in check_list:
        _retarget(agmt_m1, m2, transport, secure_port)
        _retarget(agmt_m2, m1, transport, secure_port)
        repl.test_replication_topology(tls_client_auth)

    # Restore the version-appropriate secure transport for later tests.
    restore_transport = 'SSL' if ds_is_older('1.4.0.6') else 'LDAPS'
    agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', restore_transport),
                         ('nsDS5ReplicaPort', str(m2.sslport)))
    agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', restore_transport),
                         ('nsDS5ReplicaPort', str(m1.sslport)))
    repl.test_replication_topology(tls_client_auth)
def test_online_reinit_may_hang(topo_with_sigkill):
    """Online reinitialization may hang when the first entry of the DB
    is RUV entry instead of the suffix

    :id: cded6afa-66c0-4c65-9651-993ba3f7a49c
    :setup: 2 Supplier Instances
    :steps:
        1. Export the database
        2. Move RUV entry to the top in the ldif file
        3. Import the ldif file
        4. Check that replication is still working
        5. Online replica initializaton
    :expectedresults:
        1. Ldif file should be created successfully
        2. RUV entry should be on top in the ldif file
        3. Import should be successful
        4. Replication should work
        5. Server should not hang and consume 100% CPU
    """
    supplier1 = topo_with_sigkill.ms["supplier1"]
    supplier2 = topo_with_sigkill.ms["supplier2"]

    # Export supplier1's database offline, including replication data.
    supplier1.stop()
    ldif_file = '%s/supplier1.ldif' % supplier1.get_ldif_dir()
    supplier1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
                      excludeSuffixes=None, repl_data=True,
                      outputfile=ldif_file, encrypt=False)
    # Put the RUV entry first in the ldif, then re-import it.
    _move_ruv(ldif_file)
    supplier1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    supplier1.start()

    # After this the server may hang.
    # Exporting an idle server with replication data and reimporting it
    # should not break replication (unless we hit issue 5098), so verify
    # that replication still works.
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)

    # Kick off an online reinit and make sure it completes cleanly.
    agmt = Agreements(supplier1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
def test_healthcheck_replication_presence_of_conflict_entries(topology_m2):
    """Check if HealthCheck returns DSREPLLE0002 code

    :id: 43abc6c6-2075-42eb-8fa3-aa092ff64cba
    :setup: Replicated topology
    :steps:
        1. Create a replicated topology
        2. Create conflict entries : different entries renamed to the same dn
        3. Use HealthCheck without --json option
        4. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Healthcheck reports DSREPLLE0002 code and related details
        4. Healthcheck reports DSREPLLE0002 code and related details
    """
    ret_code = 'DSREPLLE0002'
    master1 = topology_m2.ms['master1']
    master2 = topology_m2.ms['master2']

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(master1, master2)

    # Pause replication so the same uid can be created independently on both
    # masters, producing a conflict once replication resumes.
    topology_m2.pause_all_replicas()
    log.info("Create conflict entries")
    users_on_m1 = UserAccounts(master1, DEFAULT_SUFFIX)
    users_on_m2 = UserAccounts(master2, DEFAULT_SUFFIX)
    user_num = 1000
    users_on_m1.create_test_user(user_num, 2000)
    users_on_m2.create_test_user(user_num, 2000)
    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2)

    # The conflict must be reported in both plain-text and JSON output.
    for as_json in (False, True):
        run_healthcheck_and_flush_log(topology_m2, master1, ret_code, json=as_json)
def check_replicas(topology_m2):
    """Check that replication is in sync and working

    Verifies the two masters replicate, then asserts that neither database
    still contains any "person", "entry" or "employee" test entries.

    :param topology_m2: two-master topology fixture
    """
    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]

    log.info('Checking if replication is in sync...')
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topology_m2)

    #
    # Verify the databases are identical. There should not be any
    # "user, entry, employee" entries
    #
    log.info('Checking if the data is the same between the replicas...')
    filterstr = "(|(uid=person*)(uid=entry*)(uid=employee*))"

    # Check the master
    try:
        entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr)
        if len(entries) > 0:
            log.error('Master database has incorrect data set!\n')
            assert False
    except ldap.LDAPError as e:
        # Bug fix: LDAPError has no ".message" attribute on Python 3, so the
        # old "e.message['desc']" raised AttributeError instead of logging.
        log.fatal('Unable to search db on master: ' + str(e))
        assert False

    # Check the consumer
    try:
        entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr)
        if len(entries) > 0:
            log.error('Consumer database in not consistent with master database')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on consumer: ' + str(e))
        assert False

    log.info('Data is consistent across the replicas.\n')
def test_dblib_migration(topo_m2, init_user):
    """Verify dsctl dblib sub commands (migration between bdb and lmdb)

    :id: 5d327c34-e77a-46e5-a8aa-0a552f9bbdef
    :setup: Two suppliers Instance
    :steps:
        1. Determine current database
        2. Switch to the other database, then back again
        3. Check the backend and replication after each switch
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    s1 = topo_m2.ms["supplier1"]
    s2 = topo_m2.ms["supplier2"]
    db_lib = s1.get_db_lib()
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # The seeded test user must exist before converting anything.
    users = UserAccounts(s1, DEFAULT_SUFFIX)
    assert users.get('test entry')

    args = FakeArgs({'tmpdir': None})

    # Migrate away from the current backend, then back to it; after each
    # conversion the expected db lib must be in place and replication working.
    if db_lib == 'bdb':
        migrations = ((dblib_bdb2mdb, 'mdb'), (dblib_mdb2bdb, 'bdb'))
    else:
        migrations = ((dblib_mdb2bdb, 'bdb'), (dblib_bdb2mdb, 'mdb'))

    for convert, expected_lib in migrations:
        convert(s1, log, args)
        dblib_cleanup(s1, log, args)
        _check_db(s1, log, expected_lib)
        repl.test_replication_topology([s1, s2])
def test_nested_entries_with_children(self, topology_m2, base_m2):
    """Check that conflict properly resolved for operations
    with nested entries with children

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Add 15 containers to m1 and wait for replication to happen
        2. Pause replication
        3. Create parent-child on master2 and master1
        4. Create parent-child on master1 and master2
        5. Create parent-child on master1 and master2 different child rdn
        6. Create parent-child on master1 and delete parent on master2
        7. Create parent on master1, delete it and parent-child on master2, delete them
        8. Create parent on master1, delete it and parent-two children on master2
        9. Create parent-two children on master1 and parent-child on master2, delete them
        10. Create three subsets inside existing container entry,
            applying only part of changes on m2
        11. Create more combinations of the subset with parent-child on m1 and parent on m2
        12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2
        13. Resume replication
        14. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
        9. It should pass
        10. It should pass
        11. It should pass
        12. It should pass
        13. It should pass
        14. It should pass
    """
    # Known-broken scenario: xfail until issue 49591 is resolved.
    pytest.xfail("Issue 49591 - work in progress")

    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    repl = ReplicationManager(SUFFIX)
    test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
    test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
    _create_user(test_users_m1, 4000)
    _create_user(test_users_m1, 4001)

    # Build 15 containers (sub0..sub14) on M1; each scenario below uses its
    # own container so the conflicts stay independent of one another.
    cont_list = []
    for num in range(15):
        cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num))
        cont_list.append(cont)
    repl.test_replication(M1, M2)

    # All following writes happen with replication paused, so each master
    # accumulates a conflicting change set. sleep=True staggers the CSNs.
    topology_m2.pause_all_replicas()

    log.info("Create parent-child on master2 and master1")
    _create_container(M2, base_m2.dn, 'p0', sleep=True)
    cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True)
    _create_container(M1, cont_p.dn, 'c0', sleep=True)
    _create_container(M2, cont_p.dn, 'c0', sleep=True)

    log.info("Create parent-child on master1 and master2")
    cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True)
    _create_container(M2, base_m2.dn, 'p1', sleep=True)
    _create_container(M1, cont_p.dn, 'c1', sleep=True)
    _create_container(M2, cont_p.dn, 'c1', sleep=True)

    log.info("Create parent-child on master1 and master2 different child rdn")
    cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True)
    _create_container(M2, base_m2.dn, 'p2', sleep=True)
    _create_container(M1, cont_p.dn, 'c2', sleep=True)
    _create_container(M2, cont_p.dn, 'c3', sleep=True)

    log.info("Create parent-child on master1 and delete parent on master2")
    # cont_num walks through cont_list so each scenario gets a fresh container.
    cont_num = 0
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
    _delete_container(cont_p_m2)
    cont_num += 1
    # Same scenario, opposite operation order between the masters.
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
    _delete_container(cont_p_m2, sleep=True)

    log.info("Create parent on master1, delete it and parent-child on master2, delete them")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1, sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2)
    cont_num += 1
    # Same scenario with M2 acting first.
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1)

    log.info("Create parent on master1, delete it and parent-two children on master2")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1, sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    _create_container(M2, cont_p_m2.dn, 'c0')
    _create_container(M2, cont_p_m2.dn, 'c1')
    cont_num += 1
    # Same scenario with M2 acting first.
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    _create_container(M2, cont_p_m2.dn, 'c0')
    _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1, sleep=True)

    log.info("Create parent-two children on master1 and parent-child on master2, delete them")
    cont_num += 1
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1')
    cont_num += 1
    # Same scenario with M1 acting first.
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)

    log.info("Create three subsets inside existing container entry, applying only part of changes on m2")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2, sleep=True)

    log.info("Create more combinations of the subset with parent-child on m1 and parent on m2")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    _delete_container(cont_p_m1, sleep=True)
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    _delete_container(cont_p_m1, sleep=True)
    _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
    _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
    _delete_container(cont_c_m1, sleep=True)
    _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)
    _delete_container(cont_p_m1, sleep=True)

    log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2")
    cont_num += 1
    _delete_container(cont_list[cont_num])
    _modify_user(test_users_m1, 4000, sleep=True)
    _create_container(M2, cont_list[cont_num].dn, 'p0')
    _modify_user(test_users_m2, 4001)

    # Let the paused changes flow; conflict resolution must converge.
    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2, timeout=60)

    # Collect every container DN (three levels deep) per master and compare.
    conts_dns = {}
    for num in range(1, 3):
        inst = topology_m2.ms["master{}".format(num)]
        conts_dns[inst.serverid] = []
        conts = nsContainers(inst, base_m2.dn)
        for cont in conts.list():
            conts_p = nsContainers(inst, cont.dn)
            for cont_p in conts_p.list():
                conts_c = nsContainers(inst, cont_p.dn)
                conts_dns[inst.serverid].extend(
                    [cont_c.dn for cont_c in conts_c.list()])
            conts_dns[inst.serverid].extend(
                [cont_p.dn for cont_p in conts_p.list()])
        conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

    assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid])

    # The test users must also have converged to identical DN sets.
    user_dns_m1 = [user.dn for user in test_users_m1.list()]
    user_dns_m2 = [user.dn for user in test_users_m2.list()]
    assert set(user_dns_m1) == set(user_dns_m2)
def test_memberof_with_changelog_reset(topo_m2):
    """Test that replication does not break, after DS stop-start, due to changelog reset

    :id: 60c11636-55a1-4704-9e09-2c6bcc828de4
    :setup: 2 Masters
    :steps:
        1. On M1 and M2, Enable memberof
        2. On M1, add 999 entries allowing memberof
        3. On M1, add a group with these 999 entries as members
        4. Stop M1 in between, when add the group memerof is called and before
           it is finished the add, so step 4 should be executed after memberof
           has started and before the add has finished
        5. Check that replication is working fine
    :expectedresults:
        1. memberof should be enabled
        2. Entries should be added
        3. Add operation should start
        4. M1 should be stopped
        5. Replication should be working fine
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]

    log.info("Configure memberof on M1 and M2")
    for inst in (m1, m2):
        plugin = MemberOfPlugin(inst)
        plugin.enable()
        plugin.set_autoaddoc('nsMemberOf')
        inst.restart()

    log.info("On M1, add 999 test entries allowing memberof")
    users_list = add_users(topo_m2, 999, DEFAULT_SUFFIX)

    log.info("On M1, add a group with these 999 entries as members")
    group_attrs = {
        'cn': ensure_bytes('testgroup'),
        'objectclass': ensure_list_bytes(['top', 'groupOfNames']),
        'member': [user.dn for user in users_list],
    }

    log.info('Adding the test group using async function')
    groupdn = 'cn=testgroup,%s' % DEFAULT_SUFFIX
    m1.add(Entry((groupdn, group_attrs)))

    # Shut the server down while memberof is still processing the group add,
    # then bring it back up.
    m1.stop()
    m1.start()

    log.info("Check the log messages for error")
    error_msg = "ERR - NSMMReplicationPlugin - ruv_compare_ruv"
    assert not m1.ds_error_log.match(error_msg)

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_m2)
def test_nested_entries(self, topology_m3, base_m3):
    """Check that conflict properly resolved for operations
    with nested entries with children

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
    :setup: Three master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Add 15 containers to m1 and wait for replication to happen
        2. Pause replication
        3. Create two child entries under each of two entries
        4. Create three child entries under each of three entries
        5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
           on m2 - delete one parent and create a child
        6. Test a few more parent-child combinations with three instances
        7. Resume replication
        8. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
    """
    # Known-broken scenario: xfail until issue 49591 is resolved.
    pytest.xfail("Issue 49591 - work in progress")

    M1 = topology_m3.ms["master1"]
    M2 = topology_m3.ms["master2"]
    M3 = topology_m3.ms["master3"]
    repl = ReplicationManager(SUFFIX)

    # Build 11 containers (sub0..sub10) on M1; each scenario below consumes
    # one container so the conflicts remain independent.
    cont_list = []
    for num in range(11):
        cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num))
        cont_list.append(cont)
    repl.test_replication(M1, M2)
    repl.test_replication(M1, M3)

    # All following writes happen with replication paused, so each master
    # accumulates a conflicting change set. sleep=True staggers the CSNs.
    topology_m3.pause_all_replicas()

    log.info("Create two child entries under each of two entries")
    # cont_num walks through cont_list; start at -1 so the first increment
    # lands on sub0.
    cont_num = -1
    for num in range(2):
        cont_num += 1
        _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

    log.info("Create three child entries under each of three entries")
    for num in range(3):
        cont_num += 1
        _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)
        _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

    log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent,"
             "on m2 - delete one parent and create a child")
    # Run the scenario in both directions (M1-first, then M2-first).
    for inst1, inst2 in ((M1, M2), (M2, M1)):
        cont_num += 1
        cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
        cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1', sleep=True)
        cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
        cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1', sleep=True)
        _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)
        _delete_container(cont_p_m2_1, sleep=True)
        _delete_container(cont_p_m1_2, sleep=True)
        _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

    log.info("Test a few more parent-child combinations on three instances")
    # Rotate which instance plays each role across the three masters.
    for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):
        cont_num += 1
        cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)
        cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)
        cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')
        _create_container(inst3, cont_p_m3.dn, 'c0')
        _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

    # Let the paused changes flow; conflict resolution must converge.
    topology_m3.resume_all_replicas()
    repl.test_replication_topology(topology_m3)

    # Collect every container DN (three levels deep) per master and compare
    # each pair of masters.
    conts_dns = {}
    for num in range(1, 4):
        inst = topology_m3.ms["master{}".format(num)]
        conts_dns[inst.serverid] = []
        conts = nsContainers(inst, base_m3.dn)
        for cont in conts.list():
            conts_p = nsContainers(inst, cont.dn)
            for cont_p in conts_p.list():
                conts_c = nsContainers(inst, cont_p.dn)
                conts_dns[inst.serverid].extend(
                    [cont_c.dn for cont_c in conts_c.list()])
            conts_dns[inst.serverid].extend(
                [cont_p.dn for cont_p in conts_p.list()])
        conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

    for conts1, conts2 in permutations(conts_dns.values(), 2):
        assert set(conts1) == set(conts2)
def tls_client_auth(topo_m2):
    """Enable TLS on both masters and reconfigure both agreements
    to use TLS Client auth
    """
    m1 = topo_m2.ms['master1']
    m2 = topo_m2.ms['master2']

    # Older servers spell the secure transport 'SSL'; newer ones 'LDAPS'.
    transport = 'SSL' if ds_is_older('1.4.0.6') else 'LDAPS'

    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'
    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    for inst in topo_m2:
        inst.enable_tls()

    # Create the replication dns: publish each server's TLS subject DN on
    # its replication service account.
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    for inst in (m1, m2):
        svc = services.get('%s:%s' % (inst.host, inst.sslport))
        svc.set('nsCertSubjectDN', inst.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)

    # Now change the auth type on both agreements (m1 -> m2 first).
    for inst, peer in ((m1, m2), (m2, m1)):
        agmt = Replicas(inst).get(DEFAULT_SUFFIX).get_agreements().list()[0]
        agmt.replace_many(
            ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
            ('nsDS5ReplicaTransportInfo', transport),
            ('nsDS5ReplicaPort', str(peer.sslport)),
        )
        agmt.remove_all('nsDS5ReplicaBindDN')

    repl.test_replication_topology(topo_m2)

    return topo_m2
def test_stress_clean(topology_m4, m4rid):
    """Put each server(m1 - m4) under a stress, and perform the entire clean process

    :id: a8263cd6-f068-4357-86e0-e7c34504c8c5
    :setup: Replication setup with four masters
    :steps:
        1. Add a bunch of updates to all masters
        2. Put master 4 to read-only mode
        3. Disable replication on master 4
        4. Remove agreements to master 4 from other masters
        5. Run a cleanallruv task on master 1
        6. Check that everything was cleaned
    :expectedresults:
        1. Operation should be successful
        2. Master 4 should be put to read-only mode
        3. Replication on master 4 should be disabled
        4. Agreements to master 4 should be removed
        5. Operation should be successful
        6. Everything should be cleaned
    """
    log.info('Running test_stress_clean...')
    log.info('test_stress_clean: put all the masters under load...')
    ldbm_config = LDBMConfig(topology_m4.ms["master4"])

    # Put all the masters under load: each AddUsers thread pumps 2000 adds.
    m1_add_users = AddUsers(topology_m4.ms["master1"], 2000)
    m1_add_users.start()
    m2_add_users = AddUsers(topology_m4.ms["master2"], 2000)
    m2_add_users.start()
    m3_add_users = AddUsers(topology_m4.ms["master3"], 2000)
    m3_add_users.start()
    m4_add_users = AddUsers(topology_m4.ms["master4"], 2000)
    m4_add_users.start()

    # Allow sometime to get replication flowing in all directions
    log.info('test_stress_clean: allow some time for replication to get flowing...')
    time.sleep(5)

    # Put master 4 into read only mode
    ldbm_config.set('nsslapd-readonly', 'on')
    # We need to wait for master 4 to push its changes out
    log.info('test_stress_clean: allow some time for master 4 to push changes out (60 seconds)...')
    # NOTE(review): the message above says 60 seconds but the actual sleep
    # is 30 - confirm which value is intended.
    time.sleep(30)

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_stress_clean", topology_m4)

    # Run the cleanallruv task for rid4 (no forced cleaning) and wait for it.
    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
    cruv_task.create(
        properties={
            'replica-id': m4rid,
            'replica-base-dn': DEFAULT_SUFFIX,
            'replica-force-cleaning': 'no'
        })
    cruv_task.wait()

    # Wait for the update to finish: join all four load threads.
    log.info('test_stress_clean: wait for all the updates to finish...')
    m1_add_users.join()
    m2_add_users.join()
    m3_add_users.join()
    m4_add_users.join()

    # Check the other master's RUV for 'replica 4'
    log.info('test_stress_clean: check if all the replicas have been cleaned...')
    clean = check_ruvs("test_stress_clean", topology_m4, m4rid)
    assert clean

    log.info('test_stress_clean: PASSED, restoring master 4...')

    # Sleep for a bit to replication complete
    log.info("Sleep for 120 seconds to allow replication to complete...")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # Verify the three remaining masters converge within the 120s timeout.
    repl.test_replication_topology([
        topology_m4.ms["master1"],
        topology_m4.ms["master2"],
        topology_m4.ms["master3"],
    ], timeout=120)

    # Turn off readonly mode
    ldbm_config.set('nsslapd-readonly', 'off')
def test_add_modrdn(self, topology_m2, base_m2):
    """Check that conflict properly resolved for create - modrdn operations

    :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Add five users to m1 and wait for replication to happen
        2. Pause replication
        3. Create an entry on m1 and m2
        4. Create an entry on m1 and rename on m2
        5. Rename an entry on m1 and create on m2
        6. Rename an entry on m1 and rename on m2
        7. Rename an entry on m1 and rename on m2. Use different entries
           but rename them to the same entry
        8. Resume replication
        9. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
    """
    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
    test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
    repl = ReplicationManager(SUFFIX)

    # Seed users 1000..1004 on M1 and let them replicate to M2.
    for user_num in range(1000, 1005):
        _create_user(test_users_m1, user_num)
    repl.test_replication(M1, M2)

    # All following writes happen with replication paused, so each master
    # accumulates a conflicting change set. sleep=True staggers the CSNs.
    topology_m2.pause_all_replicas()

    log.info("Test create - modrdn")
    # Same entry created independently on both masters.
    user_num += 1
    _create_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    # Create on M1 vs rename-to-the-same-uid on M2.
    user_num += 1
    _create_user(test_users_m1, user_num, sleep=True)
    _rename_user(test_users_m2, 1000, user_num, sleep=True)
    # Rename on M1 vs create on M2.
    user_num += 1
    _rename_user(test_users_m1, 1001, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    # Same entry renamed to the same target on both masters.
    user_num += 1
    _rename_user(test_users_m1, 1002, user_num, sleep=True)
    _rename_user(test_users_m2, 1002, user_num, sleep=True)
    # Different entries renamed to the same target on each master.
    user_num += 1
    _rename_user(test_users_m1, 1003, user_num, sleep=True)
    _rename_user(test_users_m2, 1004, user_num)

    # Let the paused changes flow; conflict resolution must converge.
    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2)

    # Both masters must end up with identical DN sets.
    user_dns_m1 = [user.dn for user in test_users_m1.list()]
    user_dns_m2 = [user.dn for user in test_users_m2.list()]
    assert set(user_dns_m1) == set(user_dns_m2)
def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2):
    """Check that conflict properly resolved for complex operations
    which involve add, modify, modrdn and delete

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1
    :customerscenario: True
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Add ten users to m1 and wait for replication to happen
        2. Pause replication
        3. Test add-del on m1 and add on m2
        4. Test add-mod on m1 and add on m2
        5. Test add-modrdn on m1 and add on m2
        6. Test multiple add, modrdn
        7. Test Add-del on both masters
        8. Test modrdn-modrdn
        9. Test modrdn-del
        10. Resume replication
        11. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
        9. It should pass
        10. It should pass
        11. It should pass
    """

    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
    test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
    repl = ReplicationManager(SUFFIX)

    # Seed users 1100-1109 on M1 and replicate, establishing a shared baseline.
    for user_num in range(1100, 1110):
        _create_user(test_users_m1, user_num)
    repl.test_replication(M1, M2)
    topology_m2.pause_all_replicas()

    # Each scenario below is repeated three times with the operations
    # interleaved in different orders (sleep=True staggers CSNs so the
    # resolution order is well-defined).
    log.info("Test add-del on M1 and add on M2")
    user_num += 1
    _create_user(test_users_m1, user_num)
    _delete_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    user_num += 1
    _create_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    _delete_user(test_users_m1, user_num, sleep=True)
    user_num += 1
    _create_user(test_users_m2, user_num, sleep=True)
    _create_user(test_users_m1, user_num)
    _delete_user(test_users_m1, user_num)

    log.info("Test add-mod on M1 and add on M2")
    user_num += 1
    _create_user(test_users_m1, user_num)
    _modify_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    user_num += 1
    _create_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    _modify_user(test_users_m1, user_num, sleep=True)
    user_num += 1
    _create_user(test_users_m2, user_num, sleep=True)
    _create_user(test_users_m1, user_num)
    _modify_user(test_users_m1, user_num)

    log.info("Test add-modrdn on M1 and add on M2")
    # The +20 offset keeps renamed uids out of the range used for creates.
    user_num += 1
    _create_user(test_users_m1, user_num)
    _rename_user(test_users_m1, user_num, user_num + 20, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    user_num += 1
    _create_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    _rename_user(test_users_m1, user_num, user_num + 20, sleep=True)
    user_num += 1
    _create_user(test_users_m2, user_num, sleep=True)
    _create_user(test_users_m1, user_num)
    _rename_user(test_users_m1, user_num, user_num + 20)

    log.info("Test multiple add, modrdn")
    # On M1 the entry is created, renamed away, then the uid is re-created,
    # while M2 modifies its own copy of the original entry.
    user_num += 1
    _create_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num, sleep=True)
    _rename_user(test_users_m1, user_num, user_num + 20)
    _create_user(test_users_m1, user_num, sleep=True)
    _modify_user(test_users_m2, user_num, sleep=True)

    log.info("Add - del on both masters")
    # Both masters independently create and delete the same uid.
    user_num += 1
    _create_user(test_users_m1, user_num)
    _delete_user(test_users_m1, user_num, sleep=True)
    _create_user(test_users_m2, user_num)
    _delete_user(test_users_m2, user_num, sleep=True)

    log.info("Test modrdn - modrdn")
    # Same seeded entry (1109) renamed to the same target on both masters.
    user_num += 1
    _rename_user(test_users_m1, 1109, 1129, sleep=True)
    _rename_user(test_users_m2, 1109, 1129, sleep=True)

    log.info("Test modrdn - del")
    # Rename on one master races a delete of the same entry on the other,
    # exercised in both orders (1100 and 1101).
    user_num += 1
    _rename_user(test_users_m1, 1100, 1120, sleep=True)
    _delete_user(test_users_m2, 1100)
    user_num += 1
    _delete_user(test_users_m2, 1101, sleep=True)
    _rename_user(test_users_m1, 1101, 1121)

    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2)
    # Extra settle time: conflict resolution of this many competing ops can
    # lag behind the basic replication check above.
    time.sleep(30)

    # Both masters must converge to identical DN sets.
    user_dns_m1 = [user.dn for user in test_users_m1.list()]
    user_dns_m2 = [user.dn for user in test_users_m2.list()]
    assert set(user_dns_m1) == set(user_dns_m2)
def test_managed_entries(self, topology_m2):
    """Check that conflict properly resolved for operations
    with managed entries

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Create ou=managed_people and ou=managed_groups under test container
        2. Configure managed entries plugin and add a template to test container
        3. Add a user to m1 and wait for replication to happen
        4. Pause replication
        5. Create a user on m1 and m2 with a same group ID on both master
        6. Create a user on m1 and m2 with a different group ID on both master
        7. Resume replication
        8. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
    """

    # Known-broken scenario; kept as an expected failure until 49591 lands.
    pytest.xfail("Issue 49591 - work in progress")

    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    repl = ReplicationManager(SUFFIX)

    # Containers for the MEP origin (people) and the auto-managed groups.
    ous = OrganizationalUnits(M1, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX,
                                 rdn='ou={}'.format(ou_people.rdn))
    test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX,
                                 rdn='ou={}'.format(ou_people.rdn))

    # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)
    # MEP template: for each posixAccount in the origin scope, the plugin
    # builds a posixGroup whose cn/uid/gidNumber are mapped from the user.
    conts = nsContainers(M1, SUFFIX)
    template = conts.create(
        properties={
            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
            'cn': 'MEP Template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
            'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
        })
    # Template must exist on M2 as well before configuring the plugin there.
    repl.test_replication(M1, M2)

    # Plugin config entries are not replicated, so create one per master
    # (restart required for the plugin to pick up the new config).
    for inst in topology_m2.ms.values():
        conts = nsContainers(
            inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))
        conts.create(
            properties={
                'objectclass': 'top extensibleObject'.split(),
                'cn': 'config',
                'originScope': ou_people.dn,
                'originFilter': 'objectclass=posixAccount',
                'managedBase': ou_groups.dn,
                'managedTemplate': template.dn
            })
        inst.restart()

    # NOTE(review): _create_user is called here with an extra positional arg
    # (presumably the uid/gid number pair) — verify against the helper's signature.
    _create_user(test_users_m1, 1, 1)

    topology_m2.pause_all_replicas()
    # Same uid/gid created on both masters -> conflict on user AND managed group.
    _create_user(test_users_m1, 2, 2, sleep=True)
    _create_user(test_users_m2, 2, 2, sleep=True)
    # Same uid but different gid (3 vs 33) -> conflicting managed groups.
    _create_user(test_users_m1, 3, 3, sleep=True)
    _create_user(test_users_m2, 3, 33)
    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2)

    # Both masters must converge to identical DN sets.
    user_dns_m1 = [user.dn for user in test_users_m1.list()]
    user_dns_m2 = [user.dn for user in test_users_m2.list()]
    assert set(user_dns_m1) == set(user_dns_m2)
def test_memberof_groups(self, topology_m2, base_m2):
    """Check that conflict properly resolved for operations
    with memberOf and groups

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Enable memberOf plugin
        2. Add 30 users to m1 and wait for replication to happen
        3. Pause replication
        4. Create a group on m1 and m2
        5. Create a group on m1 and m2, delete from m1
        6. Create a group on m1, delete from m1, and create on m2,
        7. Create a group on m2 and m1, delete from m1
        8. Create two different groups on m2
        9. Resume replication
        10. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
        9. It should pass
        10. It should pass
    """

    # Known-broken scenario; kept as an expected failure until 49591 lands.
    pytest.xfail("Issue 49591 - work in progress")

    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
    test_groups_m1 = Groups(M1, base_m2.dn, rdn=None)
    test_groups_m2 = Groups(M2, base_m2.dn, rdn=None)

    repl = ReplicationManager(SUFFIX)

    # Enable memberOf on both masters and exclude the memberOf attribute
    # from replication, so each master maintains it locally.
    # NOTE(review): the Total-update value ends with 'EXCLUDE ' and no
    # attribute name (trailing space) — presumably meaning "exclude nothing
    # on total init"; confirm this is intentional.
    for inst in topology_m2.ms.values():
        memberof = MemberOfPlugin(inst)
        memberof.enable()
        agmt = Agreements(inst).list()[0]
        agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',
                           '(objectclass=*) $ EXCLUDE '),
                          ('nsDS5ReplicatedAttributeList',
                           '(objectclass=*) $ EXCLUDE memberOf'))
        inst.restart()

    # Build 10 trios of users (uids 1200+i, 1210+i, 1220+i); each trio becomes
    # the member list of one group below.
    user_dns = []
    for user_num in range(10):
        user_trio = []
        for num in range(0, 30, 10):
            user = _create_user(test_users_m1, 1200 + user_num + num)
            user_trio.append(user.dn)
        user_dns.append(user_trio)

    repl.test_replication(M1, M2)
    topology_m2.pause_all_replicas()

    log.info("Check a simple conflict")
    # Same group created on both masters with the same member trio.
    group_num = 0
    _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)
    _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)

    log.info("Check a add - del")
    # Create on both, then delete on M1.
    group_num += 1
    _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)
    _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)
    _delete_group(test_groups_m1, group_num)
    # Create+delete on M1, then create the same group on M2.
    group_num += 1
    _create_group(test_groups_m1, group_num, user_dns[group_num])
    _delete_group(test_groups_m1, group_num, sleep=True)
    _create_group(test_groups_m2, group_num, user_dns[group_num])
    # Create on M2 first, then create+delete on M1.
    group_num += 1
    _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)
    _create_group(test_groups_m1, group_num, user_dns[group_num])
    _delete_group(test_groups_m1, group_num, sleep=True)
    # Two groups that exist only on M2 (no competing op on M1).
    group_num += 1
    _create_group(test_groups_m2, group_num, user_dns[group_num])
    group_num += 1
    _create_group(test_groups_m2, group_num, user_dns[group_num])

    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2)

    # Both masters must converge to identical group DN sets.
    group_dns_m1 = [group.dn for group in test_groups_m1.list()]
    group_dns_m2 = [group.dn for group in test_groups_m2.list()]
    assert set(group_dns_m1) == set(group_dns_m2)