def test_ticket50232_normal(topology_st):
    """The fix for ticket 50232

    The test sequence is:
    - create suffix
    - add suffix entry and some child entries
    - "normally" done after populating suffix: enable replication
    - get RUV and database generation
    - export -r
    - import
    - get RUV and database generation
    - assert database generation has not changed
    """
    log.info('Testing Ticket 50232 - export creates not importable ldif file, normal creation order')

    # Create the backend and its mapping tree for the test suffix.
    topology_st.standalone.backend.create(NORMAL_SUFFIX, {BACKEND_NAME: NORMAL_BACKEND_NAME})
    topology_st.standalone.mappingtree.create(NORMAL_SUFFIX, bename=NORMAL_BACKEND_NAME, parent=None)

    _populate_suffix(topology_st.standalone, NORMAL_BACKEND_NAME)

    # Enable replication *after* populating the suffix (the "normal" order).
    # Use the shared helper instead of duplicating the changelog/replica
    # creation sequence inline — it performs exactly the same steps.
    _enable_replica(topology_st.standalone, NORMAL_SUFFIX)

    # Export with -r, re-import, and verify the database generation survived.
    _test_export_import(topology_st.standalone, NORMAL_SUFFIX, NORMAL_BACKEND_NAME)
def _enable_replica(instance, suffix):
    """Turn *instance* into a single-master replica for *suffix*.

    Makes sure the changelog exists, then creates the replica entry
    (replica id 1, flags 1, type 3).
    """
    # NOTE(review): the ReplicationManager is constructed with DEFAULT_SUFFIX
    # rather than the *suffix* argument — presumably only its changelog
    # helper is needed here; confirm if suffix-specific handling matters.
    manager = ReplicationManager(DEFAULT_SUFFIX)
    manager._ensure_changelog(instance)

    replica_properties = {
        'cn': 'replica',
        'nsDS5ReplicaRoot': suffix,
        'nsDS5ReplicaId': '1',
        'nsDS5Flags': '1',
        'nsDS5ReplicaType': '3',
    }
    Replicas(instance).create(properties=replica_properties)
def test_ticket48325(topology_m1h1c1):
    """
    Test that the RUV element order is correctly maintained when
    promoting a hub or consumer.
    """
    #
    # Promote consumer to master
    #
    C1 = topology_m1h1c1.cs["consumer1"]
    M1 = topology_m1h1c1.ms["master1"]
    H1 = topology_m1h1c1.hs["hub1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # A master needs a changelog; ensure one exists before the promotion.
    repl._ensure_changelog(C1)
    # Promote by rewriting the replica entry: type 3, a fresh replica ID,
    # and flags 1.
    DN = topology_m1h1c1.cs["consumer1"].replica._get_mt_entry(DEFAULT_SUFFIX)
    topology_m1h1c1.cs["consumer1"].modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaType', b'3'),
                                                  (ldap.MOD_REPLACE, 'nsDS5ReplicaID', b'1234'),
                                                  (ldap.MOD_REPLACE, 'nsDS5Flags', b'1')])
    # Give the server a moment to rebuild the RUV after the promotion.
    time.sleep(1)

    #
    # Check ruv has been reordered
    #
    if not checkFirstElement(topology_m1h1c1.cs["consumer1"], '1234'):
        log.fatal('RUV was not reordered')
        assert False

    # Add the replication-manager bind entry on master1 so the new master
    # can authenticate to it.
    topology_m1h1c1.ms["master1"].add_s(Entry((defaultProperties[REPLICATION_BIND_DN],
                                               {'objectclass': 'top netscapeServer'.split(),
                                                'cn': 'replication manager',
                                                'userPassword': '******'})))

    DN = topology_m1h1c1.ms["master1"].replica._get_mt_entry(DEFAULT_SUFFIX)
    topology_m1h1c1.ms["master1"].modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN',
                                                 ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))])

    #
    # Create repl agreement from the newly promoted master to master1
    #
    properties = {RA_NAME: 'meTo_{}:{}'.format(topology_m1h1c1.ms["master1"].host,
                                               str(topology_m1h1c1.ms["master1"].port)),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    new_agmt = topology_m1h1c1.cs["consumer1"].agreement.create(suffix=SUFFIX,
                                                                host=topology_m1h1c1.ms["master1"].host,
                                                                port=topology_m1h1c1.ms["master1"].port,
                                                                properties=properties)
    if not new_agmt:
        log.fatal("Fail to create new agmt from old consumer to the master")
        assert False

    # Test replication is working
    repl.test_replication(C1, M1)

    #
    # Promote hub to master
    #
    # A hub already has a changelog, so only the replica type and ID change.
    DN = topology_m1h1c1.hs["hub1"].replica._get_mt_entry(DEFAULT_SUFFIX)
    topology_m1h1c1.hs["hub1"].modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaType', b'3'),
                                             (ldap.MOD_REPLACE, 'nsDS5ReplicaID', b'5678')])
    time.sleep(1)

    #
    # Check ruv has been reordered
    #
    if not checkFirstElement(topology_m1h1c1.hs["hub1"], '5678'):
        log.fatal('RUV was not reordered')
        assert False

    # Test replication is working
    repl.test_replication(M1, H1)

    # Done
    log.info('Test complete')