Example 1
def topology_m1h1c1(request):
    """Create Replication Deployment with one master, one consumer and one hub"""

    topo_roles = {ReplicaRole.MASTER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1}
    topology = _create_instances(topo_roles, DEFAULT_SUFFIX)
    master = topology.ms["master1"]
    hub = topology.hs["hub1"]
    consumer = topology.cs["consumer1"]

    # Start with the master, and create it "first".
    log.info("Creating replication topology.")
    # Now get the first master ready.
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)
    # Finish the topology creation
    repl.join_hub(master, hub)
    repl.join_consumer(hub, consumer)

    repl.test_replication(master, consumer)

    # Clear out the tmp dir
    for instance in topology:
        instance.clearTmpDir(__file__)

    def fin():
        if DEBUGGING:
            for inst in topology:
                inst.stop()
        else:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete(pyinstall=PYINSTALL)
    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
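
This is a pytest fixture (the @pytest.fixture decorator sits outside the excerpt). A minimal sketch of a test that consumes it; the test name and body are illustrative, not part of the original suite:

def test_cascading_replication(topology_m1h1c1):
    """Hypothetical test: verify an update flows master -> hub -> consumer."""
    master = topology_m1h1c1.ms["master1"]
    consumer = topology_m1h1c1.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # test_replication writes to the master and waits for the change on the consumer
    repl.test_replication(master, consumer)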
Example 2
def test_new_suffix(topo_m4, new_suffix):
    """Check that we can enable replication on a new suffix

    :id: d44a9ed4-26b0-4189-b0d0-b2b336ddccbd
    :setup: Four masters replication setup, a new suffix
    :steps:
        1. Enable replication on the new suffix
        2. Check if replication works
        3. Disable replication on the new suffix
    :expectedresults:
        1. Replication on the new suffix should be enabled
        2. Replication should work
        3. Replication on the new suffix should be disabled
    """
    m1 = topo_m4.ms["master1"]
    m2 = topo_m4.ms["master2"]

    repl = ReplicationManager(NEW_SUFFIX)

    repl.create_first_master(m1)

    repl.join_master(m1, m2)

    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)

    repl.remove_master(m1)
    repl.remove_master(m2)
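
The new_suffix fixture requested above is not shown; a minimal sketch, assuming it creates a backend for NEW_SUFFIX on each master (mirroring the Backend pattern used in the later examples):

@pytest.fixture
def new_suffix(topo_m4):
    """Hypothetical fixture: create a NEW_SUFFIX backend on every master."""
    for inst in topo_m4.ms.values():
        be = Backend(inst)
        be.create(properties={'cn': 'newRoot', 'nsslapd-suffix': NEW_SUFFIX})
        be.create_sample_entries('001004002')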
Example 3
def test_lastupdate_attr_before_init(topo_nr):
    """Check that LastUpdate replica attributes show right values

    :id: bc8ce431-ff65-41f5-9331-605cbcaaa887
    :setup: Replication setup with master and consumer instances
            without initialization
    :steps:
        1. Check nsds5replicaLastUpdateStart value
        2. Check nsds5replicaLastUpdateEnd value
        3. Check nsds5replicaLastUpdateStatus value
        4. Check nsds5replicaLastUpdateStatusJSON is parsable
    :expectedresults:
        1. nsds5replicaLastUpdateStart should be equal to 0
        2. nsds5replicaLastUpdateEnd should be equal to 0
        3. nsds5replicaLastUpdateStatus should not be equal
           to "Replica acquired successfully: Incremental update started"
        4. Success
    """

    master = topo_nr.ins["standalone1"]
    consumer = topo_nr.ins["standalone2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)

    # Manually create an un-synced consumer.

    consumer_replicas = Replicas(consumer)
    consumer_replicas.create(
        properties={
            'cn': 'replica',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaId': '65535',
            'nsDS5Flags': '0',
            'nsDS5ReplicaType': '2',
        })

    agmt = repl.ensure_agreement(master, consumer)
    with pytest.raises(Exception):
        repl.wait_for_replication(master, consumer, timeout=5)

    assert agmt.get_attr_val_utf8(
        'nsds5replicaLastUpdateStart') == "19700101000000Z"
    assert agmt.get_attr_val_utf8(
        "nsds5replicaLastUpdateEnd") == "19700101000000Z"
    assert "replica acquired successfully" not in agmt.get_attr_val_utf8_l(
        "nsds5replicaLastUpdateStatus")

    # make sure the JSON attribute is parsable
    json_status = agmt.get_attr_val_utf8("nsds5replicaLastUpdateStatusJSON")
    if json_status is not None:
        json_obj = json.loads(json_status)
        log.debug("JSON status message: {}".format(json_obj))
Example 4
def test_special_symbol_replica_agreement(topo_i2):
    """ Check if agreement starts with "cn=->..." then
    after upgrade does it get removed.
    
    :id: 68aa0072-4dd4-4e33-b107-cb383a439125
    :setup: two standalone instance
    :steps:
        1. Create and Enable Replication on standalone2 and role as consumer
        2. Create and Enable Replication on standalone1 and role as master
        3. Create a Replication agreement starts with "cn=->..."
        4. Perform an upgrade operation over the master
        5. Check if the agreement is still present or not.
    :expectedresults:
        1. It should be successful
        2. It should be successful
        3. It should be successful
        4. It should be successful
        5. It should be successful
    """

    master = topo_i2.ins["standalone1"]
    consumer = topo_i2.ins["standalone2"]
    consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                       role=ReplicaRole.CONSUMER,
                                       replicaId=CONSUMER_REPLICAID)
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)

    properties = {
        RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port)),
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }

    master.agreement.create(suffix=SUFFIX,
                            host=consumer.host,
                            port=consumer.port,
                            properties=properties)

    master.agreement.init(SUFFIX, consumer.host, consumer.port)

    replica_server = Replicas(master).get(DEFAULT_SUFFIX)

    master.upgrade('online')

    agmt = replica_server.get_agreements().list()[0]

    assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(
        consumer.host, str(consumer.port))
Example 5
def test_lastupdate_attr_before_init(topo_nr):
    """Check that LastUpdate replica attributes show right values

    :id: bc8ce431-ff65-41f5-9331-605cbcaaa887
    :setup: Replication setup with master and consumer instances
            without initialization
    :steps:
        1. Check nsds5replicaLastUpdateStart value
        2. Check nsds5replicaLastUpdateEnd value
        3. Check nsds5replicaLastUpdateStatus value
    :expectedresults:
        1. nsds5replicaLastUpdateStart should be equal to 0
        2. nsds5replicaLastUpdateEnd should be equal to 0
        3. nsds5replicaLastUpdateStatus should not be equal
           to "0 Replica acquired successfully: Incremental update started"
    """

    master = topo_nr.ins["standalone1"]
    consumer = topo_nr.ins["standalone2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)

    # Manually create an un-synced consumer.

    consumer_replicas = Replicas(consumer)
    consumer_replicas.create(
        properties={
            'cn': 'replica',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaId': '65535',
            'nsDS5Flags': '0',
            'nsDS5ReplicaType': '2',
        })

    agmt = repl.ensure_agreement(master, consumer)
    with pytest.raises(Exception):
        repl.wait_for_replication(master, consumer, timeout=5)

    assert agmt.get_attr_val_bytes(
        'nsds5replicaLastUpdateStart') == b"19700101000000Z"
    assert agmt.get_attr_val_bytes(
        "nsds5replicaLastUpdateEnd") == b"19700101000000Z"
    assert b"Replica acquired successfully" not in agmt.get_attr_val_bytes(
        "nsds5replicaLastUpdateStatus")
Example 6
def test_multiple_changelogs(topo):
    """Test the multiple suffixes can be replicated with the new per backend
    changelog.

    :id: eafcdb57-4ea2-4887-a0a8-9e4d295f4f4d
    :setup: Master Instance, Consumer Instance
    :steps:
        1. Create a second suffix
        2. Enable replication for second backend
        3. Perform some updates on both backends and make sure replication is
           working for both backends

    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    supplier = topo.ms['master1']
    consumer = topo.cs['consumer1']

    # Create second suffix dc=second_backend on both replicas
    for inst in [supplier, consumer]:
        # Create the backends
        props = {'cn': 'secondRoot', 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        be.create(properties=props)
        be.create_sample_entries('001004002')

    # Setup replication for second suffix
    repl = ReplicationManager(SECOND_SUFFIX)
    repl.create_first_master(supplier)
    repl.join_consumer(supplier, consumer)

    # Test replication works for each backend
    for suffix in [DEFAULT_SUFFIX, SECOND_SUFFIX]:
        replicas = Replicas(supplier)
        replica = replicas.get(suffix)
        log.info("Testing replication for: " + suffix)
        assert replica.test_replication([consumer])
Example 7
def replicate_backend(s1, s2, beSuffix):
    repl = ReplicationManager(beSuffix)
    repl.create_first_master(s1)
    repl.join_master(s1, s2)
    repl.ensure_agreement(s1, s2)
    repl.ensure_agreement(s2, s1)
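
A minimal usage sketch, assuming two master instances and a backend for the suffix already exist on both (the topology names are illustrative):

# Hypothetical call: mesh a second backend between two existing masters
replicate_backend(topo.ms['master1'], topo.ms['master2'], SECOND_SUFFIX)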
Example 8
def create_topology(topo_dict, suffix=DEFAULT_SUFFIX):
    """Create a requested topology. Cascading replication scenario isn't supported

    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.MASTER: num,
                                   ReplicaRole.CONSUMER: num}
    :type topo_dict: dict
    :param suffix: a suffix for the replication
    :type suffix: str

    :return: TopologyMain object
    """

    if not topo_dict:
        raise ValueError("You need to specify the dict. For instance: {ReplicaRole.STANDALONE: 1}")

    if ReplicaRole.HUB in topo_dict.keys():
        raise NotImplementedError("Cascading replication scenario isn't supported. "
                                  "Please use an existing topology or create your own.")

    topo = _create_instances(topo_dict, suffix)

    # Start with a single master, and create it "first".
    first_master = None
    try:
        first_master = list(topo.ms.values())[0]
        log.info("Creating replication topology.")
        # Now get the first master ready.
        repl = ReplicationManager(suffix)
        repl.create_first_master(first_master)
    except IndexError:
        pass

    # Now init the other masters from this.
    # This will reinit m, and put a bi-directional agreement
    # in place.
    for m in topo.ms.values():
        # Skip firstmaster.
        if m is first_master:
            continue
        log.info("Joining master %s to %s ..." % (m.serverid, first_master.serverid))
        repl.join_master(first_master, m)

    # Mesh the master agreements.
    for mo in topo.ms.values():
        for mi in topo.ms.values():
            if mo is mi:
                continue
            log.info("Ensuring master %s to %s ..." % (mo.serverid, mi.serverid))
            repl.ensure_agreement(mo, mi)

    # Add master -> consumer agreements.
    for c in topo.cs.values():
        log.info("Joining consumer %s from %s ..." % (c.serverid, first_master.serverid))
        repl.join_consumer(first_master, c)

    for m in topo.ms.values():
        for c in topo.cs.values():
            log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid))
            repl.ensure_agreement(m, c)

    # Clear out the tmp dir
    for instance in topo:
        instance.clearTmpDir(__file__)

    return topo
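
A minimal usage sketch, assuming the surrounding module's imports (ReplicaRole, DEFAULT_SUFFIX):

# Two masters and one consumer, replicated on the default suffix
topo = create_topology({ReplicaRole.MASTER: 2, ReplicaRole.CONSUMER: 1})
master1 = topo.ms["master1"]
consumer1 = topo.cs["consumer1"]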
Example 9
from lib389.utils import *

# Skip on older versions
pytestmark = pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")


def test_ticket47819(topology_st):
    """Testing precise tombstone purging:
        [1]  Make sure "nsTombstoneCSN" is added to new tombstones
        [2]  Make sure an import of a replication ldif adds "nsTombstoneCSN"
             to old tombstones
        [3]  Test the fixup task
        [4]  Make sure tombstone purging works
    """

    log.info('Testing Ticket 47819 - Test precise tombstone purging')

    #
    # Setup Replication
    #
    master = topology_st.standalone
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)
    repl.ensure_agreement(master, master)

    #
    # Part 1 create a tombstone entry and make sure nsTombstoneCSN is added
    #
    log.info('Part 1:  Add and then delete an entry to create a tombstone...')

    try:
        topology_st.standalone.add_s(
            Entry(('cn=entry1,dc=example,dc=com', {
                'objectclass': 'top person'.split(),
                'sn': 'user',
                'cn': 'entry1'
            })))
    except ldap.LDAPError as e:
        log.error('Failed to add entry: ' + e.args[0]['desc'])
        assert False

    try:
        topology_st.standalone.delete_s('cn=entry1,dc=example,dc=com')
    except ldap.LDAPError as e:
        log.error('Failed to delete entry: ' + e.args[0]['desc'])
        assert False

    log.info('Search for tombstone entries...')
    try:
        entries = topology_st.standalone.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
        if not entries:
            log.fatal(
                'Search failed to find the new tombstone (nsTombstoneCSN is probably missing).'
            )
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search failed: ' + e.args[0]['desc'])
        assert False

    log.info('Part 1 - passed')

    #
    # Part 2 - import ldif with tombstones missing 'nsTombstoneCSN'
    #
    # First, export the replication ldif, edit the file(remove nstombstonecsn),
    # and reimport it.
    #
    log.info('Part 2:  Exporting replication ldif...')

    # Get the full path and name for the LDIF we will be exporting
    ldif_file = "/tmp/export.ldif"

    args = {EXPORT_REPL_INFO: True, TASK_WAIT: True}
    exportTask = Tasks(topology_st.standalone)
    try:
        exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
    except ValueError:
        assert False
    time.sleep(1)

    # Open the ldif file, get the lines, then rewrite the file
    # without the nsTombstoneCSN attribute
    with open(ldif_file, "r") as ldif:
        lines = ldif.readlines()
    time.sleep(1)

    with open(ldif_file, "w") as ldif:
        for line in lines:
            if not line.lower().startswith('nstombstonecsn'):
                ldif.write(line)
    time.sleep(1)

    # import the new ldif file
    log.info('Import replication LDIF file...')
    importTask = Tasks(topology_st.standalone)
    args = {TASK_WAIT: True}
    try:
        importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
        os.remove(ldif_file)
    except ValueError:
        os.remove(ldif_file)
        assert False
    time.sleep(1)

    # Search for the tombstone again
    log.info('Search for tombstone entries...')
    try:
        entries = topology_st.standalone.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
        if not entries:
            log.fatal(
                'Search failed to find the new tombstone (nsTombstoneCSN is probably missing).'
            )
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search failed: ' + e.args[0]['desc'])
        assert False

    log.info('Part 2 - passed')

    #
    # Part 3 - test fixup task
    #
    log.info('Part 3:  test the fixup task')

    # Run fixup task using the strip option.  This removes nsTombstoneCSN
    # so we can test if the fixup task works.
    args = {TASK_WAIT: True, TASK_TOMB_STRIP: True}
    fixupTombTask = Tasks(topology_st.standalone)
    try:
        fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
    except:
        assert False
    time.sleep(1)

    # Search for tombstones with nsTombstoneCSN - better not find any
    log.info('Search for tombstone entries...')
    try:
        entries = topology_st.standalone.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
        if entries:
            log.fatal('Search found tombstones with nsTombstoneCSN')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search failed: ' + e.args[0]['desc'])
        assert False

    # Now run the fixup task
    args = {TASK_WAIT: True}
    fixupTombTask = Tasks(topology_st.standalone)
    try:
        fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
    except:
        assert False
    time.sleep(1)

    # Search for tombstones with nsTombstoneCSN - better find some
    log.info('Search for tombstone entries...')
    try:
        entries = topology_st.standalone.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
        if not entries:
            log.fatal('Search did not find any fixed-up tombstones')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search failed: ' + e.args[0]['desc'])
        assert False

    log.info('Part 3 - passed')

    #
    # Part 4 - Test tombstone purging
    #
    log.info('Part 4:  test tombstone purging...')

    args = {
        REPLICA_PRECISE_PURGING: b'on',
        REPLICA_PURGE_DELAY: b'5',
        REPLICA_PURGE_INTERVAL: b'5'
    }
    try:
        topology_st.standalone.replica.setProperties(DEFAULT_SUFFIX, None,
                                                     None, args)
    except:
        log.fatal('Failed to configure replica')
        assert False

    # Wait for the interval to pass
    log.info('Wait for tombstone purge interval to pass...')
    time.sleep(10)

    # Add an entry to trigger replication
    log.info('Perform an update to help trigger tombstone purging...')
    try:
        topology_st.standalone.add_s(
            Entry(('cn=test_entry,dc=example,dc=com', {
                'objectclass': 'top person'.split(),
                'sn': 'user',
                'cn': 'entry1'
            })))
    except ldap.LDAPError as e:
        log.error('Failed to add entry: ' + e.args[0]['desc'])
        assert False

    # Wait for the interval to pass again
    log.info('Wait for tombstone purge interval to pass again...')
    time.sleep(10)

    # search for tombstones, there should be none
    log.info('Search for tombstone entries...')
    try:
        entries = topology_st.standalone.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
        if entries:
            log.fatal('Search unexpectedly found tombstones')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search failed: ' + e.args[0]['desc'])
        assert False

    log.info('Part 4 - passed')
Example 10
def test_ticket47781(topology_st):
    """
        Testing for a deadlock after doing an online import of an LDIF with
        replication data.  The replication agreement should be invalid.
    """

    log.info(
        'Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data'
    )

    master = topology_st.standalone
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)

    properties = {
        RA_NAME: r'meTo_$host:$port',
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }
    # The agreement should point to a server that does NOT exist (invalid port)
    repl_agreement = master.agreement.create(suffix=DEFAULT_SUFFIX,
                                             host=master.host,
                                             port=5555,
                                             properties=properties)

    #
    # add two entries
    #
    log.info('Adding two entries...')

    master.add_s(
        Entry(('cn=entry1,dc=example,dc=com', {
            'objectclass': 'top person'.split(),
            'sn': 'user',
            'cn': 'entry1'
        })))

    master.add_s(
        Entry(('cn=entry2,dc=example,dc=com', {
            'objectclass': 'top person'.split(),
            'sn': 'user',
            'cn': 'entry2'
        })))

    #
    # export the replication ldif
    #
    log.info('Exporting replication ldif...')
    args = {EXPORT_REPL_INFO: True}
    exportTask = Tasks(master)
    exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)

    #
    # Restart the server
    #
    log.info('Restarting server...')
    master.stop()
    master.start()

    #
    # Import the ldif
    #
    log.info('Import replication LDIF file...')
    importTask = Tasks(master)
    args = {TASK_WAIT: True}
    importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
    os.remove("/tmp/export.ldif")

    #
    # Search for tombstones - we should not hang/timeout
    #
    log.info('Search for tombstone entries(should find one and not hang)...')
    master.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
    master.set_option(ldap.OPT_TIMEOUT, 5)
    entries = master.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                              'objectclass=nsTombstone')
    if not entries:
        log.fatal('Search failed to find any entries.')
        assert False
Example 11
def test_ticket48759(topology_st):
    """
    The fix for ticket 48759 has to prevent plugin calls during tombstone purging.

    The test uses the memberof and retrocl plugins to verify this.
    Without the fix, the memberof plugin is called during tombstone purging:
        if the tombstone entry is a group,
        it modifies the user entries of the group,
        and if retrocl is enabled this mod is written to the retrocl.

    The test sequence is:
    - enable replication
    - enable memberof and retro cl plugin
    - add user entries
    - add a group and add the users as members
    - verify memberof is set to users
    - delete the group
    - verify memberof is removed from users
    - add group again
    - verify memberof is set to users
    - get number of changes in retro cl for one user
    - configure tombstone purging
    - wait for purge interval to pass
    - add a dummy entry to increase maxcsn
    - wait for purge interval to pass two times
    - get number of changes in retro cl for user again
    - assert there was no additional change
    """

    log.info('Testing Ticket 48759 - no plugin calls for tombstone purging')

    #
    # Setup Replication
    #
    log.info('Setting up replication...')
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(topology_st.standalone)
    #
    # enable dynamic plugins, memberof and retro cl plugin
    #
    log.info('Enable plugins...')
    try:
        topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
    except ldap.LDAPError as e:
        log.error('Failed to enable dynamic plugins! ' + e.args[0]['desc'])
        assert False

    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
    # Configure memberOf group attribute
    try:
        topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
                                        [(ldap.MOD_REPLACE,
                                          'memberofgroupattr',
                                          b'member')])
    except ldap.LDAPError as e:
        log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc'])
        assert False

    #
    #  create some users and a group
    #
    log.info('create users and group...')
    for idx in range(1, 5):
        try:
            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
            topology_st.standalone.add_s(Entry((USER_DN,
                                                {'objectclass': 'top extensibleObject'.split(),
                                                 'uid': 'member%d' % (idx)})))
        except ldap.LDAPError as e:
            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc']))
            assert False

    _add_group_with_members(topology_st)

    MEMBER_VAL = ("uid=member2,%s" % DEFAULT_SUFFIX)
    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)

    # delete group
    log.info('delete group...')
    try:
        topology_st.standalone.delete_s(GROUP_DN)
    except ldap.LDAPError as e:
        log.error('Failed to delete entry: ' + e.args[0]['desc'])
        assert False

    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, False)

    # add group again
    log.info('add group again')
    _add_group_with_members(topology_st)
    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)

    #
    # get number of changelog records for one user entry
    log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL)
    changes_pre = _find_retrocl_changes(topology_st, MEMBER_VAL)

    # configure tombstone purging
    try:
        Repl_DN = 'cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config'
        topology_st.standalone.modify_s(Repl_DN,
                                        [(ldap.MOD_ADD, 'nsDS5ReplicaPreciseTombstonePurging', b'on'),
                                         (ldap.MOD_ADD, 'nsDS5ReplicaPurgeDelay', b'5'),
                                         (ldap.MOD_ADD, 'nsDS5ReplicaTombstonePurgeInterval', b'5')])
    except:
        log.fatal('Failed to configure replica')
        assert False

    # Wait for the interval to pass
    log.info('Wait for tombstone purge interval to pass ...')
    time.sleep(6)

    # Add an entry to trigger replication
    log.info('add dummy entry')
    try:
        topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
            'objectclass': 'top person'.split(),
            'sn': 'user',
            'cn': 'entry1'})))
    except ldap.LDAPError as e:
        log.error('Failed to add entry: ' + e.args[0]['desc'])
        assert False

    # check memberof is still correct
    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)

    # Wait for the interval to pass again
    log.info('Wait for tombstone purge interval to pass again...')
    time.sleep(10)

    #
    # get number of changelog records for one user entry
    log.info('get number of changes for %s after tombstone purging' % MEMBER_VAL)
    changes_post = _find_retrocl_changes(topology_st, MEMBER_VAL)

    assert (changes_pre == changes_post)
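
The helpers _add_group_with_members, _find_memberof, and _find_retrocl_changes live elsewhere in the ticket's test module; a minimal sketch of _find_memberof under that assumption (signature inferred from the calls above, body illustrative):

def _find_memberof(topology_st, member_dn, group_dn, expected):
    """Hypothetical helper: check whether memberOf on the user lists the group."""
    entry = topology_st.standalone.search_s(member_dn, ldap.SCOPE_BASE,
                                            '(objectclass=*)', ['memberof'])[0]
    values = [val.decode() for val in entry.getValues('memberof')]
    if expected:
        assert group_dn in values
    else:
        assert group_dn not in values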
Example 12
def test_multiple_changelogs_export_import(topo):
    """Test that we can export and import the replication changelog

    :id: b74fcaaf-a13f-4ee0-98f9-248b281f8700
    :setup: Master Instance, Consumer Instance
    :steps:
        1. Create a second suffix
        2. Enable replication for second backend
        3. Perform some updates on a backend, and export the changelog
        4. Do an export and import while the server is idle
        5. Do an import while the server is under load

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    SECOND_SUFFIX = 'dc=second_suffix'
    supplier = topo.ms['master1']
    consumer = topo.cs['consumer1']
    supplier.config.set('nsslapd-errorlog-level', '0')
    # Create second suffix dc=second_backend on both replicas
    for inst in [supplier, consumer]:
        # Create the backends
        props = {'cn': 'secondRoot', 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        try:
            be.create(properties=props)
            be.create_sample_entries('001004002')
        except ldap.UNWILLING_TO_PERFORM:
            pass

    # Setup replication for second suffix
    try:
        repl = ReplicationManager(SECOND_SUFFIX)
        repl.create_first_master(supplier)
        repl.join_consumer(supplier, consumer)
    except ldap.ALREADY_EXISTS:
        pass

    # Put the replica under load, and export the changelog
    replicas = Replicas(supplier)
    replica = replicas.get(DEFAULT_SUFFIX)
    doMods1 = DoMods(supplier, task="export")
    doMods1.start()
    replica.begin_task_cl2ldif()
    doMods1.join()
    replica.task_finished()

    # allow some time to pass, and test replication
    time.sleep(1)
    assert replica.test_replication([consumer])

    # While idle, do an export and import, and make sure replication still works
    log.info("Testing idle server with CL export and import...")
    replica.begin_task_cl2ldif()
    replica.task_finished()
    replica.begin_task_ldif2cl()
    replica.task_finished()
    assert replica.test_replication([consumer])

    # stability test, put the replica under load, import the changelog, and make
    # sure server did not crash.
    log.info("Testing busy server with CL import...")
    doMods2 = DoMods(supplier, task="import")
    doMods2.start()
    replica.begin_task_ldif2cl()
    doMods2.join()
    replica.task_finished()
    # Replication will be broken so no need to test it.  This is just to make
    # sure the import works, and the server is stable
    assert supplier.status()
    assert consumer.status()
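
The DoMods helper used above is not shown; a minimal sketch, assuming it is a thread that applies modify traffic to the supplier while the changelog task runs (the class shape is inferred from the calls above, the body is illustrative):

import threading
from lib389.idm.user import UserAccounts

class DoMods(threading.Thread):
    """Hypothetical load generator used while a cl2ldif/ldif2cl task runs."""
    def __init__(self, inst, task):
        super().__init__()
        self.inst = inst
        self.task = task

    def run(self):
        # Create a user and apply a burst of modifies to generate changelog traffic
        users = UserAccounts(self.inst, DEFAULT_SUFFIX)
        user = users.create_test_user(uid=1000)
        for idx in range(50):
            user.replace('description', 'mod-{}-{}'.format(self.task, idx))
        user.delete()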