Example #1
def test_48294_init(topology):
    """
    Set up Linked Attribute
    """
    _header(topology, 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation')

    log.info('Enable Dynamic plugins, and the linked Attrs plugin')
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugin: error ' + e.message['desc'])
        assert False

    try:
        topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
    except ValueError as e:
        log.fatal('Failed to enable linked attributes plugin: ' + str(e))
        assert False

    log.info('Add the plugin config entry')
    try:
        topology.standalone.add_s(Entry((MANAGER_LINK, {
                          'objectclass': 'top extensibleObject'.split(),
                          'cn': 'Manager Link',
                          'linkType': LINKTYPE,
                          'managedType': MANAGEDTYPE
                          })))
    except ldap.LDAPError as e:
        log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
        assert False

    log.info('Add 2 entries: manager1 and employee1')
    try:
        topology.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, {
                          'objectclass': 'top extensibleObject'.split(),
                          'uid': 'manager1'})))
    except ldap.LDAPError as e:
        log.fatal('Add manager1 failed: error ' + e.message['desc'])
        assert False

    try:
        topology.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, {
                          'objectclass': 'top extensibleObject'.split(),
                          'uid': 'employee1'})))
    except ldap.LDAPError as e:
        log.fatal('Add employee1 failed: error ' + e.message['desc'])
        assert False

    log.info('Add linktype to manager1')
    topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
                                 [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE)])

    log.info('Check managed attribute')
    check_attr_val(topology, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)

    log.info('PASSED')
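
# check_attr_val() is used above but not shown in this snippet. A minimal
# sketch of what such a helper could look like, assuming it simply reads the
# entry back and asserts the expected attribute value (the implementation
# below is an assumption, not the suite's actual helper):
def check_attr_val(topology, dn, attr, expected):
    try:
        # Fetch just the attribute of interest and compare it to the expected value
        entry = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, '(objectclass=*)', [attr])
        assert entry.getValue(attr) == expected
    except ldap.LDAPError as e:
        log.fatal('Failed to check %s on %s: error %s' % (attr, dn, e.message['desc']))
        assert False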
Example #2
def test_attr_uniqueness_init(topology):
    '''
    Enable dynamic plugins - makes things easier
    '''
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugin: error ' + e.message['desc'])
        assert False

    topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
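
# A hypothetical follow-up check (not part of the original test): once the
# plugin is configured to enforce uniqueness of 'uid', adding two entries
# that share a uid value should fail with CONSTRAINT_VIOLATION. The DNs and
# values below are illustrative assumptions.
def check_uid_uniqueness(topology):
    topology.standalone.add_s(Entry(('uid=dup1,' + DEFAULT_SUFFIX, {
                      'objectclass': 'top extensibleObject'.split(),
                      'uid': ['dup1', 'shared']})))
    try:
        topology.standalone.add_s(Entry(('uid=dup2,' + DEFAULT_SUFFIX, {
                          'objectclass': 'top extensibleObject'.split(),
                          'uid': ['dup2', 'shared']})))
        log.fatal('Duplicate uid value was unexpectedly accepted')
        assert False
    except ldap.CONSTRAINT_VIOLATION:
        log.info('Duplicate uid value correctly rejected.')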
Example #3
def test_ticket47640(topology):
    '''
    Linked Attrs Plugins - verify that if the plugin fails to update the link entry
    that the entire operation is aborted
    '''

    # Enable Dynamic plugins, and the linked Attrs plugin
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugin: error ' + e.message['desc'])
        assert False

    try:
        topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
    except ValueError as e:
        log.fatal('Failed to enable linked attributes plugin: ' + str(e))
        assert False

    # Add the plugin config entry
    try:
        topology.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', {
                          'objectclass': 'top extensibleObject'.split(),
                          'cn': 'Manager Link',
                          'linkType': 'seeAlso',
                          'managedType': 'seeAlso'
                          })))
    except ldap.LDAPError as e:
        log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
        assert False

    # Add an entry who has a link to an entry that does not exist
    OP_REJECTED = False
    try:
        topology.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, {
                          'objectclass': 'top extensibleObject'.split(),
                          'uid': 'manager',
                          'seeAlso': 'uid=user,dc=example,dc=com'
                          })))
    except ldap.UNWILLING_TO_PERFORM:
        # Success
        log.info('Add operation correctly rejected.')
        OP_REJECTED = True
    except ldap.LDAPError as e:
        log.fatal('Add operation incorrectly rejected: error %s - '
                  'expected "unwilling to perform"' % e.message['desc'])
        assert False
    if not OP_REJECTED:
        log.fatal('Add operation incorrectly allowed')
        assert False

    log.info('Test complete')
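
# A hypothetical extra assertion (not in the original test): since the add
# was aborted, the manager entry should not exist at all, so a base-scope
# search for it should raise NO_SUCH_OBJECT.
def check_manager_absent(topology):
    try:
        topology.standalone.search_s('uid=manager,' + DEFAULT_SUFFIX,
                                     ldap.SCOPE_BASE, '(objectclass=*)')
        log.fatal('Aborted entry is unexpectedly present')
        assert False
    except ldap.NO_SUCH_OBJECT:
        log.info('Aborted entry is absent, as expected.')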
Example #4
def test_48295_init(topology):
    """
    Set up Linked Attribute
    """
    _header(topology, 'Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links')

    log.info('Enable Dynamic plugins, and the linked Attrs plugin')
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugin: error ' + e.message['desc'])
        assert False

    try:
        topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
    except ValueError as e:
        log.fatal('Failed to enable linked attributes plugin: ' + str(e))
        assert False

    log.info('Add the plugin config entry')
    try:
        topology.standalone.add_s(Entry((MANAGER_LINK, {
                          'objectclass': 'top extensibleObject'.split(),
                          'cn': 'Manager Link',
                          'linkType': LINKTYPE,
                          'managedType': MANAGEDTYPE
                          })))
    except ldap.LDAPError as e:
        log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
        assert False

    log.info('Add 2 entries: manager1 and employee1')
    try:
        topology.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, {
                          'objectclass': 'top extensibleObject'.split(),
                          'uid': 'manager1'})))
    except ldap.LDAPError as e:
        log.fatal('Add manager1 failed: error ' + e.message['desc'])
        assert False

    try:
        topology.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, {
                          'objectclass': 'top extensibleObject'.split(),
                          'uid': 'employee1'})))
    except ldap.LDAPError as e:
        log.fatal('Add employee1 failed: error ' + e.message['desc'])
        assert False

    log.info('PASSED')
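
# The body of the 48295 test is not shown in this snippet. A minimal sketch
# of the scenario named in the header (an assumption based on that header):
# add one valid link and one broken link in a single modify; the operation
# should be rejected, and the entry cache rolled back so employee1 gains no
# managed attribute value.
def check_48295_links(topology):
    try:
        topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
                                     [(ldap.MOD_ADD, LINKTYPE,
                                       ['uid=employee1,%s' % OU_PEOPLE,
                                        'uid=doNotExist,%s' % OU_PEOPLE])])
        log.fatal('Modify with a broken link was unexpectedly accepted')
        assert False
    except ldap.UNWILLING_TO_PERFORM:
        log.info('Modify with a broken link correctly rejected.')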
Example #5
def test_ticket48312(topology):
    """
    Configure the managed entries plugin (template/definition), then perform a
    modrdn (deleteoldrdn 1), and make sure the server does not crash.
    """

    GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX
    PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX
    USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX
    CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config'
    TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX
    USER_NEWRDN = r'uid=\+user1'  # raw string: the '+' must stay escaped in the RDN

    #
    # First enable dynamic plugins
    #
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugin: error ' + e.message['desc'])
        assert False
    topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)

    #
    # Add our org units (they should already exist, but do it just in case)
    #
    try:
        topology.standalone.add_s(Entry((PEOPLE_OU, {
                   'objectclass': 'top extensibleObject'.split(),
                   'ou': 'people'})))
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc'])
        assert False

    try:
        topology.standalone.add_s(Entry((GROUP_OU, {
                   'objectclass': 'top extensibleObject'.split(),
                   'ou': 'groups'})))
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.fatal('test_mep: Failed to add groups org unit: error ' + e.message['desc'])
        assert False

    #
    # Add the template entry
    #
    try:
        topology.standalone.add_s(Entry((TEMPLATE_DN, {
                   'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
                   'cn': 'MEP Template',
                   'mepRDNAttr': 'cn',
                   'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
                   'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
                   })))
    except ldap.LDAPError as e:
        log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc'])
        assert False

    #
    # Add the definition entry
    #
    try:
        topology.standalone.add_s(Entry((CONFIG_DN, {
                          'objectclass': 'top extensibleObject'.split(),
                          'cn': 'config',
                          'originScope': PEOPLE_OU,
                          'originFilter': 'objectclass=posixAccount',
                          'managedBase': GROUP_OU,
                          'managedTemplate': TEMPLATE_DN
                          })))
    except ldap.LDAPError as e:
        log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc'])
        assert False

    #
    # Add an entry that meets the MEP scope
    #
    try:
        topology.standalone.add_s(Entry((USER_DN, {
                          'objectclass': 'top posixAccount extensibleObject'.split(),
                          'uid': 'user1',
                          'cn': 'user1',
                          'uidNumber': '1',
                          'gidNumber': '1',
                          'homeDirectory': '/home/user1',
                          'description': 'user description'
                          })))
    except ldap.LDAPError as e:
        log.fatal('test_mep: Failed to add user1: error ' + e.message['desc'])
        assert False

    #
    # Perform a modrdn on USER_DN
    #
    try:
        topology.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1)
    except ldap.LDAPError as e:
        log.error('Failed to modrdn: error ' + e.message['desc'])
        assert False

    log.info('Test complete')
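
# A hypothetical sanity check (not part of the original test): after the
# modrdn the server should still answer searches, and the renamed entry
# should be reachable under its new RDN.
def check_renamed_user(topology):
    try:
        entries = topology.standalone.search_s('ou=people,' + DEFAULT_SUFFIX,
                                               ldap.SCOPE_SUBTREE,
                                               '(uid=+user1)')
        assert entries, 'renamed entry not found'
    except ldap.LDAPError as e:
        log.fatal('Post-modrdn search failed: error ' + e.message['desc'])
        assert False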
Example #6
def test_dynamic_plugins(topology):
    """
        Test Dynamic Plugins - exercise each plugin and its main features, while
        changing the configuration without restarting the server.

        Need to test: functionality, stability, and stress.  These tests need to run
                      with replication disabled, and with replication set up with a
                      second instance.  Then test if replication is working, and we have
                      the same entries on each side.

        Functionality - Make sure that as configuration changes are made they take
                        effect immediately.  Cross-plugin interaction (e.g. automember/memberOf)
                        needs to be tested, as well as plugin tasks.  Need to test plugin
                        config validation (dependencies, etc.).

        Memory Corruption - Restart the plugins many times, and in different orders, and test
                            functionality and stability.  This will exercise the internal
                            plugin linked lists, dse callbacks, and task handlers.

        Stress - Put the server under load that will trigger multiple plugins (MO, RI, DNA, etc.).
                 Restart various plugins while these operations are going on.  Perform this test
                 5 times (stress_max_runs).

    """

    REPLICA_PORT = 33334
    RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
    master_maxcsn = 0
    replica_maxcsn = 0
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5
    replica_inst = None  # created later, once the replication pass starts

    # First enable dynamic plugins
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugin: error ' + e.message['desc'])
        assert False

    # Test that critical plugins can be updated even though the change might not be applied
    try:
        topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
    except ldap.LDAPError as e:
        log.fatal('Failed to apply change to critical plugin: error ' + e.message['desc'])
        assert False

    while True:
        #
        # First run the tests with replication disabled, then rerun them with replication set up
        #

        ############################################################################
        #  Test plugin functionality
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info('####################################################################\n')

        plugin_tests.test_all_plugins(topology.standalone)

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info('####################################################################\n')
        prev_plugin_test = None
        prev_prev_plugin_test = None

        for plugin_test in plugin_tests.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology.standalone, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            # Now run the functional test
            plugin_test(topology.standalone)

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Stress two plugins while restarting them, and while restarting other plugins.
        # The goal is to not crash, and have the plugins work after stressing them.
        ############################################################################

        log.info('####################################################################')
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info('####################################################################\n')

        stress_tests.configureMO(topology.standalone)
        stress_tests.configureRI(topology.standalone)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info('####################################################################')
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
            log.info('####################################################################\n')

            try:
                # Launch three new threads to add a bunch of users
                add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
                add_users.start()
                add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
                add_users2.start()
                add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
                add_users3.start()
                time.sleep(1)

                # While we are adding users restart the MO plugin and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

                # Wait for the 'adding' threads to complete
                add_users.join()
                add_users2.join()
                add_users3.join()

                # Now launch three threads to delete the users
                del_users = stress_tests.DelUsers(topology.standalone, 'employee')
                del_users.start()
                del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
                del_users2.start()
                del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
                del_users3.start()
                time.sleep(1)

                # Restart both the MO, RI plugins during these deletes, and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

                # Wait for the 'deleting' threads to complete
                del_users.join()
                del_users2.join()
                del_users3.join()

                # Now make sure both the MO and RI plugins still work correctly
                plugin_tests.func_tests[8](topology.standalone)  # RI plugin
                plugin_tests.func_tests[5](topology.standalone)  # MO plugin

                # Cleanup the stress tests
                stress_tests.cleanup(topology.standalone)

            except Exception:
                log.info('Stress test failed!')
                repl_fail(replica_inst)

            stress_count += 1
            log.info('####################################################################')
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info('####################################################################\n')

        if replication_run:
            # We're done.
            break
        else:
            #
            # Enable replication and run everything one more time
            #
            log.info('Setting up replication, and rerunning the tests...\n')

            # Create replica instance
            replica_inst = DirSrv(verbose=False)
            args_instance[SER_HOST] = LOCALHOST
            args_instance[SER_PORT] = REPLICA_PORT
            args_instance[SER_SERVERID_PROP] = 'replica'
            args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX

            args_replica_inst = args_instance.copy()
            replica_inst.allocate(args_replica_inst)
            replica_inst.create()
            replica_inst.open()

            try:
                topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                              role=REPLICAROLE_MASTER,
                                                              replicaId=1)
                replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                       role=REPLICAROLE_CONSUMER,
                                                       replicaId=65535)
                properties = {RA_NAME: r'to_replica',
                              RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                              RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                              RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                              RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}

                repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
                                                                      host=LOCALHOST,
                                                                      port=REPLICA_PORT,
                                                                      properties=properties)

                if not repl_agreement:
                    log.fatal("Fail to create a replica agreement")
                    repl_fail(replica_inst)

                topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
                topology.standalone.waitForReplInit(repl_agreement)
            except Exception:
                log.info('Failed to set up replication!')
                repl_fail(replica_inst)

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check that replication is in sync and the data matches, then remove the replica instance
    ############################################################################

    log.info('Checking if replication is in sync...')

    try:
        # Grab master's max CSN
        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
        if not entry:
            log.error('Failed to find db tombstone entry from master')
            repl_fail(replica_inst)
        elements = entry[0].getValues('nsds50ruv')
        for ruv in elements:
            if 'replica 1' in ruv:
                parts = ruv.split()
                if len(parts) == 5:
                    master_maxcsn = parts[4]
                    break
                else:
                    log.error('RUV is incomplete')
                    repl_fail(replica_inst)
        if master_maxcsn == 0:
            log.error('Failed to find maxcsn on master')
            repl_fail(replica_inst)

    except ldap.LDAPError as e:
        log.fatal('Unable to search master for db tombstone: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Loop on the consumer - waiting for it to catch up
    count = 0
    insync = False
    while count < 60:
        try:
            # Grab the consumer's max CSN
            entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
            if not entry:
                log.error('Failed to find db tombstone entry on consumer')
                repl_fail(replica_inst)
            elements = entry[0].getValues('nsds50ruv')
            for ruv in elements:
                if 'replica 1' in ruv:
                    parts = ruv.split()
                    if len(parts) == 5:
                        replica_maxcsn = parts[4]
                        break
            if replica_maxcsn == 0:
                log.error('Failed to find maxcsn on consumer')
                repl_fail(replica_inst)
        except ldap.LDAPError as e:
            log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])
            repl_fail(replica_inst)

        if master_maxcsn == replica_maxcsn:
            insync = True
            log.info('Replication is in sync.\n')
            break
        count += 1
        time.sleep(1)

    # Report on replication status
    if not insync:
        log.error('Consumer not in sync with master!')
        repl_fail(replica_inst)

    #
    # Verify the databases are identical. There should not be any "user, entry, employee" entries
    #
    log.info('Checking if the data is the same between the replicas...')

    # Check the master
    try:
        entries = topology.standalone.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Master database has incorrect data set!\n')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on master: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Check the consumer
    try:
        entries = replica_inst.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Consumer database is not consistent with master database')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on consumer: ' + e.message['desc'])
        repl_fail(replica_inst)

    log.info('Data is consistent across the replicas.\n')

    log.info('####################################################################')
    log.info('Replication consistency test passed')
    log.info('####################################################################\n')

    # Remove the replica instance
    replica_inst.delete()

    ############################################################################
    # We made it to the end!
    ############################################################################

    log.info('#####################################################')
    log.info('#####################################################')
    log.info("Dynamic Plugins Testsuite: Completed Successfully!")
    log.info('#####################################################')
    log.info('#####################################################\n')
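
# repl_fail() is called throughout test_dynamic_plugins but not defined in
# this snippet. A minimal sketch of what such a helper might do, assuming it
# tears down the replica instance (when one exists) and then fails the test
# (an assumption, not the suite's actual helper):
def repl_fail(replica_inst):
    if replica_inst:
        # Remove the replica instance so a failed run does not leak it
        replica_inst.delete()
    assert False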