Example #1
def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean):
    """Report during startup if nsslapd-cachememsize is too small

    :id: 1aa8cbda-9c0e-11ea-9297-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Set nsslapd-cache-autosize to 0
        2. Change cachememsize
        3. Check that cachememsize is sufficiently small
        4. Import some users to make id2entry.db big
        5. A warning message should appear in the error logs
    :expectedresults:
        1. Operation successful
        2. Operation successful
        3. Operation successful
        4. Operation successful
        5. Operation successful
    """
    config = LDBMConfig(topo.standalone)
    backend = Backends(topo.standalone).list()[0]
    # Set nsslapd-cache-autosize to 0
    config.replace('nsslapd-cache-autosize', '0')
    # Change cachememsize
    backend.replace('nsslapd-cachememsize', '1')
    # Check that cachememsize is sufficiently small
    assert int(backend.get_attr_val_utf8('nsslapd-cachememsize')) < 1500000
    # Import some users to make id2entry.db big
    _import_offline(topo, 20)
    # The warning message should look like this
    assert topo.standalone.searchErrorsLog(
        'INFO - ldbm_instance_config_cachememsize_set - '
        'force a minimal value 512000')
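# Note: _import_offline is a helper defined elsewhere in this test module and is not
# shown on this page.  A minimal sketch of what it might look like, assuming lib389's
# dbgen_users and DirSrv.ldif2db APIs (exact signatures can differ between lib389
# versions), is:
import time
from lib389.dbgen import dbgen_users
from lib389._constants import DEFAULT_SUFFIX


def _import_offline(topo, number_of_users):
    """Generate an LDIF with number_of_users entries, import it offline and
    return the import duration in seconds (hypothetical sketch)."""
    ldif_file = f'{topo.standalone.get_ldif_dir()}/basic_import.ldif'
    dbgen_users(topo.standalone, number_of_users, ldif_file, DEFAULT_SUFFIX)
    # Offline import requires a stopped server
    topo.standalone.stop()
    start = time.time()
    assert topo.standalone.ldif2db('userRoot', None, None, None, ldif_file)
    duration = time.time() - start
    topo.standalone.start()
    return duration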
Example #2
def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean):
    """With nsslapd-db-private-import-mem: on is faster import.

    :id: 3044331c-9c0e-11ea-ac9f-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Set nsslapd-db-private-import-mem to on and nsslapd-import-cache-autosize to 0
        2. Measure the offline import duration as total_time1
        3. Set nsslapd-db-private-import-mem to off
        4. Measure the offline import duration as total_time2
        5. Verify that total_time1 < total_time2
        6. Set nsslapd-db-private-import-mem to on and nsslapd-import-cache-autosize to -1
        7. Measure the offline import duration as total_time1
        8. Set nsslapd-db-private-import-mem to off
        9. Measure the offline import duration as total_time2
        10. Verify that total_time1 < total_time2
    :expectedresults:
        1. Operation successful
        2. Operation successful
        3. Operation successful
        4. Operation successful
        5. Operation successful
        6. Operation successful
        7. Operation successful
        8. Operation successful
        9. Operation successful
        10. Operation successful
    """
    # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0
    config = LDBMConfig(topo.standalone)
    # Measure offline import time duration total_time1
    total_time1 = _import_offline(topo, 20)
    # Now nsslapd-db-private-import-mem:off
    config.replace('nsslapd-db-private-import-mem', 'off')
    accounts = Accounts(topo.standalone, DEFAULT_SUFFIX)
    for i in accounts.filter('(uid=*)'):
        UserAccount(topo.standalone, i.dn).delete()
    # Measure offline import time duration total_time2
    total_time2 = _import_offline(topo, 20)
    # total_time1 < total_time2
    assert total_time1 < total_time2
    # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1
    config.replace_many(('nsslapd-db-private-import-mem', 'on'),
                        ('nsslapd-import-cache-autosize', '-1'))
    for i in accounts.filter('(uid=*)'):
        UserAccount(topo.standalone, i.dn).delete()
    # Measure offline import time duration total_time1
    total_time1 = _import_offline(topo, 20)
    # Now nsslapd-db-private-import-mem:off
    config.replace('nsslapd-db-private-import-mem', 'off')
    for i in accounts.filter('(uid=*)'):
        UserAccount(topo.standalone, i.dn).delete()
    # Measure offline import time duration total_time2
    total_time2 = _import_offline(topo, 20)
    # total_time1 < total_time2
    assert total_time1 < total_time2
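# Note: the _toggle_private_import_mem fixture named in the test signature is what
# actually applies step 1; it is not shown on this page.  A minimal sketch, assuming
# a function-scoped pytest fixture built on LDBMConfig.replace_many (import path and
# restore values are assumptions), could be:
import pytest
from lib389.config import LDBMConfig


@pytest.fixture(scope="function")
def _toggle_private_import_mem(request, topo):
    config = LDBMConfig(topo.standalone)
    # Step 1: private import memory on, import cache autosize disabled
    config.replace_many(('nsslapd-db-private-import-mem', 'on'),
                        ('nsslapd-import-cache-autosize', '0'))

    def fin():
        # Restore assumed defaults after the test
        config.replace_many(('nsslapd-db-private-import-mem', 'off'),
                            ('nsslapd-import-cache-autosize', '-1'))

    request.addfinalizer(fin)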
Example #3
def test_config_deadlock_policy(topology_m2):
    """Check that nsslapd-db-deadlock-policy acted as expected

    :id: a24e25fd-bc15-47fa-b018-372f6a2ec59c
    :setup: MMR with two suppliers
    :steps:
        1. Search for nsslapd-db-deadlock-policy and check if
           it contains a default value
        2. Set nsslapd-db-deadlock-policy to a positive value
        3. Set nsslapd-db-deadlock-policy to a negative value
        4. Set nsslapd-db-deadlock-policy to an invalid value
        5. Set nsslapd-db-deadlock-policy back to a default value
    :expectedresults:
        1. Search should be successful and should contain the default value
        2. nsslapd-db-deadlock-policy should be successfully set
        3. nsslapd-db-deadlock-policy should be successfully set
        4. Modification with an invalid value should throw an error
        5. nsslapd-db-deadlock-policy should be successfully set
    """

    default_val = b'9'

    ldbmconfig = LDBMConfig(topology_m2.ms["supplier1"])
    bdbconfig = BDB_LDBMConfig(topology_m2.ms["supplier1"])

    if ds_is_older('1.4.2'):
        deadlock_policy = ldbmconfig.get_attr_val_bytes(
            'nsslapd-db-deadlock-policy')
    else:
        deadlock_policy = bdbconfig.get_attr_val_bytes(
            'nsslapd-db-deadlock-policy')

    assert deadlock_policy == default_val

    # Try a range of valid values
    for val in (b'0', b'5', b'9'):
        ldbmconfig.replace('nsslapd-db-deadlock-policy', val)
        if ds_is_older('1.4.2'):
            deadlock_policy = ldbmconfig.get_attr_val_bytes(
                'nsslapd-db-deadlock-policy')
        else:
            deadlock_policy = bdbconfig.get_attr_val_bytes(
                'nsslapd-db-deadlock-policy')

        assert deadlock_policy == val

    # Try a range of invalid values
    for val in ('-1', '10'):
        with pytest.raises(ldap.LDAPError):
            ldbmconfig.replace('nsslapd-db-deadlock-policy', val)

    # Cleanup - undo what we've done
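    # deadlock_policy still holds the last valid value tested (b'9'), which
    # happens to equal default_val, so this restores the default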
    ldbmconfig.replace('nsslapd-db-deadlock-policy', deadlock_policy)
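Example #4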
def test_acceptance(topology_m2):
    """Exercise each plugin and its main features, while
    changing the configuration without restarting the server.

    Make sure that configuration changes take effect immediately as they
    are made.  Cross-plugin interaction (e.g. automember/memberOf) needs to
    be tested, as well as plugin tasks.  Plugin config validation
    (dependencies, etc.) also needs to be tested.
    """

    m1 = topology_m2.ms["master1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        #  Test plugin functionality
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg +
                 '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
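Example #5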
def test_stress(topology_m2):
    """Test dynamic plugins got

    Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)
    Restart various plugins while these operations are going on.  Perform this test
    5 times(stress_max_run).
    """

    m1 = topology_m2.ms["master1"]
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        log.info('Do one run through all tests ' + msg + '...')
        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        stress_tests.configureMO(m1)
        stress_tests.configureRI(m1)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info(
                '####################################################################'
            )
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' %
                     (stress_count + 1, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

            # Launch three new threads to add a bunch of users
            add_users = stress_tests.AddUsers(m1, 'employee', True)
            add_users.start()
            add_users2 = stress_tests.AddUsers(m1, 'entry', True)
            add_users2.start()
            add_users3 = stress_tests.AddUsers(m1, 'person', True)
            add_users3.start()
            time.sleep(1)

            # While we are adding users restart the MO plugin and an idle plugin
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)

            # Wait for the 'adding' threads to complete
            add_users.join()
            add_users2.join()
            add_users3.join()

            # Now launch three threads to delete the users
            del_users = stress_tests.DelUsers(m1, 'employee')
            del_users.start()
            del_users2 = stress_tests.DelUsers(m1, 'entry')
            del_users2.start()
            del_users3 = stress_tests.DelUsers(m1, 'person')
            del_users3.start()
            time.sleep(1)

            # Restart both the MO, RI plugins during these deletes, and an idle plugin
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

            # Wait for the 'deleting' threads to complete
            del_users.join()
            del_users2.join()
            del_users3.join()

            # Now make sure both the MO and RI plugins still work correctly
            acceptance_test.func_tests[8](topology_m2, "dynamic")  # RI plugin
            acceptance_test.func_tests[5](topology_m2, "dynamic")  # MO plugin

            # Cleanup the stress tests
            stress_tests.cleanup(m1)

            stress_count += 1
            log.info(
                '####################################################################'
            )
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
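Example #6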
def test_memory_corruption(topology_m2):
    """Memory Corruption - Restart the plugins many times, and in different orders and test
    functionality, and stability.  This will excerise the internal
    plugin linked lists, dse callbacks, and task handlers.
    """

    m1 = topology_m2.ms["master1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info(
            '####################################################################\n'
        )
        prev_plugin_test = None
        prev_prev_plugin_test = None

        for plugin_test in acceptance_test.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology_m2, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            # Now run the functional test
            plugin_test(topology_m2, "dynamic")

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' +
                 msg + '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
Example #7
def test_acceptance(topology_m2):
    """Exercise each plugin and its main features, while
    changing the configuration without restarting the server.

    :id: 96136538-0151-4b09-9933-0e0cbf2c786c
    :setup: 2 Supplier Instances
    :steps:
        1. Pause all replication
        2. Set nsslapd-dynamic-plugins to on
        3. Try to update LDBM config entry
        4. Go through all plugin basic functionality
        5. Resume replication
        6. Go through all plugin basic functionality again
        7. Check that data is in sync and replication is working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    m1 = topology_m2.ms["supplier1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        #  Test plugin functionality
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg +
                 '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
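# Note: check_replicas is a helper defined elsewhere in this test module and is not
# shown on this page.  A minimal sketch, assuming lib389's ReplicationManager (the
# real helper may perform additional data comparisons), could be:
from lib389.replica import ReplicationManager
from lib389._constants import DEFAULT_SUFFIX


def check_replicas(topology_m2):
    """Check that the two suppliers are in sync (hypothetical sketch)."""
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # Verifies that a change made on each supplier replicates to the other
    repl.test_replication_topology(topology_m2)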
Example #8
def test_stress(topology_m2):
    """Test plugins while under a big load. Perform the test 5 times

    :id: 96136538-0151-4b09-9933-0e0cbf2c7863
    :setup: 2 Supplier Instances
    :steps:
        1. Pause all replication
        2. Set nsslapd-dynamic-plugins to on
        3. Try to update LDBM config entry
        4. Do one run through all tests
        5. Enable the Referential Integrity and MemberOf plugins
        6. Launch three new threads to add a bunch of users
        7. While the users are being added, restart the MemberOf and
           Linked Attributes plugins many times
        8. Wait for the 'adding' threads to complete
        9. Now launch three threads to delete the users
        10. Restart the MemberOf, Referential Integrity and
            Linked Attributes plugins during these deletes
        11. Wait for the 'deleting' threads to complete
        12. Now make sure both the MemberOf and Referential Integrity plugins still work correctly
        13. Clean up the stress tests (delete the group entry)
        14. Perform steps 4-13 five times
        15. Resume replication
        16. Go through steps 4-14 once more
        17. Check that data is in sync and replication is working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
        17. Success
    """

    m1 = topology_m2.ms["supplier1"]
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        log.info('Do one run through all tests ' + msg + '...')
        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        stress_tests.configureMO(m1)
        stress_tests.configureRI(m1)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info(
                '####################################################################'
            )
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' %
                     (stress_count + 1, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

            # Launch three new threads to add a bunch of users
            add_users = stress_tests.AddUsers(m1, 'employee', True)
            add_users.start()
            add_users2 = stress_tests.AddUsers(m1, 'entry', True)
            add_users2.start()
            add_users3 = stress_tests.AddUsers(m1, 'person', True)
            add_users3.start()
            time.sleep(1)

            # While we are adding users restart the MO plugin and an idle plugin
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)

            # Wait for the 'adding' threads to complete
            add_users.join()
            add_users2.join()
            add_users3.join()

            # Now launch three threads to delete the users
            del_users = stress_tests.DelUsers(m1, 'employee')
            del_users.start()
            del_users2 = stress_tests.DelUsers(m1, 'entry')
            del_users2.start()
            del_users3 = stress_tests.DelUsers(m1, 'person')
            del_users3.start()
            time.sleep(1)

            # Restart both the MO, RI plugins during these deletes, and an idle plugin
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

            # Wait for the 'deleting' threads to complete
            del_users.join()
            del_users2.join()
            del_users3.join()

            # Now make sure both the MO and RI plugins still work correctly
            acceptance_test.func_tests[8](topology_m2, "dynamic")  # RI plugin
            acceptance_test.func_tests[5](topology_m2, "dynamic")  # MO plugin

            # Cleanup the stress tests
            stress_tests.cleanup(m1)

            stress_count += 1
            log.info(
                '####################################################################'
            )
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
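# Note: stress_tests.AddUsers and stress_tests.DelUsers are worker threads defined in
# the suite's stress_tests module and are not shown on this page.  A rough sketch of
# the add-side pattern, assuming lib389's UserAccounts API (batch size and attribute
# values are assumptions), might look like:
import threading
from lib389.idm.user import UserAccounts
from lib389._constants import DEFAULT_SUFFIX


class AddUsers(threading.Thread):
    """Add a batch of test users under DEFAULT_SUFFIX (hypothetical sketch)."""

    def __init__(self, inst, rdn_prefix, wait_for_plugins):
        super().__init__()
        self.inst = inst
        self.rdn_prefix = rdn_prefix
        self.wait_for_plugins = wait_for_plugins  # kept for signature parity, unused here

    def run(self):
        users = UserAccounts(self.inst, DEFAULT_SUFFIX)
        for idx in range(100):  # assumed batch size
            name = '%s_%d' % (self.rdn_prefix, idx)
            users.create(properties={
                'uid': name,
                'cn': name,
                'sn': name,
                'uidNumber': str(2000 + idx),
                'gidNumber': str(2000 + idx),
                'homeDirectory': '/home/' + name,
            })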
Example #9
def test_memory_corruption(topology_m2):
    """Check the plugins for memory corruption issues while
    dynamic plugins option is enabled

    :id: 96136538-0151-4b09-9933-0e0cbf2c7862
    :setup: 2 Supplier Instances
    :steps:
        1. Pause all replication
        2. Set nsslapd-dynamic-plugins to on
        3. Try to update LDBM config entry
        4. Restart the plugin many times in a linked-list fashion,
           also restarting the previous and pre-previous plugins in the list of all plugins
        5. Run the functional test
        6. Repeat steps 4 and 5 for all plugins
        7. Resume replication
        8. Go through steps 4-6 once more
        9. Check that data is in sync and replication is working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """

    m1 = topology_m2.ms["supplier1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info(
            '####################################################################\n'
        )
        prev_plugin_test = None
        prev_prev_plugin_test = None

        for plugin_test in acceptance_test.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology_m2, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            # Now run the functional test
            plugin_test(topology_m2, "dynamic")

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' +
                 msg + '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)