Code Example #1
def test_healthcheck_replication_presence_of_conflict_entries(topology_m2):
    """Check if HealthCheck returns DSREPLLE0002 code

    :id: 43abc6c6-2075-42eb-8fa3-aa092ff64cba
    :setup: Replicated topology
    :steps:
        1. Create a replicated topology
        2. Create conflict entries: different entries renamed to the same DN
        3. Use HealthCheck without --json option
        4. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Healthcheck reports DSREPLLE0002 code and related details
        4. Healthcheck reports DSREPLLE0002 code and related details
    """

    RET_CODE = 'DSREPLLE0002'

    M1 = topology_m2.ms['master1']
    M2 = topology_m2.ms['master2']

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2)

    topology_m2.pause_all_replicas()

    log.info("Create conflict entries")
    test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
    test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
    user_num = 1000
    test_users_m1.create_test_user(user_num, 2000)
    test_users_m2.create_test_user(user_num, 2000)

    topology_m2.resume_all_replicas()

    repl.test_replication_topology(topology_m2)

    run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False)
    run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True)
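
Note: run_healthcheck_and_flush_log is a helper defined elsewhere in the test module and not shown above. A minimal sketch of what such a helper could look like follows, assuming lib389's health_check_run entry point and the FakeArgs/LogCapture pattern shown in Code Example #3; the real helper may take more options and perform stricter checks.

# Hedged sketch (not the suite's actual helper): run the healthcheck handler
# and assert that the expected result code appears in the captured output,
# then flush the capture buffer for the next call.
from lib389.cli_base import FakeArgs
from lib389.cli_ctl.health import health_check_run


def run_healthcheck_and_flush_log(topology, instance, searched_code, json):
    args = FakeArgs()
    args.instance = instance.serverid
    args.verbose = instance.verbose
    args.list_checks = False
    args.list_errors = False
    args.dry_run = False
    args.check = None          # assumption: run every available check
    args.json = json

    health_check_run(instance, topology.logcap.log, args)
    # logcap.outputs / flush() usage follows Code Example #3
    assert any(searched_code in out.getMessage()
               for out in topology.logcap.outputs)
    topology.logcap.flush()
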
Code Example #2
def test_repl_modrdn(topo_m2):
    """Test that replicated MODRDN does not break replication

    :id: a3e17698-9eb4-41e0-b537-8724b9915fa6
    :setup: Two masters replication setup
    :steps:
        1. Add 3 test OrganizationalUnits A, B and C
        2. Add 1 test user under OU=A
        3. Add same test user under OU=B
        4. Stop Replication
        5. Apply modrdn to M1 - move test user from OU A -> C
        6. Apply modrdn on M2 - move test user from OU B -> C
        7. Start Replication
        8. Check that there is only one test entry under ou=C on both masters
        9. Check that the replication is working fine both ways M1 <-> M2
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass
        5. This should pass
        6. This should pass
        7. This should pass
        8. This should pass
        9. This should pass
    """

    master1 = topo_m2.ms["master1"]
    master2 = topo_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    log.info(
        "Add test entries - Add 3 OUs and 2 same users under 2 different OUs")
    OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX)
    OU_A = OUs.create(properties={
        'ou': 'A',
        'description': 'A',
    })
    OU_B = OUs.create(properties={
        'ou': 'B',
        'description': 'B',
    })
    OU_C = OUs.create(properties={
        'ou': 'C',
        'description': 'C',
    })

    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn))
    tuser_A = users.create(properties=TEST_USER_PROPERTIES)

    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
    tuser_B = users.create(properties=TEST_USER_PROPERTIES)

    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Stop Replication")
    topo_m2.pause_all_replicas()

    log.info("Apply modrdn to M1 - move test user from OU A -> C")
    master1.rename_s(tuser_A.dn,
                     'uid=testuser1',
                     newsuperior=OU_C.dn,
                     delold=1)

    log.info("Apply modrdn on M2 - move test user from OU B -> C")
    master2.rename_s(tuser_B.dn,
                     'uid=testuser1',
                     newsuperior=OU_C.dn,
                     delold=1)

    log.info("Start Replication")
    topo_m2.resume_all_replicas()

    log.info("Wait for sometime for repl to resume")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info(
        "Check that there is only one test entry under ou=C on both masters"
    )
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)
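
Note: when two different entries are renamed to the same DN, 389-ds URP resolution keeps one of them and flags the loser with the nsds5ReplConflict operational attribute. A hedged sketch of an extra check that could be appended to this test to inspect such entries (not part of the original test):

    # Hedged sketch: list any conflict entries produced by the competing
    # MODRDN operations. Losing entries are flagged with nsds5ReplConflict.
    for inst in (master1, master2):
        conflicts = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                  '(nsds5ReplConflict=*)',
                                  ['nsds5ReplConflict'])
        for conflict in conflicts:
            log.info('Conflict entry on %s: %s' % (inst.serverid, conflict.dn))
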
Code Example #3
def test_conflict_cli(topo):
    """Test manageing replication conflict entries

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d8
    :setup: two masters
    :steps:
        1. Create replication conflict entries
        2. List conflicts
        3. Compare conflict entry
        4. Delete conflict
        5. Resurrect conflict
        6. Swap conflict
        7. List glue entry
        8. Delete glue entry
        9. Convert glue entry

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """

    # Setup our default parameters for CLI functions
    topo.logcap = LogCapture()
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.DN = ""
    args.suffix = DEFAULT_SUFFIX
    args.json = True

    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]

    topo.pause_all_replicas()

    # Create entries
    _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent1')
    _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent1')
    _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent2')
    _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent2')
    cont_parent_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent3')
    cont_parent_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent3')
    cont_glue_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent4')
    cont_glue_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent4')

    # Create the conflicts
    _delete_container(cont_parent_m1)
    _create_container(m2, cont_parent_m2.dn, 'conflict_child1')
    _delete_container(cont_glue_m1)
    _create_container(m2, cont_glue_m2.dn, 'conflict_child2')

    # Resume replication
    topo.resume_all_replicas()
    time.sleep(5)

    # Test "list"
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 4
    conflict_1_DN = conflicts['items'][0]['dn']
    conflict_2_DN = conflicts['items'][1]['dn']
    conflict_3_DN = conflicts['items'][2]['dn']
    topo.logcap.flush()

    # Test compare
    args.DN = conflict_1_DN
    cmp_conflict(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 2
    topo.logcap.flush()

    # Test delete
    del_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 3
    topo.logcap.flush()

    # Test swap
    args.DN = conflict_2_DN
    swap_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 2
    topo.logcap.flush()

    # Test conflict convert
    args.DN = conflict_3_DN
    args.new_rdn = "cn=testing convert"
    convert_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 1
    topo.logcap.flush()

    # Test list glue entries
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 2
    topo.logcap.flush()

    # Test delete glue entries
    args.DN = "cn=conflict_parent3,dc=example,dc=com"
    del_glue(m2, None, topo.logcap.log, args)
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 1
    topo.logcap.flush()

    # Test convert glue entries
    args.DN = "cn=conflict_parent4,dc=example,dc=com"
    convert_glue(m2, None, topo.logcap.log, args)
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 0
    topo.logcap.flush()
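
Note: the _create_container and _delete_container helpers called above are defined elsewhere in the test module. A hedged sketch of what they might look like, assuming lib389's nsContainers collection (the real helpers may differ):

# Hedged sketch of the container helpers used in Code Example #3.
from lib389.idm.nscontainer import nsContainers


def _create_container(inst, dn, name):
    """Create cn=<name> under the given DN and return the new container."""
    containers = nsContainers(inst, dn)
    return containers.create(properties={'cn': name})


def _delete_container(container):
    """Delete a container previously returned by _create_container."""
    container.delete()
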
Code Example #4
def test_acceptance(topology_m2):
    """Exercise each plugin and its main features, while
    changing the configuration without restarting the server.

    Make sure that as configuration changes are made they take
    effect immediately.  Cross-plugin interaction (e.g. automember/memberOf)
    needs to be tested, as well as plugin tasks.  Plugin config
    validation (dependencies, etc.) also needs to be tested.
    """

    m1 = topology_m2.ms["master1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        #  Test plugin functionality
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg +
                 '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
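
Note: check_replicas is defined elsewhere in the module. A minimal sketch of the kind of verification it performs, reusing only the ReplicationManager calls already shown in these examples (the real helper may also compare the entries held by the two masters):

# Hedged sketch of a check_replicas-style verification.
def check_replicas(topology_m2):
    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # Verify that updates flow in both directions
    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)
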
Code Example #5
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but with the user *not* a member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already defines a replication via group
           - define the group as the groupDN for replication and 60sec as the fetch interval
           - pause the RAs in both directions
           - define the user as the bindDN of the RAs
        4. Restart the servers.
           This sets the fetch time to 0, so the next session will refetch the group
        5. Before resuming the RAs, add the user to the group (on both sides, as replication is not working at that time)
        6. Trigger an update and check that replication is working and that no
           'does not have permission to supply replication updates to the replica' failure is logged on the supplier side
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    # If you need any test suite initialization,
    # please, write additional fixture for that (including finalizer).
    # Topology for suites are predefined in lib389/topologies.py.

    # If you need host, port or any other data about instance,
    # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'uid': uid,
        'cn': uid,
        'sn': '_%s' % uid,
        'userpassword': PASSWD.encode(),
        'description': b'value creation'
    })
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {'cn': 'group1', 'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    # Then pause the replication agreement to prevent them trying to acquire
    # while the user is not member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members defined in the replica.
    #
    # Since the user is NOT a member of the group, replication will not work
    # until the bindDnGroupCheckInterval expires.
    #
    # With the fix, the first fetch is not taken into account (fetch time=0),
    # so on the first session the group will be fetched.
    M1.restart()
    M2.restart()

    # Replication is broken at this point, so apply the same update directly
    # on both masters (no better alternative short of a total update).
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # Trigger updates to be sure a replication session happens, and give it some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working
    ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not a member of the group
    regex = re.compile(
        "does not have permission to supply replication updates to the replica."
    )
    errorlog_M1 = open(M1.errlog, "r")
    errorlog_M2 = open(M2.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert (restart_location_M1 != -1)
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert (restart_location_M2 != -1)

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1,
                             regex,
                             start_location=restart_location_M1)
    assert (count <= 1)
    count = pattern_errorlog(errorlog_M2,
                             regex,
                             start_location=restart_location_M2)
    assert (count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
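
Note: find_start_location and pattern_errorlog are log-scanning utilities that are not shown in this example. Plausible minimal versions are sketched below; the 'starting up' marker is an assumption about what the error log prints at server start, and the real utilities may differ.

# Hedged sketches of the log-scanning helpers used in Code Example #5.
def find_start_location(log_fh, n):
    """Return the file offset just after the n-th server start marker,
    or -1 if fewer than n starts are found in the error log."""
    positions = []
    log_fh.seek(0)
    line = log_fh.readline()
    while line:
        # Assumption: ns-slapd logs a line containing 'starting up' on start
        if 'starting up' in line:
            positions.append(log_fh.tell())
        line = log_fh.readline()
    if len(positions) < n:
        return -1
    return positions[n - 1]


def pattern_errorlog(log_fh, pattern, start_location=0):
    """Count lines matching the compiled regex from start_location onward."""
    count = 0
    log_fh.seek(start_location)
    for line in log_fh:
        if pattern.search(line):
            count += 1
    return count
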
Code Example #6
def test_stress(topology_m2):
    """Test dynamic plugins got

    Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)
    Restart various plugins while these operations are going on.  Perform this test
    5 times(stress_max_run).
    """

    m1 = topology_m2.ms["master1"]
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        log.info('Do one run through all tests ' + msg + '...')
        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        stress_tests.configureMO(m1)
        stress_tests.configureRI(m1)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info(
                '####################################################################'
            )
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' %
                     (stress_count + 1, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

            # Launch three new threads to add a bunch of users
            add_users = stress_tests.AddUsers(m1, 'employee', True)
            add_users.start()
            add_users2 = stress_tests.AddUsers(m1, 'entry', True)
            add_users2.start()
            add_users3 = stress_tests.AddUsers(m1, 'person', True)
            add_users3.start()
            time.sleep(1)

            # While we are adding users restart the MO plugin and an idle plugin
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)

            # Wait for the 'adding' threads to complete
            add_users.join()
            add_users2.join()
            add_users3.join()

            # Now launch three threads to delete the users
            del_users = stress_tests.DelUsers(m1, 'employee')
            del_users.start()
            del_users2 = stress_tests.DelUsers(m1, 'entry')
            del_users2.start()
            del_users3 = stress_tests.DelUsers(m1, 'person')
            del_users3.start()
            time.sleep(1)

            # Restart both the MO, RI plugins during these deletes, and an idle plugin
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

            # Wait for the 'deleting' threads to complete
            del_users.join()
            del_users2.join()
            del_users3.join()

            # Now make sure both the MO and RI plugins still work correctly
            acceptance_test.func_tests[8](topology_m2, "dynamic")  # RI plugin
            acceptance_test.func_tests[5](topology_m2, "dynamic")  # MO plugin

            # Cleanup the stress tests
            stress_tests.cleanup(m1)

            stress_count += 1
            log.info(
                '####################################################################'
            )
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
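
Note: the AddUsers/DelUsers workers come from the stress_tests module and are not shown here. A hypothetical minimal AddUsers worker is sketched below to illustrate the mechanics: a thread that adds a batch of users whose names are derived from the given prefix. The constructor arguments, user count, and attribute values are assumptions, not the suite's real code.

# Hypothetical sketch of a stress_tests.AddUsers-style worker thread.
import threading

from lib389._constants import DEFAULT_SUFFIX
from lib389.idm.user import UserAccounts


class AddUsers(threading.Thread):
    def __init__(self, inst, rdn_prefix, do_mods, num_users=250):
        super().__init__()
        self.inst = inst
        self.rdn_prefix = rdn_prefix
        self.do_mods = do_mods        # accepted but unused in this sketch
        self.num_users = num_users

    def run(self):
        users = UserAccounts(self.inst, DEFAULT_SUFFIX)
        for idx in range(self.num_users):
            name = '%s_%d' % (self.rdn_prefix, idx)
            # uid is derived from the prefix, so the three concurrent
            # workers in the test do not produce colliding DNs
            users.create(properties={
                'uid': name,
                'cn': name,
                'sn': 'user',
                'uidNumber': str(10000 + idx),
                'gidNumber': str(10000 + idx),
                'homeDirectory': '/home/%s' % name,
            })
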
Code Example #7
def test_memory_corruption(topology_m2):
    """Memory Corruption - Restart the plugins many times, and in different orders and test
    functionality, and stability.  This will excerise the internal
    plugin linked lists, dse callbacks, and task handlers.
    """

    m1 = topology_m2.ms["master1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info(
            '####################################################################\n'
        )
        prev_plugin_test = None
        prev_prev_plugin_test = None

        for plugin_test in acceptance_test.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology_m2, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            # Now run the functional test
            plugin_test(topology_m2, "dynamic")

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' +
                 msg + '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
Code Example #8
def test_acceptance(topology_m2):
    """Exercise each plugin and its main features, while
    changing the configuration without restarting the server.

    :id: 96136538-0151-4b09-9933-0e0cbf2c786c
    :setup: 2 Supplier Instances
    :steps:
        1. Pause all replication
        2. Set nsslapd-dynamic-plugins to on
        3. Try to update LDBM config entry
        4. Go through all plugin basic functionality
        5. Resume replication
        6. Go through all plugin basic functionality again
        7. Check that data is in sync and replication is working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    m1 = topology_m2.ms["supplier1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        #  Test plugin functionality
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg +
                 '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
Code Example #9
def test_stress(topology_m2):
    """Test plugins while under a big load. Perform the test 5 times

    :id: 96136538-0151-4b09-9933-0e0cbf2c7863
    :setup: 2 Supplier Instances
    :steps:
        1. Pause all replication
        2. Set nsslapd-dynamic-plugins to on
        3. Try to update LDBM config entry
        4. Do one run through all tests
        5. Enable Referential integrity and MemberOf plugins
        6. Launch three new threads to add a bunch of users
        7. While we are adding users restart the MemberOf and
           Linked Attributes plugins many times
        8. Wait for the 'adding' threads to complete
        9. Now launch three threads to delete the users
        10. Restart the MemberOf, Referential Integrity and
            Linked Attributes plugins during these deletes
        11. Wait for the 'deleting' threads to complete
        12. Now make sure both the MemberOf and Referential integrity plugins still work correctly
        13. Cleanup the stress tests (delete the group entry)
        14. Perform steps 4-13 five times
        15. Resume replication
        16. Go through steps 4-14 once more
        17. Check that data is in sync and replication is working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
        17. Success
    """

    m1 = topology_m2.ms["supplier1"]
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        log.info('Do one run through all tests ' + msg + '...')
        acceptance_test.check_all_plugins(topology_m2)

        log.info(
            '####################################################################'
        )
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info(
            '####################################################################\n'
        )

        stress_tests.configureMO(m1)
        stress_tests.configureRI(m1)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info(
                '####################################################################'
            )
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' %
                     (stress_count + 1, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

            # Launch three new threads to add a bunch of users
            add_users = stress_tests.AddUsers(m1, 'employee', True)
            add_users.start()
            add_users2 = stress_tests.AddUsers(m1, 'entry', True)
            add_users2.start()
            add_users3 = stress_tests.AddUsers(m1, 'person', True)
            add_users3.start()
            time.sleep(1)

            # While we are adding users restart the MO plugin and an idle plugin
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)

            # Wait for the 'adding' threads to complete
            add_users.join()
            add_users2.join()
            add_users3.join()

            # Now launch three threads to delete the users
            del_users = stress_tests.DelUsers(m1, 'employee')
            del_users.start()
            del_users2 = stress_tests.DelUsers(m1, 'entry')
            del_users2.start()
            del_users3 = stress_tests.DelUsers(m1, 'person')
            del_users3.start()
            time.sleep(1)

            # Restart both the MO, RI plugins during these deletes, and an idle plugin
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(2)
            m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
            time.sleep(1)
            m1.plugins.disable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_MEMBER_OF)
            time.sleep(1)
            m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
            m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)
            m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

            # Wait for the 'deleting' threads to complete
            del_users.join()
            del_users2.join()
            del_users3.join()

            # Now make sure both the MO and RI plugins still work correctly
            acceptance_test.func_tests[8](topology_m2, "dynamic")  # RI plugin
            acceptance_test.func_tests[5](topology_m2, "dynamic")  # MO plugin

            # Cleanup the stress tests
            stress_tests.cleanup(m1)

            stress_count += 1
            log.info(
                '####################################################################'
            )
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info(
                '####################################################################\n'
            )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)
Code Example #10
def test_memory_corruption(topology_m2):
    """Check the plugins for memory corruption issues while
    dynamic plugins option is enabled

    :id: 96136538-0151-4b09-9933-0e0cbf2c7862
    :setup: 2 Supplier Instances
    :steps:
        1. Pause all replication
        2. Set nsslapd-dynamic-plugins to on
        3. Try to update LDBM config entry
        4. Restart each plugin many times in a linked-list fashion,
           also restarting the previous and pre-previous plugins in the list of all plugins
        5. Run the functional test
        6. Repeat 4 and 5 steps for all plugins
        7. Resume replication
        8. Go through steps 4-6 once more
        9. Check that data is in sync and replication is working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """

    m1 = topology_m2.ms["supplier1"]
    msg = ' (no replication)'
    replication_run = False

    # First part of the test should be without replication
    topology_m2.pause_all_replicas()

    # First enable dynamic plugins
    m1.config.replace('nsslapd-dynamic-plugins', 'on')

    # Test that critical plugins can be updated even though the change might not be applied
    ldbm_config = LDBMConfig(m1)
    ldbm_config.replace('description', 'test')

    while True:
        # First run the tests with replication disabled, then rerun them with replication set up

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info(
            '####################################################################'
        )
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info(
            '####################################################################\n'
        )
        prev_plugin_test = None
        prev_prev_plugin_test = None

        for plugin_test in acceptance_test.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology_m2, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology_m2, "restart")

            plugin_test(topology_m2, "restart")

            # Now run the functional test
            plugin_test(topology_m2, "dynamic")

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info(
            '####################################################################'
        )
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' +
                 msg + '.')
        log.info(
            '####################################################################\n'
        )

        if replication_run:
            # We're done.
            break
        else:
            log.info('Resume replication and run everything one more time')
            topology_m2.resume_all_replicas()

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync
    ############################################################################
    check_replicas(topology_m2)