Example #1
def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid):
    """Test all the number values you can set for a replica config entry

    :id: a8b47d4a-a089-4d70-8070-e6181209bf94
    :setup: standalone instance
    :steps:
        1. Use a value that is too small
        2. Use a value that is too big
        3. Use a value that overflows the int
        4. Use a value with character value (not a number)
        5. Use a valid value
    :expectedresults:
        1. Add is rejected
        2. Add is rejected
        3. Add is rejected
        4. Add is rejected
        5. Add is allowed
    """
    agmt_reset(topo)
    replica = replica_setup(topo)

    agmts = Agreements(topo.standalone, basedn=replica.dn)

    # Test too small
    perform_invalid_create(agmts, agmt_dict, attr, too_small)
    # Test too big
    perform_invalid_create(agmts, agmt_dict, attr, too_big)
    # Test overflow
    perform_invalid_create(agmts, agmt_dict, attr, overflow)
    # test not a number
    perform_invalid_create(agmts, agmt_dict, attr, notnum)
    # Test valid value
    my_agmt = copy.deepcopy(agmt_dict)
    my_agmt[attr] = valid
    agmts.create(properties=my_agmt)
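
The perform_invalid_create() helper used above is defined elsewhere in the suite. A minimal sketch, assuming it simply copies the valid template dict, overrides one attribute with the bad value, and expects the create to be rejected with an LDAP error, could look like this:

import copy

import ldap
import pytest


def perform_invalid_create(many, properties, attr, value):
    # Sketch of the assumed helper: clone the valid template, poison one
    # attribute, and expect the server to reject the create operation.
    my_properties = copy.deepcopy(properties)
    my_properties[attr] = value
    with pytest.raises(ldap.LDAPError):
        many.create(properties=my_properties)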
Example #2
def _create_entries(topology_m2c2):
    """
    A fixture that creates the first test user and sets up fractional replication agreements
    """
    # Define these as globals, as the same values are used everywhere under the same names.
    global MASTER1, MASTER2, CONSUMER1, CONSUMER2
    MASTER1 = topology_m2c2.ms['master1']
    MASTER2 = topology_m2c2.ms['master2']
    CONSUMER1 = topology_m2c2.cs['consumer1']
    CONSUMER2 = topology_m2c2.cs['consumer2']
    users = UserAccounts(MASTER1, DEFAULT_SUFFIX)
    _create_users(users, 'Sam Carter', 'Carter', 'Sam',
                  ['Accounting', 'People'], 'Sunnyvale', 'scarter',
                  '*****@*****.**', '+1 408 555 4798', '+1 408 555 9751',
                  '4612')
    for ins, num in [(MASTER1, 1), (MASTER2, 2), (MASTER1, 2), (MASTER2, 1)]:
        Agreements(ins).list()[num].replace(
            'nsDS5ReplicatedAttributeList',
            '(objectclass=*) $ EXCLUDE audio businessCategory carLicense departmentNumber '
            'destinationIndicator displayName employeeNumber employeeType facsimileTelephoneNumber '
            'roomNumber telephoneNumber memberOf manager accountUnlockTime '
            'passwordRetryCount retryCountResetTime')
        Agreements(ins).list()[num].replace(
            'nsDS5ReplicatedAttributeListTotal',
            '(objectclass=*) $ EXCLUDE audio businessCategory carLicense departmentNumber '
            'destinationIndicator displayName employeeNumber employeeType facsimileTelephoneNumber '
            'roomNumber telephoneNumber accountUnlockTime passwordRetryCount retryCountResetTime'
        )
        Agreements(ins).list()[num].begin_reinit()
        Agreements(ins).list()[num].wait_reinit()
Example #3
def agmt_setup(topo):
    """Add a valid replica config entry to modify
    """
    # Reset the agreements too.
    replica = replica_setup(topo)
    agmts = Agreements(topo.standalone, basedn=replica.dn)
    for a in agmts.list():
        a.delete()
    return agmts.create(properties=agmt_dict)
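
agmt_dict, the template passed to agmts.create() above, is not part of this listing. A plausible shape for it is sketched below; every host, port and bind value here is an illustrative placeholder, not taken from the original suite:

# Assumed agreement template; values are placeholders.
agmt_dict = {
    'cn': 'test_agreement',
    'nsDS5ReplicaRoot': 'dc=example,dc=com',
    'nsDS5ReplicaHost': 'localhost.localdomain',
    'nsDS5ReplicaPort': '389',
    'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
    'nsDS5ReplicaBindMethod': 'SIMPLE',
    'nsDS5ReplicaTransportInfo': 'LDAP',
    'nsds5ReplicaCredentials': 'password',
}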
Example #4
def config_memberof(server):
    """Configure memberOf plugin and configure fractional
    to prevent total init to send memberof
    """

    memberof = MemberOfPlugin(server)
    memberof.enable()
    memberof.set_autoaddoc('nsMemberOf')
    server.restart()
    agmts = Agreements(server)
    for agmt in agmts.list():
        log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % agmt.dn)
        agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '),
                          ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf'))
Example #5
def test_attribute_nsds5replicatedattributelisttotal(_create_entries,
                                                     _add_user_clean):
    """This test case is to test the newly added attribute nsds5replicatedattributelistTotal.

    :id: 35de9ff0-38eb-11ea-b420-8c16451d917b
    :setup: Master and Consumer
    :steps:
        1. Add a new entry to MASTER1.
        2. Enable the memberOf plugin and then add a few groups with member attributes.
        3. Leave the memberOf plugin disabled on the consumers, i.e., the read-only replicas
           won't get incremental updates for the attributes mentioned in the list.
        4. Run a total update and verify that the same attributes are added/modified in the read-only replicas.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    # Run a total update and verify that the same attributes are added/modified in the read-only replicas.
    user = f'uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}'
    for agreement in Agreements(MASTER1).list():
        agreement.begin_reinit()
        agreement.wait_reinit()
    check_all_replicated()
    for instance in (MASTER1, MASTER2):
        assert Groups(
            MASTER1,
            DEFAULT_SUFFIX).list()[0].get_attr_val_utf8("member") == user
        assert UserAccount(instance,
                           user).get_attr_val_utf8("sn") == "test_user_1000"
    for instance in (CONSUMER1, CONSUMER2):
        for value in ("memberOf", "manager", "sn"):
            assert UserAccount(instance, user).get_attr_val_utf8(value)
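
check_all_replicated() is another helper that is not shown here. Presumably it waits until MASTER1's changes have reached every other instance; a sketch under that assumption (the timeout value is arbitrary) is:

from lib389.replica import ReplicationManager


def check_all_replicated():
    # Assumed helper: MASTER1/MASTER2/CONSUMER1/CONSUMER2 are the globals
    # set by the _create_entries fixture above.
    repl = ReplicationManager(DEFAULT_SUFFIX)
    for instance in (MASTER2, CONSUMER1, CONSUMER2):
        repl.wait_for_replication(MASTER1, instance, timeout=100)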
Example #6
def test_not_int_value(topology_m2):
    """Tests not integer value

    :id: 67c9994f-9251-425a-8197-8d12ad9beafc
    :setup: Replication with two suppliers
    :steps:
        1. Try to set some string value
           to nsDS5ReplicaWaitForAsyncResults
    :expectedresults:
        1. Invalid syntax error should be raised
    """
    supplier1 = topology_m2.ms["supplier1"]
    agmt = Agreements(supplier1).list()[0]

    with pytest.raises(ldap.INVALID_SYNTAX):
        agmt.set_wait_for_async_results("ws2")
Example #7
def test_referral_during_tot(topology_m2):

    master1 = topology_m2.ms["master1"]
    master2 = topology_m2.ms["master2"]

    users = UserAccounts(master2, DEFAULT_SUFFIX)

    u = users.create(properties=TEST_USER_PROPERTIES)
    u.set('userPassword', 'password')

    binddn = u.dn
    bindpw = 'password'

    # Create a bunch of entries on master1
    ldif_dir = master1.get_ldif_dir()
    import_ldif = ldif_dir + '/ref_during_tot_import.ldif'
    dbgen(master1, 10000, import_ldif, DEFAULT_SUFFIX)

    master1.stop()
    master1.ldif2db(bename=None,
                    excludeSuffixes=None,
                    encrypt=False,
                    suffixes=[DEFAULT_SUFFIX],
                    import_file=import_ldif)
    master1.start()
    # Recreate the user on m1 also, so that if the init finishes first we don't lose the user on m2
    users = UserAccounts(master1, DEFAULT_SUFFIX)
    u = users.create(properties=TEST_USER_PROPERTIES)
    u.set('userPassword', 'password')
    # Now export them to master2
    agmts = Agreements(master1)
    agmts.list()[0].begin_reinit()

    # While that's happening try to bind as a user to master 2
    # This should trigger the referral code.
    referred = False
    for i in range(0, 100):
        conn = ldap.initialize(master2.toLDAPURL())
        conn.set_option(ldap.OPT_REFERRALS, False)
        try:
            conn.simple_bind_s(binddn, bindpw)
            conn.unbind_s()
        except ldap.REFERRAL:
            referred = True
            break
    # Means we never got a referral, which should not happen!
    assert referred
Example #8
def test_rename_entry(topo_m2, _delete_after):
    """Renaming an entry crashed the server

    :id: 3866f9d6-a946-11ea-a3f8-8c16451d917b
    :setup: MMR with 2 masters
    :steps:
        1. Add an entry
        2. Stop the agreement on both masters
        3. Change the description
        4. The change will not be reflected on the other master
        5. Turn on the agreements on both masters
        6. The change will be reflected on the other master
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
        5. Should succeed
        6. Should succeed
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    # Adding entry
    user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1,
                                                                        gid=1)
    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    user2 = UserAccount(m2, user1.dn)
    assert user2.status()
    # Stop Agreement for both
    agree1 = Agreements(m1).list()[0]
    agree2 = Agreements(m2).list()[0]
    for agree in [agree1, agree2]:
        agree.pause()
    # change description
    user1.replace('description', 'New Des')
    assert user1.get_attr_val_utf8('description')
    # Change will not reflect on other master
    with pytest.raises(AssertionError):
        assert user2.get_attr_val_utf8('description')
    # Turn on agreement on both
    for agree in [agree1, agree2]:
        agree.resume()
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    for instance in [user1, user2]:
        assert instance.get_attr_val_utf8('description')
Example #9
def test_online_reinit_may_hang(topo_with_sigkill):
    """Online reinitialization may hang when the first
       entry of the DB is the RUV entry instead of the suffix

    :id: cded6afa-66c0-4c65-9651-993ba3f7a49c
    :setup: 2 Supplier Instances
    :steps:
        1. Export the database
        2. Move RUV entry to the top in the ldif file
        3. Import the ldif file
        4. Check that replication is still working
        5. Online replica initialization
    :expectedresults:
        1. Ldif file should be created successfully
        2. RUV entry should be on top in the ldif file
        3. Import should be successful
        4. Replication should work
        5. Server should not hang and consume 100% CPU
    """
    M1 = topo_with_sigkill.ms["supplier1"]
    M2 = topo_with_sigkill.ms["supplier2"]
    M1.stop()
    ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
    M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None, repl_data=True,
               outputfile=ldif_file, encrypt=False)
    _move_ruv(ldif_file)
    M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    M1.start()
    # After this the server may hang
    # Exporting an idle server with replication data and reimporting it
    # should not break replication (unless we hit issue 5098),
    # so let's check that replication is still working.
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)
    agmt = Agreements(M1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
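
_move_ruv() is referenced but not included in this listing. A simplified sketch that moves the RUV tombstone entry to the top of the exported ldif (the well-known ffffffff nsuniqueid is used to spot the RUV entry; the real helper may parse the ldif more carefully) could be:

def _move_ruv(ldif_file):
    # Simplified sketch: split the export into entries, pull out the RUV
    # entry, and rewrite the file with that entry first.
    with open(ldif_file) as f:
        entries = f.read().split('\n\n')
    ruv = [e for e in entries
           if 'nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff' in e]
    others = [e for e in entries if e not in ruv]
    with open(ldif_file, 'w') as f:
        f.write('\n\n'.join(ruv + others))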
Example #10
def test_healthcheck_replication_out_of_sync_not_broken(topology_m3):
    """Check if HealthCheck returns DSREPLLE0003 code

    :id: 8305000d-ba4d-4c00-8331-be0e8bd92150
    :setup: 3 MMR topology
    :steps:
        1. Create a 3 masters full-mesh topology, all replicas being synchronized
        2. Stop M1
        3. Perform an update on M2 and M3.
        4. Check M2 and M3 are synchronized.
        5. From M2, reinitialize the M3 agreement
        6. Stop M2 and M3
        7. Restart M1
        8. Start M3
        9. Use HealthCheck without --json option
        10. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Healthcheck reports DSREPLLE0003 code and related details
        10. Healthcheck reports DSREPLLE0003 code and related details
    """

    RET_CODE = 'DSREPLLE0003'

    M1 = topology_m3.ms['master1']
    M2 = topology_m3.ms['master2']
    M3 = topology_m3.ms['master3']

    log.info('Stop master1')
    M1.stop()

    log.info('Perform update on master2 and master3')
    test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
    test_users_m3 = UserAccounts(M3, DEFAULT_SUFFIX)
    test_users_m2.create_test_user(1000, 2000)
    test_users_m3.create_test_user(1001, 2000)

    log.info('Init M2->M3 agreement')
    agmt = Agreements(M2).list()[1]
    agmt.begin_reinit()
    agmt.wait_reinit()

    log.info('Stop M2 and M3')
    M2.stop()
    M3.stop()

    log.info('Start M1 first, then M3')
    M1.start()
    M3.start()

    run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=False)
    run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=True)
Example #11
def test_value_check(topology_m2, waitfor_async_attr):
    """Checks that value has been set correctly

    :id: 3e81afe9-5130-410d-a1bb-d798d8ab8519
    :setup: Replication with two masters,
        wait for async set on all masters, try:
        None, '2000', '0', '-5'
    :steps:
        1. Search for nsDS5ReplicaWaitForAsyncResults on master 1
        2. Search for nsDS5ReplicaWaitForAsyncResults on master 2
    :expectedresults:
        1. nsDS5ReplicaWaitForAsyncResults should be set correctly
        2. nsDS5ReplicaWaitForAsyncResults should be set correctly
    """

    attr_value = waitfor_async_attr[0]

    for master in topology_m2.ms.values():
        agmt = Agreements(master).list()[0]

        server_value = agmt.get_wait_for_async_results_utf8()
        assert server_value == attr_value
Example #12
def test_multi_value(topology_m2):
    """Tests multi value

    :id: 1932301a-db29-407e-b27e-4466a876d1d3
    :setup: Replication with two suppliers
    :steps:
        1. Set nsDS5ReplicaWaitForAsyncResults to some int
        2. Try to add one more int value
           to nsDS5ReplicaWaitForAsyncResults
    :expectedresults:
        1. nsDS5ReplicaWaitForAsyncResults should be set
        2. Object class violation error should be raised
    """

    supplier1 = topology_m2.ms["supplier1"]
    agmt = Agreements(supplier1).list()[0]

    agmt.set_wait_for_async_results('100')
    with pytest.raises(ldap.OBJECT_CLASS_VIOLATION):
        agmt.add('nsDS5ReplicaWaitForAsyncResults', '101')
Example #13
def waitfor_async_attr(topology_m2, request):
    """Sets attribute on all replicas"""

    attr_value = request.param[0]
    expected_result = request.param[1]

    # Run through all suppliers

    for supplier in topology_m2.ms.values():
        agmt = Agreements(supplier).list()[0]

        if attr_value:
            agmt.set_wait_for_async_results(attr_value)
        else:
            try:
                # Sometimes we can double remove this.
                agmt.remove_wait_for_async_results()
            except ldap.NO_SUCH_ATTRIBUTE:
                pass

    return (attr_value, expected_result)
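
This fixture receives its values through request.param, so it is driven by a params list on the @pytest.fixture decorator that is not shown here. A sketch of what that decorator presumably looks like, with the attribute values taken from test_value_check's docstring ("try: None, '2000', '0', '-5'") and the expected results as placeholders, is:

import pytest


# Assumed decorator shape; the second element of each tuple (the expected
# result) is a placeholder, not the original suite's value.
@pytest.fixture(params=[(None, None),
                        ('2000', '2000'),
                        ('0', '0'),
                        ('-5', '-5')])
def waitfor_async_attr(topology_m2, request):
    ...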
Example #14
def agmt_reset(topo):
    """Purge all existing agreements for testing"""
    agmts = Agreements(topo.standalone)
    for a in agmts.list():
        a.delete()
Example #15
    def test_memberof_groups(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with memberOf and groups

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
        :setup: Two-master replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal operations
        :steps:
            1. Enable memberOf plugin
            2. Add 30 users to m1 and wait for replication to happen
            3. Pause replication
            4. Create a group on m1 and m2
            5. Create a group on m1 and m2, delete from m1
            6. Create a group on m1, delete from m1, and create on m2
            7. Create a group on m2 and m1, delete from m1
            8. Create two different groups on m2
            9. Resume replication
            10. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["master1"]
        M2 = topology_m2.ms["master2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_groups_m1 = Groups(M1, base_m2.dn, rdn=None)
        test_groups_m2 = Groups(M2, base_m2.dn, rdn=None)

        repl = ReplicationManager(SUFFIX)

        for inst in topology_m2.ms.values():
            memberof = MemberOfPlugin(inst)
            memberof.enable()
            agmt = Agreements(inst).list()[0]
            agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',
                               '(objectclass=*) $ EXCLUDE '),
                              ('nsDS5ReplicatedAttributeList',
                               '(objectclass=*) $ EXCLUDE memberOf'))
            inst.restart()
        user_dns = []
        for user_num in range(10):
            user_trio = []
            for num in range(0, 30, 10):
                user = _create_user(test_users_m1, 1200 + user_num + num)
                user_trio.append(user.dn)
            user_dns.append(user_trio)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Check a simple conflict")
        group_num = 0
        _create_group(test_groups_m1,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)

        log.info("Check a add - del")
        group_num += 1
        _create_group(test_groups_m1,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _delete_group(test_groups_m1, group_num)

        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        group_num += 1
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])
        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        group_dns_m1 = [group.dn for group in test_groups_m1.list()]
        group_dns_m2 = [group.dn for group in test_groups_m2.list()]
        assert set(group_dns_m1) == set(group_dns_m2)
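
_create_group() and _delete_group() are local helpers that do not appear in this listing. Minimal sketches consistent with how they are called above (the group naming scheme and the one-second sleep are assumptions) could be:

import time


def _create_group(groups, num, members, sleep=False):
    # Assumed helper: create a numbered group holding the given members;
    # the optional sleep keeps CSNs apart when provoking conflicts.
    group = groups.create(properties={'cn': f'test_group_{num}',
                                      'member': members})
    if sleep:
        time.sleep(1)
    return group


def _delete_group(groups, num, sleep=False):
    # Assumed helper: delete the numbered group created above.
    groups.get(f'test_group_{num}').delete()
    if sleep:
        time.sleep(1)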
Example #16
def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
    """Check that csngen remote offset is not updated if RUV generation uuid are different

    :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
    :setup: Two suppliers + two consumers replication setup
    :steps:
        1. Disable the m1<->m2 agreement to avoid propagating timeSkew
        2. Generate ldif without replication data
        3. Increase time skew on supplier2
        4. Init both suppliers from that ldif
             (to clear the ruvs and generate different generation uuids)
        5. Perform online init from supplier1 to consumer1 and supplier2 to consumer2
        6. Perform update on both suppliers
        7. Check that c1 has no time skew
        8. Check that c2 has time skew
        9. Init supplier2 from supplier1
        10. Perform update on supplier2
        11. Check that c1 has time skew
    :expectedresults:
        1. No error
        2. No error while generating ldif
        3. No error
        4. No error while importing the ldif file
        5. No error and Initialization done.
        6. No error
        7. c1 time skew should be less than the threshold
        8. c2 time skew should be higher than the threshold
        9. No error and Initialization done.
        10. No error
        11. c1 time skew should be higher than the threshold

    """

    # Variables initialization
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m1 = topo_m2c2.ms["supplier1"]
    m2 = topo_m2c2.ms["supplier2"]
    c1 = topo_m2c2.cs["consumer1"]
    c2 = topo_m2c2.cs["consumer2"]

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)

    replicid_m2 = replica_m2.get_rid()

    agmts_m1 = Agreements(m1, replica_m1.dn)
    agmts_m2 = Agreements(m2, replica_m2.dn)

    m1_m2 = get_agreement(agmts_m1, m2)
    m1_c1 = get_agreement(agmts_m1, c1)
    m1_c2 = get_agreement(agmts_m1, c2)
    m2_m1 = get_agreement(agmts_m2, m1)
    m2_c1 = get_agreement(agmts_m2, c1)
    m2_c2 = get_agreement(agmts_m2, c2)

    # Step 1: Disable the m1<->m2 agreement to avoid propagating timeSkew
    m1_m2.pause()
    m2_m1.pause()

    # Step 2: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that are still in the ldif
    # _remove_replication_data(ldif_file)

    # Step 3: Increase time skew on supplier2
    timeSkew = 6 * 3600
    # We can modify supplier2's time skew,
    # but the time skew on the consumer may be smaller
    # depending on when the csngen generation time is updated
    # and when the first csn gets replicated.
    # Since we use timeSkew as the threshold value to detect
    # whether there is time skew or not,
    # let's add a significant margin (longer than the test duration)
    # to avoid any risk of erroneous failure
    timeSkewMargin = 300
    DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew + timeSkewMargin)

    # Step 4: Init both suppliers from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()

    # Step 5: Perform online init from supplier1 to consumer1
    #          and from supplier2 to consumer2
    m1_c1.begin_reinit()
    m2_c2.begin_reinit()
    (done, error) = m1_c1.wait_reinit()
    assert done is True
    assert error is False
    (done, error) = m2_c2.wait_reinit()
    assert done is True
    assert error is False

    # Step 6: Perform update on both suppliers
    repl.test_replication(m1, c1)
    repl.test_replication(m2, c2)

    # Step 7: Check that c1 has no time skew
    # Stop the server to ensure that dse.ldif is up to date
    c1.stop()
    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
    c1_timeSkew = int(c1_nsState['time_skew'])
    log.debug(f"c1 time skew: {c1_timeSkew}")
    if (c1_timeSkew >= timeSkew):
        log.error(
            f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}"
        )
        assert False
    c1.start()

    # Step 8: Check that c2 has time skew
    # Stop the server to ensure that dse.ldif is up to date
    c2.stop()
    c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
    c2_timeSkew = int(c2_nsState['time_skew'])
    log.debug(f"c2 time skew: {c2_timeSkew}")
    if (c2_timeSkew < timeSkew):
        log.error(
            f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}"
        )
        assert False
    c2.start()

    # Step 9: Perform online init from supplier1 to supplier2
    m1_c1.pause()
    m1_m2.resume()
    m1_m2.begin_reinit()
    (done, error) = m1_m2.wait_reinit()
    assert done is True
    assert error is False

    # Step 10: Perform update on supplier2
    repl.test_replication(m2, c1)

    # Step 11: Check that c1 has time skew
    # Stop the server to ensure that dse.ldif is up to date
    c1.stop()
    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
    c1_timeSkew = int(c1_nsState['time_skew'])
    log.debug(f"c1 time skew: {c1_timeSkew}")
    if (c1_timeSkew < timeSkew):
        log.error(
            f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}"
        )
        assert False
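
get_agreement(), used here and in the RUV test further below, is not part of this listing. A sketch that matches an agreement to its consumer by comparing the standard agreement host/port attributes (an assumption about how the real helper works) is:

def get_agreement(agmts, consumer):
    # Assumed helper: return the agreement pointing at the given consumer
    # instance, or None if no agreement targets it.
    for agmt in agmts.list():
        if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
                agmt.get_attr_val_utf8('nsDS5ReplicaHost').lower() == consumer.host.lower()):
            return agmt
    return None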
Example #17
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but the user is *not* a member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already defines replication via a group
           - define the group as groupDN for replication and 60sec as fetch interval
           - pause the RAs in both directions
           - define the user as bindDn of the RAs
        4. Restart the servers.
            This sets the fetch time to 0, so the next session will refetch the group
        5. Before resuming the RAs, add the user to the groupDN (on both sides, as replication is not working at that time)
        6. Trigger an update and check that replication is working and that
           there is no failure logged on the supplier side: 'does not have permission to supply replication updates to the replica'
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    # If you need any test suite initialization,
    # please write an additional fixture for that (including a finalizer).
    # Topologies for suites are predefined in lib389/topologies.py.

    # If you need the host, port or any other data about an instance,
    # please use the instance object attributes (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'uid': uid,
        'cn': uid,
        'sn': '_%s' % uid,
        'userpassword': PASSWD.encode(),
        'description': b'value creation'
    })
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {'cn': 'group1', 'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    # Then pause the replication agreements to prevent them from trying to acquire
    # the replica while the user is not a member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members defined in the replica
    #
    # With the user NOT a member of the group, replication will not work until bindDNcheckInterval expires
    #
    # With the fix, the first fetch is not taken into account (fetch time=0)
    # so on the first session, the group will be fetched
    M1.restart()
    M2.restart()

    # Replication being broken here, we need to apply the same update directly on both sides.
    # No other solution was found, short of a total update.
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # Trigger updates to make sure a replication session happens, giving it some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working
    ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not a member of the group
    regex = re.compile(
        "does not have permission to supply replication updates to the replica."
    )
    errorlog_M1 = open(M1.errlog, "r")
    errorlog_M2 = open(M2.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert (restart_location_M1 != -1)
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert (restart_location_M2 != -1)

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1,
                             regex,
                             start_location=restart_location_M1)
    assert (count <= 1)
    count = pattern_errorlog(errorlog_M2,
                             regex,
                             start_location=restart_location_M2)
    assert (count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
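
find_start_location() and pattern_errorlog() are defined outside this listing. A rough sketch of the first, assuming it returns the file offset of the count-th most recent server startup banner in the error log (the banner text itself is an assumption), is:

def find_start_location(file_obj, count):
    # Assumed helper: collect the offsets of all startup banners and return
    # the count-th most recent one, or -1 if there were fewer restarts.
    locations = []
    file_obj.seek(0)
    while True:
        offset = file_obj.tell()
        line = file_obj.readline()
        if not line:
            break
        if 'starting up' in line:
            locations.append(offset)
    if len(locations) < count:
        return -1
    return locations[-count]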
Example #18
def test_online_init_should_create_keepalive_entries(topo_m2):
    """Check that keep alive entries are created when initializinf a supplier from another one

    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
    :setup: Two suppliers replication setup
    :steps:
        1. Generate ldif without replication data
        2. Init both suppliers from that ldif
        3. Check that keep alive entries do not exist
        4. Perform online init of supplier2 from supplier1
        5. Check that keep alive entries exist
    :expectedresults:
        1. No error while generating ldif
        2. No error while importing the ldif file
        3. No keepalive entries should exist on any supplier
        4. No error while initializing supplier2
        5. All keepalive entries should exist on every supplier

    """

    repl = ReplicationManager(DEFAULT_SUFFIX)
    m1 = topo_m2.ms["supplier1"]
    m2 = topo_m2.ms["supplier2"]
    # Step 1: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that are still in the ldif
    _remove_replication_data(ldif_file)

    # Step 2: Init both suppliers from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()
    """ Replica state is now as if CLI setup has been done using:
        dsconf supplier1 replication enable --suffix "${SUFFIX}" --role supplier
        dsconf supplier2 replication enable --suffix "${SUFFIX}" --role supplier
        dsconf supplier1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
        dsconf supplier2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
        dsconf supplier1 repl-agmt create --suffix "${SUFFIX}"
        dsconf supplier2 repl-agmt create --suffix "${SUFFIX}"
    """

    # Step 3: No keepalive entries should exist on any supplier
    verify_keepalive_entries(topo_m2, False)

    # Step 4: Perform online init of supplier2 from supplier1
    agmt = Agreements(m1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False

    # Step 5: All keepalive entries should exist on every supplier
    # Verify the keep alive entries once replication is in sync
    # (that is the step that fails when the bug is not fixed)
    repl.wait_for_ruv(m2, m1)
    verify_keepalive_entries(topo_m2, True)
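
verify_keepalive_entries() is not included in this listing. Presumably it searches every supplier for the replication keep-alive subentries and asserts their presence or absence; a sketch under that assumption (the filter follows the conventional "cn=repl keep alive <rid>" naming) is:

import ldap


def verify_keepalive_entries(topo, expected):
    # Assumed helper: DEFAULT_SUFFIX is the replicated suffix used above.
    for supplier in topo.ms.values():
        entries = supplier.search_s(
            DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
            '(&(objectClass=ldapsubentry)(cn=repl keep alive*))')
        if expected:
            assert entries
        else:
            assert not entries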
Example #19
def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
    """Check that RUV url is not updated if RUV generation uuid are different

    :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
    :setup: Two suppliers + two consumers replication setup
    :steps:
        1. Generate ldif without replication data
        2. Init both suppliers from that ldif
             (to clear the ruvs and generate different generation uuids)
        3. Perform online init from supplier1 to consumer1
               and from supplier2 to consumer2
        4. Perform update on both suppliers
        5. Check that c1 RUV does not contain a URL towards m2
        6. Check that c2 RUV does contain a URL towards m2
        7. Perform online init from supplier1 to supplier2
        8. Perform update on supplier2
        9. Check that c1 RUV does contain a URL towards m2
    :expectedresults:
        1. No error while generating ldif
        2. No error while importing the ldif file
        3. No error and Initialization done.
        4. No error
        5. supplier2 replicaid should not be in the consumer1 RUV
        6. supplier2 replicaid should be in the consumer2 RUV
        7. No error and Initialization done.
        8. No error
        9. supplier2 replicaid should be in the consumer1 RUV

    """

    # Variables initialization
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m1 = topo_m2c2.ms["supplier1"]
    m2 = topo_m2c2.ms["supplier2"]
    c1 = topo_m2c2.cs["consumer1"]
    c2 = topo_m2c2.cs["consumer2"]

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)

    replicid_m2 = replica_m2.get_rid()

    agmts_m1 = Agreements(m1, replica_m1.dn)
    agmts_m2 = Agreements(m2, replica_m2.dn)

    m1_m2 = get_agreement(agmts_m1, m2)
    m1_c1 = get_agreement(agmts_m1, c1)
    m1_c2 = get_agreement(agmts_m1, c2)
    m2_m1 = get_agreement(agmts_m2, m1)
    m2_c1 = get_agreement(agmts_m2, c1)
    m2_c2 = get_agreement(agmts_m2, c2)

    # Step 1: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that are still in the ldif
    # _remove_replication_data(ldif_file)

    # Step 2: Init both suppliers from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()

    # Step 3: Perform online init from supplier1 to consumer1
    #          and from supplier2 to consumer2
    m1_c1.begin_reinit()
    m2_c2.begin_reinit()
    (done, error) = m1_c1.wait_reinit()
    assert done is True
    assert error is False
    (done, error) = m2_c2.wait_reinit()
    assert done is True
    assert error is False

    # Step 4: Perform update on both suppliers
    repl.test_replication(m1, c1)
    repl.test_replication(m2, c2)

    # Step 5: Check that c1 RUV does not contain a URL towards m2
    ruv = replica_c1.get_ruv()
    log.debug(f"c1 RUV: {ruv}")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV")
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
        log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
        # Note: this assertion fails if issue 2054 is not fixed.
        assert False

    # Step 6: Check that c2 RUV does contain a URL towards m2
    ruv = replica_c2.get_ruv()
    log.debug(f"c2 RUV: {ruv} {ruv._rids} ")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
        assert False
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")

    # Step 7: Perform online init from supplier1 to supplier2
    m1_m2.begin_reinit()
    (done, error) = m1_m2.wait_reinit()
    assert done is True
    assert error is False

    # Step 8: Perform update on supplier2
    repl.test_replication(m2, c1)

    # Step 9: Check that c1 RUV does contain a URL towards m2
    ruv = replica_c1.get_ruv()
    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
        assert False
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")