def replica_setup(topo):
    """Create a fresh, valid replica config entry for tests to modify.

    Any replica entries already present on the standalone instance are
    removed first, then a new one is created from the module-level
    ``replica_dict`` properties and returned.
    """
    replicas = Replicas(topo.standalone)
    # Start from a clean slate: drop every pre-existing replica entry.
    for leftover in replicas.list():
        leftover.delete()
    return replicas.create(properties=replica_dict)
# Example #2
def test_csnpurge_large_valueset(topo_m2):
    """Test csn generator test

    :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74
    :setup: MMR with 2 masters
    :steps:
        1. Create a test_user
        2. add a large set of values (more than 10)
        3. delete all the values (more than 10)
        4. configure the replica to purge those values (purgedelay=5s)
        5. Waiting for 6 second
        6. do a series of update
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
        5. Should succeed
        6. Should not crash
    """
    m1 = topo_m2.ms["master2"]

    # Recreate the test entry from scratch so the valueset starts empty.
    user = UserAccount(m1, TEST_ENTRY_DN)
    if user.exists():
        log.info('Deleting entry {}'.format(TEST_ENTRY_DN))
        user.delete()
    entry_props = {
        'uid': TEST_ENTRY_NAME,
        'cn': TEST_ENTRY_NAME,
        'sn': TEST_ENTRY_NAME,
        'userPassword': TEST_ENTRY_NAME,
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': '/home/mmrepl_test',
    }
    user.create(properties=entry_props)

    # Build a large valueset (more than 10 values) so that it gets sorted.
    for idx in range(1, 20):
        user.add('description', 'value {}'.format(str(idx)))

    # Now delete every value of that valueset.
    for idx in range(1, 20):
        user.remove('description', 'value {}'.format(str(idx)))

    # Set the purging delay to 5 seconds, then wait a bit longer than that
    # so the deleted values become eligible for purging.
    replica = Replicas(m1).list()[0]
    log.info('nsds5ReplicaPurgeDelay to 5')
    replica.set('nsds5ReplicaPurgeDelay', '5')
    time.sleep(6)

    # Add new values to the valueset that still contains purgeable entries;
    # the server must not crash while purging.
    for idx in range(21, 25):
        user.add('description', 'value {}'.format(str(idx)))
# Example #3
def test_healthcheck_replication_out_of_sync_broken(topology_m3):
    """Check if HealthCheck returns DSREPLLE0001 code

    :id: b5ae7cae-de0f-4206-95a4-f81538764bea
    :setup: 3 MMR topology
    :steps:
        1. Create a 3 masters full-mesh topology, on M2 and M3 don’t set nsds5BeginReplicaRefresh:start
        2. Perform modifications on M1
        3. Use HealthCheck without --json option
        4. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Healthcheck reports DSREPLLE0001 code and related details
        4. Healthcheck reports DSREPLLE0001 code and related details
    """

    RET_CODE = 'DSREPLLE0001'

    M1 = topology_m3.ms['master1']
    M2 = topology_m3.ms['master2']
    M3 = topology_m3.ms['master3']

    # Point the bind-DN group at a bogus entry on both consumers so that
    # replication from M1 can no longer be acquired.
    log.info('Break master2 and master3')
    for broken in (M2, M3):
        broken_replica = Replicas(broken).list()[0]
        broken_replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl')

    # An update on M1 now leaves the topology out of sync.
    log.info('Perform update on master1')
    UserAccounts(M1, DEFAULT_SUFFIX).create_test_user(1005, 2000)

    # Healthcheck must flag DSREPLLE0001 both in plain and JSON output.
    for as_json in (False, True):
        run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=as_json)
# Example #4
def _compare_memoryruv_and_databaseruv(topo, operation_type):
    """Assert that the in-memory RUV and the database RUV agree after an
    LDAP operation of the given type."""

    log.info(
        'Checking memory ruv for ldap: {} operation'.format(operation_type))
    replicas = Replicas(topo.ms['master1'])
    in_memory_ruv = replicas.list()[0].get_attr_val_utf8('nsds50ruv')

    log.info(
        'Checking database ruv for ldap: {} operation'.format(operation_type))
    ruv_entry = replicas.get_ruv_entry(DEFAULT_SUFFIX)
    database_ruv = ruv_entry.getValues('nsds50ruv')[0]

    assert in_memory_ruv == database_ruv
# Example #5
def test_warining_for_invalid_replica(topo_m4):
    """Testing logs to indicate the inconsistency when configuration is performed.

    :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c8
    :setup: MMR with four masters
    :steps:
        1. Setup nsds5ReplicaBackoffMin to 20
        2. Setup nsds5ReplicaBackoffMax to 10
    :expectedresults:
        1. nsds5ReplicaBackoffMin should set to 20
        2. An error should be generated and also logged in the error logs.
    """
    master1 = topo_m4.ms["master1"]
    replica = Replicas(master1).list()[0]

    log.info('Set nsds5ReplicaBackoffMin to 20')
    replica.set('nsds5ReplicaBackoffMin', '20')

    # A max below the min is inconsistent and must be rejected by the server.
    log.info('Set nsds5ReplicaBackoffMax to 10')
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        replica.set('nsds5ReplicaBackoffMax', '10')

    log.info('Resetting configuration: nsds5ReplicaBackoffMin')
    replica.remove_all('nsds5ReplicaBackoffMin')

    log.info('Check the error log for the error')
    assert master1.ds_error_log.match(
        '.*nsds5ReplicaBackoffMax.*10.*invalid.*')
# Example #6
def test_ticket_49463(topo):
    """Specify a test case purpose or name here

    :id: 2a68e8be-387d-4ac7-9452-1439e8483c13
    :setup: Fill in set up configuration here
    :steps:
        1. Enable fractional replication
        2. Enable replication logging
        3. Check that replication is working fine
        4. Generate skipped updates to create keep alive entries
        5. Remove M3 from the topology
        6. issue cleanAllRuv FORCE that will run on M1 then propagated M2 and M4
        7. Check that Number DEL keep alive '3' is <= 1
        8. Check M1 is the originator of cleanAllRuv and M2/M4 the propagated ones
        9. Check replication M1,M2 and M4 can recover
        10. Remove M4 from the topology
        11. Issue cleanAllRuv not force  while M2 is stopped (that hangs the cleanAllRuv)
        12. Check that nsds5ReplicaCleanRUV is correctly encoded on M1 (last value: 1)
        13. Check that nsds5ReplicaCleanRUV encoding survives M1 restart
        14. Check that nsds5ReplicaCleanRUV encoding is valid on M2 (last value: 0)
        15. Check that (for M4 cleanAllRUV) M1 is Originator and M2 propagation
    :expectedresults:
        1. No report of failure when the RUV is updated
    """

    # Step 1 - Configure fractional (skip telephonenumber) replication
    M1 = topo.ms["master1"]
    M2 = topo.ms["master2"]
    M3 = topo.ms["master3"]
    M4 = topo.ms["master4"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    fractional_server_to_replica(M1, M2)
    fractional_server_to_replica(M1, M3)
    fractional_server_to_replica(M1, M4)

    fractional_server_to_replica(M2, M1)
    fractional_server_to_replica(M2, M3)
    fractional_server_to_replica(M2, M4)

    fractional_server_to_replica(M3, M1)
    fractional_server_to_replica(M3, M2)
    fractional_server_to_replica(M3, M4)

    fractional_server_to_replica(M4, M1)
    fractional_server_to_replica(M4, M2)
    fractional_server_to_replica(M4, M3)

    # Step 2 - enable internal op logging and replication debug
    for i in (M1, M2, M3, M4):
        i.config.loglevel(vals=[256 + 4], service='access')
        i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error')

    # Step 3 - Check that replication is working fine
    add_user(M1, 11, desc="add to M1")
    add_user(M2, 21, desc="add to M2")
    add_user(M3, 31, desc="add to M3")
    add_user(M4, 41, desc="add to M4")

    for i in (M1, M2, M3, M4):
        for j in (M1, M2, M3, M4):
            if i == j:
                continue
            repl.wait_for_replication(i, j)

    # Step 4 - Generate skipped updates to create keep alive entries.
    # telephoneNumber is fractional (not replicated), so these mods are
    # skipped by the agreements and trigger keep-alive entry creation.
    for i in (M1, M2, M3, M4):
        cn = '%s_%d' % (USER_CN, 11)
        dn = 'uid=%s,ou=People,%s' % (cn, SUFFIX)
        users = UserAccount(i, dn)
        for j in range(110):
            users.set('telephoneNumber', str(j))

    # Step 5 - Remove M3 from the topology
    M3.stop()
    M1.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M2.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M4.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    # Step 6 - Then issue cleanAllRuv FORCE that will run on M1, M2 and M4
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
                         force=True, args={TASK_WAIT: True})

    # Step 7 - Count the number of received DEL of the keep alive 3
    for i in (M1, M2, M4):
        i.restart()
    regex = re.compile(".*DEL dn=.cn=repl keep alive 3.*")
    for i in (M1, M2, M4):
        # BUG FIX: count on the instance being iterated, not always on M1,
        # otherwise the per-instance debug output below is meaningless.
        count = count_pattern_accesslog(i, regex)
        log.debug("count on %s = %d" % (i, count))

        # check that DEL is replicated once (If DEL is kept in the fix)
        # check that DEL is not replicated (If DEL is finally no longer done in the fix)
        assert ((count == 1) or (count == 0))

    # Step 8 - Check that M1 is Originator of cleanAllRuv and M2, M4 propagation
    regex = re.compile(".*Original task deletes Keep alive entry .3.*")
    assert pattern_errorlog(M1, regex)

    regex = re.compile(".*Propagated task does not delete Keep alive entry .3.*")
    assert pattern_errorlog(M2, regex)
    assert pattern_errorlog(M4, regex)

    # Step 9 - Check replication M1,M2 and M4 can recover
    add_user(M1, 12, desc="add to M1")
    add_user(M2, 22, desc="add to M2")
    for i in (M1, M2, M4):
        for j in (M1, M2, M4):
            if i == j:
                continue
            repl.wait_for_replication(i, j)

    # Step 10 - Remove M4 from the topology
    M4.stop()
    M1.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port)
    M2.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port)

    # Step 11 - Issue cleanAllRuv not force  while M2 is stopped (that hangs the cleanAllRuv)
    M2.stop()
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='4',
                         force=False, args={TASK_WAIT: False})

    # Step 12
    # CleanAllRuv is hanging waiting for M2 to restart
    # Check that nsds5ReplicaCleanRUV is correctly encoded on M1
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    time.sleep(0.5)
    # BUG FIX: the presence check was computed but its result discarded;
    # it must be asserted (step 13 already asserts the same after restart).
    assert replica.present('nsds5ReplicaCleanRUV')
    log.info("M1: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv'))
    regex = re.compile("^4:.*:no:1$")
    assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))

    # Step 13
    # Check that it encoding survives restart
    M1.restart()
    assert replica.present('nsds5ReplicaCleanRUV')
    assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))

    # Step 14 - Check that nsds5ReplicaCleanRUV encoding is valid on M2
    M1.stop()
    M2.start()
    replicas = Replicas(M2)
    replica = replicas.list()[0]
    M1.start()
    time.sleep(0.5)
    if replica.present('nsds5ReplicaCleanRUV'):
        log.info("M2: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv'))
        regex = re.compile("^4:.*:no:0$")
        assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))

    # time to run cleanAllRuv
    for i in (M1, M2):
        for j in (M1, M2):
            if i == j:
                continue
            repl.wait_for_replication(i, j)

    # Step 15 - Check that M1 is Originator of cleanAllRuv and M2 propagation
    regex = re.compile(".*Original task deletes Keep alive entry .4.*")
    assert pattern_errorlog(M1, regex)

    regex = re.compile(".*Propagated task does not delete Keep alive entry .4.*")
    assert pattern_errorlog(M2, regex)
def replica_reset(topo):
    """Purge all existing replica details"""
    for entry in Replicas(topo.standalone).list():
        entry.delete()
# Example #8
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but the user *not* member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already define a replication via group
           - define the group as groupDN for replication and 60sec as fetch interval
           - pause RA in both direction
           - Define the user as bindDn of the RAs
        4. restart servers.
            It sets the fetch time to 0, so next session will refetch the group
        5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time)
        6. trigger an update and check replication is working and
           there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica'
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    # If you need any test suite initialization,
    # please, write additional fixture for that (including finalizer).
    # Topology for suites are predefined in lib389/topologies.py.

    # If you need host, port or any other data about instance,
    # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'uid': uid,
        'cn': uid,
        'sn': '_%s' % uid,
        'userpassword': PASSWD.encode(),
        'description': b'value creation'
    })
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {'cn': 'group1', 'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    # The user must *not* be a member yet - that is the point of the test.
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    for inst in (M1, M2):
        replica = Replicas(inst).list()[0]
        replica.apply_mods([
            (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
            (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
        ])

    # Then pause the replication agreement to prevent them trying to acquire
    # while the user is not member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members define in the replica
    #
    # The user NOT member of the group replication will not work until bindDNcheckInterval
    #
    # With the fix, the first fetch is not taken into account (fetch time=0)
    # so on the first session, the group will be fetched
    M1.restart()
    M2.restart()

    # Replication being broken here we need to directly do the same update.
    # Sorry not found another solution except total update
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # trigger updates to be sure to have a replication session, giving some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working: both description values must have
    # reached both masters.
    for inst in (M1, M2):
        ents = inst.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
        for ent in ents:
            assert (ent.hasAttr('description'))
            found = sum(1 for val in ent.getValues('description')
                        if val in (b'value_1_1', b'value_2_2'))
            assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not member of the group
    regex = re.compile(
        "does not have permission to supply replication updates to the replica."
    )
    # BUG FIX: M2's error log was previously opened from M1.errlog, so the
    # second check silently re-examined M1. Also use context managers so
    # the file handles do not leak.
    with open(M1.errlog, "r") as errorlog_M1, open(M2.errlog, "r") as errorlog_M2:
        # Find the last restart position
        restart_location_M1 = find_start_location(errorlog_M1, 2)
        assert (restart_location_M1 != -1)
        restart_location_M2 = find_start_location(errorlog_M2, 2)
        assert (restart_location_M2 != -1)

        # Then check there is no failure to authenticate
        count = pattern_errorlog(errorlog_M1,
                                 regex,
                                 start_location=restart_location_M1)
        assert (count <= 1)
        count = pattern_errorlog(errorlog_M2,
                                 regex,
                                 start_location=restart_location_M2)
        assert (count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
# Example #9
def test_basic(topo, new_suffixes, clean_up):
    """Check basic replica functionality

    :feature: Replication
    :steps: 1. Enable replication on supplier. hub and consumer
            2. Create agreements: supplier-hub, hub-consumer
            3. Test supplier-consumer replication
            4. Disable replication
            5. Check that replica, agreements and changelog were deleted
    :expectedresults: No errors happen, replication is successfully enabled and disabled
    """

    supplier = topo.ins["standalone1"]
    hub = topo.ins["standalone2"]
    consumer = topo.ins["standalone3"]

    log.info("Enable replicas (create replica and changelog entries)")
    supplier_replicas = Replicas(supplier)
    supplier_replicas.enable(suffix=NEW_SUFFIX,
                             role=ReplicaRole.SUPPLIER,
                             replicaID=REPLICA_SUPPLIER_ID)
    ents = supplier_replicas.list()
    assert len(ents) == 1
    ents = supplier.changelog.list()
    assert len(ents) == 1

    hub_replicas = Replicas(hub)
    # NOTE(review): the hub reuses CONSUMER_REPLICAID here — looks
    # intentional for this topology, but confirm it is not a copy-paste
    # of the consumer's id.
    hub_replicas.enable(suffix=NEW_SUFFIX,
                        role=ReplicaRole.HUB,
                        replicaID=CONSUMER_REPLICAID)
    ents = hub_replicas.list()
    assert len(ents) == 1
    ents = hub.changelog.list()
    assert len(ents) == 1

    consumer_replicas = Replicas(consumer)
    consumer_replicas.enable(suffix=NEW_SUFFIX, role=ReplicaRole.CONSUMER)
    ents = consumer_replicas.list()
    assert len(ents) == 1

    log.info("Create agreements between the instances")
    supplier.agreement.create(suffix=NEW_SUFFIX, host=hub.host, port=hub.port)
    ents = supplier.agreement.list(suffix=NEW_SUFFIX)
    assert len(ents) == 1
    hub.agreement.create(suffix=NEW_SUFFIX,
                         host=consumer.host,
                         port=consumer.port)
    ents = hub.agreement.list(suffix=NEW_SUFFIX)
    assert len(ents) == 1

    log.info("Test replication")
    supplier_replicas.test(NEW_SUFFIX, consumer)

    log.info("Disable replication")
    supplier_replicas.disable(suffix=NEW_SUFFIX)
    hub_replicas.disable(suffix=NEW_SUFFIX)
    consumer_replicas.disable(suffix=NEW_SUFFIX)

    log.info("Check that replica, agreements and changelog were deleted")
    for num in range(1, 4):
        log.info("Checking standalone{} instance".format(num))
        inst = topo.ins["standalone{}".format(num)]

        log.info("Checking that replica entries don't exist")
        replicas = Replicas(inst)
        ents = replicas.list()
        assert len(ents) == 0

        log.info("Checking that changelog doesn't exist")
        ents = inst.changelog.list()
        assert len(ents) == 0

        log.info(
            "Checking that agreements can't be acquired because the replica entry doesn't exist"
        )
        # BUG FIX: the message assertion used to sit *inside* the raises
        # block, after the raising call, so it never executed (and
        # ExceptionInfo has no ``.msg`` attribute anyway). Assert on
        # str(e.value) after the block instead.
        with pytest.raises(NoSuchEntryError) as e:
            inst.agreement.list(suffix=NEW_SUFFIX)
        assert "no replica set up" in str(e.value)