def test_userpassword_attribute(topo_m2, _delete_after):
    """Modifications of userpassword attribute in an MMR environment were successful
        however a error message was displayed in the error logs which was curious.

    :id: bdcf0464-a947-11ea-9f0d-8c16451d917b
    :setup: MMR with 2 masters
    :steps:
        1. Add a test user to Supplier 1
        2. Check that the user has been propagated to Supplier 2
        3. Modify the user's userpassword attribute on Supplier 2
        4. Check the error logs on Supplier 1 to make sure the error message is not there
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    # Add a test user to Supplier 1
    user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1,
                                                                        gid=1)
    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    # Check that the user has been propagated to Supplier 2
    user2 = UserAccount(m2, user1.dn)
    assert user2.status()
    # Modify the user's userpassword attribute on Supplier 2
    user2.replace('userpassword', 'fred1')
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    assert user1.get_attr_val_utf8('userpassword')
    # Check the error logs on Supplier 1 to make sure the error message is not there
    assert not m1.searchErrorsLog("can\'t add a change for uid=")
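
# NOTE: the _delete_after fixture used above is not shown in this listing.
# A minimal sketch of what such a cleanup fixture could look like (the name,
# scope and rdn handling here are assumptions, not the suite's actual code):
#
#     @pytest.fixture(scope="function")
#     def _delete_after(request, topo_m2):
#         def _delete():
#             # Remove leftover test users so the next test starts clean.
#             users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX,
#                                  rdn=None)
#             for user in users.list():
#                 user.delete()
#         request.addfinalizer(_delete)
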
def test_deleting_twice(topo_m2):
    """Deleting entry twice crashed a server

    :id: 94045560-a64c-11ea-93d6-8c16451d917b
    :setup: MMR with 2 masters
    :steps:
        1. Add an entry
        2. Delete the entry from S1
        3. Delete the same entry from S2 a few seconds later
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    # Adding entry
    user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1,
                                                                        gid=1)
    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    user2 = UserAccount(m2, f'uid=test_user_1,{DEFAULT_SUFFIX}')
    assert user2.status()
    # Deleting the same entry from s1
    user1.delete()
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    # Deleting the same entry from s2 after some seconds
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        user2.delete()
    assert m1.status()
    assert m2.status()
def test_lastupdate_attr_before_init(topo_nr):
    """Check that LastUpdate replica attributes show right values

    :id: bc8ce431-ff65-41f5-9331-605cbcaaa887
    :customerscenario: True
    :setup: Replication setup with supplier and consumer instances
            without initialization
    :steps:
        1. Check nsds5replicaLastUpdateStart value
        2. Check nsds5replicaLastUpdateEnd value
        3. Check nsds5replicaLastUpdateStatus value
        4. Check nsds5replicaLastUpdateStatusJSON is parsable
    :expectedresults:
        1. nsds5replicaLastUpdateStart should be equal to 0
        2. nsds5replicaLastUpdateEnd should be equal to 0
        3. nsds5replicaLastUpdateStatus should not be equal
           to "Replica acquired successfully: Incremental update started"
        4. Success
    """

    supplier = topo_nr.ins["standalone1"]
    consumer = topo_nr.ins["standalone2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_supplier(supplier)

    # Manually create an un-synced consumer.
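    # nsDS5ReplicaType '2' marks a read-only replica, nsDS5Flags '0' means no
    # changelog is kept, and 65535 is the replica ID reserved for consumers.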

    consumer_replicas = Replicas(consumer)
    consumer_replicas.create(
        properties={
            'cn': 'replica',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaId': '65535',
            'nsDS5Flags': '0',
            'nsDS5ReplicaType': '2',
        })

    agmt = repl.ensure_agreement(supplier, consumer)
    with pytest.raises(Exception):
        repl.wait_for_replication(supplier, consumer, timeout=5)

    assert agmt.get_attr_val_utf8(
        'nsds5replicaLastUpdateStart') == "19700101000000Z"
    assert agmt.get_attr_val_utf8(
        "nsds5replicaLastUpdateEnd") == "19700101000000Z"
    assert "replica acquired successfully" not in agmt.get_attr_val_utf8_l(
        "nsds5replicaLastUpdateStatus")

    # make sure the JSON attribute is parsable
    json_status = agmt.get_attr_val_utf8("nsds5replicaLastUpdateStatusJSON")
    if json_status is not None:
        json_obj = json.loads(json_status)
        log.debug("JSON status message: {}".format(json_obj))
def test_entryuuid_with_replication(topo_m2):
    """ Check that entryuuid works with replication

    :id: a5f15bf9-7f63-473a-840c-b9037b787024

    :setup: two node mmr

    :steps:
        1. Create an entry on one server
        2. Wait for replication
        3. Assert it is on the second

    :expectedresults:
        1. Success
        2. Success
        3. Success
    """

    server_a = topo_m2.ms["supplier1"]
    server_b = topo_m2.ms["supplier2"]
    server_a.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.TRACE))
    server_b.config.loglevel(vals=(ErrorLog.DEFAULT, ErrorLog.TRACE))

    repl = ReplicationManager(DEFAULT_SUFFIX)

    account_a = nsUserAccounts(server_a,
                               DEFAULT_SUFFIX).create_test_user(uid=2000)
    euuid_a = account_a.get_attr_vals_utf8('entryUUID')
    print("🧩 %s" % euuid_a)
    assert (euuid_a is not None)
    assert (len(euuid_a) == 1)

    repl.wait_for_replication(server_a, server_b)

    account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
    euuid_b = account_b.get_attr_vals_utf8('entryUUID')
    print("🧩 %s" % euuid_b)

    server_a.config.loglevel(vals=(ErrorLog.DEFAULT, ))
    server_b.config.loglevel(vals=(ErrorLog.DEFAULT, ))

    assert (euuid_b is not None)
    assert (len(euuid_b) == 1)
    assert (euuid_b == euuid_a)

    account_b.set("description", "update")
    repl.wait_for_replication(server_b, server_a)

    euuid_c = account_a.get_attr_vals_utf8('entryUUID')
    print("🧩 %s" % euuid_c)
    assert (euuid_c is not None)
    assert (len(euuid_c) == 1)
    assert (euuid_c == euuid_a)
def test_deletions_are_not_replicated(topo_m2):
    """usn + mmr = deletions are not replicated

    :id: aa4f67ce-a64c-11ea-a6fd-8c16451d917b
    :setup: MMR with 2 masters
    :steps:
        1. Enable the USN plugin on both suppliers
        2. Restart both suppliers
        3. Add user
        4. Check that user propagated to Supplier 2
        5. Check the user's USN on Supplier 1
        6. Check the user's USN on Supplier 2
        7. Delete user
        8. Check that deletion of user propagated to Supplier 1
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
        5. Should succeed
        6. Should succeed
        7. Should succeed
        8. Should succeed
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    # Enable USN plugin on both servers
    usn1 = USNPlugin(m1)
    usn2 = USNPlugin(m2)
    for usn in [usn1, usn2]:
        usn.enable()
    for instance in [m1, m2]:
        instance.restart()
    # Add user
    user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1,
                                                                       gid=1)
    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    # Check that user propagated to Supplier 2
    assert user.dn in [
        i.dn for i in UserAccounts(m2, DEFAULT_SUFFIX, rdn=None).list()
    ]
    user2 = UserAccount(m2, f'uid=test_user_1,{DEFAULT_SUFFIX}')
    # Check the user's USN on Supplier 1
    assert user.get_attr_val_utf8('entryusn')
    # Check the user's USN on Supplier 2
    assert user2.get_attr_val_utf8('entryusn')
    # Delete user
    user2.delete()
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    # Check that deletion of user propagated to Supplier 1
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        user.status()
def test_healthcheck_replication_replica_not_reachable(topology_m2):
    """Check if HealthCheck returns DSREPLLE0005 code

    :id: d452a564-7b82-4c1a-b331-a71abbd82a10
    :setup: Replicated topology
    :steps:
        1. Create a replicated topology
        2. On M1, set nsds5replicaport for the replication agreement to an unreachable port on the replica
        3. Use HealthCheck without --json option
        4. Use HealthCheck with --json option
        5. On M1, set nsds5replicaport for the replication agreement to a reachable port number
        6. Use HealthCheck without --json option
        7. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Healthcheck reports DSREPLLE0005 code and related details
        4. Healthcheck reports DSREPLLE0005 code and related details
        5. Success
        6. Healthcheck reports no issue found
        7. Healthcheck reports no issue found
    """

    RET_CODE = 'DSREPLLE0005'

    M1 = topology_m2.ms['master1']
    M2 = topology_m2.ms['master2']

    set_changelog_trimming(M1)
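    # set_changelog_trimming() is a helper defined elsewhere in the suite; it
    # presumably caps changelog retention (e.g. nsslapd-changelogmaxage) so
    # that trimming is active during the check.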

    log.info(
        'Set nsds5replicaport for the replication agreement to an unreachable port'
    )
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2)

    replica_m1 = Replicas(M1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]
    agmt_m1.replace('nsds5replicaport', '4389')

    run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False)
    run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True)

    log.info(
        'Set nsds5replicaport for the replication agreement to a reachable port'
    )
    agmt_m1.replace('nsDS5ReplicaPort', '{}'.format(M2.port))
    repl.wait_for_replication(M1, M2)

    run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False)
    run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True)
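
# NOTE: run_healthcheck_and_flush_log() is a shared helper of the healthcheck
# suite (it drives the healthcheck framework in-process and asserts on the
# captured log). A rough, hypothetical stand-in that shells out to the dsctl
# CLI instead -- the exact flag placement is an assumption, check `dsctl --help`:
#
#     import subprocess
#
#     def run_healthcheck_and_flush_log(topology, instance, expected, json=False):
#         cmd = ['dsctl']
#         if json:
#             cmd.append('--json')
#         cmd += [instance.serverid, 'healthcheck']
#         out = subprocess.run(cmd, capture_output=True, text=True).stdout
#         assert expected in out
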
def test_password_repl_error(topo_m2, test_entry):
    """Check that error about userpassword replication is properly logged

    :id: 714130ff-e4f0-4633-9def-c1f4b24abfef
    :setup: Two masters replication setup, a test entry
    :steps:
        1. Change userpassword on the first master
        2. Restart the servers to flush the logs
        3. Check the error log for a replication error
    :expectedresults:
        1. Password should be successfully changed
        2. Server should be successfully restarted
        3. There should be no replication errors in the error log
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    TEST_ENTRY_NEW_PASS = '******'

    log.info('Clean the error log')
    m2.deleteErrorLogs()

    log.info('Set replication loglevel')
    m2.config.loglevel((ErrorLog.REPLICA, ))

    log.info('Modifying entry {} - change userpassword on master 1'.format(
        test_entry.dn))

    test_entry.set('userpassword', TEST_ENTRY_NEW_PASS)

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)

    log.info('Restart the servers to flush the logs')
    for num in range(1, 3):
        topo_m2.ms["master{}".format(num)].restart()

    try:
        log.info('Check that password works on master 2')
        test_entry_m2 = UserAccount(m2, test_entry.dn)
        test_entry_m2.bind(TEST_ENTRY_NEW_PASS)

        log.info('Check the error log for the error with {}'.format(
            test_entry.dn))
        assert not m2.ds_error_log.match(
            '.*can.t add a change for {}.*'.format(test_entry.dn))
    finally:
        log.info('Set the default loglevel')
        m2.config.loglevel((ErrorLog.DEFAULT, ))
def test_rename_entry(topo_m2, _delete_after):
    """Rename entry crashed a server

    :id: 3866f9d6-a946-11ea-a3f8-8c16451d917b
    :setup: MMR with 2 masters
    :steps:
        1. Add an entry
        2. Stop the agreements on both masters
        3. Change the description
        4. The change should not appear on the other master
        5. Resume the agreements on both masters
        6. The change should appear on the other master
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
        5. Should succeed
        6. Should succeed
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    # Adding entry
    user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1,
                                                                        gid=1)
    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    user2 = UserAccount(m2, user1.dn)
    assert user2.status()
    # Stop Agreement for both
    agree1 = Agreements(m1).list()[0]
    agree2 = Agreements(m2).list()[0]
    for agree in [agree1, agree2]:
        agree.pause()
    # change description
    user1.replace('description', 'New Des')
    assert user1.get_attr_val_utf8('description')
    # Change will not reflect on other master
    with pytest.raises(AssertionError):
        assert user2.get_attr_val_utf8('description')
    # Turn on agreement on both
    for agree in [agree1, agree2]:
        agree.resume()
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    for instance in [user1, user2]:
        assert instance.get_attr_val_utf8('description')
def test_lastupdate_attr_before_init(topo_nr):
    """Check that LastUpdate replica attributes show right values

    :id: bc8ce431-ff65-41f5-9331-605cbcaaa887
    :setup: Replication setup with master and consumer instances
            without initialization
    :steps:
        1. Check nsds5replicaLastUpdateStart value
        2. Check nsds5replicaLastUpdateEnd value
        3. Check nsds5replicaLastUpdateStatus value
    :expectedresults:
        1. nsds5replicaLastUpdateStart should be equal to 0
        2. nsds5replicaLastUpdateEnd should be equal to 0
        3. nsds5replicaLastUpdateStatus should not be equal
           to "0 Replica acquired successfully: Incremental update started"
    """

    master = topo_nr.ins["standalone1"]
    consumer = topo_nr.ins["standalone2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)

    # Manually create an un-synced consumer.

    consumer_replicas = Replicas(consumer)
    consumer_replicas.create(
        properties={
            'cn': 'replica',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaId': '65535',
            'nsDS5Flags': '0',
            'nsDS5ReplicaType': '2',
        })

    agmt = repl.ensure_agreement(master, consumer)
    with pytest.raises(Exception):
        repl.wait_for_replication(master, consumer, timeout=5)

    assert agmt.get_attr_val_bytes(
        'nsds5replicaLastUpdateStart') == b"19700101000000Z"
    assert agmt.get_attr_val_bytes(
        "nsds5replicaLastUpdateEnd") == b"19700101000000Z"
    assert b"Replica acquired successfully" not in agmt.get_attr_val_bytes(
        "nsds5replicaLastUpdateStatus")
def test_user_compare_m2Repl(topology_m2):
    """
    User compare test between users of a master-to-master replication topology.

    :id: 7c243bea-4075-4304-864d-5b789d364871

    :setup: 2 master MMR

    :steps: 1. Add a user to m1
            2. Wait for replication
            3. Compare if the user is the same

    :expectedresults: 1. User is added
                      2. Replication success
                      3. The user is the same
    """
    rm = ReplicationManager(DEFAULT_SUFFIX)
    m1 = topology_m2.ms.get('master1')
    m2 = topology_m2.ms.get('master2')

    m1_users = UserAccounts(m1, DEFAULT_SUFFIX)
    m2_users = UserAccounts(m2, DEFAULT_SUFFIX)

    # Create 1st user
    user1_properties = {
        'uid': 'testuser',
        'cn': 'testuser',
        'sn': 'user',
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': '/home/testuser'
    }

    m1_users.create(properties=user1_properties)
    m1_testuser = m1_users.get('testuser')

    rm.wait_for_replication(m1, m2)

    m2_testuser = m2_users.get('testuser')

    assert UserAccount.compare(m1_testuser, m2_testuser)
def test_error_20(topo_m2, _delete_after):
    """DS returns error 20 when replacing values of a multi-valued attribute (only when replication is enabled)

    :id: a55bccc6-a64c-11ea-bac8-8c16451d917b
    :setup: MMR with 2 masters
    :steps:
        1. Add user
        2. Change multivalue attribute
    :expectedresults:
        1. Should succeed
        2. Should succeed
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    # Add user
    user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1,
                                                                       gid=1)
    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    repl_manager.wait_for_replication(m1, m2, timeout=100)
    # Change multivalue attribute
    assert user.replace_many(('cn', 'BUG 891866'), ('cn', 'Test'))
def test_healthcheck_replication_presence_of_conflict_entries(topology_m2):
    """Check if HealthCheck returns DSREPLLE0002 code

    :id: 43abc6c6-2075-42eb-8fa3-aa092ff64cba
    :setup: Replicated topology
    :steps:
        1. Create a replicated topology
        2. Create conflict entries : different entries renamed to the same dn
        3. Use HealthCheck without --json option
        4. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Healthcheck reports DSREPLLE0002 code and related details
        4. Healthcheck reports DSREPLLE0002 code and related details
    """

    RET_CODE = 'DSREPLLE0002'

    M1 = topology_m2.ms['master1']
    M2 = topology_m2.ms['master2']

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2)

    topology_m2.pause_all_replicas()

    log.info("Create conflict entries")
    test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
    test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
    user_num = 1000
    test_users_m1.create_test_user(user_num, 2000)
    test_users_m2.create_test_user(user_num, 2000)

    topology_m2.resume_all_replicas()

    repl.test_replication_topology(topology_m2)

    run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False)
    run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True)
def test_ticket_49463(topo):
    """Specify a test case purpose or name here

    :id: 2a68e8be-387d-4ac7-9452-1439e8483c13
    :setup: Four masters replication setup
    :steps:
        1. Enable fractional replication
        2. Enable replication logging
        3. Check that replication is working fine
        4. Generate skipped updates to create keep alive entries
        5. Remove M3 from the topology
        6. Issue cleanAllRuv FORCE that will run on M1 and be propagated to M2 and M4
        7. Check that the number of DELs of keep alive entry '3' is <= 1
        8. Check M1 is the originator of cleanAllRuv and M2/M4 the propagated ones
        9. Check replication M1,M2 and M4 can recover
        10. Remove M4 from the topology
        11. Issue a non-force cleanAllRuv while M2 is stopped (that hangs the cleanAllRuv)
        12. Check that nsds5ReplicaCleanRUV is correctly encoded on M1 (last value: 1)
        13. Check that nsds5ReplicaCleanRUV encoding survives M1 restart
        14. Check that nsds5ReplicaCleanRUV encoding is valid on M2 (last value: 0)
        15. Check that (for M4 cleanAllRUV) M1 is Originator and M2 propagation
    :expectedresults:
        1. No report of failure when the RUV is updated
    """

    # Step 1 - Configure fractional (skip telephonenumber) replication
    M1 = topo.ms["master1"]
    M2 = topo.ms["master2"]
    M3 = topo.ms["master3"]
    M4 = topo.ms["master4"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    fractional_server_to_replica(M1, M2)
    fractional_server_to_replica(M1, M3)
    fractional_server_to_replica(M1, M4)

    fractional_server_to_replica(M2, M1)
    fractional_server_to_replica(M2, M3)
    fractional_server_to_replica(M2, M4)

    fractional_server_to_replica(M3, M1)
    fractional_server_to_replica(M3, M2)
    fractional_server_to_replica(M3, M4)

    fractional_server_to_replica(M4, M1)
    fractional_server_to_replica(M4, M2)
    fractional_server_to_replica(M4, M3)

    # Step 2 - enable internal op logging and replication debug
    for i in (M1, M2, M3, M4):
        i.config.loglevel(vals=[256 + 4], service='access')
        i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error')

    # Step 3 - Check that replication is working fine
    add_user(M1, 11, desc="add to M1")
    add_user(M2, 21, desc="add to M2")
    add_user(M3, 31, desc="add to M3")
    add_user(M4, 41, desc="add to M4")

    for i in (M1, M2, M3, M4):
        for j in (M1, M2, M3, M4):
            if i == j:
                continue
            repl.wait_for_replication(i, j)

    # Step 4 - Generate skipped updates to create keep alive entries
    for i in (M1, M2, M3, M4):
        cn = '%s_%d' % (USER_CN, 11)
        dn = 'uid=%s,ou=People,%s' % (cn, SUFFIX)
        users = UserAccount(i, dn)
        for j in range(110):
            users.set('telephoneNumber', str(j))

    # Step 5 - Remove M3 from the topology
    M3.stop()
    M1.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M2.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M4.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    # Step 6 - Then issue cleanAllRuv FORCE that will run on M1, M2 and M4
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
                         force=True, args={TASK_WAIT: True})

    # Step 7 - Count the number of received DEL of the keep alive 3
    for i in (M1, M2, M4):
        i.restart()
    regex = re.compile(".*DEL dn=.cn=repl keep alive 3.*")
    for i in (M1, M2, M4):
        count = count_pattern_accesslog(i, regex)
        log.debug("count on %s = %d" % (i, count))

        # check that DEL is replicated once (if DEL is kept in the fix)
        # check that DEL is not replicated at all (if DEL is finally no longer done in the fix)
        assert ((count == 1) or (count == 0))

    # Step 8 - Check that M1 is Originator of cleanAllRuv and M2, M4 propagation
    regex = re.compile(".*Original task deletes Keep alive entry .3.*")
    assert pattern_errorlog(M1, regex)

    regex = re.compile(".*Propagated task does not delete Keep alive entry .3.*")
    assert pattern_errorlog(M2, regex)
    assert pattern_errorlog(M4, regex)

    # Step 9 - Check replication M1,M2 and M4 can recover
    add_user(M1, 12, desc="add to M1")
    add_user(M2, 22, desc="add to M2")
    for i in (M1, M2, M4):
        for j in (M1, M2, M4):
            if i == j:
                continue
            repl.wait_for_replication(i, j)

    # Step 10 - Remove M4 from the topology
    M4.stop()
    M1.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port)
    M2.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port)

    # Step 11 - Issue a non-force cleanAllRuv while M2 is stopped (that hangs the cleanAllRuv)
    M2.stop()
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='4',
                         force=False, args={TASK_WAIT: False})

    # Step 12
    # CleanAllRuv is hanging waiting for M2 to restart
    # Check that nsds5ReplicaCleanRUV is correctly encoded on M1
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    time.sleep(0.5)
    assert replica.present('nsds5ReplicaCleanRUV')
    log.info("M1: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv'))
    regex = re.compile("^4:.*:no:1$")
    assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))

    # Step 13
    # Check that the encoding survives a restart
    M1.restart()
    assert replica.present('nsds5ReplicaCleanRUV')
    assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))

    # Step 14 - Check that nsds5ReplicaCleanRUV encoding is valid on M2
    M1.stop()
    M2.start()
    replicas = Replicas(M2)
    replica = replicas.list()[0]
    M1.start()
    time.sleep(0.5)
    if replica.present('nsds5ReplicaCleanRUV'):
        log.info("M2: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv'))
        regex = re.compile("^4:.*:no:0$")
        assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))

    # time to run cleanAllRuv
    for i in (M1, M2):
        for j in (M1, M2):
            if i == j:
                continue
            repl.wait_for_replication(i, j)

    # Step 15 - Check that M1 is Originator of cleanAllRuv and M2 propagation
    regex = re.compile(".*Original task deletes Keep alive entry .4.*")
    assert pattern_errorlog(M1, regex)

    regex = re.compile(".*Propagated task does not delete Keep alive entry .4.*")
    assert pattern_errorlog(M2, regex)
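
# NOTE: fractional_server_to_replica() is a module-level helper of the original
# ticket test. The mechanism is standard fractional replication: the agreement
# carries an attribute exclusion list, so updates touching only the excluded
# attribute become "skipped" updates (which is what Step 4 above relies on).
# A sketch under that assumption:
#
#     def fractional_server_to_replica(server, replica):
#         repl = ReplicationManager(DEFAULT_SUFFIX)
#         agmt = repl.ensure_agreement(server, replica)
#         # Replicate everything except telephonenumber.
#         agmt.replace('nsDS5ReplicatedAttributeList',
#                      '(objectclass=*) $ EXCLUDE telephonenumber')
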
def test_vattr_on_cos_definition_with_replication(topo, reset_ignore_vattr):
    """Test nsslapd-ignore-virtual-attrs configuration attribute
       The attribute is ON by default. If a cos definition is
       added it is moved to OFF in replication scenario

    :id: c1fd8fa1-bd13-478b-9b33-e33b49c587bd
    :customerscenario: True
    :setup: Supplier Consumer
    :steps:
         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer
         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer
         3. Create a cos definition for employeeType in supplier
         4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing) over consumer
         5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs of consumer
         6. Check after deleting cos definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON over consumer
    :expectedresults:
         1. This should be successful
         2. This should be successful
         3. This should be successful
         4. This should be successful
         5. This should be successful
         6. This should be successful
    """
    s = topo.ms['supplier1']
    c = topo.cs['consumer1']
    log.info(
        "Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer"
    )
    assert c.config.present('nsslapd-ignore-virtual-attrs')

    log.info(
        "Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer"
    )
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    # creating CosClassicDefinition in supplier
    log.info("Create a cos definition")
    properties = {
        'cosTemplateDn':
            'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(
                DEFAULT_SUFFIX),
        'cosAttribute': 'employeeType',
        'cosSpecifier': 'nsrole',
        'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'
    }
    cosdef = CosClassicDefinition(s,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
        .create(properties=properties)

    log.info(
        "Check the value of attribute nsslapd-ignore-virtual-attrs is now OFF over consumer"
    )
    time.sleep(2)
    assert c.config.present('nsslapd-ignore-virtual-attrs', 'off')

    # Stop the consumer so we can inspect its error log
    c.stop()
    assert c.searchErrorsLog(
        "slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'"
    )
    c.start()
    log.info("Delete a cos definition")
    cosdef.delete()
    repl = ReplicationManager(DEFAULT_SUFFIX)
    log.info("Check Delete was propagated")
    repl.wait_for_replication(s, c)

    log.info(
        "Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON over consumer"
    )
    s.restart()
    c.restart()
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
def test_openldap_no_nss_crypto(topology_m2):
    """Check that we allow usage of OpenLDAP libraries
    that don't use NSS for crypto

    :id: 0a622f3d-8ba5-4df2-a1de-1fb2237da40a
    :setup: Replication with two masters:
        master_1 ----- startTLS -----> master_2;
        master_1 <-- TLS_clientAuth -- master_2;
        nsslapd-extract-pemfiles set to 'on' on both masters
        without specifying cert names
    :steps:
        1. Add 5 users to master 1 and 2
        2. Check that the users were successfully replicated
        3. Relocate PEM files on master 1
        4. Check PEM files in master 1 config directory
        5. Add 5 users more to master 1 and 2
        6. Check that the users were successfully replicated
        7. Export userRoot on master 1
    :expectedresults:
        1. Users should be successfully added
        2. Users should be successfully replicated
        3. Operation should be successful
        4. PEM files should be found
        5. Users should be successfully added
        6. Users should be successfully replicated
        7. Operation should be successful
    """

    log.info(
        "Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto"
    )

    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]
    for inst in topology_m2:
        inst.enable_tls()
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(m1, m2)

    add_entry(m1, 'master1', 'uid=m1user', 0, 5)
    add_entry(m2, 'master2', 'uid=m2user', 0, 5)
    repl.wait_for_replication(m1, m2)
    repl.wait_for_replication(m2, m1)

    log.info('##### Searching for entries on master1...')
    entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 10 == len(entries)

    log.info('##### Searching for entries on master2...')
    entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 10 == len(entries)

    relocate_pem_files(topology_m2)
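    # relocate_pem_files() is a module-level helper (not shown in this
    # listing); per step 3 of the docstring it moves the extracted PEM files
    # and points the server configuration at the new locations.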

    add_entry(m1, 'master1', 'uid=m1user', 10, 5)
    add_entry(m2, 'master2', 'uid=m2user', 10, 5)

    repl.wait_for_replication(m1, m2)
    repl.wait_for_replication(m2, m1)

    log.info('##### Searching for entries on master1...')
    entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 20 == len(entries)

    log.info('##### Searching for entries on master2...')
    entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 20 == len(entries)

    output_file = os.path.join(m1.get_ldif_dir(), "master1.ldif")
    m1.tasks.exportLDIF(benamebase='userRoot',
                        output_file=output_file,
                        args={'wait': True})

    log.info("Ticket 47536 - PASSED")
def topo_tls_ldapi(topo):
    """Enable TLS on both masters and reconfigure both agreements
    to use TLS Client auth. Also, setup ldapi and export DB
    """

    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]
    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    for inst in topo:
        inst.enable_tls()

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)
    # Now change the auth type

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]

    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m2.sslport),
    )
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m1.sslport),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    log.info("Export LDAPTLS_CACERTDIR env variable for ds-replcheck")
    os.environ["LDAPTLS_CACERTDIR"] = m1.get_ssca_dir()

    for inst in topo:
        inst.config.set('nsslapd-ldapilisten', 'on')
        inst.config.set('nsslapd-ldapifilepath', '/var/run/slapd-{}.socket'.format(inst.serverid))
        inst.restart()

    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)

    return topo
def tls_client_auth(topo_m2):
    """Enable TLS on both masters and reconfigure
    both agreements to use TLS Client auth
    """

    m1 = topo_m2.ms['master1']
    m2 = topo_m2.ms['master2']

    if ds_is_older('1.4.0.6'):
        transport = 'SSL'
    else:
        transport = 'LDAPS'

    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    for inst in topo_m2:
        inst.enable_tls()

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)
    # Now change the auth type

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]

    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m2.sslport)),
    )
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m1.sslport)),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    repl.test_replication_topology(topo_m2)

    return topo_m2
def test_gecos_directoryString_wins_M2(topo_m2, request):
    """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
    Then directoryString wins when nsSchemaCSN M2 is the greatest

    :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
    :setup: Two suppliers replication setup
    :steps:
        1. Create a testuser on M1
        2. Stop M1 and M2
        3. Change gecos def on M2 to be IA5
        4. Start M1 and M2
        5. Update M2 schema so that M2 has the greatest nsSchemaCSN
        6. Update testuser on M2 and trigger replication to M1
        7. Update testuser on M1 with a gecos directoryString value
        8. Check replication is still working
        9. Check gecos is DirectoryString on M1 and M2
    :expectedresults:
        1. success
        2. success
        3. success
        4. success
        5. success
        6. success
        7. success
        8. success
        9. success

    """

    repl = ReplicationManager(DEFAULT_SUFFIX)
    m1 = topo_m2.ms["supplier1"]
    m2 = topo_m2.ms["supplier2"]

    # create a test user
    testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
    testuser = UserAccount(m1, testuser_dn)
    try:
        testuser.create(
            properties={
                'uid': 'testuser',
                'cn': 'testuser',
                'sn': 'testuser',
                'uidNumber': '1000',
                'gidNumber': '2000',
                'homeDirectory': '/home/testuser',
            })
    except ldap.ALREADY_EXISTS:
        pass
    testuser.replace('displayName', 'to trigger replication M1-> M2')
    repl.wait_for_replication(m1, m2)

    # Stop suppliers to update the schema
    m1.stop()
    m2.stop()

    # on M1: gecos is DirectoryString (default)
    # on M2: gecos is IA5
    schema_filename = (m2.schemadir + "/99user.ldif")
    try:
        with open(schema_filename, 'w') as schema_file:
            schema_file.write("dn: cn=schema\n")
            schema_file.write(
                "attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
                "'gecos' DESC 'The GECOS field; the common name' " +
                "EQUALITY caseIgnoreIA5Match " +
                "SUBSTR caseIgnoreIA5SubstringsMatch " +
                "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "SINGLE-VALUE )\n")
        os.chmod(schema_filename, 0o777)
    except OSError as e:
        log.fatal("Failed to update schema file: " +
                  "{} Error: {}".format(schema_filename, str(e)))

    # start the instances
    m1.start()
    m2.start()

    # Check that gecos is IA5 on M2
    schema = SchemaLegacy(m2)
    attributetypes = schema.query_attributetype('gecos')
    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"

    # update M2 schema to increase its nsschemaCSN
    new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
    m2.schema.add_schema('attributetypes', ensure_bytes(new_at))

    # update just to trigger replication M2->M1
    # and update of M2 schema
    testuser_m2 = UserAccount(m2, testuser_dn)
    testuser_m2.replace('displayName', 'to trigger replication M2-> M1')

    # Add a gecos UTF value on M1
    testuser.replace('gecos', 'Hélène')

    # Check replication is still working
    testuser.replace('displayName', 'ascii value')
    repl.wait_for_replication(m1, m2)
    assert testuser_m2.exists()
    assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'

    # Check that gecos is DirectoryString on M1
    schema = SchemaLegacy(m1)
    attributetypes = schema.query_attributetype('gecos')
    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"

    # Check that gecos is DirectoryString on M2
    schema = SchemaLegacy(m2)
    attributetypes = schema.query_attributetype('gecos')
    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"

    def fin():
        m1.start()
        m2.start()
        testuser.delete()
        m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
        repl.wait_for_replication(m1, m2)

        # on M2 restore a default 99user.ldif
        m2.stop()
        os.remove(m2.schemadir + "/99user.ldif")
        schema_filename = (m2.schemadir + "/99user.ldif")
        try:
            with open(schema_filename, 'w') as schema_file:
                schema_file.write("dn: cn=schema\n")
            os.chmod(schema_filename, 0o777)
        except OSError as e:
            log.fatal("Failed to update schema file: " +
                      "{} Error: {}".format(schema_filename, str(e)))
        m2.start()

    request.addfinalizer(fin)
def test_moving_entry_make_online_init_fail(topo_m2):
    """
    Moving an entry could make the online init fail

    :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e
    :setup: Two suppliers replication setup
    :steps:
         1. Generate DIT_0
         2. Generate password policy for DIT_0
         3. Create users for DIT_0
         4. Turn idx % 2 == 0 users into tombstones
         5. Generate DIT_1
         6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1
         7. Move 'ou=OU0,dc=example,dc=com' to DIT_1
         8. Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com'
         9. Init replicas
         10. Number of entries should match on both suppliers

    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
         5. Success
         6. Success
         7. Success
         8. Success
         9. Success
         10. Success
    """

    M1 = topo_m2.ms["supplier1"]
    M2 = topo_m2.ms["supplier2"]

    log.info("Generating DIT_0")
    idx = 0
    add_ou_entry(M1, idx, DEFAULT_SUFFIX)
    log.info("Created entry: ou=OU0, dc=example, dc=com")

    ou0 = 'ou=OU%d' % idx
    first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX)
    add_ou_entry(M1, idx, first_parent)
    log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com")

    add_ldapsubentry(M1, first_parent)

    ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx)
    second_parent = 'ou=OU%d,%s' % (idx, first_parent)
    for idx in range(0, 9):
        add_user_entry(M1, idx, ou_name)
        if idx % 2 == 0:
            log.info("Turning tuser%d into a tombstone entry" % idx)
            del_user_entry(M1, idx, ou_name)

    log.info('%s => %s => %s => 10 USERS' %
             (DEFAULT_SUFFIX, first_parent, second_parent))

    log.info("Generating DIT_1")
    idx = 1
    add_ou_entry(M1, idx, DEFAULT_SUFFIX)
    log.info("Created entry: ou=OU1,dc=example,dc=com")

    third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX)
    add_ou_entry(M1, idx, third_parent)
    log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com")

    add_ldapsubentry(M1, third_parent)

    log.info("Moving %s to DIT_1" % second_parent)
    OrganizationalUnits(M1, second_parent).get('OU0').rename(
        ou0, newsuperior=third_parent)

    log.info("Moving %s to DIT_1" % first_parent)
    fourth_parent = '%s,%s' % (ou0, third_parent)
    OrganizationalUnits(M1, first_parent).get('OU0').rename(
        ou0, newsuperior=fourth_parent)

    fifth_parent = '%s,%s' % (ou0, fourth_parent)

    ou_name = 'ou=OU0,ou=OU1'
    log.info("Moving USERS to %s" % fifth_parent)
    for idx in range(0, 9):
        if idx % 2 == 1:
            rename_entry(M1, idx, ou_name, fifth_parent)

    log.info('%s => %s => %s => %s => 10 USERS' %
             (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent))

    log.info("Run Initialization.")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=5)

    m1entries = M1.search_s(
        DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
        '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
    m2entries = M2.search_s(
        DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
        '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')

    log.info("m1entry count - %d", len(m1entries))
    log.info("m2entry count - %d", len(m2entries))

    assert len(m1entries) == len(m2entries)
def test_mail_attr_repl(topo_r):
    """Check that no crash happens during mail attribute replication

    :id: 959edc84-05be-4bf9-a541-53afae482052
    :setup: Replication setup with master and consumer instances,
            test user on master
    :steps:
        1. Check that user was replicated to consumer
        2. Back up mail database file
        3. Remove mail attribute from the user entry
        4. Restore mail database
        5. Search for the entry with a substring 'mail=user*'
        6. Search for the entry once again to make sure that server is alive
    :expectedresults:
        1. The user should be replicated to consumer
        2. Operation should be successful
        3. The mail attribute should be removed
        4. Operation should be successful
        5. Search should be successful
        6. No crash should happen
    """

    master = topo_r.ms["master1"]
    consumer = topo_r.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m_users = UserAccounts(topo_r.ms["master1"], DEFAULT_SUFFIX)
    m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES)
    m_user.ensure_present('mail', '*****@*****.**')

    log.info("Check that replication is working")
    repl.wait_for_replication(master, consumer)
    c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX)
    c_user = c_users.get('testuser')

    c_bes = Backends(consumer)
    c_be = c_bes.get(DEFAULT_SUFFIX)

    db_dir = c_be.get_attr_val_utf8('nsslapd-directory')

    mail_db = list(filter(lambda fl: fl.startswith("mail"),
                          os.listdir(db_dir)))
    assert mail_db, "mail.* wasn't found in {}"
    mail_db_path = os.path.join(db_dir, mail_db[0])
    backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0])

    consumer.stop()
    log.info("Back up {} to {}".format(mail_db_path, backup_path))
    shutil.copyfile(mail_db_path, backup_path)
    consumer.start()

    log.info("Remove 'mail' attr from master")
    m_user.remove_all('mail')

    log.info("Wait for the replication to happen")
    repl.wait_for_replication(master, consumer)

    consumer.stop()
    log.info("Restore {} to {}".format(backup_path, mail_db_path))
    shutil.copyfile(backup_path, mail_db_path)
    consumer.start()

    log.info("Make a search for mail attribute in attempt to crash server")
    c_user.get_attr_val("mail")

    log.info("Make sure that server hasn't crashed")
    repl.test_replication(master, consumer)
def test_gecos_mixed_definition_topo(topo_m2, request):
    """Check that replication is still working if schema contains
       definitions that does not conform with a replicated entry

    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
    :setup: Two suppliers replication setup
    :steps:
        1. Create a testuser on M1
        2  Stop M1 and M2
        3  Change gecos def on M2 to be IA5
        4  Update testuser with gecos directoryString value
        5  Check replication is still working
    :expectedresults:
        1. success
        2. success
        3. success
        4. success
        5. success

    """

    repl = ReplicationManager(DEFAULT_SUFFIX)
    m1 = topo_m2.ms["supplier1"]
    m2 = topo_m2.ms["supplier2"]

    # create a test user
    testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
    testuser = UserAccount(m1, testuser_dn)
    try:
        testuser.create(
            properties={
                'uid': 'testuser',
                'cn': 'testuser',
                'sn': 'testuser',
                'uidNumber': '1000',
                'gidNumber': '2000',
                'homeDirectory': '/home/testuser',
            })
    except ldap.ALREADY_EXISTS:
        pass
    repl.wait_for_replication(m1, m2)

    # Stop suppliers to update the schema
    m1.stop()
    m2.stop()

    # on M1: gecos is DirectoryString (default)
    # on M2: gecos is IA5
    schema_filename = (m2.schemadir + "/99user.ldif")
    try:
        with open(schema_filename, 'w') as schema_file:
            schema_file.write("dn: cn=schema\n")
            schema_file.write(
                "attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
                "'gecos' DESC 'The GECOS field; the common name' " +
                "EQUALITY caseIgnoreIA5Match " +
                "SUBSTR caseIgnoreIA5SubstringsMatch " +
                "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "SINGLE-VALUE )\n")
        os.chmod(schema_filename, 0o777)
    except OSError as e:
        log.fatal("Failed to update schema file: " +
                  "{} Error: {}".format(schema_filename, str(e)))

    # start the instances
    m1.start()
    m2.start()

    # Check that gecos is IA5 on M2
    schema = SchemaLegacy(m2)
    attributetypes = schema.query_attributetype('gecos')
    assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"

    # Add a gecos UTF value on M1
    testuser.replace('gecos', 'Hélène')

    # Check replication is still working
    testuser.replace('displayName', 'ascii value')
    repl.wait_for_replication(m1, m2)
    testuser_m2 = UserAccount(m2, testuser_dn)
    assert testuser_m2.exists()
    assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'

    def fin():
        m1.start()
        m2.start()
        testuser.delete()
        repl.wait_for_replication(m1, m2)

        # on M2 restore a default 99user.ldif
        m2.stop()
        os.remove(m2.schemadir + "/99user.ldif")
        schema_filename = (m2.schemadir + "/99user.ldif")
        try:
            with open(schema_filename, 'w') as schema_file:
                schema_file.write("dn: cn=schema\n")
            os.chmod(schema_filename, 0o777)
        except OSError as e:
            log.fatal("Failed to update schema file: " +
                      "{} Error: {}".format(schema_filename, str(e)))
        m2.start()
        m1.start()

    request.addfinalizer(fin)
def test_rename_large_subtree(topology_m2):
    """
    A report stated that the following configuration would lead
    to an operation failure:

    ou=int,ou=account,dc=...
    ou=s1,ou=int,ou=account,dc=...
    ou=s2,ou=int,ou=account,dc=...

    rename ou=s1 to re-parent to ou=account, leaving:

    ou=int,ou=account,dc=...
    ou=s1,ou=account,dc=...
    ou=s2,ou=account,dc=...

    If ou=s1 has fewer than 100 entries below it, it can be re-parented.

    If ou=s1 has more than 400 entries, the operation fails.

    Another condition was the presence of referential integrity - so one would
    assume that all users under s1 are members of some group external to this.

    :id: 5915c38d-b3c2-4b7c-af76-8a1e002e27f7

    :setup: Two suppliers replication setup

    :steps: 1. Enable automember plugin
            2. Add UCOUNT users, and ensure they are members of a group.
            3. Enable refer-int plugin
            4. Move ou=s1 to a new parent

    :expectedresults:
        1. The plugin is enabled
        2. The users are members of the group
        3. The plugin is enabled
        4. The rename operation of ou=s1 succeeds
    """

    st = topology_m2.ms["supplier1"]
    m2 = topology_m2.ms["supplier2"]

    # Create a default group
    gps = Groups(st, DEFAULT_SUFFIX)
    # Keep the group so we can get its DN out.
    group = gps.create(properties={'cn': 'default_group'})

    _enable_plugins(st, group.dn)
    _enable_plugins(m2, group.dn)

    # Now unlike normal, we bypass the plural-create method, because we need control
    # over the exact DN of the OU to create.
    # Create the ou=account

    # We don't need to set a DN here because ...
    ou_account = OrganisationalUnit(st)

    # It's set in the .create step.
    ou_account.create(basedn=DEFAULT_SUFFIX, properties={'ou': 'account'})
    # create the ou=int,ou=account
    ou_int = OrganisationalUnit(st)
    ou_int.create(basedn=ou_account.dn, properties={'ou': 'int'})
    # Create the ou=s1,ou=int,ou=account
    ou_s1 = OrganisationalUnit(st)
    ou_s1.create(basedn=ou_int.dn, properties={'ou': 's1'})

    # Pause replication
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.disable_to_supplier(m2, [
        st,
    ])

    # Create the users 1 -> UCOUNT in ou=s1
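    # UCOUNT is a module-level constant in the original suite; the docstring's
    # ">400 entries" failure case implies a value well above 400.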
    nsu = nsUserAccounts(st, basedn=ou_s1.dn, rdn=None)
    for i in range(1000, 1000 + UCOUNT):
        nsu.create_test_user(uid=i)

    # Enable replication

    repl.enable_to_supplier(m2, [
        st,
    ])

    # Assert they are in the group as we expect
    members = group.get_attr_vals_utf8('member')
    assert len(members) == UCOUNT

    # Wait for replication
    repl.wait_for_replication(st, m2, timeout=60)

    for i in range(0, 5):
        # Move ou=s1 to ou=account as parent. We have to provide the rdn,
        # even though it's not changing.
        ou_s1.rename('ou=s1', newsuperior=ou_account.dn)

        members = group.get_attr_vals_utf8('member')
        assert len(members) == UCOUNT
        # Check that we really did refer-int properly, and ou=int is not in the members.
        for member in members:
            assert 'ou=int' not in member

        # Now move it back
        ou_s1.rename('ou=s1', newsuperior=ou_int.dn)
        members = group.get_attr_vals_utf8('member')
        assert len(members) == UCOUNT
        for member in members:
            assert 'ou=int' in member

    # Check everything on the other side is good.
    repl.wait_for_replication(st, m2, timeout=60)

    group2 = Groups(m2, DEFAULT_SUFFIX).get('default_group')

    members = group2.get_attr_vals_utf8('member')
    assert len(members) == UCOUNT
    for member in members:
        assert 'ou=int' in member
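
# NOTE: _enable_plugins() is defined elsewhere in this module. Given the
# docstring (automember + referential integrity), a plausible sketch using
# lib389's plugin objects -- the automember definition details are assumptions:
#
#     from lib389.plugins import (AutoMembershipPlugin,
#                                 AutoMembershipDefinitions,
#                                 ReferentialIntegrityPlugin)
#
#     def _enable_plugins(inst, group_dn):
#         AutoMembershipPlugin(inst).enable()
#         # Route every new posixAccount under the suffix into the group.
#         AutoMembershipDefinitions(inst).create(properties={
#             'cn': 'testgroup_definition',
#             'autoMemberScope': DEFAULT_SUFFIX,
#             'autoMemberFilter': 'objectclass=posixaccount',
#             'autoMemberDefaultGroup': group_dn,
#             'autoMemberGroupingAttr': 'member:dn',
#         })
#         ReferentialIntegrityPlugin(inst).enable()
#         inst.restart()
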
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but with the user *not* a member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already defines replication via a group
           - define the group as the groupDN for replication and 60sec as the fetch interval
           - pause the RAs in both directions
           - define the user as the bindDn of the RAs
        4. Restart the servers.
           This sets the fetch time to 0, so the next session will refetch the group
        5. Before resuming the RAs, add the user to the groupDN (on both sides, as replication is not working at that time)
        6. Trigger an update and check that replication is working and that no
           'does not have permission to supply replication updates to the replica' failure is logged on the supplier side
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    # If you need any test suite initialization,
    # please, write additional fixture for that (including finalizer).
    # Topology for suites are predefined in lib389/topologies.py.

    # If you need host, port or any other data about instance,
    # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'uid': uid,
        'cn': uid,
        'sn': '_%s' % uid,
        'userpassword': PASSWD.encode(),
        'description': b'value creation'
    })
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {'cn': 'group1', 'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    # Then pause the replication agreement to prevent them trying to acquire
    # while the user is not member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members define in the replica
    #
    # The user NOT member of the group replication will not work until bindDNcheckInterval
    #
    # With the fix, the first fetch is not taken into account (fetch time=0)
    # so on the first session, the group will be fetched
    M1.restart()
    M2.restart()

    # Replication is broken at this point, so apply the same update directly
    # on both sides (no better option was found short of a total update)
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # trigger updates to be sure to have a replication session, giving some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working
    ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not member of the group
    regex = re.compile(
        "does not have permission to supply replication updates to the replica."
    )
    errorlog_M1 = open(M1.errlog, "r")
    errorlog_M2 = open(M1.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert (restart_location_M1 != -1)
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert (restart_location_M2 != -1)

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1,
                             regex,
                             start_location=restart_location_M1)
    assert (count <= 1)
    count = pattern_errorlog(errorlog_M2,
                             regex,
                             start_location=restart_location_M2)
    assert (count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
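
# NOTE: find_start_location() and pattern_errorlog() are helpers from the
# original module. A hypothetical sketch of the former: locate the nth-from-last
# server startup banner in the error log and seek there, so that the checks
# above only scan messages logged after the restarts.
#
#     def find_start_location(logfile, n):
#         offsets = []
#         while True:
#             pos = logfile.tell()
#             line = logfile.readline()
#             if not line:
#                 break
#             if 'starting up' in line:
#                 offsets.append(pos)
#         if len(offsets) < n:
#             return -1
#         logfile.seek(offsets[-n])
#         return offsets[-n]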