Example 1
def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
    """Check that RUV url is not updated if RUV generation uuid are different

    :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
    :setup: Two suppliers + two consumers replication setup
    :steps:
        1. Generate ldif without replication data
        2. Init both suppliers from that ldif
             (to clear the RUVs and generate different generation uuids)
        3. Perform online init from supplier1 to consumer1
               and from supplier2 to consumer2
        4. Perform update on both suppliers
        5. Check that c1 RUV does not contain a URL towards m2
        6. Check that c2 RUV does contain a URL towards m2
        7. Perform online init from supplier1 to supplier2
        8. Perform update on supplier2
        9. Check that c1 RUV does contain a URL towards m2
    :expectedresults:
        1. No error while generating ldif
        2. No error while importing the ldif file
        3. No error and Initialization done.
        4. No error
        5. supplier2 replicaid should not be in the consumer1 RUV
        6. supplier2 replicaid should be in the consumer2 RUV
        7. No error and Initialization done.
        8. No error
        9. supplier2 replicaid should be in the consumer1 RUV

    """

    # Variables initialization
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m1 = topo_m2c2.ms["supplier1"]
    m2 = topo_m2c2.ms["supplier2"]
    c1 = topo_m2c2.cs["consumer1"]
    c2 = topo_m2c2.cs["consumer2"]

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)

    replicid_m2 = replica_m2.get_rid()

    agmts_m1 = Agreements(m1, replica_m1.dn)
    agmts_m2 = Agreements(m2, replica_m2.dn)

    m1_m2 = get_agreement(agmts_m1, m2)
    m1_c1 = get_agreement(agmts_m1, c1)
    m1_c2 = get_agreement(agmts_m1, c2)
    m2_m1 = get_agreement(agmts_m2, m1)
    m2_c1 = get_agreement(agmts_m2, c1)
    m2_c2 = get_agreement(agmts_m2, c2)

    # Step 1: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that is still in the ldif
    # _remove_replication_data(ldif_file)

    # Step 2: Init both suppliers from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()

    # Step 3: Perform online init from supplier1 to consumer1
    #          and from supplier2 to consumer2
    m1_c1.begin_reinit()
    m2_c2.begin_reinit()
    (done, error) = m1_c1.wait_reinit()
    assert done is True
    assert error is False
    (done, error) = m2_c2.wait_reinit()
    assert done is True
    assert error is False

    # Step 4: Perform update on both suppliers
    repl.test_replication(m1, c1)
    repl.test_replication(m2, c2)

    # Step 5: Check that c1 RUV does not contain a URL towards m2
    ruv = replica_c1.get_ruv()
    log.debug(f"c1 RUV: {ruv}")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV")
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
        log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
        # Note: this assertion fails if issue 2054 is not fixed.
        assert False

    # Step 6: Check that c2 RUV does contain a URL towards m2
    ruv = replica_c2.get_ruv()
    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
        assert False
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")

    # Step 7: Perform online init from supplier1 to supplier2
    m1_m2.begin_reinit()
    (done, error) = m1_m2.wait_reinit()
    assert done is True
    assert error is False

    # Step 8: Perform update on supplier2
    repl.test_replication(m2, c1)

    # Step 9: Check that c1 RUV does contain a URL towards m2
    ruv = replica_c1.get_ruv()
    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
        assert False
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
Example 2
def test_memberof_with_changelog_reset(topo_m2):
    """Test that replication does not break, after DS stop-start, due to changelog reset

    :id: 60c11636-55a1-4704-9e09-2c6bcc828de4
    :setup: 2 Masters
    :steps:
        1. On M1 and M2, Enable memberof
        2. On M1, add 999 entries allowing memberof
        3. On M1, add a group with these 999 entries as members
        4. Stop M1 in between, while the group add is still in progress:
           the stop happens after the memberof plugin has started processing
           the add and before the add has finished
        5. Check that replication is working fine
    :expectedresults:
        1. memberof should be enabled
        2. Entries should be added
        3. Add operation should start
        4. M1 should be stopped
        5. Replication should be working fine
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]

    log.info("Configure memberof on M1 and M2")
    memberof = MemberOfPlugin(m1)
    memberof.enable()
    memberof.set_autoaddoc('nsMemberOf')
    m1.restart()

    memberof = MemberOfPlugin(m2)
    memberof.enable()
    memberof.set_autoaddoc('nsMemberOf')
    m2.restart()

    log.info("On M1, add 999 test entries allowing memberof")
    users_list = add_users(topo_m2, 999, DEFAULT_SUFFIX)

    log.info("On M1, add a group with these 999 entries as members")
    dic_of_attributes = {
        'cn': ensure_bytes('testgroup'),
        'objectclass': ensure_list_bytes(['top', 'groupOfNames'])
    }

    for user in users_list:
        dic_of_attributes.setdefault('member', [])
        dic_of_attributes['member'].append(user.dn)

    log.info('Adding the test group using async function')
    groupdn = 'cn=testgroup,%s' % DEFAULT_SUFFIX
    m1.add(Entry((groupdn, dic_of_attributes)))

    # Shut down the server in the middle of adding the group
    m1.stop()

    # Start the server
    m1.start()

    log.info("Check the log messages for error")
    error_msg = "ERR - NSMMReplicationPlugin - ruv_compare_ruv"
    assert not m1.ds_error_log.match(error_msg)

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_m2)
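
The `add_users` helper is part of this test module; a minimal sketch, assuming it simply creates the requested number of users with lib389's UserAccounts and returns them (attribute choices are illustrative):

from lib389.idm.user import UserAccounts

def add_users(topo_m2, users_num, suffix):
    # Create users_num test users on master1 and return them as a list;
    # they are replicated to master2 through the existing agreements.
    users = UserAccounts(topo_m2.ms['master1'], suffix)
    return [users.create_test_user(uid=num) for num in range(users_num)]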
Example 3
def test_openldap_no_nss_crypto(topology_m2):
    """Check that we allow usage of OpenLDAP libraries
    that don't use NSS for crypto

    :id: 0a622f3d-8ba5-4df2-a1de-1fb2237da40a
    :setup: Replication with two masters:
        master_1 ----- startTLS -----> master_2;
        master_1 <-- TLS_clientAuth -- master_2;
        nsslapd-extract-pemfiles set to 'on' on both masters
        without specifying cert names
    :steps:
        1. Add 5 users to master 1 and 2
        2. Check that the users were successfully replicated
        3. Relocate PEM files on master 1
        4. Check PEM files in master 1 config directory
        5. Add 5 users more to master 1 and 2
        6. Check that the users were successfully replicated
        7. Export userRoot on master 1
    :expectedresults:
        1. Users should be successfully added
        2. Users should be successfully replicated
        3. Operation should be successful
        4. PEM files should be found
        5. Users should be successfully added
        6. Users should be successfully replicated
        7. Operation should be successful
    """

    log.info(
        "Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto"
    )

    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]
    [i.enable_tls() for i in topology_m2]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(m1, m2)

    add_entry(m1, 'master1', 'uid=m1user', 0, 5)
    add_entry(m2, 'master2', 'uid=m2user', 0, 5)
    repl.wait_for_replication(m1, m2)
    repl.wait_for_replication(m2, m1)

    log.info('##### Searching for entries on master1...')
    entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 10 == len(entries)

    log.info('##### Searching for entries on master2...')
    entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 10 == len(entries)

    relocate_pem_files(topology_m2)

    add_entry(m1, 'master1', 'uid=m1user', 10, 5)
    add_entry(m2, 'master2', 'uid=m2user', 10, 5)

    repl.wait_for_replication(m1, m2)
    repl.wait_for_replication(m2, m1)

    log.info('##### Searching for entries on master1...')
    entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 20 == len(entries)

    log.info('##### Searching for entries on master2...')
    entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 20 == len(entries)

    output_file = os.path.join(m1.get_ldif_dir(), "master1.ldif")
    m1.tasks.exportLDIF(benamebase='userRoot',
                        output_file=output_file,
                        args={'wait': True})

    log.info("Ticket 47536 - PASSED")
Example 4
def test_stress_clean(topology_m4, m4rid):
    """Put each server(m1 - m4) under a stress, and perform the entire clean process

    :id: a8263cd6-f068-4357-86e0-e7c34504c8c5
    :setup: Replication setup with four masters
    :steps:
        1. Add a bunch of updates to all masters
        2. Put master 4 into read-only mode
        3. Disable replication on master 4
        4. Remove agreements to master 4 from the other masters
        5. Run a cleanallruv task on master 1
        6. Check that everything was cleaned
    :expectedresults:
        1. Operation should be successful
        2. Master 4 should be put into read-only mode
        3. Replication on master 4 should be disabled
        4. Agreements to master 4 should be removed
        5. Operation should be successful
        6. Everything should be cleaned
    """

    log.info('Running test_stress_clean...')
    log.info('test_stress_clean: put all the masters under load...')

    ldbm_config = LDBMConfig(topology_m4.ms["master4"])

    # Put all the masters under load
    m1_add_users = AddUsers(topology_m4.ms["master1"], 2000)
    m1_add_users.start()
    m2_add_users = AddUsers(topology_m4.ms["master2"], 2000)
    m2_add_users.start()
    m3_add_users = AddUsers(topology_m4.ms["master3"], 2000)
    m3_add_users.start()
    m4_add_users = AddUsers(topology_m4.ms["master4"], 2000)
    m4_add_users.start()

    # Allow some time for replication to get flowing in all directions
    log.info(
        'test_stress_clean: allow some time for replication to get flowing...')
    time.sleep(5)

    # Put master 4 into read only mode
    ldbm_config.set('nsslapd-readonly', 'on')
    # We need to wait for master 4 to push its changes out
    log.info(
        'test_stress_clean: allow some time for master 4 to push changes out (30 seconds)...'
    )
    time.sleep(30)

    # Remove the agreements from the other masters that point to master 4
    remove_master4_agmts("test_stress_clean", topology_m4)

    # Run the task
    cruv_task = CleanAllRUVTask(topology_m4.ms["master1"])
    cruv_task.create(
        properties={
            'replica-id': m4rid,
            'replica-base-dn': DEFAULT_SUFFIX,
            'replica-force-cleaning': 'no'
        })
    cruv_task.wait()

    # Wait for the update to finish
    log.info('test_stress_clean: wait for all the updates to finish...')
    m1_add_users.join()
    m2_add_users.join()
    m3_add_users.join()
    m4_add_users.join()

    # Check the other masters' RUVs for 'replica 4'
    log.info(
        'test_stress_clean: check if all the replicas have been cleaned...')
    clean = check_ruvs("test_stress_clean", topology_m4, m4rid)
    assert clean

    log.info('test_stress_clean:  PASSED, restoring master 4...')

    # Wait for replication to complete before restoring master 4
    log.info("Wait up to 120 seconds for replication to complete...")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology([
        topology_m4.ms["master1"],
        topology_m4.ms["master2"],
        topology_m4.ms["master3"],
    ],
                                   timeout=120)

    # Turn off readonly mode
    ldbm_config.set('nsslapd-readonly', 'off')
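
`AddUsers` is a small load-generator thread defined in the test module. A minimal sketch under the assumption that it simply adds users in a loop and tolerates errors once master 4 goes read-only (names and properties are illustrative):

import threading
import ldap
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389._constants import DEFAULT_SUFFIX

class AddUsers(threading.Thread):
    def __init__(self, inst, num_users):
        threading.Thread.__init__(self)
        self.daemon = True
        self.inst = inst
        self.num_users = num_users

    def run(self):
        # Add self.num_users entries with names unique to this instance;
        # stop silently if the instance is switched to read-only mode.
        users = UserAccounts(self.inst, DEFAULT_SUFFIX)
        for idx in range(self.num_users):
            name = 'user_%s_%d' % (self.inst.serverid, idx)
            props = TEST_USER_PROPERTIES.copy()
            props.update({'uid': name, 'cn': name, 'sn': name})
            try:
                users.create(properties=props)
            except ldap.LDAPError:
                break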
Example 5
def tls_client_auth(topo_m2):
    """Enable TLS on both masters and reconfigure
    both agreements to use TLS Client auth
    """

    m1 = topo_m2.ms['master1']
    m2 = topo_m2.ms['master2']

    if ds_is_older('1.4.0.6'):
        transport = 'SSL'
    else:
        transport = 'LDAPS'

    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    [i.enable_tls() for i in topo_m2]

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)
    # Now change the auth type

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]

    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m2.sslport)),
    )
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m1.sslport)),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    repl.test_replication_topology(topo_m2)

    return topo_m2
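
`tls_client_auth` above acts as a helper/fixture that returns the reconfigured topology; a minimal usage sketch, assuming it is exposed as a pytest fixture of the same name (the test name is hypothetical):

from lib389.replica import ReplicationManager
from lib389._constants import DEFAULT_SUFFIX

def test_repl_over_client_auth(tls_client_auth):
    # Both agreements now use SSLCLIENTAUTH; replication should still
    # work in both directions.
    m1 = tls_client_auth.ms['master1']
    m2 = tls_client_auth.ms['master2']
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)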
Example 6
    def test_nested_entries_with_children(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5
        :setup: Two master replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add 15 containers to m1 and wait for replication to happen
            2. Pause replication
            3. Create parent-child on master2 and master1
            4. Create parent-child on master1 and master2
            5. Create parent-child on master1 and master2 different child rdn
            6. Create parent-child on master1 and delete parent on master2
            7. Create parent on master1, delete it and parent-child on master2, delete them
            8. Create parent on master1, delete it and parent-two children on master2
            9. Create parent-two children on master1 and parent-child on master2, delete them
            10. Create three subsets inside existing container entry, applying only part of changes on m2
            11. Create more combinations of the subset with parent-child on m1 and parent on m2
            12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2
            13. Resume replication
            14. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
            11. It should pass
            12. It should pass
            13. It should pass
            14. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["master1"]
        M2 = topology_m2.ms["master2"]
        repl = ReplicationManager(SUFFIX)
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        _create_user(test_users_m1, 4000)
        _create_user(test_users_m1, 4001)

        cont_list = []
        for num in range(15):
            cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num))
            cont_list.append(cont)

        repl.test_replication(M1, M2)

        topology_m2.pause_all_replicas()

        log.info("Create parent-child on master2 and master1")
        _create_container(M2, base_m2.dn, 'p0', sleep=True)
        cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True)
        _create_container(M1, cont_p.dn, 'c0', sleep=True)
        _create_container(M2, cont_p.dn, 'c0', sleep=True)

        log.info("Create parent-child on master1 and master2")
        cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True)
        _create_container(M2, base_m2.dn, 'p1', sleep=True)
        _create_container(M1, cont_p.dn, 'c1', sleep=True)
        _create_container(M2, cont_p.dn, 'c1', sleep=True)

        log.info(
            "Create parent-child on master1 and master2 different child rdn")
        cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True)
        _create_container(M2, base_m2.dn, 'p2', sleep=True)
        _create_container(M1, cont_p.dn, 'c2', sleep=True)
        _create_container(M2, cont_p.dn, 'c3', sleep=True)

        log.info("Create parent-child on master1 and delete parent on master2")
        cont_num = 0
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
        _delete_container(cont_p_m2)

        cont_num += 1
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
        _delete_container(cont_p_m2, sleep=True)

        log.info(
            "Create parent on master1, delete it and parent-child on master2, delete them"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2)

        cont_num += 1
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1)

        log.info(
            "Create parent on master1, delete it and parent-two children on master2"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        _create_container(M2, cont_p_m2.dn, 'c0')
        _create_container(M2, cont_p_m2.dn, 'c1')

        cont_num += 1
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        _create_container(M2, cont_p_m2.dn, 'c0')
        _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)

        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        log.info(
            "Create parent-two children on master1 and parent-child on master2, delete them"
        )
        cont_num += 1
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1')

        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)

        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        log.info(
            "Create three subsets inside existing container entry, applying only part of changes on m2"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2, sleep=True)

        log.info(
            "Create more combinations of the subset with parent-child on m1 and parent on m2"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        _delete_container(cont_p_m1, sleep=True)
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        _delete_container(cont_p_m1, sleep=True)
        _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
        _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
        _delete_container(cont_c_m1, sleep=True)
        _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)
        _delete_container(cont_p_m1, sleep=True)

        log.info(
            "Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2"
        )
        cont_num += 1
        _delete_container(cont_list[cont_num])
        _modify_user(test_users_m1, 4000, sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p0')
        _modify_user(test_users_m2, 4001)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2, timeout=60)

        conts_dns = {}
        for num in range(1, 3):
            inst = topology_m2.ms["master{}".format(num)]
            conts_dns[inst.serverid] = []
            conts = nsContainers(inst, base_m2.dn)
            for cont in conts.list():
                conts_p = nsContainers(inst, cont.dn)
                for cont_p in conts_p.list():
                    conts_c = nsContainers(inst, cont_p.dn)
                    conts_dns[inst.serverid].extend(
                        [cont_c.dn for cont_c in conts_c.list()])
                conts_dns[inst.serverid].extend(
                    [cont_p.dn for cont_p in conts_p.list()])
            conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

        assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid])

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
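
The container helpers used throughout this test are defined in the test module; a minimal sketch of the assumed behaviour, with the optional sleep there to force distinct CSNs between conflicting operations (illustrative):

import time
from lib389.idm.nscontainer import nsContainers

def _create_container(inst, dn, name, sleep=False):
    # Create a nsContainer entry cn=<name> under dn
    containers = nsContainers(inst, dn)
    container = containers.create(properties={'cn': name})
    if sleep:
        time.sleep(1)
    return container

def _delete_container(container, sleep=False):
    # Delete a container created by _create_container
    container.delete()
    if sleep:
        time.sleep(1)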
Example 7
def test_rename_large_subtree(topology_m2):
    """
    A report stated that the following configuration would lead
    to an operation failure:

    ou=int,ou=account,dc=...
    ou=s1,ou=int,ou=account,dc=...
    ou=s2,ou=int,ou=account,dc=...

    rename ou=s1 to re-parent to ou=account, leaving:

    ou=int,ou=account,dc=...
    ou=s1,ou=account,dc=...
    ou=s2,ou=account,dc=...

    If ou=s1 has fewer than 100 entries below it, it can be reparented.

    If ou=s1 has more than 400 entries, the rename fails.

    Another condition was the presence of referential integrity - so one would
    assume that all users under s1 are members of some group external to this subtree.

    :id: 5915c38d-b3c2-4b7c-af76-8a1e002e27f7

    :setup: Two supplier replication setup

    :steps: 1. Enable automember plugin
            2. Add UCOUNT users, and ensure they are members of a group.
            3. Enable refer-int plugin
            4. Move ou=s1 to a new parent

    :expectedresults:
        1. The plugin is enabled
        2. The users are members of the group
        3. The plugin is enabled
        4. The rename operation of ou=s1 succeeds
    """

    st = topology_m2.ms["supplier1"]
    m2 = topology_m2.ms["supplier2"]

    # Create a default group
    gps = Groups(st, DEFAULT_SUFFIX)
    # Keep the group so we can get its DN out.
    group = gps.create(properties={'cn': 'default_group'})

    _enable_plugins(st, group.dn)
    _enable_plugins(m2, group.dn)

    # Now unlike normal, we bypass the plural-create method, because we need control
    # over the exact DN of the OU to create.
    # Create the ou=account

    # We don't need to set a DN here because ...
    ou_account = OrganisationalUnit(st)

    # It's set in the .create step.
    ou_account.create(basedn=DEFAULT_SUFFIX, properties={'ou': 'account'})
    # create the ou=int,ou=account
    ou_int = OrganisationalUnit(st)
    ou_int.create(basedn=ou_account.dn, properties={'ou': 'int'})
    # Create the ou=s1,ou=int,ou=account
    ou_s1 = OrganisationalUnit(st)
    ou_s1.create(basedn=ou_int.dn, properties={'ou': 's1'})

    # Pause replication
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.disable_to_supplier(m2, [
        st,
    ])

    # Create the users 1 -> UCOUNT in ou=s1
    nsu = nsUserAccounts(st, basedn=ou_s1.dn, rdn=None)
    for i in range(1000, 1000 + UCOUNT):
        nsu.create_test_user(uid=i)

    # Enable replication

    repl.enable_to_supplier(m2, [
        st,
    ])

    # Assert they are in the group as we expect
    members = group.get_attr_vals_utf8('member')
    assert len(members) == UCOUNT

    # Wait for replication
    repl.wait_for_replication(st, m2, timeout=60)

    for i in range(0, 5):
        # Move ou=s1 to ou=account as parent. We have to provide the rdn,
        # even though it's not changing.
        ou_s1.rename('ou=s1', newsuperior=ou_account.dn)

        members = group.get_attr_vals_utf8('member')
        assert len(members) == UCOUNT
        # Check that we really did refer-int properly, and ou=int is not in the members.
        for member in members:
            assert 'ou=int' not in member

        # Now move it back
        ou_s1.rename('ou=s1', newsuperior=ou_int.dn)
        members = group.get_attr_vals_utf8('member')
        assert len(members) == UCOUNT
        for member in members:
            assert 'ou=int' in member

    # Check everything on the other side is good.
    repl.wait_for_replication(st, m2, timeout=60)

    group2 = Groups(m2, DEFAULT_SUFFIX).get('default_group')

    members = group2.get_attr_vals_utf8('member')
    assert len(members) == UCOUNT
    for member in members:
        assert 'ou=int' in member
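
`_enable_plugins` is a helper from this test module. A hedged sketch of what it is assumed to do: enable automember (so new users land in the default group), memberOf and referential integrity, then restart; the exact scope and filter below are assumptions:

from lib389.plugins import (AutoMembershipPlugin, AutoMembershipDefinitions,
                            MemberOfPlugin, ReferentialIntegrityPlugin)
from lib389._constants import DEFAULT_SUFFIX

def _enable_plugins(inst, group_dn):
    # Automember: every new account becomes a member of group_dn
    AutoMembershipPlugin(inst).enable()
    definitions = AutoMembershipDefinitions(inst)
    definitions.create(properties={
        'cn': 'default',
        'autoMemberScope': DEFAULT_SUFFIX,
        'autoMemberFilter': 'objectclass=nsAccount',
        'autoMemberDefaultGroup': group_dn,
        'autoMemberGroupingAttr': 'member:dn',
    })
    # memberOf keeps the back-links, refint cleans them up on rename/delete
    MemberOfPlugin(inst).enable()
    ReferentialIntegrityPlugin(inst).enable()
    inst.restart()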
Example 8
def test_online_init_should_create_keepalive_entries(topo_m2):
    """Check that keep alive entries are created when initializinf a supplier from another one

    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
    :setup: Two suppliers replication setup
    :steps:
        1. Generate ldif without replication data
        2. Init both suppliers from that ldif
        3. Check that keepalive entries do not exist
        4. Perform online init of supplier2 from supplier1
        5. Check that keepalive entries exist
    :expectedresults:
        1. No error while generating ldif
        2. No error while importing the ldif file
        3. No keepalive entry should exist on any supplier
        4. No error while initializing supplier2
        5. All keepalive entries should exist on every supplier

    """

    repl = ReplicationManager(DEFAULT_SUFFIX)
    m1 = topo_m2.ms["supplier1"]
    m2 = topo_m2.ms["supplier2"]
    # Step 1: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that is still in the ldif
    _remove_replication_data(ldif_file)

    # Step 2: Init both suppliers from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()
    """ Replica state is now as if CLI setup has been done using:
        dsconf supplier1 replication enable --suffix "${SUFFIX}" --role supplier
        dsconf supplier2 replication enable --suffix "${SUFFIX}" --role supplier
        dsconf supplier1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
        dsconf supplier2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
        dsconf supplier1 repl-agmt create --suffix "${SUFFIX}"
        dsconf supplier2 repl-agmt create --suffix "${SUFFIX}"
    """

    # Step 3: No keepalive entry should exist on any supplier
    verify_keepalive_entries(topo_m2, False)

    # Step 4: Perform online init of supplier2 from supplier1
    agmt = Agreements(m1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False

    # Step 5: All keepalive entries should exist on every supplier
    #  Verify the keepalive entries once replication is in sync
    # (this is the step that fails when the bug is not fixed)
    repl.wait_for_ruv(m2, m1)
    verify_keepalive_entries(topo_m2, True)
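
`verify_keepalive_entries` is defined in the test module; a minimal sketch, assuming keepalive entries are ldapsubentries named 'cn=repl keep alive <rid>' directly under the replicated suffix (the filter is an assumption):

import ldap
from lib389._constants import DEFAULT_SUFFIX

def verify_keepalive_entries(topo, expected):
    # Check on every supplier whether keepalive entries are (or are not) present
    for supplier in topo.ms.values():
        entries = supplier.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                    '(&(objectclass=ldapsubentry)(cn=repl keep alive *))')
        if expected:
            assert entries, f'No keepalive entries found on {supplier.serverid}'
        else:
            assert not entries, f'Unexpected keepalive entries on {supplier.serverid}'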
Example 9
def test_ticket48759(topology_st):
    """
    The fix for ticket 48759 has to prevent plugin calls for tombstone purging

    The test uses the memberof and retrocl plugins to verify this.
    Without the fix, the memberof plugin is called during tombstone purging:
        if the tombstone entry is a group,
        it modifies the user entries of that group,
        and if retrocl is enabled this mod is written to the retro changelog

    The test sequence is:
    - enable replication
    - enable memberof and retro cl plugin
    - add user entries
    - add a group and add the users as members
    - verify memberof is set to users
    - delete the group
    - verify memberof is removed from users
    - add group again
    - verify memberof is set to users
    - get number of changes in retro cl for one user
    - configure tombstone purging
    - wait for purge interval to pass
    - add a dummy entry to increase maxcsn
    - wait for purge interval to pass two times
    - get number of changes in retro cl for user again
    - assert there was no additional change
    """

    log.info('Testing Ticket 48759 - no plugin calls for tombstone purging')

    #
    # Setup Replication
    #
    log.info('Setting up replication...')
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_supplier(topology_st.standalone)
    #
    # enable dynamic plugins, memberof and retro cl plugin
    #
    log.info('Enable plugins...')
    try:
        topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
    except ldap.LDAPError as e:
        log.error('Failed to enable dynamic plugins! ' + e.args[0]['desc'])
        assert False

    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
    # Configure memberOf group attribute
    try:
        topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
                                        [(ldap.MOD_REPLACE,
                                          'memberofgroupattr',
                                          b'member')])
    except ldap.LDAPError as e:
        log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc'])
        assert False

    #
    #  create some users and a group
    #
    log.info('create users and group...')
    for idx in range(1, 5):
        try:
            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
            topology_st.standalone.add_s(Entry((USER_DN,
                                                {'objectclass': 'top extensibleObject'.split(),
                                                 'uid': 'member%d' % (idx)})))
        except ldap.LDAPError as e:
            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc']))
            assert False

    _add_group_with_members(topology_st)

    MEMBER_VAL = ("uid=member2,%s" % DEFAULT_SUFFIX)
    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)

    # delete group
    log.info('delete group...')
    try:
        topology_st.standalone.delete_s(GROUP_DN)
    except ldap.LDAPError as e:
        log.error('Failed to delete entry: ' + e.args[0]['desc'])
        assert False

    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, False)

    # add group again
    log.info('add group again')
    _add_group_with_members(topology_st)
    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)

    #
    # get number of changelog records for one user entry
    log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL)
    changes_pre = _find_retrocl_changes(topology_st, MEMBER_VAL)

    # configure tombstone purging
    args = {REPLICA_PRECISE_PURGING: 'on',
            REPLICA_PURGE_DELAY: '5',
            REPLICA_PURGE_INTERVAL: '5'}
    try:
        Repl_DN = 'cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config'
        topology_st.standalone.modify_s(Repl_DN,
                                        [(ldap.MOD_ADD, 'nsDS5ReplicaPreciseTombstonePurging', b'on'),
                                         (ldap.MOD_ADD, 'nsDS5ReplicaPurgeDelay', b'5'),
                                         (ldap.MOD_ADD, 'nsDS5ReplicaTombstonePurgeInterval', b'5')])
    except:
        log.fatal('Failed to configure replica')
        assert False

    # Wait for the interval to pass
    log.info('Wait for tombstone purge interval to pass ...')
    time.sleep(6)

    # Add an entry to trigger replication
    log.info('add dummy entry')
    try:
        topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
            'objectclass': 'top person'.split(),
            'sn': 'user',
            'cn': 'entry1'})))
    except ldap.LDAPError as e:
        log.error('Failed to add entry: ' + e.args[0]['desc'])
        assert False

    # check memberof is still correct
    time.sleep(1)
    _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)

    # Wait for the interval to pass again
    log.info('Wait for tombstone purge interval to pass again...')
    time.sleep(10)

    #
    # get number of changelog records for one user entry
    log.info('get number of changes for %s after tombstone purging' % MEMBER_VAL)
    changes_post = _find_retrocl_changes(topology_st, MEMBER_VAL)

    assert (changes_pre == changes_post)
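
`_find_retrocl_changes` (and `_find_memberof`) are local helpers. A minimal sketch of the retro changelog lookup, assuming it simply counts changelog records whose targetDn is the given entry (suffix and filter are assumptions):

import ldap

RETROCL_SUFFIX = 'cn=changelog'

def _find_retrocl_changes(topology_st, target_dn):
    # Count retro changelog records that target the given DN
    ents = topology_st.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE,
                                           '(targetDn=%s)' % target_dn)
    return len(ents)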
Example 10
def replication_check(topology_m2):
    repl = ReplicationManager(SUFFIX)
    master1 = topology_m2.ms["master1"]
    master2 = topology_m2.ms["master2"]
    return repl.test_replication(master1, master2)
Example 11
def test_moving_entry_make_online_init_fail(topo_m2):
    """
    Moving an entry could make the online init fail

    :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e
    :setup: Two suppliers replication setup
    :steps:
         1. Generate DIT_0
         2. Generate password policy for DIT_0
         3. Create users for DIT_0
         4. Turn idx % 2 == 0 users into tombstones
         5. Generate DIT_1
         6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1
         7. Move 'ou=OU0,dc=example,dc=com' to DIT_1
         8. Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com'
         9. Init replicas
         10. Number of entries should match on both suppliers

    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
         5. Success
         6. Success
         7. Success
         8. Success
         9. Success
         10. Success
    """

    M1 = topo_m2.ms["supplier1"]
    M2 = topo_m2.ms["supplier2"]

    log.info("Generating DIT_0")
    idx = 0
    add_ou_entry(M1, idx, DEFAULT_SUFFIX)
    log.info("Created entry: ou=OU0, dc=example, dc=com")

    ou0 = 'ou=OU%d' % idx
    first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX)
    add_ou_entry(M1, idx, first_parent)
    log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com")

    add_ldapsubentry(M1, first_parent)

    ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx)
    second_parent = 'ou=OU%d,%s' % (idx, first_parent)
    for idx in range(0, 9):
        add_user_entry(M1, idx, ou_name)
        if idx % 2 == 0:
            log.info("Turning tuser%d into a tombstone entry" % idx)
            del_user_entry(M1, idx, ou_name)

    log.info('%s => %s => %s => 10 USERS' %
             (DEFAULT_SUFFIX, first_parent, second_parent))

    log.info("Generating DIT_1")
    idx = 1
    add_ou_entry(M1, idx, DEFAULT_SUFFIX)
    log.info("Created entry: ou=OU1,dc=example,dc=com")

    third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX)
    add_ou_entry(M1, idx, third_parent)
    log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com")

    add_ldapsubentry(M1, third_parent)

    log.info("Moving %s to DIT_1" % second_parent)
    OrganizationalUnits(M1, second_parent).get('OU0').rename(
        ou0, newsuperior=third_parent)

    log.info("Moving %s to DIT_1" % first_parent)
    fourth_parent = '%s,%s' % (ou0, third_parent)
    OrganizationalUnits(M1, first_parent).get('OU0').rename(
        ou0, newsuperior=fourth_parent)

    fifth_parent = '%s,%s' % (ou0, fourth_parent)

    ou_name = 'ou=OU0,ou=OU1'
    log.info("Moving USERS to %s" % fifth_parent)
    for idx in range(0, 9):
        if idx % 2 == 1:
            rename_entry(M1, idx, ou_name, fifth_parent)

    log.info('%s => %s => %s => %s => 10 USERS' %
             (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent))

    log.info("Run Initialization.")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=5)

    m1entries = M1.search_s(
        DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
        '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
    m2entries = M2.search_s(
        DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
        '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')

    log.info("m1entry count - %d", len(m1entries))
    log.info("m2entry count - %d", len(m2entries))

    assert len(m1entries) == len(m2entries)
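
The entry helpers used above (add_ou_entry, add_user_entry, del_user_entry, rename_entry, add_ldapsubentry) live in the test module. A minimal sketch of the first two, assuming the naming scheme visible in the log messages (OU<idx> and tuser<idx>); note that add_user_entry takes a parent relative to the suffix:

from lib389 import Entry
from lib389._constants import DEFAULT_SUFFIX

def add_ou_entry(server, idx, parent):
    # Add ou=OU<idx> under the given parent DN
    name = 'OU%d' % idx
    dn = 'ou=%s,%s' % (name, parent)
    server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'],
                             'ou': name})))

def add_user_entry(server, idx, parent):
    # Add uid=tuser<idx> under <parent>,<DEFAULT_SUFFIX>
    name = 'tuser%d' % idx
    dn = 'uid=%s,%s,%s' % (name, parent, DEFAULT_SUFFIX)
    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'extensibleObject'],
                             'uid': name, 'cn': name, 'sn': name})))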
Example 12
def restore_supplier4(topology_m4):
    """In our tests will always be removing supplier 4, so we need a common
    way to restore it for another test
    """

    # Restart the remaining suppliers to allow rid 4 to be reused.
    for inst in topology_m4.ms.values():
        inst.restart()

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.join_supplier(topology_m4.ms["supplier1"],
                       topology_m4.ms["supplier4"])

    # Add the 2,3 -> 4 agmt.
    repl.ensure_agreement(topology_m4.ms["supplier2"],
                          topology_m4.ms["supplier4"])
    repl.ensure_agreement(topology_m4.ms["supplier3"],
                          topology_m4.ms["supplier4"])
    # And in reverse ...
    repl.ensure_agreement(topology_m4.ms["supplier4"],
                          topology_m4.ms["supplier2"])
    repl.ensure_agreement(topology_m4.ms["supplier4"],
                          topology_m4.ms["supplier3"])

    log.info('Supplier 4 has been successfully restored.')
Example 13
def create_topology(topo_dict, suffix=DEFAULT_SUFFIX):
    """Create a requested topology. Cascading replication scenario isn't supported

    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.MASTER: num,
                                   ReplicaRole.CONSUMER: num}
    :type topo_dict: dict
    :param suffix: a suffix for the replication
    :type suffix: str

    :return: TopologyMain object
    """

    if not topo_dict:
        raise ValueError("You need to specify the dict. For instance: {ReplicaRole.STANDALONE: 1}")

    if ReplicaRole.HUB in topo_dict.keys():
        raise NotImplementedError("Cascading replication scenario isn't supported. "
                                  "Please use an existing topology or create your own.")

    topo = _create_instances(topo_dict, suffix)

    # Start with a single master, and create it "first".
    first_master = None
    try:
        first_master = list(topo.ms.values())[0]
        log.info("Creating replication topology.")
        # Now get the first master ready.
        repl = ReplicationManager(DEFAULT_SUFFIX)
        repl.create_first_master(first_master)
    except IndexError:
        pass

    # Now init the other masters from this.
    # This will reinit m, and put a bi-directional agreement
    # in place.
    for m in topo.ms.values():
        # Skip firstmaster.
        if m is first_master:
            continue
        log.info("Joining master %s to %s ..." % (m.serverid, first_master.serverid))
        repl.join_master(first_master, m)

    # Mesh the master agreements.
    for mo in topo.ms.values():
        for mi in topo.ms.values():
            if mo is mi:
                continue
            log.info("Ensuring master %s to %s ..." % (mo.serverid, mi.serverid))
            repl.ensure_agreement(mo, mi)

    # Add master -> consumer agreements.
    for c in topo.cs.values():
        log.info("Joining consumer %s from %s ..." % (c.serverid, first_master.serverid))
        repl.join_consumer(first_master, c)

    for m in topo.ms.values():
        for c in topo.cs.values():
            log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid))
            repl.ensure_agreement(m, c)

    # Clear out the tmp dir
    for instance in topo:
        instance.clearTmpDir(__file__)

    return topo
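
A minimal usage sketch for create_topology, assuming ReplicaRole comes from lib389._constants and the returned TopologyMain exposes the instances through its .ms and .cs dictionaries:

from lib389._constants import ReplicaRole

# Two masters and one consumer replicating DEFAULT_SUFFIX
topo = create_topology({ReplicaRole.MASTER: 2, ReplicaRole.CONSUMER: 1})
master1 = topo.ms["master1"]
consumer1 = topo.cs["consumer1"]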
Example 14
def test_memberof_with_repl(topo):
    """Test that we allowed to enable MemberOf plugin in dedicated consumer

    :id: ef71cd7c-e792-41bf-a3c0-b3b38391cbe5
    :setup: 1 Supplier - 1 Hub - 1 Consumer
    :steps:
        1. Configure replication to EXCLUDE memberof
        2. Enable memberof plugin
        3. Create users/groups
        4. Make user_0 member of group_0
        5. Checks that user_0 is memberof group_0 on M,H,C
        6. Make group_0 member of group_1 (nest group)
        7. Checks that user_0 is memberof group_0 and group_1 on M,H,C
        8. Check group_0 is memberof group_1 on M,H,C
        9. Remove group_0 from group_1
        10. Check group_0 and user_0 are NOT memberof group_1 on M,H,C
        11. Remove user_0 from group_0
        12. Check user_0 is not memberof group_0 and group_1 on M,H,C
        13. Disable memberof on C
        14. Make user_0 member of group_0
        15. Checks that user_0 is memberof group_0 on M,H but not on C
        16. Enable memberof on C
        17. Checks that user_0 is memberof group_0 on M,H but not on C
        18. Run memberof fixup task
        19. Checks that user_0 is memberof group_0 on M,H,C
    :expectedresults:
        1. Configuration should be successful
        2. Plugin should be enabled
        3. Users and groups should be created
        4. user_0 should be member of group_0
        5. user_0 should be memberof group_0 on M,H,C
        6. group_0 should be member of group_1
        7. user_0 should be memberof group_0 and group_1 on M,H,C
        8. group_0 should be memberof group_1 on M,H,C
        9. group_0 from group_1 removal should be successful
        10. group_0 and user_0 should not be memberof group_1 on M,H,C
        11. user_0 from group_0 remove should be successful
        12. user_0 should not be memberof group_0 and group_1 on M,H,C
        13. memberof should be disabled on C
        14. user_0 should be member of group_0
        15. user_0 should be memberof group_0 on M,H and should not on C
        16. Enable memberof on C should be successful
        17. user_0 should be memberof group_0 on M,H should not on C
        18. memberof fixup task should be successful
        19. user_0 should be memberof group_0 on M,H,C
    """

    M1 = topo.ms["supplier1"]
    H1 = topo.hs["hub1"]
    C1 = topo.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # Step 1 & 2
    M1.config.enable_log('audit')
    config_memberof(M1)
    M1.restart()

    H1.config.enable_log('audit')
    config_memberof(H1)
    H1.restart()

    C1.config.enable_log('audit')
    config_memberof(C1)
    C1.restart()

    # Declare lists of users and groups
    test_users = []
    test_groups = []

    # Step 3
    # In a for loop, create users and add them to the user list
    # it creates user_0 to user_9 (range is fun)
    for i in range(10):
        CN = '%s%d' % (USER_CN, i)
        users = UserAccounts(M1, SUFFIX)
        user_props = TEST_USER_PROPERTIES.copy()
        user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN})
        testuser = users.create(properties=user_props)
        time.sleep(2)
        test_users.append(testuser)

    # In a for loop, create groups and add them to the group list
    # it creates group_0 to group_2 (range is fun)
    for i in range(3):
        CN = '%s%d' % (GROUP_CN, i)
        groups = Groups(M1, SUFFIX)
        testgroup = groups.create(properties={'cn': CN})
        time.sleep(2)
        test_groups.append(testgroup)

    # Step 4
    # Now start testing by adding different users to different groups
    if not ds_is_older('1.3.7'):
        test_groups[0].remove('objectClass', 'nsMemberOf')

    member_dn = test_users[0].dn
    grp0_dn = test_groups[0].dn
    grp1_dn = test_groups[1].dn

    test_groups[0].add_member(member_dn)
    repl.wait_while_replication_is_progressing(M1, C1)

    # Step 5
    for i in [M1, H1, C1]:
        _find_memberof(i, member_dn, grp0_dn)

    # Step 6
    test_groups[1].add_member(test_groups[0].dn)
    repl.wait_while_replication_is_progressing(M1, C1)

    # Step 7
    for i in [grp0_dn, grp1_dn]:
        for inst in [M1, H1, C1]:
            _find_memberof(inst, member_dn, i)

    # Step 8
    for i in [M1, H1, C1]:
        _find_memberof(i, grp0_dn, grp1_dn)

    # Step 9
    test_groups[1].remove_member(test_groups[0].dn)
    time.sleep(2)

    # Step 10
    # For the negative test case, we expect an AssertionError
    for inst in [M1, H1, C1]:
        for i in [grp0_dn, member_dn]:
            with pytest.raises(AssertionError):
                _find_memberof(inst, i, grp1_dn)

    # Step 11
    test_groups[0].remove_member(member_dn)
    time.sleep(2)

    # Step 12
    for inst in [M1, H1, C1]:
        for grp in [grp0_dn, grp1_dn]:
            with pytest.raises(AssertionError):
                _find_memberof(inst, member_dn, grp)

    # Step 13
    C1.plugins.disable(name=PLUGIN_MEMBER_OF)
    C1.restart()

    # Step 14
    test_groups[0].add_member(member_dn)
    repl.wait_while_replication_is_progressing(M1, C1)

    # Step 15
    for i in [M1, H1]:
        _find_memberof(i, member_dn, grp0_dn)
    with pytest.raises(AssertionError):
        _find_memberof(C1, member_dn, grp0_dn)

    # Step 16
    memberof = MemberOfPlugin(C1)
    memberof.enable()
    C1.restart()

    # Step 17
    for i in [M1, H1]:
        _find_memberof(i, member_dn, grp0_dn)
    with pytest.raises(AssertionError):
        _find_memberof(C1, member_dn, grp0_dn)

    # Step 18
    memberof.fixup(SUFFIX)
    # have to sleep instead of task.wait() because the task opens a thread and exits
    time.sleep(5)

    # Step 19
    for i in [M1, H1, C1]:
        _find_memberof(i, member_dn, grp0_dn)
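
`config_memberof` and `_find_memberof` are helpers from this test module; a minimal sketch of the assumed behaviour (the real helpers may set additional memberOf options):

import ldap
from lib389.plugins import MemberOfPlugin

def config_memberof(server):
    # Enable memberOf and let it operate on all backends
    memberof = MemberOfPlugin(server)
    memberof.enable()
    memberof.replace('memberOfAllBackends', 'on')

def _find_memberof(server, member_dn, group_dn):
    # Assert that member_dn carries memberOf=group_dn; raises AssertionError otherwise
    ents = server.search_s(member_dn, ldap.SCOPE_BASE, '(memberOf=%s)' % group_dn)
    assert len(ents) == 1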
Example 15
    def test_memberof_groups(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with memberOf and groups

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
        :setup: Two master replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Enable memberOf plugin
            2. Add 30 users to m1 and wait for replication to happen
            3. Pause replication
            4. Create a group on m1 and m2
            5. Create a group on m1 and m2, delete from m1
            6. Create a group on m1, delete from m1, and create on m2,
            7. Create a group on m2 and m1, delete from m1
            8. Create two different groups on m2
            9. Resume replication
            10. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["master1"]
        M2 = topology_m2.ms["master2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_groups_m1 = Groups(M1, base_m2.dn, rdn=None)
        test_groups_m2 = Groups(M2, base_m2.dn, rdn=None)

        repl = ReplicationManager(SUFFIX)

        for inst in topology_m2.ms.values():
            memberof = MemberOfPlugin(inst)
            memberof.enable()
            agmt = Agreements(inst).list()[0]
            agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',
                               '(objectclass=*) $ EXCLUDE '),
                              ('nsDS5ReplicatedAttributeList',
                               '(objectclass=*) $ EXCLUDE memberOf'))
            inst.restart()
        user_dns = []
        for user_num in range(10):
            user_trio = []
            for num in range(0, 30, 10):
                user = _create_user(test_users_m1, 1200 + user_num + num)
                user_trio.append(user.dn)
            user_dns.append(user_trio)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Check a simple conflict")
        group_num = 0
        _create_group(test_groups_m1,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)

        log.info("Check a add - del")
        group_num += 1
        _create_group(test_groups_m1,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _delete_group(test_groups_m1, group_num)

        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        group_num += 1
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])
        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        group_dns_m1 = [group.dn for group in test_groups_m1.list()]
        group_dns_m2 = [group.dn for group in test_groups_m2.list()]
        assert set(group_dns_m1) == set(group_dns_m2)
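
`_create_group` and `_delete_group` are helpers from the conflict-resolution test module; a minimal sketch, assuming groups are named 'group_<num>' and created with the given member DNs, with the optional sleep separating CSNs:

import time

def _create_group(groups, num, members, sleep=False):
    # Create cn=group_<num> with the given list of member DNs
    group = groups.create(properties={'cn': 'group_{}'.format(num),
                                      'member': members})
    if sleep:
        time.sleep(1)
    return group

def _delete_group(groups, num, sleep=False):
    # Delete cn=group_<num>
    group = groups.get('group_{}'.format(num))
    group.delete()
    if sleep:
        time.sleep(1)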
Example 16
def replication_check(topology_m2):
    repl = ReplicationManager(SUFFIX)
    supplier1 = topology_m2.ms["supplier1"]
    supplier2 = topology_m2.ms["supplier2"]
    return repl.test_replication(supplier1, supplier2)
Example 17
    def test_managed_entries(self, topology_m2):
        """Check that conflict properly resolved for operations
        with managed entries

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
        :setup: Two master replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Create ou=managed_users and ou=managed_groups under test container
            2. Configure managed entries plugin and add a template to test container
            3. Add a user to m1 and wait for replication to happen
            4. Pause replication
            5. Create a user on m1 and m2 with the same group ID on both masters
            6. Create a user on m1 and m2 with a different group ID on each master
            7. Resume replication
            8. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["master1"]
        M2 = topology_m2.ms["master2"]
        repl = ReplicationManager(SUFFIX)

        ous = OrganizationalUnits(M1, DEFAULT_SUFFIX)
        ou_people = ous.create(properties={'ou': 'managed_people'})
        ou_groups = ous.create(properties={'ou': 'managed_groups'})

        test_users_m1 = UserAccounts(M1,
                                     DEFAULT_SUFFIX,
                                     rdn='ou={}'.format(ou_people.rdn))
        test_users_m2 = UserAccounts(M2,
                                     DEFAULT_SUFFIX,
                                     rdn='ou={}'.format(ou_people.rdn))

        # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)
        conts = nsContainers(M1, SUFFIX)
        template = conts.create(
            properties={
                'objectclass':
                'top mepTemplateEntry extensibleObject'.split(),
                'cn':
                'MEP Template',
                'mepRDNAttr':
                'cn',
                'mepStaticAttr':
                ['objectclass: posixGroup', 'objectclass: extensibleObject'],
                'mepMappedAttr':
                ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
            })
        repl.test_replication(M1, M2)

        for inst in topology_m2.ms.values():
            conts = nsContainers(
                inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))
            conts.create(
                properties={
                    'objectclass': 'top extensibleObject'.split(),
                    'cn': 'config',
                    'originScope': ou_people.dn,
                    'originFilter': 'objectclass=posixAccount',
                    'managedBase': ou_groups.dn,
                    'managedTemplate': template.dn
                })
            inst.restart()

        _create_user(test_users_m1, 1, 1)

        topology_m2.pause_all_replicas()

        _create_user(test_users_m1, 2, 2, sleep=True)
        _create_user(test_users_m2, 2, 2, sleep=True)

        _create_user(test_users_m1, 3, 3, sleep=True)
        _create_user(test_users_m2, 3, 33)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
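For reference, given the template configured above (mepRDNAttr: cn, 'cn: $uid', 'gidNumber: $uidNumber'), the Managed Entries plugin should create one companion group per posixAccount under ou=managed_groups. A hedged sketch of how that could be verified (the helper name and the check itself are assumptions, not part of the original test) is:

from lib389.idm.group import Groups

def _assert_managed_group(inst, ou_groups, uid, uid_number):
    # The managed entry RDN is built from mepRDNAttr ('cn'), whose value is
    # mapped from the origin entry's uid ('cn: $uid').
    groups = Groups(inst, ou_groups.dn, rdn=None)
    mep_group = groups.get(uid)
    assert mep_group.get_attr_val_utf8('gidNumber') == str(uid_number)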
Esempio n. 18
0
def test_mail_attr_repl(topo_r):
    """Check that no crash happens during mail attribute replication

    :id: 959edc84-05be-4bf9-a541-53afae482052
    :setup: Replication setup with master and consumer instances,
            test user on master
    :steps:
        1. Check that user was replicated to consumer
        2. Back up mail database file
        3. Remove mail attribute from the user entry
        4. Restore mail database
        5. Search for the entry with a substring 'mail=user*'
        6. Search for the entry once again to make sure that server is alive
    :expectedresults:
        1. The user should be replicated to consumer
        2. Operation should be successful
        3. The mail attribute should be removed
        4. Operation should be successful
        5. Search should be successful
        6. No crash should happen
    """

    master = topo_r.ms["master1"]
    consumer = topo_r.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m_users = UserAccounts(topo_r.ms["master1"], DEFAULT_SUFFIX)
    m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES)
    m_user.ensure_present('mail', '*****@*****.**')

    log.info("Check that replication is working")
    repl.wait_for_replication(master, consumer)
    c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX)
    c_user = c_users.get('testuser')

    c_bes = Backends(consumer)
    c_be = c_bes.get(DEFAULT_SUFFIX)

    db_dir = c_be.get_attr_val_utf8('nsslapd-directory')

    mail_db = list(filter(lambda fl: fl.startswith("mail"),
                          os.listdir(db_dir)))
    assert mail_db, "mail.* wasn't found in {}".format(db_dir)
    mail_db_path = os.path.join(db_dir, mail_db[0])
    backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0])

    consumer.stop()
    log.info("Back up {} to {}".format(mail_db_path, backup_path))
    shutil.copyfile(mail_db_path, backup_path)
    consumer.start()

    log.info("Remove 'mail' attr from master")
    m_user.remove_all('mail')

    log.info("Wait for the replication to happen")
    repl.wait_for_replication(master, consumer)

    consumer.stop()
    log.info("Restore {} to {}".format(backup_path, mail_db_path))
    shutil.copyfile(backup_path, mail_db_path)
    consumer.start()

    log.info("Make a search for mail attribute in attempt to crash server")
    c_user.get_attr_val("mail")

    log.info("Make sure that server hasn't crashed")
    repl.test_replication(master, consumer)
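Step 5 of the docstring describes a substring search for 'mail=user*', while the code above reads the attribute directly. A minimal sketch of that search, assuming the module's usual python-ldap imports, would be:

def _search_mail_substring(consumer):
    # Substring search that exercises the (possibly stale) mail index.
    return consumer.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                             '(mail=user*)', ['mail'])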
Esempio n. 19
0
def test_basic_with_hub(topo):
    """Check that basic operations work in cascading replication, this includes
    testing plugins that perform internal operatons, and replicated password
    policy state attributes.

    :id: 4ac85552-45bc-477b-89a4-226dfff8c6cc
    :setup: 1 master, 1 hub, 1 consumer
    :steps:
        1. Enable memberOf plugin and set password account lockout settings
        2. Restart the instance
        3. Add a user
        4. Add a group
        5. Test that the replication works
        6. Add the user as a member to the group
        7. Test that the replication works
        8. Issue bad binds to update passwordRetryCount
        9. Test that replication works
        10. Check that passwordRetryCount was replicated
    :expectedresults:
        1. Should be a success
        2. Should be a success
        3. Should be a success
        4. Should be a success
        5. Should be a success
        6. Should be a success
        7. Should be a success
        8. Should be a success
        9. Should be a success
        10. Should be a success
    """

    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    master = topo.ms["master1"]
    consumer = topo.cs["consumer1"]
    hub = topo.hs["hub1"]

    for inst in topo:
        config_memberof(inst)
        inst.config.set('passwordlockout', 'on')
        inst.config.set('passwordlockoutduration', '60')
        inst.config.set('passwordmaxfailure', '3')
        inst.config.set('passwordIsGlobalPolicy', 'on')

    # Create user
    user1 = UserAccount(master, BIND_DN)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'sn': BIND_RDN,
        'cn': BIND_RDN,
        'uid': BIND_RDN,
        'inetUserStatus': '1',
        'objectclass': 'extensibleObject',
        'userpassword': PASSWORD
    })
    user1.create(properties=user_props, basedn=SUFFIX)

    # Create group
    groups = Groups(master, DEFAULT_SUFFIX)
    group = groups.create(properties={'cn': 'group'})

    # Test replication
    repl_manager.test_replication(master, consumer)

    # Trigger memberOf plugin by adding user to group
    group.replace('member', user1.dn)

    # Test replication once more
    repl_manager.test_replication(master, consumer)

    # Issue a bad password to update passwordRetryCount
    try:
        master.simple_bind_s(user1.dn, "badpassword")
    except ldap.LDAPError:
        # The bind is expected to fail; we only need the failed attempt recorded
        pass

    # Test replication one last time
    master.simple_bind_s(DN_DM, PASSWORD)
    repl_manager.test_replication(master, consumer)

    # Finally check if passwordRetryCount was replicated to the hub and consumer
    user1 = UserAccount(hub, BIND_DN)
    count = user1.get_attr_val_int('passwordRetryCount')
    if count is None:
        log.fatal('passwordRetryCount was not replicated to hub')
        assert False
    if int(count) != 1:
        log.fatal('passwordRetryCount has unexpected value: {}'.format(count))
        assert False

    user1 = UserAccount(consumer, BIND_DN)
    count = user1.get_attr_val_int('passwordRetryCount')
    if count is None:
        log.fatal('passwordRetryCount was not replicated to consumer')
        assert False
    if int(count) != 1:
        log.fatal('passwordRetryCount has unexpected value: {}'.format(count))
        assert False
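The config_memberof() helper called at the top of this test is not shown here; a minimal sketch of what it might do (an assumption based on the docstring's step 1, not the original implementation) is:

from lib389.plugins import MemberOfPlugin

def config_memberof(inst):
    # Enable the MemberOf plugin so that group membership updates trigger
    # internal operations that must also replicate correctly.
    memberof = MemberOfPlugin(inst)
    memberof.enable()
    memberof.set_autoaddoc('nsMemberOf')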
Esempio n. 20
0
def check_all_replicated():
    """
    Will check replication status
    """
    for supplier in [SUPPLIER2, CONSUMER1, CONSUMER2]:
        ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(SUPPLIER1, supplier, timeout=100)
Esempio n. 21
0
def test_repl_modrdn(topo_m2):
    """Test that replicated MODRDN does not break replication

    :id: a3e17698-9eb4-41e0-b537-8724b9915fa6
    :setup: Two masters replication setup
    :steps:
        1. Add 3 test OrganizationalUnits A, B and C
        2. Add 1 test user under OU=A
        3. Add same test user under OU=B
        4. Stop Replication
        5. Apply modrdn to M1 - move test user from OU A -> C
        6. Apply modrdn on M2 - move test user from OU B -> C
        7. Start Replication
        8. Check that there should be only one test entry under ou=C on both masters
        9. Check that the replication is working fine both ways M1 <-> M2
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass
        5. This should pass
        6. This should pass
        7. This should pass
        8. This should pass
        9. This should pass
    """

    master1 = topo_m2.ms["master1"]
    master2 = topo_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    log.info(
        "Add test entries - add 3 OUs and the same test user under 2 different OUs")
    OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX)
    OU_A = OUs.create(properties={
        'ou': 'A',
        'description': 'A',
    })
    OU_B = OUs.create(properties={
        'ou': 'B',
        'description': 'B',
    })
    OU_C = OUs.create(properties={
        'ou': 'C',
        'description': 'C',
    })

    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn))
    tuser_A = users.create(properties=TEST_USER_PROPERTIES)

    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
    tuser_B = users.create(properties=TEST_USER_PROPERTIES)

    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Stop Replication")
    topo_m2.pause_all_replicas()

    log.info("Apply modrdn to M1 - move test user from OU A -> C")
    master1.rename_s(tuser_A.dn,
                     'uid=testuser1',
                     newsuperior=OU_C.dn,
                     delold=1)

    log.info("Apply modrdn on M2 - move test user from OU B -> C")
    master2.rename_s(tuser_B.dn,
                     'uid=testuser1',
                     newsuperior=OU_C.dn,
                     delold=1)

    log.info("Start Replication")
    topo_m2.resume_all_replicas()

    log.info("Wait for sometime for repl to resume")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info(
        "Check that there should be only one test entry under ou=C on both masters"
    )
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)
Esempio n. 22
0
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but do *not* make the user a member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already defines replication via a group
           - define the group as the groupDN for replication and 60sec as the fetch interval
           - pause the RAs in both directions
           - define the user as the bindDN of the RAs
        4. Restart the servers.
            This sets the fetch time to 0, so the next session will refetch the group
        5. Before resuming the RAs, add the user to the group (on both sides, as replication is not working at that time)
        6. Trigger an update and check that replication is working and that
           no failure 'does not have permission to supply replication updates to the replica' is logged on the supplier side
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    # If you need any test suite initialization,
    # please, write additional fixture for that (including finalizer).
    # Topology for suites are predefined in lib389/topologies.py.

    # If you need host, port or any other data about instance,
    # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'uid': uid,
        'cn': uid,
        'sn': '_%s' % uid,
        'userpassword': PASSWD.encode(),
        'description': b'value creation'
    })
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {'cn': 'group1', 'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
    ])

    # Then pause the replication agreements to prevent them from trying to acquire
    # the replica while the user is not a member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members defined in the replica config.
    #
    # Because the user is NOT a member of the group, replication would not work
    # until bindDNGroupCheckInterval expires.
    #
    # With the fix, the first fetch is not taken into account (fetch time=0),
    # so the group will be fetched on the first session.
    M1.restart()
    M2.restart()

    # Replication is broken at this point, so the same update has to be applied
    # directly on both sides (no better solution was found except a total update)
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # Trigger updates to make sure a replication session occurs, and give it some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working
    ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not a member of the group
    regex = re.compile(
        "does not have permission to supply replication updates to the replica."
    )
    errorlog_M1 = open(M1.errlog, "r")
    errorlog_M2 = open(M2.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert (restart_location_M1 != -1)
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert (restart_location_M2 != -1)

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1,
                             regex,
                             start_location=restart_location_M1)
    assert (count <= 1)
    count = pattern_errorlog(errorlog_M2,
                             regex,
                             start_location=restart_location_M2)
    assert (count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
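find_start_location() and pattern_errorlog() are shared helpers not shown in this example; sketches of plausible implementations (the 'starting up' marker and the exact signatures are assumptions inferred from the call sites) are:

def find_start_location(logfh, n):
    # Return the file offset of the n-th 'starting up' marker counted from the
    # end of the error log, or -1 if there were fewer than n restarts.
    positions = []
    logfh.seek(0)
    pos = logfh.tell()
    line = logfh.readline()
    while line:
        if 'starting up' in line:
            positions.append(pos)
        pos = logfh.tell()
        line = logfh.readline()
    return positions[-n] if len(positions) >= n else -1


def pattern_errorlog(logfh, regex, start_location=0):
    # Count how many log lines match the compiled regex after the given offset.
    logfh.seek(start_location)
    return sum(1 for line in logfh if regex.search(line))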
Esempio n. 23
0
def test_clean_shutdown_crash(topology_m2):
    """Check that server didn't crash after shutdown when running CleanAllRUV task

    :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf
    :setup: Replication setup with two masters
    :steps:
        1. Enable TLS on both masters
        2. Reconfigure both agreements to use TLS Client auth
        3. Stop master2
        4. Run the CleanAllRUV task
        5. Restart master1
        6. Check if master1 didn't crash
        7. Restart master1 again
        8. Check if master1 didn't crash

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
    """

    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    log.info('Enabling TLS')
    [i.enable_tls() for i in topology_m2]

    log.info('Creating replication dns')
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    log.info('Changing auth type')
    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]
    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m2.sslport),
    )

    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m1.sslport),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    log.info('Stopping master2')
    m2.stop()

    log.info('Run the cleanAllRUV task')
    cruv_task = CleanAllRUVTask(m1)
    cruv_task.create(
        properties={
            'replica-id': repl.get_rid(m1),
            'replica-base-dn': DEFAULT_SUFFIX,
            'replica-force-cleaning': 'no',
            'replica-certify-all': 'yes'
        })

    m1.restart()

    log.info('Check if master1 crashed')
    assert not m1.detectDisorderlyShutdown()

    log.info('Repeat')
    m1.restart()
    assert not m1.detectDisorderlyShutdown()
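Here the task is deliberately left racing against the restart (master2 is down and replica-certify-all is 'yes', so it cannot finish). In the normal case one would block on the task instead; a hedged sketch using the lib389 task API could look like:

def _run_and_wait_cleanallruv(inst, rid):
    # Launch cleanAllRUV for the given replica id and wait for it to finish.
    task = CleanAllRUVTask(inst)
    task.create(properties={'replica-id': rid,
                            'replica-base-dn': DEFAULT_SUFFIX,
                            'replica-force-cleaning': 'no'})
    task.wait()
    return task.get_exit_code()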
Esempio n. 24
0
    def test_nested_entries(self, topology_m3, base_m3):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
        :setup: Three-master replication, a test container for entries; plugin logging,
                audit log, replica error log and access log for internal operations enabled
        :steps:
            1. Add 11 containers to m1 and wait for replication to happen
            2. Pause replication
            3. Create two child entries under each of two entries
            4. Create three child entries under each of three entries
            5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
               on m2 - delete one parent and create a child
            6. Test a few more parent-child combinations with three instances
            7. Resume replication
            8. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m3.ms["master1"]
        M2 = topology_m3.ms["master2"]
        M3 = topology_m3.ms["master3"]
        repl = ReplicationManager(SUFFIX)

        cont_list = []
        for num in range(11):
            cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num))
            cont_list.append(cont)

        repl.test_replication(M1, M2)
        repl.test_replication(M1, M3)

        topology_m3.pause_all_replicas()

        log.info("Create two child entries under each of two entries")
        cont_num = -1
        for num in range(2):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

        log.info("Create three child entries under each of three entries")
        for num in range(3):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)
            _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

        log.info(
            "Create two parents on m1 and m2, then on m1 - create a child and delete one parent,"
            "on m2 - delete one parent and create a child")
        for inst1, inst2 in ((M1, M2), (M2, M1)):
            cont_num += 1
            cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn,
                                            'p0')
            cont_p_m1_2 = _create_container(inst1,
                                            cont_list[cont_num].dn,
                                            'p1',
                                            sleep=True)
            cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn,
                                            'p0')
            cont_p_m2_2 = _create_container(inst2,
                                            cont_list[cont_num].dn,
                                            'p1',
                                            sleep=True)
            _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)
            _delete_container(cont_p_m2_1, sleep=True)
            _delete_container(cont_p_m1_2, sleep=True)
            _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

        log.info(
            "Test a few more parent-child combinations on three instances")
        for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):
            cont_num += 1
            cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
            _delete_container(cont_p_m1, sleep=True)

            cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
            cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')
            _delete_container(cont_c_m2)
            _delete_container(cont_p_m2, sleep=True)

            cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')
            _create_container(inst3, cont_p_m3.dn, 'c0')
            _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

        topology_m3.resume_all_replicas()

        repl.test_replication_topology(topology_m3)

        conts_dns = {}
        for num in range(1, 4):
            inst = topology_m3.ms["master{}".format(num)]
            conts_dns[inst.serverid] = []
            conts = nsContainers(inst, base_m3.dn)
            for cont in conts.list():
                conts_p = nsContainers(inst, cont.dn)
                for cont_p in conts_p.list():
                    conts_c = nsContainers(inst, cont_p.dn)
                    conts_dns[inst.serverid].extend(
                        [cont_c.dn for cont_c in conts_c.list()])
                conts_dns[inst.serverid].extend(
                    [cont_p.dn for cont_p in conts_p.list()])
            conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

        for conts1, conts2 in permutations(conts_dns.values(), 2):
            assert set(conts1) == set(conts2)
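The container helpers used throughout this test are defined elsewhere; minimal sketches inferred from the call sites (the one-second sleep is an assumption) are:

import time

def _create_container(inst, dn, name, sleep=False):
    # Create a nsContainer child entry under the given DN.
    conts = nsContainers(inst, dn)
    cont = conts.create(properties={'cn': name})
    if sleep:
        time.sleep(1)
    return cont


def _delete_container(cont, sleep=False):
    # Remove a container entry created by _create_container.
    cont.delete()
    if sleep:
        time.sleep(1)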
Esempio n. 25
0
def topo_tls_ldapi(topo):
    """Enable TLS on both masters and reconfigure both agreements
    to use TLS Client auth. Also, setup ldapi and export DB
    """

    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]
    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    [i.enable_tls() for i in topo]

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)
    # Now change the auth type

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]

    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m2.sslport),
    )
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m1.sslport),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    log.info("Export LDAPTLS_CACERTDIR env variable for ds-replcheck")
    os.environ["LDAPTLS_CACERTDIR"] = m1.get_ssca_dir()

    for inst in topo:
        inst.config.set('nsslapd-ldapilisten', 'on')
        inst.config.set('nsslapd-ldapifilepath', '/var/run/slapd-{}.socket'.format(inst.serverid))
        inst.restart()

    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)

    return topo
Esempio n. 26
0
    def test_add_modrdn(self, topology_m2, base_m2):
        """Check that conflict properly resolved for create - modrdn operations

        :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb
        :setup: Two-master replication, a test container for entries; plugin logging,
                audit log, replica error log and access log for internal operations enabled
        :steps:
            1. Add five users to m1 and wait for replication to happen
            2. Pause replication
            3. Create an entry on m1 and m2
            4. Create an entry on m1 and rename on m2
            5. Rename an entry on m1 and create on m2
            6. Rename an entry on m1 and rename on m2
            7. Rename an entry on m1 and rename on m2. Use different entries
               but rename them to the same entry
            8. Resume replication
            9. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
        """

        M1 = topology_m2.ms["master1"]
        M2 = topology_m2.ms["master2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        for user_num in range(1000, 1005):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test create - modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _rename_user(test_users_m2, 1000, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1001, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1002, user_num, sleep=True)
        _rename_user(test_users_m2, 1002, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1003, user_num, sleep=True)
        _rename_user(test_users_m2, 1004, user_num)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
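The _rename_user() helper used above is also defined elsewhere in the suite; a sketch inferred from its call sites (the 'test_user_<n>' naming scheme is an assumption) is:

import time

def _rename_user(users, old_num, new_num, sleep=False):
    # MODRDN the user to a new uid while keeping it under the same parent.
    user = users.get('test_user_{}'.format(old_num))
    user.rename('uid=test_user_{}'.format(new_num))
    if sleep:
        time.sleep(1)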
Esempio n. 27
0
def replicate_backend(s1, s2, beSuffix):
    repl = ReplicationManager(beSuffix)
    repl.create_first_master(s1)
    repl.join_master(s1, s2)
    repl.ensure_agreement(s1, s2)
    repl.ensure_agreement(s2, s1)
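A hedged usage sketch for the helper above (the suffix, backend name and the Backends call are illustrative assumptions): it would typically be run once per extra backend, after creating that backend on both servers.

def setup_and_replicate_second_backend(s1, s2,
                                       suffix='dc=example2,dc=com',
                                       bename='example2Root'):
    # Create the backend on both servers, then wire up two-way replication.
    for inst in (s1, s2):
        Backends(inst).create(properties={'cn': bename,
                                          'nsslapd-suffix': suffix})
    replicate_backend(s1, s2, suffix)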
Esempio n. 28
0
    def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2):
        """Check that conflict properly resolved for complex operations
        which involve add, modify, modrdn and delete

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1
        :customerscenario: True
        :setup: Two-master replication, a test container for entries; plugin logging,
                audit log, replica error log and access log for internal operations enabled
        :steps:
            1. Add ten users to m1 and wait for replication to happen
            2. Pause replication
            3. Test add-del on m1 and add on m2
            4. Test add-mod on m1 and add on m2
            5. Test add-modrdn on m1 and add on m2
            6. Test multiple add, modrdn
            7. Test Add-del on both masters
            8. Test modrdn-modrdn
            9. Test modrdn-del
            10. Resume replication
            11. Check that the entries on both masters are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
            11. It should pass
        """

        M1 = topology_m2.ms["master1"]
        M2 = topology_m2.ms["master2"]

        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        for user_num in range(1100, 1110):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test add-del on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _delete_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num)

        log.info("Test add-mod on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _modify_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num)

        log.info("Test add-modrdn on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num + 20, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num + 20, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num + 20)

        log.info("Test multiple add, modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num + 20)
        _create_user(test_users_m1, user_num, sleep=True)
        _modify_user(test_users_m2, user_num, sleep=True)

        log.info("Add - del on both masters")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num)
        _delete_user(test_users_m2, user_num, sleep=True)

        log.info("Test modrdn - modrdn")
        user_num += 1
        _rename_user(test_users_m1, 1109, 1129, sleep=True)
        _rename_user(test_users_m2, 1109, 1129, sleep=True)

        log.info("Test modrdn - del")
        user_num += 1
        _rename_user(test_users_m1, 1100, 1120, sleep=True)
        _delete_user(test_users_m2, 1100)

        user_num += 1
        _delete_user(test_users_m2, 1101, sleep=True)
        _rename_user(test_users_m1, 1101, 1121)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)
        time.sleep(30)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
Esempio n. 29
0
def test_vattr_on_cos_definition_with_replication(topo, reset_ignore_vattr):
    """Test nsslapd-ignore-virtual-attrs configuration attribute
       The attribute is ON by default. If a cos definition is
       added it is moved to OFF in replication scenario

    :id: c1fd8fa1-bd13-478b-9b33-e33b49c587bd
    :customerscenario: True
    :setup: Supplier Consumer
    :steps:
         1. Check that the attribute nsslapd-ignore-virtual-attrs is present in cn=config on the consumer
         2. Check that the default value of nsslapd-ignore-virtual-attrs is ON on the consumer
         3. Create a cos definition for employeeType on the supplier
         4. Check that the value of nsslapd-ignore-virtual-attrs is OFF on the consumer (with a delay for postop processing)
         5. Check for the message "slapi_vattrspi_regattr - Because employeeType,.." in the consumer error log
         6. Check that after deleting the cos definition the value of nsslapd-ignore-virtual-attrs is set back to ON on the consumer
    :expectedresults:
         1. This should be successful
         2. This should be successful
         3. This should be successful
         4. This should be successful
         5. This should be successful
         6. This should be successful
    """
    s = topo.ms['supplier1']
    c = topo.cs['consumer1']
    log.info(
        "Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer"
    )
    assert c.config.present('nsslapd-ignore-virtual-attrs')

    log.info(
        "Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer"
    )
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    # creating CosClassicDefinition in supplier
    log.info("Create a cos definition")
    properties = {
        'cosTemplateDn':
        'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(
            DEFAULT_SUFFIX),
        'cosAttribute':
        'employeeType',
        'cosSpecifier':
        'nsrole',
        'cn':
        'cosClassicGenerateEmployeeTypeUsingnsrole'
    }
    cosdef = CosClassicDefinition(s,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
        .create(properties=properties)

    log.info(
        "Check the value of attribute nsslapd-ignore-virtual-attrs is now OFF on the consumer"
    )
    time.sleep(2)
    assert c.config.present('nsslapd-ignore-virtual-attrs', 'off')

    # Stop the consumer so its error log can be searched safely
    c.stop()
    assert c.searchErrorsLog(
        "slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'"
    )
    c.start()
    log.info("Delete a cos definition")
    cosdef.delete()
    repl = ReplicationManager(DEFAULT_SUFFIX)
    log.info("Check Delete was propagated")
    repl.wait_for_replication(s, c)

    log.info(
        "Check the value of attribute nsslapd-ignore-virtual-attrs is back to the default ON on the consumer"
    )
    s.restart()
    c.restart()
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
Esempio n. 30
0
def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
    """Check that csngen remote offset is not updated if RUV generation uuid are different

    :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
    :setup: Two suppliers + two consumers replication setup
    :steps:
        1. Disable m1<->m2 agreement to avoid propagate timeSkew
        2. Generate ldif without replication data
        3. Increase time skew on supplier2
        4. Init both suppliers from that ldif
             (to clear the ruvs and generates different generation uuid)
        5. Perform on line init from supplier1 to consumer1 and supplier2 to consumer2
        6. Perform update on both suppliers
        7. Check that c1 has no time skew
        8. Check that c2 has time skew
        9. Init supplier2 from supplier1
        10. Perform update on supplier2
        11. Check that c1 has time skew
    :expectedresults:
        1. No error
        2. No error while generating ldif
        3. No error
        4. No error while importing the ldif file
        5. No error and Initialization done.
        6. No error
        7. c1 time skew should be less than the threshold
        8. c2 time skew should be greater than the threshold
        9. No error and Initialization done.
        10. No error
        11. c1 time skew should be greater than the threshold

    """

    # Variables initialization
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m1 = topo_m2c2.ms["supplier1"]
    m2 = topo_m2c2.ms["supplier2"]
    c1 = topo_m2c2.cs["consumer1"]
    c2 = topo_m2c2.cs["consumer2"]

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)

    replicid_m2 = replica_m2.get_rid()

    agmts_m1 = Agreements(m1, replica_m1.dn)
    agmts_m2 = Agreements(m2, replica_m2.dn)

    m1_m2 = get_agreement(agmts_m1, m2)
    m1_c1 = get_agreement(agmts_m1, c1)
    m1_c2 = get_agreement(agmts_m1, c2)
    m2_m1 = get_agreement(agmts_m2, m1)
    m2_c1 = get_agreement(agmts_m2, c1)
    m2_c2 = get_agreement(agmts_m2, c2)

    # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew
    m1_m2.pause()
    m2_m1.pause()

    # Step 2: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that are still in the ldif
    # _remove_replication_data(ldif_file)

    # Step 3: Increase time skew on supplier2
    timeSkew = 6 * 3600
    # We can modify supplier2's time skew,
    # but the time skew on the consumer may be smaller
    # depending on when the csngen generation time is updated
    # and when the first csn gets replicated.
    # Since we use timeSkew as the threshold value to detect
    # whether there is time skew or not,
    # let's add a significant margin (longer than the test duration)
    # to avoid any risk of erroneous failure
    timeSkewMargin = 300
    DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew + timeSkewMargin)

    # Step 4: Init both suppliers from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()

    # Step 5: Perform on line init from supplier1 to consumer1
    #          and from supplier2 to consumer2
    m1_c1.begin_reinit()
    m2_c2.begin_reinit()
    (done, error) = m1_c1.wait_reinit()
    assert done is True
    assert error is False
    (done, error) = m2_c2.wait_reinit()
    assert done is True
    assert error is False

    # Step 6: Perform update on both suppliers
    repl.test_replication(m1, c1)
    repl.test_replication(m2, c2)

    # Step 7: Check that c1 has no time skew
    # Stop the server to ensure that dse.ldif is up to date
    c1.stop()
    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
    c1_timeSkew = int(c1_nsState['time_skew'])
    log.debug(f"c1 time skew: {c1_timeSkew}")
    if (c1_timeSkew >= timeSkew):
        log.error(
            f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}"
        )
        assert False
    c1.start()

    # Step 8: Check that c2 has time skew
    # Stop the server to ensure that dse.ldif is up to date
    c2.stop()
    c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
    c2_timeSkew = int(c2_nsState['time_skew'])
    log.debug(f"c2 time skew: {c2_timeSkew}")
    if (c2_timeSkew < timeSkew):
        log.error(
            f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}"
        )
        assert False
    c2.start()

    # Step 9: Perform on line init from supplier1 to supplier2
    m1_c1.pause()
    m1_m2.resume()
    m1_m2.begin_reinit()
    (done, error) = m1_m2.wait_reinit()
    assert done is True
    assert error is False

    # Step 10: Perform update on supplier2
    repl.test_replication(m2, c1)

    # Step 11: Check that c1 has time skew
    # Stop the server to ensure that dse.ldif is up to date
    c1.stop()
    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
    c1_timeSkew = int(c1_nsState['time_skew'])
    log.debug(f"c1 time skew: {c1_timeSkew}")
    if (c1_timeSkew < timeSkew):
        log.error(
            f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}"
        )
        assert False