Example 1
def test_ticket48325(topology_m1h1c1):
    """
    Test that the RUV element order is correctly maintained when promoting
    a hub or consumer.
    """

    #
    # Promote consumer to master
    #
    C1 = topology_m1h1c1.cs["consumer1"]
    M1 = topology_m1h1c1.ms["master1"]
    H1 = topology_m1h1c1.hs["hub1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl._ensure_changelog(C1)
    DN = C1.replica._get_mt_entry(DEFAULT_SUFFIX)
    C1.modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaType', b'3'),
                     (ldap.MOD_REPLACE, 'nsDS5ReplicaID', b'1234'),
                     (ldap.MOD_REPLACE, 'nsDS5Flags', b'1')])
    time.sleep(1)

    #
    # Check ruv has been reordered
    #
    if not checkFirstElement(C1, '1234'):
        log.fatal('RUV was not reordered')
        assert False

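    # Add the replication manager account to master1; it becomes the bind DN
    # for the agreement created from the newly promoted master below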
    topology_m1h1c1.ms["master1"].add_s(Entry((defaultProperties[REPLICATION_BIND_DN],
                                               {'objectclass': 'top netscapeServer'.split(),
                                                'cn': 'replication manager',
                                                'userPassword': '******'})))

    DN = M1.replica._get_mt_entry(DEFAULT_SUFFIX)
    M1.modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN',
                      ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))])
    #
    # Create repl agreement from the newly promoted master to master1
    #
    properties = {RA_NAME: 'meTo_{}:{}'.format(M1.host, str(M1.port)),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    new_agmt = C1.agreement.create(suffix=SUFFIX,
                                   host=M1.host,
                                   port=M1.port,
                                   properties=properties)

    if not new_agmt:
        log.fatal("Fail to create new agmt from old consumer to the master")
        assert False

    # Test replication is working
    repl.test_replication(C1, M1)

    #
    # Promote hub to master
    #
    DN = H1.replica._get_mt_entry(DEFAULT_SUFFIX)
    H1.modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaType', b'3'),
                     (ldap.MOD_REPLACE, 'nsDS5ReplicaID', b'5678')])
    time.sleep(1)

    #
    # Check ruv has been reordered
    #
    if not checkFirstElement(H1, '5678'):
        log.fatal('RUV was not reordered')
        assert False

    # Test replication is working
    repl.test_replication(M1, H1)

    # Done
    log.info('Test complete')
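
The checkFirstElement() helper called above is not part of this excerpt. A minimal sketch of what it might look like, assuming the RUV is read from the nsds50ruv attribute of the RUV tombstone entry (element [0] is the replicageneration element, so the first real replica element is [1]) and that ensure_str comes from lib389.utils:

# Hypothetical sketch of the checkFirstElement() helper used above
def checkFirstElement(ds, rid):
    """Return True if the first RUV element belongs to the given replica ID"""
    try:
        entries = ds.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                              '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)'
                              '(objectclass=nsTombstone))',
                              ['nsds50ruv'])
        assert entries
        entry = entries[0]
    except ldap.LDAPError as e:
        log.fatal('Failed to retrieve RUV entry: %s' % str(e))
        assert False

    ruv_elements = entry.getValues('nsds50ruv')
    # ruv_elements[0] is the replicageneration element; the first
    # replica element follows it
    return ('replica %s ' % rid) in ensure_str(ruv_elements[1])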
Example 2
def test_repl_modrdn(topo_m2):
    """Test that replicated MODRDN does not break replication

    :id: a3e17698-9eb4-41e0-b537-8724b9915fa6
    :setup: Two-supplier replication setup
    :steps:
        1. Add 3 test OrganizationalUnits A, B and C
        2. Add 1 test user under OU=A
        3. Add same test user under OU=B
        4. Stop Replication
        5. Apply modrdn to M1 - move test user from OU A -> C
        6. Apply modrdn on M2 - move test user from OU B -> C
        7. Start Replication
        8. Check that there is only one test entry under ou=C on both suppliers
        9. Check that the replication is working fine both ways M1 <-> M2
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass
        5. This should pass
        6. This should pass
        7. This should pass
        8. This should pass
        9. This should pass
    """

    supplier1 = topo_m2.ms["supplier1"]
    supplier2 = topo_m2.ms["supplier2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs")
    OUs = OrganizationalUnits(supplier1, DEFAULT_SUFFIX)
    OU_A = OUs.create(properties={
        'ou': 'A',
        'description': 'A',
    })
    OU_B = OUs.create(properties={
        'ou': 'B',
        'description': 'B',
    })
    OU_C = OUs.create(properties={
        'ou': 'C',
        'description': 'C',
    })

    users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn))
    tuser_A = users.create(properties=TEST_USER_PROPERTIES)

    users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
    tuser_B = users.create(properties=TEST_USER_PROPERTIES)

    repl.test_replication(supplier1, supplier2)
    repl.test_replication(supplier2, supplier1)

    log.info("Stop Replication")
    topo_m2.pause_all_replicas()

    log.info("Apply modrdn to M1 - move test user from OU A -> C")
    supplier1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)

    log.info("Apply modrdn on M2 - move test user from OU B -> C")
    supplier2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)

    log.info("Start Replication")
    topo_m2.resume_all_replicas()

    log.info("Wait for sometime for repl to resume")
    repl.test_replication(supplier1, supplier2)
    repl.test_replication(supplier2, supplier1)

    log.info("Check that there should be only one test entry under ou=C on both suppliers")
    users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    users = UserAccounts(supplier2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl.test_replication(supplier1, supplier2)
    repl.test_replication(supplier2, supplier1)
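
These snippets are excerpts, so their import preambles are not shown. A representative (not verbatim) set of imports that this example assumes would be:

# Representative imports for this example (an assumption, not copied from the source file)
import logging
import ldap
from lib389._constants import DEFAULT_SUFFIX
from lib389.replica import ReplicationManager
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.topologies import topology_m2 as topo_m2

log = logging.getLogger(__name__)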
Example 3
def topo_tls_ldapi(topo):
    """Enable TLS on both masters and reconfigure both agreements
    to use TLS Client auth. Also, setup ldapi and export DB
    """

    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]
    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same certmaps for both servers
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    for inst in topo:
        inst.enable_tls()

    # Create the replication DNs
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)
    # Now change the auth type

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]

    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m2.sslport),
    )
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', 'SSL'),
        ('nsDS5ReplicaPort', '%s' % m1.sslport),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    log.info("Export LDAPTLS_CACERTDIR env variable for ds-replcheck")
    os.environ["LDAPTLS_CACERTDIR"] = m1.get_ssca_dir()

    for inst in topo:
        inst.config.set('nsslapd-ldapilisten', 'on')
        inst.config.set('nsslapd-ldapifilepath',
                        '/var/run/slapd-{}.socket'.format(inst.serverid))
        inst.restart()

    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)

    return topo
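
The conflict-resolution examples that follow call small local helpers (_create_user, _modify_user, _rename_user, _delete_user) that are not reproduced in these excerpts. A plausible sketch, assuming UserAccounts.create_test_user() naming (test_user_<num>) and that the optional sleep separates the CSN timestamps of consecutive writes:

# Hypothetical sketches of the user helpers used by the conflict examples below
def _create_user(users, num, group_num=2000, sleep=False):
    user = users.create_test_user(uid=num, gid=group_num)
    if sleep:
        time.sleep(1)
    return user

def _modify_user(users, num, sleep=False):
    user = users.get('test_user_{}'.format(num))
    user.replace('description', 'modified')
    if sleep:
        time.sleep(1)

def _rename_user(users, num, new_num, sleep=False):
    user = users.get('test_user_{}'.format(num))
    user.rename('uid=test_user_{}'.format(new_num))
    if sleep:
        time.sleep(1)

def _delete_user(users, num, sleep=False):
    user = users.get('test_user_{}'.format(num))
    user.delete()
    if sleep:
        time.sleep(1)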
Example 4
    def test_managed_entries(self, topology_m2):
        """Check that conflict properly resolved for operations
        with managed entries

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
        :setup: Two-supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Create ou=managed_people and ou=managed_groups under test container
            2. Configure managed entries plugin and add a template to test container
            3. Add a user to m1 and wait for replication to happen
            4. Pause replication
            5. Create a user on m1 and m2 with the same group ID on both suppliers
            6. Create a user on m1 and m2 with a different group ID on each supplier
            7. Resume replication
            8. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        repl = ReplicationManager(SUFFIX)

        ous = OrganizationalUnits(M1, DEFAULT_SUFFIX)
        ou_people = ous.create(properties={'ou': 'managed_people'})
        ou_groups = ous.create(properties={'ou': 'managed_groups'})

        test_users_m1 = UserAccounts(M1,
                                     DEFAULT_SUFFIX,
                                     rdn='ou={}'.format(ou_people.rdn))
        test_users_m2 = UserAccounts(M2,
                                     DEFAULT_SUFFIX,
                                     rdn='ou={}'.format(ou_people.rdn))

        # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)
        conts = nsContainers(M1, SUFFIX)
        template = conts.create(properties={
            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
            'cn': 'MEP Template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
            'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
        })
        repl.test_replication(M1, M2)

        for inst in topology_m2.ms.values():
            conts = nsContainers(
                inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))
            conts.create(
                properties={
                    'objectclass': 'top extensibleObject'.split(),
                    'cn': 'config',
                    'originScope': ou_people.dn,
                    'originFilter': 'objectclass=posixAccount',
                    'managedBase': ou_groups.dn,
                    'managedTemplate': template.dn
                })
            inst.restart()

        _create_user(test_users_m1, 1, 1)

        topology_m2.pause_all_replicas()

        _create_user(test_users_m1, 2, 2, sleep=True)
        _create_user(test_users_m2, 2, 2, sleep=True)

        _create_user(test_users_m1, 3, 3, sleep=True)
        _create_user(test_users_m2, 3, 33)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
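
_create_container and _delete_container, used heavily by the next example, are likewise local helpers not shown here; a sketch under the same assumptions:

# Hypothetical sketches of the container helpers used below
def _create_container(inst, dn, name, sleep=False):
    conts = nsContainers(inst, dn)
    cont = conts.create(properties={'cn': name})
    if sleep:
        time.sleep(1)
    return cont

def _delete_container(cont, sleep=False):
    cont.delete()
    if sleep:
        time.sleep(1)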
Example 5
    def test_nested_entries_with_children(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5
        :setup: Two-supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add 15 containers to m1 and wait for replication to happen
            2. Pause replication
            3. Create parent-child on supplier2 and supplier1
            4. Create parent-child on supplier1 and supplier2
            5. Create parent-child on supplier1 and supplier2 different child rdn
            6. Create parent-child on supplier1 and delete parent on supplier2
            7. Create parent on supplier1, delete it and parent-child on supplier2, delete them
            8. Create parent on supplier1, delete it and parent-two children on supplier2
            9. Create parent-two children on supplier1 and parent-child on supplier2, delete them
            10. Create three subsets inside existing container entry, applying only part of changes on m2
            11. Create more combinations of the subset with parent-child on m1 and parent on m2
            12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2
            13. Resume replication
            14. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
            11. It should pass
            12. It should pass
            13. It should pass
            14. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        repl = ReplicationManager(SUFFIX)
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        _create_user(test_users_m1, 4000)
        _create_user(test_users_m1, 4001)

        cont_list = []
        for num in range(15):
            cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num))
            cont_list.append(cont)

        repl.test_replication(M1, M2)

        topology_m2.pause_all_replicas()

        log.info("Create parent-child on supplier2 and supplier1")
        _create_container(M2, base_m2.dn, 'p0', sleep=True)
        cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True)
        _create_container(M1, cont_p.dn, 'c0', sleep=True)
        _create_container(M2, cont_p.dn, 'c0', sleep=True)

        log.info("Create parent-child on supplier1 and supplier2")
        cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True)
        _create_container(M2, base_m2.dn, 'p1', sleep=True)
        _create_container(M1, cont_p.dn, 'c1', sleep=True)
        _create_container(M2, cont_p.dn, 'c1', sleep=True)

        log.info(
            "Create parent-child on supplier1 and supplier2 different child rdn"
        )
        cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True)
        _create_container(M2, base_m2.dn, 'p2', sleep=True)
        _create_container(M1, cont_p.dn, 'c2', sleep=True)
        _create_container(M2, cont_p.dn, 'c3', sleep=True)

        log.info(
            "Create parent-child on supplier1 and delete parent on supplier2")
        cont_num = 0
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
        _delete_container(cont_p_m2)

        cont_num += 1
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
        _delete_container(cont_p_m2, sleep=True)

        log.info(
            "Create parent on supplier1, delete it and parent-child on supplier2, delete them"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2)

        cont_num += 1
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1)

        log.info(
            "Create parent on supplier1, delete it and parent-two children on supplier2"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        _create_container(M2, cont_p_m2.dn, 'c0')
        _create_container(M2, cont_p_m2.dn, 'c1')

        cont_num += 1
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        _create_container(M2, cont_p_m2.dn, 'c0')
        _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)

        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        log.info(
            "Create parent-two children on supplier1 and parent-child on supplier2, delete them"
        )
        cont_num += 1
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1')

        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)

        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        log.info(
            "Create three subsets inside existing container entry, applying only part of changes on m2"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
        _create_container(M1, cont_p_m1.dn, 'c0')
        _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
        cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2, sleep=True)

        log.info(
            "Create more combinations of the subset with parent-child on m1 and parent on m2"
        )
        cont_num += 1
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        _delete_container(cont_p_m1, sleep=True)
        cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        _delete_container(cont_p_m1, sleep=True)
        _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

        cont_num += 1
        cont_p_m1 = _create_container(M1,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_p_m2 = _create_container(M2,
                                      cont_list[cont_num].dn,
                                      'p0',
                                      sleep=True)
        cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
        _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
        _delete_container(cont_c_m1, sleep=True)
        _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)
        _delete_container(cont_p_m1, sleep=True)

        log.info(
            "Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2"
        )
        cont_num += 1
        _delete_container(cont_list[cont_num])
        _modify_user(test_users_m1, 4000, sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p0')
        _modify_user(test_users_m2, 4001)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2, timeout=60)

        conts_dns = {}
        for num in range(1, 3):
            inst = topology_m2.ms["supplier{}".format(num)]
            conts_dns[inst.serverid] = []
            conts = nsContainers(inst, base_m2.dn)
            for cont in conts.list():
                conts_p = nsContainers(inst, cont.dn)
                for cont_p in conts_p.list():
                    conts_c = nsContainers(inst, cont_p.dn)
                    conts_dns[inst.serverid].extend(
                        [cont_c.dn for cont_c in conts_c.list()])
                conts_dns[inst.serverid].extend(
                    [cont_p.dn for cont_p in conts_p.list()])
            conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

        assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid])

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
Example 6
    def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2):
        """Check that conflict properly resolved for complex operations
        which involve add, modify, modrdn and delete

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1
        :customerscenario: True
        :setup: Two-supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add ten users to m1 and wait for replication to happen
            2. Pause replication
            3. Test add-del on m1 and add on m2
            4. Test add-mod on m1 and add on m2
            5. Test add-modrdn on m1 and add on m2
            6. Test multiple add, modrdn
            7. Test Add-del on both suppliers
            8. Test modrdn-modrdn
            9. Test modrdn-del
            10. Resume replication
            11. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
            11. It should pass
        """

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]

        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        for user_num in range(1100, 1110):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test add-del on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _delete_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num)

        log.info("Test add-mod on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _modify_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num)

        log.info("Test add-modrdn on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num + 20, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num + 20, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num + 20)

        log.info("Test multiple add, modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num + 20)
        _create_user(test_users_m1, user_num, sleep=True)
        _modify_user(test_users_m2, user_num, sleep=True)

        log.info("Add - del on both suppliers")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num)
        _delete_user(test_users_m2, user_num, sleep=True)

        log.info("Test modrdn - modrdn")
        user_num += 1
        _rename_user(test_users_m1, 1109, 1129, sleep=True)
        _rename_user(test_users_m2, 1109, 1129, sleep=True)

        log.info("Test modrdn - del")
        user_num += 1
        _rename_user(test_users_m1, 1100, 1120, sleep=True)
        _delete_user(test_users_m2, 1100)

        user_num += 1
        _delete_user(test_users_m2, 1101, sleep=True)
        _rename_user(test_users_m1, 1101, 1121)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)
        time.sleep(30)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
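
_create_group and _delete_group, used by test_memberof_groups below, are also local helpers; a plausible sketch:

# Hypothetical sketches of the group helpers used below
def _create_group(groups, num, members, sleep=False):
    group = groups.create(properties={'cn': 'test_group_{}'.format(num),
                                      'member': members})
    if sleep:
        time.sleep(1)
    return group

def _delete_group(groups, num, sleep=False):
    group = groups.get('test_group_{}'.format(num))
    group.delete()
    if sleep:
        time.sleep(1)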
Example 7
    def test_memberof_groups(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with memberOf and groups

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
        :setup: Two-supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Enable memberOf plugin
            2. Add 30 users to m1 and wait for replication to happen
            3. Pause replication
            4. Create a group on m1 and m2
            5. Create a group on m1 and m2, delete from m1
            6. Create a group on m1, delete from m1, and create on m2
            7. Create a group on m2 and m1, delete from m1
            8. Create two different groups on m2
            9. Resume replication
            10. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_groups_m1 = Groups(M1, base_m2.dn, rdn=None)
        test_groups_m2 = Groups(M2, base_m2.dn, rdn=None)

        repl = ReplicationManager(SUFFIX)

        for inst in topology_m2.ms.values():
            memberof = MemberOfPlugin(inst)
            memberof.enable()
            agmt = Agreements(inst).list()[0]
            agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',
                               '(objectclass=*) $ EXCLUDE '),
                              ('nsDS5ReplicatedAttributeList',
                               '(objectclass=*) $ EXCLUDE memberOf'))
            inst.restart()
        user_dns = []
        for user_num in range(10):
            user_trio = []
            for num in range(0, 30, 10):
                user = _create_user(test_users_m1, 1200 + user_num + num)
                user_trio.append(user.dn)
            user_dns.append(user_trio)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Check a simple conflict")
        group_num = 0
        _create_group(test_groups_m1,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)

        log.info("Check a add - del")
        group_num += 1
        _create_group(test_groups_m1,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _delete_group(test_groups_m1, group_num)

        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        group_num += 1
        _create_group(test_groups_m2,
                      group_num,
                      user_dns[group_num],
                      sleep=True)
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])
        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        group_dns_m1 = [group.dn for group in test_groups_m1.list()]
        group_dns_m2 = [group.dn for group in test_groups_m2.list()]
        assert set(group_dns_m1) == set(group_dns_m2)
Example 8
    def test_nested_entries(self, topology_m3, base_m3):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
        :setup: Three-supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add 11 containers to m1 and wait for replication to happen
            2. Pause replication
            3. Create two child entries under each of two entries
            4. Create three child entries under each of three entries
            5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
               on m2 - delete one parent and create a child
            6. Test a few more parent-child combinations with three instances
            7. Resume replication
            8. Check that the entries on all suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m3.ms["supplier1"]
        M2 = topology_m3.ms["supplier2"]
        M3 = topology_m3.ms["supplier3"]
        repl = ReplicationManager(SUFFIX)

        cont_list = []
        for num in range(11):
            cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num))
            cont_list.append(cont)

        repl.test_replication(M1, M2)
        repl.test_replication(M1, M3)

        topology_m3.pause_all_replicas()

        log.info("Create two child entries under each of two entries")
        cont_num = -1
        for num in range(2):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

        log.info("Create three child entries under each of three entries")
        for num in range(3):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)
            _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

        log.info(
            "Create two parents on m1 and m2, then on m1 - create a child and delete one parent, "
            "on m2 - delete one parent and create a child")
        for inst1, inst2 in ((M1, M2), (M2, M1)):
            cont_num += 1
            cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn,
                                            'p0')
            cont_p_m1_2 = _create_container(inst1,
                                            cont_list[cont_num].dn,
                                            'p1',
                                            sleep=True)
            cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn,
                                            'p0')
            cont_p_m2_2 = _create_container(inst2,
                                            cont_list[cont_num].dn,
                                            'p1',
                                            sleep=True)
            _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)
            _delete_container(cont_p_m2_1, sleep=True)
            _delete_container(cont_p_m1_2, sleep=True)
            _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

        log.info(
            "Test a few more parent-child combinations on three instances")
        for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):
            cont_num += 1
            cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
            _delete_container(cont_p_m1, sleep=True)

            cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
            cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')
            _delete_container(cont_c_m2)
            _delete_container(cont_p_m2, sleep=True)

            cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')
            _create_container(inst3, cont_p_m3.dn, 'c0')
            _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

        topology_m3.resume_all_replicas()

        repl.test_replication_topology(topology_m3)

        conts_dns = {}
        for num in range(1, 4):
            inst = topology_m3.ms["supplier{}".format(num)]
            conts_dns[inst.serverid] = []
            conts = nsContainers(inst, base_m3.dn)
            for cont in conts.list():
                conts_p = nsContainers(inst, cont.dn)
                for cont_p in conts_p.list():
                    conts_c = nsContainers(inst, cont_p.dn)
                    conts_dns[inst.serverid].extend(
                        [cont_c.dn for cont_c in conts_c.list()])
                conts_dns[inst.serverid].extend(
                    [cont_p.dn for cont_p in conts_p.list()])
            conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

        for conts1, conts2 in permutations(conts_dns.values(), 2):
            assert set(conts1) == set(conts2)
Example 9
    def test_add_modrdn(self, topology_m2, base_m2):
        """Check that conflict properly resolved for create - modrdn operations

        :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb
        :setup: Two-supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add five users to m1 and wait for replication to happen
            2. Pause replication
            3. Create an entry on m1 and m2
            4. Create an entry on m1 and rename on m2
            5. Rename an entry on m1 and create on m2
            6. Rename an entry on m1 and rename on m2
            7. Rename an entry on m1 and rename on m2. Use different entries
               but rename them to the same entry
            8. Resume replication
            9. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
        """

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        for user_num in range(1000, 1005):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test create - modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _rename_user(test_users_m2, 1000, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1001, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1002, user_num, sleep=True)
        _rename_user(test_users_m2, 1002, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1003, user_num, sleep=True)
        _rename_user(test_users_m2, 1004, user_num)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
Example 10
def test_mail_attr_repl(topo_r):
    """Check that no crash happens during mail attribute replication

    :id: 959edc84-05be-4bf9-a541-53afae482052
    :customerscenario: True
    :setup: Replication setup with supplier and consumer instances,
            test user on supplier
    :steps:
        1. Check that user was replicated to consumer
        2. Back up mail database file
        3. Remove mail attribute from the user entry
        4. Restore mail database
        5. Search for the entry with a substring 'mail=user*'
        6. Search for the entry once again to make sure that server is alive
    :expectedresults:
        1. The user should be replicated to consumer
        2. Operation should be successful
        3. The mail attribute should be removed
        4. Operation should be successful
        5. Search should be successful
        6. No crash should happen
    """

    supplier = topo_r.ms["supplier1"]
    consumer = topo_r.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m_users = UserAccounts(supplier, DEFAULT_SUFFIX)
    m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES)
    m_user.ensure_present('mail', '*****@*****.**')

    log.info("Check that replication is working")
    repl.wait_for_replication(supplier, consumer)
    c_users = UserAccounts(consumer, DEFAULT_SUFFIX)
    c_user = c_users.get('testuser')

    c_bes = Backends(consumer)
    c_be = c_bes.get(DEFAULT_SUFFIX)

    db_dir = c_be.get_attr_val_utf8('nsslapd-directory')

    mail_db = list(filter(lambda fl: fl.startswith("mail"),
                          os.listdir(db_dir)))
    assert mail_db, "mail.* wasn't found in {}".format(db_dir)
    mail_db_path = os.path.join(db_dir, mail_db[0])
    backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0])

    consumer.stop()
    log.info("Back up {} to {}".format(mail_db_path, backup_path))
    shutil.copyfile(mail_db_path, backup_path)
    consumer.start()

    log.info("Remove 'mail' attr from supplier")
    m_user.remove_all('mail')

    log.info("Wait for the replication to happen")
    repl.wait_for_replication(supplier, consumer)

    consumer.stop()
    log.info("Restore {} to {}".format(backup_path, mail_db_path))
    shutil.copyfile(backup_path, mail_db_path)
    consumer.start()

    log.info("Make a search for mail attribute in attempt to crash server")
    c_user.get_attr_val("mail")

    log.info("Make sure that server hasn't crashed")
    repl.test_replication(supplier, consumer)
Example 11
def test_basic_with_hub(topo):
    """Check that basic operations work in cascading replication, this includes
    testing plugins that perform internal operatons, and replicated password
    policy state attributes.

    :id: 4ac85552-45bc-477b-89a4-226dfff8c6cc
    :setup: 1 supplier, 1 hub, 1 consumer
    :steps:
        1. Enable memberOf plugin and set password account lockout settings
        2. Restart the instance
        3. Add a user
        4. Add a group
        5. Test that the replication works
        6. Add the user as a member to the group
        7. Test that the replication works
        8. Issue bad binds to update passwordRetryCount
        9. Test that replication works
        10. Check that passwordRetryCount was replicated
    :expectedresults:
        1. Should be a success
        2. Should be a success
        3. Should be a success
        4. Should be a success
        5. Should be a success
        6. Should be a success
        7. Should be a success
        8. Should be a success
        9. Should be a success
        10. Should be a success
    """

    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
    supplier = topo.ms["supplier1"]
    consumer = topo.cs["consumer1"]
    hub = topo.hs["hub1"]

    for inst in topo:
        config_memberof(inst)
        inst.config.set('passwordlockout', 'on')
        inst.config.set('passwordlockoutduration', '60')
        inst.config.set('passwordmaxfailure', '3')
        inst.config.set('passwordIsGlobalPolicy', 'on')

    # Create user
    user1 = UserAccount(supplier, BIND_DN)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'sn': BIND_RDN,
                       'cn': BIND_RDN,
                       'uid': BIND_RDN,
                       'inetUserStatus': '1',
                       'objectclass': 'extensibleObject',
                       'userpassword': PASSWORD})
    user1.create(properties=user_props, basedn=SUFFIX)

    # Create group
    groups = Groups(supplier, DEFAULT_SUFFIX)
    group = groups.create(properties={'cn': 'group'})

    # Test replication
    repl_manager.test_replication(supplier, consumer)

    # Trigger memberOf plugin by adding user to group
    group.replace('member', user1.dn)

    # Test replication once more
    repl_manager.test_replication(supplier, consumer)

    # Issue bad password to update passwordRetryCount
    try:
        supplier.simple_bind_s(user1.dn, "badpassword")
    except ldap.LDAPError:
        pass

    # Test replication one last time
    supplier.simple_bind_s(DN_DM, PASSWORD)
    repl_manager.test_replication(supplier, consumer)

    # Finally, check that passwordRetryCount was replicated to the hub and consumer
    user1 = UserAccount(hub, BIND_DN)
    count = user1.get_attr_val_int('passwordRetryCount')
    if count is None:
        log.fatal('passwordRetryCount was not replicated to hub')
        assert False
    if count != 1:
        log.fatal('passwordRetryCount has unexpected value: {}'.format(count))
        assert False

    user1 = UserAccount(consumer, BIND_DN)
    count = user1.get_attr_val_int('passwordRetryCount')
    if count is None:
        log.fatal('passwordRetryCount was not replicated to consumer')
        assert False
    if count != 1:
        log.fatal('passwordRetryCount has unexpected value: {}'.format(count))
        assert False
Example 12
def test_openldap_no_nss_crypto(topology_m2):
    """Check that we allow usage of OpenLDAP libraries
    that don't use NSS for crypto

    :id: 0a622f3d-8ba5-4df2-a1de-1fb2237da40a
    :setup: Replication with two masters:
        master_1 ----- startTLS -----> master_2;
        master_1 <-- TLS_clientAuth -- master_2;
        nsslapd-extract-pemfiles set to 'on' on both masters
        without specifying cert names
    :steps:
        1. Add 5 users to master 1 and 2
        2. Check that the users were successfully replicated
        3. Relocate PEM files on master 1
        4. Check PEM files in master 1 config directory
        5. Add 5 users more to master 1 and 2
        6. Check that the users were successfully replicated
        7. Export userRoot on master 1
    :expectedresults:
        1. Users should be successfully added
        2. Users should be successfully replicated
        3. Operation should be successful
        4. PEM files should be found
        5. Users should be successfully added
        6. Users should be successfully replicated
        7. Operation should be successful
    """

    log.info("Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto")

    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]
    for inst in topology_m2:
        inst.enable_tls()
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(m1, m2)

    add_entry(m1, 'master1', 'uid=m1user', 0, 5)
    add_entry(m2, 'master2', 'uid=m2user', 0, 5)
    repl.wait_for_replication(m1, m2)
    repl.wait_for_replication(m2, m1)

    log.info('##### Searching for entries on master1...')
    entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 10 == len(entries)

    log.info('##### Searching for entries on master2...')
    entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 10 == len(entries)

    relocate_pem_files(topology_m2)

    add_entry(m1, 'master1', 'uid=m1user', 10, 5)
    add_entry(m2, 'master2', 'uid=m2user', 10, 5)

    repl.wait_for_replication(m1, m2)
    repl.wait_for_replication(m2, m1)

    log.info('##### Searching for entries on master1...')
    entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 20 == len(entries)

    log.info('##### Searching for entries on master2...')
    entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
    assert 20 == len(entries)

    output_file = os.path.join(m1.get_ldif_dir(), "master1.ldif")
    m1.tasks.exportLDIF(benamebase='userRoot',
                        output_file=output_file,
                        args={'wait': True})

    log.info("Ticket 47536 - PASSED")
Example 13
def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
    """Check that csngen remote offset is not updated if RUV generation uuid are different

    :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
    :setup: Two masters + two consumers replication setup
    :steps:
        1. Disable m1<->m2 agreement to avoid propagating timeSkew
        2. Generate ldif without replication data
        3. Increase time skew on master2
        4. Init both masters from that ldif
             (to clear the ruvs and generates different generation uuid)
        5. Perform online init from master1 to consumer1 and master2 to consumer2
        6. Perform update on both masters
        7. Check that c1 has no time skew
        8. Check that c2 has time skew
        9. Init master2 from master1
        10. Perform update on master2
        11. Check that c1 has time skew
    :expectedresults:
        1. No error
        2. No error while generating ldif
        3. No error
        4. No error while importing the ldif file
        5. No error and Initialization done.
        6. No error
        7. c1 time skew should be less than the threshold
        8. c2 time skew should be greater than the threshold
        9. No error and Initialization done.
        10. No error
        11. c1 time skew should be greater than the threshold

    """

    # Variables initialization
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m1 = topo_m2c2.ms["master1"]
    m2 = topo_m2c2.ms["master2"]
    c1 = topo_m2c2.cs["consumer1"]
    c2 = topo_m2c2.cs["consumer2"]

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)

    replicid_m2 = replica_m2.get_rid()

    agmts_m1 = Agreements(m1, replica_m1.dn)
    agmts_m2 = Agreements(m2, replica_m2.dn)

    m1_m2 = get_agreement(agmts_m1, m2)
    m1_c1 = get_agreement(agmts_m1, c1)
    m1_c2 = get_agreement(agmts_m1, c2)
    m2_m1 = get_agreement(agmts_m2, m1)
    m2_c1 = get_agreement(agmts_m2, c1)
    m2_c2 = get_agreement(agmts_m2, c2)

    # Step 1: Disable m1<->m2 agreement to avoid propagating timeSkew
    m1_m2.pause()
    m2_m1.pause()

    # Step 2: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that is still in the ldif
    # _remove_replication_data(ldif_file)

    # Step 3: Increase time skew on master2
    timeSkew = 6 * 3600
    # We can modify master2's time skew,
    # but the time skew on the consumer may be smaller
    # depending on when the csngen generation time is updated
    # and when the first csn gets replicated.
    # Since we use timeSkew as the threshold value to detect
    # whether there is time skew or not,
    # let's add a significant margin (longer than the test duration)
    # to avoid any risk of erroneous failure.
    timeSkewMargin = 300
    DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew + timeSkewMargin)

    # Step 4: Init both masters from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()

    # Step 5: Perform online init from master1 to consumer1
    #         and from master2 to consumer2
    m1_c1.begin_reinit()
    m2_c2.begin_reinit()
    (done, error) = m1_c1.wait_reinit()
    assert done is True
    assert error is False
    (done, error) = m2_c2.wait_reinit()
    assert done is True
    assert error is False

    # Step 6: Perform update on both masters
    repl.test_replication(m1, c1)
    repl.test_replication(m2, c2)

    # Step 7: Check that c1 has no time skew
    # Stop server to ensure that dse.ldif is up to date
    c1.stop()
    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
    c1_timeSkew = int(c1_nsState['time_skew'])
    log.debug(f"c1 time skew: {c1_timeSkew}")
    if c1_timeSkew >= timeSkew:
        log.error(
            f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}"
        )
        assert False
    c1.start()

    # Step 8: Check that c2 has time skew
    # Stop server to ensure that dse.ldif is up to date
    c2.stop()
    c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
    c2_timeSkew = int(c2_nsState['time_skew'])
    log.debug(f"c2 time skew: {c2_timeSkew}")
    if c2_timeSkew < timeSkew:
        log.error(
            f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}"
        )
        assert False
    c2.start()

    # Step 9: Perform online init from master1 to master2
    m1_c1.pause()
    m1_m2.resume()
    m1_m2.begin_reinit()
    (done, error) = m1_m2.wait_reinit()
    assert done is True
    assert error is False

    # Step 10: Perform update on master2
    repl.test_replication(m2, c1)

    # Step 11: Check that c1 has time skew
    # Stop server to ensure that dse.ldif is up to date
    c1.stop()
    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
    c1_timeSkew = int(c1_nsState['time_skew'])
    log.debug(f"c1 time skew: {c1_timeSkew}")
    if c1_timeSkew < timeSkew:
        log.error(
            f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}"
        )
        assert False
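
get_agreement() is shared by this example and the next one; presumably it picks, out of an Agreements collection, the agreement that points at a given consumer:

# Hypothetical sketch of the get_agreement() helper used by the two examples around it
def get_agreement(agmts, consumer):
    """Return the agreement that targets the given consumer, or None"""
    for agmt in agmts.list():
        if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
                agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
            return agmt
    return None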
Example 14
def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
    """Check that RUV url is not updated if RUV generation uuid are different

    :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
    :setup: Two masters + two consumers replication setup
    :steps:
        1. Generate ldif without replication data
        2. Init both masters from that ldif
             (to clear the ruvs and generates different generation uuid)
        3. Perform online init from master1 to consumer1
               and from master2 to consumer2
        4. Perform update on both masters
        5. Check that c1 RUV does not contain a URL towards m2
        6. Check that c2 RUV does contain a URL towards m2
        7. Perform online init from master1 to master2
        8. Perform update on master2
        9. Check that c1 RUV does contain a URL towards m2
    :expectedresults:
        1. No error while generating ldif
        2. No error while importing the ldif file
        3. No error and Initialization done.
        4. No error
        5. master2 replicaid should not be in the consumer1 RUV
        6. master2 replicaid should be in the consumer2 RUV
        7. No error and Initialization done.
        8. No error
        9. master2 replicaid should be in the consumer1 RUV

    """

    # Variables initialization
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m1 = topo_m2c2.ms["master1"]
    m2 = topo_m2c2.ms["master2"]
    c1 = topo_m2c2.cs["consumer1"]
    c2 = topo_m2c2.cs["consumer2"]

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)

    replicid_m2 = replica_m2.get_rid()

    agmts_m1 = Agreements(m1, replica_m1.dn)
    agmts_m2 = Agreements(m2, replica_m2.dn)

    m1_m2 = get_agreement(agmts_m1, m2)
    m1_c1 = get_agreement(agmts_m1, c1)
    m1_c2 = get_agreement(agmts_m1, c2)
    m2_m1 = get_agreement(agmts_m2, m1)
    m2_c1 = get_agreement(agmts_m2, c1)
    m2_c2 = get_agreement(agmts_m2, c2)

    # Step 1: Generate ldif without replication data
    m1.stop()
    m2.stop()
    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
    m1.db2ldif(bename=DEFAULT_BENAME,
               suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None,
               repl_data=False,
               outputfile=ldif_file,
               encrypt=False)
    # Remove replication metadata that is still in the ldif
    # _remove_replication_data(ldif_file)

    # Step 2: Init both masters from that ldif
    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    m1.start()
    m2.start()

    # Step 3: Perform online init from master1 to consumer1
    #         and from master2 to consumer2
    m1_c1.begin_reinit()
    m2_c2.begin_reinit()
    (done, error) = m1_c1.wait_reinit()
    assert done is True
    assert error is False
    (done, error) = m2_c2.wait_reinit()
    assert done is True
    assert error is False

    # Step 4: Perform update on both masters
    repl.test_replication(m1, c1)
    repl.test_replication(m2, c2)

    # Step 5: Check that c1 RUV does not contain a URL towards m2
    ruv = replica_c1.get_ruv()
    log.debug(f"c1 RUV: {ruv}")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV")
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")
        log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
        # Note: this assertion fails if issue 2054 is not fixed.
        assert False

    # Step 6: Check that c2 RUV does contain a URL towards m2
    ruv = replica_c2.get_ruv()
    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
        assert False
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")

    # Step 7: Perform online init from master1 to master2
    m1_m2.begin_reinit()
    (done, error) = m1_m2.wait_reinit()
    assert done is True
    assert error is False

    # Step 8: Perform update on master2
    repl.test_replication(m2, c1)

    # Step 9: Check that c1 RUV does contain a URL towards m2
    ruv = replica_c1.get_ruv()
    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
    url = ruv._rid_url.get(replica_m2.get_rid())
    if url is None:
        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV")
        assert False
    else:
        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}")