示例#1
0
def test_create(topology_st):
    """Test backend creation and the errors raised on invalid creation

    :id: df55a60b-f4dd-4f18-975d-4b223e63091f
    :setup: Standalone instance
    :steps:
        1. Create a backend specifying properties with a name and a suffix
        2. Create a backend specifying no properties
        3. Create a backend specifying suffix that already exist
        4. Create a backend specifying existing backend name but new suffix
        5. Create a backend specifying no backend name
        6. Create a backend specifying no backend suffix
        7. Clean up the created backend
    :expectedresults:
        1. Backend should be created
        2. Unwilling to perform error should be raised
        3. Unwilling to perform error should be raised
        4. Unwilling to perform error should be raised
        5. Unwilling to perform error should be raised
        6. Unwilling to perform error should be raised
        7. Operation should be successful
    """

    backends = Backends(topology_st.standalone)

    log.info("Create a backend")
    backend = backends.create(properties={'cn': BACKEND_NAME_1,
                                          'nsslapd-suffix': NEW_SUFFIX_1_RDN})
    # Snapshot the backend count so failed creates below can be verified
    # to not have added anything.
    nb_backends = len(backends.list())

    log.info("Check behaviour with missing properties")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        backends.create()

    log.info("Check behaviour with already existing backend for that suffix")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        backends.create(properties={'cn': BACKEND_NAME_2,
                                    'nsslapd-suffix': NEW_SUFFIX_1_RDN})

    log.info("Check behaviour with already existing backend name, "
             "but new suffix")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        backends.create(properties={'cn': BACKEND_NAME_1,
                                    'nsslapd-suffix': NEW_SUFFIX_2_RDN})

    log.info("Create a backend without BACKEND_NAME")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        backends.create(properties={'nsslapd-suffix': NEW_SUFFIX_2_RDN})
    # Bug fix: these checks previously sat inside the pytest.raises block
    # after the raising call, where they could never execute.
    assert len(backends.list()) == nb_backends

    log.info("Create a backend without BACKEND_SUFFIX")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        backends.create(properties={'cn': BACKEND_NAME_1})
    assert len(backends.list()) == nb_backends

    log.info("Just make it clean in the end")
    backend.delete()
示例#2
0
def test_list(topology_st):
    """Test basic list method functionality

    :id: 084c0937-0b39-4e89-8561-081ae2b144c6
    :setup: Standalone instance
    :steps:
        1. List all backends
        2. Create a backend
        3. List all backends
        4. Create one more backend
        5. List all backends
        6. Clean up the created backends
    :expectedresults:
        1. Operation should be successful
        2. Backend should be created
        3. Created backend should be listed
        4. Backend should be created
        5. Created backend should be listed
        6. Operation should be successful
    """

    backends = Backends(topology_st.standalone)
    inst_log = topology_st.standalone.log

    # Record how many backends exist before we add ours.
    entries = backends.list()
    initial_count = len(entries)
    for entry in entries:
        inst_log.info("List(%d): backend %s" % (initial_count, entry.dn))

    log.info("Create a first backend and check list all backends")
    first_be = backends.create(properties={
        'cn': BACKEND_NAME_1,
        'nsslapd-suffix': NEW_SUFFIX_1_RDN
    })

    entries = backends.list()
    for entry in entries:
        inst_log.info("List(%d): backend %s" % (initial_count + 1, entry.dn))
    assert len(entries) == initial_count + 1

    log.info("Create a second backend and check list all backends")
    second_be = backends.create(properties={
        'cn': BACKEND_NAME_2,
        'nsslapd-suffix': NEW_SUFFIX_2_RDN
    })

    entries = backends.list()
    for entry in entries:
        inst_log.info("List(%d): backend %s" % (initial_count + 2, entry.dn))
    assert len(entries) == initial_count + 2

    log.info("Just make it clean in the end")
    first_be.delete()
    second_be.delete()
def enable_user_attr_encryption(topo, request):
    """Enable attribute encryption for employeeNumber (AES) and
    telephoneNumber (3DES) on the first backend, add a test user
    carrying both attributes, and register a finalizer that removes
    the encryption configuration again.
    """

    log.info("Enable TLS for attribute encryption")
    topo.standalone.enable_tls()

    log.info("Enables attribute encryption")
    first_backend = Backends(topo.standalone).list()[0]
    enc_attrs = EncryptedAttrs(topo.standalone,
                               basedn=f'cn=encrypted attributes,{first_backend.dn}')
    log.info("Enables attribute encryption for employeeNumber and telephoneNumber")
    emp_num_cfg = enc_attrs.create(properties={'cn': 'employeeNumber',
                                               'nsEncryptionAlgorithm': 'AES'})
    phone_cfg = enc_attrs.create(properties={'cn': 'telephoneNumber',
                                             'nsEncryptionAlgorithm': '3DES'})

    log.info("Add a test user with encrypted attributes")
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    test_user = users.create(properties=TEST_USER_PROPERTIES)
    test_user.replace('employeeNumber', '1000')
    test_user.replace('telephoneNumber', '1234567890')

    def fin():
        log.info("Remove attribute encryption for various attributes")
        emp_num_cfg.delete()
        phone_cfg.delete()

    request.addfinalizer(fin)
    return test_user
示例#4
0
def backend_monitor(inst, basedn, log, args):
    """Print monitor status for a single backend (``args.backend``) or,
    when none is named, for every backend on the instance."""
    backends = Backends(inst)
    # A named backend narrows the report to that one entry.
    targets = [backends.get(args.backend)] if args.backend else backends.list()
    for be in targets:
        _format_status(log, be.get_monitor(), args.json)
示例#5
0
def test_readonly_on_threshold(topo, setup, reset_logs):
    """Verify that nsslapd-disk-monitoring-readonly-on-threshold switches the server to read-only mode

    :id: 06814c19-ef3c-4800-93c9-c7c6e76fcbb9
    :customerscenario: True
    :setup: Standalone
    :steps:
        1. Verify that the backend is in read-only mode
        2. Go back above the threshold
        3. Verify that the backend is in read-write mode
    :expectedresults:
        1. Should Success
        2. Should Success
        3. Should Success
    """
    inst = topo.standalone
    fill_file = f'{inst.ds_paths.log_dir}/foo'
    backend_name = Backends(inst).list()[0].rdn
    # Verify that verbose logging was set to default level
    inst.deleteErrorLogs()
    assert inst.config.set('nsslapd-disk-monitoring', 'on')
    assert inst.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on')
    inst.restart()
    try:
        # Consume disk space so free space drops under the threshold.
        subprocess.call(['dd', 'if=/dev/zero', f'of={fill_file}',
                         'bs=1M', f'count={HALF_THR_FILL_SIZE}'])
        _witherrorlog(topo,
                      f"Putting the backend '{backend_name}' to read-only mode",
                      11)
        users = UserAccounts(inst, DEFAULT_SUFFIX)
        try:
            user = users.create_test_user()
            user.delete()
        except ldap.UNWILLING_TO_PERFORM as e:
            if 'database is read-only' not in str(e):
                raise
        # Free the space again and wait for the server to flip back.
        os.remove(fill_file)
        _witherrorlog(topo,
                      f"Putting the backend '{backend_name}' back to read-write mode",
                      11)
        user = users.create_test_user()
        assert user.exists()
        user.delete()
    finally:
        if os.path.exists(fill_file):
            os.remove(fill_file)
示例#6
0
def test_readonly_on_threshold_below_half_of_the_threshold(topo, setup, reset_logs):
    """Go below 1/2 of the threshold when readonly on threshold is enabled

    :id: 10262663-b41f-420e-a2d0-9532dd54fa7c
    :setup: Standalone
    :steps:
        1. Go straight below 1/2 of the threshold
        2. Verify that the backend is in read-only mode
        3. Go back above the threshold
        4. Verify that the backend is in read-write mode
    :expectedresults:
        1. Should Success
        2. Should Success
        3. Should Success
        4. Should Success
    """
    file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir)
    backends = Backends(topo.standalone)
    backend_name = backends.list()[0].rdn
    topo.standalone.deleteErrorLogs()
    assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on')
    assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on')
    topo.standalone.restart()
    try:
        # Fill enough space to go straight below half of the threshold,
        # padding the fill size when the threshold exceeds the default fill.
        if float(THRESHOLD) > FULL_THR_FILL_SIZE:
            FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1
            subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}'])
        else:
            subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}'])
        _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11)
        users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
        try:
            user = users.create_test_user()
            user.delete()
        except ldap.UNWILLING_TO_PERFORM as e:
            if 'database is read-only' not in str(e):
                raise
        _witherrorlog(topo, 'is too far below the threshold', 51)
        # Verify DS has recovered from shutdown
        os.remove(file_path)
        _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 51)
        user = users.create_test_user()
        assert user.exists()
        user.delete()
    finally:
        if os.path.exists(file_path):
            os.remove(file_path)
示例#7
0
def test_basic(topo, enable_user_attr_encryption):
    """Tests encrypted attributes with a test user entry

    :id: d767d5c8-b934-4b14-9774-bd13480d81b3
    :setup: Standalone instance
            Enable AES encryption config on employeenumber
            Enable 3DES encryption config on telephoneNumber
            Add a test user with encrypted attributes
    :steps:
         1. Restart the server
         2. Check employeenumber encryption enabled
         3. Check telephoneNumber encryption enabled
         4. Check that encrypted attribute is present for user i.e. telephonenumber
    :expectedresults:
         1. This should be successful
         2. This should be successful
         3. This should be successful
         4. This should be successful
    """

    log.info("Restart the server")
    topo.standalone.restart()
    first_backend = Backends(topo.standalone).list()[0]
    encrypted_attrs = first_backend.get_encrypted_attrs()

    log.info(
        "Extracting values of cn from the list of objects in encrypt_attrs")
    log.info("And appending the cn values in a list")
    # Collect the RDN (cn) of every configured encrypted attribute.
    enc_attrs_cns = [enc_attr.rdn for enc_attr in encrypted_attrs]

    log.info("Check employeenumber encryption is enabled")
    assert "employeeNumber" in enc_attrs_cns

    log.info("Check telephoneNumber encryption is enabled")
    assert "telephoneNumber" in enc_attrs_cns

    log.info(
        "Check that encrypted attribute is present for user i.e. telephonenumber"
    )
    assert enable_user_attr_encryption.present('telephoneNumber')
def test_attr_encryption_multiple_backends(topo, enable_user_attr_encryption):
    """Tests Configuration of attribute encryption for multiple backends
       Where both the backends have attribute encryption

    :id: 9ece3e6c-96b7-4dd5-b092-d76dda23472d
    :setup: Standalone instance
            SSL Enabled
    :steps:
         1. Add two test backends
         2. Configure attribute encryption for telephoneNumber in one test backend
         3. Configure attribute encryption for employeenumber in another test backend
         4. Add a test user in both backends with encrypted attributes
         5. Export data as ciphertext from both backends
         6. Check that telephoneNumber is encrypted in the ldif file of db1
         7. Check that employeeNumber is encrypted in the ldif file of db2
         8. Delete both test backends
    :expectedresults:
         1. This should be successful
         2. This should be successful
         3. This should be successful
         4. This should be successful
         5. This should be successful
         6. This should be successful
         7. This should be successful
         8. This should be successful
    """
    log.info("Add two test backends")
    test_suffix1 = 'dc=test1,dc=com'
    test_db1 = 'test_db1'
    test_suffix2 = 'dc=test2,dc=com'
    test_db2 = 'test_db2'

    # Create backends (the previously-present unused lookup of the first
    # backend has been removed)
    backends = Backends(topo.standalone)
    test_backend1 = backends.create(properties={'cn': test_db1,
                                                'nsslapd-suffix': test_suffix1})
    test_backend2 = backends.create(properties={'cn': test_db2,
                                                'nsslapd-suffix': test_suffix2})

    # Create the top of the tree
    suffix1 = Domain(topo.standalone, test_suffix1)
    test1 = suffix1.create(properties={'dc': 'test1'})
    suffix2 = Domain(topo.standalone, test_suffix2)
    test2 = suffix2.create(properties={'dc': 'test2'})

    log.info("Enables attribute encryption for telephoneNumber in test_backend1")
    backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn))
    backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber',
                                              'nsEncryptionAlgorithm': 'AES'})

    log.info("Enables attribute encryption for employeeNumber in test_backend2")
    backend2_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend2.dn))
    backend2_encrypt_attrs.create(properties={'cn': 'employeeNumber',
                                              'nsEncryptionAlgorithm': 'AES'})

    log.info("Add a test user with encrypted attributes in both backends")
    users = UserAccounts(topo.standalone, test1.dn, None)
    test_user = users.create(properties=TEST_USER_PROPERTIES)
    test_user.replace('telephoneNumber', '1234567890')

    users = UserAccounts(topo.standalone, test2.dn, None)
    test_user = users.create(properties=TEST_USER_PROPERTIES)
    test_user.replace('employeeNumber', '1000')

    log.info("Export data as ciphertext from both backends")
    export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif")
    export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif")

    # Offline export: with encrypt=False the stored values are dumped as-is;
    # the assertions below verify the plaintext does not appear in the LDIF.
    topo.standalone.stop()
    if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,),
                                   excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1):
        log.fatal('Failed to run offline db2ldif')
        assert False

    if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,),
                                   excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2):
        log.fatal('Failed to run offline db2ldif')
        assert False
    topo.standalone.start()

    log.info("Check that the attribute is present in the exported file in db1")
    log.info("Check that the encrypted value of attribute is not present in the exported file in db1")
    with open(export_db1, 'r') as ldif_file:
        ldif = ldif_file.read()
        assert 'telephoneNumber' in ldif
        assert 'telephoneNumber: 1234567890' not in ldif

    log.info("Check that the attribute is present in the exported file in db2")
    log.info("Check that the encrypted value of attribute is not present in the exported file in db2")
    with open(export_db2, 'r') as ldif_file:
        ldif = ldif_file.read()
        assert 'employeeNumber' in ldif
        assert 'employeeNumber: 1000' not in ldif

    log.info("Delete test backends")
    test_backend1.delete()
    test_backend2.delete()
示例#9
0
def db_monitor(inst, basedn, log, args):
    """Report on all the database statistics

    Collects the global LDBM monitor statistics (DB cache and normalized
    DN cache) plus per-backend entry/DN cache statistics (and per-index
    DB file counters when ``args.indexes`` is set), then prints the
    report either as JSON (``args.json``) or as formatted log lines.

    :param inst: instance to query
    :param basedn: unused here; kept for the common CLI handler signature
    :param log: logger the report is written to
    :param args: parsed CLI arguments; uses ``backends``, ``indexes``, ``json``
    :raises ValueError: when ``args.backends`` matches no existing backend
    """
    ldbm_monitor = MonitorLDBM(inst)
    backends_obj = Backends(inst)
    backend_objs = []
    args_backends = None

    # Gather all the backends
    if args.backends is not None:
        # This is a space separated list, it could be backend names or suffixes
        args_backends = args.backends.lower().split()

    for be in backends_obj.list():
        if args_backends is not None:
            for arg_be in args_backends:
                if '=' in arg_be:
                    # We have a suffix
                    if arg_be == be.get_suffix():
                        backend_objs.append(be)
                        break
                else:
                    # We have a backend name
                    if arg_be == be.rdn.lower():
                        backend_objs.append(be)
                        break
        else:
            # Get all the backends
            backend_objs.append(be)

    if args_backends is not None and len(backend_objs) == 0:
        raise ValueError("Could not find any backends from the provided list: {}".format(args.backends))

    # Gather the global DB stats
    report_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    ldbm_mon = ldbm_monitor.get_status()
    dbcachesize = int(ldbm_mon['nsslapd-db-cache-size-bytes'][0])
    # The page size attribute may be absent on some servers; fall back to
    # the compiled-in default.
    if 'nsslapd-db-page-size' in ldbm_mon:
        pagesize = int(ldbm_mon['nsslapd-db-page-size'][0])
    else:
        pagesize = 8 * 1024  # Taken from DBLAYER_PAGESIZE
    dbhitratio = ldbm_mon['dbcachehitratio'][0]
    dbcachepagein = ldbm_mon['dbcachepagein'][0]
    dbcachepageout = ldbm_mon['dbcachepageout'][0]
    dbroevict = ldbm_mon['nsslapd-db-page-ro-evict-rate'][0]
    dbpages = int(ldbm_mon['nsslapd-db-pages-in-use'][0])
    # Free DB cache = configured size minus space occupied by in-use pages.
    # NOTE(review): assumes dbcachesize > 0, otherwise the ratio below
    # divides by zero — confirm the monitor never reports a zero size.
    dbcachefree = int(dbcachesize - (pagesize * dbpages))
    dbcachefreeratio = dbcachefree/dbcachesize
    ndnratio = ldbm_mon['normalizeddncachehitratio'][0]
    ndncursize = int(ldbm_mon['currentnormalizeddncachesize'][0])
    ndnmaxsize = int(ldbm_mon['maxnormalizeddncachesize'][0])
    ndncount = ldbm_mon['currentnormalizeddncachecount'][0]
    ndnevictions = ldbm_mon['normalizeddncacheevictions'][0]
    # Report zero free space rather than a negative value when the NDN
    # cache currently exceeds its configured maximum.
    if ndncursize > ndnmaxsize:
        ndnfree = 0
        ndnfreeratio = 0
    else:
        ndnfree = ndnmaxsize - ndncursize
        ndnfreeratio = "{:.1f}".format(ndnfree / ndnmaxsize * 100)

    # Build global cache stats
    result = {
        'date': report_time,
        'dbcache': {
            'hit_ratio': dbhitratio,
            'free': convert_bytes(str(dbcachefree)),
            'free_percentage': "{:.1f}".format(dbcachefreeratio * 100),
            'roevicts': dbroevict,
            'pagein': dbcachepagein,
            'pageout': dbcachepageout
        },
        'ndncache': {
            'hit_ratio': ndnratio,
            'free': convert_bytes(str(ndnfree)),
            'free_percentage': ndnfreeratio,
            'count': ndncount,
            'evictions': ndnevictions
        },
        'backends': {},
    }

    # Build the backend results
    for be in backend_objs:
        be_name = be.rdn
        be_suffix = be.get_suffix()
        monitor = be.get_monitor()
        all_attrs = monitor.get_status()

        # Process entry cache stats
        # NOTE(review): assumes maxentrycachesize > 0 — a zero maximum
        # would divide by zero below.
        entcur = int(all_attrs['currententrycachesize'][0])
        entmax = int(all_attrs['maxentrycachesize'][0])
        entcnt = int(all_attrs['currententrycachecount'][0])
        entratio = all_attrs['entrycachehitratio'][0]
        entfree = entmax - entcur
        entfreep = "{:.1f}".format(entfree / entmax * 100)
        # Average entry size; guard against an empty cache.
        if entcnt == 0:
            entsize = 0
        else:
            entsize = int(entcur / entcnt)

        # Process DN cache stats
        dncur = int(all_attrs['currentdncachesize'][0])
        dnmax = int(all_attrs['maxdncachesize'][0])
        dncnt = int(all_attrs['currentdncachecount'][0])
        dnratio = all_attrs['dncachehitratio'][0]
        dnfree = dnmax - dncur
        dnfreep = "{:.1f}".format(dnfree / dnmax * 100)
        # Average DN size; guard against an empty cache.
        if dncnt == 0:
            dnsize = 0
        else:
            dnsize = int(dncur / dncnt)

        # Build the backend result
        result['backends'][be_name] = {
            'suffix': be_suffix,
            'entry_cache_count': all_attrs['currententrycachecount'][0],
            'entry_cache_free': convert_bytes(str(entfree)),
            'entry_cache_free_percentage': entfreep,
            'entry_cache_size': convert_bytes(str(entsize)),
            'entry_cache_hit_ratio': entratio,
            'dn_cache_count': all_attrs['currentdncachecount'][0],
            'dn_cache_free': convert_bytes(str(dnfree)),
            'dn_cache_free_percentage': dnfreep,
            'dn_cache_size': convert_bytes(str(dnsize)),
            'dn_cache_hit_ratio': dnratio,
            'indexes': []
        }

        # Process indexes if requested
        if args.indexes:
            index = {}
            index_name = ''
            for attr, val in all_attrs.items():
                if attr.startswith('dbfile'):
                    if attr.startswith("dbfilename-"):
                        # A new dbfilename marks the start of the next index;
                        # flush the previous index dict (if any) first.
                        if index_name != '':
                            # Update backend index list
                            result['backends'][be_name]['indexes'].append(index)
                        # Value appears to be '<backend>/<index file>' — keep
                        # the file part as the index name.
                        index_name = val[0].split('/')[1]
                        index = {'name': index_name}
                    elif attr.startswith('dbfilecachehit-'):
                        index['cachehit'] = val[0]
                    elif attr.startswith('dbfilecachemiss-'):
                        index['cachemiss'] = val[0]
                    elif attr.startswith('dbfilepagein-'):
                        index['pagein'] = val[0]
                    elif attr.startswith('dbfilepageout-'):
                        index['pageout'] = val[0]
            # Flush the trailing index gathered by the loop above.
            if index_name != '':
                # Update backend index list
                result['backends'][be_name]['indexes'].append(index)

    # Return the report
    if args.json:
        log.info(json.dumps(result, indent=4))
    else:
        log.info("DB Monitor Report: " + result['date'])
        log.info("--------------------------------------------------------")
        log.info("Database Cache:")
        log.info(" - Cache Hit Ratio:     {}%".format(result['dbcache']['hit_ratio']))
        log.info(" - Free Space:          {}".format(result['dbcache']['free']))
        log.info(" - Free Percentage:     {}%".format(result['dbcache']['free_percentage']))
        log.info(" - RO Page Drops:       {}".format(result['dbcache']['roevicts']))
        log.info(" - Pages In:            {}".format(result['dbcache']['pagein']))
        log.info(" - Pages Out:           {}".format(result['dbcache']['pageout']))
        log.info("")
        log.info("Normalized DN Cache:")
        log.info(" - Cache Hit Ratio:     {}%".format(result['ndncache']['hit_ratio']))
        log.info(" - Free Space:          {}".format(result['ndncache']['free']))
        log.info(" - Free Percentage:     {}%".format(result['ndncache']['free_percentage']))
        log.info(" - DN Count:            {}".format(result['ndncache']['count']))
        log.info(" - Evictions:           {}".format(result['ndncache']['evictions']))
        log.info("")
        log.info("Backends:")
        for be_name, attr_dict in result['backends'].items():
            log.info(f"  - {attr_dict['suffix']} ({be_name}):")
            log.info("    - Entry Cache Hit Ratio:        {}%".format(attr_dict['entry_cache_hit_ratio']))
            log.info("    - Entry Cache Count:            {}".format(attr_dict['entry_cache_count']))
            log.info("    - Entry Cache Free Space:       {}".format(attr_dict['entry_cache_free']))
            log.info("    - Entry Cache Free Percentage:  {}%".format(attr_dict['entry_cache_free_percentage']))
            log.info("    - Entry Cache Average Size:     {}".format(attr_dict['entry_cache_size']))
            log.info("    - DN Cache Hit Ratio:           {}%".format(attr_dict['dn_cache_hit_ratio']))
            log.info("    - DN Cache Count:               {}".format(attr_dict['dn_cache_count']))
            log.info("    - DN Cache Free Space:          {}".format(attr_dict['dn_cache_free']))
            log.info("    - DN Cache Free Percentage:     {}%".format(attr_dict['dn_cache_free_percentage']))
            log.info("    - DN Cache Average Size:        {}".format(attr_dict['dn_cache_size']))
            if len(result['backends'][be_name]['indexes']) > 0:
                log.info("    - Indexes:")
                for index in result['backends'][be_name]['indexes']:
                    log.info("      - Index:      {}".format(index['name']))
                    log.info("      - Cache Hit:  {}".format(index['cachehit']))
                    log.info("      - Cache Miss: {}".format(index['cachemiss']))
                    log.info("      - Page In:    {}".format(index['pagein']))
                    log.info("      - Page Out:   {}".format(index['pageout']))
                    log.info("")
            log.info("")
示例#10
0
def test_chaining_paged_search(topology):
    """ Test paged search through the chaining db. This
    would cause a SIGSEGV with paged search which could
    be triggered by SSSD.

    :id: 7b29b1f5-26cf-49fa-9fe7-ee29a1408633
    :setup: Two standalones in chaining.
    :steps:
        1. Configure chaining between the nodes
        2. Do a chaining search (no page) to assert it works
        3. Do a paged search through chaining.

    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    upstream = topology.ins["standalone1"]
    farm = topology.ins["standalone2"]

    # Topology: upstream (standalone1) chains to farm (standalone2).

    # Remove every existing backend on the upstream instance.
    for backend in Backends(upstream).list():
        backend.delete()

    # Turn on the chaining backend plugin on the upstream.
    ChainingBackendPlugin(upstream).enable()

    # Point the upstream at the farm server.
    ChainingLinks(upstream).create(properties={
        'cn': 'demochain',
        'nsslapd-suffix': DEFAULT_SUFFIX,
        'nsmultiplexorbinddn': '',
        'nsmultiplexorcredentials': '',
        'nsfarmserverurl': farm.toLDAPURL(),
    })

    # Due to a bug in lib389, we need to delete and recreate the mt.
    mts = MappingTrees(upstream)
    for mt in mts.list():
        mt.delete()
    mts.ensure_state(properties={
        'cn': DEFAULT_SUFFIX,
        'nsslapd-state': 'backend',
        'nsslapd-backend': 'demochain',
    })
    # Restart to enable
    upstream.restart()

    # Bind anonymously.
    anon_conn = Account(upstream, dn='').bind(password='')

    # Unpaged search from the upstream through to the farm must work.
    accounts = Accounts(anon_conn, DEFAULT_SUFFIX)
    assert len(accounts.list()) > 0

    # Allow time to attach lldb if needed.
    # import time
    # print("🔥🔥🔥")
    # time.sleep(45)

    # The paged search must also succeed (this used to SIGSEGV the server).
    assert len(accounts.list(paged_search=2, paged_critical=False)) > 0
def test_chaining_paged_search(topology):
    """ Check that when the chaining target has anonymous access
    disabled that the ping still functions and allows the search
    to continue with an appropriate bind user.

    :id: 00bf31db-d93b-4224-8e70-86abb2d4cd17
    :setup: Two standalones in chaining.
    :steps:
        1. Configure chaining between the nodes
        2. Do a chaining search (w anon allow) to assert it works
        3. Configure anon dis allowed on st2
        4. Restart both
        5. Check search still works

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    # NOTE(review): this function reuses the name of an earlier test in this
    # file; at import time this definition shadows the earlier one, so only
    # this test would be collected — consider renaming one of them.
    st1 = topology.ins["standalone1"]
    st2 = topology.ins["standalone2"]

    ### We setup so that st1 -> st2

    # Setup a chaining user on st2 to authenticate to.
    sa = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = {
        'cn': 'sa',
        'userPassword': PW
    })

    # Add a proxy user.
    sproxy = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = {
        'cn': 'proxy',
        'userPassword': PW
    })

    # Add the read and proxy ACI
    dc = Domain(st2, DEFAULT_SUFFIX)
    dc.add('aci',
        f"""(targetattr="objectClass || cn || uid")(version 3.0; acl "Enable sa read"; allow (read, search, compare)(userdn="ldap:///{sa.dn}");)"""
    )
    # Add the proxy ACI
    dc.add('aci',
        f"""(targetattr="*")(version 3.0; acl "Enable proxy access"; allow (proxy)(userdn="ldap:///{sproxy.dn}");)"""
    )

    # Clear all the BE in st1
    bes1 = Backends(st1)
    for be in bes1.list():
        be.delete()

    # Setup st1 to chain to st2
    chain_plugin_1 = ChainingBackendPlugin(st1)
    chain_plugin_1.enable()

    # Chain with the proxy user.
    chains = ChainingLinks(st1)
    chain = chains.create(properties={
        'cn': 'demochain',
        'nsfarmserverurl': st2.toLDAPURL(),
        'nsslapd-suffix': DEFAULT_SUFFIX,
        'nsmultiplexorbinddn': sproxy.dn,
        'nsmultiplexorcredentials': PW,
        'nsCheckLocalACI': 'on',
        'nsConnectionLife': '30',
    })

    mts = MappingTrees(st1)
    # Due to a bug in lib389, we need to delete and recreate the mt.
    for mt in mts.list():
        mt.delete()
    mts.ensure_state(properties={
        'cn': DEFAULT_SUFFIX,
        'nsslapd-state': 'backend',
        'nsslapd-backend': 'demochain',
        'nsslapd-distribution-plugin': 'libreplication-plugin',
        'nsslapd-distribution-funct': 'repl_chain_on_update',
    })

    # Enable pwpolicy (Not sure if part of the issue).
    st1.config.set('passwordIsGlobalPolicy', 'on')
    st2.config.set('passwordIsGlobalPolicy', 'on')

    # Restart to enable everything.
    st1.restart()

    # Get a proxy auth connection.
    sa1 = ServiceAccount(st1, sa.dn)
    sa1_conn = sa1.bind(password=PW)

    # Now do a search from st1 -> st2
    sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX)
    assert sa1_dc.exists()

    # Now on st2 disable anonymous access.
    st2.config.set('nsslapd-allow-anonymous-access', 'rootdse')

    # Stop st2 to force the connection to be dead.
    st2.stop()
    # Restart st1 - this means it must re-do the ping/keepalive.
    st1.restart()

    # do a bind - this should fail, and forces the conn offline.
    with pytest.raises(ldap.OPERATIONS_ERROR):
        sa1.bind(password=PW)

    # Allow time to attach lldb if needed.
    # print("🔥🔥🔥")
    # time.sleep(45)

    # Bring st2 online.
    st2.start()

    # Wait a bit
    time.sleep(5)

    # Get a proxy auth connection (again)
    sa1_conn = sa1.bind(password=PW)
    # Now do a search from st1 -> st2
    sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX)
    assert sa1_dc.exists()