Example #1
    def swap(self):
        """Make the conflict entry the real valid entry.  Delete old valid entry,
        and rename the conflict
        """

        # Get the conflict entry info
        conflict_value = self.get_attr_val_utf8('nsds5ReplConflict')
        entry_dn = conflict_value.split(' ', 2)[2]
        entry_rdn = ldap.explode_dn(entry_dn, 1)[0]

        # Gather the RDN details
        rdn_attr = entry_dn.split('=', 1)[0]
        new_rdn = "{}={}".format(rdn_attr, entry_rdn)
        tmp_rdn = new_rdn + 'tmp'

        # Delete valid entry and its children (to be replaced by conflict entry)
        original_entry = DSLdapObject(self._instance, dn=entry_dn)
        original_entry._protected = False
        filterstr = "(|(objectclass=*)(objectclass=ldapsubentry))"
        ents = self._instance.search_s(original_entry._dn, ldap.SCOPE_SUBTREE, filterstr, escapehatch='i am sure')
        for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True):
            self._instance.delete_ext_s(ent.dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')

        # Rename conflict entry to tmp rdn so we can clean up the rdn attr
        self.rename(tmp_rdn, deloldrdn=False)

        # Cleanup entry
        self.remove(rdn_attr, entry_rdn)
        if self.present('objectclass', 'ldapsubentry'):
            self.remove('objectclass', 'ldapsubentry')
        self.remove_all('nsds5ReplConflict')

        # Rename to the final/correct rdn
        self.rename(new_rdn, deloldrdn=True)
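
A minimal usage sketch for the method above. It assumes the swap() shown here belongs to lib389's ConflictEntry class, that a ConflictEntries collection exists for listing conflicts under a suffix, and that a topology_st fixture is available; all three names are assumptions rather than facts taken from this snippet.

# Hypothetical usage sketch (see the assumptions noted above)
from lib389._constants import DEFAULT_SUFFIX
from lib389.replica import ConflictEntries  # assumed location of the conflict classes

conflicts = ConflictEntries(topology_st.standalone, DEFAULT_SUFFIX).list()
for conflict in conflicts:
    # Promote the conflict entry: delete the surviving valid entry, then
    # rename the conflict into its place, as swap() implements above.
    conflict.swap()
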
Example #2
def test_log_base_dn_when_invalid_attr_request(topology_st,
                                               disable_access_log_buffering):
    """Test that DS correctly logs the base dn when a search with invalid attribute request is performed

    :id: 859de962-c261-4ffb-8705-97bceab1ba2c
    :setup: Standalone instance
    :steps:
         1. Disable the accesslog-logbuffering config parameter
         2. Delete the previous access log
         3. Perform a base search on the DEFAULT_SUFFIX, using ten empty attribute requests
         4. Check the access log file for 'invalid attribute request'
         5. Check the access log file for 'SRCH base="\(null\)"'
         6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"'
    :expectedresults:
         1. Operations are visible in the access log in real time
         2. Fresh new access log is created
         3. The search operation raises a Protocol error
         4. The access log should have an 'invalid attribute request' message
         5. The access log should not have "\(null\)" as value for the Search base dn
         6. The access log should have the value of DEFAULT_SUFFIX as Search base dn
    """

    entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX)

    log.info('delete the previous access logs to get a fresh new one')
    topology_st.standalone.deleteAccessLogs()

    log.info(
        "Search the default suffix, with invalid '\"\" \"\"' attribute request"
    )
    log.info(
        "A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028"
    )
    # An ldap.PROTOCOL_ERROR exception is expected after 10 empty values
    with pytest.raises(ldap.PROTOCOL_ERROR):
        assert entry.get_attrs_vals_utf8(
            ['', '', '', '', '', '', '', '', '', '', ''])

    # Search for appropriate messages in the access log
    log.info('Check the access logs for correct messages')
    # We should find the 'invalid attribute request' information
    assert topology_st.standalone.ds_access_log.match(
        r'.*invalid attribute request.*')
    # We should not find a "(null)" base dn mention
    assert not topology_st.standalone.ds_access_log.match(
        r'.*SRCH base="\(null\)".*')
    # We should find the base dn for the search
    assert topology_st.standalone.ds_access_log.match(
        r'.*SRCH base="{}".*'.format(DEFAULT_SUFFIX))
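
For contrast with the failing request above, a short sketch of a normal attribute request through the same API; in lib389, get_attrs_vals_utf8() returns a dict keyed by attribute name. The fixture and constants are the ones already used in this test.

entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX)
vals = entry.get_attrs_vals_utf8(['objectClass', 'dc'])
for attr, values in vals.items():
    log.info("%s: %s", attr, values)
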
Example #3
 def fin():
     """
     Deletes entries after the test.
     """
     for scope_scope in [CONTAINER_1_DELADD, CONTAINER_2_DELADD, PEOPLE]:
         try:
             DSLdapObject(topo.standalone, scope_scope).delete()
         except ldap.NO_SUCH_OBJECT as eoor_eoor:
             topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__)
Example #4
    def get_valid_entry(self):
        """Get the conflict entry's valid counterpart entry
        """
        # Get the conflict entry info
        conflict_value = self.get_attr_val_utf8('nsds5ReplConflict')
        entry_dn = conflict_value.split(' ', 2)[2]

        # Get the valid entry
        return DSLdapObject(self._instance, dn=entry_dn)
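
A companion sketch to the one after Example #1, under the same ConflictEntry/ConflictEntries assumptions: look up each conflict's valid counterpart before deciding whether to swap.

from lib389._constants import DEFAULT_SUFFIX
from lib389.replica import ConflictEntries  # assumed location, as above

for conflict in ConflictEntries(topology_st.standalone, DEFAULT_SUFFIX).list():
    valid = conflict.get_valid_entry()
    print("conflict:", conflict.dn, "valid counterpart:", valid.dn)
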
Example #5
 def _lint_search(self):
     """Perform a search and make sure an entry is accessible
     """
     dn = self.get_attr_val_utf8('nsslapd-suffix')
     bename = self.lint_uid()
     suffix = DSLdapObject(self._instance, dn=dn)
     try:
         suffix.get_attr_val('objectclass')
     except ldap.NO_SUCH_OBJECT:
         # backend root entry not created yet
         DSBLE0003['items'] = [
             dn,
         ]
         DSBLE0003['check'] = f'backends:{bename}:search'
         yield DSBLE0003
     except ldap.LDAPError as e:
         # Some other error
         DSBLE0002['detail'] = DSBLE0002['detail'].replace('ERROR', str(e))
         DSBLE0002['check'] = f'backends:{bename}:search'
         DSBLE0002['items'] = [
             dn,
         ]
         yield DSBLE0002
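
A hedged sketch of consuming this check directly. It assumes the method above belongs to a lib389 Backend object and that the Backends collection from lib389.backend can enumerate them; the topo fixture is also an assumption.

import logging
from lib389.backend import Backends

log = logging.getLogger(__name__)

for be in Backends(topo.standalone).list():
    # _lint_search() is a generator: it yields a report dict only when it finds a problem
    for report in be._lint_search():
        log.info("lint check %s flagged %s", report['check'], report['items'])
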
Example #6
 def set(self, value_pairs):
     for attr, val in value_pairs:
         attr = attr.lower()
         if attr in self._global_attrs:
             global_config = DSLdapObject(self._instance, dn=self._dn)
             global_config.replace(attr, val)
         elif attr in self._db_attrs['bdb']:
             db_config = DSLdapObject(self._instance, dn=self._db_dn)
             db_config.replace(attr, val)
         elif attr in self._db_attrs['lmdb']:
             pass
         else:
             # Unknown attribute
             raise ValueError(
                 "Can not update database configuration with unknown attribute: "
                 + attr)
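
A brief usage sketch, assuming this is the set() method of the DatabaseConfig class shown in Examples #12 and #13 (the import path and the topo fixture are also assumptions): value_pairs is a list of (attribute, value) tuples, and each attribute is routed to either the global entry or the bdb-specific entry.

from lib389.backend import DatabaseConfig  # assumed import path

db_cfg = DatabaseConfig(topo.standalone)
db_cfg.set([('nsslapd-lookthroughlimit', '5000'),    # global attribute
            ('nsslapd-dbcachesize', '268435456')])   # bdb-specific attribute
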
Example #7
def change_conf_attr(topology_st, suffix, attr_name, attr_value):
    """Change configurational attribute in the given suffix.

    Returns previous attribute value.
    """

    entry = DSLdapObject(topology_st.standalone, suffix)

    attr_value_bck = entry.get_attr_val_bytes(attr_name)
    log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' %
             (attr_name, attr_value, attr_value_bck, suffix))
    if attr_value is None:
        entry.remove_all(attr_name)
    else:
        entry.replace(attr_name, attr_value)
    return attr_value_bck
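
A minimal save/restore sketch built on the helper above; the DN and attribute are illustrative choices, not taken from the original tests.

backup = change_conf_attr(topology_st, 'cn=config', 'nsslapd-sizelimit', '100')
try:
    pass  # run the scenario that needs the lowered limit here
finally:
    # Restore the previous value captured by the helper (bytes from get_attr_val_bytes)
    change_conf_attr(topology_st, 'cn=config', 'nsslapd-sizelimit', backup)
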
Example #8
def test_cache_autosize_invalid_values(topo, invalid_value):
    """Check that we can't set invalid values to autosize attributes

    :id: 2f0d01b5-ca91-4dc2-97bc-ad0ac8d08633
    :parametrized: yes
    :setup: Standalone instance
    :steps:
        1. Stop the instance
        2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config:
           nsslapd-cache-autosize and nsslapd-cache-autosize-split
           to invalid values like (-2, 102, invalid_str)
        3. Try to start the instance
    :expectedresults:
        1. The instance should stop successfully
        2. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set
        3. Starting the instance should fail
    """

    config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
    bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
    if ds_is_older('1.4.2'):
        autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
        autosize_split_val = config_ldbm.get_attr_val(
            'nsslapd-cache-autosize-split')
    else:
        autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
        autosize_split_val = bdb_config_ldbm.get_attr_val(
            'nsslapd-cache-autosize-split')

    log.info("Set nsslapd-cache-autosize-split to {}".format(invalid_value))
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        config_ldbm.set('nsslapd-cache-autosize-split', invalid_value)
        topo.standalone.restart()
    config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val)

    log.info("Set nsslapd-cache-autosize to {}".format(invalid_value))
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        config_ldbm.set('nsslapd-cache-autosize', invalid_value)
        topo.standalone.restart()
    config_ldbm.remove('nsslapd-cache-autosize', autosize_val)
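
The invalid_value argument is supplied by pytest parametrization (:parametrized: yes). A hedged sketch of the kind of decorator that would sit above this test, using the values named in the docstring; the exact upstream decorator may differ.

@pytest.mark.parametrize("invalid_value", ('-2', '102', 'invalid_str'))
def test_cache_autosize_invalid_values(topo, invalid_value):
    ...
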
Example #9
def test_cache_autosize_basic_sane(topo, autosize_split):
    """Check that autotuning cachesizes works properly with different values

    :id: 9dc363ef-f551-446d-8b83-8ac45dabb8df
    :parametrized: yes
    :setup: Standalone instance
    :steps:
        1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config:
           nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs:
           ('0', '0'), ('0', ''), ('0', '40')
           '' - for deleting the value (set to default)
        2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config:
           nsslapd-dbcachesize: 0 and some sane value
        3. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config:
           nsslapd-cachememsize: 0 and some sane value
        4. Restart the instance
        5. Check nsslapd-dbcachesize and nsslapd-cachememsize
    :expectedresults:
        1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set
        2. nsslapd-dbcachesize are successfully set
        3. nsslapd-cachememsize are successfully set
        4. The instance should be successfully restarted
        5. nsslapd-dbcachesize and nsslapd-cachememsize should be set
           to a value greater than 512KB
    """

    config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
    bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
    userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM)
    config_ldbm.set('nsslapd-cache-autosize', '0')

    # Test with caches with both real values and 0
    for cachesize in ('0', '33333333'):
        if ds_is_older('1.4.2'):
            dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
            autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
            autosize_split_val = config_ldbm.get_attr_val(
                'nsslapd-cache-autosize-split')
        else:
            dbcachesize_val = bdb_config_ldbm.get_attr_val(
                'nsslapd-dbcachesize')
            autosize_val = bdb_config_ldbm.get_attr_val(
                'nsslapd-cache-autosize')
            autosize_split_val = bdb_config_ldbm.get_attr_val(
                'nsslapd-cache-autosize-split')

        cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
        dncachenensize_val = userroot_ldbm.get_attr_val(
            'nsslapd-dncachememsize')

        log.info(
            "Check nsslapd-dbcachesize and nsslapd-cachememsize before the test"
        )
        log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
        log.info("nsslapd-cachememsize == {}".format(cachenensize_val))
        log.info("nsslapd-cache-autosize == {}".format(autosize_val))
        log.info(
            "nsslapd-cache-autosize-split == {}".format(autosize_split_val))

        if autosize_split:
            log.info("Set nsslapd-cache-autosize-split to {}".format(
                autosize_split))
            config_ldbm.set('nsslapd-cache-autosize-split', autosize_split)
        else:
            log.info("Delete nsslapd-cache-autosize-split")
            try:
                config_ldbm.remove('nsslapd-cache-autosize-split',
                                   autosize_split_val)
            except ValueError:
                log.info("nsslapd-cache-autosize-split wasn't found")

        log.info("Set nsslapd-dbcachesize to {}".format(cachesize))
        config_ldbm.set('nsslapd-dbcachesize', cachesize)
        log.info("Set nsslapd-cachememsize to {}".format(cachesize))
        userroot_ldbm.set('nsslapd-cachememsize', cachesize)
        topo.standalone.restart()

        if ds_is_older('1.4.2'):
            dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
            autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
            autosize_split_val = config_ldbm.get_attr_val(
                'nsslapd-cache-autosize-split')
        else:
            dbcachesize_val = bdb_config_ldbm.get_attr_val(
                'nsslapd-dbcachesize')
            autosize_val = bdb_config_ldbm.get_attr_val(
                'nsslapd-cache-autosize')
            autosize_split_val = bdb_config_ldbm.get_attr_val(
                'nsslapd-cache-autosize-split')

        cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
        dncachenensize_val = userroot_ldbm.get_attr_val(
            'nsslapd-dncachememsize')

        log.info(
            "Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range."
        )
        log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
        log.info("nsslapd-cachememsize == {}".format(cachenensize_val))
        log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val))
        log.info("nsslapd-cache-autosize == {}".format(autosize_val))
        log.info(
            "nsslapd-cache-autosize-split == {}".format(autosize_split_val))
        assert int(dbcachesize_val) >= 512000
        assert int(cachenensize_val) >= 512000
        assert int(dncachenensize_val) >= 512000
Example #10
def test_cache_autosize_non_zero(topo, autosize, autosize_split):
    """Check that autosizing works works properly in different combinations

    :id: 83fa099c-a6c9-457a-82db-0982b67e8598
    :parametrized: yes
    :setup: Standalone instance
    :steps:
        1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config:
           nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs:
           ('', ''), ('', '0'), ('10', '40'), ('', '40'),
           ('10', ''), ('10', '40'), ('10', '0')
           '' - for deleting the value (set to default)
        2. Try to modify nsslapd-dbcachesize and nsslapd-cachememsize to
           some real value, it should be rejected
        3. Restart the instance
        4. Check nsslapd-dbcachesize and nsslapd-cachememsize
    :expectedresults:
        1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set
        2. Modify operation should be rejected
        3. The instance should be successfully restarted
        4. nsslapd-dbcachesize and nsslapd-cachememsize should be set
           to a value greater than 512KB
    """

    config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
    bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
    userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM)

    cachesize = '33333333'

    if ds_is_older('1.4.2'):
        dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
        autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
        autosize_split_val = config_ldbm.get_attr_val(
            'nsslapd-cache-autosize-split')
    else:
        dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
        autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
        autosize_split_val = bdb_config_ldbm.get_attr_val(
            'nsslapd-cache-autosize-split')

    cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
    dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')

    log.info(
        "Check nsslapd-dbcachesize and nsslapd-cachememsize before the test")
    log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
    log.info("nsslapd-cachememsize == {}".format(cachenensize_val))
    log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val))
    log.info("nsslapd-cache-autosize == {}".format(autosize_val))
    log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val))

    if autosize:
        log.info("Set nsslapd-cache-autosize to {}".format(autosize))
        config_ldbm.set('nsslapd-cache-autosize', autosize)
    else:
        log.info("Delete nsslapd-cache-autosize")
        try:
            config_ldbm.remove('nsslapd-cache-autosize', autosize_val)
        except ValueError:
            log.info("nsslapd-cache-autosize wasn't found")

    if autosize_split:
        log.info(
            "Set nsslapd-cache-autosize-split to {}".format(autosize_split))
        config_ldbm.set('nsslapd-cache-autosize-split', autosize_split)
    else:
        log.info("Delete nsslapd-cache-autosize-split")
        try:
            config_ldbm.remove('nsslapd-cache-autosize-split',
                               autosize_split_val)
        except ValueError:
            log.info("nsslapd-cache-autosize-split wasn't found")

    log.info("Trying to set nsslapd-cachememsize to {}".format(cachesize))
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        userroot_ldbm.set('nsslapd-cachememsize', cachesize)
    log.info("Trying to set nsslapd-dbcachesize to {}".format(cachesize))
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        config_ldbm.set('nsslapd-dbcachesize', cachesize)
    topo.standalone.restart()

    if ds_is_older('1.4.2'):
        dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
        autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
        autosize_split_val = config_ldbm.get_attr_val(
            'nsslapd-cache-autosize-split')
    else:
        dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
        autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
        autosize_split_val = bdb_config_ldbm.get_attr_val(
            'nsslapd-cache-autosize-split')

    cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
    dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')

    log.info(
        "Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range."
    )
    log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
    log.info("nsslapd-cachememsize == {}".format(cachenensize_val))
    log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val))
    log.info("nsslapd-cache-autosize == {}".format(autosize_val))
    log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val))
    assert int(dbcachesize_val) >= 512000
    assert int(cachenensize_val) >= 512000
    assert int(dncachenensize_val) >= 512000
Example #11
def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users,
                                  disable_access_log_buffering):
    """Test that the etime reported in the access log has a correct order of magnitude

    :id: e815cfa0-8136-4932-b50f-c3dfac34b0e6
    :setup: Standalone instance
    :steps:
         1. Unset log buffering for the access log
         2. Delete potential existing access logs
         3. Add users
         4. Search users
         5. Restart the server to flush the logs
         6. Parse the access log looking for the SRCH operation log
         7. From the SRCH string get the start time and op number of the operation
         8. From the op num find the associated RESULT string in the access log
         9. From the RESULT string get the end time and the etime for the operation 
         10. Calculate the ratio between the calculated elapsed time (end time - start time) and the logged etime
    :expectedresults:
         1. access log buffering is off
         2. Previously existing access logs are deleted
         3. Users are successfully added
         4. Search operation is successful
         5. Server is restarted and logs are flushed 
         6. SRCH operation log string is caught
         7. start time and op number are collected
         8. RESULT string is caught from the access log
         9. end time and etime are collected
         10. ratio between calculated elapsed time and logged etime is less than or equal to 1
    """

    entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX)

    log.info('add_users')
    add_users(topology_st.standalone, 30)

    log.info('search users')
    search_users(topology_st.standalone)

    log.info('parse the access logs to get the SRCH string')
    # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com
    search_str = str(
        topology_st.standalone.ds_access_log.match(
            r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1]
    assert len(search_str) > 0

    # the search_str returned looks like :
    # [23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName"

    log.info('get the operation start time from the SRCH string')
    # Here we are getting the sec.nanosec part of the date, '14.360857624' in the example above
    start_time = (search_str.split()[0]).split(':')[3]

    log.info('get the OP number from the SRCH string')
    # Here we are getting the op number, 'op=93' in the above example
    op_num = search_str.split()[3]

    log.info('get the RESULT string matching the SRCH OP number')
    # Here we are looking at the RESULT string for the above search op, 'op=93' in this example
    result_str = str(
        topology_st.standalone.ds_access_log.match(
            r'.*{} RESULT*'.format(op_num)))[1:-1]
    assert len(result_str) > 0

    # The result_str returned looks like :
    # [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017

    log.info('get the operation end time from the RESULT string')
    # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example
    end_time = (result_str.split()[0]).split(':')[3]

    log.info('get the logged etime for the operation from the RESULT string')
    # Here we are getting the etime value, '0.005723017' in the example above
    etime = result_str.split()[8].split('=')[1][:-3]

    log.info(
        'Calculate the ratio between logged etime for the operation and elapsed time from its start time to its end time - should be around 1'
    )
    etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime)
    assert etime_ratio <= 1
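
A self-contained check of the string slicing used above, run against the sample SRCH and RESULT lines quoted in the comments (the filter is abridged here; it contains no spaces, so the split() indices are unchanged).

from decimal import Decimal

search_str = '[23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(objectClass=account)" attrs="distinguishedName"'
result_str = '[23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017'

start_time = (search_str.split()[0]).split(':')[3]   # '14.360857624'
op_num = search_str.split()[3]                       # 'op=93'
end_time = (result_str.split()[0]).split(':')[3]     # '14.366429900'
etime = result_str.split()[8].split('=')[1][:-3]     # '0.005723' (last digits trimmed, as in the test)

assert (Decimal(end_time) - Decimal(start_time)) // Decimal(etime) <= 1
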
Example #12
 def __init__(self,
              instance,
              dn="cn=config,cn=ldbm database,cn=plugins,cn=config"):
     super(DatabaseConfig, self).__init__(instance, dn)
     self._rdn_attribute = 'cn'
     self._must_attributes = ['cn']
     self._global_attrs = [
         'nsslapd-lookthroughlimit',
         'nsslapd-mode',
         'nsslapd-idlistscanlimit',
         'nsslapd-directory',
         'nsslapd-import-cachesize',
         'nsslapd-idl-switch',
         'nsslapd-search-bypass-filter-test',
         'nsslapd-search-use-vlv-index',
         'nsslapd-exclude-from-export',
         'nsslapd-serial-lock',
         'nsslapd-subtree-rename-switch',
         'nsslapd-pagedlookthroughlimit',
         'nsslapd-pagedidlistscanlimit',
         'nsslapd-rangelookthroughlimit',
         'nsslapd-backend-opt-level',
         'nsslapd-backend-implement',
     ]
     self._db_attrs = {
         'bdb': [
             'nsslapd-dbcachesize',
             'nsslapd-db-logdirectory',
             'nsslapd-db-home-directory',
             'nsslapd-db-durable-transaction',
             'nsslapd-db-transaction-wait',
             'nsslapd-db-checkpoint-interval',
             'nsslapd-db-compactdb-interval',
             'nsslapd-db-page-size',
             'nsslapd-db-transaction-batch-val',
             'nsslapd-db-transaction-batch-min-wait',
             'nsslapd-db-transaction-batch-max-wait',
             'nsslapd-db-logbuf-size',
             'nsslapd-db-locks',
             'nsslapd-db-private-import-mem',
             'nsslapd-import-cache-autosize',
             'nsslapd-cache-autosize',
             'nsslapd-cache-autosize-split',
             'nsslapd-import-cachesize',
             'nsslapd-search-bypass-filter-test',
             'nsslapd-serial-lock',
             'nsslapd-db-deadlock-policy',
         ],
         'lmdb': []
     }
     self._create_objectclasses = ['top', 'extensibleObject']
     self._protected = True
     # This could be "bdb" or "lmdb", use what we have configured in the global config
     self._db_lib = self.get_attr_val_utf8_l('nsslapd-backend-implement')
     self._dn = "cn=config,cn=ldbm database,cn=plugins,cn=config"
     self._db_dn = f"cn={self._db_lib},cn=config,cn=ldbm database,cn=plugins,cn=config"
     self._globalObj = DSLdapObject(self._instance, dn=self._dn)
     self._dbObj = DSLdapObject(self._instance, dn=self._db_dn)
     # Assert there is no overlap in different config sets
     assert_c(
         len(
             set(self._global_attrs).intersection(
                 set(self._db_attrs['bdb']), set(self._db_attrs['lmdb'])))
         == 0)
Example #13
class DatabaseConfig(DSLdapObject):
    """Backend Database configuration

    The entire database configuration consists of the main global configuration entry
    and the underlying DB library configuration: whether BDB or LMDB.  The combined
    configuration is presented as a single entity so the end user does not need to
    worry about which library is in use and can just focus on the configuration.

    :param instance: An instance
    :type instance: lib389.DirSrv
    :param dn: Entry DN
    :type dn: str
    """
    def __init__(self,
                 instance,
                 dn="cn=config,cn=ldbm database,cn=plugins,cn=config"):
        super(DatabaseConfig, self).__init__(instance, dn)
        self._rdn_attribute = 'cn'
        self._must_attributes = ['cn']
        self._global_attrs = [
            'nsslapd-lookthroughlimit',
            'nsslapd-mode',
            'nsslapd-idlistscanlimit',
            'nsslapd-directory',
            'nsslapd-import-cachesize',
            'nsslapd-idl-switch',
            'nsslapd-search-bypass-filter-test',
            'nsslapd-search-use-vlv-index',
            'nsslapd-exclude-from-export',
            'nsslapd-serial-lock',
            'nsslapd-subtree-rename-switch',
            'nsslapd-pagedlookthroughlimit',
            'nsslapd-pagedidlistscanlimit',
            'nsslapd-rangelookthroughlimit',
            'nsslapd-backend-opt-level',
            'nsslapd-backend-implement',
        ]
        self._db_attrs = {
            'bdb': [
                'nsslapd-dbcachesize',
                'nsslapd-db-logdirectory',
                'nsslapd-db-home-directory',
                'nsslapd-db-durable-transaction',
                'nsslapd-db-transaction-wait',
                'nsslapd-db-checkpoint-interval',
                'nsslapd-db-compactdb-interval',
                'nsslapd-db-page-size',
                'nsslapd-db-transaction-batch-val',
                'nsslapd-db-transaction-batch-min-wait',
                'nsslapd-db-transaction-batch-max-wait',
                'nsslapd-db-logbuf-size',
                'nsslapd-db-locks',
                'nsslapd-db-private-import-mem',
                'nsslapd-import-cache-autosize',
                'nsslapd-cache-autosize',
                'nsslapd-cache-autosize-split',
                'nsslapd-import-cachesize',
                'nsslapd-search-bypass-filter-test',
                'nsslapd-serial-lock',
                'nsslapd-db-deadlock-policy',
            ],
            'lmdb': []
        }
        self._create_objectclasses = ['top', 'extensibleObject']
        self._protected = True
        # This could be "bdb" or "lmdb", use what we have configured in the global config
        self._db_lib = self.get_attr_val_utf8_l('nsslapd-backend-implement')
        self._dn = "cn=config,cn=ldbm database,cn=plugins,cn=config"
        self._db_dn = f"cn={self._db_lib},cn=config,cn=ldbm database,cn=plugins,cn=config"
        self._globalObj = DSLdapObject(self._instance, dn=self._dn)
        self._dbObj = DSLdapObject(self._instance, dn=self._db_dn)
        # Assert there is no overlap in different config sets
        assert_c(
            len(
                set(self._global_attrs).intersection(
                    set(self._db_attrs['bdb']), set(self._db_attrs['lmdb'])))
            == 0)

    def get(self):
        """Get the combined config entries"""
        # Get and combine both sets of attributes
        global_attrs = self._globalObj.get_attrs_vals_utf8(self._global_attrs)
        db_attrs = self._dbObj.get_attrs_vals_utf8(
            self._db_attrs[self._db_lib])
        combined_attrs = {**global_attrs, **db_attrs}
        return combined_attrs

    def display(self):
        """Display the combined configuration"""
        global_attrs = self._globalObj.get_attrs_vals_utf8(self._global_attrs)
        db_attrs = self._dbObj.get_attrs_vals_utf8(
            self._db_attrs[self._db_lib])
        combined_attrs = {**global_attrs, **db_attrs}
        for (k, vo) in combined_attrs.items():
            if len(vo) == 0:
                vo = ""
            else:
                vo = vo[0]
            self._instance.log.info(f'{k}: {vo}')

    def get_db_lib(self):
        """Return the backend library, bdb, lmdb, etc"""
        return self._db_lib

    def set(self, value_pairs):
        for attr, val in value_pairs:
            attr = attr.lower()
            if attr in self._global_attrs:
                global_config = DSLdapObject(self._instance, dn=self._dn)
                global_config.replace(attr, val)
            elif attr in self._db_attrs['bdb']:
                db_config = DSLdapObject(self._instance, dn=self._db_dn)
                db_config.replace(attr, val)
            elif attr in self._db_attrs['lmdb']:
                pass
            else:
                # Unknown attribute
                raise ValueError(
                    "Can not update database configuration with unknown attribute: "
                    + attr)
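
A short usage sketch for the combined view this class provides; the import path and the topo fixture are assumptions, and the set() method is sketched separately after Example #6.

from lib389.backend import DatabaseConfig  # assumed import path

db_cfg = DatabaseConfig(topo.standalone)
db_cfg.get_db_lib()                 # 'bdb' or 'lmdb', taken from nsslapd-backend-implement
combined = db_cfg.get()             # dict merging global and library-specific attributes
db_cfg.display()                    # log each attribute with its first value
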