Example #1
def test_dse_config_loglevel_error(topo):
    """Manually setting nsslapd-errorlog-level to 64 in dse.ldif throws error

    :id: 0eeefa17-ec1c-4208-8e7b-44d8fbc38f10

    :setup: Standalone instance

    :steps: 1. Stop the server, edit dse.ldif file and change nsslapd-errorlog-level value to 64
            2. Start the server and observe the error logs.

    :expectedresults:
            1. Server should be successfully stopped and nsslapd-errorlog-level value should be changed.
            2. Server should be successfully started without any errors being reported in the logs.
    """

    topo.standalone.stop(timeout=10)
    dse_ldif = DSEldif(topo.standalone)
    try:
        dse_ldif.replace(DN_CONFIG, 'nsslapd-errorlog-level', 64)
    except Exception:
        log.error(
            'Failed to replace cn=config values of nsslapd-errorlog-level')
        raise
    topo.standalone.start(timeout=10)
    assert not topo.standalone.ds_error_log.match(
        '.*nsslapd-errorlog-level: ignoring 64 \\(since -d 266354688 was given on the command line\\).*'
    )
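Every example on this page follows the same offline-edit idiom: stop the instance, modify dse.ldif through DSEldif, restart, then verify the outcome. Below is a minimal sketch of that idiom together with the lib389 imports the snippets assume (module paths are assumptions based on lib389 and may differ slightly between versions):

# Minimal sketch of the shared pattern; module paths are assumptions based on lib389
from lib389.dseldif import DSEldif
from lib389._constants import DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX
from lib389.utils import ds_is_older, ds_supports_new_changelog
from lib389.replica import Replicas
from lib389.plugins import Plugins, RetroChangelogPlugin

def offline_dse_edit(inst, dn, attr, value):
    """Stop the instance, change one dse.ldif attribute, start it again."""
    inst.stop(timeout=10)
    dse_ldif = DSEldif(inst)           # operates on the dse.ldif file, no LDAP connection needed
    dse_ldif.replace(dn, attr, value)
    inst.start(timeout=10)

# e.g. offline_dse_edit(topo.standalone, DN_CONFIG, 'nsslapd-errorlog-level', '64')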
Example #2
def test_cl_encryption_setup_process(topo):
    """Take an already working replication deployment, and setup changelog
    encryption

    :id: 1a1b7d29-69f5-4f0e-91c4-e7f66140ff17
    :setup: Master Instance, Consumer Instance
    :steps:
        1. Enable TLS for the server
        2. Export changelog
        3. Enable changelog encryption
        4. Import changelog
        5. Verify replication is still working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """

    supplier = topo.ms['master1']
    consumer = topo.cs['consumer1']

    # Enable TLS
    log.info('Enable TLS ...')
    supplier.enable_tls()
    consumer.enable_tls()

    # Export changelog
    log.info('Export changelog ...')
    replicas = Replicas(supplier)
    replica = replicas.get(DEFAULT_SUFFIX)
    replica.begin_task_cl2ldif()
    replica.task_finished()

    # Enable changelog encryption
    log.info('Enable changelog encryption ...')
    dse_ldif = DSEldif(supplier)
    supplier.stop()
    if ds_supports_new_changelog():
        changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
    else:
        changelog = DN_CHANGELOG
    dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', 'AES')
    if dse_ldif.get(changelog, 'nsSymmetricKey'):
        dse_ldif.delete(changelog, 'nsSymmetricKey')
    supplier.start()

    # Import changelog
    log.info('Import changelog ...')
    replica.begin_task_ldif2cl()
    replica.task_finished()

    # Verify replication is still working
    log.info('Test replication is still working ...')
    assert replica.test_replication([consumer])
Example #3
def _enable_changelog_encryption(inst, encrypt_algorithm):
    """Configure changelog encryption for master"""

    dse_ldif = DSEldif(inst)
    log.info('Configuring changelog encryption:{} for: {}'.format(
        inst.serverid, encrypt_algorithm))
    inst.stop()
    dse_ldif.replace(DN_CHANGELOG, 'nsslapd-encryptionalgorithm',
                     encrypt_algorithm)
    if dse_ldif.get(DN_CHANGELOG, 'nsSymmetricKey'):
        dse_ldif.delete(DN_CHANGELOG, 'nsSymmetricKey')
    inst.start()
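A hedged usage sketch for the helper above; the two-supplier fixture name and its keys are assumptions taken from the surrounding examples:

# Sketch: enable AES changelog encryption on both suppliers of a hypothetical topo_m2 fixture
for inst in (topo_m2.ms['supplier1'], topo_m2.ms['supplier2']):
    _enable_changelog_encryption(inst, 'AES')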
Example #4
def memberof_setup(topo, request):
    """Configure required plugins and restart the server"""

    log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server')
    topo.standalone.simple_bind_s(DN_DM, PASSWORD)
    try:
        topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    except ldap.LDAPError as e:
        log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF))
        raise e
    try:
        topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
        topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
    except ldap.LDAPError as e:
        log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER))
        raise e

    log.info('Change config values for db-locks and dbcachesize to import large ldif files')
    if ds_is_older('1.3.6'):
        topo.standalone.stop(timeout=10)
        dse_ldif = DSEldif(topo.standalone)
        try:
            dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000')
            dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-dbcachesize', '10000000')
        except Exception:
            log.error('Failed to replace cn=config values of db-locks and dbcachesize')
            raise
        topo.standalone.start(timeout=10)
    else:
        try:
            topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', b'100000')])
            topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', b'0')])
            topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', b'10000000')])
        except ldap.LDAPError as e:
            log.error(
                'Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize: {}'.format(e.args[0]['desc']))
            raise e
        topo.standalone.restart(timeout=10)

    def fin():
        log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER))
        topo.standalone.simple_bind_s(DN_DM, PASSWORD)
        try:
            topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
            topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY)
            topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER)
        except ldap.LDAPError as e:
            log.error('Failed to disable plugins, {}'.format(e.args[0]['desc']))
            assert False
        topo.standalone.restart(timeout=10)

    request.addfinalizer(fin)
Example #5
def _enable_changelog_encryption(inst, encrypt_algorithm):
    """Configure changelog encryption for supplier"""

    dse_ldif = DSEldif(inst)
    log.info('Configuring changelog encryption:{} for: {}'.format(inst.serverid, encrypt_algorithm))
    inst.stop()
    if ds_supports_new_changelog():
        changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
    else:
        changelog = DN_CHANGELOG

    dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', encrypt_algorithm)
    if dse_ldif.get(changelog, 'nsSymmetricKey'):
        dse_ldif.delete(changelog, 'nsSymmetricKey')
    inst.start()
Example #6
def test_replace(topo):
    """Check that we can replace an attribute to a given suffix"""

    dse_ldif = DSEldif(topo.standalone)
    port_attr = "nsslapd-port"
    port_value = "390"

    log.info("Get default value of {}".format(port_attr))
    default_value = dse_ldif.get(DN_CONFIG, port_attr)[0]

    log.info("Replace {} with {}".format(port_attr, port_value))
    dse_ldif.replace(DN_CONFIG, port_attr, port_value)
    attr_values = dse_ldif.get(DN_CONFIG, port_attr)
    assert attr_values == [port_value]

    log.info("Restore default value")
    dse_ldif.replace(DN_CONFIG, port_attr, default_value)
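Besides get() and replace(), the examples on this page also rely on DSEldif.delete() and DSEldif.rename(). A short hedged sketch of those calls, with an illustrative attribute (run with the instance stopped, as in the tests above):

dse_ldif = DSEldif(topo.standalone)

# get() returns None when the attribute is absent, so guard the delete (illustrative attribute)
if dse_ldif.get(DN_CONFIG, 'nsslapd-listenhost'):
    dse_ldif.delete(DN_CONFIG, 'nsslapd-listenhost')

# rename() moves a whole entry to a new DN, as done for the replication plugin in the last example
# dse_ldif.rename(old_dn, new_dn)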
Example #7
def test_retrochangelog_trimming_crash(topo, changelog_init):
    """Check that when retroCL nsslapd-retrocthangelog contains invalid
    value, then the instance does not crash at shutdown

    :id: 5d9bd7ca-e9bf-4be9-8fc8-902aa5513052
    :customerscenario: True
    :setup: Replication with two suppliers, change nsslapd-changelogdir to
            '/var/lib/dirsrv/slapd-supplier1/changelog' and
            set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on'
    :steps:
        1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to the value '-1'.
           This value is invalid; to disable retroCL trimming it should be set to 0
        2. Do several restarts
        3. Check there is no 'Detected Disorderly Shutdown' message (crash)
        4. Restore a valid value ('1w') for nsslapd-changelogmaxage

    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. Operation should be successful
        4. Operation should be successful
     """
    log.info(
        '1. Test retroCL trimming crash in cn=Retro Changelog Plugin,cn=plugins,cn=config'
    )

    # Set nsslapd-changelogmaxage directly in dse.ldif because the value
    # being set is invalid
    topo.ms["supplier1"].log.info("ticket50736 start verification")
    topo.ms["supplier1"].stop()
    retroPlugin = RetroChangelogPlugin(topo.ms["supplier1"])
    dse_ldif = DSEldif(topo.ms["supplier1"])
    dse_ldif.replace(retroPlugin.dn, 'nsslapd-changelogmaxage', '-1')
    topo.ms["supplier1"].start()

    # The crash should be systematic, but just in case do several restarts
    # with a delay to let all plugins initialize
    for i in range(5):
        time.sleep(1)
        topo.ms["supplier1"].stop()
        topo.ms["supplier1"].start()

    assert not topo.ms["supplier1"].detectDisorderlyShutdown()

    topo.ms["supplier1"].log.info("ticket 50736 was successfully verified.")
Example #8
def test_maxbersize_repl(topo):
    """Check that instance starts when nsslapd-errorlog-maxlogsize
    nsslapd-errorlog-logmaxdiskspace are set in certain order

    :id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0
    :setup: Standalone instance
    :steps:
        1. Stop the instance
        2. Set nsslapd-errorlog-maxlogsize before/after
           nsslapd-errorlog-logmaxdiskspace
        3. Start the instance
        4. Check the error log for errors
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. The error log should contain no errors
    """

    inst = topo.standalone
    dse_ldif = DSEldif(inst)

    inst.stop()
    log.info(
        "Set nsslapd-errorlog-maxlogsize before nsslapd-errorlog-logmaxdiskspace"
    )
    dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300')
    dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500')
    inst.start()
    log.info("Assert no init_dse_file errors in the error log")
    assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')

    inst.stop()
    log.info(
        "Set nsslapd-errorlog-maxlogsize after nsslapd-errorlog-logmaxdiskspace"
    )
    dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500')
    dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300')
    inst.start()
    log.info("Assert no init_dse_file errors in the error log")
    assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')
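For completeness, the values written offline can be read back with DSEldif.get() before each inst.start(); a brief hedged sketch:

# Sketch: confirm both attributes landed in dse.ldif (read back before starting the instance)
assert dse_ldif.get('cn=config', 'nsslapd-errorlog-maxlogsize') == ['300']
assert dse_ldif.get('cn=config', 'nsslapd-errorlog-logmaxdiskspace') == ['500']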
Example #9
def dblib_mdb2bdb(inst, log, args):
    global _log
    _log = log
    if args.tmpdir is None:
        tmpdir = get_ldif_dir(inst)
    else:
        tmpdir = args.tmpdir
    try:
        os.makedirs(tmpdir, 0o750, True)
    except OSError as e:
        log.error(
            f"Failed trying to create the directory {tmpdir} needed to store the ldif files, error: {str(e)}"
        )
        return

    # Cannot use Backends(inst).list() because it requires a connection,
    # so use the dse.ldif directly after having stopped the instance.

    inst.stop()
    dse = DSEldif(inst)
    backends = get_backends(log, dse, tmpdir)
    dbmapdir = backends['config']['dbdir']
    dbhome = inst.ds_paths.db_home_dir
    dblib = backends['config']['dblib']
    dbis = get_mdb_dbis(dbmapdir)

    if dblib == "bdb":
        log.error(f"Instance {inst.serverid} is already configured with bdb.")
        return

    # Remove ldif files and bdb files
    dblib_cleanup(inst, log, args)

    for be in dbis:
        if be is None:
            continue
        id2entry = dbis[be]['id2entry.db']
        if int(id2entry['nbentries']) > 0:
            backends[be]['has_id2entry'] = True

    # Compute the needed space and the lmdb map configuration
    dbmap_size = os.path.getsize(f'{dbmapdir}/{MDB_MAP}')
    # Clearly overestimated (but better than nothing)
    total_entrysize = dbmap_size

    log.info(
        f"Required space for LDIF files is about {size_fmt(total_entrysize)}")
    log.info(f"Required space for bdb files is about {size_fmt(dbmap_size)}")

    if os.stat(dbmapdir).st_dev == os.stat(tmpdir).st_dev:
        total, used, free = shutil.disk_usage(dbmapdir)
        if free < total_entrysize + dbmap_size:
            log.error(
                f"Not enough space on {dbmapdir} to migrate to bdb (Need {size_fmt(total_entrysize + dbmap_size)}, Have {size_fmt(free)})"
            )
            return
    else:
        total, used, free = shutil.disk_usage(dbmapdir)
        if free < dbmap_size:
            log.error(
                f"Not enough space on {dbmapdir} to migrate to bdb (Need {size_fmt(dbmap_size)}, Have {size_fmt(free)})"
            )
            return
        total, used, free = shutil.disk_usage(tmpdir)
        if free < total_entrysize:
            log.error(
                "Not enough space on {tmpdir} to migrate to bdb (Need {size_fmt(total_entrysize)}, Have {size_fmt(free)})"
            )
            return
    progress = 0
    encrypt = False  # Should maybe be a args param
    total_dbsize = 0
    for bename, be in backends.items():
        # Keep only backend associated with a db
        if 'has_id2entry' not in be:
            continue
        total_dbsize += 1
    uid = inst.get_user_uid()
    gid = inst.get_group_gid()
    for bename, be in backends.items():
        # Keep only backend associated with a db
        if 'has_id2entry' not in be:
            continue
        log.info(
            f"Backends exportation {progress*100/total_dbsize:2f}% ({bename})")
        log.debug(
            f"inst.db2ldif({bename}, None, None, {encrypt}, True, {be['ldifname']})"
        )
        inst.db2ldif(bename, None, None, encrypt, True, be['ldifname'], False)
        be['cl5'] = export_changelog(be, 'mdb')
        progress += 1
    log.info("Backends exportation 100%")
    set_owner(glob.glob(f'{dbmapdir}/*'), uid, gid)

    log.info("Updating dse.ldif file")
    # switch nsslapd-backend-implement in the dse.ldif
    cfgbe = backends['config']
    dn = cfgbe['dn']
    dse.replace(dn, 'nsslapd-backend-implement', 'bdb')

    # bdb entries should still be here

    # Reimport all exported backends and changelog
    progress = 0
    encrypt = False  # Should maybe be a args param
    for bename, be in backends.items():
        # Keep only backend associated with a db
        if 'has_id2entry' not in be:
            continue
        log.info(
            f"Backends importation {progress*100/total_dbsize:2f}% ({bename})")
        log.debug(
            f"inst.ldif2db({be['ecbename']}, None, None, {encrypt}, {be['ldifname']})"
        )
        log.debug(f'dbdir={be["dbdir"]}')
        os.chown(be['ldifname'], uid, gid)
        inst.ldif2db(be['ecbename'], None, None, encrypt, be['ldifname'])
        if be['cl5'] is True:
            import_changelog(be, 'bdb')
        set_owner(glob.glob(f'{be["ecdbdir"]}/*'), uid, gid)
        progress += 1

    set_owner(glob.glob(f'{dbhome}/__db.*'), uid, gid)
    set_owner(glob.glob(f'{dbmapdir}/__db.*'), uid, gid)
    set_owner(glob.glob(f'{dbhome}/log.*'), uid, gid)
    set_owner(glob.glob(f'{dbmapdir}/log.*'), uid, gid)
    set_owner((f'{dbhome}/DBVERSION', f'{dbmapdir}/DBVERSION',
               f'{dbhome}/guardian', f'{dbmapdir}/guardian'), uid, gid)

    log.info("Backends importation 100%")
    inst.start()
    log.info("Migration from ldbm to Berkeley database is done.")
Example #10
def dblib_bdb2mdb(inst, log, args):
    global _log
    _log = log
    if args.tmpdir is None:
        tmpdir = get_ldif_dir(inst)
    else:
        tmpdir = args.tmpdir
    try:
        os.makedirs(tmpdir, 0o750, True)
    except OSError as e:
        log.error(
            f"Failed trying to create the directory {tmpdir} needed to store the ldif files, error: {str(e)}"
        )
        return

    # Cannot use Backends(inst).list() because it requires a connection,
    # so use the dse.ldif directly after having stopped the instance.

    inst.stop()
    dse = DSEldif(inst)
    backends = get_backends(log, dse, tmpdir)
    dbmapdir = backends['config']['dbdir']
    dblib = backends['config']['dblib']

    if dblib == "mdb":
        log.error(f"Instance {inst.serverid} is already configured with lmdb.")
        return

    # Remove ldif files and mdb files
    dblib_cleanup(inst, log, args)

    # Compute the needed space and the lmdb map configuration
    total_dbsize = 0
    total_entrysize = 0
    total_dbi = 3
    for bename, be in backends.items():
        # Keep only backend associated with a db
        if be['dbsize'] == 0:
            continue
        total_dbsize += be['dbsize']
        total_entrysize += be['entrysize']
        total_dbi += be['dbi']

    # Round up dbmap size
    dbmap_size = DEFAULT_DBMAP_SIZE
    while (total_dbsize * DBSIZE_MARGIN > dbmap_size):
        dbmap_size *= 1.25

    # Round up number of dbis
    nbdbis = 1
    while nbdbis < total_dbi + DBI_MARGIN:
        nbdbis *= 2

    log.info(
        f"Required space for LDIF files is about {size_fmt(total_entrysize)}")
    log.info(f"Required space for DBMAP files is about {size_fmt(dbmap_size)}")
    log.info(f"Required number of dbi is {nbdbis}")

    # Generate the info file (so dbscan could generate the map)
    uid = inst.get_user_uid()
    gid = inst.get_group_gid()
    with open(f'{dbmapdir}/{MDB_INFO}', 'w') as f:
        f.write('LIBVERSION=9025\n')
        f.write('DATAVERSION=0\n')
        f.write(f'MAXSIZE={dbmap_size}\n')
        f.write('MAXREADERS=50\n')
        f.write(f'MAXDBS={nbdbis}\n')
    os.chown(f'{dbmapdir}/{MDB_INFO}', uid, gid)

    if os.stat(dbmapdir).st_dev == os.stat(tmpdir).st_dev:
        total, used, free = shutil.disk_usage(dbmapdir)
        if free < total_entrysize + dbmap_size:
            log.error(
                f"Not enough space on {dbmapdir} to migrate to lmdb (Need {size_fmt(total_entrysize + dbmap_size)}, Have {size_fmt(free)})"
            )
            return
    else:
        total, used, free = shutil.disk_usage(dbmapdir)
        if free < dbmap_size:
            log.error(
                f"Not enough space on {dbmapdir} to migrate to lmdb (Need {size_fmt(dbmap_size)}, Have {size_fmt(free)})"
            )
            return
        total, used, free = shutil.disk_usage(tmpdir)
        if free < total_entrysize:
            log.error(
                "Not enough space on {tmpdir} to migrate to lmdb (Need {size_fmt(total_entrysize)}, Have {size_fmt(free)})"
            )
            return
    progress = 0
    encrypt = False  # Should maybe be a args param
    for bename, be in backends.items():
        # Keep only backend associated with a db
        if be['dbsize'] == 0:
            continue
        log.info(
            f"Backends exportation {progress*100/total_dbsize:2f}% ({bename})")
        log.debug(
            f"inst.db2ldif({bename}, None, None, {encrypt}, True, {be['ldifname']})"
        )
        inst.db2ldif(bename, None, None, encrypt, True, be['ldifname'], False)
        be['cl5'] = export_changelog(be, 'bdb')
        progress += be['dbsize']
    log.info("Backends exportation 100%")

    log.info("Updating dse.ldif file")
    # switch nsslapd-backend-implement in the dse.ldif
    cfgbe = backends['config']
    dn = cfgbe['dn']
    dse.replace(dn, 'nsslapd-backend-implement', 'mdb')

    # Add the lmdb config entry
    dn = f'cn=mdb,{dn}'
    try:
        dse.delete_dn(dn)
    except Exception:
        pass
    dse.add_entry([
        f"dn: {dn}\n", "objectClass: extensibleobject\n", "objectClass: top\n",
        "cn: mdb\n", f"nsslapd-mdb-max-size: {dbmap_size}\n",
        "nsslapd-mdb-max-readers: 0\n", f"nsslapd-mdb-max-dbs: {nbdbis}\n",
        "nsslapd-db-durable-transaction: on\n",
        "nsslapd-search-bypass-filter-test: on\n", "nsslapd-serial-lock: on\n"
    ])

    # Reimport all exported backends and changelog
    progress = 0
    encrypt = False  # Should maybe be a args param
    for bename, be in backends.items():
        # Keep only backend associated with a db
        if be['dbsize'] == 0:
            continue
        log.info(
            f"Backends importation {progress*100/total_dbsize:2f}% ({bename})")
        os.chown(be['ldifname'], uid, gid)
        log.debug(
            f"inst.ldif2db({bename}, None, None, {encrypt}, {be['ldifname']})")
        inst.ldif2db(bename, None, None, encrypt, be['ldifname'])
        if be['cl5'] is True:
            import_changelog(be, 'mdb')
        progress += be['dbsize']
    dbhome = backends["config"]["dbdir"]
    set_owner(glob.glob(f'{dbhome}/*.mdb'), uid, gid)
    log.info("Backends importation 100%")
    inst.start()
    log.info("Migration from Berkeley database to lmdb is done.")
Example #11
def test_repl_plugin_name_change(topo):
    """Test that the replication plugin name is updated to the new name at
    server startup.

    :id: c2a7b7fb-6524-4391-8883-683b6af2a1cf
    :setup: Standalone Instance
    :steps:
        1. Stop Server
        2. Edit repl plugin in dse.ldif
        3. Add repl plugin dependency to retro changelog plugin.
        4. Start server
        5. Verify old plugin is not found
        6. Verify new plugin is found
        7. Verify plugin dependency was updated in retro changelog plugin
        8. Restart and repeat steps 5-7
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
    """

    REPL_PLUGIN_DN = "cn=Multisupplier Replication Plugin,cn=plugins,cn=config"
    REPL_PLUGIN_NAME = "Multisupplier Replication Plugin"
    REPL_PLUGIN_INIT_ATTR = "nsslapd-pluginInitfunc"
    REPL_PLUGIN_INIT_FUNC = "replication_multisupplier_plugin_init"
    REPL_DEPENDS_ATTR = "nsslapd-plugin-depends-on-named"
    OLD_REPL_PLUGIN_NAME = "Replication Plugin"
    OLD_REPL_PLUGIN_DN = "cn=Replication Plugin,cn=plugins,cn=config"
    RETROCL_PLUGIN_DN = "cn=Retro Changelog Plugin,cn=plugins,cn=config"
    RETROCL_PLUGIN_NAME = "Retro Changelog Plugin"

    MEP_PLUGIN_DN = "cn=Managed Entries,cn=plugins,cn=config"
    MEP_PLUGIN_NAME = "Managed Entries"

    # Stop the server
    topo.standalone.stop()

    # Edit repl plugin in dse.ldif
    dse_ldif = DSEldif(topo.standalone)
    dse_ldif.replace(REPL_PLUGIN_DN, REPL_PLUGIN_INIT_ATTR,
                     "multi_old_init_function")
    dse_ldif.rename(REPL_PLUGIN_DN, OLD_REPL_PLUGIN_DN)

    # Add dependency for repl plugin in retro changelog plugin and managed entries
    dse_ldif.replace(RETROCL_PLUGIN_DN, REPL_DEPENDS_ATTR,
                     OLD_REPL_PLUGIN_NAME)
    dse_ldif.replace(MEP_PLUGIN_DN, REPL_DEPENDS_ATTR, OLD_REPL_PLUGIN_NAME)

    # Restart the server, loop twice to verify settings stick after restart
    for loop in [0, 1]:
        topo.standalone.restart()

        # Verify old plugin is deleted
        with pytest.raises(ldap.NO_SUCH_OBJECT):
            Plugins(topo.standalone).get(OLD_REPL_PLUGIN_NAME)

        # Verify repl plugin name was changed to the new name
        plugin = Plugins(topo.standalone).get(REPL_PLUGIN_NAME)
        assert plugin is not None
        assert plugin.get_attr_val_utf8_l(
            REPL_PLUGIN_INIT_ATTR) == REPL_PLUGIN_INIT_FUNC
        assert len(plugin.get_attr_vals_utf8_l(REPL_PLUGIN_INIT_ATTR)) == 1

        # Verify dependency was updated in retro changelog plugin
        plugin = Plugins(topo.standalone).get(RETROCL_PLUGIN_NAME)
        assert plugin is not None
        assert plugin.get_attr_val_utf8_l(
            REPL_DEPENDS_ATTR) == REPL_PLUGIN_NAME.lower()
        assert len(plugin.get_attr_vals_utf8_l(REPL_DEPENDS_ATTR)) == 1

        # Verify dependency was updated in MEP plugin
        plugin = Plugins(topo.standalone).get(MEP_PLUGIN_NAME)
        assert plugin is not None
        assert plugin.get_attr_val_utf8_l(
            REPL_DEPENDS_ATTR) == REPL_PLUGIN_NAME.lower()