def test_basic(topology_st):
    """Back up the standalone instance and restore it, checking that a
    marker attribute written before the backup survives the restore.
    """
    inst = topology_st.standalone
    backup_dir = os.path.join(inst.ds_paths.backup_dir, "basic_backup")
    topology_st.logcap = LogCapture()
    args = FakeArgs()

    # Write a recognizable value we can look for after the restore.
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user = users.create_test_user()
    user.replace("description", "backup_test")

    # Start from a clean backup directory.
    if os.path.exists(backup_dir):
        shutil.rmtree(backup_dir)

    # Create the backup and make sure something was written out.
    args.archive = backup_dir
    args.db_type = None
    backup_create(inst, None, topology_st.logcap.log, args)
    assert os.listdir(backup_dir)

    # Restore the backup.
    # NOTE(review): the restore points at the parent backup directory
    # rather than backup_dir itself -- confirm this is intended.
    args.archive = inst.ds_paths.backup_dir
    args.db_type = None
    backup_restore(inst, None, topology_st.logcap.log, args)
    assert user.present("description", "backup_test")

    # Clean up.
    if os.path.exists(backup_dir):
        shutil.rmtree(backup_dir)
def test_setup_ds_minimal_dry(topology):
    """Run SetupDs in dry-run mode with minimal options and assert that
    no instance was actually created on the system.
    """
    # Drop PYTHONPATH so old CLI tooling cannot leak into the new lib389
    # environment.  NOTE: os.environ is mutated directly (not a copy).
    env = os.environ
    if "PYTHONPATH" in env:
        del env["PYTHONPATH"]

    log_capture = LogCapture()
    setup = SetupDs(verbose=DEBUGGING, dryrun=True, log=log_capture.log)

    # Collect the default general/slapd option dicts (as
    # _validate_ds_2_config would) and override only what this test needs:
    # instance name, ports and root password.
    general_options = General2Base(log_capture.log)
    general_options.verify()
    general = general_options.collect()

    slapd_options = Slapd2Base(log_capture.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('secure_port', INSTANCE_SECURE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.verify()
    slapd = slapd_options.collect()

    setup.create_from_args(general, slapd, {}, None)

    # A dry run must leave the system untouched.
    insts = topology.standalone.list(serverid=INSTANCE_SERVERID)
    assert len(insts) == 0
def test_setup_ds_minimal(topology):
    """Create a real instance through SetupDs with minimal options, check
    it is usable (connect, stop, start), then remove it again.
    """
    log_capture = LogCapture()
    setup = SetupDs(verbose=DEBUGGING, dryrun=False, log=log_capture.log)

    # Collect the default general/slapd option dicts (as
    # _validate_ds_2_config would) and override only the values this
    # test cares about.
    general_options = General2Base(log_capture.log)
    general_options.verify()
    general = general_options.collect()

    slapd_options = Slapd2Base(log_capture.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.verify()
    slapd = slapd_options.collect()

    setup.create_from_args(general, slapd, {}, None)

    # This time the instance must really exist on the system.
    insts = topology.standalone.list(serverid=INSTANCE_SERVERID)
    assert len(insts) == 1

    # Prove the instance is functional: connect, then cycle it.
    topology.standalone.open()
    topology.standalone.stop()
    topology.standalone.start()

    # Okay, actually remove the instance
    remove_ds_instance(topology.standalone)
def topology_m1h1c1(request):
    """Create Replication Deployment with one master, one consumer and one hub"""
    roles = {ReplicaRole.MASTER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1}
    topology = _create_instances(roles, DEFAULT_SUFFIX)
    master = topology.ms["master1"]
    hub = topology.hs["hub1"]
    consumer = topology.cs["consumer1"]

    log.info("Creating replication topology.")

    # Initialise the first master, then chain the hub off it and the
    # consumer off the hub, and verify replication flows end to end.
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)
    repl.join_hub(master, hub)
    repl.join_consumer(hub, consumer)
    repl.test_replication(master, consumer)

    # Clear out the tmp dir
    for instance in topology:
        instance.clearTmpDir(__file__)

    def fin():
        if DEBUGGING:
            # Leave the instances on disk for post-mortem inspection.
            for inst in topology:
                inst.stop()
        else:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete(pyinstall=PYINSTALL)

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology(request):
    """Standalone instance fixture created without sample entries."""
    topology = create_topology({ReplicaRole.STANDALONE: 1}, None)

    def fin():
        # When debugging, keep the instance around for inspection.
        if DEBUGGING:
            topology.standalone.stop()
        else:
            topology.standalone.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_i3(request):
    """Create three instance DS deployment"""
    topology = create_topology({ReplicaRole.STANDALONE: 3})

    def fin():
        if DEBUGGING:
            # Leave the instances on disk for post-mortem inspection.
            for inst in topology:
                inst.stop()
        else:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete(pyinstall=PYINSTALL)

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_m4(request):
    """Create Replication Deployment with four masters"""
    topology = create_topology({ReplicaRole.MASTER: 4})

    def fin():
        if DEBUGGING:
            # Leave the instances on disk for post-mortem inspection.
            for inst in topology:
                inst.stop()
        else:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete(pyinstall=PYINSTALL)

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_st(request):
    """Create DS standalone instance"""
    topology = create_topology({ReplicaRole.STANDALONE: 1})

    def fin():
        if DEBUGGING:
            # Leave the instance on disk for post-mortem inspection.
            topology.standalone.stop()
        else:
            assert _remove_ssca_db(topology)
            if topology.standalone.exists():
                topology.standalone.delete(pyinstall=PYINSTALL)

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology(request):
    """Build a standalone instance via SetupDs, create a userRoot backend
    with sample entries, and return it wrapped in a TopologyInstance.
    """
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    # Remove any leftover instance from a previous run.
    if instance.exists():
        instance.delete()

    # Drive the real installer (not a dry run).
    log_capture = LogCapture()
    setup = SetupDs(verbose=DEBUGGING, dryrun=False, log=log_capture.log)

    # Collect the default general/slapd option dicts (as
    # _validate_ds_2_config would) and override what this fixture needs.
    general_options = General2Base(log_capture.log)
    general_options.verify()
    general = general_options.collect()

    slapd_options = Slapd2Base(log_capture.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.verify()
    slapd = slapd_options.collect()

    setup.create_from_args(general, slapd, {}, None)

    # Make sure we can connect
    instance.open()

    # Create the example backend with sample entries.
    instance.backends.create(properties={
        'cn': ['userRoot'],
        'nsslapd-suffix': ['dc=example,dc=com'],
    })

    def fin():
        # Keep the instance around when debugging.
        if instance.exists() and not DEBUGGING:
            instance.delete()

    request.addfinalizer(fin)
    return TopologyInstance(instance)
def topology_m4(request):
    """Create Replication Deployment with four suppliers"""
    topology = create_topology({ReplicaRole.SUPPLIER: 4})

    def fin():
        # Always stop the instances; only remove them when not debugging.
        for inst in topology:
            inst.stop()
        # NOTE(review): cleanup triggers only when DEBUGGING is literally
        # None -- confirm DEBUGGING defaults to None (not False) here.
        if DEBUGGING is None:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_i2(request):
    """Create two instance DS deployment"""
    topology = create_topology({ReplicaRole.STANDALONE: 2})

    def fin():
        # Always stop the instances; only remove them when not debugging.
        for inst in topology:
            inst.stop()
        # NOTE(review): cleanup triggers only when DEBUGGING is literally
        # None -- confirm DEBUGGING defaults to None (not False) here.
        if DEBUGGING is None:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_m1(request):
    """Create Replication Deployment with one master.

    (The role dict requests only ``ReplicaRole.MASTER: 1`` -- no consumer
    is created by this fixture.)
    """
    topology = create_topology({ReplicaRole.MASTER: 1})

    def fin():
        # Always stop the instance; only remove it when not debugging.
        for inst in topology:
            inst.stop()
        # NOTE(review): cleanup triggers only when DEBUGGING is literally
        # None -- confirm DEBUGGING defaults to None (not False) here.
        if DEBUGGING is None:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_st_fn(request):
    """Create DS standalone instance for each test case"""
    topology = create_topology({ReplicaRole.STANDALONE: 1})

    def fin():
        # Kill the hanging ns-slapd process at the end of the test to
        # prevent failures in the following tests.  The original code
        # duplicated this call identically in both the DEBUGGING and the
        # non-DEBUGGING branch; it is hoisted out of the branch here.
        for inst in topology:
            _kill_ns_slapd(inst)
        # When not debugging, fully remove the instances as well.
        if not DEBUGGING:
            assert _remove_ssca_db(topology)
            for inst in topology:
                if inst.exists():
                    inst.stop()
            for inst in topology:
                if inst.exists():
                    inst.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_be_001003006(request):
    """Standalone instance fixture with a userRoot backend seeded from the
    '001003006' sample-entry set.
    """
    topology = create_topology({ReplicaRole.STANDALONE: 1}, None)
    topology.standalone.backends.create(properties={
        'cn': 'userRoot',
        'nsslapd-suffix': DEFAULT_SUFFIX,
    })

    # Now apply sample entries
    sample_cls = get_sample_entries('001003006')
    sample_cls(topology.standalone, DEFAULT_SUFFIX).apply()

    def fin():
        # Keep the instance around when debugging.
        if DEBUGGING:
            topology.standalone.stop()
        else:
            topology.standalone.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def topology_no_sample(request):
    """Create instance without sample entries to reproduce not initialised database"""
    topology = create_topology({ReplicaRole.STANDALONE: 1}, None)
    # Create the backend but deliberately do NOT populate it.
    topology.standalone.backends.create(properties={
        'cn': 'userRoot',
        'nsslapd-suffix': DEFAULT_SUFFIX,
    })

    def fin():
        topology.standalone.stop()
        # NOTE(review): cleanup triggers only when DEBUGGING is literally
        # None -- confirm DEBUGGING defaults to None (not False) here.
        if DEBUGGING is None:
            assert _remove_ssca_db(topology)
            if topology.standalone.exists():
                topology.standalone.delete()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology
def test_import_export(topology_st):
    """Export userRoot while excluding ou=Groups, verify the exclusion took
    effect in the produced LDIF, then import the LDIF back and clean up.
    """
    BE_NAME = 'userRoot'
    EXCLUDE_SUFFIX = "ou=Groups,dc=example,dc=com"
    LDIF_NAME = "test_import_export.ldif"
    # NOTE(review): the export is given the bare name but the assertion
    # checks under ds_paths.ldif_dir -- presumably relative names resolve
    # into the server's ldif directory; confirm against backend_export.
    LDIF_PATH = os.path.join(topology_st.standalone.ds_paths.ldif_dir, LDIF_NAME)
    topology_st.logcap = LogCapture()
    args = FakeArgs()

    # Export the backend
    args.be_names = [BE_NAME]
    args.ldif = LDIF_NAME
    args.use_id2entry = None
    args.encrypted = None
    args.min_base64 = None
    args.no_dump_uniq_id = None
    args.replication = None
    args.not_folded = None
    args.no_seq_num = None
    args.include_suffixes = None
    args.exclude_suffixes = [EXCLUDE_SUFFIX]
    backend_export(topology_st.standalone, None, topology_st.logcap.log, args)

    # The file must exist and contain no DN ending in the excluded suffix.
    assert os.path.exists(LDIF_PATH)
    with open(LDIF_PATH, 'r') as ldif:
        for line in ldif:
            assert not line.endswith("%s\n" % EXCLUDE_SUFFIX)

    # Import the backend
    args.be_name = BE_NAME
    args.ldifs = [LDIF_NAME]
    args.chunks_size = None
    args.encrypted = None
    args.gen_uniq_id = None
    args.only_core = None
    args.include_suffixes = None
    args.exclude_suffixes = None
    backend_import(topology_st.standalone, None, topology_st.logcap.log, args)

    os.remove(LDIF_PATH)
def test_conflict_cli(topo):
    """Test managing replication conflict entries

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d8
    :setup: two masters
    :steps:
        1. Create replication conflict entries
        2. List conflicts
        3. Compare conflict entry
        4. Delete conflict
        5. Swap conflict
        6. Convert conflict
        7. List glue entries
        8. Delete glue entry
        9. Convert glue entry
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """
    # Setup our default parameters for CLI functions
    topo.logcap = LogCapture()
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.DN = ""
    args.suffix = DEFAULT_SUFFIX
    args.json = True

    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]

    # Pause replication so both masters can diverge independently.
    topo.pause_all_replicas()

    # Create entries
    _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent1')
    _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent1')
    _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent2')
    _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent2')
    cont_parent_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent3')
    cont_parent_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent3')
    cont_glue_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent4')
    cont_glue_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent4')

    # Create the conflicts: delete a parent on m1 while m2 adds a child
    # under the same parent, which produces glue/conflict entries.
    _delete_container(cont_parent_m1)
    _create_container(m2, cont_parent_m2.dn, 'conflict_child1')
    _delete_container(cont_glue_m1)
    _create_container(m2, cont_glue_m2.dn, 'conflict_child2')

    # Resume replication, then give it time to converge and surface the
    # conflicts.
    topo.resume_all_replicas()
    time.sleep(5)

    # Test "list": all four conflicts should be reported as JSON.
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 4
    conflict_1_DN = conflicts['items'][0]['dn']
    conflict_2_DN = conflicts['items'][1]['dn']
    conflict_3_DN = conflicts['items'][2]['dn']
    topo.logcap.flush()

    # Test compare: comparing one conflict returns the two competing
    # entries.
    args.DN = conflict_1_DN
    cmp_conflict(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 2
    topo.logcap.flush()

    # Test delete: removing the conflict drops the count from 4 to 3.
    del_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 3
    topo.logcap.flush()

    # Test swap: the conflict replaces the valid entry, count drops to 2.
    args.DN = conflict_2_DN
    swap_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 2
    topo.logcap.flush()

    # Test conflict convert: keep the conflict under a new RDN, count 1.
    args.DN = conflict_3_DN
    args.new_rdn = "cn=testing convert"
    convert_conflict(m2, None, topo.logcap.log, args)
    list_conflicts(m2, None, topo.logcap.log, args)
    conflicts = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(conflicts['items']) == 1
    topo.logcap.flush()

    # Test list glue entries
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 2
    topo.logcap.flush()

    # Test delete glue entries
    args.DN = "cn=conflict_parent3,dc=example,dc=com"
    del_glue(m2, None, topo.logcap.log, args)
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 1
    topo.logcap.flush()

    # Test convert glue entries: converting the last glue to a real entry
    # leaves no glue entries behind.
    args.DN = "cn=conflict_parent4,dc=example,dc=com"
    convert_glue(m2, None, topo.logcap.log, args)
    list_glue(m2, None, topo.logcap.log, args)
    glues = json.loads(topo.logcap.outputs[0].getMessage())
    assert len(glues['items']) == 0
    topo.logcap.flush()
def test_backend_cli(topology_st, create_backend):
    """Test creating, listing, getting, and deleting a backend (and subsuffix)

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d7
    :setup: Standalone instance
    :steps:
        1. List backends
        2. Get backend by suffix
        3. Get backend by DN
        4. Add subsuffix
        5. Verify subsuffix
        6. Modify subsuffix
        7. Delete subsuffix
        8. Verify subsuffix is removed
        9. Modify backend
        10. Verify modify worked
        11. Test monitor works
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """
    # Capture CLI output so check_output() can inspect it.
    topology_st.logcap = LogCapture()
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.cn = BE_NAME
    args.be_name = BE_NAME
    args.suffix = False
    args.nsslapd_suffix = SUFFIX
    args.skip_subsuffixes = False
    args.json = False
    args.parent_suffix = False
    args.create_entries = True

    # List backend
    backend_list(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(SUFFIX)

    # Get backend by name
    args.selector = BE_NAME
    backend_get(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(BE_NAME)

    # Get backend by DN
    args.dn = 'cn=backendRoot,cn=ldbm database,cn=plugins,cn=config'
    backend_get_dn(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(BE_NAME)

    # Add subsuffix under the main suffix
    args.parent_suffix = SUFFIX
    args.suffix = SUB_SUFFIX
    args.be_name = SUB_BE_NAME
    backend_create(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("The database was successfully created")

    # Verify subsuffix shows up in the listing
    args.suffix = False
    backend_list(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(SUB_SUFFIX)

    # Modify subsuffix: only enable_readonly is set; everything else is
    # explicitly disabled so backend_set changes a single attribute.
    args.enable = False
    args.disable = False
    args.add_referral = False
    args.del_referral = False
    args.cache_size = False
    args.cache_memsize = False
    args.dncache_memsize = False
    args.enable_readonly = True  # Setting nsslapd-readonly to "on"
    args.disable_readonly = False
    backend_set(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("successfully updated")

    # Verify modified worked
    args.selector = SUB_BE_NAME
    backend_get(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("nsslapd-readonly: on")

    # Delete subsuffix
    args.suffix = SUB_SUFFIX
    backend_delete(topology_st.standalone, None, topology_st.logcap.log, args, warn=False)
    check_output("successfully deleted")

    # Verify it is deleted
    args.suffix = False
    backend_list(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(SUB_BE_NAME, missing=True)

    # Modify backend (use same args from subsuffix modify)
    args.be_name = BE_NAME
    backend_set(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("successfully updated")

    # Verify modified worked
    args.selector = BE_NAME
    backend_get(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("nsslapd-readonly: on")

    # Run database monitor
    args.suffix = SUFFIX
    get_monitor(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("entrycachetries")
def test_pwp_cli(topology_st, do_setup):
    """Test creating, listing, getting, and deleting local and global
    password policies via the CLI functions.

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984da
    :setup: Standalone instance
    :steps:
        1. Create User policy
        2. Create Subtree policy
        3. List policies
        4. Set user policy
        5. Get user policy
        6. Set subtree policy
        7. Get subtree policy
        8. Delete user policy
        9. Delete subtree policy
        10. List local policies - make sure none are returned
        11. Get global policy
        12. Set global policy
        13. Verify global policy update
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
    """
    # Capture CLI output so check_output() can inspect it.
    topology_st.logcap = LogCapture()
    sys.stdout = io.StringIO()

    # Create User Policy
    args = test_args(USER_DN)
    args.pwdchange = 'on'
    create_user_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # Create Subtree Policy
    args = test_args(OU_DN)
    args.pwdchange = 'off'
    create_subtree_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # List policies: both newly-created policies must be reported.
    args = test_args(DEFAULT_SUFFIX)
    list_policies(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output([USER_OUTPUT, OU_OUTPUT])

    # Set User Policy
    args = test_args(USER_DN)
    args.pwdhistory = 'on'
    set_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # Get User Policy and confirm the update
    args = test_args(USER_DN)
    get_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("passwordHistory: on")

    # Set Subtree Policy
    args = test_args(OU_DN)
    args.pwdexpire = 'on'
    set_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # Get Subtree Policy and confirm the update
    args = test_args(OU_DN)
    get_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("passwordExp: on")

    # Delete User Policy (and verify the get now raises)
    args = test_args(USER_DN)
    del_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)
    with pytest.raises(ValueError):
        get_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # Delete Subtree Policy (and verify the get now raises)
    args = test_args(OU_DN)
    del_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)
    with pytest.raises(ValueError):
        get_local_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # List policies (or lack there of)
    args = test_args(DEFAULT_SUFFIX)
    list_policies(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output([USER_OUTPUT, OU_OUTPUT], missing=True)

    # Get global policy
    args = test_args(DEFAULT_SUFFIX)
    get_global_policy(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output('passwordLockout: off')

    # Set global policy
    args = test_args(DEFAULT_SUFFIX)
    args.pwdlockout = "on"
    set_global_policy(topology_st.standalone, None, topology_st.logcap.log, args)

    # Check update was applied
    get_global_policy(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output('passwordLockout: on')
def test_chaining_cli(topology_st, create_backend):
    """Test chaining configuration, default config, and database link
    create/edit/monitor/delete via the CLI functions.

    :id: 800f432a-52ab-4661-ac66-a2bdd9b984d7
    :setup: Standalone instance
    :steps:
        1. Update config controls and components
        2. Verify update to config
        3. Set default config
        4. Verify update to default config
        5. Add DB Link
        6. Verify Link was created
        7. Edit Link
        8. Verify edit to link
        9. Test monitor
        10. Delete link
        11. Verify link was deleted
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """
    # Capture CLI output so check_output() can inspect it.
    topology_st.logcap = LogCapture()
    sys.stdout = io.StringIO()
    args = FakeArgs()
    args.CHAIN_NAME = [LINK_NAME]
    args.suffix = LINK_SUFFIX
    args.json = False
    args.add_control = None
    args.del_control = None
    args.add_comp = None
    args.del_comp = None

    # Set config (add control).
    # NOTE(review): several calls below pass None for the log argument
    # while check_output still inspects topology_st.logcap -- confirm
    # check_output reads from captured stdout in those cases.
    args.add_control = '1.1.1.1.1.1.1'
    config_set(topology_st.standalone, None, None, args)
    args.add_control = None
    check_output("updated chaining configuration")

    # Verify config change
    config_get(topology_st.standalone, None, None, args)
    check_output("1.1.1.1.1.1.1")

    # Set config (delete control)
    args.del_control = '1.1.1.1.1.1.1'
    config_set(topology_st.standalone, None, None, args)
    args.del_control = None
    check_output("updated chaining configuration")

    # Verify config change: the control must be gone again
    config_get(topology_st.standalone, None, None, args)
    check_output("1.1.1.1.1.1.1", missing=True)

    # Set config (add comp)
    args.add_comp = 'cn=test,cn=config'
    config_set(topology_st.standalone, None, None, args)
    args.add_comp = None
    check_output("updated chaining configuration")

    # Verify config change
    config_get(topology_st.standalone, None, None, args)
    check_output('cn=test,cn=config')

    # Set config (delete comp)
    args.del_comp = 'cn=test,cn=config'
    config_set(topology_st.standalone, None, None, args)
    args.del_comp = None
    check_output("updated chaining configuration")

    # Verify config change: the component must be gone again
    config_get(topology_st.standalone, None, None, args)
    check_output("cn=test,cn=config", missing=True)

    # Set default config
    args.time_limit = '5555'
    def_config_set(topology_st.standalone, None, None, args)
    check_output("updated chaining default instance creation configuration")

    # Verify default config change
    def_config_get(topology_st.standalone, None, None, args)
    check_output("nsslapd_timelimit: 5555")

    # Create database link
    args.server_url = "ldap://localhost.localdomain"
    args.bind_dn = "cn=link_admin," + SUFFIX
    args.bind_pw = "secret_157"
    args.bind_mech = "LDAP"
    create_link(topology_st.standalone, None, None, args)
    check_output("created database link")

    # Verify link was created
    list_links(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(LINK_NAME)

    # Edit link
    args.bind_dn = "uid=newuser,cn=config"
    args.suffix = None
    edit_link(topology_st.standalone, None, None, args)
    check_output("updated database chaining link")

    # Verify link was edited
    args.cn = LINK_NAME
    get_link(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("uid=newuser,cn=config")

    # Test monitor
    time.sleep(2)  # need time for link to start up and generate monitor
    monitor_link(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("nssearchonelevelcount: ")

    # Delete link
    delete_link(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output("deleted database link")

    # Verify link was deleted
    list_links(topology_st.standalone, None, topology_st.logcap.log, args)
    check_output(LINK_NAME, missing=True)
def test_instance_list():
    """Listing instances must report either "no instances" or at least one
    "instance:" line -- never neither.

    NOTE(review): this function name is defined twice in this file; the
    later definition shadows this one at collection time.
    """
    lc = LogCapture()
    inst = DirSrv()
    instance_list(inst, lc.log, None)
    found_none = lc.contains("No instances of Directory Server")
    found_some = lc.contains("instance: ")
    assert found_none or found_some
def test_instance_list():
    """Sanity-check instance_list: its output must contain either the
    "No instances of Directory Server" message or an "instance: " line.

    NOTE(review): duplicate of an earlier test_instance_list in this file
    (the two differ only in whitespace); this one shadows the earlier one.
    """
    capture = LogCapture()
    instance_list(DirSrv(), capture.log, None)
    assert (capture.contains("No instances of Directory Server")
            or capture.contains("instance: "))
def topology_st_gssapi(request):
    """Create a DS standalone instance with GSSAPI enabled.

    This will alter the instance to remove the secure port, to allow
    GSSAPI to function.  Requires root (writes /etc/krb5.keytab) and a
    fully-qualified hostname.
    """
    hostname = socket.gethostname().split('.', 1)
    # Assert we have a domain setup in some kind.
    assert len(hostname) == 2
    # The Kerberos realm is the upper-cased DNS domain.
    REALM = hostname[1].upper()

    topology = create_topology({ReplicaRole.STANDALONE: 1})

    # Fix the hostname.
    topology.standalone.host = socket.gethostname()

    krb = MitKrb5(realm=REALM, debug=DEBUGGING)

    # Destroy existing realm so the fixture always starts clean.
    if krb.check_realm():
        krb.destroy_realm()
    krb.create_realm()

    # Now add krb to our instance: an ldap/<host> service principal with
    # its keytab owned by the server user.
    krb.create_principal(principal='ldap/%s' % topology.standalone.host)
    krb.create_keytab(principal='ldap/%s' % topology.standalone.host, keytab='/etc/krb5.keytab')
    os.chown('/etc/krb5.keytab', topology.standalone.get_user_uid(), topology.standalone.get_group_gid())

    # Add sasl mappings
    saslmappings = SaslMappings(topology.standalone)
    # First, purge all the default maps.
    [m.delete() for m in saslmappings.list()]
    # Now create a single map that works for our case.
    saslmappings.create(properties={
        'cn': 'suffix map',
        # Don't add the realm due to a SASL bug
        # 'nsSaslMapRegexString': '\\(.*\\)@%s' % self.realm,
        'nsSaslMapRegexString': '\\(.*\\)',
        'nsSaslMapBaseDNTemplate': topology.standalone.creation_suffix,
        'nsSaslMapFilterTemplate': '(uid=\\1)'
    })
    topology.standalone.realm = krb
    topology.standalone.config.set('nsslapd-localhost', topology.standalone.host)

    # Disable the secure port (GSSAPI and TLS conflict here) and restart
    # so the config changes take effect.
    topology.standalone.sslport = None
    topology.standalone.restart()

    topology.standalone.clearTmpDir(__file__)

    def fin():
        if DEBUGGING:
            # Keep instance and realm around for inspection.
            topology.standalone.stop()
        else:
            assert _remove_ssca_db(topology)
            if topology.standalone.exists():
                topology.standalone.delete(pyinstall=PYINSTALL)
            krb.destroy_realm()

    request.addfinalizer(fin)

    topology.logcap = LogCapture()
    return topology