def topology(request):
    """Build a fresh standalone instance for the module and remove it at teardown."""
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Describe the standalone instance in the shared args dict.
    args_instance.update({
        SER_HOST: HOST_STANDALONE,
        SER_PORT: PORT_STANDALONE,
        SER_SERVERID_PROP: SERVERID_STANDALONE,
        SER_CREATION_SUFFIX: DEFAULT_SUFFIX,
    })

    standalone = DirSrv(verbose=False)
    standalone.allocate(args_instance.copy())

    # Start from a clean slate: drop any leftover instance first.
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    # Remove the instance once the tests are done with it.
    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """This fixture is used to standalone topology for the 'module'."""
    standalone = DirSrv(verbose=False)

    # Describe the standalone instance in the shared args dict.
    args_instance.update({
        SER_HOST: HOST_STANDALONE,
        SER_PORT: PORT_STANDALONE,
        SER_SERVERID_PROP: SERVERID_STANDALONE,
    })
    standalone.allocate(args_instance.copy())

    # Wipe any pre-existing instance so the run starts clean.
    if standalone.exists():
        standalone.delete()

    # Create it, then open a connection (also populates dbdir, confdir...).
    standalone.create()
    standalone.open()

    # Delete the instance at the end of the module.
    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    # Standalone instance is now up and running.
    return TopologyStandalone(standalone)
def topology(request):
    """This fixture is used to standalone topology for the 'module'.

    A standalone instance (and possibly a backup of it) may already
    exist at the beginning; it is removed and recreated from scratch.
    """
    standalone = DirSrv(verbose=False)

    # Args for the standalone instance.
    args_instance.update({
        SER_HOST: HOST_STANDALONE,
        SER_PORT: PORT_STANDALONE,
        SER_SERVERID_PROP: SERVERID_STANDALONE,
    })
    standalone.allocate(args_instance.copy())

    # Remove any leftover instance before creating a new one.
    if standalone.exists():
        standalone.delete()
    standalone.create()
    # open() also retrieves configuration information (dbdir, confdir...).
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def test_basic(topology_st, simple_allocate):
    """Check that all DS directories and systemd items were removed

    :id: 9e8bbcda-358d-4e9c-a38c-9b4c3b63308e
    :parametrized: yes
    """
    inst = topology_st.standalone

    # FreeIPA uses local_simple_allocate for the removal process
    if simple_allocate:
        inst = DirSrv(verbose=inst.verbose)
        inst.local_simple_allocate(topology_st.standalone.serverid)

    remove_ds_instance(inst)

    # Every filesystem path belonging to the instance must be gone.
    paths = [inst.ds_paths.backup_dir,
             inst.ds_paths.cert_dir,
             inst.ds_paths.config_dir,
             inst.ds_paths.db_dir,
             inst.get_changelog_dir(),
             inst.ds_paths.ldif_dir,
             inst.ds_paths.lock_dir,
             inst.ds_paths.log_dir]
    for path in paths:
        assert not os.path.exists(path)

    # `systemctl is-enabled` exits non-zero (CalledProcessError) for a
    # disabled/unknown unit.  A zero exit means the unit is still enabled,
    # i.e. the removal failed -- the original code asserted nothing on
    # that path and silently passed.
    try:
        output = subprocess.check_output(
            ['systemctl', 'is-enabled', 'dirsrv@{}'.format(inst.serverid)],
            encoding='utf-8')
        raise AssertionError(
            "systemd unit is still enabled: {}".format(output))
    except subprocess.CalledProcessError as ex:
        assert "disabled" in ex.output
def topology(request):
    """This fixture is used to create a DirSrv instance for the 'module'."""
    schemainst = DirSrv(verbose=False)

    # Properties of the instance under test.
    args_instance.update({
        SER_HOST: HOST_STANDALONE,
        SER_PORT: PORT_STANDALONE,
        SER_SERVERID_PROP: SERVERID_STANDALONE,
    })
    schemainst.allocate(args_instance)

    # Drop any leftover instance, then build a fresh one.
    if schemainst.exists():
        schemainst.delete()
    schemainst.create()
    schemainst.open()

    # Tear the instance down when the module is done.
    def fin():
        schemainst.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(schemainst)
def getConnection(binddn='', password='', saslmethod=None):
    """Build and open a DirSrv connection from the HTTP request headers.

    :param binddn: DN to bind as (defaults to anonymous)
    :param password: password for the bind DN
    :param saslmethod: SASL mechanism to use, or None for simple bind
    :returns: an opened DirSrv connection
    """
    dir_srv_conn = DirSrv(verbose=False)
    headers = request.headers
    print(headers)

    # Parse the headers looking for the host name.  The header names are
    # prefixed with "ldap" because plain "host" is reserved for HTTP things.
    inst = {}
    inst[SER_HOST] = headers.get('ldaphost', socket.gethostname())
    try:
        inst[SER_PORT] = int(headers.get('ldapport', '389'))
    except ValueError:
        # Non-numeric port header: fall back to the LDAP default.
        inst[SER_PORT] = 389
    inst[SER_SERVERID_PROP] = headers.get('ldapinst', None)

    # We need apache to determine this, not us. Set to simple
    # saslmethod = str(headers.get('ldapsaslmethod', None))

    # Certificate database directory name.
    # don't wrap this in str, else bad things happen :)
    certdir = headers.get('ldapcertdir', None)

    inst[SER_ROOT_DN] = binddn
    inst[SER_ROOT_PW] = password

    dir_srv_conn.allocate(inst)
    dir_srv_conn.open(saslmethod, certdir)
    return dir_srv_conn
def setup():
    """Open the module-global connection and reset its bookkeeping fields."""
    global conn
    conn = DirSrv(**config.auth)
    conn.verbose = True
    # Track what the tests create so teardown can undo it.
    conn.added_entries = []
    conn.added_backends = {'o=mockbe2'}
    conn.added_replicas = []
    harn_nolog()
def setup():
    """Connect to a running 389 instance and store it in the global ``conn``.

    :raises SERVER_DOWN: when no working 389 instance is reachable.
    """
    global conn
    try:
        conn = DirSrv(**config.auth)
        conn.verbose = True
        conn.added_entries = []
    # Fixed: "except SERVER_DOWN, e" is Python 2-only syntax and a
    # SyntaxError on Python 3.
    except SERVER_DOWN:
        log.error("To run tests you need a working 389 instance %s"
                  % config.auth)
        # Bare re-raise preserves the original traceback.
        raise
def openConnection(self, inst):
    """Open and return a fresh connection to the server described by ``inst``."""
    server = DirSrv(verbose=False)
    # Reuse inst's coordinates for the new connection.
    args_instance.update({
        SER_HOST: inst.host,
        SER_PORT: inst.port,
        SER_SERVERID_PROP: inst.serverid,
    })
    server.allocate(args_instance.copy())
    server.open()
    return server
def setup():
    """Connect to a running 389 instance and store it in the global ``conn``.

    :raises SERVER_DOWN: when no working 389 instance is reachable.
    """
    global conn
    try:
        conn = DirSrv(**config.auth)
        conn.verbose = True
        conn.added_entries = []
    except SERVER_DOWN:
        log.error("To run tests you need a working 389 instance %s"
                  % config.auth)
        # Fixed: bare `raise` instead of `raise e` so the original
        # traceback is preserved.
        raise
def openConnection(inst):
    """Open and return a new connection to the standalone LDAP server."""
    server = DirSrv(verbose=False)
    # Point the shared args dict at the standalone instance.
    args_instance.update({
        SER_HOST: HOST_STANDALONE,
        SER_PORT: PORT_STANDALONE,
        SER_SERVERID_PROP: SERVERID_STANDALONE,
    })
    server.allocate(args_instance.copy())
    server.open()
    return server
def topology(request):
    """Provide an unallocated DirSrv handle, cleaning up leftovers on both ends."""
    instance = DirSrv(verbose=False)

    # Drop a pre-existing instance so each run starts clean.
    if instance.exists():
        instance.delete()

    def fin():
        # Remove the instance again if the test created one.
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def setup():
    # uses an existing 389 instance
    # add a suffix
    # add an agreement
    # This setup is quite verbose but to test DirSrv method we should
    # do things manually. A better solution would be to use an LDIF.
    global conn
    # Open an authenticated connection using the credentials from config.
    conn = DirSrv(**config.auth)
    conn.verbose = True
    # Bookkeeping lists/sets used by the tests and teardown to undo changes.
    conn.added_entries = []
    conn.added_backends = set(['o=mockbe1'])
    conn.added_replicas = []


# NOTE(review): the opening triple-quote below appears to start a large
# commented-out section whose closing quote lies outside this view.
"""
def setUp(self):
    """Allocate the test instance and recreate it from scratch."""
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    instance.allocate({
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_DEPLOYED_DIR: INSTANCE_PREFIX,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })
    # Wipe any previous run's instance before building a new one.
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()
    self.instance = instance
def _remove_failed_install(self, serverid):
    """The install failed, remove the scraps.

    :param serverid: The server ID of the instance to remove
    """
    inst = DirSrv()

    # Allocate the instance based on name.  (Removed a dead `insts = []`
    # assignment that was immediately overwritten.)
    insts = inst.list(serverid=serverid)
    if len(insts) != 1:
        self.log.error("No such instance to remove {}".format(serverid))
        return
    inst.allocate(insts[0])
    remove_ds_instance(inst, force=True)
def connect_instance(ldapurl, binddn, verbose, starttls):
    """Open a connection to ``ldapurl``, prompting for a password when needed.

    :param ldapurl: LDAP URL of the server
    :param binddn: DN to bind as, or None when autobind is available
    :param verbose: verbosity flag passed to DirSrv
    :param starttls: whether to negotiate StartTLS
    :returns: an opened DirSrv connection
    :raises Exception: when no binddn is given and autobind is unavailable
    """
    dsargs = {SER_LDAP_URL: ldapurl, SER_ROOT_DN: binddn}
    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)
    if not ds.can_autobind() and binddn is not None:
        dsargs[SER_ROOT_PW] = getpass("Enter password for %s on %s : " %
                                      (binddn, ldapurl))
    # Fixed: only require a binddn when autobind is NOT possible.  The
    # previous `elif binddn is None` raised even when autobind would have
    # succeeded without a bind DN (the dsrc variant of this helper guards
    # on can_autobind() here).
    elif not ds.can_autobind() and binddn is None:
        raise Exception("Must provide a binddn to connect with")
    # Re-allocate so the (possibly added) password is taken into account.
    ds.allocate(dsargs)
    ds.open(starttls=starttls)
    print("")
    return ds
def topology(request):
    """Create a fresh standalone instance, removing it again at teardown."""
    standalone = DirSrv(verbose=False)
    standalone.log.debug('Instance allocated')
    standalone.allocate({
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })

    # Rebuild from scratch if a previous run left an instance behind.
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """Create a fresh local instance and delete it when the tests finish."""
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    instance.allocate({
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })

    # Replace any leftover instance with a brand-new one.
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()

    def fin():
        instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def test_finalizer():
    """Remove every instance defined in ALL_INSTANCES and its backups."""
    # Fixed: the loop variable was named ``args_instance``, shadowing the
    # module-global dict of the same name used by the other fixtures.
    for inst_args in ALL_INSTANCES:
        instance = DirSrv(verbose=True)
        instance.allocate(inst_args)
        if instance.exists():
            instance.delete()
        # remove any existing backup for this instance
        instance.clearBackupFS()
def instance_remove_all(log, args):
    """Remove all instances - clean sweep! """
    inst_names = get_instance_list(args.remove_all)
    if not inst_names:
        print("No instances to remove")
        return

    # Require explicit confirmation before destroying everything.
    answer = input(
        "Are you sure you want to remove all the Directory Server instances? Enter \"Yes\" to continue: "
    )
    if answer != 'Yes':
        print("Aborted removal of all instances")
        return

    # Do it!
    list_inst = DirSrv(verbose=args.verbose)
    for inst in list_inst.list(all=True, serverid=inst_names[0]):
        remove_inst = DirSrv(verbose=args.verbose)
        remove_inst.allocate(inst)
        try:
            log.info("Removing instance: slapd-" + str(remove_inst.serverid))
            remove_ds_instance(remove_inst)
        except Exception as e:
            log.fatal('Failed to remove all instances: ' + str(e))
            sys.exit(1)
    log.info('All instances have been successfully removed')
def _offlineDirsrv(args):
    """Allocate an offline DirSrv instance.

    The instance is NOT initialized against a live Directory instance
    (__localinit__() and __add_brookers__() are not called).  The
    properties set are: host, port, serverid, inst, prefix and backup.
    """
    from lib389 import DirSrv
    srv = DirSrv(verbose=True)
    srv.allocate(args)
    return srv
def connect_instance(dsrc_inst, verbose):
    """Open a connection described by a .dsrc instance definition.

    Prompts for the root password when neither autobind nor a stored
    password is available; raises when no binddn exists and autobind
    is impossible.
    """
    dsargs = dsrc_inst['args']
    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)

    binddn = dsrc_inst['binddn']
    if not ds.can_autobind() and binddn is not None:
        dsargs[SER_ROOT_PW] = getpass("Enter password for %s on %s : " %
                                      (binddn, dsrc_inst['uri']))
    elif not ds.can_autobind() and binddn is None:
        raise Exception("Must provide a binddn to connect with")

    # Re-allocate so a freshly entered password is picked up.
    ds.allocate(dsargs)
    ds.open(saslmethod=dsrc_inst['saslmech'],
            certdir=dsrc_inst['tls_cacertdir'],
            reqcert=dsrc_inst['tls_reqcert'],
            usercert=dsrc_inst['tls_cert'],
            userkey=dsrc_inst['tls_key'],
            starttls=dsrc_inst['starttls'],
            connOnly=True)
    return ds
class CliTool(object):
    """Small helper wrapping a DirSrv connection for CLI commands.

    Holds the parsed CLI ``args`` (when given), the DirSrv handle in
    ``self.ds`` and the selected instance's properties in ``self.inst``.
    """

    def __init__(self, args=None):
        # ``args`` is a parsed CLI namespace; its ``verbose`` flag is
        # propagated to the DirSrv handle when present.
        if args is not None:
            self.args = args
            self.ds = DirSrv(verbose=args.verbose)
        else:
            self.ds = DirSrv()

    def populate_instance_dict(self, instance):
        """Look up ``instance`` on disk and store its properties in self.inst.

        :param instance: server id of the instance to select
        :raises ValueError: when no unique matching instance exists
        """
        insts = self.ds.list(serverid=instance)
        if len(insts) != 1:
            # Raise an exception here?
            self.inst = None
            raise ValueError("No such instance %s" % instance)
        else:
            self.inst = insts[0]

    def get_rootdn_pass(self):
        """Ensure self.inst carries a root password, prompting when missing."""
        if self.args.binddn is None:
            binddn = self.inst[SER_ROOT_DN]
        else:
            binddn = self.args.binddn
        # There is a dict get key thing somewhere ...
        if self.inst.get(SER_ROOT_PW, None) is None:
            prompt_txt = ('Enter password for %s on instance %s: ' %
                          (binddn, self.inst[SER_SERVERID_PROP]))
            self.inst[SER_ROOT_PW] = getpass(prompt_txt)
            # Blank line after the hidden-input prompt.
            print("")
        return

    def connect(self):
        """Bind to the instance, preferring autobind over a password bind."""
        # Can we attempt the autobind?
        # This should be a bit cleaner perhaps
        # Perhaps an argument to the cli?
        self.ds.allocate(self.inst)
        if not self.ds.can_autobind():
            self.get_rootdn_pass()
            # Re-allocate so the freshly obtained password is used.
            self.ds.allocate(self.inst)
        self.ds.open()

    def disconnect(self):
        """Close the LDAP connection."""
        # Is there a ds unbind / disconnect?
        self.ds.close()
def topology(request):
    """Create a DS instance via SetupDs with a sample userRoot backend."""
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    # Build the SetupDs driver with the right types.
    log_cap = LogCapture()
    setup_ds = SetupDs(verbose=DEBUGGING, dryrun=False, log=log_cap.log)

    # Take the defaults from the Type2Base collectors (as though they came
    # from _validate_ds_2_config), then override instance name, port and
    # root password.
    gen_opts = General2Base(log_cap.log)
    gen_opts.verify()
    general = gen_opts.collect()

    slapd_opts = Slapd2Base(log_cap.log)
    slapd_opts.set('instance_name', INSTANCE_SERVERID)
    slapd_opts.set('port', INSTANCE_PORT)
    slapd_opts.set('root_password', PW_DM)
    slapd_opts.verify()
    slapd = slapd_opts.collect()

    setup_ds.create_from_args(general, slapd, {}, None)

    # Make sure we can connect.
    instance.open()

    # Create the example backend with sample entries.
    instance.backends.create(properties={
        'cn': ['userRoot'],
        'nsslapd-suffix': ['dc=example,dc=com'],
    })

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Create a standalone instance with the default suffix; delete it at teardown."""
    standalone = DirSrv(verbose=False)

    # Describe the standalone instance in the shared args dict.
    args_instance.update({
        SER_HOST: HOST_STANDALONE,
        SER_PORT: PORT_STANDALONE,
        SER_SERVERID_PROP: SERVERID_STANDALONE,
        SER_CREATION_SUFFIX: DEFAULT_SUFFIX,
    })
    standalone.allocate(args_instance.copy())

    # Rebuild from a clean state.
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    # Delete each instance in the end.
    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def rootdn_bind(inst, uri=None, fail=False):
    """Helper function to test root DN bind.

    NOTE(review): the ``fail`` parameter is accepted but unused in this
    body -- kept for interface compatibility with callers.
    """
    newinst = DirSrv(verbose=False)
    newinst.allocate({SER_PORT: inst.port,
                      SER_SERVERID_PROP: inst.serverid})
    # This binds as root dn.
    newinst.open(uri=uri, connOnly=True)
def test_user_search_performance():
    """Measure search throughput against the remote target instance."""
    inst = DirSrv(verbose=True)
    inst.remote_simple_allocate(f"ldaps://{TARGET_HOST}", password="******")
    # Need a better way to set this.
    inst.host = TARGET_HOST
    inst.port = TARGET_PORT
    inst.open(reqcert=ldap.OPT_X_TLS_NEVER)

    assert_data_present(inst)

    result_single = _do_search_performance(inst, 1)
    # r2 = _do_search_performance(inst, 4)
    # r3 = _do_search_performance(inst, 6)
    # r4 = _do_search_performance(inst, 8)
    # r5 = _do_search_performance(inst, 12)
    result_many = _do_search_performance(inst, 16)
def topology(request):
    """Create a DS instance via SetupDs and return it with its log capture."""
    lc = LogCapture()
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    # This will need to change to instance.create in the future
    # when it's linked up!
    setup_ds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)

    # Take defaults from the Type2Base collectors (as though from
    # _validate_ds_2_config), then override name, port and root password.
    gen_opts = General2Base(lc.log)
    gen_opts.verify()
    general = gen_opts.collect()

    # Need an args -> options2 ...
    slapd_opts = Slapd2Base(lc.log)
    slapd_opts.set('instance_name', INSTANCE_SERVERID)
    slapd_opts.set('port', INSTANCE_PORT)
    slapd_opts.set('root_password', PW_DM)
    slapd_opts.verify()
    slapd = slapd_opts.collect()

    setup_ds.create_from_args(general, slapd, {}, None)

    # Assert we did change the system.
    insts = instance.list(serverid=INSTANCE_SERVERID)
    assert(len(insts) == 1)

    # Make sure we can connect.
    instance.open(connOnly=True)

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance, lc)
def test_allocated_to_online(self, verbose):
    """Check an allocated DirSrv without a serverid can open an existing
    instance and reach DIRSRV_STATE_ONLINE (serverid discovered on open)."""
    # Here the instance was already created; check we can connect to it
    # without creating it (especially without a serverid value).

    # Allocate and create the instance.
    args = {
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_DEPLOYED_DIR: INSTANCE_PREFIX,
        SER_SERVERID_PROP: INSTANCE_SERVERID
    }
    self.instance.log.info("test_allocated_to_online: Create an instance")
    self.instance = DirSrv(verbose=verbose)
    # NOTE(review): this checks the test object, not the DirSrv instance;
    # possibly intended as hasattr(self.instance, 'serverid') -- kept as-is.
    assert not hasattr(self, 'serverid')
    self.instance.allocate(args)
    self.instance.create()
    self.instance.open()
    # Fixed: identity comparisons with None (PEP 8 E711).
    assert self.instance.serverid is not None

    # The instance exists; allocate a new DirSrv without a serverid.
    self.instance.log.info("test_allocated_to_online: instance New")
    self.instance = DirSrv(verbose=verbose)
    assert not hasattr(self, 'serverid')
    assert self.instance.state == DIRSRV_STATE_INIT
    args = {
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_DEPLOYED_DIR: INSTANCE_PREFIX,
    }
    self.instance.allocate(args)
    self.instance.log.info("test_allocated_to_online: instance Allocated")
    assert self.instance.serverid is None
    assert self.instance.state == DIRSRV_STATE_ALLOCATED

    self.instance.open()
    self.instance.log.info("test_allocated_to_online: instance online")
    assert self.instance.serverid is not None
    assert self.instance.serverid == self.instance.inst
    assert self.instance.state == DIRSRV_STATE_ONLINE
def connect_instance(ldapurl, binddn, verbose, starttls):
    """Open a connection-only bind to ``ldapurl``, prompting for a password
    when needed.

    :param ldapurl: LDAP URL of the server
    :param binddn: DN to bind as, or None when autobind is available
    :param verbose: verbosity flag passed to DirSrv
    :param starttls: whether to negotiate StartTLS
    :returns: an opened DirSrv connection
    :raises Exception: when no binddn is given and autobind is unavailable
    """
    dsargs = {
        SER_LDAP_URL: ldapurl,
        SER_ROOT_DN: binddn
    }
    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)
    if not ds.can_autobind() and binddn is not None:
        dsargs[SER_ROOT_PW] = getpass("Enter password for %s on %s : " %
                                      (binddn, ldapurl))
    # Fixed: only require a binddn when autobind is NOT possible.  The
    # previous `elif binddn is None` raised even when autobind would have
    # succeeded without a bind DN (the dsrc variant of this helper guards
    # on can_autobind() here).
    elif not ds.can_autobind() and binddn is None:
        raise Exception("Must provide a binddn to connect with")
    # Re-allocate so the (possibly added) password is taken into account.
    ds.allocate(dsargs)
    ds.open(starttls=starttls, connOnly=True)
    print("")
    return ds
def test_allocated_to_online(self, verbose):
    """Check an allocated DirSrv without a serverid can open an existing
    instance and reach DIRSRV_STATE_ONLINE (serverid discovered on open)."""
    # Here the instance was already created; check we can connect to it
    # without creating it (especially without a serverid value).

    # Allocate and create the instance.
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID
            }
    self.instance.log.info("test_allocated_to_online: Create an instance")
    self.instance = DirSrv(verbose=verbose)
    # NOTE(review): this checks the test object, not the DirSrv instance;
    # possibly intended as hasattr(self.instance, 'serverid') -- kept as-is.
    assert not hasattr(self, 'serverid')
    self.instance.allocate(args)
    self.instance.create()
    self.instance.open()
    # Fixed: identity comparisons with None (PEP 8 E711).
    assert self.instance.serverid is not None

    # The instance exists; allocate a new DirSrv without a serverid.
    self.instance.log.info("test_allocated_to_online: instance New")
    self.instance = DirSrv(verbose=verbose)
    assert not hasattr(self, 'serverid')
    assert self.instance.state == DIRSRV_STATE_INIT
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            }
    self.instance.allocate(args)
    self.instance.log.info("test_allocated_to_online: instance Allocated")
    assert self.instance.serverid is None
    assert self.instance.state == DIRSRV_STATE_ALLOCATED

    self.instance.open()
    self.instance.log.info("test_allocated_to_online: instance online")
    assert self.instance.serverid is not None
    assert self.instance.serverid == self.instance.inst
    assert self.instance.state == DIRSRV_STATE_ONLINE
def topology(request):
    """Create a kerberos realm plus a DS instance inside it; tear both down."""
    # Create the realm.
    krb = MitKrb5(realm=REALM)
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    # WARNING: If this test fails it's likely a hostname issue!!!
    instance.allocate({
        SER_HOST: socket.gethostname(),
        SER_PORT: INSTANCE_PORT,
        SER_REALM: REALM,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })
    if instance.exists():
        instance.delete()

    # Our realm probably exists too; remove the old keytab first.
    if os.path.exists(KEYTAB):
        os.remove(KEYTAB)
    if krb.check_realm():
        krb.destroy_realm()

    # Creating the realm automatically creates the krb entries.
    krb.create_realm()

    instance.create()
    instance.open()

    def fin():
        # Remove the instance, the realm and any kerberos artifacts.
        if instance.exists():
            instance.delete()
        if krb.check_realm():
            krb.destroy_realm()
        if os.path.exists(KEYTAB):
            os.remove(KEYTAB)
        if os.path.exists(CCACHE):
            os.remove(CCACHE)
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Provide an allocated-but-not-created instance, wiping leftovers."""
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})

    # Clean out any instance left from a previous run.
    if instance.exists():
        instance.delete()

    def fin():
        # Keep the instance around for debugging when DEBUGGING is set.
        if instance.exists() and not DEBUGGING:
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def getReport():
    """Capture data about stopped instances.

    :returns: a report, i.e. a list of (sectionName, text) tuples covering
        running/stopped instances, core files, ASAN files and, for each
        stopped server, an error-log extract and database info.
    """
    report = []

    def add_section(name, text):
        report.append((name, text))

    # Partition the known instances by whether they are running.
    instancesOK = []
    instancesKO = []
    for instdir in DirSrv().list(all=True):
        inst = DirSrv()
        inst.allocate(instdir)
        (instancesOK if inst.status() else instancesKO).append(inst)

    # Generate the report.
    add_section("Running instances",
                loglist([i.getServerId() for i in instancesOK]))
    add_section("Stopped instances",
                loglist([i.getServerId() for i in instancesKO]))

    # Core file information.
    add_section("Core files", logcorefiles())
    # ASAN file information.
    report.extend(logasanfiles())

    # For stopped servers only, log an extract of the error log (critical,
    # emergency and standard errors plus the final "server stopped" line
    # that denotes a clean stop) and the dbscan -L output.
    for inst in instancesKO:
        path = inst.ds_paths.error_log.format(instance_name=inst.getServerId())
        add_section(f"Extract of instance {inst.getServerId()} error log",
                    logErrors(path))
        add_section(f"Database info for instance {inst.getServerId()}",
                    logDbscan(inst))

    return report
def topology(request):
    """Provide an allocated instance with any leftover removed on both ends."""
    instance = DirSrv(verbose=False)
    instance.allocate({
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })

    # Make sure no stale instance survives from a previous run.
    if instance.exists():
        instance.delete()

    def fin():
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def instance_remove_all(log, args):
    """Remove all instances - clean sweep! """
    inst_names = get_instance_list()
    if not inst_names:
        print("No instances to remove")
        return

    # Give the operator a 5-second countdown to bail out.
    log.info("""
About to remove all Directory Server instances!
If this is not what you want, press ctrl-c now ...
    """)
    for i in range(1, 6):
        log.info('%s ...' % (6 - i))
        time.sleep(1)

    # Do it!
    list_inst = DirSrv(verbose=args.verbose)
    insts = list_inst.list(all=True, serverid=inst_names[0])
    no_problems = True
    for inst in insts:
        remove_inst = DirSrv(verbose=args.verbose)
        remove_inst.allocate(inst)
        try:
            log.info("Removing instance: slapd-" + str(remove_inst.serverid))
            remove_ds_instance(remove_inst)
        except Exception as e:
            # Keep going: report the failure but try the remaining instances.
            log.error(
                f'Failed to remove slapd-{remove_inst.serverid} - Error: {str(e)}'
            )
            no_problems = False
    if no_problems:
        log.info('All instances have been successfully removed')
    else:
        log.info('Some instances were not removed')
def test_instance_list():
    """instance_list() must log either 'no instances' or at least one instance."""
    log_cap = LogCapture()
    instance_list(DirSrv(), log_cap.log, None)
    saw_none = log_cap.contains("No instances of Directory Server")
    saw_some = log_cap.contains("instance: ")
    assert saw_none or saw_some
def topology(request):
    """Create a fresh master and consumer pair; delete both at teardown."""

    def _fresh_instance(host, port, serverid, label):
        # Allocate, wipe any leftover, then create and open the server.
        srv = DirSrv(verbose=False)
        srv.log.debug("%s allocated" % label)
        srv.allocate({
            SER_HOST: host,
            SER_PORT: port,
            SER_SERVERID_PROP: serverid,
        })
        if srv.exists():
            srv.delete()
        srv.create()
        srv.open()
        return srv

    master = _fresh_instance(HOST_MASTER, PORT_MASTER,
                             SERVERID_MASTER, "Master")
    consumer = _fresh_instance(HOST_CONSUMER, PORT_CONSUMER,
                               SERVERID_CONSUMER, "Consumer")

    # Delete each instance in the end.
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
def topology(request):
    """Create a fresh standalone instance and delete it after the tests."""
    standalone = DirSrv(verbose=False)
    standalone.log.debug('Instance allocated')
    standalone.allocate({
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })

    # Recreate from scratch if a previous run left the instance behind.
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
class Test_dirsrv():
    """Exercise the DirSrv state machine:
    INIT -> ALLOCATED -> (create) OFFLINE -> (open) ONLINE and back.
    """

    def _add_user(self, success=True):
        # Add TEST_REPL_DN; when success is False a failure is expected
        # and only logged.
        try:
            self.instance.add_s(Entry((TEST_REPL_DN,
                                       {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
                                        'uid': 'test',
                                        'sn': 'test',
                                        'cn': 'test'})))
        except Exception as e:
            if success:
                raise
            else:
                self.instance.log.info('Fail to add (expected): %s' % e.args)
                return
        self.instance.log.info('%s added' % TEST_REPL_DN)

    def _mod_user(self, success=True):
        # Replace the description of TEST_REPL_DN with a random value.
        try:
            replace = [(ldap.MOD_REPLACE, 'description', str(randint(1, 100)))]
            self.instance.modify_s(TEST_REPL_DN, replace)
        except Exception as e:
            if success:
                raise
            else:
                self.instance.log.info('Fail to modify (expected): %s' % e.args)
                return
        self.instance.log.info('%s modified' % TEST_REPL_DN)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_allocate(self, verbose=False):
        # allocate() must move a fresh DirSrv from INIT to ALLOCATED,
        # record the requested properties, and allow re-allocation to
        # override and restore them.
        instance = DirSrv(verbose=verbose)
        instance.log.debug("Instance allocated")
        assert instance.state == DIRSRV_STATE_INIT

        # Allocate the instance
        args = {SER_HOST: LOCALHOST,
                SER_PORT: INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                SER_SERVERID_PROP: INSTANCE_SERVERID
                }
        instance.allocate(args)
        userid = pwd.getpwuid(os.getuid())[0]

        # Now verify the settings
        assert instance.state == DIRSRV_STATE_ALLOCATED
        assert instance.host == LOCALHOST
        assert instance.port == INSTANCE_PORT
        assert instance.sslport == None
        assert instance.binddn == DN_DM
        assert instance.bindpw == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid == userid
        assert instance.serverid == INSTANCE_SERVERID
        assert instance.groupid == instance.userid
        assert instance.prefix == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP

        # Now check we can change the settings of an allocated DirSrv
        args = {SER_SERVERID_PROP: INSTANCE_SERVERID,
                SER_HOST: LOCALHOST,
                SER_PORT: INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                SER_ROOT_DN: "uid=foo"}
        instance.allocate(args)
        assert instance.state == DIRSRV_STATE_ALLOCATED
        assert instance.host == LOCALHOST
        assert instance.port == INSTANCE_PORT
        assert instance.sslport == None
        assert instance.binddn == "uid=foo"
        assert instance.bindpw == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid == userid
        assert instance.serverid == INSTANCE_SERVERID
        assert instance.groupid == instance.userid
        assert instance.prefix == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP

        # OK restore back the valid parameters
        args = {SER_SERVERID_PROP: INSTANCE_SERVERID,
                SER_HOST: LOCALHOST,
                SER_PORT: INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX}
        instance.allocate(args)
        assert instance.state == DIRSRV_STATE_ALLOCATED
        assert instance.host == LOCALHOST
        assert instance.port == INSTANCE_PORT
        assert instance.sslport == None
        assert instance.binddn == DN_DM
        assert instance.bindpw == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid == userid
        assert instance.serverid == INSTANCE_SERVERID
        assert instance.groupid == instance.userid
        assert instance.prefix == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP

        self.instance = instance

    def test_list_init(self):
        '''
        Lists the instances on the file system
        '''
        for properties in self.instance.list():
            self.instance.log.info("properties: %r" % properties)
        for properties in self.instance.list(all=True):
            self.instance.log.info("properties (all): %r" % properties)

    def test_allocated_to_offline(self):
        # create() moves ALLOCATED -> OFFLINE.
        self.instance.create()

    def test_offline_to_online(self):
        # open() moves OFFLINE -> ONLINE.
        self.instance.open()

    def test_online_to_offline(self):
        # close() moves ONLINE -> OFFLINE.
        self.instance.close()

    def test_offline_to_allocated(self):
        # delete() moves OFFLINE back to ALLOCATED.
        self.instance.delete()

    def test_allocated_to_online(self, verbose):
        # Here the instance was already created; check we can connect to it
        # without creating it (especially without a serverid value).

        # Allocate the instance
        args = {SER_HOST: LOCALHOST,
                SER_PORT: INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                SER_SERVERID_PROP: INSTANCE_SERVERID
                }
        self.instance.log.info("test_allocated_to_online: Create an instance")
        self.instance = DirSrv(verbose=verbose)
        # NOTE(review): checks the test object, not the DirSrv instance --
        # possibly intended as hasattr(self.instance, 'serverid'); verify.
        assert not hasattr(self, 'serverid')
        self.instance.allocate(args)
        self.instance.create()
        self.instance.open()
        assert self.instance.serverid != None

        # The instance is create, allocate a new DirSrv
        self.instance.log.info("test_allocated_to_online: instance New")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        assert self.instance.state == DIRSRV_STATE_INIT
        args = {SER_HOST: LOCALHOST,
                SER_PORT: INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                }
        self.instance.allocate(args)
        self.instance.log.info("test_allocated_to_online: instance Allocated")
        assert self.instance.serverid == None
        assert self.instance.state == DIRSRV_STATE_ALLOCATED

        self.instance.open()
        self.instance.log.info("test_allocated_to_online: instance online")
        assert self.instance.serverid != None
        assert self.instance.serverid == self.instance.inst
        assert self.instance.state == DIRSRV_STATE_ONLINE
def test_allocate(self, verbose=False):
    # allocate() must move a fresh DirSrv from INIT to ALLOCATED,
    # record the requested properties, and allow re-allocation to
    # override and then restore them.
    instance = DirSrv(verbose=verbose)
    instance.log.debug("Instance allocated")
    assert instance.state == DIRSRV_STATE_INIT

    # Allocate the instance
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID
            }
    instance.allocate(args)
    userid = pwd.getpwuid(os.getuid())[0]

    # Now verify the settings
    assert instance.state == DIRSRV_STATE_ALLOCATED
    assert instance.host == LOCALHOST
    assert instance.port == INSTANCE_PORT
    assert instance.sslport == None
    assert instance.binddn == DN_DM
    assert instance.bindpw == PW_DM
    assert instance.creation_suffix == DEFAULT_SUFFIX
    assert instance.userid == userid
    assert instance.serverid == INSTANCE_SERVERID
    assert instance.groupid == instance.userid
    assert instance.prefix == INSTANCE_PREFIX
    assert instance.backupdir == INSTANCE_BACKUP

    # Now check we can change the settings of an allocated DirSrv
    args = {SER_SERVERID_PROP: INSTANCE_SERVERID,
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_ROOT_DN: "uid=foo"}
    instance.allocate(args)
    assert instance.state == DIRSRV_STATE_ALLOCATED
    assert instance.host == LOCALHOST
    assert instance.port == INSTANCE_PORT
    assert instance.sslport == None
    assert instance.binddn == "uid=foo"
    assert instance.bindpw == PW_DM
    assert instance.creation_suffix == DEFAULT_SUFFIX
    assert instance.userid == userid
    assert instance.serverid == INSTANCE_SERVERID
    assert instance.groupid == instance.userid
    assert instance.prefix == INSTANCE_PREFIX
    assert instance.backupdir == INSTANCE_BACKUP

    # OK restore back the valid parameters
    args = {SER_SERVERID_PROP: INSTANCE_SERVERID,
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX}
    instance.allocate(args)
    assert instance.state == DIRSRV_STATE_ALLOCATED
    assert instance.host == LOCALHOST
    assert instance.port == INSTANCE_PORT
    assert instance.sslport == None
    assert instance.binddn == DN_DM
    assert instance.bindpw == PW_DM
    assert instance.creation_suffix == DEFAULT_SUFFIX
    assert instance.userid == userid
    assert instance.serverid == INSTANCE_SERVERID
    assert instance.groupid == instance.userid
    assert instance.prefix == INSTANCE_PREFIX
    assert instance.backupdir == INSTANCE_BACKUP

    self.instance = instance
def topology(request):
    """Build a two-master replication topology for the module.

    Any pre-existing master instances are deleted first; a finalizer
    removes both instances when the module's tests complete.
    """
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    # Snapshot the shared args dict so later mutations don't leak into
    # this instance's settings
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    # Exposed as a module-level global so tests can manipulate this agreement
    global m1_m2_agmt
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX,
                                          host=master2.host,
                                          port=master2.port,
                                          properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX,
                                          host=master1.host,
                                          port=master1.port,
                                          properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    # Only the m1 -> m2 direction needs a total init; replication then
    # carries m2's state back via the other agreement
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Delete each instance in the end
    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master1, master2)
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: dict of [general] config options (hostname, systemd,
        selinux, defaults version, start flag)
    :param slapd: dict of [slapd] config options (paths, port, user, dn,
        passwords) — assumed already validated/populated; TODO confirm
        _prepare_ds ran first
    :param backends: list of backend property dicts to create
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the present of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Translate the template's %KEY% placeholders to {key} so str.format
    # can substitute them below (only the first two '%' per line)
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data',
                           'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    # Check if we are in a container, if so don't use /dev/shm for the db home dir
    # as containers typically don't allocate enough space for dev/shm and we don't
    # want to unexpectedly break the server after an upgrade
    #
    # If we know we are are in a container, we don't need to re-detect on systemd.
    # It actually turns out if you add systemd-detect-virt, that pulls in system
    # which subsequently breaks containers starting as instance.start then believes
    # it COULD check the ds status. The times we need to check for systemd are mainly
    # in other environments that use systemd natively in their containers.
    container_result = 1
    if not self.containerised:
        container_result = subprocess.run(["systemd-detect-virt", "-c"],
                                          stdout=subprocess.PIPE)
    if self.containerised or container_result.returncode == 0:
        # In a container, set the db_home_dir to the db path
        self.log.debug("Container detected setting db home directory to db directory.")
        slapd['db_home_dir'] = slapd['db_dir']

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        dse_fmt = dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            instance_name=slapd['instance_name'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as they can validly give
            # us a *hashed* input.
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
            db_home_dir=slapd['db_home_dir'],
            db_lib=slapd['db_lib'],
            ldapi_enabled="on",
            ldapi=slapd['ldapi'],
            ldapi_autobind="on",
        )
        file_dse.write(dse_fmt)

    self.log.info("Create file system structures ...")
    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir',
                 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to down the directory under db too for .restore to work.
    # During a restore, the db dir is deleted and recreated, which is why we need
    # to own it for a restore.
    #
    # However, in a container, we can't always guarantee this due to how the volumes
    # work and are mounted. Specifically, if we have an anonymous volume we will
    # NEVER be able to own it, but in a true deployment it is reasonable to expect
    # we DO own it. Thus why we skip it in this specific context
    if not self.containerised:
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'),
                        slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        result = subprocess.run(["systemctl", "enable",
                                 "dirsrv@%s" % slapd['instance_name']],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        args = ' '.join(ensure_list_str(result.args))
        stdout = ensure_str(result.stdout)
        stderr = ensure_str(result.stderr)
        # Systemd encodes some odd charecters into it's symlink output on newer versions which
        # can trip up the logger.
        self.log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode("utf-8"))

        # Setup tmpfiles_d
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['run_dir'], slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'].replace("slapd-" + slapd['instance_name'], ""),
                slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'], slapd['user'], slapd['group']))

    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s", slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if it all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose, containerised=self.containerised)
    if self.containerised:
        ds_instance.systemd_override = general['systemd']

    # By default SUSE does something extremely silly - it creates a hostname
    # that CANT be resolved by DNS. As a result this causes all installs to
    # fail. We need to guarantee that we only connect to localhost here, as
    # it's the only stable and guaranteed way to connect to the instance
    # at this point.
    #
    # Use ldapi which would prevent the need
    # to configure a temp root pw in the setup phase.
    args = {
        SER_HOST: "localhost",
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix'],
        SER_LDAPI_ENABLED: 'on',
        SER_LDAPI_SOCKET: slapd['ldapi'],
        SER_LDAPI_AUTOBIND: 'on'
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(),
             "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        self.log.info("Create self-signed certificate database ...")
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)
        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        self.log.info("Perform SELinux labeling ...")
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                         'run_dir', 'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])

        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
    self.log.debug(f"libfaketime installed ={'libfaketime' in sys.modules}")
    assert_c(not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
             "libfaketime python module is incompatible with ASAN build.")
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.
    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security; on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Before we create any backends, create any extra default indexes that may be
    # dynamically provisioned, rather than from template-dse.ldif. Looking at you
    # entryUUID (requires rust enabled).
    #
    # Indexes defaults to default_index_dn
    indexes = Indexes(ds_instance)
    if ds_instance.ds_paths.rust_enabled:
        indexes.create(properties={
            'cn': 'entryUUID',
            'nsSystemIndex': 'false',
            'nsIndexType': ['eq', 'pres'],
        })

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        self.log.info(f"Create database backend: {backend['nsslapd-suffix']} ...")
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry', False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            # Set basic ACIs
            c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            # Dispatch on the suffix's RDN attribute type to create the
            # matching base entry with its anonymous-read ACI
            suffix_rdn_attr = backend['nsslapd-suffix'].split('=')[0].lower()
            if suffix_rdn_attr == 'dc':
                domain = create_base_domain(ds_instance, backend['nsslapd-suffix'])
                domain.add('aci', dc_aci)
            elif suffix_rdn_attr == 'o':
                org = create_base_org(ds_instance, backend['nsslapd-suffix'])
                org.add('aci', o_aci)
            elif suffix_rdn_attr == 'ou':
                orgunit = create_base_orgunit(ds_instance, backend['nsslapd-suffix'])
                orgunit.add('aci', ou_aci)
            elif suffix_rdn_attr == 'cn':
                cn = create_base_cn(ds_instance, backend['nsslapd-suffix'])
                cn.add('aci', cn_aci)
            elif suffix_rdn_attr == 'c':
                c = create_base_c(ds_instance, backend['nsslapd-suffix'])
                c.add('aci', c_aci)
            else:
                # Unsupported rdn
                raise ValueError(
                    "Suffix RDN '{}' in '{}' is not supported. Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'"
                    .format(suffix_rdn_attr, backend['nsslapd-suffix']))

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    if len(backends) > 0:
        self.log.debug("Adding sasl maps for suffix %s" % backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(properties={
            'cn': 'rfc 2829 u syntax',
            'nsSaslMapRegexString': '^u:\\(.*\\)',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=\\1)'
        })
        # I think this is for LDAPI
        saslmappings.create(properties={
            'cn': 'uid mapping',
            'nsSaslMapRegexString': '^[^:@]+$',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=&)'
        })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    self.log.info("Perform post-installation tasks ...")
    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()

    self.log.debug(" 🎉 Instance setup complete")
def connect_instance(dsrc_inst, verbose, args):
    """Open a DirSrv connection from a .dsrc-style instance description.

    :param dsrc_inst: dict of connection settings (.dsrc values with CLI
        args already overlaid): 'args', 'binddn', 'uri', TLS settings, ...
    :param verbose: enable verbose logging on the DirSrv object
    :param args: parsed CLI namespace providing pwdfile / bindpw / prompt
    :returns: an opened DirSrv connection
    :raises ValueError: when the instance name cannot be resolved to an
        LDAP URL, or the password file cannot be read
    """
    dsargs = dsrc_inst['args']
    if '//' not in dsargs['ldapurl']:
        # Connecting to the local instance
        dsargs['server-id'] = dsargs['ldapurl']
        # We have an instance name - generate url from dse.ldif
        ldapurl, certdir = get_ldapurl_from_serverid(dsargs['ldapurl'])
        if ldapurl is not None:
            dsargs['ldapurl'] = ldapurl
            if 'ldapi://' in ldapurl:
                dsargs['ldapi_enabled'] = 'on'
                dsargs['ldapi_socket'] = ldapurl.replace('ldapi://', '')
                dsargs['ldapi_autobind'] = 'on'
            elif 'ldaps://' in ldapurl:
                dsrc_inst['tls_cert'] = certdir
        else:
            # The instance name does not match any instances
            raise ValueError("Could not find configuration for instance: " + dsargs['ldapurl'])

    ds = DirSrv(verbose=verbose)
    # We do an empty allocate here to determine if we can autobind ... (really
    # we should actually be inspect the URL ...)
    ds.allocate(dsargs)

    # Resolve the bind password: explicit file > explicit CLI password >
    # interactive prompt; only fall back to autobind detection when none
    # of the three was requested
    if args.pwdfile is not None or args.bindpw is not None or args.prompt is True:
        if args.pwdfile is not None:
            # Read password from file
            try:
                with open(args.pwdfile, "r") as f:
                    dsargs[SER_ROOT_PW] = f.readline().rstrip()
            except EnvironmentError as e:
                raise ValueError("Failed to open password file: " + str(e))
        elif args.bindpw is not None:
            # Password provided
            # This shouldn't be needed? dsrc already inherits the args ...
            dsargs[SER_ROOT_PW] = args.bindpw
        else:
            # No password or we chose to prompt
            dsargs[SER_ROOT_PW] = getpass("Enter password for {} on {}: ".format(
                dsrc_inst['binddn'], dsrc_inst['uri']))
    elif not ds.can_autobind():
        # No LDAPI, prompt for password, and bind DN if necessary
        if dsrc_inst['binddn'] is None:
            dn = ""
            while dn == "":
                dn = input("Enter Bind DN: ")
            dsrc_inst['binddn'] = dn
        dsargs[SER_ROOT_PW] = getpass("Enter password for {} on {}: ".format(
            dsrc_inst['binddn'], dsrc_inst['uri']))

    # Allocate is an awful interface that we should stop using, but for now
    # just directly map the dsrc_inst args in (remember, dsrc_inst DOES
    # overlay cli args into the map ...)
    dsargs[SER_ROOT_DN] = dsrc_inst['binddn']
    # Re-allocate on a fresh object now that the args are complete
    ds = DirSrv(verbose=verbose)
    ds.allocate(dsargs)
    ds.open(saslmethod=dsrc_inst['saslmech'],
            certdir=dsrc_inst['tls_cacertdir'],
            reqcert=dsrc_inst['tls_reqcert'],
            usercert=dsrc_inst['tls_cert'],
            userkey=dsrc_inst['tls_key'],
            starttls=dsrc_inst['starttls'],
            connOnly=True)
    # Normalise "slapd-<name>" to the bare instance name
    if ds.serverid is not None and ds.serverid.startswith("slapd-"):
        ds.serverid = ds.serverid.replace("slapd-", "", 1)
    return ds
def topology(request):
    """Create Replication Deployment"""
    # Creating master 1...
    if DEBUGGING:
        master1 = DirSrv(verbose=True)
    else:
        master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    # Snapshot the shared args dict so later mutations don't leak in
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    if DEBUGGING:
        master2 = DirSrv(verbose=True)
    else:
        master2 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX,
                                          host=master2.host,
                                          port=master2.port,
                                          properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX,
                                          host=master1.host,
                                          port=master1.port,
                                          properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    # Only the m1 -> m2 direction needs a total init
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            master1.stop()
            master2.stop()
        else:
            master1.delete()
            master2.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, master2)
def topology(request):
    """Build a master + consumer replication pair for the module.

    Both instances are recreated from scratch and removed by the
    finalizer when the tests finish.
    """

    def _fresh_instance(host, port, serverid, logmsg):
        # Allocate a DirSrv, wipe any leftover instance, create and open it
        srv = DirSrv(verbose=False)
        srv.log.debug(logmsg)
        srv.allocate({SER_HOST: host,
                      SER_PORT: port,
                      SER_SERVERID_PROP: serverid})
        if srv.exists():
            srv.delete()
        srv.create()
        srv.open()
        return srv

    # Master
    master = _fresh_instance(HOST_MASTER, PORT_MASTER, SERVERID_MASTER,
                             "Master allocated")
    # Enable replication
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=REPLICAROLE_MASTER,
                                     replicaId=REPLICAID_MASTER)

    # Consumer
    consumer = _fresh_instance(HOST_CONSUMER, PORT_CONSUMER, SERVERID_CONSUMER,
                               "Consumer allocated")
    # Enable replication
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=REPLICAROLE_CONSUMER)

    # Delete each instance in the end
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
def __getattr__(self, name):
    """Proxy a whitelisted set of method names to the wrapped DirSrv.

    :param name: attribute being looked up
    :returns: the corresponding DirSrv attribute when *name* is proxied
    :raises AttributeError: for any non-proxied name. The original fell
        through and implicitly returned None, which violates the
        __getattr__ protocol (hasattr() always True, typos return None).
    """
    if name in Agreement.proxied_methods:
        return DirSrv.__getattr__(self.conn, name)
    raise AttributeError("%s has no attribute %r" % (type(self).__name__, name))
def __getattr__(self, name):
    """Proxy a whitelisted set of method names to the wrapped DirSrv.

    :param name: attribute being looked up
    :returns: the corresponding DirSrv attribute when *name* is proxied
    :raises AttributeError: for any non-proxied name. The original fell
        through and implicitly returned None, which violates the
        __getattr__ protocol (hasattr() always True, typos return None).
    """
    if name in Tasks.proxied_methods:
        return DirSrv.__getattr__(self.conn, name)
    raise AttributeError("%s has no attribute %r" % (type(self).__name__, name))
def __getattr__(self, name):
    """Proxy a whitelisted set of method names to the wrapped DirSrv.

    :param name: attribute being looked up
    :returns: the corresponding DirSrv attribute when *name* is proxied
    :raises AttributeError: for any non-proxied name. The original fell
        through and implicitly returned None, which violates the
        __getattr__ protocol (hasattr() always True, typos return None).
    """
    if name in MappingTree.proxied_methods:
        return DirSrv.__getattr__(self.conn, name)
    raise AttributeError("%s has no attribute %r" % (type(self).__name__, name))
def topology(request):
    """Stand up a standalone instance for the module's tests.

    The finalizer intentionally does NOT remove the instance, so it can
    be inspected after the tests run.
    """
    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)

    # Populate the shared args dict, then snapshot it for allocate()
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())

    # Start from a clean slate if a previous instance is present
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    # Delete each instance in the end
    def fin():
        # Deliberately left as a no-op (instance kept for inspection)
        #standalone.delete()
        pass
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def setUp(self):
    """Allocate and (re)create a fresh DirSrv instance for each test."""
    srv = DirSrv(verbose=False)
    srv.log.debug("Instance allocated")
    srv.allocate({
        SER_HOST: LOCALHOST,
        SER_PORT: INSTANCE_PORT,
        SER_DEPLOYED_DIR: INSTANCE_PREFIX,
        SER_SERVERID_PROP: INSTANCE_SERVERID,
    })
    # Remove any leftover instance from a previous run
    if srv.exists():
        srv.delete()
    srv.create()
    srv.open()
    self.instance = srv
def _prepare_ds(self, general, slapd, backends):
    """Validate the general/slapd/backend option dicts before install.

    Mutates *slapd* in place (resolves user_uid/group_gid) and sets the
    temporary secure password used during the online setup phase.

    :param general: dict of [general] config options
    :param slapd: dict of [slapd] config options
    :param backends: list of backend property dicts (normalised in place)
    :raises ValueError: when root_password is shorter than 8 characters
    """
    self.log.info("Validate installation settings ...")
    assert_c(general['defaults'] is not None,
             "Configuration defaults in section [general] not found")
    self.log.debug("PASSED: using config settings %s" % general['defaults'])
    # Validate our arguments.
    assert_c(slapd['user'] is not None,
             "Configuration user in section [slapd] not found")
    # check the user exists
    assert_c(pwd.getpwnam(slapd['user']),
             "user %s not found on system" % slapd['user'])
    slapd['user_uid'] = pwd.getpwnam(slapd['user']).pw_uid
    assert_c(slapd['group'] is not None,
             "Configuration group in section [slapd] not found")
    assert_c(grp.getgrnam(slapd['group']),
             "group %s not found on system" % slapd['group'])
    slapd['group_gid'] = grp.getgrnam(slapd['group']).gr_gid  # check this group exists
    # Check that we are running as this user / group, or that we are root.
    assert_c(os.geteuid() == 0 or getpass.getuser() == slapd['user'],
             "Not running as user root or %s, may not have permission to continue" % slapd['user'])
    self.log.debug("PASSED: user / group checking")

    assert_c(general['full_machine_name'] is not None,
             "Configuration full_machine_name in section [general] not found")
    assert_c(general['strict_host_checking'] is not None,
             "Configuration strict_host_checking in section [general] not found")
    if general['strict_host_checking'] is True:
        # Check it resolves with dns
        assert_c(socket.gethostbyname(general['full_machine_name']),
                 "Strict hostname check failed. Check your DNS records for %s" % general['full_machine_name'])
        self.log.debug("PASSED: Hostname strict checking")

    assert_c(slapd['prefix'] is not None,
             "Configuration prefix in section [slapd] not found")
    if (slapd['prefix'] != ""):
        assert_c(os.path.exists(slapd['prefix']),
                 "Prefix location '%s' not found" % slapd['prefix'])
    self.log.debug("PASSED: prefix checking")

    # We need to know the prefix before we can do the instance checks
    assert_c(slapd['instance_name'] is not None,
             "Configuration instance_name in section [slapd] not found")
    assert_c(len(slapd['instance_name']) <= 80,
             "Server identifier should not be longer than 80 symbols")
    assert_c(all(ord(c) < 128 for c in slapd['instance_name']),
             "Server identifier can not contain non ascii characters")
    assert_c(' ' not in slapd['instance_name'],
             "Server identifier can not contain a space")
    assert_c(slapd['instance_name'] != 'admin',
             "Server identifier \"admin\" is reserved, please choose a different identifier")

    # Check that valid characters are used
    safe = re.compile(r'^[:\w@_-]+$').search
    assert_c(bool(safe(slapd['instance_name'])),
             "Server identifier has invalid characters, please choose a different value")

    # Check if the instance exists or not.
    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds = DirSrv(verbose=self.verbose)
    ds.containerised = self.containerised
    ds.prefix = slapd['prefix']
    insts = ds.list(serverid=slapd['instance_name'])
    assert_c(len(insts) == 0,
             "Another instance named '%s' may already exist" % slapd['instance_name'])

    self.log.debug("PASSED: instance checking")

    assert_c(slapd['root_dn'] is not None,
             "Configuration root_dn in section [slapd] not found")
    # Assert this is a valid DN
    assert_c(is_a_dn(slapd['root_dn']),
             "root_dn in section [slapd] is not a well formed LDAP DN")
    assert_c(slapd['root_password'] is not None and slapd['root_password'] != '',
             "Configuration attribute 'root_password' in section [slapd] not found")
    if len(slapd['root_password']) < 8:
        raise ValueError("root_password must be at least 8 characters long")

    # Check if pre-hashed or not.
    # !!!!!!!!!!!!!!
    # Right now, the way that rootpw works on ns-slapd works, it force hashes the pw
    # see https://fedorahosted.org/389/ticket/48859
    if not re.match('^([A-Z0-9]+).*$', slapd['root_password']):
        # We need to hash it. Call pwdhash-bin.
        # slapd['root_password'] = password_hash(slapd['root_password'], prefix=slapd['prefix'])
        pass
    else:
        pass

    # Create a random string
    # Hash it.
    # This will be our temporary rootdn password so that we can do
    # live mods and setup rather than static ldif manipulations.
    self._raw_secure_password = password_generate()
    self._secure_password = password_hash(self._raw_secure_password,
                                          bin_dir=slapd['bin_dir'])

    self.log.debug("INFO: temp root password set to %s" % self._raw_secure_password)
    self.log.debug("PASSED: root user checking")

    assert_c(slapd['port'] is not None,
             "Configuration port in section [slapd] not found")

    if self.containerised:
        # Containers may lack NET_BIND_SERVICE for privileged ports
        if slapd['port'] <= 1024:
            self.log.warning("WARNING: slapd port %s may not work without NET_BIND_SERVICE in containers" % slapd['port'])
        if slapd['secure_port'] <= 1024:
            self.log.warning("WARNING: slapd secure_port %s may not work without NET_BIND_SERVICE in containers" % slapd['secure_port'])
    assert_c(socket_check_open('::1', slapd['port']) is False,
             "port %s is already in use, or missing NET_BIND_SERVICE" % slapd['port'])
    # We enable secure port by default.
    assert_c(slapd['secure_port'] is not None,
             "Configuration secure_port in section [slapd] not found")
    assert_c(socket_check_open('::1', slapd['secure_port']) is False,
             "secure_port %s is already in use, or missing NET_BIND_SERVICE" % slapd['secure_port'])
    self.log.debug("PASSED: network avaliability checking")

    # Make assertions of the paths?

    # Make assertions of the backends?
    # First fix some compat shenanigans. I hate legacy ...
    for be in backends:
        for k in BACKEND_PROPNAME_TO_ATTRNAME:
            if k in be:
                be[BACKEND_PROPNAME_TO_ATTRNAME[k]] = be[k]
                del (be[k])
    for be in backends:
        assert_c('nsslapd-suffix' in be)
        assert_c('cn' in be)
def topology(request):
    """Create DS Deployment"""
    # Creating standalone instance ... (verbose when debugging)
    standalone = DirSrv(verbose=True) if DEBUGGING else DirSrv(verbose=False)

    # Populate the shared args dict, then snapshot it for allocate()
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())

    # Wipe any leftover instance from a previous run, then build fresh
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            standalone.stop(60)
        else:
            standalone.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    return TopologyStandalone(standalone)
def default_test():
    """Smoke-test driver: connect to (or create) an instance, read config
    entries, then create a second instance and add a new backend/suffix.

    Appends any instance it creates to the module-level added_instances
    list so callers can clean up.
    """
    host = socket.gethostname()
    port = 10200
    binddn = "cn=directory manager"
    bindpw = "password"
    suffix = 'dc=example,dc=com'
    basedn = DN_CONFIG
    scope = ldap.SCOPE_BASE
    filt = "(objectclass=*)"
    instance_name = ['m1', 'm2']
    instance_config = {
        'cfgdshost': host,
        'cfgdsport': port,
        'cfgdsuser': '******',
        'cfgdspwd': 'admin',
        'newrootpw': 'password',
        'newhost': host,
        'newport': port,
        'newinstance': instance_name[0],
        'newsuffix': suffix,
        'setup_admin': True,
    }

    try:
        m1 = DirSrv(host, port, binddn, bindpw)
    except Exception:
        # Narrowed from a bare 'except:' that also swallowed SystemExit and
        # KeyboardInterrupt; on connect failure, create the instance instead.
        m1 = DirSrvTools.createInstance(instance_config, verbose=0)
        added_instances.append(instance_config['newinstance'])

    # filename = "%s/slapd-%s/ldif/Example.ldif" % (m1.sroot, m1.inst)
    # m1.importLDIF(filename, "dc=example,dc=com", None, True)
    # m1.exportLDIF('/tmp/ldif', "dc=example,dc=com", False, True)
    print(m1.sroot, m1.inst, m1.errlog)
    ent = m1.getEntry(basedn, scope, filt, None)
    if ent:
        print(ent.passwordmaxage)

    # Build a second instance on port + 10
    instance_config.update({
        'newinstance': instance_name[1],
        'newport': port + 10,
    })
    m1 = DirSrvTools.createInstance(instance_config, verbose=0)
    added_instances.append(instance_config['newinstance'])
    # m1.stop(True)
    # m1.start(True)

    # Add a new backend and mapping tree for a second suffix
    suffix = "dc=example2,dc=com"
    bename = "example2db"
    backendEntry, dummy = m1.backend.add(suffix, bename)
    suffixEntry = m1.backend.setup_mt(suffix, bename)
    cn = backendEntry.getValue('cn')
    print(cn)
    entry = m1.getEntry(DN_CONFIG, ldap.SCOPE_SUBTREE, "(cn=" + cn + ")")
    print("new backend entry is:")
    print(entry)
    print(entry.getValues('objectclass'))
    print(entry.OBJECTCLASS)
    results = m1.search_s("cn=monitor", ldap.SCOPE_SUBTREE)
    print(results)
    results = m1.getBackendsForSuffix("dc=example,dc=com")
    print(results)
    print("done")
def __getattr__(self, name):
    """Proxy a whitelisted set of method names to the wrapped DirSrv.

    :param name: attribute being looked up
    :returns: the corresponding DirSrv attribute when *name* is proxied
    :raises AttributeError: for any non-proxied name. The original fell
        through and implicitly returned None, which violates the
        __getattr__ protocol (hasattr() always True, typos return None).
    """
    if name in MappingTree.proxied_methods:
        # Local import kept to avoid a circular import at module load time
        from lib389 import DirSrv
        return DirSrv.__getattr__(self.conn, name)
    raise AttributeError("%s has no attribute %r" % (type(self).__name__, name))