def topology(request):
    """Build a master and a consumer instance; both are removed at teardown."""
    def _bootstrap(host, port, serverid, label):
        # Allocate, wipe any leftover, then create and open one instance.
        inst = DirSrv(verbose=False)
        inst.log.debug(label)
        inst.allocate({SER_HOST: host,
                       SER_PORT: port,
                       SER_SERVERID_PROP: serverid})
        if inst.exists():
            inst.delete()
        inst.create()
        inst.open()
        return inst

    master = _bootstrap(HOST_MASTER, PORT_MASTER, SERVERID_MASTER,
                        "Master allocated")
    consumer = _bootstrap(HOST_CONSUMER, PORT_CONSUMER, SERVERID_CONSUMER,
                          "Consumer allocated")

    # Remove both instances once the module is done
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
def setUp(self):
    """Create a fresh master and consumer instance for this test case."""
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    master.allocate({SER_HOST: HOST_MASTER,
                     SER_PORT: PORT_MASTER,
                     SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                     SER_SERVERID_PROP: SERVERID_MASTER})
    # Start from a clean slate if a previous run left an instance behind
    if master.exists():
        master.delete()
    master.create()
    master.open()
    self.master = master

    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    consumer.allocate({SER_HOST: HOST_CONSUMER,
                       SER_PORT: PORT_CONSUMER,
                       SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                       SER_SERVERID_PROP: SERVERID_CONSUMER})
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()
    self.consumer = consumer
def topology(request):
    # NOTE(review): the bare ``return`` below short-circuits this fixture, so
    # everything after it is unreachable dead code. Preserved verbatim because
    # re-enabling it would change which tests run — confirm intent.
    return
    # Create the realm first
    krb = MitKrb5(realm=REALM)
    if krb.check_realm():
        krb.destroy_realm()
    krb.create_realm()
    DEBUG = False
    # Creating master 1...
    master1 = DirSrv(verbose=DEBUG)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_REALM] = REALM
    args_instance[SER_STRICT_HOSTNAME_CHECKING] = False
    args_master = args_instance.copy()
    master1.allocate(args_master)
    if master1.exists():
        master1.delete()
    master1.create()
    # There is some magic in .create that finds the realm, and adds the
    # keytab for us.
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)
    # Creating master 2...
    master2 = DirSrv(verbose=DEBUG)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_REALM] = REALM
    args_instance[SER_STRICT_HOSTNAME_CHECKING] = False
    args_master = args_instance.copy()
    master2.allocate(args_master)
    if master2.exists():
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    # Delete each instance (and the realm) in the end
    def fin():
        master1.delete()
        master2.delete()
        if krb.check_realm():
            krb.destroy_realm()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)
    return TopologyReplication(master1, master2)
def topology(request):
    """Master/consumer pair with replication enabled; deleted at teardown."""
    # --- Master ---
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    master.allocate({SER_HOST: HOST_MASTER,
                     SER_PORT: PORT_MASTER,
                     SER_SERVERID_PROP: SERVERID_MASTER})
    if master.exists():
        master.delete()
    master.create()
    master.open()
    # Enable replication on the master side
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=ReplicaRole.MASTER,
                                     replicaId=REPLICAID_MASTER)

    # --- Consumer ---
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    consumer.allocate({SER_HOST: HOST_CONSUMER,
                       SER_PORT: PORT_CONSUMER,
                       SER_SERVERID_PROP: SERVERID_CONSUMER})
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()
    # Enable replication on the consumer side (no replica id needed)
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=ReplicaRole.CONSUMER)

    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
def topology(request):
    """Provide a single DirSrv instance for the 'module'."""
    schemainst = DirSrv(verbose=False)
    # Parameters come from the shared module-level args_instance dict
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    schemainst.allocate(args_instance)
    # Wipe any leftover instance before creating a fresh one
    if schemainst.exists():
        schemainst.delete()
    schemainst.create()
    schemainst.open()

    def fin():
        schemainst.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(schemainst)
def topology(request):
    """Standalone topology; the instance is deleted when the module ends."""
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    # Remove a stale instance from a previous run, then build a fresh one
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)
    return TopologyStandalone(standalone)
def topology(request):
    """Create DS Deployment"""
    # Verbose logging only when debugging
    standalone = DirSrv(verbose=bool(DEBUGGING))
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            standalone.stop(60)
        else:
            standalone.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)
    return TopologyStandalone(standalone)
def topology(request):
    """Standalone topology; teardown also turns valgrind back off."""
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    def fin():
        standalone.delete()
        # valgrind was presumably enabled by the test itself; disable it
        # again unless this is an ASAN build
        sbin_dir = standalone.get_sbin_dir()
        if not standalone.has_asan():
            valgrind_disable(sbin_dir)
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """Standalone topology for the module; instance removed at teardown."""
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    # Recreate from scratch if it already exists
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """This fixture is used to standalone topology for the 'module'."""
    standalone = DirSrv(verbose=False)
    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    standalone.allocate(args_instance.copy())
    # Drop any pre-existing instance, then create a fresh one
    if standalone.exists():
        standalone.delete()
    standalone.create()
    # open() retrieves configuration information (dbdir, confdir...)
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    # Standalone instance is up and running at this point
    return TopologyStandalone(standalone)
def topology(request):
    """Standalone topology; teardown archives an LDIF dump before deleting."""
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        # Dump and back up the database first — useful for analysing the
        # test environment after a failure — then remove the instance.
        standalone.db2ldif(bename=DEFAULT_BENAME,
                           suffixes=[DEFAULT_SUFFIX],
                           excludeSuffixes=[],
                           encrypt=False,
                           repl_data=True,
                           outputfile='%s/ldif/%s.ldif' % (standalone.dbdir,
                                                           SERVERID_STANDALONE))
        standalone.clearBackupFS()
        standalone.backupFS()
        standalone.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)
    return TopologyStandalone(standalone)
def topology(request):
    """Standalone topology for the 'module'.

    NOTE(review): no finalizer is registered here, so the instance is left
    running after the test module finishes — confirm this is intentional.
    """
    standalone = DirSrv(verbose=True)
    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    standalone.allocate(args_instance.copy())
    # Remove a stale instance, then build a new one
    if standalone.exists():
        standalone.delete()
    standalone.create()
    # open() retrieves configuration information (dbdir, confdir...)
    standalone.open()

    # clear the tmp directory
    standalone.clearTmpDir(__file__)

    return TopologyStandalone(standalone)
def topology(request):
    """Create DS Deployment"""
    standalone = DirSrv(verbose=DEBUGGING)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SECURE_PORT] = SECUREPORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            standalone.stop()
        else:
            standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """Standalone topology for the 'module'.

    At the beginning a standalone instance may already exist, and a backup
    for it may exist as well — both are replaced.
    """
    standalone = DirSrv(verbose=False)
    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    standalone.allocate(args_instance.copy())
    # Drop any leftover instance before creating
    if standalone.exists():
        standalone.delete()
    standalone.create()
    # open() retrieves configuration information (dbdir, confdir...)
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def setUp(self):
    """Create a replicating master/consumer pair for this test case."""
    # --- Master ---
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    master.allocate({SER_HOST: HOST_MASTER,
                     SER_PORT: PORT_MASTER,
                     SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                     SER_SERVERID_PROP: SERVERID_MASTER})
    if master.exists():
        master.delete()
    master.create()
    master.open()
    # enable replication
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=REPLICAROLE_MASTER,
                                     replicaId=REPLICAID_MASTER)
    self.master = master

    # --- Consumer ---
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    consumer.allocate({SER_HOST: HOST_CONSUMER,
                       SER_PORT: PORT_CONSUMER,
                       SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                       SER_SERVERID_PROP: SERVERID_CONSUMER})
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()
    # enable replication (consumer role takes no replica id)
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=REPLICAROLE_CONSUMER)
    self.consumer = consumer
def topology(request):
    """Master/consumer pair with replication enabled for the module."""
    def _bootstrap(host, port, serverid, label):
        # Allocate, remove any leftover, then create and open one instance.
        inst = DirSrv(verbose=False)
        inst.log.debug(label)
        inst.allocate({SER_HOST: host,
                       SER_PORT: port,
                       SER_SERVERID_PROP: serverid})
        if inst.exists():
            inst.delete()
        inst.create()
        inst.open()
        return inst

    master = _bootstrap(HOST_MASTER, PORT_MASTER, SERVERID_MASTER,
                        "Master allocated")
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=REPLICAROLE_MASTER,
                                     replicaId=REPLICAID_MASTER)

    consumer = _bootstrap(HOST_CONSUMER, PORT_CONSUMER, SERVERID_CONSUMER,
                          "Consumer allocated")
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=REPLICAROLE_CONSUMER)

    # Remove both instances in the end
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
def test_finalizer():
    """Remove every instance defined in ALL_INSTANCES, plus its backups."""
    # renamed loop variable so it does not shadow the module-level
    # ``args_instance`` dict used elsewhere in this file
    for inst_args in ALL_INSTANCES:
        instance = DirSrv(verbose=True)
        instance.allocate(inst_args)
        if instance.exists():
            instance.delete()
        # remove any existing backup for this instance
        instance.clearBackupFS()
def setUp(self):
    """Create a replicating master/consumer pair for this test case."""
    # --- Master ---
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    master.allocate({SER_HOST: HOST_MASTER,
                     SER_PORT: PORT_MASTER,
                     SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                     SER_SERVERID_PROP: SERVERID_MASTER})
    # Rebuild from scratch if a previous run left an instance behind
    if master.exists():
        master.delete()
    master.create()
    master.open()
    # enable replication
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=REPLICAROLE_MASTER,
                                     replicaId=REPLICAID_MASTER)
    self.master = master

    # --- Consumer ---
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    consumer.allocate({SER_HOST: HOST_CONSUMER,
                       SER_PORT: PORT_CONSUMER,
                       SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                       SER_SERVERID_PROP: SERVERID_CONSUMER})
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()
    # enable replication
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=REPLICAROLE_CONSUMER)
    self.consumer = consumer
def topology(request):
    """Hand the test an allocated-but-not-created instance slot.

    Any pre-existing instance is removed; the test itself is expected to
    create the instance.
    """
    instance = DirSrv(verbose=False)
    if instance.exists():
        instance.delete()

    def fin():
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def setUp(self):
    """Create one fresh local instance and store it on the test case."""
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_HOST: LOCALHOST,
                       SER_PORT: INSTANCE_PORT,
                       SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()
    self.instance = instance
def topology(request):
    """Create a master and a consumer (no replication set up here)."""
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    master.allocate({SER_HOST: HOST_MASTER,
                     SER_PORT: PORT_MASTER,
                     SER_SERVERID_PROP: SERVERID_MASTER})
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    consumer.allocate({SER_HOST: HOST_CONSUMER,
                       SER_PORT: PORT_CONSUMER,
                       SER_SERVERID_PROP: SERVERID_CONSUMER})
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Delete each instance in the end
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
def topology(request):
    """Allocate an instance slot and clear leftovers; the test creates it.

    When DEBUGGING is set the instance is kept alive at teardown for
    inspection.
    """
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Install an instance through SetupDs and create a sample backend."""
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    # Drive the installer directly
    lc = LogCapture()
    sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)

    # Take the defaults straight from the option bases (as though they came
    # from _validate_ds_2_config), overriding name, port and root password.
    general_options = General2Base(lc.log)
    general_options.verify()
    general = general_options.collect()

    slapd_options = Slapd2Base(lc.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.verify()
    slapd = slapd_options.collect()

    sds.create_from_args(general, slapd, {}, None)

    # Make sure we can connect
    instance.open()

    # Create the example backend with sample entries.
    instance.backends.create(properties={
        'cn': ['userRoot'],
        'nsslapd-suffix': ['dc=example,dc=com'],
    })

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Create one local instance; it is deleted at teardown."""
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_HOST: LOCALHOST,
                       SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()

    def fin():
        instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Create one local standalone instance; deleted at teardown."""
    standalone = DirSrv(verbose=False)
    standalone.log.debug('Instance allocated')
    standalone.allocate({SER_HOST: LOCALHOST,
                         SER_PORT: INSTANCE_PORT,
                         SER_SERVERID_PROP: INSTANCE_SERVERID})
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """Create one instance; removed at teardown if it still exists."""
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()

    def fin():
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Create one local standalone instance; deleted at teardown."""
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    standalone.allocate({SER_HOST: LOCALHOST,
                         SER_PORT: INSTANCE_PORT,
                         # SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                         SER_SERVERID_PROP: INSTANCE_SERVERID})
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """Install an instance via SetupDs and return it with the log capture."""
    lc = LogCapture()
    instance = DirSrv(verbose=DEBUGGING)
    instance.log.debug("Instance allocated")
    instance.allocate({SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    # This will need to change to instance.create in the future
    # when it's linked up!
    sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)

    # Take the defaults from the option bases (as though they came from
    # _validate_ds_2_config), overriding name, port and root password.
    general_options = General2Base(lc.log)
    general_options.verify()
    general = general_options.collect()

    # Need an args -> options2 ...
    slapd_options = Slapd2Base(lc.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.verify()
    slapd = slapd_options.collect()

    sds.create_from_args(general, slapd, {}, None)

    # Assert we did change the system.
    insts = instance.list(serverid=INSTANCE_SERVERID)
    assert(len(insts) == 1)

    # Make sure we can connect
    instance.open(connOnly=True)

    def fin():
        if instance.exists() and not DEBUGGING:
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance, lc)
def topology(request):
    """Allocate an instance slot and clear leftovers; the test creates it."""
    instance = DirSrv(verbose=False)
    instance.allocate({SER_HOST: LOCALHOST,
                       SER_PORT: INSTANCE_PORT,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    def fin():
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def create_instance(config_attr):
    """Install a standalone instance with InstScriptsEnabled = config_attr."""
    log.info('create_instance - Installs the instance and Sets the value of InstScriptsEnabled to true OR false.')
    log.info("Set up the instance and set the config_attr")
    instance_data = generate_ds_params(1, ReplicaRole.STANDALONE)

    # Create instance
    standalone = DirSrv(verbose=False)

    # Args for the instance
    args_instance[SER_HOST] = instance_data[SER_HOST]
    args_instance[SER_PORT] = instance_data[SER_PORT]
    args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_INST_SCRIPTS_ENABLED] = config_attr
    standalone.allocate(args_instance.copy())

    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    return standalone
def topology(request):
    """Standalone topology whose teardown is deliberately a no-op.

    NOTE(review): the finalizer's delete is commented out, so the instance
    survives the test run — presumably for inspection; confirm intent.
    """
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        # standalone.delete()
        pass
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
def topology(request):
    """Standalone instance with access-log buffering turned off."""
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    standalone.allocate({SER_PORT: INSTANCE_PORT,
                         # SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                         SER_SERVERID_PROP: INSTANCE_SERVERID})
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    # Unbuffered access log so tests can read entries promptly
    standalone.config.set('nsslapd-accesslog-logbuffering', 'off')

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    # We have to wait for time to elapse for the access log to be flushed.
    return TopologyStandalone(standalone)
def topology(request):
    """Create a Kerberos realm plus one DS instance bound to it."""
    krb = MitKrb5(realm=REALM)
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    # WARNING: If this test fails it's like a hostname issue!!!
    instance.allocate({SER_HOST: socket.gethostname(),
                       SER_PORT: INSTANCE_PORT,
                       SER_REALM: REALM,
                       SER_SERVERID_PROP: INSTANCE_SERVERID})
    if instance.exists():
        instance.delete()

    # Its likely our realm exists too; remove the old keytab first
    if os.path.exists(KEYTAB):
        os.remove(KEYTAB)
    if krb.check_realm():
        krb.destroy_realm()
    # This will automatically create the krb entries
    krb.create_realm()

    instance.create()
    instance.open()

    def fin():
        if instance.exists():
            instance.delete()
        if krb.check_realm():
            krb.destroy_realm()
        if os.path.exists(KEYTAB):
            os.remove(KEYTAB)
        if os.path.exists(CCACHE):
            os.remove(CCACHE)
    request.addfinalizer(fin)

    return TopologyInstance(instance)
def topology(request):
    """Create Replication Deployment"""
    # Creating master 1...
    master1 = DirSrv(verbose=bool(DEBUGGING))
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    master1.allocate(args_instance.copy())
    if master1.exists():
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=bool(DEBUGGING))
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    master2.allocate(args_instance.copy())
    if master2.exists():
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX,
                                          host=master2.host,
                                          port=master2.port,
                                          properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX,
                                          host=master1.host,
                                          port=master1.port,
                                          properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            master1.stop()
            master2.stop()
        else:
            master1.delete()
            master2.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, master2)
def topology(request):
    """Create a replicated MASTER1 <-> MASTER2 topology for the 'module'."""
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    master1.allocate(args_instance.copy())

    # allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    master2.allocate(args_instance.copy())

    # Remove any leftover instances, then build both from scratch
    if master1.exists():
        master1.delete()
    if master2.exists():
        master2.delete()

    master1.create()
    master1.open()
    master2.create()
    master2.open()

    # Enable replication on both sides
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    # Agreement master1 -> master2
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    repl_agreement = master1.agreement.create(suffix=SUFFIX,
                                              host=master2.host,
                                              port=master2.port,
                                              properties=properties)
    if not repl_agreement:
        log.fatal("Fail to create a replica agreement")
        sys.exit(1)
    log.debug("%s created" % repl_agreement)

    # Agreement master2 -> master1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master2.agreement.create(suffix=SUFFIX,
                             host=master1.host,
                             port=master1.port,
                             properties=properties)

    # Initialize master1 -> master2 and wait for it to finish
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(repl_agreement)

    # Check replication is working fine
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    # Two masters with working replication at this point
    return TopologyMaster1Master2(master1, master2)
def _create_instances(topo_dict, suffix):
    """Create requested instances without replication or any other modifications

    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num,
                                    ReplicaRole.MASTER: num,
                                    ReplicaRole.HUB: num,
                                    ReplicaRole.CONSUMER: num}
    :type topo_dict: dict
    :param suffix: a suffix
    :type suffix: str

    :return - TopologyMain object
    """
    instances = {}
    ms = {}
    cs = {}
    hs = {}
    ins = {}

    # One bucket per role so TopologyMain can address them separately
    for role, count in topo_dict.items():
        for inst_num in range(1, count + 1):
            instance_data = generate_ds_params(inst_num, role)
            instance = DirSrv(verbose=bool(DEBUGGING))

            # TODO: Put 'args_instance' to generate_ds_params.
            # Also, we need to keep in mind that the function returns
            # SER_SECURE_PORT and REPLICA_ID that are not used in
            # the instance creation here.
            # args_instance[SER_HOST] = instance_data[SER_HOST]
            args_instance = {}
            args_instance[SER_PORT] = instance_data[SER_PORT]
            args_instance[SER_SECURE_PORT] = instance_data[SER_SECURE_PORT]
            args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
            # It's required to be able to make a suffix-less install for
            # some cli tests. It's invalid to require replication with
            # no suffix however ....
            if suffix is not None:
                args_instance[SER_CREATION_SUFFIX] = suffix
            elif role != ReplicaRole.STANDALONE:
                raise AssertionError("Invalid request to make suffix-less replicated environment")

            instance.allocate(args_instance)
            if instance.exists():
                instance.delete(pyinstall=PYINSTALL)
            instance.create(pyinstall=PYINSTALL)
            # We set a URL here to force ldap:// only. Once we turn on TLS
            # we'll flick this to ldaps.
            instance.use_ldap_uri()
            instance.open()

            if role == ReplicaRole.STANDALONE:
                ins[instance.serverid] = instance
                instances.update(ins)
            if role == ReplicaRole.MASTER:
                ms[instance.serverid] = instance
                instances.update(ms)
            if role == ReplicaRole.CONSUMER:
                cs[instance.serverid] = instance
                instances.update(cs)
            if role == ReplicaRole.HUB:
                hs[instance.serverid] = instance
                instances.update(hs)

            if DEBUGGING:
                # Verbose server-side logging to aid debugging
                instance.config.set('nsslapd-accesslog-logbuffering', 'off')
                instance.config.set('nsslapd-errorlog-level', '8192')
                instance.config.set('nsslapd-auditlog-logging-enabled', 'on')
            log.info("Instance with parameters {} was created.".format(args_instance))

    # Single-standalone shortcut keeps the historical return shape
    if "standalone1" in instances and len(instances) == 1:
        return TopologyMain(standalones=instances["standalone1"])
    else:
        return TopologyMain(standalones=ins, masters=ms, consumers=cs, hubs=hs)
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: 'general' options dict (full_machine_name, ...)
    :param slapd: 'slapd' options dict (paths, port, instance name,
                  user/group ids, root DN/password, prefix)
    :param backends: list of backend property dicts to create online
    """
    # register the instance to /etc/sysconfig
    # We do this first so that we can trick remove-ds.pl if needed.
    # There may be a way to create this from template like the dse.ldif ...
    initconfig = ""
    with open("%s/dirsrv/config/template-initconfig" % slapd['sysconf_dir']) as template_init:
        for line in template_init.readlines():
            # Template uses {{KEY}} placeholders and dashed names; convert
            # to {KEY} with underscores so str.format can substitute below.
            initconfig += line.replace('{{', '{', 1).replace('}}', '}', 1).replace('-', '_')
    with open("%s/sysconfig/dirsrv-%s" % (slapd['sysconf_dir'], slapd['instance_name']), 'w') as f:
        f.write(initconfig.format(
            SERVER_DIR=slapd['lib_dir'],
            SERVERBIN_DIR=slapd['sbin_dir'],
            CONFIG_DIR=slapd['config_dir'],
            INST_DIR=slapd['inst_dir'],
            RUN_DIR=slapd['run_dir'],
            DS_ROOT='',
            PRODUCT_NAME='slapd',
        ))

    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?  schema_dir,
    for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
        if self.verbose:
            self.log.info("ACTION: creating %s" % slapd[path])
        try:
            os.makedirs(slapd[path], mode=0o775)
        except OSError:
            # Already exists (or a parent does) - acceptable here.
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy2(srcfile, dstfile)
    # BUGFIX: chown the copied collation file itself. This previously
    # re-chowned slapd['schema_dir'] a second time, leaving
    # slapd-collations.conf owned by the installing (root) user.
    # (The later rewrite of this installer chowns dstfile here.)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])

    # Selinux fixups?
    # Restorecon of paths?
    # Bind sockets to our type?

    # Create certdb in sysconfidir
    if self.verbose:
        self.log.info("ACTION: Creating certificate database is %s" % slapd['cert_dir'])
    # nss_create_new_database(slapd['cert_dir'])

    # Create dse.ldif with a temporary root password.
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    # You could cheat and read it in, do a replace of % to { and } then use format?
    if self.verbose:
        self.log.info("ACTION: Creating dse.ldif")
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        file_dse.write(dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir="",
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            # ds_passwd=slapd['root_password'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            ds_suffix='',
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
        ))

    # open the connection to the instance.
    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose)
    ds_instance.containerised = self.containerised
    args = {
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix']
    }
    ds_instance.allocate(args)
    # Does this work?
    assert(ds_instance.exists())

    # Start the server
    ds_instance.start(timeout=60)
    ds_instance.open()

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        ds_instance.backends.create(properties=backend)

    # Make changes using the temp root

    # Change the root password finally
    # Complete.
    ds_instance.config.set('nsslapd-rootpw', ensure_str(slapd['root_password']))
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: 'general' options dict (full_machine_name, systemd and
                    selinux flags, config 'defaults' version, 'start' flag)
    :param slapd: 'slapd' options dict (paths, ports, instance name,
                  user/group, root DN/password, TLS and ldapi settings)
    :param backends: list of backend property dicts; backends[0] supplies
                     the suffix used for plugin defaults and SASL maps
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the present of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        # Directory (or a parent) already exists; acceptable here.
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Expand the template's %KEY% placeholders into {KEY} so str.format
    # can substitute the concrete values below.
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    # Check if we are in a container, if so don't use /dev/shm for the db home dir
    # as containers typically don't allocate enough space for dev/shm and we don't
    # want to unexpectedly break the server after an upgrade
    #
    # If we know we are are in a container, we don't need to re-detect on systemd.
    # It actually turns out if you add systemd-detect-virt, that pulls in system
    # which subsequently breaks containers starting as instance.start then believes
    # it COULD check the ds status. The times we need to check for systemd are mainly
    # in other environments that use systemd natively in their containers.
    #
    # NOTE: container_result starts as the int sentinel 1; it is only a
    # CompletedProcess when systemd-detect-virt actually ran, and the
    # `.returncode` access below is short-circuited away otherwise.
    container_result = 1
    if not self.containerised:
        container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE)
    if self.containerised or container_result.returncode == 0:
        # In a container, set the db_home_dir to the db path
        self.log.debug("Container detected setting db home directory to db directory.")
        slapd['db_home_dir'] = slapd['db_dir']

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        dse_fmt = dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            instance_name=slapd['instance_name'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as they can validly give
            # us a *hashed* input.
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
            db_home_dir=slapd['db_home_dir'],
            db_lib=slapd['db_lib'],
            ldapi_enabled="on",
            ldapi=slapd['ldapi'],
            ldapi_autobind="on",
        )
        file_dse.write(dse_fmt)

    self.log.info("Create file system structures ...")
    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to down the directory under db too for .restore to work.
    # During a restore, the db dir is deleted and recreated, which is why we need
    # to own it for a restore.
    #
    # However, in a container, we can't always guarantee this due to how the volumes
    # work and are mounted. Specifically, if we have an anonymous volume we will
    # NEVER be able to own it, but in a true deployment it is reasonable to expect
    # we DO own it. Thus why we skip it in this specific context
    if not self.containerised:
        db_parent = os.path.join(slapd['db_dir'], '..')
        os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        result = subprocess.run(["systemctl", "enable", "dirsrv@%s" % slapd['instance_name']],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        args = ' '.join(ensure_list_str(result.args))
        stdout = ensure_str(result.stdout)
        stderr = ensure_str(result.stderr)
        # Systemd encodes some odd charecters into it's symlink output on newer versions which
        # can trip up the logger.
        self.log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}".encode("utf-8"))

        # Setup tmpfiles_d
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['run_dir'], slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(
                slapd['lock_dir'].replace("slapd-" + slapd['instance_name'], ""),
                slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['lock_dir'], slapd['user'], slapd['group']))
    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s", slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if it all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose, containerised=self.containerised)
    if self.containerised:
        ds_instance.systemd_override = general['systemd']

    # By default SUSE does something extremely silly - it creates a hostname
    # that CANT be resolved by DNS. As a result this causes all installs to
    # fail. We need to guarantee that we only connect to localhost here, as
    # it's the only stable and guaranteed way to connect to the instance
    # at this point.
    #
    # Use ldapi which would prevent the need
    # to configure a temp root pw in the setup phase.
    args = {
        SER_HOST: "localhost",
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix'],
        SER_LDAPI_ENABLED: 'on',
        SER_LDAPI_SOCKET: slapd['ldapi'],
        SER_LDAPI_AUTOBIND: 'on'
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(), "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        self.log.info("Create self-signed certificate database ...")
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)

        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        self.log.info("Perform SELinux labeling ...")
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
                         'run_dir', 'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])

        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    self.log.debug(f"asan_enabled={ds_instance.has_asan()}")
    self.log.debug(f"libfaketime installed ={'libfaketime' in sys.modules}")
    assert_c(not ds_instance.has_asan() or 'libfaketime' not in sys.modules,
             "libfaketime python module is incompatible with ASAN build.")
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.
    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security; on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Before we create any backends, create any extra default indexes that may be
    # dynamically provisioned, rather than from template-dse.ldif. Looking at you
    # entryUUID (requires rust enabled).
    #
    # Indexes defaults to default_index_dn
    indexes = Indexes(ds_instance)
    if ds_instance.ds_paths.rust_enabled:
        indexes.create(properties={
            'cn': 'entryUUID',
            'nsSystemIndex': 'false',
            'nsIndexType': ['eq', 'pres'],
        })

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        self.log.info(f"Create database backend: {backend['nsslapd-suffix']} ...")
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry', False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            # Set basic ACIs
            c_aci = '(targetattr="c || description || objectClass")(targetfilter="(objectClass=country)")(version 3.0; acl "Enable anyone c read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            o_aci = '(targetattr="o || description || objectClass")(targetfilter="(objectClass=organization)")(version 3.0; acl "Enable anyone o read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            dc_aci = '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ou_aci = '(targetattr="ou || description || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            cn_aci = '(targetattr="cn || description || objectClass")(targetfilter="(objectClass=nscontainer)")(version 3.0; acl "Enable anyone cn read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            # Create a suffix entry matching the RDN type of the suffix.
            suffix_rdn_attr = backend['nsslapd-suffix'].split('=')[0].lower()
            if suffix_rdn_attr == 'dc':
                domain = create_base_domain(ds_instance, backend['nsslapd-suffix'])
                domain.add('aci', dc_aci)
            elif suffix_rdn_attr == 'o':
                org = create_base_org(ds_instance, backend['nsslapd-suffix'])
                org.add('aci', o_aci)
            elif suffix_rdn_attr == 'ou':
                orgunit = create_base_orgunit(ds_instance, backend['nsslapd-suffix'])
                orgunit.add('aci', ou_aci)
            elif suffix_rdn_attr == 'cn':
                cn = create_base_cn(ds_instance, backend['nsslapd-suffix'])
                cn.add('aci', cn_aci)
            elif suffix_rdn_attr == 'c':
                c = create_base_c(ds_instance, backend['nsslapd-suffix'])
                c.add('aci', c_aci)
            else:
                # Unsupported rdn
                raise ValueError("Suffix RDN '{}' in '{}' is not supported.  Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'".format(suffix_rdn_attr, backend['nsslapd-suffix']))

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    #
    # NOTE(review): 'backend' below is the leftover loop variable (the *last*
    # backend) when the log line runs, while the maps themselves are built
    # from backends[0] - the log may therefore name a different suffix than
    # the maps use when there are multiple backends.
    if len(backends) > 0:
        self.log.debug("Adding sasl maps for suffix %s" % backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(properties={
            'cn': 'rfc 2829 u syntax',
            'nsSaslMapRegexString': '^u:\\(.*\\)',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=\\1)'
        })
        # I think this is for LDAPI
        saslmappings.create(properties={
            'cn': 'uid mapping',
            'nsSaslMapRegexString': '^[^:@]+$',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=&)'
        })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    self.log.info("Perform post-installation tasks ...")
    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()

    self.log.debug(" 🎉 Instance setup complete")
def topology(request):
    """
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER -> Consumer.

    Builds (or rebuilds) the master and consumer instances, enables
    replication, creates and initialises the supplier->consumer agreement,
    and verifies replication works before returning.

    :param request: pytest fixture request, used to register instance cleanup
    :return: TopologyMasterConsumer(master, consumer)
    """
    master = DirSrv(verbose=False)
    consumer = DirSrv(verbose=False)

    # Args for the master instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master.allocate(args_master)

    # Args for the consumer instance
    args_instance[SER_HOST] = HOST_CONSUMER_1
    args_instance[SER_PORT] = PORT_CONSUMER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
    args_consumer = args_instance.copy()
    consumer.allocate(args_consumer)

    # Get the status of the instance and restart it if it exists
    instance_master = master.exists()
    instance_consumer = consumer.exists()

    # Remove all the instances
    if instance_master:
        master.delete()
    if instance_consumer:
        consumer.delete()

    # Create the instances
    master.create()
    master.open()
    consumer.create()
    consumer.open()

    # BUGFIX: register cleanup as soon as both instances exist, so they are
    # removed even if the replication setup or check below fails. Previously
    # the finalizer was registered only at the very end, so an 'assert False'
    # or sys.exit(1) above it leaked both live instances.
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    #
    # Now prepare the Master-Consumer topology
    #
    # First Enable replication
    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)

    # Initialize the supplier->consumer
    properties = {
        RA_NAME: r"meTo_$host:$port",
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT],
    }
    repl_agreement = master.agreement.create(
        suffix=SUFFIX,
        host=consumer.host,
        port=consumer.port,
        properties=properties
    )

    if not repl_agreement:
        log.fatal("Fail to create a replica agreement")
        sys.exit(1)
    log.debug("%s created" % repl_agreement)

    master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
    master.waitForReplInit(repl_agreement)

    # Check replication is working fine
    if master.testReplication(DEFAULT_SUFFIX, consumer):
        log.info("Replication is working.")
    else:
        log.fatal("Replication is not working.")
        assert False

    #
    # Here we have two instances master and consumer
    # with replication working. Either coming from a backup recovery
    # or from a fresh (re)init
    # Time to return the topology
    return TopologyMasterConsumer(master, consumer)
def topology(request):
    """Create a two-master (MMR) replicated topology for the 'module'.

    Removes any pre-existing master1/master2 instances, creates them fresh,
    enables replication on both, creates and initialises the agreements in
    both directions, verifies replication, then enables replica debug
    logging and disables high-resolution log timestamps.

    :param request: pytest fixture request, used to register instance cleanup
    :return: TopologyReplication(master1, master2, m1_m2_agmt, m2_m1_agmt)
    """
    global installation1_prefix
    if installation1_prefix:
        # Honour a custom deployment prefix if one was configured.
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        # Remove any stale instance so we always start clean.
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: 'meTo_%s:%s' % (master2.host, master2.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: 'meTo_%s:%s' % (master1.host, master1.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)
    master2.agreement.init(SUFFIX, HOST_MASTER_1, PORT_MASTER_1)
    master2.waitForReplInit(m2_m1_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    log.info("Set Replication Debugging loglevel for the errorlog")
    master1.setLogLevel(LOG_REPLICA)
    master2.setLogLevel(LOG_REPLICA)

    # Disable high-resolution log timestamps on both masters.
    # NOTE(review): python-ldap on Python 3 requires bytes modlist values;
    # passing the str "off" here assumes a python-ldap version that accepts
    # text values - confirm against the project's ldap binding.
    logging_attr = 'nsslapd-logging-hr-timestamps-enabled'
    master1.modify_s("cn=config", [(ldap.MOD_REPLACE, logging_attr, "off")])
    master2.modify_s("cn=config", [(ldap.MOD_REPLACE, logging_attr, "off")])

    # Delete each instance in the end
    # NOTE(review): the finalizer is registered only here, at the end - if any
    # of the steps above aborts the fixture, the instances are not cleaned up.
    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, master2, m1_m2_agmt, m2_m1_agmt)
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: 'general' options dict (full_machine_name, systemd flag,
                    config 'defaults' version)
    :param slapd: 'slapd' options dict (paths, ports, instance name,
                  user/group ids, root DN/password, prefix, TLS settings)
    :param backends: list of backend property dicts to create once online
    """
    # register the instance to /etc/sysconfig
    # We do this first so that we can trick remove-ds.pl if needed.
    # There may be a way to create this from template like the dse.ldif ...
    initconfig = ""
    with open("%s/dirsrv/config/template-initconfig" % slapd['sysconf_dir']) as template_init:
        for line in template_init.readlines():
            # Template uses {{KEY}} placeholders with dashed names; convert
            # to {KEY} with underscores so str.format can substitute below.
            initconfig += line.replace('{{', '{', 1).replace('}}', '}', 1).replace('-', '_')
    try:
        os.makedirs("%s/sysconfig" % slapd['sysconf_dir'], mode=0o775)
    except FileExistsError:
        pass
    with open("%s/sysconfig/dirsrv-%s" % (slapd['sysconf_dir'], slapd['instance_name']), 'w') as f:
        f.write(initconfig.format(
            SERVER_DIR=slapd['lib_dir'],
            SERVERBIN_DIR=slapd['sbin_dir'],
            CONFIG_DIR=slapd['config_dir'],
            INST_DIR=slapd['inst_dir'],
            RUN_DIR=slapd['run_dir'],
            DS_ROOT='',
            PRODUCT_NAME='slapd',
        ))

    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?  schema_dir,
    for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
        if self.verbose:
            self.log.info("ACTION: creating %s" % slapd[path])
        try:
            os.makedirs(slapd[path], mode=0o775)
        except OSError:
            # Already exists (or a parent does) - acceptable here.
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to down the directory under db too for .restore to work.
    # See dblayer.c for more!
    db_parent = os.path.join(slapd['db_dir'], '..')
    os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    # This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy2(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy2(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])

    # If we are on the correct platform settings, systemd
    if general['systemd'] and not self.containerised:
        # Should create the symlink we need, but without starting it.
        subprocess.check_call(["/usr/bin/systemctl", "enable", "dirsrv@%s" % slapd['instance_name']])
    # Else we need to detect other init scripts?

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    if self.verbose:
        self.log.info("ACTION: Creating certificate database is %s" % slapd['cert_dir'])

    # Create dse.ldif with a temporary root password.
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    # You could cheat and read it in, do a replace of % to { and } then use format?
    if self.verbose:
        self.log.info("ACTION: Creating dse.ldif")
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        file_dse.write(dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir="",
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            # ds_passwd=slapd['root_password'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            ds_suffix='',
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
        ))

    # open the connection to the instance.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose)
    ds_instance.containerised = self.containerised
    args = {
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix']
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(), "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        # If it doesn't exist, create a cadb.
        ssca_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca()

        csr = tlsdb.create_rsa_key_and_csr()
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)

    ## LAST CHANCE, FIX PERMISSIONS.
    # Selinux fixups?
    # Restorecon of paths?

    # Start the server
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.
    ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        ds_instance.backends.create(properties=backend)

    # Make changes using the temp root
    # Change the root password finally

    # Initialise ldapi socket information. IPA expects this ....
    ds_instance.config.set('nsslapd-ldapifilepath', ds_instance.get_ldapi_path())
    ds_instance.config.set('nsslapd-ldapilisten', 'on')

    # Complete.
    ds_instance.config.set('nsslapd-rootpw', ensure_str(slapd['root_password']))

    if self.containerised:
        # In a container build we need to stop DirSrv at the end
        ds_instance.stop()
    else:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
def topology(request):
    """pytest fixture: build a two-master multi-master replication topology.

    Creates (or recreates from scratch) master1 and master2, enables the
    master replica role on both, creates a replication agreement in each
    direction, initialises master2 from master1, and verifies replication
    is working before returning the topology object.

    :param request: pytest fixture request (used to register the finalizer)
    :returns: TopologyReplication(master1, master2)

    NOTE: ``args_instance`` is a module-level dict that is mutated in place
    here; ``.copy()`` snapshots it per instance before allocation.
    """
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    # (the prefix assignment is repeated here; harmless but redundant)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        # Remove any stale instance so the test always starts clean
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    # m1_m2_agmt is module-global so tests in this module can reference the
    # agreement DN directly.
    global m1_m2_agmt
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX,
                                          host=master2.host,
                                          port=master2.port,
                                          properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX,
                                          host=master1.host,
                                          port=master1.port,
                                          properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    # Only the m1 -> m2 direction needs a total init; m2 -> m1 catches up
    # through normal incremental replication afterwards.
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Delete each instance in the end
    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master1, master2)
def topology(request):
    """pytest fixture: build a master -> hub -> consumer cascading topology.

    Creates (or recreates) one master, one hub and one consumer, enables the
    appropriate replica role on each, creates the master->hub and
    hub->consumer agreements, performs total inits down the chain, and
    verifies end-to-end replication (master to consumer) before returning.

    :param request: pytest fixture request (used to register the finalizer)
    :returns: TopologyReplication(master1, hub1, consumer1)

    NOTE: mutates the module-level ``args_instance`` template dict.
    """
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        # Remove any stale instance so the test always starts clean
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating hub 1...
    hub1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_HUB_1
    args_instance[SER_PORT] = PORT_HUB_1
    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_hub = args_instance.copy()
    hub1.allocate(args_hub)
    instance_hub1 = hub1.exists()
    if instance_hub1:
        hub1.delete()
    hub1.create()
    hub1.open()
    hub1.replica.enableReplication(suffix=SUFFIX,
                                   role=REPLICAROLE_HUB,
                                   replicaId=REPLICAID_HUB_1)

    # Creating consumer 1...
    consumer1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_CONSUMER_1
    args_instance[SER_PORT] = PORT_CONSUMER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_consumer = args_instance.copy()
    consumer1.allocate(args_consumer)
    instance_consumer1 = consumer1.exists()
    if instance_consumer1:
        consumer1.delete()
    consumer1.create()
    consumer1.open()
    # NOTE(review): creating a changelog on a *consumer* is unusual —
    # confirm this is intentional for this test scenario.
    consumer1.changelog.create()
    consumer1.replica.enableReplication(suffix=SUFFIX,
                                        role=REPLICAROLE_CONSUMER,
                                        replicaId=CONSUMER_REPLICAID)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to hub 1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_h1_agmt = master1.agreement.create(suffix=SUFFIX,
                                          host=hub1.host,
                                          port=hub1.port,
                                          properties=properties)
    if not m1_h1_agmt:
        log.fatal("Fail to create a master -> hub replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_h1_agmt)

    # Creating agreement from hub 1 to consumer 1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX,
                                       host=consumer1.host,
                                       port=consumer1.port,
                                       properties=properties)
    if not h1_c1_agmt:
        log.fatal("Fail to create a hub -> consumer replica agreement")
        sys.exit(1)
    log.debug("%s created" % h1_c1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    # Total init must cascade: master -> hub first, then hub -> consumer.
    master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1)
    master1.waitForReplInit(m1_h1_agmt)
    hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
    hub1.waitForReplInit(h1_c1_agmt)

    # Check replication is working end-to-end (master all the way to consumer)
    if master1.testReplication(DEFAULT_SUFFIX, consumer1):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Delete each instance in the end
    def fin():
        master1.delete()
        hub1.delete()
        consumer1.delete()
        pass
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, hub1, consumer1)
def topology(request):
    """pytest fixture: build a two-supplier replication topology with test data.

    Creates (or recreates) supplier1 and supplier2, enables the supplier
    replica role on both, creates a replication agreement in each direction,
    bulk-loads ``NUM_USERS`` * 2 test entries into supplier1 via an online
    LDIF import, then initialises supplier2 from supplier1 and verifies
    replication works before returning.

    :param request: pytest fixture request (used to register the finalizer)
    :returns: TopologyReplication(supplier1, supplier2)

    NOTE: mutates the module-level ``args_instance`` template dict.
    """
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating supplier 1...
    supplier1 = DirSrv(verbose=DEBUGGING)
    args_instance[SER_HOST] = HOST_SUPPLIER_1
    args_instance[SER_PORT] = PORT_SUPPLIER_1
    args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_supplier = args_instance.copy()
    supplier1.allocate(args_supplier)
    if supplier1.exists():
        # Remove any stale instance so the test always starts clean
        supplier1.delete()
    supplier1.create()
    supplier1.open()
    supplier1.replica.enableReplication(suffix=SUFFIX,
                                        role=ReplicaRole.SUPPLIER,
                                        replicaId=REPLICAID_SUPPLIER_1)

    # Creating supplier 2...
    supplier2 = DirSrv(verbose=DEBUGGING)
    args_instance[SER_HOST] = HOST_SUPPLIER_2
    args_instance[SER_PORT] = PORT_SUPPLIER_2
    args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_supplier = args_instance.copy()
    supplier2.allocate(args_supplier)
    if supplier2.exists():
        supplier2.delete()
    supplier2.create()
    supplier2.open()
    supplier2.replica.enableReplication(suffix=SUFFIX,
                                        role=ReplicaRole.SUPPLIER,
                                        replicaId=REPLICAID_SUPPLIER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from supplier 1 to supplier 2
    properties = {
        RA_NAME: r'meTo_$host:$port',
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }
    m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX,
                                            host=supplier2.host,
                                            port=supplier2.port,
                                            properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a supplier -> supplier replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from supplier 2 to supplier 1
    properties = {
        RA_NAME: r'meTo_$host:$port',
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }
    m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX,
                                            host=supplier1.host,
                                            port=supplier1.port,
                                            properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a supplier -> supplier replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Import tests entries into supplier1 before we initialize supplier2
    #
    ldif_dir = supplier1.get_ldif_dir()
    import_ldif = ldif_dir + '/rel7.5-entries.ldif'

    def _write_user(ldif, name, count):
        # Write one inetorgperson test entry. The description length is
        # randomized so entry sizes vary across the data set.
        ldif.write('dn: uid=' + name + '_entry' + count + ',' + DEFAULT_SUFFIX + '\n')
        ldif.write('objectclass: top\n')
        ldif.write('objectclass: person\n')
        ldif.write('objectclass: inetorgperson\n')
        ldif.write('objectclass: organizationalperson\n')
        ldif.write('uid: ' + name + '_entry' + count + '\n')
        ldif.write('cn: ' + name + ' entry' + count + '\n')
        ldif.write('givenname: ' + name + ' ' + count + '\n')
        ldif.write('sn: entry ' + count + '\n')
        ldif.write('userpassword: ' + name + '_entry' + count + '\n')
        ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n')
        ldif.write('\n')

    # First generate an ldif.  Use a context manager so the file handle is
    # closed even when a write fails (the previous version opened the file
    # bare and leaked the handle on a failed write).
    try:
        with open(import_ldif, 'w') as ldif:
            # Create the root node
            ldif.write('dn: ' + DEFAULT_SUFFIX + '\n')
            ldif.write('objectclass: top\n')
            ldif.write('objectclass: domain\n')
            ldif.write('dc: example\n')
            ldif.write('\n')
            # Create the entries (one supplier1_ and one supplier2_ entry
            # per index, interleaved)
            for idx in range(NUM_USERS):
                count = str(idx)
                _write_user(ldif, 'supplier1', count)
                _write_user(ldif, 'supplier2', count)
    except IOError as e:
        log.fatal('Failed to create test ldif, error: %s - %s' % (e.errno, e.strerror))
        assert False

    # Now import it
    try:
        supplier1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif,
                                   args={TASK_WAIT: True})
    except ValueError:
        log.fatal('test_reliab_7.5: Online import failed')
        assert False

    #
    # Initialize all the agreements
    #
    supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2)
    supplier1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if supplier1.testReplication(DEFAULT_SUFFIX, supplier2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Clear out the tmp dir
    supplier1.clearTmpDir(__file__)

    # Delete each instance in the end
    def fin():
        supplier1.delete()
        supplier2.delete()
        if ENABLE_VALGRIND:
            sbin_dir = get_sbin_dir(prefix=supplier1.prefix)
            valgrind_disable(sbin_dir)
    request.addfinalizer(fin)

    return TopologyReplication(supplier1, supplier2)
def _install_ds(self, general, slapd, backends):
    """
    Actually install the Ds from the dicts provided.

    You should never call this directly, as it bypasses assertions.

    :param general: dict of general (host-level) install options, e.g.
        'full_machine_name', 'systemd', 'selinux', 'defaults', 'start'
    :param slapd: dict of per-instance options (paths, ports, root DN,
        user/group ids, cert settings, ...)
    :param backends: list of backend property dicts; backends[0] (if any)
        provides the default suffix used for SASL maps and plugin defaults
    """
    ######################## WARNING #############################
    # DO NOT CHANGE THIS FUNCTION OR ITS CONTENTS WITHOUT READING
    # ALL OF THE COMMENTS FIRST. THERE ARE VERY DELICATE
    # AND DETAILED INTERACTIONS OF COMPONENTS IN THIS FUNCTION.
    #
    # IF IN DOUBT CONTACT WILLIAM BROWN <*****@*****.**>

    ### This first section is about creating the *minimal* required paths and config to get
    # directory server to start: After this, we then perform all configuration as online
    # changes from after this point.

    # Create dse.ldif with a temporary root password.
    # This is done first, because instances are found for removal and listing by detecting
    # the present of their dse.ldif!!!!
    # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif
    # Variables are done with %KEY%.
    self.log.debug("ACTION: Creating dse.ldif")
    try:
        os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
        os.makedirs(slapd['config_dir'], mode=0o770)
    except OSError:
        # Directory may already exist; that's fine.
        pass

    # Get suffix for some plugin defaults (if possible)
    # annoyingly for legacy compat backend takes TWO key types
    # and we have to now deal with that ....
    #
    # Create ds_suffix here else it won't be in scope ....
    ds_suffix = ''
    if len(backends) > 0:
        ds_suffix = normalizeDN(backends[0]['nsslapd-suffix'])

    # Convert the template's %KEY% placeholders into {KEY} so we can use
    # str.format below (only the first two '%' per line are rewritten).
    dse = ""
    with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse:
        for line in template_dse.readlines():
            dse += line.replace('%', '{', 1).replace('%', '}', 1)

    with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
        file_dse.write(dse.format(
            schema_dir=slapd['schema_dir'],
            lock_dir=slapd['lock_dir'],
            tmp_dir=slapd['tmp_dir'],
            cert_dir=slapd['cert_dir'],
            ldif_dir=slapd['ldif_dir'],
            bak_dir=slapd['backup_dir'],
            run_dir=slapd['run_dir'],
            inst_dir=slapd['inst_dir'],
            log_dir=slapd['log_dir'],
            fqdn=general['full_machine_name'],
            ds_port=slapd['port'],
            ds_user=slapd['user'],
            rootdn=slapd['root_dn'],
            ds_passwd=self._secure_password,  # We set our own password here, so we can connect and mod.
            # This is because we never know the users input root password as they can validily give
            # us a *hashed* input.
            ds_suffix=ds_suffix,
            config_dir=slapd['config_dir'],
            db_dir=slapd['db_dir'],
        ))

    # Create all the needed paths
    # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
    for path in ('backup_dir', 'cert_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
        self.log.debug("ACTION: creating %s", slapd[path])
        try:
            os.umask(0o007)  # For parent dirs that get created -> sets 770 for perms
            os.makedirs(slapd[path], mode=0o770)
        except OSError:
            pass
        os.chown(slapd[path], slapd['user_uid'], slapd['group_gid'])

    # /var/lock/dirsrv needs special attention...
    parentdir = os.path.abspath(os.path.join(slapd['lock_dir'], os.pardir))
    os.chown(parentdir, slapd['user_uid'], slapd['group_gid'])

    ### Warning! We need to down the directory under db too for .restore to work.
    # See dblayer.c for more!
    db_parent = os.path.join(slapd['db_dir'], '..')
    os.chown(db_parent, slapd['user_uid'], slapd['group_gid'])

    # Copy correct data to the paths.
    # Copy in the schema
    #  This is a little fragile, make it better.
    # It won't matter when we move schema to usr anyway ...
    _ds_shutil_copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir'])
    os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid'])
    os.chmod(slapd['schema_dir'], 0o770)

    # Copy in the collation
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf')
    dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf')
    shutil.copy2(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # Copy in the certmap configuration
    srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/certmap.conf')
    dstfile = os.path.join(slapd['config_dir'], 'certmap.conf')
    shutil.copy2(srcfile, dstfile)
    os.chown(dstfile, slapd['user_uid'], slapd['group_gid'])
    os.chmod(dstfile, 0o440)

    # If we are on the correct platform settings, systemd
    if general['systemd']:
        # Should create the symlink we need, but without starting it.
        subprocess.check_call(["systemctl", "enable", "dirsrv@%s" % slapd['instance_name']])

        # Setup tmpfiles_d so run/lock dirs are recreated with the right
        # ownership after a reboot clears tmpfs.
        tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
        with open(tmpfile_d, "w") as TMPFILE_D:
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['run_dir'], slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['lock_dir'].replace("slapd-" + slapd['instance_name'], ""), slapd['user'], slapd['group']))
            TMPFILE_D.write("d {} 0770 {} {}\n".format(slapd['lock_dir'], slapd['user'], slapd['group']))

    # Else we need to detect other init scripts?
    # WB: No, we just install and assume that docker will start us ...

    # Bind sockets to our type?

    # Create certdb in sysconfidir
    self.log.debug("ACTION: Creating certificate database is %s", slapd['cert_dir'])

    # BELOW THIS LINE - all actions are now ONLINE changes to the directory server.
    # if it all possible, ALWAYS ADD NEW INSTALLER CHANGES AS ONLINE ACTIONS.

    # Should I move this import? I think this prevents some recursion
    from lib389 import DirSrv
    ds_instance = DirSrv(self.verbose)
    if self.containerised:
        ds_instance.systemd = general['systemd']
    args = {
        SER_PORT: slapd['port'],
        SER_SERVERID_PROP: slapd['instance_name'],
        SER_ROOT_DN: slapd['root_dn'],
        SER_ROOT_PW: self._raw_secure_password,
        SER_DEPLOYED_DIR: slapd['prefix']
    }
    ds_instance.allocate(args)
    # Does this work?
    assert_c(ds_instance.exists(), "Instance failed to install, does not exist when expected")

    # Create a certificate database.
    tlsdb = NssSsl(dbpath=slapd['cert_dir'])
    if not tlsdb._db_exists():
        tlsdb.reinit()

    if slapd['self_sign_cert']:
        etc_dirsrv_path = os.path.join(slapd['sysconf_dir'], 'dirsrv/')
        ssca_path = os.path.join(etc_dirsrv_path, 'ssca/')
        ssca = NssSsl(dbpath=ssca_path)
        # If it doesn't exist, create a CA DB
        if not ssca._db_exists():
            ssca.reinit()
            ssca.create_rsa_ca(months=slapd['self_sign_cert_valid_months'])
        # If CA is expired or will expire soon,
        # Reissue it and resign the existing certs that were signed by the cert previously
        elif ssca.rsa_ca_needs_renew():
            ca = ssca.renew_rsa_ca(months=slapd['self_sign_cert_valid_months'])
            # Import CA to the existing instances except the one we install now (we import it later)
            for dir in os.listdir(etc_dirsrv_path):
                if dir.startswith("slapd-") and dir != slapd['cert_dir']:
                    tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
                    tlsdb_inst.import_rsa_crt(ca)

        csr = tlsdb.create_rsa_key_and_csr()
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)

        if general['selinux']:
            # Set selinux port label
            selinux_label_port(slapd['secure_port'])

    # Do selinux fixups
    if general['selinux']:
        selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                         'ldif_dir', 'lock_dir', 'log_dir', 'run_dir',
                         'schema_dir', 'tmp_dir')
        for path in selinux_paths:
            selinux_restorecon(slapd[path])

        selinux_label_port(slapd['port'])

    # Start the server
    # Make changes using the temp root
    ds_instance.start(timeout=60)
    ds_instance.open()

    # In some cases we may want to change log settings
    # ds_instance.config.enable_log('audit')

    # Create the configs related to this version.
    base_config = get_config(general['defaults'])
    base_config_inst = base_config(ds_instance)
    base_config_inst.apply_config(install=True)

    # Setup TLS with the instance.
    # We *ALWAYS* set secure port, even if security is off, because it breaks
    # tests with standalone.enable_tls if we do not. It's only when security; on
    # that we actually start listening on it.
    if not slapd['secure_port']:
        slapd['secure_port'] = "636"
    ds_instance.config.set('nsslapd-secureport', '%s' % slapd['secure_port'])
    if slapd['self_sign_cert']:
        ds_instance.config.set('nsslapd-security', 'on')

    # Create the backends as listed
    # Load example data if needed.
    for backend in backends:
        is_sample_entries_in_props = "sample_entries" in backend
        create_suffix_entry_in_props = backend.pop('create_suffix_entry', False)
        ds_instance.backends.create(properties=backend)
        if not is_sample_entries_in_props and create_suffix_entry_in_props:
            # No sample entries requested, but the caller asked for a bare
            # suffix entry, so create the domain object with read ACIs.
            domain = create_base_domain(ds_instance, backend['nsslapd-suffix'])
            # Set basic ACI
            domain.add('aci', [
                # Allow reading the base domain object
                '(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)',
                # Allow reading the ou
                '(targetattr="ou || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)'
            ])

    # Initialise ldapi socket information. IPA expects this ....
    ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name'])
    ds_instance.config.set('nsslapd-ldapifilepath', ldapi_path)
    ds_instance.config.set('nsslapd-ldapilisten', 'on')
    ds_instance.config.set('nsslapd-ldapiautobind', 'on')
    ds_instance.config.set('nsslapd-ldapimaprootdn', slapd['root_dn'])

    # Create all required sasl maps: if we have a single backend ...
    # our default maps are really really bad, and we should feel bad.
    # they basically only work with a single backend, and they'll break
    # GSSAPI in some cases too :(
    if len(backends) > 0:
        # NOTE(review): at this point `backend` still holds the *last*
        # element of the loop above, while the maps below use backends[0]
        # — the debug message can name a different suffix than the maps.
        self.log.debug("Adding sasl maps for suffix %s" % backend['nsslapd-suffix'])
        backend = backends[0]
        saslmappings = SaslMappings(ds_instance)
        saslmappings.create(properties={
            'cn': 'rfc 2829 u syntax',
            'nsSaslMapRegexString': '^u:\\(.*\\)',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=\\1)'
        })
        # I think this is for LDAPI
        saslmappings.create(properties={
            'cn': 'uid mapping',
            'nsSaslMapRegexString': '^[^:@]+$',
            'nsSaslMapBaseDNTemplate': backend['nsslapd-suffix'],
            'nsSaslMapFilterTemplate': '(uid=&)'
        })
    else:
        self.log.debug("Skipping default SASL maps - no backend found!")

    # Change the root password finally
    ds_instance.config.set('nsslapd-rootpw', slapd['root_password'])

    # We need to log the password when containerised
    if self.containerised:
        self.log.debug("Root DN password: {}".format(slapd['root_password']))

    # Complete.
    if general['start']:
        # Restart for changes to take effect - this could be removed later
        ds_instance.restart(post_open=False)
    else:
        # Just stop the instance now.
        ds_instance.stop()
def topology(request):
    """pytest fixture: build a two-master replication topology with test data.

    Creates (or recreates) master1 and master2, enables the master replica
    role on both, creates a replication agreement in each direction,
    bulk-loads ``NUM_USERS`` * 2 test entries into master1 via an online
    LDIF import, then initialises master2 from master1 and verifies
    replication works before returning.

    :param request: pytest fixture request (used to register the finalizer)
    :returns: TopologyReplication(master1, master2)

    NOTE: mutates the module-level ``args_instance`` template dict.
    """
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    if master1.exists():
        # Remove any stale instance so the test always starts clean
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    if master2.exists():
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX,
                                      role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX,
                                          host=master2.host,
                                          port=master2.port,
                                          properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX,
                                          host=master1.host,
                                          port=master1.port,
                                          properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Import tests entries into master1 before we initialize master2
    #
    ldif_dir = master1.get_ldif_dir()
    import_ldif = ldif_dir + '/rel7.5-entries.ldif'

    def _write_user(ldif, name, count):
        # Write one inetorgperson test entry. The description length is
        # randomized so entry sizes vary across the data set.
        ldif.write('dn: uid=' + name + '_entry' + count + ',' + DEFAULT_SUFFIX + '\n')
        ldif.write('objectclass: top\n')
        ldif.write('objectclass: person\n')
        ldif.write('objectclass: inetorgperson\n')
        ldif.write('objectclass: organizationalperson\n')
        ldif.write('uid: ' + name + '_entry' + count + '\n')
        ldif.write('cn: ' + name + ' entry' + count + '\n')
        ldif.write('givenname: ' + name + ' ' + count + '\n')
        ldif.write('sn: entry ' + count + '\n')
        ldif.write('userpassword: ' + name + '_entry' + count + '\n')
        ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n')
        ldif.write('\n')

    # First generate an ldif.  Use a context manager so the file handle is
    # closed even when a write fails (the previous version opened the file
    # bare and leaked the handle on a failed write).
    try:
        with open(import_ldif, 'w') as ldif:
            # Create the root node
            ldif.write('dn: ' + DEFAULT_SUFFIX + '\n')
            ldif.write('objectclass: top\n')
            ldif.write('objectclass: domain\n')
            ldif.write('dc: example\n')
            ldif.write('\n')
            # Create the entries (one master1_ and one master2_ entry per
            # index, interleaved)
            for idx in range(NUM_USERS):
                count = str(idx)
                _write_user(ldif, 'master1', count)
                _write_user(ldif, 'master2', count)
    except IOError as e:
        log.fatal('Failed to create test ldif, error: %s - %s' % (e.errno, e.strerror))
        assert False

    # Now import it
    try:
        master1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif,
                                 args={TASK_WAIT: True})
    except ValueError:
        log.fatal('test_reliab_7.5: Online import failed')
        assert False

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    # Delete each instance in the end
    def fin():
        master1.delete()
        master2.delete()
        if ENABLE_VALGRIND:
            sbin_dir = get_sbin_dir(prefix=master1.prefix)
            valgrind_disable(sbin_dir)
    request.addfinalizer(fin)

    return TopologyReplication(master1, master2)
class PerformanceTools:
    """Utilities to run lib389 performance measures and collect the results.

    Measures are run either through the ldclt load-generation tool (search
    and modify rates) or directly (offline export/import rates).  Each
    result is appended to two files in the result directory:
      - "out": a short human readable summary,
      - "log": the full raw result dict (one python literal per line).
    log2Csv() converts the "log" file into a csv comparing bdb vs mdb.
    """

    def __init__(self, options=None):
        """Prepare the result directory and the measure options.

        :param options: dict overriding the defaults
                        ('nbUsers', 'seed', 'resultDir', 'suffix').
                        (Was a mutable ``{}`` default — a classic pitfall;
                        ``None`` keeps the behavior without sharing state.)
        """
        if options is None:
            options = {}
        prefix = os.path.join(os.environ.get('PREFIX', ""))
        perfdir = f"{prefix}/var/log/dirsrv/perfdir"
        print(f"Results and logs are stored in {perfdir} directory.")
        self._options = {
            'nbUsers': 10000,
            'seed': 'lib389PerfTools',
            'resultDir': perfdir,
            'suffix': DEFAULT_SUFFIX,
            **options,
        }
        seed(self._options['seed'])
        self._instance = None
        os.makedirs(perfdir, mode=0o755, exist_ok=True)
        self._ldclt_template = self.getFilePath("template.ldclt")
        # Generate a dummy template anyway we do not plan to create entries
        with open(self._ldclt_template, "w") as f:
            f.write("objectclass: inetOrgPerson\n")
        self._users_parents_dn = f"ou=People,{self._options['suffix']}"

    @staticmethod
    def log2Csv(fname, fout):
        """Convert a (verbose) "log" file into a csv file.

        The csv puts bdb and mdb results side by side (easier for
        comparing the results).

        :param fname: path of the raw log file to read.
        :param fout: path of the csv file to write.
        """
        results = {}      # (nb_users, name, nbthreads, db_lib) tuple -> list of results
        names = {}        # {name: None} map (insertion-ordered set of names)
        has_threads = {}  # {name: {threads: {users: users}}} map
        # Read log file
        maxmes = 0
        with open(fname) as f:
            for line in f:
                if line[0] != '{':
                    continue
                if line[-1] == '\n':
                    line = line[:-1]
                # SECURITY NOTE(review): eval() of the log file content —
                # only feed this function files produced by these tools.
                res = eval(line.replace('\n', '\\n'))
                nb_users = res['nbUsers']
                db_lib = res['db_lib']
                name = res['measure_name']
                names[name] = None
                try:
                    nbthreads = res['nb_threads']
                except KeyError:
                    nbthreads = ""
                if name not in has_threads:
                    has_threads[name] = {}
                if nbthreads not in has_threads[name]:
                    has_threads[name][nbthreads] = {}
                has_threads[name][nbthreads][nb_users] = nb_users
                key = (nb_users, name, nbthreads, db_lib)
                if key not in results:
                    results[key] = []
                m = results[key]
                m.append(res)
                if maxmes < len(m):
                    maxmes = len(m)
        # Displays the result: by test name then by thread number then by users number
        # Generates all combinations
        keys = []
        for name in sorted(names.keys()):
            for t in sorted(has_threads[name].keys()):
                for user in sorted(has_threads[name][t].keys()):
                    keys.append((user, name, t))
        # Generates the csv file
        with CsvFile(fout, 2 * maxmes + 2) as csv:
            csv.nf("test name")
            csv.nf("threads")
            csv.nf("users")
            for _ in range(maxmes):
                csv.nf("bdb")
                csv.nf("mdb")
                csv.nf("%")
            csv.nl()
            for k in keys:
                csv.nf(f"{k[1]}")
                csv.nf(f"{k[2]}")
                csv.nf(f"{k[0]}")
                k0 = (k[0], k[1], k[2], "bdb")
                k1 = (k[0], k[1], k[2], "mdb")
                for idx in range(maxmes):
                    if k0 in results and idx < len(results[k0]):
                        res = results[k0][idx]
                        csv.nf(res['safemean'])
                    else:
                        csv.nf(None)
                    if k1 in results and idx < len(results[k1]):
                        res = results[k1][idx]
                        csv.nf(res['safemean'])
                    else:
                        csv.nf(None)
                    # Add % formula
                    csv.nf(f"=({csv.ref(-1)}-{csv.ref(-2)})/{csv.ref(-2)}")
                csv.nl()

    def getFilePath(self, filename):
        """Return the path of *filename* within the result directory."""
        return os.path.join(self._options['resultDir'], filename)

    def log(self, filename, msg):
        """Append *msg* plus a newline to *filename* in the result directory."""
        with open(self.getFilePath(filename), "at") as f:
            f.write(str(msg))
            f.write("\n")

    def initInstance(self):
        """Return a standalone instance populated with 'nbUsers' users.

        The existing "standalone1" instance is reused when it already has
        the expected number of users (tracked in the "uids" file) and the
        expected db backend; otherwise it is rebuilt and repopulated.
        """
        if self._instance:
            return self._instance
        # The "uids" file records the users created in the instance.
        uidpath = self.getFilePath("uids")
        nb_uids = 0
        try:
            with open(uidpath, 'r') as f:
                while f.readline():
                    nb_uids += 1
        except FileNotFoundError:
            pass
        nb_users = self._options['nbUsers']
        need_rebuild = True
        if nb_uids == nb_users:
            # Lets try to reuse existing instance
            try:
                self._instance = DirSrv(verbose=True)
                self._instance.local_simple_allocate(serverid="standalone1", password=PW_DM)
                self._instance.open()
                if self._instance.exists():
                    if self._instance.get_db_lib() == get_default_db_lib():
                        need_rebuild = False
                    else:
                        print(
                            f"db is {self._instance.get_db_lib()} instead of {get_default_db_lib()} ==> instance must be rebuild"
                        )
                else:
                    print(f"missing instance ==> instance must be rebuild")
            except Exception:
                # Any failure while reopening ==> fall through and rebuild.
                pass
        else:
            print(
                f"Instance has {nb_uids} users instead of {nb_users} ==> instance must be rebuild"
            )
        if need_rebuild:
            print("Rebuilding standalone1 instance")
            # Should rebuild the instance from scratch
            topology = create_topology({ReplicaRole.STANDALONE: 1})
            self._instance = topology.standalone
            # Adjust db size if needed (i.e about 670 K users)
            defaultDBsize = 1073741824
            entrySize = 1600  # Real size is around 1525
            if (self._instance.get_db_lib() == "mdb" and
                    nb_users * entrySize > defaultDBsize):
                mdb_config = LMDB_LDBMConfig(self._instance)
                mdb_config.replace("nsslapd-mdb-max-size", str(nb_users * entrySize))
                self._instance.restart()
            # Then populate the users
            useraccounts = UserAccounts(self._instance, self._options['suffix'])
            with open(uidpath, 'w') as f:
                uidgen = IdGeneratorWithNumbers(nb_users)
                cnGen = IdGeneratorWithNames(100)
                snGen = IdGeneratorWithNames(100)
                for uid in uidgen:
                    cn = cnGen.random()
                    sn = snGen.random()
                    rdn = f"uid={uid}"
                    osuid = uidgen.getIdx() + 1000
                    osgid = int(osuid % 100) + 1000
                    properties = {
                        'uid': uid,
                        'cn': cn,
                        'sn': sn,
                        'uidNumber': str(osuid),
                        'gidNumber': str(osgid),
                        'homeDirectory': f'/home/{uid}',
                    }
                    # NOTE(review): calls the parent class create() directly,
                    # presumably to skip UserAccounts' extra per-entry checks
                    # when creating many users — confirm against lib389.
                    super(UserAccounts, useraccounts).create(rdn, properties)
                    f.write(f'{uid}\n')
        return self._instance

    @staticmethod
    def filterMeasures(values, m, ecart):
        """Return the items of *values* strictly within ±(ecart*m) around m."""
        return [val for val in values
                if val > (1 - ecart) * m and val < (1 + ecart) * m]

    def safeMeasures(self, values, ecart=0.2):
        """Iteratively discard the values too far from the mean.

        Returns the filtered list, or the original *values* when the
        filtering converges to an empty list or the mean cannot be
        computed (empty input).
        """
        v = values
        try:
            r = PerformanceTools.filterMeasures(values, statistics.mean(v), ecart)
            while r != v:
                v = r
                # NOTE(review): each pass re-filters the original *values*
                # using the mean of the current subset (not the subset
                # itself) — looks intentional; confirm.
                r = PerformanceTools.filterMeasures(values, statistics.mean(v), ecart)
            if len(r) == 0:
                return values
            return r
        except statistics.StatisticsError as e:
            self.log("log", str(e))
            print(e)
            return values

    def getEnvInfo(self):
        """Return a dict describing the environment (db lib, cpus, memory, release)."""
        mem = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024. ** 3)
        # NOTE(review): assumes a Red Hat based host; this raises
        # FileNotFoundError elsewhere — confirm supported platforms.
        with open('/etc/redhat-release') as f:
            release = f.read()
        return {
            "db_lib": self._instance.get_db_lib(),
            "nb_cpus": multiprocessing.cpu_count(),
            "total mem": mem,
            "release": str(release),
            **self._options,
        }

    def finalizeResult(self, res):
        """Compute the means from the raw results then log the final result.

        :param res: result dict; completed in place with 'rawmean',
                    'saferesults', 'safemean' and 'pretty' keys.
        :return: the completed result dict.
        """
        try:
            rawres = res["rawresults"]
            res["rawmean"] = statistics.mean(rawres)
            # Discard the measures too far from the mean.
            res["saferesults"] = self.safeMeasures(rawres)
            res["safemean"] = statistics.mean(res["saferesults"])
            pretty_res_keys = ['start_time', 'stop_time', 'measure_name',
                               'safemean', 'db_lib', 'nbUsers', 'nb_threads']
            pretty_res = dict(
                filter(lambda elem: elem[0] in pretty_res_keys, res.items()))
        except statistics.StatisticsError as e:
            print(e)
            res["exception"] = e
            pretty_res = "#ERROR"
        res["pretty"] = pretty_res
        self.log("out", res["pretty"])
        self.log("log", res)
        return res

    def ldclt(self, measure_name, args, nbThreads=10, nbMes=10):
        """Run the ldclt load generator and collect its measure rates.

        :param measure_name: name stored in the result dict.
        :param args: dict of extra ldclt options ({option: value-or-None}).
        :param nbThreads: number of ldclt client threads (-n).
        :param nbMes: number of measures to perform (-N).
        :return: the finalized result dict.
        :raises subprocess.TimeoutExpired: if ldclt exceeds the timeout
                (now logged before being re-raised).
        """
        # First ldclt measure is always bad so do 1 measure more
        # and discard it from final result
        nbMes += 1
        prog = os.path.join(self._instance.ds_paths.bin_dir, 'ldclt')
        cmd = [prog,
               '-h', f'{self._instance.host}',
               '-p', f'{self._instance.port}',
               '-D', f'{self._instance.binddn}',
               '-w', f'{self._instance.bindpw}',
               '-N', str(nbMes),
               '-n', str(nbThreads)]
        for key, val in args.items():
            cmd.append(str(key))
            if val:
                cmd.append(str(val))
        start_time = time.time()
        tmout = 30 + 10 * nbMes
        print(f"Running ldclt with a timeout of {tmout} seconds ...\r")
        try:
            # FIX: without check=True, subprocess.run() never raises
            # CalledProcessError (the returncode is recorded in the result
            # instead), but TimeoutExpired CAN be raised — catch it too so
            # the failure is logged before propagating.  TimeoutExpired has
            # no 'returncode', hence the getattr.
            result = subprocess.run(args=cmd, capture_output=True, timeout=tmout)
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
            self.log("log", f'{e.cmd} failed. measure: {measure_name}\n' +
                     f'instance: {self._instance.serverid}\n' +
                     f'return code is {getattr(e, "returncode", None)}.\n' +
                     f'stdout: {e.stdout}\n' +
                     f'stderr: {e.stderr}\n')
            raise e
        print(" Done.")
        stop_time = time.time()
        # Lets parse the result
        res = {
            "measure_name": measure_name,
            "cmd": cmd,
            "stdout": result.stdout,
            "stderr": result.stderr,
            "returncode": result.returncode,
            "start_time": start_time,
            "stop_time": stop_time,  # FIX: key was duplicated in the literal
            "nb_threads": nbThreads,
            **self.getEnvInfo(),
        }
        rawres = re.findall(r'Average rate: [^ ]*\s*.([^/]*)', str(result.stdout))
        rawres = [float(i) for i in rawres]
        res["measure0"] = rawres[0]
        res["rawresults"] = rawres[1:]  # Discard first measure
        return self.finalizeResult(res)

    def measure_search_by_uid(self, name, nb_threads=1):
        """Measure the search rate using random existing-uid equality filters."""
        nb_users = self._options['nbUsers']
        args = {
            "-b": self._users_parents_dn,
            "-f": "uid=XXXXXXXXXX",
            "-e": "esearch,random",
            "-r0": None,
            f"-R{nb_users-1}": None,
        }
        return self.ldclt(name, args, nbThreads=nb_threads)

    # I wish I could make the base dn vary rather than use the dn in filter
    # but I did not find how to do that (the RDN trick as in modify
    # generates the same search than measure_search_by_uid test)
    def measure_search_by_filtering_the_dn(self, name, nb_threads=1):
        """Measure the search rate using random uid-in-dn filters."""
        nb_users = self._options['nbUsers']
        args = {
            "-b": self._users_parents_dn,
            "-f": "uid:dn:=XXXXXXXXXX",
            "-e": "esearch,random",
            "-r0": None,
            f"-R{nb_users-1}": None,
        }
        return self.ldclt(name, args, nbThreads=nb_threads)

    def measure_modify(self, name, nb_threads=1):
        """Measure the modify rate, replacing sn by a random value on random entries."""
        nb_users = self._options['nbUsers']
        args = {
            "-b": self._users_parents_dn,
            "-e": f"rdn=uid:[RNDN(0;{nb_users-1};10)],object={self._ldclt_template},attreplace=sn: random modify XXXXX",
        }
        return self.ldclt(name, args, nbThreads=nb_threads)

    def offline_export(self):
        """Export the db to ldif; return the export rate in entries per second."""
        start_time = time.time()
        # Keep the assert: _do_measure relies on AssertionError to skip
        # a failed measure.
        assert (self._instance.db2ldif(DEFAULT_BENAME, (self._options['suffix'], ),
                                       None, None, None, self._ldif))
        stop_time = time.time()
        # Count entries in ldif file (if not already done)
        if not self._nbEntries:
            self._nbEntries = 0
            with open(self._ldif) as f:
                for line in f:
                    if line.startswith("dn:"):
                        self._nbEntries += 1
        return self._nbEntries / (stop_time - start_time)

    def offline_import(self):
        """Import the ldif from offline_export; return the rate in entries per second."""
        start_time = time.time()
        assert (self._instance.ldif2db(DEFAULT_BENAME, None, None, None, self._ldif))
        stop_time = time.time()
        return self._nbEntries / (stop_time - start_time)

    def _do_measure(self, measure_name, measure_cb, nbMes):
        """Perform a non-ldclt measure *nbMes* times.

        Iterations where *measure_cb* raises AssertionError are skipped.
        """
        first_time = time.time()
        rawres = []
        for _ in range(nbMes):
            try:
                rawres.append(measure_cb())
            except AssertionError:
                continue
        last_time = time.time()
        # Lets parse the result
        res = {
            "measure_name": measure_name,
            "start_time": first_time,
            "stop_time": last_time,
            "nb_measures": nbMes,
            "rawresults": rawres,
            **self.getEnvInfo(),
        }
        return self.finalizeResult(res)

    def mesure_export_import(self, nbMes=10):
        """Measure offline export then import rates (instance stopped meanwhile)."""
        self._instance.stop()
        self._ldif = self.getFilePath("db.ldif")
        self._nbEntries = None
        res = [
            self._do_measure("export", self.offline_export, nbMes),
            self._do_measure("import", self.offline_import, nbMes),
        ]
        self._instance.start()
        return res

    class Tester:
        """Basic tester (used to define ldclt tests)."""

        def __init__(self, name, description, method_name):
            self._base_name = name
            self._base_description = description
            self._method_name = method_name

        def name(self):
            return self._base_name

        def argsused(self):
            return ["nb_threads", "name"]

        def description(self):
            return self._base_description

        def run(self, perftools, args):
            """Run the PerformanceTools method this tester wraps."""
            args['name'] = self._base_name
            res = getattr(perftools, self._method_name)(
                self._base_name, nb_threads=args['nb_threads'])
            print(res['pretty'])

        @staticmethod
        def initTester(args):
            """Build a PerformanceTools (and its DS instance) from *args*."""
            os.environ["NSSLAPD_DB_LIB"] = args['db_lib']
            perftools = PerformanceTools(args)
            perftools.initInstance()
            return perftools

    class TesterImportExport(Tester):
        """A special tester for export/import."""

        def __init__(self):
            super().__init__(
                "export/import",
                "Measure export rate in entries per seconds then measure import rate.",
                None)

        def argsused(self):
            return []

        def run(self, perftools, args=None):
            res = perftools.mesure_export_import()
            for r in res:
                print(r['pretty'])

    @staticmethod
    def listTests():
        """Return a {name: tester} dict of every available tester."""
        # List of test for which args.nb_threads is useful
        return {
            t.name(): t
            for t in [
                PerformanceTools.Tester(
                    "search_uid",
                    "Measure number of searches per seconds using filter with random existing uid.",
                    "measure_search_by_uid"),
                PerformanceTools.Tester(
                    "search_uid_in_dn",
                    "Measure number of searches per seconds using filter with random existing uid in dn (i.e: (uid:dn:uid_value)).",
                    "measure_search_by_filtering_the_dn"),
                PerformanceTools.Tester(
                    "modify_sn",
                    "Measure number of modify per seconds replacing sn by random value on random entries.",
                    "measure_modify"),
                PerformanceTools.TesterImportExport(),
            ]
        }

    @staticmethod
    def runAllTests(options):
        """Run every tester for each user count and each db backend."""
        for users in (100, 1000, 10000, 100000, 1000000):
            for db in ('bdb', 'mdb'):
                perftools = PerformanceTools.Tester.initTester({
                    **options,
                    'nbUsers': users,
                    'db_lib': db,
                })
                for t in PerformanceTools.listTests().values():
                    if 'nb_threads' in t.argsused():
                        for nbthreads in (1, 4, 8):
                            t.run(perftools, {"nb_threads": nbthreads})
                    else:
                        t.run(perftools)