def set_state(self, new_state):
    """Set the mapping tree state (nsslapd-state) for this backend.

    :param new_state: One of 'backend', 'disabled', 'referral' or
                      'referral on update' (case-insensitive)
    :type new_state: str
    :raises ValueError: if the state is invalid, the backend has no mapping
                        tree entry, or the suffix is replicated
    """
    new_state = new_state.lower()

    # Validate the requested state first, before any LDAP round trips
    # (the original only validated after fetching the mapping tree entry)
    if new_state not in ['backend', 'disabled', 'referral', 'referral on update']:
        raise ValueError(f"Invalid backend state {new_state}, value must be one of the following: 'backend', 'disabled', 'referral', 'referral on update'")

    suffix = self.get_attr_val_utf8('nsslapd-suffix')
    try:
        mt = self._mts.get(suffix)
    except ldap.NO_SUCH_OBJECT:
        raise ValueError(
            "Backend missing mapping tree entry, unable to set configuration")

    # Can not change state of replicated backend
    replicas = Replicas(self._instance)
    try:
        # Check if replication is enabled
        replicas.get(suffix)
        raise ValueError(
            "Can not change the backend state of a replicated suffix")
    except ldap.NO_SUCH_OBJECT:
        pass

    # Ok, change the state
    mt.replace('nsslapd-state', new_state)
def get_agmt_maxcsn(self):
    """Look up this agreement's maxcsn in the database RUV entry.

    :returns: CSN string if found, otherwise None is returned
    """
    from lib389.replica import Replicas

    repl_root = self.get_attr_val_utf8(REPL_ROOT)
    agmt_name = self.get_attr_val_utf8('cn')
    replica = Replicas(self._instance).get(repl_root)
    entries = replica.get_ruv_agmt_maxcsns()
    if not entries:
        self._log.debug(
            'get_agmt_maxcsn - Failed to get agmt maxcsn from RUV')
        return None

    for entry in entries:
        parts = entry.split(';')
        if parts[1] == agmt_name:
            # Same replica: the maxcsn is the sixth component, when present
            return parts[5] if len(parts) >= 6 else None

    self._log.debug(
        'get_agmt_maxcsn - did not find matching agmt maxcsn from RUV')
    return None
def resume_all_replicas(self):
    """Resume every replication agreement on every instance in this class."""
    for instance in self.all_insts.values():
        replica = Replicas(instance).get(DEFAULT_SUFFIX)
        for agmt in replica.get_agreements().list():
            agmt.resume()
def test_cl_encryption_setup_process(topo):
    """Take an already working replication deployment, and setup changelog encryption

    :id: 1a1b7d29-69f5-4f0e-91c4-e7f66140ff17
    :setup: Master Instance, Consumer Instance
    :steps:
        1. Enable TLS for the server
        2. Export changelog
        3. Enable changelog encryption
        4. Import changelog
        5. Verify replication is still working
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    supplier = topo.ms['master1']
    consumer = topo.cs['consumer1']

    # Enable TLS
    log.info('Enable TLS ...')
    supplier.enable_tls()
    consumer.enable_tls()

    # Export changelog
    log.info('Export changelog ...')
    replicas = Replicas(supplier)
    replica = replicas.get(DEFAULT_SUFFIX)
    replica.begin_task_cl2ldif()
    replica.task_finished()

    # Enable changelog encryption.  The server must be stopped because the
    # encryption settings are edited directly in dse.ldif.
    log.info('Enable changelog encryption ...')
    dse_ldif = DSEldif(supplier)
    supplier.stop()
    # The changelog DN differs between the per-backend changelog and the
    # legacy global changelog.
    if ds_supports_new_changelog():
        changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
    else:
        changelog = DN_CHANGELOG
    dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', 'AES')
    # Drop any stale symmetric key so a fresh one is generated on start
    if dse_ldif.get(changelog, 'nsSymmetricKey'):
        dse_ldif.delete(changelog, 'nsSymmetricKey')
    supplier.start()

    # Import changelog
    log.info('Import changelog ...')
    replica.begin_task_ldif2cl()
    replica.task_finished()

    # Verify replication is still working
    log.info('Test replication is still working ...')
    assert replica.test_replication([consumer])
def _lint_cl_trimming(self):
    """Check that cl trimming is at least defined to prevent unbounded growth

    Yields a DSCLLE0001 report when the backend is replicated but neither
    changelog max-entries nor max-age trimming is configured.
    """
    bename = self.lint_uid()
    suffix = self.get_attr_val_utf8('nsslapd-suffix')
    replicas = Replicas(self._instance)
    try:
        # Check if replication is enabled
        replicas.get(suffix)
        # Check the changelog
        cl = Changelog(self._instance, suffix=suffix)
        if cl.get_attr_val_utf8('nsslapd-changelogmaxentries') is None and \
           cl.get_attr_val_utf8('nsslapd-changelogmaxage') is None:
            report = copy.deepcopy(DSCLLE0001)
            report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
            report['check'] = f'backends:{bename}::cl_trimming'
            yield report
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
    # no longer swallowed; lint remains best-effort for everything else.
    except Exception:
        # Suffix is not replicated (or the changelog is unavailable)
        self._log.debug(
            f"_lint_cl_trimming - backend ({suffix}) is not replicated")
def test_invalid_agmt(topo_m2):
    """Test adding that an invalid agreement is properly rejected and does not crash the server

    :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b
    :setup: Four masters replication setup
    :steps:
        1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
        2. Verify the server is still running
    :expectedresults:
        1. Invalid repl agreement should be rejected
        2. Server should be still running
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]

    # NOTE(review): this first ReplicationManager is never used — it is
    # rebound below before the replication check.
    repl = ReplicationManager(DEFAULT_SUFFIX)

    replicas = Replicas(m1)
    replica = replicas.get(DEFAULT_SUFFIX)
    agmts = replica.get_agreements()

    # Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        agmts.create(properties={
            'cn': 'whatever',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
            'nsDS5ReplicaBindMethod': 'simple',
            'nsDS5ReplicaTransportInfo': 'LDAP',
            'nsds5replicaTimeout': '5',
            'description': "test agreement",
            'nsDS5ReplicaHost': m2.host,
            'nsDS5ReplicaPort': str(m2.port),
            'nsDS5ReplicaCredentials': 'whatever',
            'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE'
        })

    # Verify the server is still running
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)
def _lint_cl_trimming(self):
    """Check that cl trimming is at least defined to prevent unbounded growth

    Yields a DSCLLE0001 report when the suffix is replicated but neither
    changelog max-entries nor max-age trimming is configured.
    """
    suffix = self.get_attr_val_utf8('nsslapd-suffix')
    replicas = Replicas(self._instance)
    # NOTE(review): replicas.get() may raise for a non-replicated suffix
    # rather than return None — confirm against the Replicas API.
    replica = replicas.get(suffix)
    bename = self.lint_uid()
    if replica is not None:
        cl = Changelog(self._instance, suffix=suffix)
        try:
            if cl.get_attr_val_utf8('nsslapd-changelogmaxentries') is None and \
               cl.get_attr_val_utf8('nsslapd-changelogmaxage') is None:
                report = copy.deepcopy(DSCLLE0001)
                report['fix'] = report['fix'].replace(
                    'YOUR_INSTANCE', self._instance.serverid)
                report['check'] = f'backends:{bename}::cl_trimming'
                yield report
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are no longer swallowed; lint remains best-effort otherwise.
        except Exception:
            # No changelog
            pass
def check_ruvs(msg, topology_m4, m4rid):
    """Check suppliers 1- 3 for supplier 4's rid."""
    suppliers = (topology_m4.ms["supplier1"],
                 topology_m4.ms["supplier2"],
                 topology_m4.ms["supplier3"])
    for inst in suppliers:
        replica = Replicas(inst).get(DEFAULT_SUFFIX)
        log.info('check_ruvs for replica %s:%s (suffix:rid)'
                 % (replica.get_suffix(), replica.get_rid()))
        cleaned = False
        # Poll the RUV up to 20 times, waiting 5s whenever the rid is
        # still present.
        for _ in range(20):
            if m4rid in replica.get_ruv()._rids:
                time.sleep(5)
            else:
                cleaned = True
                break
        if not cleaned:
            raise Exception("Supplier %s was not cleaned in time." % inst.serverid)
    return True
def _create_changelog_dump(topo):
    """Dump changelog using nss5task and check if ldap operations are logged

    :param topo: topology fixture providing the supplier instance
    :returns: path of the generated changelog ldif file
    :raises AssertionError: if no changelog ldif file was produced
    """
    log.info(
        'Dump changelog using nss5task and check if ldap operations are logged')
    if ds_supports_new_changelog():
        changelog_dir = topo.ms['supplier1'].get_ldif_dir()
        changelog_end = '_cl.ldif'
    else:
        changelog_dir = topo.ms['supplier1'].get_changelog_dir()
        changelog_end = '.ldif'
    replicas = Replicas(topo.ms["supplier1"])
    replica = replicas.get(DEFAULT_SUFFIX)

    # Remove stale dumps.  Collect the matches first so unrelated files in
    # the directory cannot trigger the "no files present" branch once per
    # non-matching entry (the original per-file else misfired that way).
    log.info('Remove ldif files, if present in: {}'.format(changelog_dir))
    stale = [name for name in os.listdir(changelog_dir)
             if name.endswith(changelog_end)]
    if not stale:
        log.info('No existing changelog ldif files present')
    for name in stale:
        changelog_file = os.path.join(changelog_dir, name)
        try:
            os.remove(changelog_file)
        except OSError as e:
            log.fatal('Failed to remove ldif file: {}'.format(changelog_file))
            raise e
        log.info('Existing changelog ldif file: {} removed'.format(changelog_file))

    log.info('Running nsds5task to dump changelog database to a file')
    replica.begin_task_cl2ldif()

    # Return the first dump file found; only fail after scanning the whole
    # directory (the original could assert on the first non-matching file).
    log.info('Check if changelog ldif file exist in: {}'.format(changelog_dir))
    for name in os.listdir(changelog_dir):
        if name.endswith(changelog_end):
            changelog_ldif = os.path.join(changelog_dir, name)
            log.info('Changelog ldif file exist: {}'.format(changelog_ldif))
            return changelog_ldif
    log.fatal('Changelog ldif file does not exist in: {}'.format(changelog_dir))
    assert False
def check_ruvs(msg, topology_m4, m4rid):
    """Check masters 1- 3 for master 4's rid."""
    masters = (topology_m4.ms["master1"],
               topology_m4.ms["master2"],
               topology_m4.ms["master3"])
    for inst in masters:
        replica = Replicas(inst).get(DEFAULT_SUFFIX)
        cleaned = False
        # Poll the RUV up to 10 times, waiting 5s whenever the rid is
        # still present.
        for _ in range(10):
            if m4rid in replica.get_ruv()._rids:
                time.sleep(5)
            else:
                cleaned = True
                break
        if not cleaned:
            raise Exception("Master %s was not cleaned in time." % inst.serverid)
    return True
def test_multiple_changelogs(topo):
    """Test the multiple suffixes can be replicated with the new per backend changelog.

    :id: eafcdb57-4ea2-4887-a0a8-9e4d295f4f4d
    :setup: Supplier Instance, Consumer Instance
    :steps:
        1. Create s second suffix
        2. Enable replication for second backend
        3. Perform some updates on both backends and make sure replication
           is working for both backends
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    supplier = topo.ms['supplier1']
    consumer = topo.cs['consumer1']

    # Create second suffix dc=second_backend on both replicas
    for inst in [supplier, consumer]:
        # Create the backends
        props = {'cn': 'secondRoot',
                 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        be.create(properties=props)
        be.create_sample_entries('001004002')

    # Setup replication for second suffix
    repl = ReplicationManager(SECOND_SUFFIX)
    repl.create_first_supplier(supplier)
    repl.join_consumer(supplier, consumer)

    # Test replication works for each backend
    for suffix in [DEFAULT_SUFFIX, SECOND_SUFFIX]:
        replicas = Replicas(supplier)
        replica = replicas.get(suffix)
        log.info("Testing replication for: " + suffix)
        assert replica.test_replication([consumer])
def _get_replica_generation(instance, suffix):
    """Return the RUV data generation for *suffix* on *instance*."""
    replica = Replicas(instance).get(suffix)
    return replica.get_ruv()._data_generation
def test_dsconf_replication_monitor(topology_m2, set_log_file):
    """Test replication monitor that was ported from legacy tools

    :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
    :setup: 2 MM topology
    :steps:
        1. Create DS instance
        2. Run replication monitor with connections option
        3. Run replication monitor with aliases option
        4. Run replication monitor with --json option
        5. Run replication monitor with .dsrc file created
        6. Run replication monitor with connections option as if using dsconf CLI
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    m1 = topology_m2.ms["master1"]
    m2 = topology_m2.ms["master2"]

    # Enable ldapi if not already done.
    for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]:
        if not inst.can_autobind():
            # Update ns-slapd instance
            inst.config.set('nsslapd-ldapilisten', 'on')
            inst.config.set('nsslapd-ldapiautobind', 'on')
            inst.restart()
    # Ensure that updates have been sent both ways.
    replicas = Replicas(m1)
    replica = replicas.get(DEFAULT_SUFFIX)
    replica.test_replication([m2])
    replicas = Replicas(m2)
    replica = replicas.get(DEFAULT_SUFFIX)
    replica.test_replication([m1])

    # Expected output fragments for the various monitor invocations
    alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
                     'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']

    connection_content = 'Supplier: ' + m1.host + ':' + str(m1.port)
    content_list = ['Replica Root: dc=example,dc=com',
                    'Replica ID: 1',
                    'Replica Status: Available',
                    'Max CSN',
                    'Status For Agreement: "002" (' + m2.host + ':' + str(m2.port) + ')',
                    'Replica Enabled: on',
                    'Update In Progress: FALSE',
                    'Last Update Start:',
                    'Last Update End:',
                    'Number Of Changes Sent:',
                    'Number Of Changes Skipped: None',
                    'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
                    'Last Init Start:',
                    'Last Init End:',
                    'Last Init Status:',
                    'Reap Active: 0',
                    'Replication Status: In Synchronization',
                    'Replication Lag Time:',
                    'Supplier: ',
                    m2.host + ':' + str(m2.port),
                    'Replica Root: dc=example,dc=com',
                    'Replica ID: 2',
                    'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port) + ')']

    error_list = ['consumer (Unavailable)',
                  'Failed to retrieve database RUV entry from consumer']

    json_list = ['type',
                 'list',
                 'items',
                 'name',
                 m1.host + ':' + str(m1.port),
                 'data',
                 '"replica_id": "1"',
                 '"replica_root": "dc=example,dc=com"',
                 '"replica_status": "Available"',
                 'maxcsn',
                 'agmts_status',
                 'agmt-name',
                 '002',
                 'replica',
                 m2.host + ':' + str(m2.port),
                 'replica-enabled',
                 'update-in-progress',
                 'last-update-start',
                 'last-update-end',
                 'number-changes-sent',
                 'number-changes-skipped',
                 'last-update-status',
                 'Error (0) Replica acquired successfully: Incremental update succeeded',
                 'last-init-start',
                 'last-init-end',
                 'last-init-status',
                 'reap-active',
                 'replication-status',
                 'In Synchronization',
                 'replication-lag-time',
                 '"replica_id": "2"',
                 '001',
                 m1.host + ':' + str(m1.port)]

    connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
                   m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
    args = FakeArgs()
    args.connections = connections
    args.aliases = None
    args.json = False

    log.info('Run replication monitor with connections option')
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
    check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)

    # Prepare the data for next tests
    aliases = ['M1=' + host_m1 + ':' + str(m1.port),
               'M2=' + host_m2 + ':' + str(m2.port)]

    alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
                     'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']

    dsrc_content = '[repl-monitor-connections]\n' \
                   'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
                   'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
                   '\n' \
                   '[repl-monitor-aliases]\n' \
                   'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
                   'M2 = ' + host_m2 + ':' + str(m2.port)

    log.info('Run replication monitor with aliases option')
    args.aliases = aliases
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, alias_content)

    log.info('Run replication monitor with --json option')
    args.aliases = None
    args.json = True
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(json_list)

    # Write a .dsrc file so the monitor can pick up connections/aliases
    # from configuration instead of CLI arguments.
    with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
        f.write(dsrc_content)
    args.connections = None
    args.aliases = None
    args.json = False

    log.info('Run replication monitor when .dsrc file is present with content')
    get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, alias_content)
    os.remove(os.path.expanduser(DSRC_HOME))

    log.info('Run replication monitor with connections option as if using dsconf CLI')
    # Perform same test than steps 2 test but without using directly the topology instance.
    # but with an instance similar to those than dsconf cli generates:
    # step 2 args
    args.connections = connections
    args.aliases = None
    args.json = False
    # args needed to generate an instance with dsrc_arg_concat
    args.instance = 'master1'
    args.basedn = None
    args.binddn = None
    args.bindpw = None
    args.pwdfile = None
    args.prompt = False
    args.starttls = False
    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, True, args)
    get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args)
    check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
def get_consumer_maxcsn(self, binddn=None, bindpw=None):
    """Attempt to get the consumer's maxcsn from its database RUV entry

    :param binddn: Specifies a specific bind DN to use when contacting the remote consumer
    :type binddn: str
    :param bindpw: Password for the bind DN
    :type bindpw: str
    :returns: CSN string if found, otherwise "Unavailable" is returned
    :raises ldap.INVALID_CREDENTIALS: if the consumer rejects the credentials
    """
    host = self.get_attr_val_utf8(AGMT_HOST)
    port = self.get_attr_val_utf8(AGMT_PORT)
    suffix = self.get_attr_val_utf8(REPL_ROOT)
    protocol = self.get_attr_val_utf8('nsds5replicatransportinfo').lower()
    result_msg = "Unavailable"

    # If we are using LDAPI we need to provide the credentials, otherwise
    # use the existing credentials
    if binddn is None:
        binddn = self._instance.binddn
    if bindpw is None:
        bindpw = self._instance.bindpw

    # Get the replica id from supplier to compare to the consumer's rid
    from lib389.replica import Replicas
    replicas = Replicas(self._instance)
    replica = replicas.get(suffix)
    rid = replica.get_attr_val_utf8(REPL_ID)

    # Open a connection to the consumer
    consumer = DirSrv(verbose=self._instance.verbose)
    args_instance[SER_HOST] = host
    if protocol == "ssl" or protocol == "ldaps":
        args_instance[SER_SECURE_PORT] = int(port)
    else:
        args_instance[SER_PORT] = int(port)
    args_instance[SER_ROOT_DN] = binddn
    args_instance[SER_ROOT_PW] = bindpw
    args_standalone = args_instance.copy()
    consumer.allocate(args_standalone)
    try:
        consumer.open()
    except ldap.INVALID_CREDENTIALS:
        # Bad credentials are the caller's problem — propagate as-is
        raise
    except ldap.LDAPError as e:
        self._log.debug(
            'Connection to consumer ({}:{}) failed, error: {}'.format(
                host, port, e))
        return result_msg

    # Search for the tombstone RUV entry
    try:
        entry = consumer.search_s(suffix, ldap.SCOPE_SUBTREE,
                                  REPLICA_RUV_FILTER, ['nsds50ruv'])
        if not entry:
            self._log.debug(
                "Failed to retrieve database RUV entry from consumer")
        else:
            elements = ensure_list_str(entry[0].getValues('nsds50ruv'))
            for ruv in elements:
                if ('replica %s ' % rid) in ruv:
                    # Matching replica element: the maxcsn is the fifth
                    # whitespace-separated field, when present
                    ruv_parts = ruv.split()
                    if len(ruv_parts) == 5:
                        result_msg = ruv_parts[4]
                    break
    except ldap.INVALID_CREDENTIALS:
        raise
    except ldap.LDAPError as e:
        self._log.debug('Failed to search for the suffix ' +
                        '({}) consumer ({}:{}) failed, error: {}'.format(
                            suffix, host, port, e))
    finally:
        # Always release the consumer connection — the original leaked it
        # when INVALID_CREDENTIALS was re-raised from the search.
        consumer.close()
    return result_msg
def test_multiple_changelogs_export_import(topo):
    """Test that we can export and import the replication changelog

    :id: b74fcaaf-a13f-4ee0-98f9-248b281f8700
    :setup: Supplier Instance, Consumer Instance
    :steps:
        1. Create s second suffix
        2. Enable replication for second backend
        3. Perform some updates on a backend, and export the changelog
        4. Do an export and import while the server is idle
        5. Do an import while the server is under load
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    SECOND_SUFFIX = 'dc=second_suffix'
    supplier = topo.ms['supplier1']
    consumer = topo.cs['consumer1']
    supplier.config.set('nsslapd-errorlog-level', '0')

    # Create second suffix dc=second_backend on both replicas
    for inst in [supplier, consumer]:
        # Create the backends; tolerate reruns where they already exist
        props = {'cn': 'secondRoot',
                 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        try:
            be.create(properties=props)
            be.create_sample_entries('001004002')
        except ldap.UNWILLING_TO_PERFORM:
            pass

    # Setup replication for second suffix; tolerate reruns
    try:
        repl = ReplicationManager(SECOND_SUFFIX)
        repl.create_first_supplier(supplier)
        repl.join_consumer(supplier, consumer)
    except ldap.ALREADY_EXISTS:
        pass

    # Put the replica under load, and export the changelog
    replicas = Replicas(supplier)
    replica = replicas.get(DEFAULT_SUFFIX)
    doMods1 = DoMods(supplier, task="export")
    doMods1.start()
    replica.begin_task_cl2ldif()
    doMods1.join()
    replica.task_finished()

    # allow some time to pass, and test replication
    time.sleep(1)
    assert replica.test_replication([consumer])

    # While idle, go an export and import, and make sure replication still works
    log.info("Testing idle server with CL export and import...")
    replica.begin_task_cl2ldif()
    replica.task_finished()
    replica.begin_task_ldif2cl()
    replica.task_finished()
    assert replica.test_replication([consumer])

    # stability test, put the replica under load, import the changelog, and make
    # sure server did not crash.
    log.info("Testing busy server with CL import...")
    doMods2 = DoMods(supplier, task="import")
    doMods2.start()
    replica.begin_task_ldif2cl()
    doMods2.join()
    replica.task_finished()

    # Replication will be broken so no need to test it. This is just make sure
    # the import works, and the server is stable
    assert supplier.status()
    assert consumer.status()