def __init__(self, instance, dn=None):
    """Initialise a UserAccount object for managing a posix/inetOrgPerson user entry.

    :param instance: The DirSrv instance this account belongs to.
    :param dn: Optional dn of an existing entry to wrap.
    """
    super(UserAccount, self).__init__(instance, dn)
    self._rdn_attribute = RDN
    # NOTE: taken from module constants rather than derived from the schema.
    self._must_attributes = MUST_ATTRIBUTES
    classes = [
        'top',
        'account',
        'posixaccount',
        'inetOrgPerson',
        'organizationalPerson',
    ]
    # 1.3.7 replaced inetUser with nsMemberOf for memberOf support.
    classes.append('inetUser' if ds_is_older('1.3.7') else 'nsMemberOf')
    # nsAccount only exists in the 1.4.0+ schema.
    if not ds_is_older('1.4.0'):
        classes.append('nsAccount')
    self._create_objectclasses = classes
    # Operational attributes that must not take part in entry comparison.
    self._compare_exclude = self._compare_exclude + [
        'nsUniqueId',
        'modifyTimestamp',
        'createTimestamp',
        'entrydn',
    ]
    self._protected = False
def test_ssl_transport(tls_client_auth):
    """Test different combinations for nsDS5ReplicaTransportInfo values

    :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2
    :setup: Two master replication, enabled TLS client auth
    :steps:
        1. Set nsDS5ReplicaTransportInfoCheck: SSL or StartTLS or TLS
        2. Restart the instance
        3. Check that replication works
        4. Set nsDS5ReplicaTransportInfoCheck: LDAPS back
    :expectedresults:
        1. Success
        2. Success
        3. Replication works
        4. Success
    """

    m1 = tls_client_auth.ms['master1']
    m2 = tls_client_auth.ms['master2']
    repl = ReplicationManager(DEFAULT_SUFFIX)
    # Grab the one agreement on each side so both directions get reconfigured.
    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]
    agmt_m2 = replica_m2.get_agreements().list()[0]

    # Older servers only understand the legacy 'TLS' (StartTLS over plain port)
    # value; newer ones also accept 'SSL' and 'StartTLS'.  The boolean marks
    # whether the secure (ssl) port must be used for that transport.
    if ds_is_older('1.4.0.6'):
        check_list = (('TLS', False), )
    else:
        check_list = (('SSL', True), ('StartTLS', False), ('TLS', False))

    for transport, secure_port in check_list:
        agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', transport),
                             ('nsDS5ReplicaPort', '{}'.format(m2.port if not secure_port else m2.sslport)))
        agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', transport),
                             ('nsDS5ReplicaPort', '{}'.format(m1.port if not secure_port else m1.sslport)))
        repl.test_replication_topology(tls_client_auth)

    # Restore the fixture's original secure transport ('SSL' pre-1.4.0.6,
    # 'LDAPS' afterwards) and re-verify replication still works.
    if ds_is_older('1.4.0.6'):
        agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', 'SSL'),
                             ('nsDS5ReplicaPort', str(m2.sslport)))
        agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', 'SSL'),
                             ('nsDS5ReplicaPort', str(m1.sslport)))
    else:
        agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', 'LDAPS'),
                             ('nsDS5ReplicaPort', str(m2.sslport)))
        agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', 'LDAPS'),
                             ('nsDS5ReplicaPort', str(m1.sslport)))
    repl.test_replication_topology(tls_client_auth)
def __init__(self, instance, dn=None):
    """Initialise the LDBM database monitor object.

    :param instance: The DirSrv instance to monitor.
    :param dn: Unused; the monitor dn is fixed to DN_MONITOR_LDBM.
    """
    super(MonitorLDBM, self).__init__(instance=instance)
    self._dn = DN_MONITOR_LDBM
    # Database cache counters present on every supported version.
    keys = [
        'dbcachehits',
        'dbcachetries',
        'dbcachehitratio',
        'dbcachepagein',
        'dbcachepageout',
        'dbcacheroevict',
        'dbcacherwevict',
    ]
    # From 1.4.0 the normalized DN cache statistics are reported here as well.
    if not ds_is_older("1.4.0"):
        keys += [
            'normalizeddncachetries',
            'normalizeddncachehits',
            'normalizeddncachemisses',
            'normalizeddncachehitratio',
            'normalizeddncacheevictions',
            'currentnormalizeddncachesize',
            'maxnormalizeddncachesize',
            'currentnormalizeddncachecount',
            'normalizeddncachethreadsize',
            'normalizeddncachethreadslots',
        ]
    self._backend_keys = keys
def __init__(self, instance, dn=None):
    """Initialise a monitor object for a single LDBM backend.

    :param instance: The DirSrv instance to monitor.
    :param dn: The dn of the backend monitor entry.
    """
    super(MonitorBackend, self).__init__(instance=instance, dn=dn)
    # Entry cache and DN cache counters common to all versions.
    self._backend_keys = [
        'readonly',
        'entrycachehits',
        'entrycachetries',
        'entrycachehitratio',
        'currententrycachesize',
        'maxentrycachesize',
        'currententrycachecount',
        'maxentrycachecount',
        'dncachehits',
        'dncachetries',
        'dncachehitratio',
        'currentdncachesize',
        'maxdncachesize',
        'currentdncachecount',
        'maxdncachecount',
    ]
    # NOTE(review): deliberately `ds_is_older` (not `not ds_is_older`) —
    # presumably the normalized DN cache counters only live on the per-backend
    # monitor entry before 1.4.0, and moved to the LDBM monitor afterwards.
    # Confirm against MonitorLDBM, which adds these keys only on 1.4.0+.
    if ds_is_older("1.4.0"):
        self._backend_keys.extend([
            'normalizeddncachetries',
            'normalizeddncachehits',
            'normalizeddncachemisses',
            'normalizeddncachehitratio',
            'currentnormalizeddncachesize',
            'maxnormalizeddncachesize',
            'currentnormalizeddncachecount'
        ])
def test_extract_pemfiles(tls_client_auth):
    """Test TLS client authentication between two masters operates
    as expected with 'on' and 'off' options of nsslapd-extract-pemfiles

    :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e1
    :setup: Two master replication, enabled TLS client auth
    :steps:
        1. Check that nsslapd-extract-pemfiles default value is right
        2. Check that replication works with both 'on' and 'off' values
    :expectedresults:
        1. Success
        2. Replication works
    """

    m1 = tls_client_auth.ms['master1']
    m2 = tls_client_auth.ms['master2']
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # The default for pem-file extraction flipped from 'off' to 'on' in 1.3.7.
    if ds_is_older('1.3.7'):
        default_val = 'off'
    else:
        default_val = 'on'
    attr_val = m1.config.get_attr_val_utf8('nsslapd-extract-pemfiles')
    log.info("Check that nsslapd-extract-pemfiles is {}".format(default_val))
    assert attr_val == default_val

    for extract_pemfiles in ('on', 'off'):
        # BUG FIX: this message previously had a '{}' placeholder but no
        # .format() call, so the value was never interpolated into the log.
        log.info("Set nsslapd-extract-pemfiles = '{}' and check replication works".format(extract_pemfiles))
        # Apply on both masters and prove replication still flows.
        m1.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
        m2.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
        repl.test_replication_topology(tls_client_auth)
def __init__(self, instance, dn=None):
    """Initialise an nsAdminGroup object.

    :param instance: The DirSrv instance this entry belongs to.
    :param dn: Optional dn of an existing entry to wrap.
    """
    super(nsAdminGroup, self).__init__(instance, dn)
    self._rdn_attribute = RDN
    self._must_attributes = MUST_ATTRIBUTES
    classes = ['top', 'nsAdminGroup']
    # 1.3.7 replaced inetUser with nsMemberOf for memberOf support.
    classes.append('inetUser' if ds_is_older('1.3.7') else 'nsMemberOf')
    # nsAccount only exists in the 1.4.0+ schema.
    if not ds_is_older('1.4.0'):
        classes.append('nsAccount')
    self._create_objectclasses = classes
    # Operational attributes that must not take part in entry comparison.
    self._compare_exclude = self._compare_exclude + [
        'nsUniqueId',
        'modifyTimestamp',
        'createTimestamp',
        'entrydn',
    ]
    self._protected = False
def runUpgrade(prefix, online=True):
    '''
    Run "setup-ds.pl --update"  We simply pass in one DirSrv instance, and
    this will update all the instances that are in this prefix.  For the
    update to work we must fix/adjust the permissions of the scripts in:

        /prefix/lib[64]/dirsrv/slapd-INSTANCE/

    :param prefix: Install prefix the instances live under.
    :param online: If True, answer the interactive prompts with "online"
                   and supply Directory Manager credentials per server;
                   otherwise answer "offline".
    '''
    if ds_is_older('1.4.0'):
        libdir = os.path.join(_ds_paths.lib_dir, 'dirsrv')

        # Gather all the instances so we can adjust the permissions, otherwise
        servers = []
        path = os.path.join(_ds_paths.sysconf_dir, 'dirsrv')
        for files in os.listdir(path):
            if files.startswith('slapd-') and not files.endswith('.removed'):
                servers.append(os.path.join(libdir, files))

        if len(servers) == 0:
            # This should not happen
            log.fatal('runUpgrade: no servers found!')
            assert False

        '''
        The setup script calls things like /lib/dirsrv/slapd-instance/db2bak,
        etc, and when we run the setup perl script it gets permission denied
        as the default permissions are 750.  Adjust the permissions to 755.
        '''
        for instance in servers:
            for files in os.listdir(instance):
                # BUG FIX: the mode must be the octal literal 0o755.  The old
                # decimal 755 (== 0o1363) set nonsense permission bits instead
                # of rwxr-xr-x.
                os.chmod(os.path.join(instance, files), 0o755)

        # Run the "upgrade"
        try:
            prog = os.path.join(_ds_paths.sbin_dir, PATH_SETUP_DS)
            process = subprocess.Popen([prog, '--update'], shell=False,
                                       stdin=subprocess.PIPE)
            # Answer the interactive questions, as "--update" currently does
            # not work with INF files
            process.stdin.write(b'yes\n')
            if (online):
                process.stdin.write(b'online\n')
                for x in servers:
                    process.stdin.write(ensure_bytes(DN_DM + '\n'))
                    process.stdin.write(ensure_bytes(PW_DM + '\n'))
            else:
                process.stdin.write(b'offline\n')
            process.stdin.close()
            process.wait()
            if process.returncode != 0:
                log.fatal('runUpgrade failed! Error: %s ' % process.returncode)
                assert (False)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not intercepted here; the failure is logged and re-raised.
            log.fatal('runUpgrade failed!')
            raise
    else:
        # 1.4.0+ has no perl setup script; nothing to do.
        pass
def test_ssl_version_range(topo):
    """Test that sslVersionMin/sslVersionMax changes are applied after a restart

    :id: bc400f54-3966-49c8-b640-abbf4fb2377e
    :customerscenario: True
    :setup: Standalone instance with TLS enabled
    :steps:
        1. Get current default range
        2. Set sslVersionMin and verify it is applied after a restart
        3. Set sslVersionMax and verify it is applied after a restart
        4. Sanity test all the min/max versions
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    topo.standalone.enable_tls()
    enc = Encryption(topo.standalone)
    default_min = enc.get_attr_val_utf8('sslVersionMin')
    default_max = enc.get_attr_val_utf8('sslVersionMax')
    log.info(f"default min: {default_min} max: {default_max}")

    if DEBUGGING:
        topo.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')

    # Test that setting the min version is applied after a restart.
    # (Renamed locals from min/max to avoid shadowing the builtins.)
    enc.replace('sslVersionMin', default_max)
    enc.replace('sslVersionMax', default_max)
    topo.standalone.restart()
    min_val = enc.get_attr_val_utf8('sslVersionMin')
    assert min_val == default_max

    # Test that setting the max version is applied after a restart
    enc.replace('sslVersionMin', default_min)
    enc.replace('sslVersionMax', default_min)
    topo.standalone.restart()
    max_val = enc.get_attr_val_utf8('sslVersionMax')
    assert max_val == default_min

    # 389-ds-base-1.4.3 == Fedora 32, 389-ds-base-1.4.4 == Fedora 33
    # Starting from Fedora 33, cryptographic protocols (TLS 1.0 and TLS 1.1) were moved to LEGACY
    # So we should not check for the policies with our DEFAULT crypto setup
    # https://fedoraproject.org/wiki/Changes/StrongCryptoSettings2
    if ds_is_older('1.4.4'):
        ssl_versions = [('sslVersionMin', ['TLS1.0', 'TLS1.1', 'TLS1.2', 'TLS1.0']),
                        ('sslVersionMax', ['TLS1.0', 'TLS1.1', 'TLS1.2'])]
    else:
        ssl_versions = [('sslVersionMin', ['TLS1.2']),
                        ('sslVersionMax', ['TLS1.2', 'TLS1.3'])]

    # Sanity test all the min/max versions
    for attr, versions in ssl_versions:
        for version in versions:
            # Test that the setting is correctly applied after a restart
            enc.replace(attr, version)
            topo.standalone.restart()
            current_val = enc.get_attr_val_utf8(attr)
            assert current_val == version
def __init__(self, instance, dn=None):
    """Initialise the LDBM database monitor object.

    Wraps the fixed DN_MONITOR_LDBM entry and delegates the low-level
    database statistics to a MonitorDatabase instance.

    :param instance: The DirSrv instance to monitor.
    :param dn: Unused; the monitor dn is fixed to DN_MONITOR_LDBM.
    """
    super(MonitorLDBM, self).__init__(instance=instance)
    self._dn = DN_MONITOR_LDBM
    # Separate monitor object for the per-database statistics entry.
    self._db_mon = MonitorDatabase(instance)
    # Database cache counters read from this entry directly.
    self._backend_keys = [
        'dbcachehits',
        'dbcachetries',
        'dbcachehitratio',
        'dbcachepagein',
        'dbcachepageout',
        'dbcacheroevict',
        'dbcacherwevict',
    ]
    # Attributes fetched via the MonitorDatabase object (libdb statistics).
    self._db_mon_keys = [
        'nsslapd-db-abort-rate',
        'nsslapd-db-active-txns',
        'nsslapd-db-cache-hit',
        'nsslapd-db-cache-try',
        'nsslapd-db-cache-region-wait-rate',
        'nsslapd-db-cache-size-bytes',
        'nsslapd-db-clean-pages',
        'nsslapd-db-commit-rate',
        'nsslapd-db-deadlock-rate',
        'nsslapd-db-dirty-pages',
        'nsslapd-db-hash-buckets',
        'nsslapd-db-hash-elements-examine-rate',
        'nsslapd-db-hash-search-rate',
        'nsslapd-db-lock-conflicts',
        'nsslapd-db-lock-region-wait-rate',
        'nsslapd-db-lock-request-rate',
        'nsslapd-db-lockers',
        'nsslapd-db-configured-locks',
        'nsslapd-db-current-locks',
        'nsslapd-db-max-locks',
        'nsslapd-db-current-lock-objects',
        'nsslapd-db-max-lock-objects',
        'nsslapd-db-log-bytes-since-checkpoint',
        'nsslapd-db-log-region-wait-rate',
        'nsslapd-db-log-write-rate',
        'nsslapd-db-longest-chain-length',
        'nsslapd-db-page-create-rate',
        'nsslapd-db-page-read-rate',
        'nsslapd-db-page-ro-evict-rate',
        'nsslapd-db-page-rw-evict-rate',
        'nsslapd-db-page-trickle-rate',
        'nsslapd-db-page-write-rate',
        'nsslapd-db-pages-in-use',
        'nsslapd-db-txn-region-wait-rate',
    ]
    # From 1.4.0 the normalized DN cache statistics are reported here as well.
    if not ds_is_older("1.4.0", instance=instance):
        self._backend_keys.extend([
            'normalizeddncachetries',
            'normalizeddncachehits',
            'normalizeddncachemisses',
            'normalizeddncachehitratio',
            'normalizeddncacheevictions',
            'currentnormalizeddncachesize',
            'maxnormalizeddncachesize',
            'currentnormalizeddncachecount',
            'normalizeddncachethreadsize',
            'normalizeddncachethreadslots'
        ])
def __init__(self, instance, dn=None):
    """Initialise a UniqueGroup (groupOfUniqueNames) object.

    :param instance: The DirSrv instance this entry belongs to.
    :param dn: Optional dn of an existing entry to wrap.
    """
    super(UniqueGroup, self).__init__(instance, dn)
    self._rdn_attribute = RDN
    self._must_attributes = MUST_ATTRIBUTES
    classes = ['top', 'groupOfUniqueNames']
    # nsMemberOf is only available from 1.3.7 onwards.
    if not ds_is_older('1.3.7'):
        classes.append('nsMemberOf')
    self._create_objectclasses = classes
    self._protected = False
def memberof_setup(topo, request):
    """Configure required plugins and restart the server"""

    log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server')
    topo.standalone.simple_bind_s(DN_DM, PASSWORD)
    try:
        topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    except ldap.LDAPError as e:
        log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF))
        raise e
    try:
        topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
        topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
    except ldap.LDAPError as e:
        log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER))
        raise e

    log.info('Change config values for db-locks and dbcachesize to import large ldif files')
    if ds_is_older('1.3.6'):
        # Pre-1.3.6 cannot change these attributes online; edit dse.ldif
        # with the server stopped.
        topo.standalone.stop(timeout=10)
        dse_ldif = DSEldif(topo.standalone)
        try:
            dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000')
            dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-dbcachesize', '10000000')
        except Exception:
            # Narrowed from a bare "except:"; still logged and re-raised.
            log.error('Failed to replace cn=config values of db-locks and dbcachesize')
            raise
        topo.standalone.start(timeout=10)
    else:
        try:
            # BUG FIX: python-ldap 3 (python 3) requires modlist values to be
            # bytes, not str.
            topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', b'100000')])
            topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', b'0')])
            topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', b'10000000')])
        except ldap.LDAPError as e:
            # BUG FIX: LDAPError has no .message attribute on python 3;
            # format the exception itself.
            log.error('Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize {}'.format(e))
            raise e
        topo.standalone.restart(timeout=10)

    def fin():
        # Teardown: disable the plugins again and restart.
        log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER))
        topo.standalone.simple_bind_s(DN_DM, PASSWORD)
        try:
            topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
            topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY)
            topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER)
        except ldap.LDAPError as e:
            # BUG FIX: e.message['desc'] does not exist on python 3.
            log.error('Failed to disable plugins, {}'.format(e))
            assert False
        topo.standalone.restart(timeout=10)

    request.addfinalizer(fin)
def __init__(self, instance, dn=None):
    """Initialise a PosixGroup (groupOfNames + posixGroup) object.

    :param instance: The DirSrv instance this entry belongs to.
    :param dn: Optional dn of an existing entry to wrap.
    """
    super(PosixGroup, self).__init__(instance, dn)
    self._rdn_attribute = RDN
    # NOTE: taken from module constants rather than derived from the schema.
    self._must_attributes = MUST_ATTRIBUTES
    classes = ['top', 'groupOfNames', 'posixGroup']
    # nsMemberOf is only available from 1.3.7 onwards.
    if not ds_is_older('1.3.7'):
        classes.append('nsMemberOf')
    self._create_objectclasses = classes
    self._protected = False
def __init__(self, instance, dn='cn=changelog5,cn=config'):
    """Initialise the changelog5 configuration entry wrapper.

    :param instance: The DirSrv instance this entry belongs to.
    :param dn: The changelog configuration dn (default cn=changelog5,cn=config).
    """
    super(Changelog5, self).__init__(instance, dn)
    self._rdn_attribute = 'cn'
    self._must_attributes = ['cn', 'nsslapd-changelogdir']
    # Pre-1.4.0 servers have no nsChangelogConfig objectclass, so the
    # entry falls back to extensibleobject there.
    if ds_is_older('1.4.0'):
        self._create_objectclasses = ['top', 'extensibleobject']
    else:
        self._create_objectclasses = ['top', 'nsChangelogConfig']
    # Configuration entry: protect it from accidental deletion.
    self._protected = True
def __init__(self, instance, dn=None):
    """Initialise a ServiceAccount (netscapeServer) object.

    :param instance: The DirSrv instance this entry belongs to.
    :param dn: Optional dn of an existing entry to wrap.
    """
    super(ServiceAccount, self).__init__(instance, dn)
    self._rdn_attribute = RDN
    self._must_attributes = MUST_ATTRIBUTES
    if ds_is_older('1.4.0'):
        # HACK for older versions that do not ship the updated schema:
        # fall back to extensibleobject so the entry can still be created.
        extra = ['extensibleobject']
    else:
        extra = ['nsMemberOf', 'nsAccount']
    self._create_objectclasses = ['top', 'netscapeServer'] + extra
    self._protected = False
def __init__(self, instance, dn=None):
    """Initialise an nsUserAccount (nsPerson/nsAccount) object.

    :param instance: The DirSrv instance this entry belongs to.
    :param dn: Optional dn of an existing entry to wrap.
    :raises Exception: If the server is older than 1.4.0 (no nsPerson schema).
    """
    # The nsPerson/nsAccount schema only exists from 1.4.0 onwards.
    if ds_is_older('1.4.0', instance=instance):
        raise Exception("Not supported")
    super(nsUserAccount, self).__init__(instance, dn)
    self._rdn_attribute = RDN
    # NOTE: taken from the class constant rather than derived from the schema.
    self._must_attributes = nsUserAccount._must_attributes
    self._create_objectclasses = [
        'top',
        'nsPerson',
        'nsAccount',
        'nsOrgPerson',
        'posixAccount',
    ]
    # Operational attributes that must not take part in entry comparison.
    self._compare_exclude = self._compare_exclude + [
        'nsUniqueId',
        'modifyTimestamp',
        'createTimestamp',
        'entrydn',
    ]
    self._protected = False
def __init2(self):
    """Resolve the monitor keys lazily, based on the backend database library.

    Determine the keys when really accessing the object with get_status,
    because the config attribute and connection are not yet set in DirSrv
    when __init is called.
    """
    db_lib = self._instance.get_db_lib()
    if db_lib == "bdb":
        # Berkeley DB exposes entry cache and DN cache counters.
        self._backend_keys = [
            'readonly',
            'entrycachehits',
            'entrycachetries',
            'entrycachehitratio',
            'currententrycachesize',
            'maxentrycachesize',
            'currententrycachecount',
            'maxentrycachecount',
            'dncachehits',
            'dncachetries',
            'dncachehitratio',
            'currentdncachesize',
            'maxdncachesize',
            'currentdncachecount',
            'maxdncachecount',
        ]
        # Before 1.4.0 the normalized DN cache stats also live on this entry.
        if ds_is_older("1.4.0"):
            self._backend_keys.extend([
                'normalizeddncachetries',
                'normalizeddncachehits',
                'normalizeddncachemisses',
                'normalizeddncachehitratio',
                'currentnormalizeddncachesize',
                'maxnormalizeddncachesize',
                'currentnormalizeddncachecount'
            ])
    if db_lib == "mdb":
        # lmdb backend: only the entry cache counters are available.
        # NOTE(review): any other db_lib value leaves _backend_keys unset
        # here — presumably only "bdb" and "mdb" occur; confirm.
        self._backend_keys = [
            'readonly',
            'entrycachehits',
            'entrycachetries',
            'entrycachehitratio',
            'currententrycachesize',
            'maxentrycachesize',
            'currententrycachecount',
            'maxentrycachecount',
        ]
# BUG FIX: the module used pytest (pytestmark, @pytest.fixture) and logging
# (logging.getLogger) without importing either; both imports added below.
import logging
import os

import ldap
import pytest

from lib389.utils import ds_is_older
from lib389._constants import *
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinition, AutoMembershipDefinitions, AutoMembershipRegexRule
from lib389._mapped_object import DSLdapObjects, DSLdapObject
from lib389 import agreement
from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.group import Groups, Group
from lib389.topologies import topology_st as topo
from lib389._constants import DEFAULT_SUFFIX

# Skip on older versions
pytestmark = [
    pytest.mark.tier1,
    pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")
]

DEBUGGING = os.getenv("DEBUGGING", default=False)

if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


@pytest.fixture(scope="module")
def automember_fixture(topo, request):
    """Create a test group used as the automember target."""
    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
    group = groups.create(properties={'cn': 'testgroup'})
def create_test_user(topology_st, request): log.info('Create test user') users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) test_user = users.create_test_user() def fin(): log.info('Delete test user') if test_user.exists(): test_user.delete() request.addfinalizer(fin) @pytest.mark.bz1862971 @pytest.mark.ds4281 @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): """ Test dsidm account entry-status option with account lock/unlock :id: d911bbf2-3a65-42a4-ad76-df1114caa396 :setup: Standalone instance :steps: 1. Create user account 2. Run dsidm account entry status 3. Run dsidm account lock 4. Run dsidm account entry status 5. Run dsidm account unlock 6. Run dsidm account entry status :expectedresults: 1. Success 2. The state message should be Entry State: activated
from lib389.topologies import topology_st as topology from lib389.paths import Paths from lib389.utils import ds_is_older from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin from lib389._constants import * from . import ISyncRepl, syncstate_assert default_paths = Paths() pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) @pytest.mark.skipif( ds_is_older('1.4.4.0'), reason="Sync repl does not support openldap compat in older versions") def test_syncrepl_openldap(topology): """ Test basic functionality of the openldap syncrepl compatability handler. :id: 03039178-2cc6-40bd-b32c-7d6de108828b :setup: Standalone instance :steps: 1. Enable Retro Changelog 2. Enable Syncrepl 3. Run the syncstate test to check refresh, add, delete, mod. :expectedresults:
def test_memberof_with_repl(topo):
    """Test that we allowed to enable MemberOf plugin in dedicated consumer

    :id: 60c11636-55a1-4704-9e09-2c6bcc828de4
    :setup: 1 Master - 1 Hub - 1 Consumer
    :steps:
        1. Configure replication to EXCLUDE memberof
        2. Enable memberof plugin
        3. Create users/groups
        4. Make user_0 member of group_0
        5. Checks that user_0 is memberof group_0 on M,H,C
        6. Make group_0 member of group_1 (nest group)
        7. Checks that user_0 is memberof group_0 and group_1 on M,H,C
        8. Check group_0 is memberof group_1 on M,H,C
        9. Remove group_0 from group_1
        10. Check group_0 and user_0 are NOT memberof group_1 on M,H,C
        11. Remove user_0 from group_0
        12. Check user_0 is not memberof group_0 and group_1 on M,H,C
        13. Disable memberof on C
        14. make user_0 member of group_1
        15. Checks that user_0 is memberof group_0 on M,H but not on C
        16. Enable memberof on C
        17. Checks that user_0 is memberof group_0 on M,H but not on C
        18. Run memberof fixup task
        19. Checks that user_0 is memberof group_0 on M,H,C
    :expectedresults:
        1. Configuration should be successful
        2. Plugin should be enabled
        3. Users and groups should be created
        4. user_0 should be member of group_0
        5. user_0 should be memberof group_0 on M,H,C
        6. group_0 should be member of group_1
        7. user_0 should be memberof group_0 and group_1 on M,H,C
        8. group_0 should be memberof group_1 on M,H,C
        9. group_0 from group_1 removal should be successful
        10. group_0 and user_0 should not be memberof group_1 on M,H,C
        11. user_0 from group_0 remove should be successful
        12. user_0 should not be memberof group_0 and group_1 on M,H,C
        13. memberof should be disabled on C
        14. user_0 should be member of group_1
        15. user_0 should be memberof group_0 on M,H and should not on C
        16. Enable memberof on C should be successful
        17. user_0 should be memberof group_0 on M,H should not on C
        18. memberof fixup task should be successful
        19. user_0 should be memberof group_0 on M,H,C
    """

    M1 = topo.ms["master1"]
    H1 = topo.hs["hub1"]
    C1 = topo.cs["consumer1"]

    # Step 1 & 2
    M1.config.enable_log('audit')
    config_memberof(M1)
    M1.restart()
    H1.config.enable_log('audit')
    config_memberof(H1)
    H1.restart()
    C1.config.enable_log('audit')
    config_memberof(C1)
    C1.restart()

    # Declare lists of users and groups
    test_users = []
    test_groups = []

    # Step 3
    # In for loop create users and add them in the user list
    # it creates user_0 to user_9 (range is fun)
    for i in range(10):
        CN = '%s%d' % (USER_CN, i)
        users = UserAccounts(M1, SUFFIX)
        user_props = TEST_USER_PROPERTIES.copy()
        user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN})
        testuser = users.create(properties=user_props)
        time.sleep(2)
        test_users.append(testuser)

    # In for loop create groups and add them to the group list
    # it creates group_0 to group_2 (range is fun)
    for i in range(3):
        CN = '%s%d' % (GROUP_CN, i)
        groups = Groups(M1, SUFFIX)
        testgroup = groups.create(properties={'cn': CN})
        time.sleep(2)
        test_groups.append(testgroup)

    # Step 4
    # Now start testing by adding different user to different group
    if not ds_is_older('1.3.7'):
        test_groups[0].remove('objectClass', 'nsMemberOf')

    member_dn = test_users[0].dn
    grp0_dn = test_groups[0].dn
    grp1_dn = test_groups[1].dn

    test_groups[0].add_member(member_dn)
    time.sleep(5)

    # Step 5
    for i in [M1, H1, C1]:
        _find_memberof(i, member_dn, grp0_dn)

    # Step 6
    test_groups[1].add_member(test_groups[0].dn)
    time.sleep(5)

    # Step 7
    for i in [grp0_dn, grp1_dn]:
        for inst in [M1, H1, C1]:
            _find_memberof(inst, member_dn, i)

    # Step 8
    for i in [M1, H1, C1]:
        _find_memberof(i, grp0_dn, grp1_dn)

    # Step 9
    test_groups[1].remove_member(test_groups[0].dn)
    time.sleep(5)

    # Step 10
    # For negative testcase, we are using assertionerror
    for inst in [M1, H1, C1]:
        for i in [grp0_dn, member_dn]:
            with pytest.raises(AssertionError):
                _find_memberof(inst, i, grp1_dn)

    # Step 11
    test_groups[0].remove_member(member_dn)
    time.sleep(5)

    # Step 12
    for inst in [M1, H1, C1]:
        for grp in [grp0_dn, grp1_dn]:
            with pytest.raises(AssertionError):
                _find_memberof(inst, member_dn, grp)

    # Step 13
    C1.plugins.disable(name=PLUGIN_MEMBER_OF)
    C1.restart()

    # Step 14
    test_groups[0].add_member(member_dn)
    time.sleep(5)

    # Step 15
    for i in [M1, H1]:
        _find_memberof(i, member_dn, grp0_dn)
    with pytest.raises(AssertionError):
        _find_memberof(C1, member_dn, grp0_dn)

    # Step 16
    memberof = MemberOfPlugin(C1)
    memberof.enable()
    C1.restart()

    # Step 17
    for i in [M1, H1]:
        _find_memberof(i, member_dn, grp0_dn)
    with pytest.raises(AssertionError):
        _find_memberof(C1, member_dn, grp0_dn)

    # Step 18
    memberof.fixup(SUFFIX)
    time.sleep(5)

    # Step 19
    for i in [M1, H1, C1]:
        _find_memberof(i, member_dn, grp0_dn)
from lib389.idm.user import nsUserAccounts, UserAccounts from lib389.topologies import topology_st as topology from lib389.paths import Paths from lib389.utils import ds_is_older from lib389._constants import * default_paths = Paths() pytestmark = pytest.mark.tier1 USER_PASSWORD = "******" NEW_USER_PASSWORD = "******" @pytest.mark.skipif( ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") def test_acl_default_allow_self_write_nsuser(topology): """ Testing nsusers can self write and self read. This it a sanity test so that our default entries have their aci's checked. :id: 4f0fb01a-36a6-430c-a2ee-ebeb036bd951 :setup: Standalone instance :steps: 1. Testing comparison of two different users. :expectedresults: 1. Should fail to compare
def tls_client_auth(topo_m2):
    """Enable TLS on both masters and reconfigure both agreements
    to use TLS Client auth

    :param topo_m2: Two-master replication topology fixture.
    :returns: The same topology, with TLS client auth replication configured.
    """
    m1 = topo_m2.ms['master1']
    m2 = topo_m2.ms['master2']

    # Older servers use the legacy 'SSL' transport keyword; 1.4.0.6+ uses 'LDAPS'.
    if ds_is_older('1.4.0.6'):
        transport = 'SSL'
    else:
        transport = 'LDAPS'

    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    [i.enable_tls() for i in topo_m2]

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)

    # Now change the auth type
    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]
    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m2.sslport)),
    )
    # Client-auth binds carry no bind dn.
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]
    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m1.sslport)),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    repl.test_replication_topology(topo_m2)

    return topo_m2
def test_internal_log_level_516(topology_st, add_user_log_level_516, disable_access_log_buffering):
    """Tests client initiated operations when referential integrity plugin is enabled

    :id: bee1d681-763d-4fa5-aca2-569cf93f8b71
    :setup: Standalone instance
            Configure access log level to - 512+4
            Set nsslapd-plugin-logging to on
    :steps:
        1. Configure access log level to 516
        2. Set nsslapd-plugin-logging to on
        3. Enable Referential Integrity and automember plugins
        4. Restart the server
        5. Add a test group
        6. Add a test user and add it as member of the test group
        7. Rename the test user
        8. Delete the test user
        9. Check the access logs for nested internal operation logs
    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. Operation should be successful
        4. Operation should be successful
        5. Operation should be successful
        6. Operation should be successful
        7. Operation should be successful
        8. Operation should be successful
        9. Access log should contain internal info about operations of the user
    """

    topo = topology_st.standalone
    log.info('Restart the server to flush the logs')
    topo.restart()

    # These comments contain lines we are trying to find without regex (the op numbers are just examples)
    log.info("Check the access logs for ADD operation of the user")
    # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com"
    assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*')
    # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com"
    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) '
                                    r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*')
    # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com"
    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) '
                                    r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*')
    # (Internal) op=10(1)(2) ENTRY dn="cn=group,ou=Groups,dc=example,dc=com"
    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) '
                                    r'ENTRY dn="cn=group,ou=Groups,dc=example,dc=com".*')
    # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*')
    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*')
    # (Internal) op=10(1)(1) RESULT err=0 tag=48
    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*')
    # op=10 RESULT err=0 tag=105
    assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*')

    log.info("Check the access logs for MOD operation of the user")
    # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" '
    # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com"
    assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" '
                                        'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*')
    # NOTE(review): ds_is_older is given a single tuple argument here (and
    # again below); elsewhere in the tree versions are passed as separate
    # positional args — confirm ds_is_older accepts a tuple form.
    if ds_is_older(('1.4.3.9', '1.4.4.3')):
        # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com"
        assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,'
                                        'ou=branch1,dc=example,dc=com".*')
        # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com"
        assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,'
                                        'ou=branch1,dc=example,dc=com".*')
        # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1
        assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*')
    # op=12 RESULT err=0 tag=109
    assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*')

    log.info("Check the access logs for DEL operation of the user")
    # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com"
    assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*')
    if ds_is_older(('1.4.3.9', '1.4.4.3')):
        # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com"
        assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,'
                                        'dc=example,dc=com".*')
        # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com"
        assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,'
                                        'dc=example,dc=com".*')
        # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1
        assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*')
    # op=15 RESULT err=0 tag=107
    assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*')

    log.info("Check if the other internal operations have the correct format")
    # conn=Internal(0) op=0
    assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*')
topology_st.standalone.deleteAccessLogs() # Now generate some fresh logs add_users(topology_st.standalone, 10) search_users(topology_st.standalone) log.info('Restart the server to flush the logs') topology_st.standalone.restart(timeout=10) log.info('check access log that microseconds are not present') access_log_lines = topology_st.standalone.ds_access_log.readlines() assert len(access_log_lines) > 0 assert not topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") @pytest.mark.bz1358706 @pytest.mark.ds49029 def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_access_log_buffering): """Tests server-initiated internal operations :id: 798d06fe-92e8-4648-af66-21349c20638e :setup: Standalone instance :steps: 1. Set nsslapd-plugin-logging to on 2. Configure access log level to only 0 3. Check the access logs. :expectedresults: 1. Operation should be successful
def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users, disable_access_log_buffering):
    """Test that the etime reported in the access log has a correct order of magnitude

    :id: e815cfa0-8136-4932-b50f-c3dfac34b0e6
    :setup: Standalone instance
    :steps:
        1. Unset log buffering for the access log
        2. Delete potential existing access logs
        3. Add users
        4. Search users
        5. Restart the server to flush the logs
        6. Parse the access log looking for the SRCH operation log
        7. From the SRCH string get the start time and op number of the operation
        8. From the op num find the associated RESULT string in the access log
        9. From the RESULT string get the end time and the etime for the operation
        10. Calculate the ratio between the calculated elapsed time (end time - start time) and the logged etime
    :expectedresults:
        1. access log buffering is off
        2. Previously existing access logs are deleted
        3. Users are successfully added
        4. Search operation is successful
        5. Server is restarted and logs are flushed
        6. SRCH operation log string is caught
        7. start time and op number are collected
        8. RESULT string is caught from the access log
        9. end time and etime are collected
        10. ratio between calculated elapsed time and logged etime is less or equal to 1
    """
    # NOTE(review): the return value is unused and the constructor appears to have
    # no needed side effect here — presumably a leftover; confirm before removing.
    DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX)

    log.info('add_users')
    add_users(topology_st.standalone, 30)

    log.info('search users')
    search_users(topology_st.standalone)

    log.info('parse the access logs to get the SRCH string')
    # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com.
    # match() presumably returns a list of matching lines; str(...)[1:-1] strips the
    # surrounding list brackets from its repr — TODO confirm against DirsrvAccessLog.match.
    search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1]
    assert len(search_str) > 0

    # the search_str returned looks like :
    # [23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName"

    log.info('get the operation start time from the SRCH string')
    # Here we are getting the sec.nanosec part of the date, '14.360857624' in the example above
    # (token 0 is '[23/Apr/2020:06:06:14.360857624'; the 4th ':'-field is the seconds part).
    start_time = (search_str.split()[0]).split(':')[3]

    log.info('get the OP number from the SRCH string')
    # Here we are getting the op number, 'op=93' in the above example (whitespace token index 3).
    op_num = search_str.split()[3]

    log.info('get the RESULT string matching the SRCH OP number')
    # Here we are looking at the RESULT string for the above search op, 'op=93' in this example.
    # NOTE(review): the pattern 'RESULT*' means 'RESUL' followed by zero-or-more 'T';
    # it still matches 'RESULT' lines, but '.*RESULT.*' was probably intended.
    result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1]
    assert len(result_str) > 0

    # The result_str returned looks like :
    # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017
    # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 etime=0.005836077

    log.info('get the operation end time from the RESULT string')
    # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example
    end_time = (result_str.split()[0]).split(':')[3]

    log.info('get the logged etime for the operation from the RESULT string')
    # Here we are getting the etime value, '0.005723017' in the example above.
    # Token index differs because newer servers also log wtime/optime before etime
    # (index 8 for the old format, 10 for the new one, per the examples above).
    # [:-3] drops the last three digits, truncating nanoseconds to microsecond precision.
    if ds_is_older('1.4.3.8'):
        etime = result_str.split()[8].split('=')[1][:-3]
    else:
        etime = result_str.split()[10].split('=')[1][:-3]

    log.info('Calculate the ratio between logged etime for the operation and elapsed time from its start time to its end time - should be around 1')
    # Floor division: the ratio is 0 when elapsed < etime, 1 when etime <= elapsed < 2*etime,
    # so the assertion below tolerates elapsed time up to (but not including) twice the etime.
    # Assumes start and end fall within the same minute — TODO confirm this cannot wrap.
    etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime)

    assert etime_ratio <= 1
import pytest import ldap from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG from lib389.cli_conf.backend import backend_create from lib389.cli_idm.initialise import initialise from lib389.cli_idm.group import get, create, delete, members, add_member, remove_member from lib389.cli_idm.user import create as create_user from lib389.cli_base import LogCapture, FakeArgs from lib389.tests.cli import topology_be_latest as topology from lib389.utils import ds_is_older pytestmark = pytest.mark.skipif(ds_is_older('1.4.0'), reason="Not implemented") # Topology is pulled from __init__.py def test_group_tasks(topology): # First check that our test group isn't there: topology.logcap.flush() g_args = FakeArgs() g_args.selector = 'testgroup' with pytest.raises(ldap.NO_SUCH_OBJECT): get(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args) # Create a group topology.logcap.flush() g_args.cn = 'testgroup' create(topology.standalone, DEFAULT_SUFFIX, topology.logcap.log, g_args)
# check the supported list is the same as our first check. standalone.log.info("Check that we have the original set of mechanisms") final_mechs = standalone.rootdse.supported_sasl() assert(set(final_mechs) == set(orig_mechs)) # Check it after a restart standalone.log.info("Check that we have the original set of mechanisms after a restart") standalone.restart() final_mechs = standalone.rootdse.supported_sasl() assert(set(final_mechs) == set(orig_mechs)) @pytest.mark.bz1816854 @pytest.mark.ds50869 @pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.6'), reason="May fail because of bz1816854") def test_config_set_few_mechs(topology_st): """Test that we can successfully set multiple values to nsslapd-allowed-sasl-mechanisms :id: d7c3c58b-4fbe-42ab-a8d4-9dd362916d5f :setup: Standalone instance :steps: 1. Set nsslapd-allowed-sasl-mechanisms to "PLAIN GSSAPI" 2. Verify nsslapd-allowed-sasl-mechanisms has the values :expectedresults: 1. Operation should be successful 2. Operation should be successful """ standalone = topology_st.standalone
# License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import sys import pytest from lib389 import DirSrv from lib389.cli_base import LogCapture from lib389.instance.setup import SetupDs from lib389.instance.remove import remove_ds_instance from lib389.instance.options import General2Base, Slapd2Base from lib389._constants import * from lib389.utils import ds_is_older pytestmark = [ pytest.mark.tier0, pytest.mark.skipif(ds_is_older('1.4.1.2'), reason="Needs a compatible systemd unit, see PR#50213") ] INSTANCE_PORT = 54321 INSTANCE_SECURE_PORT = 54322 INSTANCE_SERVERID = 'standalone' DEBUGGING = True MAJOR, MINOR, _, _, _ = sys.version_info class TopologyInstance(object): def __init__(self, standalone): # For these tests, we don't want to open the instance. # instance.open()
# 5 - ge search r6 = accounts.filter("(entryuuid>=%s)" % UUID_BETWEEN) assert (len(r6) == 1) # 6 - le 0 search r7 = accounts.filter("(entryuuid<=%s)" % UUID_MIN) assert (len(r7) == 0) # 7 - ge f search r8 = accounts.filter("(entryuuid>=%s)" % UUID_MAX) assert (len(r8) == 0) # 8 - export db task = be.export_ldif() task.wait() assert (task.is_complete() and task.get_exit_code() == 0) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_indexed_import_and_search(topology): """ Test that an ldif of entries containing entryUUID's can be indexed and searched correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ordering, so we check these are correct. :id: c98ee6dc-a7ee-4bd4-974d-597ea966dad9 :setup: Standalone instance :steps: 1. Import the db from the ldif 2. EQ search for an entryuuid (match) 3. EQ search for an entryuuid that does not exist 4. LE search for an entryuuid lower (1 res)
""" import os import pytest from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts, UserAccount from lib389._constants import DEFAULT_SUFFIX from lib389.config import Config from lib389.idm.group import Group from lib389.utils import ds_is_older import ldap import time pytestmark = pytest.mark.tier1 if ds_is_older('1.4'): DEFAULT_PASSWORD_STORAGE_SCHEME = 'SSHA512' else: DEFAULT_PASSWORD_STORAGE_SCHEME = 'PBKDF2_SHA256' def _create_user(topo, uid, cn, uidNumber, userpassword): """ Will Create user """ user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create( properties={ 'uid': uid, 'sn': cn.split(' ')[-1], 'cn': cn, 'givenname': cn.split(' ')[0],