def test_invalid_referint_log(topo):
    """If there is an invalid log line in the referint log, make sure the
    server does not crash at startup

    :id: 34807b5a-ab17-4281-ae48-4e3513e19145
    :setup: Standalone Instance
    :steps:
        1. Set the referint log delay
        2. Create invalid log
        3. Start the server (no crash)
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    standalone = topo.standalone

    # A non-zero update delay makes the plugin parse its own log at startup
    referint = ReferentialIntegrityPlugin(standalone)
    referint.enable()
    referint.set_update_delay('2')
    referint_log = referint.get_log_file()
    standalone.restart()

    # Replace the referint log with garbage while the server is down
    standalone.stop()
    with open(referint_log, 'w') as fh:
        fh.write("CRASH\n")

    # Startup must survive the bogus log content
    standalone.start()
    assert standalone.status()
def _enable_plugins(inst, group_dn):
    """Enable the automember, memberOf and referint plugins, create the
    automember definition the tests rely on, then restart the instance."""
    # Automember: route entries matching the filter into group_dn
    AutoMembershipPlugin(inst).enable()
    AutoMembershipDefinitions(inst).create(properties={
        'cn': 'testgroup_definition',
        'autoMemberScope': DEFAULT_SUFFIX,
        'autoMemberFilter': 'objectclass=nsAccount',
        'autoMemberDefaultGroup': group_dn,
        'autoMemberGroupingAttr': 'member:dn',
    })

    # MemberOf keeps the memberOf attribute in sync with group membership
    MemberOfPlugin(inst).enable()

    # Referint: the default configuration is sane and already covers
    # 'member' as an enforced attribute, so enabling is sufficient.
    ReferentialIntegrityPlugin(inst).enable()

    # Restart to make sure everything is enabled and good to go.
    inst.restart()
def test_healthcheck_RI_plugin_missing_indexes(topology_st):
    """Check if HealthCheck returns DSRILE0002 code

    :id: 05c55e37-bb3e-48d1-bbe8-29c980f94f10
    :setup: Standalone instance
    :steps:
        1. Create DS instance
        2. Configure the instance with Integrity Plugin
        3. Change the index type of the member attribute index to 'approx'
        4. Use HealthCheck without --json option
        5. Use HealthCheck with --json option
        6. Set the index type of the member attribute index to 'eq'
        7. Use HealthCheck without --json option
        8. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Healthcheck reports DSRILE0002 code and related details
        5. Healthcheck reports DSRILE0002 code and related details
        6. Success
        7. Healthcheck reports no issue found
        8. Healthcheck reports no issue found
    """
    RET_CODE = 'DSRILE0002'
    MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'

    standalone = topology_st.standalone

    log.info('Enable RI plugin')
    ri_plugin = ReferentialIntegrityPlugin(standalone)
    ri_plugin.disable()
    ri_plugin.enable()

    log.info('Change the index type of the member attribute index to approx')
    member_index = Index(standalone, MEMBER_DN)
    member_index.replace('nsIndexType', 'approx')

    # Both output modes must report the RI-index issue
    for as_json in (False, True):
        run_healthcheck_and_flush_log(topology_st, standalone,
                                      json=as_json, searched_code=RET_CODE)

    log.info('Set the index type of the member attribute index back to eq')
    member_index.replace('nsIndexType', 'eq')

    # With the index restored, healthcheck must come back clean
    run_healthcheck_and_flush_log(topology_st, standalone,
                                  json=False, searched_code=CMD_OUTPUT)
    run_healthcheck_and_flush_log(topology_st, standalone,
                                  json=True, searched_code=JSON_OUTPUT)
def test_healthcheck_RI_plugin_is_misconfigured(topology_st):
    """Check if HealthCheck returns DSRILE0001 code

    :id: de2e90a2-89fe-472c-acdb-e13cbca5178d
    :setup: Standalone instance
    :steps:
        1. Create DS instance
        2. Configure the instance with Integrity Plugin
        3. Set the referint-update-delay attribute of the RI plugin, to a value upper than 0
        4. Use HealthCheck without --json option
        5. Use HealthCheck with --json option
        6. Set the referint-update-delay attribute to 0
        7. Use HealthCheck without --json option
        8. Use HealthCheck with --json option
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Healthcheck reports DSRILE0001 code and related details
        5. Healthcheck reports DSRILE0001 code and related details
        6. Success
        7. Healthcheck reports no issue found
        8. Healthcheck reports no issue found
    """
    RET_CODE = 'DSRILE0001'

    standalone = topology_st.standalone

    ri_plugin = ReferentialIntegrityPlugin(standalone)
    ri_plugin.disable()
    ri_plugin.enable()

    log.info('Set the referint-update-delay attribute to a value upper than 0')
    ri_plugin.replace('referint-update-delay', '5')

    # A positive delay must be flagged in both output modes
    for as_json in (False, True):
        run_healthcheck_and_flush_log(topology_st, standalone,
                                      json=as_json, searched_code=RET_CODE)

    log.info('Set the referint-update-delay attribute back to 0')
    ri_plugin.replace('referint-update-delay', '0')

    # Back to delay 0, healthcheck must come back clean
    run_healthcheck_and_flush_log(topology_st, standalone,
                                  json=False, searched_code=CMD_OUTPUT)
    run_healthcheck_and_flush_log(topology_st, standalone,
                                  json=True, searched_code=JSON_OUTPUT)
def test_require_internal_index(topo):
    """Test that nsslapd-require-internalop-index rejects operations whose
    internal searches are unindexed

    :id: 22b94f30-59e3-4f27-89a1-c4f4be036f7f
    :setup: Standalone instance
    :steps:
        1. Set "nsslapd-require-internalop-index" to "on"
        2. Enable RI plugin, and configure it to use an attribute that is not indexed
        3. Create a user and add it a group
        4. Deleting user should be rejected as the RI plugin issues an unindexed internal search
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    # Require indexed internal operations only on the backend serving
    # DEFAULT_SUFFIX (regular client ops stay unrestricted)
    be_insts = Backends(topo.standalone).list()
    for be in be_insts:
        if be.get_attr_val_utf8_l('nsslapd-suffix') == DEFAULT_SUFFIX:
            be.set('nsslapd-require-index', 'off')
            be.set('nsslapd-require-internalop-index', 'on')

    # Configure RI plugin to enforce 'description', which has no index
    rip = ReferentialIntegrityPlugin(topo.standalone)
    rip.set('referint-membership-attr', 'description')
    rip.enable()

    # Lower the id list scan limit and create enough users so the plugin's
    # internal search exceeds it, i.e. becomes effectively unindexed
    db_cfg = DatabaseConfig(topo.standalone)
    db_cfg.set([('nsslapd-idlistscanlimit', '100')])
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    for i in range(102, 202):
        users.create_test_user(uid=i)

    # Create the user to delete and the group referencing it
    user = users.create(properties={
        'uid': 'indexuser',
        'cn': 'indexuser',
        'sn': 'user',
        'uidNumber': '1010',
        'gidNumber': '2010',
        'homeDirectory': '/home/indexuser'
    })
    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
    groups.create(properties={'cn': 'group', 'member': user.dn})

    # Restart the server so the backend/plugin settings take effect
    topo.standalone.restart()

    # Deletion must be rejected: the RI plugin's internal search on
    # 'description' is unindexed and internal-op indexing is required
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        user.delete()
def topology(request):
    """Build the default topology with the Referential Integrity plugin
    present and enabled, restarting so the change takes effect."""
    topo = default_topology(request)

    ri_plugin = ReferentialIntegrityPlugin(topo.standalone)
    if not ri_plugin.exists():
        ri_plugin.create()
    ri_plugin.enable()

    # A restart is needed for the newly enabled plugin to be active
    topo.standalone.restart()
    topo.logcap.flush()

    return topo
def test_hc_referint(topology_st):
    """Exercise the referint update-delay lint check directly."""
    plugin = ReferentialIntegrityPlugin(topology_st.standalone)
    plugin.enable()

    # delay == 0: no lint error expected
    plugin.set('referint-update-delay', '0')
    assert plugin._lint_update_delay() is None

    # non-zero delay: the DSRILE0001 result is expected
    plugin.set('referint-update-delay', '10')
    assert plugin._lint_update_delay() == DSRILE0001

    # disabled plugin: no lint error regardless of the delay value
    plugin.disable()
    assert plugin._lint_update_delay() is None
def configureRI(inst):
    """Enable referential integrity and enforce it on 'uniquemember'."""
    referint = ReferentialIntegrityPlugin(inst)
    referint.enable()
    referint.replace('referint-membership-attr', 'uniquemember')
def apply(self, inst):
    """Enable the Referential Integrity plugin with no update delay."""
    plugin = ReferentialIntegrityPlugin(inst)
    plugin.set_update_delay(0)
    plugin.enable()
def test_referential_false_failure(topo):
    """On MODRDN referential integrity can erroneously fail

    :id: f77aeb80-c4c4-471b-8c1b-4733b714778b
    :setup: Standalone Instance
    :steps:
        1. Configure the plugin
        2. Create a group
           - 1rst member the one that will be move
           - more than 128 members
           - last member is a DN containing escaped char
        3. Rename the 1rst member
    :expectedresults:
        1. should succeed
        2. should succeed
        3. should succeed
    """
    inst = topo[0]

    # Toggle the plugin so the configuration below is picked up cleanly
    ri_plugin = ReferentialIntegrityPlugin(inst)
    ri_plugin.disable()
    ri_plugin.enable()

    # Plugin configuration: enforce 'member' within the group container only
    GROUP_CONTAINER = "ou=groups,%s" % DEFAULT_SUFFIX
    ri_plugin.replace('referint-membership-attr', 'member')
    ri_plugin.replace('nsslapd-plugincontainerscope', GROUP_CONTAINER)

    # Users and a group; user1 is the member renamed at the end
    users = UserAccounts(inst, DEFAULT_SUFFIX, None)
    user1 = users.create_test_user(uid=1001)
    user2 = users.create_test_user(uid=1002)

    groups = Groups(inst, GROUP_CONTAINER, None)
    group = groups.create(properties={'cn': 'group'})
    group.add('member', user2.dn)
    group.add('member', user1.dn)

    # Add more than 128 members so that referint follows the buggy path
    for idx in range(130):
        group.add('member', add_escaped_user(inst, idx))

    # Restart so that member values are taken in the right order
    # (the last value is the escaped one)
    inst.restart()

    # If the bug is fixed, referential is able to update the member value
    user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX,
                 deloldrdn=False)
def test_ri_and_mep_cache_corruption(topology_st):
    """Test RI plugin aborts change after MEP plugin fails. This is really
    testing the entry cache for corruption

    :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995
    :setup: Standalone instance
    :steps:
        1. Enable and configure mep and ri plugins
        2. Add user and add it to a group
        3. Disable MEP plugin and remove MEP group
        4. Delete user
        5. Check that user is still a member of the group
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. It fails with NO_SUCH_OBJECT
        5. Success
    """
    # Start plugins
    topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
    mep_plugin = ManagedEntriesPlugin(topology_st.standalone)
    mep_plugin.enable()
    ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone)
    ri_plugin.enable()

    # Add our org units (MEP origin and managed containers)
    ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    # Configure MEP: template + config mapping posixAccount entries under
    # managed_people to auto-created groups under managed_groups
    mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX)
    mep_template1 = mep_templates.create(
        properties={
            'cn': 'MEP template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
            'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
        })
    mep_configs = MEPConfigs(topology_st.standalone)
    mep_configs.create(
        properties={
            'cn': 'config',
            'originScope': ou_people.dn,
            'originFilter': 'objectclass=posixAccount',
            'managedBase': ou_groups.dn,
            'managedTemplate': mep_template1.dn
        })

    # Add an entry that meets the MEP scope
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
                         rdn='ou={}'.format(ou_people.rdn))
    user = users.create(
        properties={
            'uid': 'test-user1',
            'cn': 'test-user',
            'sn': 'test-user',
            'uidNumber': '10011',
            'gidNumber': '20011',
            'homeDirectory': '/home/test-user1'
        })

    # Add group
    groups = Groups(topology_st.standalone, DEFAULT_SUFFIX)
    user_group = groups.ensure_state(properties={
        'cn': 'group',
        'member': user.dn
    })

    # Check if a managed group entry was created
    mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn))
    if not mep_group.exists():
        log.fatal("MEP group was not created for the user")
        assert False

    # Test MEP be txn pre op failure does not corrupt entry cache
    # Should get the same exception for both rename attempts
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        mep_group.rename("cn=modrdn group")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        mep_group.rename("cn=modrdn group")

    # Mess with MEP so it fails: delete the managed group behind its back
    mep_plugin.disable()
    mep_group.delete()
    mep_plugin.enable()

    # Add another group to verify entry cache is not corrupted
    test_group = groups.create(properties={'cn': 'test_group'})

    # Delete user, should fail in MEP be txn post op, and user should still be a member
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        user.delete()

    # Verify membership is intact; the failure branch retries after a restart
    # to distinguish a cache-only problem from a real membership loss
    if not user_group.is_member(user.dn):
        log.fatal("Member was incorrectly removed from the group!! Or so it seems")

        # Restart server and test again in case this was a cache issue
        topology_st.standalone.restart()
        if user_group.is_member(user.dn):
            log.info("The entry cache was corrupted")
            assert False

        assert False

    # Verify test group is still found in entry cache by deleting it
    test_group.delete()

    # Success
    log.info("Test PASSED")
def test_ri_and_mep_cache_corruption(topology_st):
    """Test RI plugin aborts change after MEP plugin fails. This is really
    testing the entry cache for corruption

    :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995
    :setup: Standalone instance
    :steps:
        1. Enable and configure mep and ri plugins
        2. Add user and add it to a group
        3. Disable MEP plugin and remove MEP group
        4. Delete user
        5. Check that user is still a member of the group
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. It fails with NOT_ALLOWED_ON_NONLEAF
        5. Success
    """
    # Add ACI so we can test that non-DM user can't delete managed entry
    domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
    ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")"
    ACI_TARGETATTR = "(targetattr = *)"
    ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) "
    ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
    domain.add('aci', ACI_BODY)

    # Start plugins
    topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
    mep_plugin = ManagedEntriesPlugin(topology_st.standalone)
    mep_plugin.enable()
    ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone)
    ri_plugin.enable()

    # Add our org units (MEP origin and managed containers)
    ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    # Configure MEP: template + config mapping posixAccount entries under
    # managed_people to auto-created groups under managed_groups
    mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX)
    mep_template1 = mep_templates.create(properties={
        'cn': 'MEP template',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'),
        'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
    })
    mep_configs = MEPConfigs(topology_st.standalone)
    mep_configs.create(properties={'cn': 'config',
                                   'originScope': ou_people.dn,
                                   'originFilter': 'objectclass=posixAccount',
                                   'managedBase': ou_groups.dn,
                                   'managedTemplate': mep_template1.dn})

    # Add an entry that meets the MEP scope
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
                         rdn='ou={}'.format(ou_people.rdn))
    user = users.create(properties={
        'uid': 'test-user1',
        'cn': 'test-user',
        'sn': 'test-user',
        'uidNumber': '10011',
        'gidNumber': '20011',
        'homeDirectory': '/home/test-user1'
    })
    # Bind as the user so later operations run as a non-DM identity
    user.reset_password(USER_PASSWORD)
    user_bound_conn = user.bind(USER_PASSWORD)

    # Add group
    groups = Groups(topology_st.standalone, DEFAULT_SUFFIX)
    user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn})

    # Check if a managed group entry was created
    mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn))
    if not mep_group.exists():
        log.fatal("MEP group was not created for the user")
        assert False

    # Test MEP be txn pre op failure does not corrupt entry cache
    # Should get the same exception for both rename attempts
    # Try to remove the entry while bound as Admin (non-DM)
    managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None)
    managed_entry_user_conn = managed_groups_user_conn.get(user.rdn)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        managed_entry_user_conn.rename("cn=modrdn group")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        managed_entry_user_conn.rename("cn=modrdn group")

    # Mess with MEP so it fails: add a child under the managed group while
    # the plugin is disabled, making the managed entry a non-leaf
    mep_plugin.disable()
    users_mep_group = UserAccounts(topology_st.standalone, mep_group.dn, rdn=None)
    users_mep_group.create_test_user(1001)
    mep_plugin.enable()

    # Add another group to verify entry cache is not corrupted
    test_group = groups.create(properties={'cn': 'test_group'})

    # Try to delete user - it fails because managed entry can't be deleted
    with pytest.raises(ldap.NOT_ALLOWED_ON_NONLEAF):
        user.delete()

    # Verify membership is intact; the failure branch retries after a restart
    # to distinguish a cache-only problem from a real membership loss
    if not user_group.is_member(user.dn):
        log.fatal("Member was incorrectly removed from the group!! Or so it seems")

        # Restart server and test again in case this was a cache issue
        topology_st.standalone.restart()
        if user_group.is_member(user.dn):
            log.info("The entry cache was corrupted")
            assert False

        assert False

    # Verify test group is still found in entry cache by deleting it
    test_group.delete()

    # Success
    log.info("Test PASSED")