def _find_memberof(server, member_dn, group_dn):
    """Assert that the entry at member_dn carries memberOf for group_dn.

    :param server: instance (M1, C1 or H1) whose user/group entries are checked
    :param member_dn: DN of the user entry expected to have the memberOf value
    :param group_dn: DN of the group the user should be a member of
    :raises AssertionError: if either entry is missing or memberOf lacks the group
    """
    user = UserAccount(server, member_dn)
    assert user.exists()
    group = Group(server, group_dn)
    assert group.exists()
    # The user entry should have a memberOf attribute with the specified
    # group dn value.  Use the public .dn property rather than the private
    # _dn slot the original reached into.
    assert group.dn in user.get_attr_vals_utf8('memberOf')
def _add_user(request, topo):
    """Create the OUs, the dynamic MODRDN group and two test users.

    Registers a finalizer that removes the per-test entries afterwards.
    """
    # Two organizational units to host the users.
    for ou_name in ('Product Development', 'Accounting'):
        ou = OrganizationalUnit(topo.standalone,
                                'ou={},{}'.format(ou_name, DEFAULT_SUFFIX))
        ou.create(properties={'ou': ou_name})

    # Dynamic group whose memberURL targets the ACI user.
    dyn_group = Group(topo.standalone, DYNAMIC_MODRDN)
    dyn_group.create(properties={
        "cn": "Test DYNAMIC_MODRDN Group 70",
        "objectclass": ["top", 'groupofURLs'],
        'memberURL': 'ldap:///{}??base?(cn=*)'.format(USER_WITH_ACI_DELADD),
    })

    # One user per OU, identical POSIX attributes apart from name/home.
    for full_name, home, container in (('Jeff Vedder', 'JeffVedder', 'Product Development'),
                                       ('Sam Carter', 'SamCarter', 'Accounting')):
        account = UserAccount(
            topo.standalone,
            'cn={},ou={},{}'.format(full_name, container, DEFAULT_SUFFIX))
        account.create(properties={
            'uid': full_name,
            'cn': full_name,
            'sn': 'user',
            'uidNumber': '1000',
            'gidNumber': '2000',
            'homeDirectory': '/home/' + home,
            'userPassword': PW_DM,
        })

    def fin():
        # Tear down every entry the tests rely on.
        for dn in [USER_DELADD, USER_WITH_ACI_DELADD, DYNAMIC_MODRDN,
                   CONTAINER_2_DELADD, CONTAINER_1_DELADD]:
            UserAccount(topo.standalone, dn).delete()

    request.addfinalizer(fin)
def create_test_group(instance, cn=None, suffix=None, unique_group=False):
    """Create and return a new group for testing.

    A fresh cn is generated from a module-level counter so repeated calls
    do not collide.  Supplying an existing cn/suffix combination makes the
    underlying create() raise an LDAP error.

    Returns a Group (or UniqueGroup when unique_group is True) object.
    """
    global test_group_id

    if cn is None:
        cn = "testgroup_{}".format(test_group_id)
        test_group_id += 1
    if suffix is None:
        suffix = "ou=Groups," + DEFAULT_SUFFIX

    group_cls = UniqueGroup if unique_group else Group
    group = group_cls(instance, "cn={},{}".format(cn, suffix))
    group.create(properties={'cn': cn, 'ou': 'groups'})
    return group
def test_sin_invalid_no_basedn(topology_st):
    """Test that with insufficient data, create fails.

    :id: a710b81c-cb74-4632-97b3-bdbcccd40954
    :setup: standalone instance
    :steps:
        1. Create with no basedn (no rdn derivation will work)
    :expectedresults:
        1. Create fails
    """
    group = Group(topology_st.standalone)
    # Without a basedn there is nothing to derive the full dn from,
    # so the server must refuse the add.
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        group.create(properties={'cn': ['test_sin_invalid_no_basedn']})
def test_adding_1000_users(topo_m4, _create_entries):
    """ Adding 1000 users matching inclusive regex for Managers/Contractors
    and exclusive regex for Interns/Visitors

    :id: f641e612-be57-11e9-94e6-8c16451d917b
    :setup: Instance with 4 suppliers
    :steps:
        1. Add 1000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1']
        2. Check the same created in rest suppliers
    :expected results:
        1. Pass
        2. Pass
    """
    automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX)
    grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX)
    default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX)
    default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX)
    # Adding 1000 users
    for number in range(1000):
        create_entry(topo_m4, f'automemusrs{number}', automem_scope, '799',
                     '5693', 'Manager')
    try:
        # Check to sync the entries
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Inclusive regex matched: all 1000 users land in Managers/Contractors
        for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'),
                              (topo_m4.ms['supplier3'], 'Contractors')]:
            assert len(
                nsAdminGroup(instance, "cn={},{}".format(
                    grp, grp_container)).get_attr_vals_utf8('member')) == 1000
        # Exclusive regex matched: Interns/Visitors must stay empty
        for instance, grp in [(topo_m4.ms['supplier2'], 'Interns'),
                              (topo_m4.ms['supplier4'], 'Visitors')]:
            assert not nsAdminGroup(instance, "cn={},{}".format(
                grp, grp_container)).get_attr_vals_utf8('member')
        # Default (fallback) groups get no members either
        for grp in [default_group1, default_group2]:
            assert not Group(topo_m4.ms['supplier2'],
                             grp).get_attr_vals_utf8('member')
            assert not Group(topo_m4.ms['supplier3'],
                             grp).get_attr_vals_utf8('member')
    finally:
        delete_users_and_wait(topo_m4, automem_scope)
def test_adding_300_user(topo_m4, _create_entries):
    """ Adding 300 user entries matching the inclusive regex rules for
    all targeted groups at M1 and checking the same created in M2 & M3

    :id: fcd867bc-be57-11e9-9842-8c16451d917b
    :setup: Instance with 4 suppliers
    :steps:
        1. Add 300 user entries matching the inclusive regex rules at topo_m4.ms['supplier1']
        2. Check the same created in rest suppliers
    :expected results:
        1. Pass
        2. Pass
    """
    user_rdn = "long01usr"
    automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX)
    grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX)
    default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX)
    default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX)
    # Adding BulkUsers
    for number in range(300):
        create_entry(topo_m4, f'{user_rdn}{number}', automem_scope, '5795',
                     '5693', 'Contractor')
    try:
        # Check to sync the entries
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Every targeted group should have exactly the 300 new members
        for instance, grp in [(topo_m4.ms['supplier2'], 'Managers'),
                              (topo_m4.ms['supplier3'], 'Contractors'),
                              (topo_m4.ms['supplier4'], 'Interns')]:
            assert len(
                nsAdminGroup(
                    instance,
                    f'cn={grp},{grp_container}').get_attr_vals_utf8(
                        'member')) == 300
        # Default (fallback) groups must remain empty
        for grp in [default_group1, default_group2]:
            assert not Group(topo_m4.ms['supplier4'],
                             grp).get_attr_vals_utf8('member')
            assert not Group(topo_m4.ms['supplier3'],
                             grp).get_attr_vals_utf8('member')
    finally:
        delete_users_and_wait(topo_m4, automem_scope)
def test_sin_invalid_no_rdn(topology_st):
    """Test that with no cn, rdn derivation fails.

    :id: c3bb28f8-db59-4d8a-8920-169879ef702b
    :setup: standalone instance
    :steps:
        1. Create with no cn
    :expectedresults:
        1. Create fails
    """
    group = Group(topology_st.standalone)
    # No explicit rdn is passed and the properties carry no rdn-derivable
    # type (cn), so create() must be refused.
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        group.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}',
                     properties={'member': ['test_sin_explicit_dn']})
def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user):
    """Non-regression test for BUG 326000: MemberURL needs to be normalized

    :id: a5d172e6-7db8-11e8-aca7-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add test entry
        2. Add ACI
        3. User should follow ACI role
    :expectedresults:
        1. Entry should be added
        2. Operation should succeed
        3. Operation should succeed
    """
    ou_ou = OrganizationalUnit(topo.standalone,
                               "ou=PEOPLE,{}".format(DEFAULT_SUFFIX))
    # NOTE: the groupdn below deliberately contains un-normalized spacing
    # ("cn =DYNGROUP", "ou=PEOPLE, ...") — normalization is what is under test.
    ou_ou.set(
        'aci', '(targetattr= *)'
        '(version 3.0; acl "tester"; allow(all) '
        'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(
            DEFAULT_SUFFIX))

    groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=PEOPLE')
    groups.create(
        properties={
            "cn": "DYNGROUP",
            "description": "DYNGROUP",
            'objectClass': 'groupOfURLS',
            'memberURL': "ldap:///ou=PEOPLE,{}??sub?"
                         "(uid=test_user_2)".format(DEFAULT_SUFFIX)
        })

    uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    for demo1 in [(1, "Entry to test rights on."), (2, "Member of DYNGROUP")]:
        user = uas.create_test_user(uid=demo1[0], gid=demo1[0])
        user.replace_many(('description', demo1[1]), ('userPassword', PW_DM))

    # With normal aci: the dynamic group member may modify itself
    conn = UserAccount(topo.standalone, uas.list()[1].dn).bind(PW_DM)
    harry = UserAccount(conn, uas.list()[1].dn)
    harry.add('sn', 'FRED')

    # With abnormal aci: memberURL differs only in letter case of the uid;
    # after normalization the modify must still be permitted
    dygrp = Group(topo.standalone, DYNGROUP)
    dygrp.remove(
        'memberurl',
        "ldap:///ou=PEOPLE,{}??sub?(uid=test_user_2)".format(DEFAULT_SUFFIX))
    dygrp.add(
        'memberurl',
        "ldap:///ou=PEOPLE,{}??sub?(uid=tesT_UsEr_2)".format(DEFAULT_SUFFIX))
    harry.add('sn', 'Not FRED')

    # Cleanup
    for i in uas.list():
        i.delete()
def test_exists(topology_st):
    """
    Verify exists(): False before the entry is created, True afterwards.
    """
    dn = "cn=MyTestGroup,ou=Groups," + DEFAULT_SUFFIX
    group = Group(topology_st.standalone, dn=dn)
    # Not yet in the directory
    assert not group.exists()
    group.create(properties={'cn': 'MyTestGroup', 'ou': 'groups'})
    # Now it is
    assert group.exists()
def test_sin_explicit_dn(topology_st):
    """Test explicit dn with create

    :id: 2d812225-243b-4f87-85ad-d403a4ae0267
    :setup: standalone instance
    :steps:
        1. Create with explicit dn
    :expectedresults:
        1. Create success
    """
    target_dn = f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}'
    # The dn is supplied up front, so no basedn/rdn derivation is needed.
    group = Group(topology_st.standalone, dn=target_dn)
    group.create(properties={'cn': ['test_sin_explicit_dn']})
    assert group.dn.lower() == target_dn.lower()
    # Tidy up the entry we created
    group.delete()
def test_sin_derive_single_dn(topology_st):
    """Derive the dn from a single cn

    :id: d7597016-214c-4fbd-8b48-71eb16ea9ede
    :setup: standalone instance
    :steps:
        1. Create with a single cn (no dn, no rdn)
    :expectedresults:
        1. Create success
    """
    base = f'ou=groups,{DEFAULT_SUFFIX}'
    group = Group(topology_st.standalone)
    # A single cn plus a basedn is enough for lib389 to derive the dn.
    group.create(basedn=base, properties={'cn': ['test_sin_explicit_dn']})
    expected = f'cn=test_sin_explicit_dn,{base}'
    assert group.dn.lower() == expected.lower()
    group.delete()
def test_sin_explicit_rdn(topology_st):
    """Test explicit rdn with create.

    :id: a2c14e50-8086-4edb-9088-3f4a8e875c3a
    :setup: standalone instance
    :steps:
        1. Create with explicit rdn
    :expectedresults:
        1. Create success
    """
    base = f'ou=groups,{DEFAULT_SUFFIX}'
    group = Group(topology_st.standalone)
    # The rdn is given explicitly, so the dn is rdn + basedn.
    group.create(rdn='cn=test_sin_explicit_rdn',
                 basedn=base,
                 properties={'cn': ['test_sin_explicit_rdn']})
    expected = f'cn=test_sin_explicit_rdn,{base}'
    assert group.dn.lower() == expected.lower()
    group.delete()
def test_sin_derive_mult_dn(topology_st):
    """Derive the dn from multiple cn

    :id: 0a1a7132-a08f-4b56-ae52-30c8ca59cfaf
    :setup: standalone instance
    :steps:
        1. Create with multiple cn
    :expectedresults:
        1. Create success
    """
    base = f'ou=groups,{DEFAULT_SUFFIX}'
    group = Group(topology_st.standalone)
    # With several cn values, the first one is used to derive the rdn.
    group.create(basedn=base,
                 properties={
                     'cn': ['test_sin_derive_mult_dn',
                            'other_test_sin_derive_mult_dn'],
                 })
    expected = f'cn=test_sin_derive_mult_dn,{base}'
    assert group.dn.lower() == expected.lower()
    group.delete()
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but the user *not* member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already define a replication via group
           - define the group as groupDN for replication and 60sec as fetch interval
           - pause RA in both direction
           - Define the user as bindDn of the RAs
        4. restart servers.
           It sets the fetch time to 0, so next session will refetch the group
        5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time)
        6. trigger an update and check replication is working and
           there is no failure logged on supplier side
           'does not have permission to supply replication updates to the replica'
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level.  Not really necessary
    M1.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',
                [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({
        'uid': uid,
        'cn': uid,
        'sn': '_%s' % uid,
        'userpassword': PASSWD.encode(),
        'description': b'value creation'
    })
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {'cn': 'group1', 'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    # The user is intentionally NOT a member of the group yet
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    for inst in (M1, M2):
        replica = Replicas(inst).list()[0]
        replica.apply_mods([
            (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
            (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)
        ])

    # Then pause the replication agreement to prevent them trying to acquire
    # while the user is not member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmt = Agreements(inst).list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members define in the replica
    #
    # The user NOT member of the group replication will not work until
    # bindDNcheckInterval
    #
    # With the fix, the first fetch is not taken into account (fetch time=0)
    # so on the first session, the group will be fetched
    M1.restart()
    M2.restart()

    # Replication being broken here we need to directly do the same update.
    # Sorry not found another solution except total update
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # trigger updates to be sure to have a replication session, giving some time
    M1.modify_s(create_user.dn,
                [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn,
                [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working: both descriptions must be present on
    # both sides
    for inst in (M1, M2):
        ents = inst.search_s(create_user.dn, ldap.SCOPE_BASE,
                             '(objectclass=*)')
        for ent in ents:
            assert (ent.hasAttr('description'))
            found = 0
            for val in ent.getValues('description'):
                if (val == b'value_1_1'):
                    found = found + 1
                elif (val == b'value_2_2'):
                    found = found + 1
            assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not member of the group
    regex = re.compile(
        "does not have permission to supply replication updates to the replica."
    )
    errorlog_M1 = open(M1.errlog, "r")
    # BUG FIX: this used to open M1.errlog again — M2's own error log
    # must be inspected for M2's failures.
    errorlog_M2 = open(M2.errlog, "r")
    try:
        # Find the last restart position
        restart_location_M1 = find_start_location(errorlog_M1, 2)
        assert (restart_location_M1 != -1)
        restart_location_M2 = find_start_location(errorlog_M2, 2)
        assert (restart_location_M2 != -1)

        # Then check there is no failure to authenticate
        count = pattern_errorlog(errorlog_M1, regex,
                                 start_location=restart_location_M1)
        assert (count <= 1)
        count = pattern_errorlog(errorlog_M2, regex,
                                 start_location=restart_location_M2)
        assert (count <= 1)
    finally:
        # Close the handles the original version leaked
        errorlog_M1.close()
        errorlog_M2.close()

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
def test_automemscope_and_running_modrdn(topo_m4, _create_entries):
    """ Adding bulk users to non-automem_scope and running modrdn operation
    with new superior to automem_scope

    :id: bf60f958-be57-11e9-945d-8c16451d917b
    :setup: Instance with 4 suppliers
    :steps:
        1. Running modrdn operation to change the ou to automem_scope
        2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1']
        3. Run AutomemberRebuildMembershipTask
        4. Check the same created in rest suppliers
    :expected results:
        1. Pass
        2. Pass
        3. Pass
        4. Pass
    """
    user_rdn = "long09usr"
    automem_scope1 = "ou=Employees,{}".format(DEFAULT_SUFFIX)
    automem_scope2 = "cn=NewEmployees,{}".format(DEFAULT_SUFFIX)
    grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX)
    default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX)
    default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX)
    # New container that will become the automember scope
    OrganizationalUnits(
        topo_m4.ms['supplier1'],
        DEFAULT_SUFFIX).create(properties={'ou': 'NewEmployees'})
    # Point the automember definition at the new scope
    Group(topo_m4.ms['supplier1'],
          f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace(
              'autoMemberScope', automem_scope2)
    # Bump error log level and restart so the scope change takes effect
    for instance in [
            topo_m4.ms['supplier1'], topo_m4.ms['supplier2'],
            topo_m4.ms['supplier3'], topo_m4.ms['supplier4']
    ]:
        Config(instance).replace('nsslapd-errorlog-level', '73728')
        instance.restart()
    # Adding bulk users (into the OLD scope, outside the new one)
    for number in range(3000):
        create_entry(topo_m4, f'automemusrs{number}', automem_scope1, '3994',
                     '5695', 'OnDeputation')
    try:
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Users were added outside the active scope, so no group has members
        for grp, instance in [(default_group2, topo_m4.ms['supplier3']),
                              ("cn=Managers,{}".format(grp_container),
                               topo_m4.ms['supplier1']),
                              ("cn=Contractors,{}".format(grp_container),
                               topo_m4.ms['supplier3'])]:
            assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member')
        # Move every user into the automember scope via modrdn
        count = 0
        for user in nsAdminGroups(topo_m4.ms['supplier3'], automem_scope1,
                                  rdn=None).list():
            topo_m4.ms['supplier1'].rename_s(user.dn,
                                             f'cn=New{user_rdn}{count}',
                                             newsuperior=automem_scope2,
                                             delold=1)
            count += 1
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Rebuild membership for the renamed entries
        AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(
            properties={
                'basedn': automem_scope2,
                'filter': "objectClass=posixAccount"
            })
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # All 3000 users now fall through to the default groups
        for instance, grp in [(topo_m4.ms['supplier3'], default_group2),
                              (topo_m4.ms['supplier3'], default_group1)]:
            assert len(
                nsAdminGroup(instance,
                             grp).get_attr_vals_utf8('member')) == 3000
        # The regex-targeted groups stay empty
        for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'),
                              (topo_m4.ms['supplier3'], 'Contractors'),
                              (topo_m4.ms['supplier2'], 'Interns'),
                              (topo_m4.ms['supplier4'], 'Visitors')]:
            assert not nsAdminGroup(instance, "cn={},{}".format(
                grp, grp_container)).get_attr_vals_utf8('member')
    finally:
        for scope in [automem_scope1, automem_scope2]:
            delete_users_and_wait(topo_m4, scope)
def test_bulk_users_to_non_automemscope(topo_m4, _create_entries):
    """ Adding bulk users to non-automem_scope and then running modrdn
    operation to change the ou to automem_scope

    :id: c532dc0c-be57-11e9-bcca-8c16451d917b
    :setup: Instance with 4 suppliers
    :steps:
        1. Running modrdn operation to change the ou to automem_scope
        2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1']
        3. Run AutomemberRebuildMembershipTask
        4. Check the same created in rest suppliers
    :expected results:
        1. Pass
        2. Pass
        3. Pass
        4. Pass
    """
    automem_scope = "cn=EmployeesNew,{}".format(DEFAULT_SUFFIX)
    grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX)
    default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX)
    default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX)
    # Container that will later be renamed into the automember scope
    nsContainers(topo_m4.ms['supplier1'],
                 DEFAULT_SUFFIX).create(properties={'cn': 'ChangeThisCN'})
    Group(topo_m4.ms['supplier1'],
          f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace(
              'autoMemberScope', automem_scope)
    # Restart so the scope change takes effect
    for instance in [
            topo_m4.ms['supplier1'], topo_m4.ms['supplier2'],
            topo_m4.ms['supplier3'], topo_m4.ms['supplier4']
    ]:
        instance.restart()
    # Adding BulkUsers (under the not-yet-scoped container)
    for number in range(3000):
        create_entry(topo_m4, f'automemusrs{number}',
                     f'cn=ChangeThisCN,{DEFAULT_SUFFIX}', '5995', '5693',
                     'Supervisor')
    try:
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Users sit outside the automember scope, so no memberships yet
        for instance, grp in [(topo_m4.ms['supplier2'], default_group1),
                              (topo_m4.ms['supplier1'],
                               "cn=Managers,{}".format(grp_container))]:
            assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member')
        # Rename the container so every user moves into the automember scope
        topo_m4.ms['supplier3'].rename_s(f"CN=ChangeThisCN,{DEFAULT_SUFFIX}",
                                         f'cn=EmployeesNew',
                                         newsuperior=DEFAULT_SUFFIX,
                                         delold=1)
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Rebuild membership for the moved entries
        AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(
            properties={
                'basedn': automem_scope,
                'filter': "objectClass=posixAccount"
            })
        for supplier in [
                topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                topo_m4.ms['supplier4']
        ]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # All 3000 users are now members of every regex-targeted group
        for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'),
                              (topo_m4.ms['supplier2'], 'Interns'),
                              (topo_m4.ms['supplier3'], 'Contractors'),
                              (topo_m4.ms['supplier4'], 'Visitors')]:
            assert len(
                nsAdminGroup(
                    instance,
                    f'cn={grp},{grp_container}').get_attr_vals_utf8(
                        'member')) == 3000
        # Default (fallback) groups stay empty
        for grp, instance in [(default_group1, topo_m4.ms['supplier2']),
                              (default_group2, topo_m4.ms['supplier3'])]:
            assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member')
    finally:
        delete_users_and_wait(topo_m4, automem_scope)
        nsContainer(topo_m4.ms['supplier1'],
                    "CN=EmployeesNew,{}".format(DEFAULT_SUFFIX)).delete()
def test_multiple_scopes(topo):
    """Specify memberOf works when multiple include scopes are defined

    :id: fbcd70cc-c83d-4c79-bd5b-2d8f017545ae
    :setup: Standalone Instance
    :steps:
        1. Set multiple include scopes
        2. Test members added to both scopes are correctly updated
        3. Test user outside of scope was not updated
        4. Set exclude scope
        5. Move user into excluded subtree and check the membership is correct
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    inst = topo.standalone

    # configure plugin with two include scopes
    memberof = MemberOfPlugin(inst)
    memberof.enable()
    memberof.add('memberOfEntryScope', SUBTREE_1)
    memberof.add('memberOfEntryScope', SUBTREE_2)
    inst.restart()

    # Add setup entries: three containers, each with a member+group pair
    add_container(inst, SUFFIX, 'sub1')
    add_container(inst, SUFFIX, 'sub2')
    add_container(inst, SUFFIX, 'sub3')
    add_member_and_group(inst, 'm1', 'g1', SUBTREE_1)
    add_member_and_group(inst, 'm2', 'g2', SUBTREE_2)
    add_member_and_group(inst, 'm3', 'g3', SUBTREE_3)

    # Check users 1 and 2 were correctly updated
    check_membership(inst, f'uid=test_m1,{SUBTREE_1}', f'cn=g1,{SUBTREE_1}',
                     True)
    check_membership(inst, f'uid=test_m2,{SUBTREE_2}', f'cn=g2,{SUBTREE_2}',
                     True)

    # Check that user3, which is out of scope, was not updated
    check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g1,{SUBTREE_1}',
                     False)
    check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g2,{SUBTREE_2}',
                     False)
    check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g3,{SUBTREE_3}',
                     False)

    # Set exclude scope
    EXCLUDED_SUBTREE = 'cn=exclude,%s' % SUFFIX
    EXCLUDED_USER = f"uid=test_m1,{EXCLUDED_SUBTREE}"
    INCLUDED_USER = f"uid=test_m1,{SUBTREE_1}"
    GROUP_DN = f'cn=g1,{SUBTREE_1}'

    add_container(inst, SUFFIX, 'exclude')
    memberof.add('memberOfEntryScopeExcludeSubtree', EXCLUDED_SUBTREE)

    # Move user to excluded scope
    user = UserAccount(topo.standalone, dn=INCLUDED_USER)
    user.rename("uid=test_m1", newsuperior=EXCLUDED_SUBTREE)

    # Check memberOf and group are cleaned up: neither the old nor the new
    # dn of the moved user may remain in the group
    check_membership(inst, EXCLUDED_USER, GROUP_DN, False)
    group = Group(topo.standalone, dn=GROUP_DN)
    assert not group.present("member", EXCLUDED_USER)
    assert not group.present("member", INCLUDED_USER)
def test_repl_agmt_bootstrap_credentials(topo):
    """Test that the agreement bootstrap credentials works if the default
    credentials fail for some reason.

    :id: 38c8095c-d958-415a-b602-74854b7882b3
    :setup: 2 Master Instances
    :steps:
        1. Change the bind dn group member passwords
        2. Verify replication is not working
        3. Create a new repl manager on master 2 for bootstrapping
        4. Add bootstrap credentials to agmt on master 1
        5. Verify replication is now working with bootstrap creds
        6. Trigger new repl session and default credentials are used first
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    # Gather all of our objects for the test
    m1 = topo.ms["master1"]
    m2 = topo.ms["master2"]
    master1_replica = Replicas(m1).get(DEFAULT_SUFFIX)
    master2_replica = Replicas(m2).get(DEFAULT_SUFFIX)
    master2_users = UserAccounts(m2, DEFAULT_SUFFIX)
    m1_agmt = master1_replica.get_agreements().list()[0]
    num_of_original_users = len(master2_users.list())

    # Change the member's passwords which should break replication
    bind_group = Group(m2, dn=BIND_GROUP_DN)
    members = bind_group.list_members()
    for member_dn in members:
        member = UserAccount(m2, dn=member_dn)
        member.replace('userPassword', 'not_right')
    time.sleep(3)
    # Bounce the agreement to force a new bind with the broken credentials
    m1_agmt.pause()
    m1_agmt.resume()

    # Verify replication is not working, a new user should not be replicated
    users = UserAccounts(m1, DEFAULT_SUFFIX)
    test_user = users.ensure_state(properties=TEST_USER_PROPERTIES)
    time.sleep(3)
    assert len(master2_users.list()) == num_of_original_users

    # Create a repl manager on replica
    repl_mgr = BootstrapReplicationManager(m2, dn=BOOTSTRAP_MGR_DN)
    mgr_properties = {
        'uid': 'replication manager',
        'cn': 'replication manager',
        'userPassword': BOOTSTRAP_MGR_PWD,
    }
    repl_mgr.create(properties=mgr_properties)

    # Update master 2 config to accept the bootstrap manager as bind dn
    master2_replica.remove_all('nsDS5ReplicaBindDNGroup')
    master2_replica.remove_all('nsDS5ReplicaBindDnGroupCheckInterval')
    master2_replica.replace('nsDS5ReplicaBindDN', BOOTSTRAP_MGR_DN)

    # Add bootstrap credentials to master1 agmt, and restart agmt
    m1_agmt.replace('nsds5ReplicaBootstrapTransportInfo', 'LDAP')
    m1_agmt.replace('nsds5ReplicaBootstrapBindMethod', 'SIMPLE')
    m1_agmt.replace('nsds5ReplicaBootstrapCredentials', BOOTSTRAP_MGR_PWD)
    m1_agmt.replace('nsds5ReplicaBootstrapBindDN', BOOTSTRAP_MGR_DN)
    m1_agmt.pause()
    m1_agmt.resume()

    # Verify replication is working.  The user should have been replicated
    time.sleep(3)
    assert len(master2_users.list()) > num_of_original_users

    # Finally check if the default credentials are used on the next repl
    # session.  Clear out the logs, and disable log buffering.  Then
    # trigger a replication update/session.
    m1_agmt.pause()
    m2.stop()
    m2.deleteLog(m2.accesslog)  # Clear out the logs
    m2.start()
    m2.config.set('nsslapd-accesslog-logbuffering', 'off')
    m1_agmt.resume()
    test_user.delete()
    time.sleep(3)

    # We know if the default credentials are used it will fail (err=49)
    results = m2.ds_access_log.match('.* err=49 .*')
    assert len(results) > 0
def test_sin_non_present_rdn(topology_st):
    """Test that with an rdn not present in attributes, create succeeds in
    some cases.

    :id: a5d9cb24-8907-4622-ac85-90407a66e00a
    :setup: standalone instance
    :steps:
        1. Create with an rdn not in properties
    :expectedresults:
        1. Create success
    """
    base = f'ou=groups,{DEFAULT_SUFFIX}'
    expected = f'cn=test_sin_non_present_rdn,{base}'

    # An explicit rdn whose value does not appear in the properties still
    # works.  NOTE: I think that this is 389-ds making this work, NOT lib389.
    first = Group(topology_st.standalone)
    first.create(rdn='cn=test_sin_non_present_rdn',
                 basedn=base,
                 properties={'cn': ['other_test_sin_non_present_rdn']})
    assert first.dn.lower() == expected.lower()
    first.delete()

    # Now, test where there is no cn at all.  lib389 is blocking this today,
    # but 50259 will change this.
    second = Group(topology_st.standalone)
    second.create(rdn='cn=test_sin_non_present_rdn',
                  basedn=base,
                  properties={})
    assert second.dn.lower() == expected.lower()
    second.delete()
def test_passwordlockout(topo, _fix_password):
    """Test adding admin user diradmin to Directory Administrator group

    :id: 3ffcffda-5a20-11ea-a3af-8c16451d917b
    :setup: Standalone
    :steps:
        1. Account Lockout must be cleared on successful password change
        2. Adding admin user diradmin
        3. Adding admin user diradmin to Directory Administrator group
        4. Turn on passwordlockout
        5. Sets lockout duration to 30 seconds
        6. Sets failure count reset duration to 30 sec
        7. Sets max password bind failure count to 3
        8. Reset password retry count (to 0)
        9. Try to bind with invalid credentials(3 times)
        10. Try to bind with valid pw, should give lockout error
        11. Reset password using admin login
        12. Try to login as the user to check the unlocking of account.
            Will also change the password back to original
        13. Change to account lockout forever until reset
        14. Reset password retry count (to 0)
        15. Try to bind with invalid credentials(3 times)
        16. Try to bind with valid pw, should give lockout error
        17. Reset password using admin login
        18. Try to login as the user to check the unlocking of account.
            Will also change the password back to original
    :expected results:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Fail
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Fail
        16. Success
        17. Success
        18. Success
    """
    config = Config(topo.standalone)
    # Adding admin user diradmin
    user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user()
    user.replace('userpassword', 'dby3rs2')
    admin = _create_user(topo, 'diradmin', 'Anuj Borah', '1002', 'diradmin')
    # Adding admin user diradmin to Directory Administrator group
    Group(topo.standalone,
          f'cn=Directory Administrators,{DEFAULT_SUFFIX}').add(
              'uniquemember', admin.dn)
    # Turn on passwordlockout
    # Sets lockout duration to 30 seconds
    # Sets failure count reset duration to 30 sec
    # Sets max password bind failure count to 3
    # Reset password retry count (to 0)
    config.replace_many(
        ('passwordlockout', 'on'),
        ('passwordlockoutduration', '30'),
        ('passwordresetfailurecount', '30'),
        ('passwordmaxfailure', '3'),
        ('passwordhistory', 'off'))
    user.replace('passwordretrycount', '0')
    # Try to bind with invalid credentials(3 times)
    for _ in range(3):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # Try to bind with valid pw, should give lockout error
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # Reset password using admin login
    conn = admin.bind('diradmin')
    UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2')
    time.sleep(1)
    # Try to login as the user to check the unlocking of account.  Will also
    # change the password back to original
    _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter')
    # Change to account lockout forever until reset
    # Reset password retry count (to 0)
    config.replace('passwordunlock', 'off')
    user.replace('passwordretrycount', '0')
    # Try to bind with invalid credentials(3 times)
    for _ in range(3):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # Try to bind with valid pw, should give lockout error
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # Reset password using admin login
    UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2')
    time.sleep(1)
    # Try to login as the user to check the unlocking of account.  Will also
    # change the password back to original
    _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter')
def test_ri_and_mep_cache_corruption(topology_st):
    """Test RI plugin aborts change after MEP plugin fails.
    This is really testing the entry cache for corruption

    :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995
    :setup: Standalone instance
    :steps:
        1. Enable and configure mep and ri plugins
        2. Add user and add it to a group
        3. Disable MEP plugin and remove MEP group
        4. Delete user
        5. Check that user is still a member of the group
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. It fails with NO_SUCH_OBJECT
        5. Success
    """
    # NOTE(review): a later function in this file re-uses this exact name
    # (and this same :id:); at module level the later definition shadows this
    # one, so pytest will never collect or run this version — TODO confirm
    # and rename/deduplicate.

    # Start plugins (dynamic plugins avoids a restart after enabling them)
    topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
    mep_plugin = ManagedEntriesPlugin(topology_st.standalone)
    mep_plugin.enable()
    ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone)
    ri_plugin.enable()

    # Add our org units: one scope for origin entries, one for managed entries
    ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    # Configure MEP: template describing the managed (posixGroup) entry,
    # plus the config wiring originScope/originFilter to that template
    mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX)
    mep_template1 = mep_templates.create(
        properties={
            'cn': 'MEP template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
            'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
        })
    mep_configs = MEPConfigs(topology_st.standalone)
    mep_configs.create(
        properties={
            'cn': 'config',
            'originScope': ou_people.dn,
            'originFilter': 'objectclass=posixAccount',
            'managedBase': ou_groups.dn,
            'managedTemplate': mep_template1.dn
        })

    # Add an entry that meets the MEP scope (posixAccount under managed_people)
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
                         rdn='ou={}'.format(ou_people.rdn))
    user = users.create(
        properties={
            'uid': 'test-user1',
            'cn': 'test-user',
            'sn': 'test-user',
            'uidNumber': '10011',
            'gidNumber': '20011',
            'homeDirectory': '/home/test-user1'
        })

    # Add group and make the user a member (RI plugin will track this)
    groups = Groups(topology_st.standalone, DEFAULT_SUFFIX)
    user_group = groups.ensure_state(properties={
        'cn': 'group',
        'member': user.dn
    })

    # Check if a managed group entry was created by MEP for the new user
    mep_group = Group(topology_st.standalone,
                      dn='cn={},{}'.format(user.rdn, ou_groups.dn))
    if not mep_group.exists():
        log.fatal("MEP group was not created for the user")
        assert False

    # Test MEP be txn pre op failure does not corrupt entry cache
    # Should get the same exception for both rename attempts
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        mep_group.rename("cn=modrdn group")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        mep_group.rename("cn=modrdn group")

    # Mess with MEP so it fails: drop the managed group behind MEP's back
    mep_plugin.disable()
    mep_group.delete()
    mep_plugin.enable()

    # Add another group to verify entry cache is not corrupted
    test_group = groups.create(properties={'cn': 'test_group'})

    # Delete user, should fail in MEP be txn post op, and user should still be a member
    with pytest.raises(ldap.NO_SUCH_OBJECT):
        user.delete()

    # Verify membership is intact; the nested checks below only run on failure
    if not user_group.is_member(user.dn):
        log.fatal("Member was incorrectly removed from the group!! Or so it seems")

        # Restart server and test again in case this was a cache issue
        topology_st.standalone.restart()
        if user_group.is_member(user.dn):
            log.info("The entry cache was corrupted")
            assert False

        assert False

    # Verify test group is still found in entry cache by deleting it
    test_group.delete()

    # Success
    log.info("Test PASSED")
def test_dn_syntax_spaces_delete(topo, props, rawdn):
    """Verify entries whose DN begins/ends with a space can be added,
    modified, and deleted repeatedly, keeping the indexes and the entry
    cache consistent across a server restart.

    :id: b993f37c-c2b0-4312-992c-a9048ff98965
    :parametrized: yes
    :setup: Standalone Instance
    :steps:
        1. Create a group with a DN that has a space as the first/last character.
        2. Delete group
        3. Add group
        4. Modify group
        5. Restart server and modify entry
        6. Delete group
        7. Add group back
        8. Delete group using specific DN
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
    """
    def recreate_group():
        # (Re)create the test group from the parametrized properties.
        return Groups(topo.standalone, DEFAULT_SUFFIX).create(properties=props.copy())

    # Create then delete: exercises DN/RDN parsing and cache maintenance
    entry = recreate_group()
    entry.delete()

    # Add again: confirms the entryrdn index was properly updated by the delete
    entry = recreate_group()

    # Modify: confirms dn/rdn parsing on the live cache entry
    entry.replace('description', 'escaped space group')

    # Restarting pulls the entry from the database into a fresh cache entry,
    # a different code path than add-then-cache on a live server.
    topo.standalone.restart()

    # Modify and delete after restart: verifies the disk-loaded cache entry
    entry.replace('description', 'escaped space group after restart')
    entry.delete()

    # Add it back and delete through the explicit raw DN — one more
    # DN/RDN parsing variation as a sanity check.
    recreate_group()
    Group(topo.standalone, dn=rawdn).delete()
def test_ri_and_mep_cache_corruption(topology_st):
    """Test RI plugin aborts change after MEP plugin fails.
    This is really testing the entry cache for corruption

    :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995
    :setup: Standalone instance
    :steps:
        1. Enable and configure mep and ri plugins
        2. Add user and add it to a group
        3. Disable MEP plugin and remove MEP group
        4. Delete user
        5. Check that user is still a member of the group
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. It fails with NO_SUCH_OBJECT
        5. Success
    """
    # NOTE(review): this function name (and the :id: above) duplicates an
    # earlier test in this file; as the later module-level definition, this
    # version shadows the earlier one, so only this one is collected by
    # pytest — TODO confirm and deduplicate name/:id:.

    # Add ACI so we can test that non-DM user can't delete managed entry
    domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
    ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")"
    ACI_TARGETATTR = "(targetattr = *)"
    ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) "
    ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
    domain.add('aci', ACI_BODY)

    # Start plugins (dynamic plugins avoids a restart after enabling them)
    topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
    mep_plugin = ManagedEntriesPlugin(topology_st.standalone)
    mep_plugin.enable()
    ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone)
    ri_plugin.enable()

    # Add our org units: one scope for origin entries, one for managed entries
    ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    # Configure MEP: template describing the managed (groupOfNames) entry,
    # plus the config wiring originScope/originFilter to that template
    mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX)
    mep_template1 = mep_templates.create(properties={
        'cn': 'MEP template',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'),
        'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
    })
    mep_configs = MEPConfigs(topology_st.standalone)
    mep_configs.create(properties={'cn': 'config',
                                   'originScope': ou_people.dn,
                                   'originFilter': 'objectclass=posixAccount',
                                   'managedBase': ou_groups.dn,
                                   'managedTemplate': mep_template1.dn})

    # Add an entry that meets the MEP scope (posixAccount under managed_people)
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
                         rdn='ou={}'.format(ou_people.rdn))
    user = users.create(properties={
        'uid': 'test-user1',
        'cn': 'test-user',
        'sn': 'test-user',
        'uidNumber': '10011',
        'gidNumber': '20011',
        'homeDirectory': '/home/test-user1'
    })
    # Give the user a password and open a non-DM connection bound as the user
    user.reset_password(USER_PASSWORD)
    user_bound_conn = user.bind(USER_PASSWORD)

    # Add group and make the user a member (RI plugin will track this)
    groups = Groups(topology_st.standalone, DEFAULT_SUFFIX)
    user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn})

    # Check if a managed group entry was created by MEP for the new user
    mep_group = Group(topology_st.standalone,
                      dn='cn={},{}'.format(user.rdn, ou_groups.dn))
    if not mep_group.exists():
        log.fatal("MEP group was not created for the user")
        assert False

    # Test MEP be txn pre op failure does not corrupt entry cache
    # Should get the same exception for both rename attempts
    # Try to remove the entry while bound as Admin (non-DM)
    managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None)
    managed_entry_user_conn = managed_groups_user_conn.get(user.rdn)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        managed_entry_user_conn.rename("cn=modrdn group")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        managed_entry_user_conn.rename("cn=modrdn group")

    # Mess with MEP so it fails: add a child under the managed group while
    # the plugin is disabled, making the managed entry a non-leaf
    mep_plugin.disable()
    users_mep_group = UserAccounts(topology_st.standalone, mep_group.dn, rdn=None)
    users_mep_group.create_test_user(1001)
    mep_plugin.enable()

    # Add another group to verify entry cache is not corrupted
    test_group = groups.create(properties={'cn': 'test_group'})

    # Try to delete user - it fails because managed entry can't be deleted
    with pytest.raises(ldap.NOT_ALLOWED_ON_NONLEAF):
        user.delete()

    # Verify membership is intact; the nested checks below only run on failure
    if not user_group.is_member(user.dn):
        log.fatal("Member was incorrectly removed from the group!! Or so it seems")

        # Restart server and test again in case this was a cache issue
        topology_st.standalone.restart()
        if user_group.is_member(user.dn):
            log.info("The entry cache was corrupted")
            assert False

        assert False

    # Verify test group is still found in entry cache by deleting it
    test_group.delete()

    # Success
    log.info("Test PASSED")