def create_containers(self, suffix):
    """Ensure the 'Configuration' container exists under *suffix*, with a
    'Schema' container nested beneath it, and cache both DNs on self.

    :param suffix: base DN under which the container tree is created
    :type suffix: str
    """
    configuration = nsContainers(self._instance, suffix).ensure_state(
        properties={'cn': 'Configuration'})
    schema = nsContainers(self._instance, configuration.dn).ensure_state(
        properties={'cn': 'Schema'})
    # Remember both DNs for later use by other methods.
    self._configuration_dn = configuration.dn
    self._schema_dn = schema.dn
def add_container(inst, dn, name, sleep=False):
    """Create a container entry (cn=*name*) directly under *dn*.

    :param inst: DirSrv instance to create the entry on
    :param dn: parent entry DN
    :param name: cn value for the new container
    :param sleep: when True, pause one second after the add
    :returns: the created nsContainer object
    """
    container = nsContainers(inst, dn).create(properties={'cn': name})
    if sleep:
        time.sleep(1)
    return container
def _create_container(inst, dn, name, sleep=True):
    """Create a container entry (cn=*name*) under *dn*.

    Fix/generalization: call sites in this module invoke
    ``_create_container(..., sleep=True)`` / omit the flag, but the original
    signature had no ``sleep`` parameter (those calls would raise TypeError)
    and slept unconditionally.  The flag is added with a default of True so
    existing no-flag callers keep the original always-sleep behaviour, and
    the helper becomes consistent with ``add_container``.

    :param inst: DirSrv instance to create the entry on
    :param dn: parent entry DN
    :param name: cn value for the new container
    :param sleep: when True (default, matching the original behaviour),
        pause one second after creation — presumably so consecutive
        operations get distinct timestamps/CSNs; TODO confirm
    :returns: the created nsContainer object
    """
    conts = nsContainers(inst, dn)
    cont = conts.create(properties={'cn': name})
    if sleep:
        time.sleep(1)
    return cont
def _create_inital(topo):
    """Create the MEP template, the MEP plugin config and the Users/Groups
    containers that entries in this module depend on.
    """
    inst = topo.standalone
    # '|'-joined source keeps the literal identical to the original value.
    mapped_attrs = ('cn: $uid|gidNumber: $gidNumber|'
                    'description: User private group for $uid').split('|')
    template = MEPTemplates(inst, DEFAULT_SUFFIX).create(properties={
        'cn': 'UPG Template',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: posixGroup',
        'mepMappedAttr': mapped_attrs,
    })
    MEPConfigs(inst).create(properties={
        'cn': 'UPG Definition1',
        'originScope': f'cn=Users,{DEFAULT_SUFFIX}',
        'originFilter': 'objectclass=posixaccount',
        'managedBase': f'cn=Groups,{DEFAULT_SUFFIX}',
        'managedTemplate': template.dn,
    })
    # The origin and managed bases referenced above must actually exist.
    containers = nsContainers(inst, DEFAULT_SUFFIX)
    for name in ('Users', 'Groups'):
        containers.create(properties={'cn': name})
def create_subtree_policy(self, dn, properties): """Creates all entries which are needed for the subtree password policy :param dn: Entry DN for the subtree pwpolicy :type dn: str :param properties: A dict with password policy settings :type properties: dict :returns: PwPolicyEntry instance """ # Verify target dn exists before getting started subtree_entry = Account(self._instance, dn) if not subtree_entry.exists(): raise ValueError('Can not create subtree password policy because the target dn does not exist') # Create the pwp container if needed pwp_containers = nsContainers(self._instance, basedn=dn) pwp_container = pwp_containers.ensure_state(properties={'cn': 'nsPwPolicyContainer'}) # Create policy entry pwp_entry = None properties['cn'] = 'cn=nsPwPolicyEntry_subtree,%s' % dn pwp_entries = PwPolicyEntries(self._instance, pwp_container.dn) pwp_entry = pwp_entries.create(properties=properties) try: # The CoS template entry (nsPwTemplateEntry) that has the pwdpolicysubentry # value pointing to the above (nsPwPolicyEntry) entry cos_template = None cos_templates = CosTemplates(self._instance, pwp_container.dn) cos_template = cos_templates.create(properties={'cosPriority': '1', 'pwdpolicysubentry': pwp_entry.dn, 'cn': 'cn=nsPwTemplateEntry,%s' % dn}) # The CoS specification entry at the subtree level cos_pointer_defs = CosPointerDefinitions(self._instance, dn) cos_pointer_defs.create(properties={'cosAttribute': 'pwdpolicysubentry default operational', 'cosTemplateDn': cos_template.dn, 'cn': 'nsPwPolicy_CoS'}) except ldap.LDAPError as e: # Something went wrong, remove what we have done if pwp_entry is not None: pwp_entry.delete() if cos_template is not None: cos_template.delete() raise e # make sure that local policies are enabled self.set_global_policy({'nsslapd-pwpolicy-local': 'on'}) return pwp_entry
def lock(self):
    """Set the entry dn to nsDisabledRole and ensure it exists.

    Adds this role's DN to the nsDisabledRole nested role under the root
    suffix, creating the supporting entries (nsManagedDisabledRole, the
    nsAccountInactivationTmp container, its CoS template and the classic
    CoS definition) when they are missing.

    :raises ValueError: if the role is already directly locked
    """
    # Refuse a double lock: a directly-locked role is already listed in
    # nsDisabledRole.
    current_status = self.status()
    if current_status["state"] == RoleState.DIRECTLY_LOCKED:
        raise ValueError(
            f"Role is already {current_status['state'].describe()}")

    inst = self._instance

    mapping_trees = MappingTrees(inst)
    root_suffix = ""
    root_suffix = mapping_trees.get_root_suffix_by_entry(self.dn)

    if root_suffix:
        # The managed role every disabled entry ultimately points at.
        managed_roles = ManagedRoles(inst, root_suffix)
        managed_role = managed_roles.ensure_state(
            properties={"cn": "nsManagedDisabledRole"})

        nested_roles = NestedRoles(inst, root_suffix)
        try:
            disabled_role = nested_roles.get("nsDisabledRole")
        except ldap.NO_SUCH_OBJECT:
            # We don't use "ensure_state" because we want to preserve the existing attributes
            disabled_role = nested_roles.create(properties={
                "cn": "nsDisabledRole",
                "nsRoleDN": managed_role.dn
            })
        # Nest this role under nsDisabledRole so it inherits the lock.
        disabled_role.add("nsRoleDN", self.dn)

        # Container holding the CoS template used for account inactivation.
        inact_containers = nsContainers(inst, basedn=root_suffix)
        inact_container = inact_containers.ensure_state(
            properties={'cn': 'nsAccountInactivationTmp'})

        # Template that applies nsAccountLock=true to matched entries.
        cos_templates = CosTemplates(inst, inact_container.dn)
        cos_templates.ensure_state(
            properties={
                'cosPriority': '1',
                'nsAccountLock': 'true',
                'cn': f'{disabled_role.dn}'
            })

        # Classic CoS definition wiring nsRole -> template lookup.
        cos_classic_defs = CosClassicDefinitions(inst, root_suffix)
        cos_classic_defs.ensure_state(
            properties={
                'cosAttribute': 'nsAccountLock operational',
                'cosSpecifier': 'nsRole',
                'cosTemplateDn': inact_container.dn,
                'cn': 'nsAccountInactivation_cos'
            })
def _test_base(topology):
    """Create the shared test container and turn on verbose logging.

    Enables replica error logging, internal-operation access logging,
    plugin logging and the audit log on every instance in the topology,
    restarting each one so the settings take effect.

    :returns: the created test container entry
    """
    primary = topology.ms["master1"]
    container = nsContainers(primary, SUFFIX).create(
        properties={'cn': 'test_container'})
    for server in topology:
        cfg = server.config
        cfg.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error')
        cfg.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
        cfg.set('nsslapd-plugin-logging', 'on')
        cfg.enable_log('audit')
        server.restart()
    return container
def create_user_policy(self, dn, properties): """Creates all entries which are needed for the user password policy :param dn: Entry DN for the subtree pwpolicy :type dn: str :param properties: A dict with password policy settings :type properties: dict :returns: PwPolicyEntry instance """ # Verify target dn exists before getting started user_entry = Account(self._instance, dn) if not user_entry.exists(): raise ValueError( 'Can not create user password policy because the target dn does not exist' ) dn_comps = ldap.dn.explode_dn(user_entry.dn) dn_comps.pop(0) parentdn = ",".join(dn_comps) # Create the pwp container if needed pwp_containers = nsContainers(self._instance, basedn=parentdn) pwp_container = pwp_containers.ensure_state( properties={'cn': 'nsPwPolicyContainer'}) # Create policy entry properties['cn'] = 'cn=nsPwPolicyEntry_user,%s' % dn pwp_entries = PwPolicyEntries(self._instance, pwp_container.dn) pwp_entry = pwp_entries.create(properties=properties) try: # Add policy to the entry user_entry.replace('pwdpolicysubentry', pwp_entry.dn) except ldap.LDAPError as e: # failure, undo what we have done pwp_entry.delete() raise e # make sure that local policies are enabled self.set_global_policy({'nsslapd-pwpolicy-local': 'on'}) return pwp_entry
def status(self):
    """Check if role is locked in nsDisabledRole (directly or indirectly)

    Verifies that the whole account-inactivation machinery (nsDisabledRole,
    the nsAccountInactivationTmp container, its CoS template and the
    classic CoS definition) is in place; if any piece is missing the role
    is reported as probably activated.

    :returns: a dict (built by _format_status_message) describing the state
    """
    inst = self._instance
    disabled_roles = {}
    try:
        mapping_trees = MappingTrees(inst)
        root_suffix = mapping_trees.get_root_suffix_by_entry(self.dn)
        roles = Roles(inst, root_suffix)
        # Mapping of disabled role -> parent role (None when direct).
        disabled_roles = roles.get_disabled_roles()
        nested_roles = NestedRoles(inst, root_suffix)
        disabled_role = nested_roles.get("nsDisabledRole")
        inact_containers = nsContainers(inst, basedn=root_suffix)
        inact_container = inact_containers.get('nsAccountInactivationTmp')

        cos_templates = CosTemplates(inst, inact_container.dn)
        cos_template = cos_templates.get(f'{disabled_role.dn}')
        cos_template.present('cosPriority', '1')
        cos_template.present('nsAccountLock', 'true')

        cos_classic_defs = CosClassicDefinitions(inst, root_suffix)
        cos_classic_def = cos_classic_defs.get('nsAccountInactivation_cos')
        cos_classic_def.present('cosAttribute', 'nsAccountLock operational')
        cos_classic_def.present('cosTemplateDn', inact_container.dn)
        cos_classic_def.present('cosSpecifier', 'nsRole')
    except ldap.NO_SUCH_OBJECT:
        # Any missing entry in the chain above means locking machinery is
        # not fully set up, so the role cannot be effectively locked.
        return self._format_status_message(RoleState.PROBABLY_ACTIVATED)

    # Case-insensitive DN comparison against every disabled role.
    for role, parent in disabled_roles.items():
        if str.lower(self.dn) == str.lower(role.dn):
            if parent is None:
                return self._format_status_message(
                    RoleState.DIRECTLY_LOCKED)
            else:
                return self._format_status_message(
                    RoleState.INDIRECTLY_LOCKED, parent)

    return self._format_status_message(RoleState.ACTIVATED)
def test_nested_entries_with_children(self, topology_m2, base_m2):
    """Check that conflict properly resolved for operations
    with nested entries with children

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Add 15 containers to m1 and wait for replication to happen
        2. Pause replication
        3. Create parent-child on master2 and master1
        4. Create parent-child on master1 and master2
        5. Create parent-child on master1 and master2 different child rdn
        6. Create parent-child on master1 and delete parent on master2
        7. Create parent on master1, delete it and parent-child on master2, delete them
        8. Create parent on master1, delete it and parent-two children on master2
        9. Create parent-two children on master1 and parent-child on master2, delete them
        10. Create three subsets inside existing container entry, applying only part of changes on m2
        11. Create more combinations of the subset with parent-child on m1 and parent on m2
        12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2
        13. Resume replication
        14. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
        9. It should pass
        10. It should pass
        11. It should pass
        12. It should pass
        13. It should pass
        14. It should pass
    """

    pytest.xfail("Issue 49591 - work in progress")
    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    repl = ReplicationManager(SUFFIX)
    test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
    test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
    _create_user(test_users_m1, 4000)
    _create_user(test_users_m1, 4001)

    # Each 'subN' container acts as an isolated sandbox for one conflict
    # scenario below.
    cont_list = []
    for num in range(15):
        cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num))
        cont_list.append(cont)

    repl.test_replication(M1, M2)

    # Pause replication so conflicting operations accumulate on both sides;
    # sleep=True between operations presumably keeps CSNs distinct — TODO confirm.
    topology_m2.pause_all_replicas()

    log.info("Create parent-child on master2 and master1")
    _create_container(M2, base_m2.dn, 'p0', sleep=True)
    cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True)
    _create_container(M1, cont_p.dn, 'c0', sleep=True)
    _create_container(M2, cont_p.dn, 'c0', sleep=True)

    log.info("Create parent-child on master1 and master2")
    cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True)
    _create_container(M2, base_m2.dn, 'p1', sleep=True)
    _create_container(M1, cont_p.dn, 'c1', sleep=True)
    _create_container(M2, cont_p.dn, 'c1', sleep=True)

    log.info("Create parent-child on master1 and master2 different child rdn")
    cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True)
    _create_container(M2, base_m2.dn, 'p2', sleep=True)
    _create_container(M1, cont_p.dn, 'c2', sleep=True)
    _create_container(M2, cont_p.dn, 'c3', sleep=True)

    log.info("Create parent-child on master1 and delete parent on master2")
    cont_num = 0
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
    _delete_container(cont_p_m2)
    cont_num += 1
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
    _delete_container(cont_p_m2, sleep=True)

    log.info("Create parent on master1, delete it and parent-child on master2, delete them")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1, sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2)
    cont_num += 1
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1)

    log.info("Create parent on master1, delete it and parent-two children on master2")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1, sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    _create_container(M2, cont_p_m2.dn, 'c0')
    _create_container(M2, cont_p_m2.dn, 'c1')
    cont_num += 1
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    _create_container(M2, cont_p_m2.dn, 'c0')
    _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _delete_container(cont_p_m1, sleep=True)

    log.info("Create parent-two children on master1 and parent-child on master2, delete them")
    cont_num += 1
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1')
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)

    log.info("Create three subsets inside existing container entry, applying only part of changes on m2")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')
    _create_container(M1, cont_p_m1.dn, 'c0')
    _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2, sleep=True)

    log.info("Create more combinations of the subset with parent-child on m1 and parent on m2")
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    _delete_container(cont_p_m1, sleep=True)
    cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')
    _delete_container(cont_c_m2)
    _delete_container(cont_p_m2, sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    _delete_container(cont_p_m1, sleep=True)
    _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
    cont_num += 1
    cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0',
                                  sleep=True)
    cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)
    _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)
    _delete_container(cont_c_m1, sleep=True)
    _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)
    _delete_container(cont_p_m1, sleep=True)

    log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2")
    cont_num += 1
    _delete_container(cont_list[cont_num])
    _modify_user(test_users_m1, 4000, sleep=True)
    _create_container(M2, cont_list[cont_num].dn, 'p0')
    _modify_user(test_users_m2, 4001)

    # Let both masters replay their queued changes and resolve conflicts.
    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2, timeout=60)

    # Collect every container DN up to three levels deep on each master.
    conts_dns = {}
    for num in range(1, 3):
        inst = topology_m2.ms["master{}".format(num)]
        conts_dns[inst.serverid] = []
        conts = nsContainers(inst, base_m2.dn)
        for cont in conts.list():
            conts_p = nsContainers(inst, cont.dn)
            for cont_p in conts_p.list():
                conts_c = nsContainers(inst, cont_p.dn)
                conts_dns[inst.serverid].extend(
                    [cont_c.dn for cont_c in conts_c.list()])
            conts_dns[inst.serverid].extend(
                [cont_p.dn for cont_p in conts_p.list()])
        conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

    # After conflict resolution both masters must converge to the same tree.
    assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid])

    user_dns_m1 = [user.dn for user in test_users_m1.list()]
    user_dns_m2 = [user.dn for user in test_users_m2.list()]
    assert set(user_dns_m1) == set(user_dns_m2)
def test_managed_entries(self, topology_m2):
    """Check that conflict properly resolved for operations
    with managed entries

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
    :setup: Two master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Create ou=managed_users and ou=managed_groups under test container
        2. Configure managed entries plugin and add a template to test container
        3. Add a user to m1 and wait for replication to happen
        4. Pause replication
        5. Create a user on m1 and m2 with a same group ID on both master
        6. Create a user on m1 and m2 with a different group ID on both master
        7. Resume replication
        8. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
    """

    pytest.xfail("Issue 49591 - work in progress")
    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]
    repl = ReplicationManager(SUFFIX)

    ous = OrganizationalUnits(M1, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX,
                                 rdn='ou={}'.format(ou_people.rdn))
    test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX,
                                 rdn='ou={}'.format(ou_people.rdn))

    # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)
    # The MEP template describing how managed group entries are generated.
    conts = nsContainers(M1, SUFFIX)
    template = conts.create(
        properties={
            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
            'cn': 'MEP Template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
            'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
        })
    repl.test_replication(M1, M2)

    # Point the MEP plugin config at the template on every master and
    # restart so the plugin picks it up.
    for inst in topology_m2.ms.values():
        conts = nsContainers(
            inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))
        conts.create(
            properties={
                'objectclass': 'top extensibleObject'.split(),
                'cn': 'config',
                'originScope': ou_people.dn,
                'originFilter': 'objectclass=posixAccount',
                'managedBase': ou_groups.dn,
                'managedTemplate': template.dn
            })
        inst.restart()

    _create_user(test_users_m1, 1, 1)

    # Accumulate conflicting adds on both masters while replication is off.
    topology_m2.pause_all_replicas()

    _create_user(test_users_m1, 2, 2, sleep=True)
    _create_user(test_users_m2, 2, 2, sleep=True)
    _create_user(test_users_m1, 3, 3, sleep=True)
    _create_user(test_users_m2, 3, 33)

    topology_m2.resume_all_replicas()
    repl.test_replication_topology(topology_m2)

    # After conflict resolution both masters must hold the same users.
    user_dns_m1 = [user.dn for user in test_users_m1.list()]
    user_dns_m2 = [user.dn for user in test_users_m2.list()]
    assert set(user_dns_m1) == set(user_dns_m2)
def test_nested_entries(self, topology_m3, base_m3):
    """Check that conflict properly resolved for operations
    with nested entries with children

    :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
    :setup: Three master replication, test container for entries, enable plugin logging,
            audit log, error log for replica and access log for internal
    :steps:
        1. Add 15 containers to m1 and wait for replication to happen
        2. Pause replication
        3. Create two child entries under each of two entries
        4. Create three child entries under each of three entries
        5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
           on m2 - delete one parent and create a child
        6. Test a few more parent-child combinations with three instances
        7. Resume replication
        8. Check that the entries on both masters are the same and replication is working
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should pass
    """

    pytest.xfail("Issue 49591 - work in progress")
    M1 = topology_m3.ms["master1"]
    M2 = topology_m3.ms["master2"]
    M3 = topology_m3.ms["master3"]
    repl = ReplicationManager(SUFFIX)

    # Each 'subN' container sandboxes one conflict scenario.
    cont_list = []
    for num in range(11):
        cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num))
        cont_list.append(cont)

    repl.test_replication(M1, M2)
    repl.test_replication(M1, M3)

    # Queue conflicting operations on all three masters.
    topology_m3.pause_all_replicas()

    log.info("Create two child entries under each of two entries")
    cont_num = -1
    for num in range(2):
        cont_num += 1
        _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

    log.info("Create three child entries under each of three entries")
    for num in range(3):
        cont_num += 1
        _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
        _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)
        _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

    log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent,"
             "on m2 - delete one parent and create a child")
    for inst1, inst2 in ((M1, M2), (M2, M1)):
        cont_num += 1
        cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
        cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1',
                                        sleep=True)
        cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
        cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1',
                                        sleep=True)
        _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)
        _delete_container(cont_p_m2_1, sleep=True)
        _delete_container(cont_p_m1_2, sleep=True)
        _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

    log.info("Test a few more parent-child combinations on three instances")
    for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):
        cont_num += 1
        cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
        _delete_container(cont_p_m1, sleep=True)

        cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
        cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')
        _delete_container(cont_c_m2)
        _delete_container(cont_p_m2, sleep=True)

        cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')
        _create_container(inst3, cont_p_m3.dn, 'c0')
        _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

    # Replay all queued changes and let conflict resolution run.
    topology_m3.resume_all_replicas()
    repl.test_replication_topology(topology_m3)

    # Collect container DNs up to three levels deep from every master.
    conts_dns = {}
    for num in range(1, 4):
        inst = topology_m3.ms["master{}".format(num)]
        conts_dns[inst.serverid] = []
        conts = nsContainers(inst, base_m3.dn)
        for cont in conts.list():
            conts_p = nsContainers(inst, cont.dn)
            for cont_p in conts_p.list():
                conts_c = nsContainers(inst, cont_p.dn)
                conts_dns[inst.serverid].extend(
                    [cont_c.dn for cont_c in conts_c.list()])
            conts_dns[inst.serverid].extend(
                [cont_p.dn for cont_p in conts_p.list()])
        conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

    # Every pair of masters must have converged to the same tree.
    for conts1, conts2 in permutations(conts_dns.values(), 2):
        assert set(conts1) == set(conts2)
def test_bulk_users_to_non_automemscope(topo_m4, _create_entries):
    """
    Adding bulk users to non-automem_scope and then running modrdn operation
    to change the ou to automem_scope

    :id: c532dc0c-be57-11e9-bcca-8c16451d917b
    :setup: Instance with 4 suppliers
    :steps:
        1. Running modrdn operation to change the ou to automem_scope
        2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1']
        3. Run AutomemberRebuildMembershipTask
        4. Check the same created in rest suppliers
    :expected results:
        1. Pass
        2. Pass
        3. Pass
        4. Pass
    """
    automem_scope = "cn=EmployeesNew,{}".format(DEFAULT_SUFFIX)
    grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX)
    default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX)
    default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX)

    # Container that is OUTSIDE the automember scope; it is renamed into
    # scope later via modrdn.
    nsContainers(topo_m4.ms['supplier1'],
                 DEFAULT_SUFFIX).create(properties={'cn': 'ChangeThisCN'})
    Group(topo_m4.ms['supplier1'],
          f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace(
        'autoMemberScope', automem_scope)
    # Restart all suppliers so the new autoMemberScope takes effect.
    for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'],
                     topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]:
        instance.restart()
    # Adding BulkUsers
    for number in range(3000):
        create_entry(topo_m4, f'automemusrs{number}',
                     f'cn=ChangeThisCN,{DEFAULT_SUFFIX}',
                     '5995', '5693', 'Supervisor')
    try:
        for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                         topo_m4.ms['supplier4']]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Users are outside the scope, so no group should have members yet.
        for instance, grp in [(topo_m4.ms['supplier2'], default_group1),
                              (topo_m4.ms['supplier1'],
                               "cn=Managers,{}".format(grp_container))]:
            assert not nsAdminGroup(
                instance, grp).get_attr_vals_utf8('member')
        # Deleting BulkUsers "User_Name" Suffix "Nof_Users"
        # modrdn moves the whole container into the automember scope.
        topo_m4.ms['supplier3'].rename_s(
            f"CN=ChangeThisCN,{DEFAULT_SUFFIX}", f'cn=EmployeesNew',
            newsuperior=DEFAULT_SUFFIX, delold=1)
        for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                         topo_m4.ms['supplier4']]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # Rebuild membership for the entries that are now in scope.
        AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(
            properties={
                'basedn': automem_scope,
                'filter': "objectClass=posixAccount"
            })
        for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'],
                         topo_m4.ms['supplier4']]:
            ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(
                topo_m4.ms['supplier1'], supplier, timeout=30000)
        # All 3000 users must now be members of each replsubGroups group,
        # observed from every supplier.
        for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'),
                              (topo_m4.ms['supplier2'], 'Interns'),
                              (topo_m4.ms['supplier3'], 'Contractors'),
                              (topo_m4.ms['supplier4'], 'Visitors')]:
            assert len(
                nsAdminGroup(
                    instance,
                    f'cn={grp},{grp_container}').get_attr_vals_utf8(
                        'member')) == 3000
        # The default (fallback) groups must stay empty.
        for grp, instance in [(default_group1, topo_m4.ms['supplier2']),
                              (default_group2, topo_m4.ms['supplier3'])]:
            assert not nsAdminGroup(
                instance, grp).get_attr_vals_utf8('member')
    finally:
        # Always clean up the bulk users and the renamed container.
        delete_users_and_wait(topo_m4, automem_scope)
        nsContainer(topo_m4.ms['supplier1'],
                    "CN=EmployeesNew,{}".format(DEFAULT_SUFFIX)).delete()
def add_container(inst, dn, name):
    """Create a container entry (cn=*name*) under *dn* and return it.

    :param inst: DirSrv instance to create the entry on
    :param dn: parent entry DN
    :param name: cn value for the new container
    :returns: the created nsContainer object
    """
    return nsContainers(inst, dn).create(properties={'cn': name})