def _test_conflict_existing_single_valued_link(self, sync_order): """ Tests a single-valued link conflict, where the conflicting link value already exists (as inactive) on both DCs. """ # create the link objects src_ou = self.unique_dn("OU=src") src_guid = self.add_object(self.ldb_dc1, src_ou) target1_ou = self.unique_dn("OU=target1") target2_ou = self.unique_dn("OU=target2") target1_guid = self.add_object(self.ldb_dc1, target1_ou) target2_guid = self.add_object(self.ldb_dc1, target2_ou) # add the links, but then delete them self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou) self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou) self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target2_ou) self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target2_ou) self.sync_DCs() # re-add the links independently on each DC self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou) self.ensure_unique_timestamp() self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou) # try to sync the 2 DCs self.sync_DCs(sync_order=sync_order) res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid, scope=SCOPE_BASE, attrs=["managedBy"]) res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid, scope=SCOPE_BASE, attrs=["managedBy"]) # check the object has only have one occurence of the single-valued # attribute and it matches on both DCs self.assert_attrs_match(res1, res2, "managedBy", 1) # here we expect DC2 to win because it has the more recent link self.assertTrue( str(res1[0]["managedBy"][0]) == target2_ou, "Expected most recent update to win conflict") # we can't query the deleted links over LDAP, but we can check DRS # to make sure the DC kept a copy of the conflicting link link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0, misc.GUID(src_guid), misc.GUID(target1_guid)) link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE, misc.GUID(src_guid), misc.GUID(target2_guid)) 
self._check_replicated_links(src_ou, [link1, link2])
def _singleval_link_conflict_deleted_loser(self, sync_order):
    """
    Tests a single-valued link conflict, where the losing link value is
    deleted.

    :param sync_order: controls which DC replicates first when the two DCs
        are finally synced (passed through to sync_DCs()).
    """
    src_ou = self.unique_dn("OU=src")
    src_guid = self.add_object(self.ldb_dc1, src_ou)
    self.sync_DCs()

    # create a unique target on each DC
    target1_ou = self.unique_dn("OU=target1")
    target2_ou = self.unique_dn("OU=target2")
    target1_guid = self.add_object(self.ldb_dc1, target1_ou)
    target2_guid = self.add_object(self.ldb_dc2, target2_ou)

    # add the links - we want the link to end up deleted on DC2, but active
    # on DC1. DC1 has the better version and DC2 has the better timestamp -
    # the better version should win
    # (DC1 modifies its link 3 times: add/del/add => version 3;
    #  DC2 only twice: add/del => version 2, even though its changes
    #  happen after ensure_unique_timestamp())
    self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
    self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
    self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
    self.ensure_unique_timestamp()
    self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
    self.del_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)

    self.sync_DCs(sync_order=sync_order)

    res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
                               scope=SCOPE_BASE, attrs=["managedBy"])
    res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
                               scope=SCOPE_BASE, attrs=["managedBy"])

    # check the object has only one occurrence of the single-valued
    # attribute and it matches on both DCs
    self.assert_attrs_match(res1, res2, "managedBy", 1)

    self.assertTrue(
        str(res1[0]["managedBy"][0]) == target1_ou,
        "Expected most recent update to win conflict")

    # we can't query the deleted links over LDAP, but we can check DRS
    # to make sure the DC kept a copy of the conflicting link
    # (link2 has flags=0, i.e. DC2's deleted value is retained as inactive)
    link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
                         drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                         misc.GUID(src_guid), misc.GUID(target1_guid))
    link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
                         misc.GUID(src_guid), misc.GUID(target2_guid))
    self._check_replicated_links(src_ou, [link1, link2])
def _singleval_link_conflict_deleted_winner(self, sync_order):
    """
    Tests a single-value link conflict where the more-up-to-date link value
    is deleted.

    :param sync_order: controls which DC replicates first when the two DCs
        are finally synced (passed through to sync_DCs()).
    """
    src_ou = self.unique_dn("OU=src")
    src_guid = self.add_object(self.ldb_dc1, src_ou)
    self.sync_DCs()

    # create a unique target on each DC
    target1_ou = self.unique_dn("OU=target1")
    target2_ou = self.unique_dn("OU=target2")
    target1_guid = self.add_object(self.ldb_dc1, target1_ou)
    target2_guid = self.add_object(self.ldb_dc2, target2_ou)

    # add the links for the respective targets, and delete one of the links
    # (DC1's delete has the most recent timestamp, but it leaves DC1's
    # value inactive - DC2's value is the only active one left)
    self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
    self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
    self.ensure_unique_timestamp()
    self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)

    # sync the 2 DCs
    self.sync_DCs(sync_order=sync_order)

    res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
                               scope=SCOPE_BASE, attrs=["managedBy"])
    res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
                               scope=SCOPE_BASE, attrs=["managedBy"])

    # Although the more up-to-date link value is deleted, this shouldn't
    # trump DC1's active link
    self.assert_attrs_match(res1, res2, "managedBy", 1)

    self.assertTrue(
        str(res1[0]["managedBy"][0]) == target2_ou,
        "Expected active link win conflict")

    # we can't query the deleted links over LDAP, but we can check that
    # the deleted links exist using DRS
    link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
                         misc.GUID(src_guid), misc.GUID(target1_guid))
    link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
                         drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                         misc.GUID(src_guid), misc.GUID(target2_guid))
    self._check_replicated_links(src_ou, [link1, link2])
def _test_conflict_single_valued_link(self, sync_order):
    """
    Tests a simple single-value link conflict, i.e. each DC adds a link to
    the same source object but linking to different targets.

    :param sync_order: controls which DC replicates first when the two DCs
        are finally synced (passed through to sync_DCs()).
    """
    src_ou = self.unique_dn("OU=src")
    src_guid = self.add_object(self.ldb_dc1, src_ou)
    self.sync_DCs()

    # create a unique target on each DC
    target1_ou = self.unique_dn("OU=target1")
    target2_ou = self.unique_dn("OU=target2")
    target1_guid = self.add_object(self.ldb_dc1, target1_ou)
    target2_guid = self.add_object(self.ldb_dc2, target2_ou)

    # link the test OU to the respective targets created
    # (DC2's link is added strictly later, so it should win the conflict)
    self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
    self.ensure_unique_timestamp()
    self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)

    # sync the 2 DCs
    self.sync_DCs(sync_order=sync_order)

    res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
                               scope=SCOPE_BASE, attrs=["managedBy"])
    res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
                               scope=SCOPE_BASE, attrs=["managedBy"])

    # check the object has only one occurrence of the single-valued
    # attribute and it matches on both DCs
    self.assert_attrs_match(res1, res2, "managedBy", 1)

    self.assertTrue(
        str(res1[0]["managedBy"][0]) == target2_ou,
        "Expected most recent update to win conflict")

    # we can't query the deleted links over LDAP, but we can check DRS
    # to make sure the DC kept a copy of the conflicting link
    # (link1 has flags=0, i.e. the losing link is retained as inactive)
    link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
                         misc.GUID(src_guid), misc.GUID(target1_guid))
    link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
                         drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                         misc.GUID(src_guid), misc.GUID(target2_guid))
    self._check_replicated_links(src_ou, [link1, link2])
def test_sort_behaviour_single_object(self):
    """
    Testing sorting behaviour on single objects.

    Verifies that the linked attributes returned by DsGetNCChanges for a
    single object (EXOP_REPL_OBJ) come back in the expected sorted order,
    both before and after one of the links is made inactive.
    """
    # list.sort() lost its cmp= keyword in Python 3; wrap the comparator
    from functools import cmp_to_key

    user1_dn = "cn=test_user1,%s" % self.ou
    user2_dn = "cn=test_user2,%s" % self.ou
    user3_dn = "cn=test_user3,%s" % self.ou
    group_dn = "cn=test_group,%s" % self.ou
    self.ldb_dc1.add({"dn": user1_dn, "objectclass": "user"})
    self.ldb_dc1.add({"dn": user2_dn, "objectclass": "user"})
    self.ldb_dc1.add({"dn": user3_dn, "objectclass": "user"})
    self.ldb_dc1.add({"dn": group_dn, "objectclass": "group"})

    u1_guid = misc.GUID(self.ldb_dc1.search(
        base=user1_dn, attrs=["objectGUID"])[0]['objectGUID'][0])
    u2_guid = misc.GUID(self.ldb_dc1.search(
        base=user2_dn, attrs=["objectGUID"])[0]['objectGUID'][0])
    u3_guid = misc.GUID(self.ldb_dc1.search(
        base=user3_dn, attrs=["objectGUID"])[0]['objectGUID'][0])
    g_guid = misc.GUID(self.ldb_dc1.search(
        base=group_dn, attrs=["objectGUID"])[0]['objectGUID'][0])

    self.add_linked_attribute(group_dn, user1_dn, attr='member')
    self.add_linked_attribute(group_dn, user2_dn, attr='member')
    self.add_linked_attribute(group_dn, user3_dn, attr='member')
    self.add_linked_attribute(group_dn, user1_dn, attr='managedby')
    self.add_linked_attribute(group_dn, user2_dn, attr='nonSecurityMember')
    self.add_linked_attribute(group_dn, user3_dn, attr='nonSecurityMember')

    # this link starts out active; it is made inactive further below
    set_inactive = AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
                                drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                g_guid, u3_guid)

    expected_links = set([
        set_inactive,
        AbstractLink(drsuapi.DRSUAPI_ATTID_member,
                     drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                     g_guid, u1_guid),
        AbstractLink(drsuapi.DRSUAPI_ATTID_member,
                     drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                     g_guid, u2_guid),
        AbstractLink(drsuapi.DRSUAPI_ATTID_member,
                     drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                     g_guid, u3_guid),
        AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
                     drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                     g_guid, u1_guid),
        AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
                     drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                     g_guid, u2_guid),
    ])

    dc_guid_1 = self.ldb_dc1.get_invocation_id()
    drs, drs_handle = self._ds_bind(self.dnsname_dc1)
    req8 = self._exop_req8(dest_dsa=None,
                           invocation_id=dc_guid_1,
                           nc_dn_str=group_dn,
                           exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
    (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)

    no_inactive = []
    for link in ctr.linked_attributes:
        target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
                                 link.value.blob).guid
        no_inactive.append((link, target_guid))
        self.assertTrue(
            AbstractLink(link.attid, link.flags,
                         link.identifier.guid,
                         target_guid) in expected_links)

    # BUGFIX: list.sort(cmp=...) is Python 2 only; use cmp_to_key so the
    # comparator also works on Python 3 (behaviour is identical)
    no_inactive.sort(key=cmp_to_key(_linked_attribute_compare))

    # assert the two arrays are the same
    self.assertEqual(len(expected_links), ctr.linked_attributes_count)
    self.assertEqual([x[0] for x in no_inactive], ctr.linked_attributes)

    self.remove_linked_attribute(group_dn, user3_dn,
                                 attr='nonSecurityMember')

    # Set the link inactive
    expected_links.remove(set_inactive)
    set_inactive.flags = 0
    expected_links.add(set_inactive)

    has_inactive = []
    (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
    for link in ctr.linked_attributes:
        target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
                                 link.value.blob).guid
        has_inactive.append((link, target_guid))
        self.assertTrue(
            AbstractLink(link.attid, link.flags,
                         link.identifier.guid,
                         target_guid) in expected_links)

    has_inactive.sort(key=cmp_to_key(_linked_attribute_compare))

    # assert the two arrays are the same
    self.assertEqual(len(expected_links), ctr.linked_attributes_count)
    self.assertEqual([x[0] for x in has_inactive], ctr.linked_attributes)
def test_link_utdv_hwm(self):
    """
    Tests that highwatermark and uptodateness-vector cursors correctly
    filter replication of linked attribute changes.
    """
    # NOTE(review): "_get_indentifier" looks like a typo of
    # "_get_identifier" - confirm against the helper's definition
    # before renaming here.
    ou1 = "OU=get_anc1,%s" % self.ou
    self.ldb_dc1.add({"dn": ou1, "objectclass": "organizationalUnit"})
    ou1_id = self._get_indentifier(self.ldb_dc1, ou1)
    ou2 = "OU=get_anc2,%s" % ou1
    self.ldb_dc1.add({"dn": ou2, "objectclass": "organizationalUnit"})
    ou2_id = self._get_indentifier(self.ldb_dc1, ou2)
    dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
    self.ldb_dc1.add({
        "dn": dc3,
        "objectclass": "computer",
        "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE |
                                      samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
    })
    dc3_id = self._get_indentifier(self.ldb_dc1, dc3)
    cn3 = "CN=get_anc3,%s" % ou2
    self.ldb_dc1.add({
        "dn": cn3,
        "objectclass": "container",
    })
    cn3_id = self._get_indentifier(self.ldb_dc1, cn3)

    # add a link and capture the cursors (hwm0/utdv0) for the full state
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou1)
    m["managedBy"] = ldb.MessageElement(dc3, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    ou1_managedBy_dc3 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        ou1_id.guid, dc3_id.guid)
    (hwm0, utdv0) = self._check_replication([ou2, dc3, cn3, ou1],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                                            expected_links=[ou1_managedBy_dc3],
                                            nc_object_count=4)

    # make one further link change; with the earlier cursors only this
    # new link (and its source object) should replicate
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, dc3)
    m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    dc3_managedBy_ou2 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        dc3_id.guid, ou2_id.guid)

    # filtered by highwatermark: only 1 object comes back
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            expected_links=[dc3_managedBy_ou2],
                            highwatermark=hwm0,
                            nc_object_count=1)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[dc3_managedBy_ou2],
                            highwatermark=hwm0,
                            nc_object_count=1)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            expected_links=[dc3_managedBy_ou2],
                            highwatermark=hwm0,
                            nc_object_count=1)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[dc3_managedBy_ou2],
                            highwatermark=hwm0,
                            nc_object_count=1)

    # filtered by uptodateness-vector: WRIT_REP still reports all 4
    # NC objects, CRITICAL_ONLY just 1
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            expected_links=[dc3_managedBy_ou2],
                            uptodateness_vector=utdv0,
                            nc_object_count=4)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[dc3_managedBy_ou2],
                            uptodateness_vector=utdv0,
                            nc_object_count=4)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            expected_links=[dc3_managedBy_ou2],
                            uptodateness_vector=utdv0,
                            nc_object_count=1)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[dc3_managedBy_ou2],
                            uptodateness_vector=utdv0,
                            nc_object_count=1)
def test_link_utdv_hwm(self):
    """Tests and verifies the DRS_GET_ANC behaviour."""
    # NOTE(review): "_get_indentifier" looks like a typo of
    # "_get_identifier" - confirm against the helper's definition
    # before renaming here.
    ou1 = "OU=get_anc1,%s" % self.ou
    self.ldb_dc1.add({"dn": ou1, "objectclass": "organizationalUnit"})
    ou1_id = self._get_indentifier(self.ldb_dc1, ou1)
    ou2 = "OU=get_anc2,%s" % ou1
    self.ldb_dc1.add({"dn": ou2, "objectclass": "organizationalUnit"})
    ou2_id = self._get_indentifier(self.ldb_dc1, ou2)
    dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
    self.ldb_dc1.add({
        "dn": dc3,
        "objectclass": "computer",
        "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE |
                                      samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
    })
    dc3_id = self._get_indentifier(self.ldb_dc1, dc3)

    # baseline: full replication with each flag combination
    (hwm1, utdv1) = self._check_replication([ou1, ou2, dc3],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)

    # modify ou1 - it should now sort last in WRIT_REP order
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou1)
    m["displayName"] = ldb.MessageElement("OU1", ldb.FLAG_MOD_ADD,
                                          "displayName")
    self.ldb_dc1.modify(m)
    (hwm2, utdv2) = self._check_replication([ou2, dc3, ou1],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    # with the old cursors only the modified ou1 replicates
    self._check_replication([ou1],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            highwatermark=hwm1)
    self._check_replication([ou1],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            highwatermark=hwm1)
    self._check_replication([ou1],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            uptodateness_vector=utdv1)

    # modify ou2 as well
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou2)
    m["displayName"] = ldb.MessageElement("OU2", ldb.FLAG_MOD_ADD,
                                          "displayName")
    self.ldb_dc1.modify(m)
    (hwm3, utdv3) = self._check_replication([dc3, ou1, ou2],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([ou1, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            highwatermark=hwm1)
    self._check_replication([ou1, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            highwatermark=hwm1)
    self._check_replication([ou1, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            uptodateness_vector=utdv1)

    # modify the test root OU too
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, self.ou)
    m["displayName"] = ldb.MessageElement("OU", ldb.FLAG_MOD_ADD,
                                          "displayName")
    self.ldb_dc1.modify(m)
    (hwm4, utdv4) = self._check_replication([dc3, ou1, ou2, self.ou],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([self.ou, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            uptodateness_vector=utdv2)

    # add a plain container under ou2
    cn3 = "CN=get_anc3,%s" % ou2
    self.ldb_dc1.add({
        "dn": cn3,
        "objectclass": "container",
    })
    cn3_id = self._get_indentifier(self.ldb_dc1, cn3)
    (hwm5, utdv5) = self._check_replication([dc3, ou1, ou2, self.ou, cn3],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([self.ou, ou1, ou2, dc3, cn3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)

    # add a linked attribute on ou2
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou2)
    m["managedBy"] = ldb.MessageElement(dc3, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    ou2_managedBy_dc3 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        ou2_id.guid, dc3_id.guid)
    (hwm6, utdv6) = self._check_replication([dc3, ou1, self.ou, cn3, ou2],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                                            expected_links=[ou2_managedBy_dc3])
    # Can fail against Windows due to equal precedence of dc3, cn3
    self._check_replication([self.ou, ou1, ou2, dc3, cn3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[ou2_managedBy_dc3])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            uptodateness_vector=utdv5,
                            expected_links=[ou2_managedBy_dc3])
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            uptodateness_vector=utdv5)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            uptodateness_vector=utdv5)

    # add a linked attribute on dc3 (a critical object)
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, dc3)
    m["managedBy"] = ldb.MessageElement(ou1, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    dc3_managedBy_ou1 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        dc3_id.guid, ou1_id.guid)
    (hwm7, utdv7) = self._check_replication(
        [ou1, self.ou, cn3, ou2, dc3],
        drsuapi.DRSUAPI_DRS_WRIT_REP,
        expected_links=[ou2_managedBy_dc3, dc3_managedBy_ou1])
    # Can fail against Windows due to equal precedence of dc3, cn3
    #self._check_replication([self.ou,ou1,ou2,dc3,cn3],
    #    drsuapi.DRSUAPI_DRS_WRIT_REP|
    #    drsuapi.DRSUAPI_DRS_GET_ANC,
    #    expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            expected_links=[dc3_managedBy_ou1])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            expected_links=[dc3_managedBy_ou1])
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[dc3_managedBy_ou1])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
                            expected_links=[dc3_managedBy_ou1])

    # replace dc3's managedBy; the old link becomes inactive but is
    # still replicated alongside the new one
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, dc3)
    m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_REPLACE,
                                        "managedBy")
    self.ldb_dc1.modify(m)
    dc3_managedBy_ou1.flags &= ~drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
    dc3_managedBy_ou2 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        dc3_id.guid, ou2_id.guid)
    (hwm8, utdv8) = self._check_replication([ou1, self.ou, cn3, ou2, dc3],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                                            expected_links=[
                                                ou2_managedBy_dc3,
                                                dc3_managedBy_ou1,
                                                dc3_managedBy_ou2
                                            ])
    # Can fail against Windows due to equal precedence of dc3, cn3
    #self._check_replication([self.ou,ou1,ou2,dc3,cn3],
    #    drsuapi.DRSUAPI_DRS_WRIT_REP|
    #    drsuapi.DRSUAPI_DRS_GET_ANC,
    #    expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1,dc3_managedBy_ou2])
    self._check_replication(
        [dc3],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
    self._check_replication(
        [self.ou, ou1, ou2, dc3],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
    self._check_replication(
        [dc3],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])

    # using the pre-replace highwatermark only the links replicate
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)

    # same again with the pre-replace uptodateness-vector
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
def test_link_utdv_hwm(self):
    """Tests and verifies the DRS_GET_ANC behaviour."""
    ou1 = "OU=get_anc1,%s" % self.ou
    self.ldb_dc1.add({"dn": ou1, "objectclass": "organizationalUnit"})
    ou1_id = self._get_identifier(self.ldb_dc1, ou1)
    ou2 = "OU=get_anc2,%s" % ou1
    self.ldb_dc1.add({"dn": ou2, "objectclass": "organizationalUnit"})
    ou2_id = self._get_identifier(self.ldb_dc1, ou2)
    dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
    self.ldb_dc1.add({
        "dn": dc3,
        "objectclass": "computer",
        "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE |
                                      samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
    })
    dc3_id = self._get_identifier(self.ldb_dc1, dc3)

    # baseline: full replication with each flag combination
    (hwm1, utdv1) = self._check_replication([ou1, ou2, dc3],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)

    # modify ou1 - it should now sort last in WRIT_REP order
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou1)
    m["displayName"] = ldb.MessageElement("OU1", ldb.FLAG_MOD_ADD,
                                          "displayName")
    self.ldb_dc1.modify(m)
    (hwm2, utdv2) = self._check_replication([ou2, dc3, ou1],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    # with the old cursors only the modified ou1 replicates
    self._check_replication([ou1],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            highwatermark=hwm1)
    self._check_replication([ou1],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            highwatermark=hwm1)
    self._check_replication([ou1],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            uptodateness_vector=utdv1)

    # modify ou2 as well
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou2)
    m["displayName"] = ldb.MessageElement("OU2", ldb.FLAG_MOD_ADD,
                                          "displayName")
    self.ldb_dc1.modify(m)
    (hwm3, utdv3) = self._check_replication([dc3, ou1, ou2],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([ou1, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            highwatermark=hwm1)
    self._check_replication([ou1, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            highwatermark=hwm1)
    self._check_replication([ou1, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            uptodateness_vector=utdv1)

    # modify the test root OU too
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, self.ou)
    m["displayName"] = ldb.MessageElement("OU", ldb.FLAG_MOD_ADD,
                                          "displayName")
    self.ldb_dc1.modify(m)
    (hwm4, utdv4) = self._check_replication([dc3, ou1, ou2, self.ou],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([self.ou, ou2],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            uptodateness_vector=utdv2)

    # add a plain container under ou2
    cn3 = "CN=get_anc3,%s" % ou2
    self.ldb_dc1.add({
        "dn": cn3,
        "objectclass": "container",
    })
    cn3_id = self._get_identifier(self.ldb_dc1, cn3)
    (hwm5, utdv5) = self._check_replication([dc3, ou1, ou2, self.ou, cn3],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP)
    self._check_replication([self.ou, ou1, ou2, dc3, cn3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)

    # add a linked attribute on ou2
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, ou2)
    m["managedBy"] = ldb.MessageElement(dc3, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    ou2_managedBy_dc3 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        ou2_id.guid, dc3_id.guid)
    (hwm6, utdv6) = self._check_replication([dc3, ou1, self.ou, cn3, ou2],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                                            expected_links=[ou2_managedBy_dc3])
    # Can fail against Windows due to equal precedence of dc3, cn3
    self._check_replication([self.ou, ou1, ou2, dc3, cn3],
                            drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[ou2_managedBy_dc3])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                            uptodateness_vector=utdv5,
                            expected_links=[ou2_managedBy_dc3])
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            uptodateness_vector=utdv5)
    self._check_replication([],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            uptodateness_vector=utdv5)

    # add a linked attribute on dc3 (a critical object)
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, dc3)
    m["managedBy"] = ldb.MessageElement(ou1, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    dc3_managedBy_ou1 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        dc3_id.guid, ou1_id.guid)
    (hwm7, utdv7) = self._check_replication(
        [ou1, self.ou, cn3, ou2, dc3],
        drsuapi.DRSUAPI_DRS_WRIT_REP,
        expected_links=[ou2_managedBy_dc3, dc3_managedBy_ou1])
    # Can fail against Windows due to equal precedence of dc3, cn3
    # self._check_replication([self.ou,ou1,ou2,dc3,cn3],
    #     drsuapi.DRSUAPI_DRS_WRIT_REP|
    #     drsuapi.DRSUAPI_DRS_GET_ANC,
    #     expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            expected_links=[dc3_managedBy_ou1])
    self._check_replication([dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            expected_links=[dc3_managedBy_ou1])
    self._check_replication([self.ou, ou1, ou2, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            expected_links=[dc3_managedBy_ou1])
    # GET_TGT seems to override DRS_CRITICAL_ONLY and also returns any
    # object(s) that relate to the linked attributes (similar to GET_ANC)
    self._check_replication([ou1, dc3],
                            drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
                            more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
                            expected_links=[dc3_managedBy_ou1],
                            dn_ordered=False)

    # Change DC3's managedBy to OU2 instead of OU1
    # Note that the OU1 managedBy linked attribute will still exist as
    # a tombstone object (and so will be returned in the replication still)
    m = ldb.Message()
    m.dn = ldb.Dn(self.ldb_dc1, dc3)
    m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_REPLACE,
                                        "managedBy")
    self.ldb_dc1.modify(m)
    dc3_managedBy_ou1.flags &= ~drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
    dc3_managedBy_ou2 = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        dc3_id.guid, ou2_id.guid)
    (hwm8, utdv8) = self._check_replication([ou1, self.ou, cn3, ou2, dc3],
                                            drsuapi.DRSUAPI_DRS_WRIT_REP,
                                            expected_links=[
                                                ou2_managedBy_dc3,
                                                dc3_managedBy_ou1,
                                                dc3_managedBy_ou2
                                            ])
    # Can fail against Windows due to equal precedence of dc3, cn3
    # self._check_replication([self.ou,ou1,ou2,dc3,cn3],
    #     drsuapi.DRSUAPI_DRS_WRIT_REP|
    #     drsuapi.DRSUAPI_DRS_GET_ANC,
    #     expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1,dc3_managedBy_ou2])
    self._check_replication(
        [dc3],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
    self._check_replication(
        [self.ou, ou1, ou2, dc3],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
    # GET_TGT will also return any DNs referenced by the linked attributes
    # (including the Tombstone attribute)
    self._check_replication(
        [ou1, ou2, dc3],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        dn_ordered=False)

    # Use the highwater-mark prior to changing ManagedBy - this should
    # only return the old/Tombstone and new linked attributes (we already
    # know all the DNs)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        highwatermark=hwm7)

    # Repeat the above set of tests using the uptodateness_vector
    # instead of the highwater-mark
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_WRIT_REP |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
        drsuapi.DRSUAPI_DRS_GET_ANC,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
    self._check_replication(
        [],
        drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
        more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
        expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
        uptodateness_vector=utdv7)
def test_do_single_repl(self):
    """
    Make sure that DRSUAPI_EXOP_REPL_OBJ never replicates more than
    one object, even when we use DRS_GET_ANC/GET_TGT.
    """

    # Build a small tree on DC1: ou1 -> ou2 -> dc3 (a critical object,
    # since it is a disabled server trust account).
    ou1 = "OU=get_anc1,%s" % self.ou
    self.ldb_dc1.add({"dn": ou1, "objectclass": "organizationalUnit"})
    ou1_id = self._get_identifier(self.ldb_dc1, ou1)
    ou2 = "OU=get_anc2,%s" % ou1
    self.ldb_dc1.add({"dn": ou2, "objectclass": "organizationalUnit"})
    ou2_id = self._get_identifier(self.ldb_dc1, ou2)
    dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
    self.ldb_dc1.add({
        "dn": dc3,
        "objectclass": "computer",
        "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE |
                                      samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
    })
    dc3_id = self._get_identifier(self.ldb_dc1, dc3)

    # Add some linked attributes (for checking GET_TGT behaviour)
    m = ldb.Message()
    # NOTE(review): the Dn objects below are parsed against ldb_dc2 but the
    # modify is applied to ldb_dc1 — looks unintentional; confirm whether
    # this should be ldb_dc1 for consistency (the DN text is the same).
    m.dn = ldb.Dn(self.ldb_dc2, ou1)
    m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    ou1_link = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        ou1_id.guid, ou2_id.guid)

    m.dn = ldb.Dn(self.ldb_dc2, dc3)
    m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
    self.ldb_dc1.modify(m)
    dc3_link = AbstractLink(
        drsuapi.DRSUAPI_ATTID_managedBy,
        drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
        dc3_id.guid, ou2_id.guid)

    # REPL_OBJ on ou1 with GET_TGT: only ou1 itself (plus its link) is
    # expected back, not the link target object.
    req = self._getnc_req10(dest_dsa=None,
                            invocation_id=self.ldb_dc1.get_invocation_id(),
                            nc_dn_str=ou1,
                            exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
                            replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP,
                            more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
    (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
    self._check_ctr6(ctr, [ou1], expected_links=[ou1_link])

    # DRSUAPI_DRS_WRIT_REP means that we should only replicate the dn we give (dc3).
    # DRSUAPI_DRS_GET_ANC means that we should also replicate its ancestors, but
    # Windows doesn't do this if we use both.
    req = self._getnc_req10(dest_dsa=None,
                            invocation_id=self.ldb_dc1.get_invocation_id(),
                            nc_dn_str=dc3,
                            exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
                            replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
    (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
    self._check_ctr6(ctr, [dc3], expected_links=[dc3_link])

    # Even though the ancestor of ou2 (ou1) has changed since last hwm, and we're
    # sending DRSUAPI_DRS_GET_ANC, the expected response is that it will only try
    # and replicate the single object still.
    req = self._getnc_req10(dest_dsa=None,
                            invocation_id=self.ldb_dc1.get_invocation_id(),
                            nc_dn_str=ou2,
                            exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
                            replica_flags=drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
                            drsuapi.DRSUAPI_DRS_GET_ANC,
                            more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
    (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
    self._check_ctr6(ctr, [ou2])
def test_sort_behaviour_single_object(self):
    """
    Testing sorting behaviour of linked attributes on a single object.

    Creates a group with member/managedBy/nonSecurityMember links to three
    users, replicates it via DRSUAPI_EXOP_REPL_OBJ, and checks that the
    linked attributes in the reply arrive in the expected sorted order —
    both while all links are active and after one link is made inactive.
    """
    # list.sort(cmp=...) is Python-2-only; wrap the comparison function
    # so this test also runs on Python 3.
    from functools import cmp_to_key

    user1_dn = "cn=test_user1,%s" % self.ou
    user2_dn = "cn=test_user2,%s" % self.ou
    user3_dn = "cn=test_user3,%s" % self.ou
    group_dn = "cn=test_group,%s" % self.ou
    self.ldb_dc1.add({"dn": user1_dn, "objectclass": "user"})
    self.ldb_dc1.add({"dn": user2_dn, "objectclass": "user"})
    self.ldb_dc1.add({"dn": user3_dn, "objectclass": "user"})
    self.ldb_dc1.add({"dn": group_dn, "objectclass": "group"})

    u1_guid = misc.GUID(self.ldb_dc1.search(base=user1_dn,
                                            attrs=["objectGUID"])[0]['objectGUID'][0])
    u2_guid = misc.GUID(self.ldb_dc1.search(base=user2_dn,
                                            attrs=["objectGUID"])[0]['objectGUID'][0])
    u3_guid = misc.GUID(self.ldb_dc1.search(base=user3_dn,
                                            attrs=["objectGUID"])[0]['objectGUID'][0])
    g_guid = misc.GUID(self.ldb_dc1.search(base=group_dn,
                                           attrs=["objectGUID"])[0]['objectGUID'][0])

    self.add_linked_attribute(group_dn, user1_dn, attr='member')
    self.add_linked_attribute(group_dn, user2_dn, attr='member')
    self.add_linked_attribute(group_dn, user3_dn, attr='member')
    self.add_linked_attribute(group_dn, user1_dn, attr='managedby')
    self.add_linked_attribute(group_dn, user2_dn, attr='nonSecurityMember')
    self.add_linked_attribute(group_dn, user3_dn, attr='nonSecurityMember')

    # This link will be deactivated later in the test.
    set_inactive = AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
                                drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                g_guid, u3_guid)

    expected_links = set([set_inactive,
                          AbstractLink(drsuapi.DRSUAPI_ATTID_member,
                                       drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                       g_guid, u1_guid),
                          AbstractLink(drsuapi.DRSUAPI_ATTID_member,
                                       drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                       g_guid, u2_guid),
                          AbstractLink(drsuapi.DRSUAPI_ATTID_member,
                                       drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                       g_guid, u3_guid),
                          AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
                                       drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                       g_guid, u1_guid),
                          AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
                                       drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
                                       g_guid, u2_guid),
                          ])

    dc_guid_1 = self.ldb_dc1.get_invocation_id()
    drs, drs_handle = self._ds_bind(self.dnsname_dc1)

    req8 = self._exop_req8(dest_dsa=None,
                           invocation_id=dc_guid_1,
                           nc_dn_str=group_dn,
                           exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
    (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)

    # Collect (link, target_guid) pairs; every link returned must be one
    # of the expected links.
    no_inactive = []
    for link in ctr.linked_attributes:
        target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
                                 link.value.blob).guid
        no_inactive.append((link, target_guid))
        # assertIn gives a clearer failure message than assertTrue(... in ...)
        self.assertIn(AbstractLink(link.attid, link.flags,
                                   link.identifier.guid, target_guid),
                      expected_links)

    no_inactive.sort(key=cmp_to_key(_linked_attribute_compare))

    # assert the two arrays are the same
    self.assertEqual(len(expected_links), ctr.linked_attributes_count)
    self.assertEqual([x[0] for x in no_inactive], ctr.linked_attributes)

    self.remove_linked_attribute(group_dn, user3_dn,
                                 attr='nonSecurityMember')

    # Set the link inactive
    expected_links.remove(set_inactive)
    set_inactive.flags = 0
    expected_links.add(set_inactive)

    # Replicate again: the removed link must still be present, now inactive.
    has_inactive = []
    (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
    for link in ctr.linked_attributes:
        target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
                                 link.value.blob).guid
        has_inactive.append((link, target_guid))
        self.assertIn(AbstractLink(link.attid, link.flags,
                                   link.identifier.guid, target_guid),
                      expected_links)

    has_inactive.sort(key=cmp_to_key(_linked_attribute_compare))

    # assert the two arrays are the same
    self.assertEqual(len(expected_links), ctr.linked_attributes_count)
    self.assertEqual([x[0] for x in has_inactive], ctr.linked_attributes)