def groupAttendeesForObjects(cls, txn, cobjs):
    """
    Get delegator/group pairs for each of the specified calendar objects.

    For every group-attendee row referencing one of the given calendar
    object resource IDs, yield the matching (L{GroupAttendeeRecord},
    L{GroupsRecord}) pair.
    """
    # One joined query fetches both tables' columns in a single round trip
    rows = yield Select(
        list(GroupAttendeeRecord.table) + list(GroupsRecord.table),
        From=GroupAttendeeRecord.table.join(
            GroupsRecord.table,
            GroupAttendeeRecord.groupID == GroupsRecord.groupID,
        ),
        Where=(GroupAttendeeRecord.resourceID.In(Parameter("cobjs", len(cobjs)))),
    ).on(txn, cobjs=cobjs)

    # Attribute names in column order, used to map each half of a row
    # back onto its record type.
    attendeeAttrs = [
        GroupAttendeeRecord.__colmap__[col]
        for col in list(GroupAttendeeRecord.table)
    ]
    groupAttrs = [
        GroupsRecord.__colmap__[col]
        for col in list(GroupsRecord.table)
    ]
    boundary = len(attendeeAttrs)

    pairs = []
    for row in rows:
        # Left half of the row -> group attendee record
        attendee = GroupAttendeeRecord()
        attendee._attributesFromRow(zip(attendeeAttrs, row[:boundary]))
        attendee.transaction = txn

        # Right half of the row -> group record
        group = GroupsRecord()
        group._attributesFromRow(zip(groupAttrs, row[boundary:]))
        group.transaction = txn

        pairs.append((attendee, group,))
    returnValue(pairs)
def update(self, txn):
    """
    Refresh cached group state: optionally re-read directory-based
    delegate assignments, delete groups that are no longer referenced
    (or have been gone past the purge interval), then enqueue a refresh
    work item for each surviving group.
    """
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        delegateRecords = yield self.directoryBasedDelegatesSource()
        assignments = {}
        for rec in delegateRecords:
            # A record may lack either proxy attribute entirely; treat
            # that the same as an empty value.
            rw = getattr(rec, "readWriteProxy", None)
            ro = getattr(rec, "readOnlyProxy", None)
            if ro or rw:
                assignments[rec.uid] = (ro, rw)
        yield self.scheduleExternalAssignments(txn, assignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)
    # self.log.debug(
    #     "Groups to refresh: {g}", g=groupUIDs
    # )

    if config.AutomaticPurging.Enabled and groupUIDs:
        # remove unused groups and groups that have not been seen in a while
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=float(config.AutomaticPurging.GroupPurgeIntervalSeconds)
        )
        staleOrUnused = (
            (GroupsRecord.extant == 0).And(GroupsRecord.modified < cutoff)
        ).Or(
            GroupsRecord.groupUID.NotIn(groupUIDs)
        )
        rows = yield GroupsRecord.deletesome(
            txn,
            staleOrUnused,
            returnCols=GroupsRecord.groupUID,
        )
    else:
        # remove unused groups
        rows = yield GroupsRecord.deletesome(
            txn,
            GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
            returnCols=GroupsRecord.groupUID,
        )
    removedUIDs = [row[0] for row in rows]
    if removedUIDs:
        self.log.debug("Deleted old or unused groups {d}", d=removedUIDs)

    # For each of those groups, create a per-group refresh work item
    for groupUID in set(groupUIDs) - set(removedUIDs):
        self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
        yield GroupRefreshWork.reschedule(txn, 0, groupUID=groupUID)
def update(self, txn):
    """
    Refresh cached group state: optionally re-read directory-based
    delegate assignments, delete groups that are no longer referenced
    (or have been missing past the configured purge interval), then
    enqueue a refresh work item for each remaining group.

    @param txn: the transaction used for all database operations
    """
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource(
        )
        externalAssignments = {}
        for record in recordsWithDirectoryBasedDelegates:
            # Records may lack either proxy attribute; a missing
            # attribute is treated the same as an empty value.
            try:
                readWriteProxy = record.readWriteProxy
            except AttributeError:
                readWriteProxy = None
            try:
                readOnlyProxy = record.readOnlyProxy
            except AttributeError:
                readOnlyProxy = None
            if readOnlyProxy or readWriteProxy:
                externalAssignments[record.uid] = (readOnlyProxy,
                                                   readWriteProxy)
        yield self.scheduleExternalAssignments(txn, externalAssignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)
    # self.log.debug(
    #     "Groups to refresh: {g}", g=groupUIDs
    # )
    if config.AutomaticPurging.Enabled and groupUIDs:
        # remove unused groups and groups that have not been seen in a while
        dateLimit = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=float(
                config.AutomaticPurging.GroupPurgeIntervalSeconds)))
        rows = yield GroupsRecord.deletesome(
            txn,
            ((GroupsRecord.extant == 0).And(
                GroupsRecord.modified < dateLimit)).Or(
                    GroupsRecord.groupUID.NotIn(groupUIDs)),
            returnCols=GroupsRecord.groupUID,
        )
    else:
        # remove unused groups
        rows = yield GroupsRecord.deletesome(
            txn,
            GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
            returnCols=GroupsRecord.groupUID,
        )
    deletedGroupUIDs = [row[0] for row in rows]
    if deletedGroupUIDs:
        self.log.debug("Deleted old or unused groups {d}",
                       d=deletedGroupUIDs)

    # For each of those groups, create a per-group refresh work item
    for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
        self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
        yield GroupRefreshWork.reschedule(txn, 0, groupUID=groupUID)
def getAllGroupAttendees(self):
    """
    Return a list of L{GroupAttendeeRecord},L{GroupRecord} for each group
    attendee referenced in calendar data owned by this home.
    """
    # Fetch serialized (attendee, group) pairs over the conduit, then
    # rebuild the record objects locally.
    serialized = yield self._txn.store().conduit.send_home_get_all_group_attendees(self)
    pairs = []
    for item in serialized:
        pairs.append((
            GroupAttendeeRecord.deserialize(item[0]),
            GroupsRecord.deserialize(item[1]),
        ))
    returnValue(pairs)
def groupSharees(self):
    """
    Fetch group-sharee data for this home child over the conduit and
    deserialize the "groups" and "sharees" entries into their record
    types, returning the resulting dict.
    """
    response = yield self._txn.store().conduit.send_homechild_group_sharees(
        self)
    # Replace each serialized payload list with deserialized records
    for key, recordType in (("groups", GroupsRecord),
                            ("sharees", GroupShareeRecord)):
        response[key] = [recordType.deserialize(raw) for raw in response[key]]
    returnValue(response)
def groupsToRefresh(self, txn):
    """
    Compute the set of group UIDs currently in use — as delegates (on
    this pod and, when a servers DB is configured, on other pods), as
    calendar attendees, or as sharees — i.e. the groups worth keeping
    refreshed.

    @param txn: the transaction to query with
    @return: a Deferred firing with a frozenset of group UID strings
    """
    delegatedUIDs = set((yield txn.allGroupDelegates()))
    self.log.debug(
        "There are {count} group delegates", count=len(delegatedUIDs)
    )

    # Also get group delegates from other pods
    if txn.directoryService().serversDB() is not None and len(txn.directoryService().serversDB().allServersExceptThis()) != 0:
        # Query every other pod in parallel; consumeErrors so a single
        # failing pod does not abort the whole computation.
        results = yield DeferredList([
            txn.store().conduit.send_all_group_delegates(txn, server) for
            server in txn.directoryService().serversDB().allServersExceptThis()
        ], consumeErrors=True)
        for result in results:
            # Each DeferredList entry is a (success, value) pair; only
            # merge in results from pods that responded successfully.
            if result and result[0]:
                delegatedUIDs.update(result[1])
    self.log.debug(
        "There are {count} group delegates on this and other pods", count=len(delegatedUIDs)
    )

    # Get groupUIDs for all group attendees
    groups = yield GroupsRecord.query(
        txn,
        GroupsRecord.groupID.In(GroupAttendeeRecord.queryExpr(
            expr=None,
            attributes=(GroupAttendeeRecord.groupID,),
            distinct=True,
        ))
    )
    attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
    self.log.debug(
        "There are {count} group attendees", count=len(attendeeGroupUIDs)
    )

    # Get groupUIDs for all group shares
    gs = schema.GROUP_SHAREE
    gr = schema.GROUPS
    rows = yield Select(
        [gr.GROUP_UID],
        From=gr,
        Where=gr.GROUP_ID.In(
            Select(
                [gs.GROUP_ID],
                From=gs,
                Distinct=True
            )
        )
    ).on(txn)
    shareeGroupUIDs = frozenset([row[0] for row in rows])
    self.log.debug(
        "There are {count} group sharees", count=len(shareeGroupUIDs)
    )

    returnValue(frozenset(delegatedUIDs | attendeeGroupUIDs | shareeGroupUIDs))
def groupAttendeesForObjects(cls, txn, cobjs): """ Get delegator/group pairs for each of the specified calendar objects. """ # Do a join to get what we need rows = yield Select( list(GroupAttendeeRecord.table) + list(GroupsRecord.table), From=GroupAttendeeRecord.table.join( GroupsRecord.table, GroupAttendeeRecord.groupID == GroupsRecord.groupID), Where=(GroupAttendeeRecord.resourceID.In( Parameter("cobjs", len(cobjs))))).on(txn, cobjs=cobjs) results = [] groupAttendeeNames = [ GroupAttendeeRecord.__colmap__[column] for column in list(GroupAttendeeRecord.table) ] groupsNames = [ GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table) ] split_point = len(groupAttendeeNames) for row in rows: groupAttendeeRow = row[:split_point] groupAttendeeRecord = GroupAttendeeRecord() groupAttendeeRecord._attributesFromRow( zip(groupAttendeeNames, groupAttendeeRow)) groupAttendeeRecord.transaction = txn groupsRow = row[split_point:] groupsRecord = GroupsRecord() groupsRecord._attributesFromRow(zip(groupsNames, groupsRow)) groupsRecord.transaction = txn results.append(( groupAttendeeRecord, groupsRecord, )) returnValue(results)
def groupsToRefresh(self, txn):
    """
    Compute the set of group UIDs currently in use — as delegates (on
    this pod and, when a servers DB is configured, on other pods), as
    calendar attendees, or as sharees — i.e. the groups worth keeping
    refreshed.

    @param txn: the transaction to query with
    @return: a Deferred firing with a frozenset of group UID strings
    """
    delegatedUIDs = set((yield txn.allGroupDelegates()))
    self.log.debug("There are {count} group delegates",
                   count=len(delegatedUIDs))

    # Also get group delegates from other pods
    # (filter_v5 presumably excludes older-protocol pods — TODO confirm
    # against serversDB.allServersExceptThis)
    if (txn.directoryService().serversDB() is not None and
            len(txn.directoryService().serversDB().allServersExceptThis(
                filter_v5=True)) != 0):
        # Query every other pod in parallel; consumeErrors so a single
        # failing pod does not abort the whole computation.
        results = yield DeferredList([
            txn.store().conduit.send_all_group_delegates(txn, server)
            for server in txn.directoryService().serversDB().
            allServersExceptThis(filter_v5=True)
        ], consumeErrors=True)
        for result in results:
            # Each DeferredList entry is a (success, value) pair; only
            # merge in results from pods that responded successfully.
            if result and result[0]:
                delegatedUIDs.update(result[1])
    self.log.debug(
        "There are {count} group delegates on this and other pods",
        count=len(delegatedUIDs))

    # Get groupUIDs for all group attendees
    groups = yield GroupsRecord.query(
        txn,
        GroupsRecord.groupID.In(
            GroupAttendeeRecord.queryExpr(
                expr=None,
                attributes=(GroupAttendeeRecord.groupID, ),
                distinct=True,
            )))
    attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
    self.log.debug("There are {count} group attendees",
                   count=len(attendeeGroupUIDs))

    # Get groupUIDs for all group shares
    gs = schema.GROUP_SHAREE
    gr = schema.GROUPS
    rows = yield Select([gr.GROUP_UID],
                        From=gr,
                        Where=gr.GROUP_ID.In(
                            Select([gs.GROUP_ID], From=gs,
                                   Distinct=True))).on(txn)
    shareeGroupUIDs = frozenset([row[0] for row in rows])
    self.log.debug("There are {count} group sharees",
                   count=len(shareeGroupUIDs))

    returnValue(
        frozenset(delegatedUIDs | attendeeGroupUIDs | shareeGroupUIDs))
def update(self, txn):
    """
    Refresh cached group state: optionally re-read directory-based
    delegate assignments, delete groups no longer in use (in batches),
    and enqueue per-group refresh work items spread out over time
    according to the batch scheduling settings.

    @param txn: the transaction used for all database operations
    """
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource(
        )
        externalAssignments = {}
        for record in recordsWithDirectoryBasedDelegates:
            # Records may lack either proxy attribute; a missing
            # attribute is treated the same as an empty value.
            try:
                readWriteProxy = record.readWriteProxy
            except AttributeError:
                readWriteProxy = None
            try:
                readOnlyProxy = record.readOnlyProxy
            except AttributeError:
                readOnlyProxy = None
            if readOnlyProxy or readWriteProxy:
                externalAssignments[record.uid] = (readOnlyProxy,
                                                   readWriteProxy)
        yield self.scheduleExternalAssignments(txn, externalAssignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)
    # self.log.debug(
    #     "Groups to refresh: {g}", g=groupUIDs
    # )

    # Get the set of all known groups in the DB
    knownGroupUIDs = yield txn.allGroups()

    # We'll want to remove groups no longer in use
    groupsToRemove = knownGroupUIDs - groupUIDs

    # Also look for groups which have been marked as missing for a while
    if config.AutomaticPurging.Enabled:
        dateLimit = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=float(
                config.AutomaticPurging.GroupPurgeIntervalSeconds)))
        missingGroups = yield txn.groupsMissingSince(dateLimit)
        groupsToRemove |= missingGroups

    # Delete the groups in batches (keeps each DELETE statement's IN
    # clause to at most batchSize items)
    groupsToRemove = list(groupsToRemove)
    batchSize = 100
    deletedGroupUIDs = []
    while groupsToRemove:
        batch = groupsToRemove[:batchSize]
        del groupsToRemove[:batchSize]
        rows = yield GroupsRecord.deletesome(
            txn,
            GroupsRecord.groupUID.In(batch),
            returnCols=GroupsRecord.groupUID,
        )
        deletedGroupUIDs.extend([row[0] for row in rows])
    if deletedGroupUIDs:
        self.log.debug("Deleted old or unused groups {d}",
                       d=deletedGroupUIDs)

    # For each of those groups, create a per-group refresh work item.
    # Work items are scheduled in batches of self.batchSize, with each
    # successive batch delayed by batchSchedulingIntervalSeconds more,
    # to spread the refresh load over time.
    futureSeconds = self.initialSchedulingDelaySeconds
    i = 0
    for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
        self.log.debug("Enqueuing group refresh for {u} in {sec} seconds",
                       u=groupUID, sec=futureSeconds)
        yield GroupRefreshWork.reschedule(txn, futureSeconds,
                                          groupUID=groupUID)
        i += 1
        if i % self.batchSize == 0:
            i = 0
            futureSeconds += self.batchSchedulingIntervalSeconds
def groupSharees(self):
    """
    Fetch group-sharee data for this home child over the conduit,
    deserializing the "groups" and "sharees" entries into
    L{GroupsRecord} and L{GroupShareeRecord} objects respectively.

    @return: a Deferred firing with the results dict
    """
    results = yield self._txn.store().conduit.send_homechild_group_sharees(self)
    results["groups"] = [GroupsRecord.deserialize(items) for items in results["groups"]]
    results["sharees"] = [GroupShareeRecord.deserialize(items) for items in results["sharees"]]
    returnValue(results)