def update(self, txn):
    """
    Bring the group cache up to date within the given transaction.

    When directory-based delegates are enabled, copy delegate assignments
    from the directory into the delegate db. Then delete group records
    that are unused (and, when automatic purging is enabled, records not
    seen for longer than the configured interval), and enqueue an
    immediate GroupRefreshWork item for every group still of interest.

    @param txn: the store transaction to operate in
    """
    if self.useDirectoryBasedDelegates:
        # Delegate assignments live in the directory; mirror them into
        # the delegate db.
        delegateRecords = yield self.directoryBasedDelegatesSource()
        assignments = {}
        for delegateRecord in delegateRecords:
            # A record may lack either proxy attribute entirely; treat a
            # missing attribute the same as no proxy of that kind.
            writeProxy = getattr(delegateRecord, "readWriteProxy", None)
            readProxy = getattr(delegateRecord, "readOnlyProxy", None)
            if readProxy or writeProxy:
                assignments[delegateRecord.uid] = (readProxy, writeProxy)
        yield self.scheduleExternalAssignments(txn, assignments)

    # Determine which groups we actually care about.
    activeGroupUIDs = yield self.groupsToRefresh(txn)

    if config.AutomaticPurging.Enabled and activeGroupUIDs:
        # Delete groups that are unused, plus groups marked non-extant
        # whose last modification is older than the purge interval.
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=float(config.AutomaticPurging.GroupPurgeIntervalSeconds)
        )
        staleOrUnused = (
            (GroupsRecord.extant == 0).And(GroupsRecord.modified < cutoff)
        ).Or(
            GroupsRecord.groupUID.NotIn(activeGroupUIDs)
        )
        rows = yield GroupsRecord.deletesome(
            txn,
            staleOrUnused,
            returnCols=GroupsRecord.groupUID,
        )
    else:
        # Delete only the unused groups; with no active groups the
        # where-clause is None (no restriction).
        unusedOnly = (
            GroupsRecord.groupUID.NotIn(activeGroupUIDs)
            if activeGroupUIDs else None
        )
        rows = yield GroupsRecord.deletesome(
            txn,
            unusedOnly,
            returnCols=GroupsRecord.groupUID,
        )

    # Column 0 of each returned row is the deleted group's UID.
    removedUIDs = [row[0] for row in rows]
    if removedUIDs:
        self.log.debug("Deleted old or unused groups {d}", d=removedUIDs)

    # Enqueue an immediate per-group refresh for every surviving group.
    for uid in set(activeGroupUIDs) - set(removedUIDs):
        self.log.debug("Enqueuing group refresh for {u}", u=uid)
        yield GroupRefreshWork.reschedule(txn, 0, groupUID=uid)
def update(self, txn):
    """
    Bring the group cache up to date within the given transaction.

    When directory-based delegates are enabled, copies delegate
    assignments from the directory into the delegate db. Then deletes
    group records that are unused (and, when automatic purging is
    enabled, records not seen for longer than the configured interval),
    and enqueues an immediate GroupRefreshWork item for every remaining
    group of interest.

    @param txn: the store transaction to operate in
    """
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource(
        )
        externalAssignments = {}
        for record in recordsWithDirectoryBasedDelegates:
            # A record may lack either proxy attribute entirely; a
            # missing attribute is treated the same as no proxy.
            try:
                readWriteProxy = record.readWriteProxy
            except AttributeError:
                readWriteProxy = None
            try:
                readOnlyProxy = record.readOnlyProxy
            except AttributeError:
                readOnlyProxy = None
            if readOnlyProxy or readWriteProxy:
                externalAssignments[record.uid] = (readOnlyProxy, readWriteProxy)
        yield self.scheduleExternalAssignments(txn, externalAssignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)

    if config.AutomaticPurging.Enabled and groupUIDs:
        # remove unused groups and groups that have not been seen in a while
        dateLimit = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=float(
                config.AutomaticPurging.GroupPurgeIntervalSeconds)))
        rows = yield GroupsRecord.deletesome(
            txn,
            ((GroupsRecord.extant == 0).And(
                GroupsRecord.modified < dateLimit)).Or(
                GroupsRecord.groupUID.NotIn(groupUIDs)),
            returnCols=GroupsRecord.groupUID,
        )
    else:
        # remove unused groups
        # NOTE(review): when groupUIDs is empty the where-clause is None —
        # presumably deletesome() then applies no restriction, i.e. every
        # group record is deleted; confirm that is the intended behavior.
        rows = yield GroupsRecord.deletesome(
            txn,
            GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
            returnCols=GroupsRecord.groupUID,
        )

    # Column 0 of each returned row is the deleted group's UID.
    deletedGroupUIDs = [row[0] for row in rows]
    if deletedGroupUIDs:
        self.log.debug("Deleted old or unused groups {d}", d=deletedGroupUIDs)

    # For each of those groups, create a per-group refresh work item
    for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
        self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
        yield GroupRefreshWork.reschedule(txn, 0, groupUID=groupUID)
def update(self, txn):
    """
    Bring the group cache up to date within the given transaction.

    When directory-based delegates are enabled, copies delegate
    assignments from the directory into the delegate db. Then deletes, in
    batches, group records that are no longer in use (and, when automatic
    purging is enabled, records missing from the directory for longer
    than the configured interval), and finally enqueues a
    GroupRefreshWork item for each remaining group, staggering the
    scheduled times so the refreshes do not all fire at once.

    @param txn: the store transaction to operate in
    """
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource(
        )
        externalAssignments = {}
        for record in recordsWithDirectoryBasedDelegates:
            # A record may lack either proxy attribute entirely; a
            # missing attribute is treated the same as no proxy.
            try:
                readWriteProxy = record.readWriteProxy
            except AttributeError:
                readWriteProxy = None
            try:
                readOnlyProxy = record.readOnlyProxy
            except AttributeError:
                readOnlyProxy = None
            if readOnlyProxy or readWriteProxy:
                externalAssignments[record.uid] = (readOnlyProxy, readWriteProxy)
        yield self.scheduleExternalAssignments(txn, externalAssignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)

    # Get the set of all known groups in the DB
    # NOTE(review): the "-" and "|=" below require both operands to be
    # sets, so allGroups()/groupsToRefresh()/groupsMissingSince() are
    # presumably set-returning — verify against their definitions.
    knownGroupUIDs = yield txn.allGroups()

    # We'll want to remove groups no longer in use
    groupsToRemove = knownGroupUIDs - groupUIDs

    # Also look for groups which have been marked as missing for a while
    if config.AutomaticPurging.Enabled:
        dateLimit = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=float(
                config.AutomaticPurging.GroupPurgeIntervalSeconds)))
        missingGroups = yield txn.groupsMissingSince(dateLimit)
        groupsToRemove |= missingGroups

    # Delete the groups in batches, bounding the size of each IN() clause.
    # NOTE(review): this deletion batch size is a local constant (100)
    # while the scheduling loop below uses self.batchSize — confirm the
    # two are intentionally independent.
    groupsToRemove = list(groupsToRemove)
    batchSize = 100
    deletedGroupUIDs = []
    while groupsToRemove:
        batch = groupsToRemove[:batchSize]
        del groupsToRemove[:batchSize]
        rows = yield GroupsRecord.deletesome(
            txn,
            GroupsRecord.groupUID.In(batch),
            returnCols=GroupsRecord.groupUID,
        )
        # Column 0 of each returned row is the deleted group's UID.
        deletedGroupUIDs.extend([row[0] for row in rows])
    if deletedGroupUIDs:
        self.log.debug("Deleted old or unused groups {d}", d=deletedGroupUIDs)

    # For each of those groups, create a per-group refresh work item.
    # Work items are scheduled in batches of self.batchSize: after each
    # full batch the delay grows by self.batchSchedulingIntervalSeconds,
    # starting from self.initialSchedulingDelaySeconds.
    futureSeconds = self.initialSchedulingDelaySeconds
    i = 0
    for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
        self.log.debug("Enqueuing group refresh for {u} in {sec} seconds",
                       u=groupUID, sec=futureSeconds)
        yield GroupRefreshWork.reschedule(txn, futureSeconds, groupUID=groupUID)
        i += 1
        if i % self.batchSize == 0:
            i = 0
            futureSeconds += self.batchSchedulingIntervalSeconds