class NotificationObjectRecord(SerializableRecord, fromTable(schema.NOTIFICATION)):
    """
    @DynamicAttrs
    L{Record} for L{schema.NOTIFICATION}.
    """
    pass
class ExternalDelegateGroupsRecord(SerializableRecord, fromTable(schema.EXTERNAL_DELEGATE_GROUPS)):
    """
    @DynamicAttrs
    L{Record} for L{schema.EXTERNAL_DELEGATE_GROUPS}.
    """
    pass
class GroupMembershipRecord(SerializableRecord, fromTable(schema.GROUP_MEMBERSHIP)):
    """
    @DynamicAttrs
    L{Record} for L{schema.GROUP_MEMBERSHIP}.
    """
    pass
class HomeCleanupWork(WorkItem, fromTable(schema.HOME_CLEANUP_WORK)):
    """
    Work item to clean up any previously "external" homes on the pod to which
    data was migrated. Those old homes will now be marked as disabled and need
    to be silently removed, without any side effects (i.e., no implicit
    scheduling, no sharing cancels, etc.).
    """

    group = "ownerUID"

    notBeforeDelay = 300    # 5 minutes

    @inlineCallbacks
    def doWork(self):
        """
        Delete all the corresponding homes.
        """

        oldhome = yield self.transaction.calendarHomeWithUID(
            self.ownerUID, status=_HOME_STATUS_DISABLED)
        if oldhome is not None:
            yield oldhome.purgeAll()

        oldnotifications = yield self.transaction.notificationsWithUID(
            self.ownerUID, status=_HOME_STATUS_DISABLED)
        if oldnotifications is not None:
            yield oldnotifications.purge()
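# Illustrative sketch (not part of the original module): how a migration step
# might enqueue HomeCleanupWork once a user's data has been moved off this
# pod. "txn" is assumed to be an open store transaction and "uid" the migrated
# owner's UID; the notBefore offset mirrors notBeforeDelay above.
@inlineCallbacks
def _exampleScheduleHomeCleanup(txn, uid):
    notBefore = (
        datetime.datetime.utcnow() +
        datetime.timedelta(seconds=HomeCleanupWork.notBeforeDelay)
    )
    yield txn.enqueue(HomeCleanupWork, ownerUID=uid, notBefore=notBefore)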
class ScheduleWork(Record, fromTable(schema.SCHEDULE_WORK)):
    """
    @DynamicAttrs
    A L{Record} based table whose rows are used for locking scheduling work by
    iCalendar UID value, as well as helping to determine the next work for a
    particular UID.
    """

    _classForWorkType = {}

    @classmethod
    def jobIDsQueryJoin(cls, homeID, other):
        return Select(
            [cls.jobID, ],
            From=cls.table.join(other.table, on=(cls.workID == other.workID)),
            Where=other.homeResourceID == homeID,
        )

    @classmethod
    def classForWorkType(cls, workType):
        return cls._classForWorkType.get(workType)

    def migrate(self, mapIDsCallback):
        """
        Abstract API that must be implemented by each sub-class. This method
        will take a record, replace the references to the home and any object
        resource id with those determined from the callback, and then create
        new job/work items for the record. This is used for cross-pod
        migration of work items.

        @param mapIDsCallback: a callback that returns a tuple of the new
            home id and new resource id
        """
        raise NotImplementedError
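# Illustrative sketch (not part of the original module) of the migrate()
# contract described above: map the old ids through the callback, then create
# new work against the mapped ids. The sub-class is hypothetical, and
# MigrationCleanupWork merely stands in for whatever work item a real
# implementation would re-create; self.transaction is assumed to have been
# attached to the record (as groupAttendeesForObjects does below).
class _ExampleScheduleWork(ScheduleWork):

    @inlineCallbacks
    def migrate(self, mapIDsCallback):
        # Ask the callback for the equivalent ids on the destination pod
        newHomeID, _ignore_newResourceID = yield mapIDsCallback()

        # Re-create the job/work item against the mapped home id
        yield self.transaction.enqueue(
            MigrationCleanupWork, homeResourceID=newHomeID)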
class IMIPInvitationWork(WorkItem, fromTable(schema.IMIP_INVITATION_WORK)):
    """
    Sends outbound IMIP messages.
    """

    mailSender = None

    @classmethod
    def getMailSender(cls):
        """
        Instantiate and return a singleton MailSender object.

        @return: a MailSender
        """
        if cls.mailSender is None:
            if config.Scheduling.iMIP.Enabled:
                settings = config.Scheduling.iMIP.Sending
                smtpSender = SMTPSender(
                    settings.Username, settings.Password,
                    settings.UseSSL, settings.Server, settings.Port)
                cls.mailSender = MailSender(
                    settings.Address, settings.SuppressionDays,
                    smtpSender, getLanguage(config))
        return cls.mailSender

    @inlineCallbacks
    def doWork(self):
        """
        Send an outbound IMIP message.
        """
        mailSender = self.getMailSender()
        if mailSender is not None:
            calendar = Component.fromString(self.icalendarText)
            yield mailSender.outbound(
                self.transaction, self.fromAddr, self.toAddr, calendar)
class MigrationCleanupWork(WorkItem, fromTable(schema.MIGRATION_CLEANUP_WORK)):

    group = "homeResourceID"

    notBeforeDelay = 300    # 5 minutes

    @inlineCallbacks
    def doWork(self):
        """
        Delete all the corresponding migration records.
        """

        yield CalendarMigrationRecord.deletesome(
            self.transaction,
            CalendarMigrationRecord.calendarHomeResourceID == self.homeResourceID,
        )
        yield CalendarObjectMigrationRecord.deletesome(
            self.transaction,
            CalendarObjectMigrationRecord.calendarHomeResourceID == self.homeResourceID,
        )
        yield AttachmentMigrationRecord.deletesome(
            self.transaction,
            AttachmentMigrationRecord.calendarHomeResourceID == self.homeResourceID,
        )
class FindMinValidRevisionWork(RegeneratingWorkItem, fromTable(schema.FIND_MIN_VALID_REVISION_WORK)):

    group = "find_min_revision"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return FindMinValidRevisionWork.reschedule(txn, seconds)

        if config.RevisionCleanup.Enabled:
            return store.inTransaction(
                "FindMinValidRevisionWork.initialSchedule", _enqueue)
        else:
            return succeed(None)

    def regenerateInterval(self):
        """
        Return the interval in seconds between regenerating instances.
        """
        return float(config.RevisionCleanup.CleanupPeriodDays) * 24 * 60 * 60

    @inlineCallbacks
    def doWork(self):

        # Get the minimum valid revision
        minValidRevision = int(
            (yield self.transaction.calendarserverValue("MIN-VALID-REVISION")))

        # Get the max revision on table rows before the date limit
        dateLimit = (datetime.datetime.utcnow() - datetime.timedelta(
            days=float(config.RevisionCleanup.SyncTokenLifetimeDays)))
        maxRevOlderThanDate = 0

        # TODO: Use one Select statement
        for table in (
            schema.CALENDAR_OBJECT_REVISIONS,
            schema.NOTIFICATION_OBJECT_REVISIONS,
            schema.ADDRESSBOOK_OBJECT_REVISIONS,
            schema.ABO_MEMBERS,
        ):
            revisionRows = yield Select(
                [Max(table.REVISION)],
                From=table,
                Where=(table.MODIFIED < dateLimit),
            ).on(self.transaction)

            if revisionRows:
                tableMaxRevision = revisionRows[0][0]
                if tableMaxRevision > maxRevOlderThanDate:
                    maxRevOlderThanDate = tableMaxRevision

        if maxRevOlderThanDate > minValidRevision:
            # Save the new min valid revision
            yield self.transaction.updateCalendarserverValue(
                "MIN-VALID-REVISION", maxRevOlderThanDate)

            # Schedule revision cleanup
            yield RevisionCleanupWork.reschedule(self.transaction, seconds=0)
class CalendarObjectMigrationRecord(Record, fromTable(schema.CALENDAR_OBJECT_MIGRATION)):
    """
    @DynamicAttrs
    L{Record} for L{schema.CALENDAR_OBJECT_MIGRATION}.
    """
    pass
class PrincipalPurgeHomeWork(WorkItem, fromTable(schema.PRINCIPAL_PURGE_HOME_WORK)):
    """
    Work item for removing a UID's home.
    """

    group = property(lambda self: (self.table.HOME_RESOURCE_ID == self.homeResourceID))

    @inlineCallbacks
    def doWork(self):

        # Delete any other work items for this UID
        yield Delete(
            From=self.table,
            Where=self.group,
        ).on(self.transaction)

        # NB we do not check config.AutomaticPurging.Enabled here because if
        # this work item was enqueued we always need to complete it

        # Check for pending scheduling operations
        sow = schema.SCHEDULE_ORGANIZER_WORK
        sosw = schema.SCHEDULE_ORGANIZER_SEND_WORK
        srw = schema.SCHEDULE_REPLY_WORK
        rows = yield Select(
            [sow.HOME_RESOURCE_ID],
            From=sow,
            Where=(sow.HOME_RESOURCE_ID == self.homeResourceID),
            SetExpression=Union(
                Select(
                    [sosw.HOME_RESOURCE_ID],
                    From=sosw,
                    Where=(sosw.HOME_RESOURCE_ID == self.homeResourceID),
                    SetExpression=Union(
                        Select(
                            [srw.HOME_RESOURCE_ID],
                            From=srw,
                            Where=(srw.HOME_RESOURCE_ID == self.homeResourceID),
                        )
                    ),
                )
            ),
        ).on(self.transaction)

        if rows:
            # Scheduling work is still outstanding: regenerate this job
            notBefore = (datetime.datetime.utcnow() + datetime.timedelta(
                seconds=config.AutomaticPurging.HomePurgeDelaySeconds))
            yield self.transaction.enqueue(
                PrincipalPurgeHomeWork,
                homeResourceID=self.homeResourceID,
                notBefore=notBefore
            )
        else:
            # Get the home and remove it - only if properly marked as being purged
            home = yield self.transaction.calendarHomeWithResourceID(self.homeResourceID)
            if home.purging():
                yield home.remove()
class CleanupOneInboxWork(WorkItem, fromTable(schema.CLEANUP_ONE_INBOX_WORK)):

    group = property(lambda self: (self.table.HOME_ID == self.homeID))

    @inlineCallbacks
    def doWork(self):
        # No need to delete other work items. They are unique

        # Get orphan names
        orphanNames = set((
            yield self.transaction.orphanedInboxItemsInHomeID(self.homeID)
        ))
        if orphanNames:
            home = yield self.transaction.calendarHomeWithResourceID(self.homeID)
            log.info(
                "Inbox cleanup work in home: {homeUID}, deleting orphaned items: {orphanNames}",
                homeUID=home.uid(), orphanNames=orphanNames,
            )

        # Get old item names
        if float(config.InboxCleanup.ItemLifetimeDays) >= 0:  # use -1 to disable; 0 is test case
            cutoff = datetime.datetime.utcnow() - datetime.timedelta(
                days=float(config.InboxCleanup.ItemLifetimeDays))
            oldItemNames = set((
                yield self.transaction.listInboxItemsInHomeCreatedBefore(self.homeID, cutoff)
            ))
            newDeleters = oldItemNames - orphanNames
            if newDeleters:
                home = yield self.transaction.calendarHomeWithResourceID(self.homeID)
                log.info(
                    "Inbox cleanup work in home: {homeUID}, deleting old items: {newDeleters}",
                    homeUID=home.uid(), newDeleters=newDeleters,
                )
        else:
            oldItemNames = set()

        # Get item names for old events
        if float(config.InboxCleanup.ItemLifeBeyondEventEndDays) >= 0:  # use -1 to disable; 0 is test case
            cutoff = datetime.datetime.utcnow() - datetime.timedelta(
                days=float(config.InboxCleanup.ItemLifeBeyondEventEndDays))
            itemNamesForOldEvents = set((
                yield self.transaction.listInboxItemsInHomeForEventsBefore(self.homeID, cutoff)
            ))
            newDeleters = itemNamesForOldEvents - oldItemNames - orphanNames
            if newDeleters:
                home = yield self.transaction.calendarHomeWithResourceID(self.homeID)
                log.info(
                    "Inbox cleanup work in home: {homeUID}, deleting items for old events: {newDeleters}",
                    homeUID=home.uid(), newDeleters=newDeleters,
                )
        else:
            itemNamesForOldEvents = set()

        itemNamesToDelete = orphanNames | itemNamesForOldEvents | oldItemNames
        if itemNamesToDelete:
            inbox = yield home.childWithName("inbox")
            for item in (yield inbox.objectResourcesWithNames(itemNamesToDelete)):
                yield item.remove()
class IMIPReplyWork(WorkItem, fromTable(schema.IMIP_REPLY_WORK)):

    @inlineCallbacks
    def doWork(self):
        calendar = Component.fromString(self.icalendarText)
        try:
            yield injectMessage(
                self.transaction, self.organizer, self.attendee, calendar)
        except:
            log.error("Unable to process reply")
class PrincipalPurgePollingWork(RegeneratingWorkItem, fromTable(schema.PRINCIPAL_PURGE_POLLING_WORK)):
    """
    A work item that scans the existing set of provisioned homes in the store
    and creates a work item for each to be checked against the directory to
    see if they need purging.
    """

    group = "principal_purge_polling"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return PrincipalPurgePollingWork.reschedule(txn, seconds)

        if config.AutomaticPurging.Enabled:
            return store.inTransaction(
                "PrincipalPurgePollingWork.initialSchedule", _enqueue)
        else:
            return succeed(None)

    def regenerateInterval(self):
        """
        Return the interval in seconds between regenerating instances.
        """
        return config.AutomaticPurging.PollingIntervalSeconds if config.AutomaticPurging.Enabled else None

    @inlineCallbacks
    def doWork(self):

        # If not enabled, punt here
        if not config.AutomaticPurging.Enabled:
            returnValue(None)

        # Do the scan
        allUIDs = set()
        for home in (schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME):
            for [uid] in (yield Select(
                [home.OWNER_UID],
                From=home,
                Where=(home.STATUS == _HOME_STATUS_NORMAL),
            ).on(self.transaction)):
                allUIDs.add(uid)

        # Spread out the per-uid checks CheckStaggerSeconds apart
        seconds = 0
        for uid in allUIDs:
            notBefore = (datetime.datetime.utcnow() + datetime.timedelta(seconds=seconds))
            seconds += config.AutomaticPurging.CheckStaggerSeconds
            yield self.transaction.enqueue(
                PrincipalPurgeCheckWork, uid=uid, notBefore=notBefore)
class MigratedHomeCleanupWork(WorkItem, fromTable(schema.MIGRATED_HOME_CLEANUP_WORK)):
    """
    Work item to clean up the old home data left behind after migration, as
    well as other unwanted items like iMIP tokens, delegates, etc. The old
    homes will now be marked as disabled and need to be silently removed,
    without any side effects (i.e., no implicit scheduling, no sharing
    cancels, etc.).
    """

    group = "ownerUID"

    notBeforeDelay = 300    # 5 minutes

    @inlineCallbacks
    def doWork(self):
        """
        Delete all the corresponding homes, then the ancillary data.
        """

        oldhome = yield self.transaction.calendarHomeWithUID(
            self.ownerUID, status=_HOME_STATUS_DISABLED)
        if oldhome is not None:
            # Work items - we need to clean these up before the home goes away
            # because we have an "on delete cascade" on the WorkItem table, and
            # if that ran it would leave orphaned Job rows set to a paused
            # state, and those would remain forever in the table.
            for workType in allScheduleWork:
                items = yield workType.query(
                    self.transaction, workType.homeResourceID == oldhome.id())
                for item in items:
                    yield item.remove()

            yield oldhome.purgeAll()

        oldnotifications = yield self.transaction.notificationsWithUID(
            self.ownerUID, status=_HOME_STATUS_DISABLED)
        if oldnotifications is not None:
            yield oldnotifications.purge()

        # These are things that reference the home id or the user UID but
        # don't get removed via a cascade

        # iMIP tokens
        cuaddr = "urn:x-uid:{}".format(self.ownerUID)
        yield iMIPTokenRecord.deletesome(
            self.transaction,
            iMIPTokenRecord.organizer == cuaddr,
        )

        # Delegators - individual and group
        yield DelegateRecord.deletesome(
            self.transaction, DelegateRecord.delegator == self.ownerUID)
        yield DelegateGroupsRecord.deletesome(
            self.transaction, DelegateGroupsRecord.delegator == self.ownerUID)
        yield ExternalDelegateGroupsRecord.deletesome(
            self.transaction, ExternalDelegateGroupsRecord.delegator == self.ownerUID)
class InboxCleanupWork(RegeneratingWorkItem, fromTable(schema.INBOX_CLEANUP_WORK)):

    group = "inbox_cleanup"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return InboxCleanupWork.reschedule(txn, seconds)

        if config.InboxCleanup.Enabled:
            return store.inTransaction("InboxCleanupWork.initialSchedule", _enqueue)
        else:
            return succeed(None)

    def regenerateInterval(self):
        """
        Return the interval in seconds between regenerating instances.
        """
        return float(config.InboxCleanup.CleanupPeriodDays) * 24 * 60 * 60

    @inlineCallbacks
    def doWork(self):

        # Exit if not done with the last delete
        coiw = schema.CLEANUP_ONE_INBOX_WORK
        queuedCleanupOneInboxWorkItems = (yield Select(
            [Count(coiw.HOME_ID)],
            From=coiw,
        ).on(self.transaction))[0][0]

        if queuedCleanupOneInboxWorkItems:
            log.error(
                "Inbox cleanup work: can't schedule per-home cleanup because {count} work items are still queued.",
                count=queuedCleanupOneInboxWorkItems)
        else:
            # Enumerate provisioned normal calendar homes
            ch = schema.CALENDAR_HOME
            homeRows = yield Select(
                [ch.RESOURCE_ID],
                From=ch,
                Where=ch.STATUS == _HOME_STATUS_NORMAL,
            ).on(self.transaction)

            # Add an initial delay to the start of the first work item, then
            # add an offset between each item
            seconds = config.InboxCleanup.StartDelaySeconds
            for homeRow in homeRows:
                yield CleanupOneInboxWork.reschedule(
                    self.transaction, seconds=seconds, homeID=homeRow[0])
                seconds += config.InboxCleanup.StaggerSeconds
class GroupAttendeeReconciliationWork(AggregatedWorkItem, fromTable(schema.GROUP_ATTENDEE_RECONCILE_WORK)):

    group = property(lambda self: (self.table.RESOURCE_ID == self.resourceID))

    @inlineCallbacks
    def doWork(self):

        # Get the db object
        calendarObject = yield CalendarStoreFeatures(
            self.transaction._store).calendarObjectWithID(
                self.transaction, self.resourceID)
        yield calendarObject.groupAttendeeChanged(self.groupID)
class CalendarMigrationRecord(Record, fromTable(schema.CALENDAR_MIGRATION)):
    """
    @DynamicAttrs
    L{Record} for L{schema.CALENDAR_MIGRATION}.
    """

    @classmethod
    @inlineCallbacks
    def deleteremotes(cls, txn, homeid, remotes):
        yield Delete(
            From=cls.table,
            Where=(cls.calendarHomeResourceID == homeid).And(
                cls.remoteResourceID.In(Parameter("remotes", len(remotes)))),
        ).on(txn, remotes=remotes)
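# Illustrative usage sketch (not part of the original module): removing the
# migration records for a set of remote resource ids in one call. "txn" is
# assumed to be an open store transaction; the ids are hypothetical.
@inlineCallbacks
def _exampleDeleteRemotes(txn, homeID):
    # Delete the CALENDAR_MIGRATION rows for remote resources 101 and 102
    yield CalendarMigrationRecord.deleteremotes(txn, homeID, [101, 102])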
class RevisionCleanupWork(SingletonWorkItem, fromTable(schema.REVISION_CLEANUP_WORK)):

    group = "group_revision_cleanup"

    @inlineCallbacks
    def doWork(self):

        # Get the minimum valid revision
        minValidRevision = int(
            (yield self.transaction.calendarserverValue("MIN-VALID-REVISION")))

        # Delete revisions
        yield self.transaction.deleteRevisionsBefore(minValidRevision)
class PushNotificationWork(WorkItem, fromTable(schema.PUSH_NOTIFICATION_WORK)):

    group = property(lambda self: (self.table.PUSH_ID == self.pushID))
    default_priority = WORK_PRIORITY_HIGH
    default_weight = WORK_WEIGHT_1

    @inlineCallbacks
    def doWork(self):

        # Find all work items with the same push ID and determine the highest
        # priority among them. Delete the matching work items.
        results = (yield Select(
            [self.table.WORK_ID, self.table.JOB_ID, self.table.PUSH_PRIORITY],
            From=self.table,
            Where=self.table.PUSH_ID == self.pushID
        ).on(self.transaction))

        maxPriority = self.pushPriority

        # If there are other enqueued work items for this push ID, find the
        # highest priority one and use that value. Note that L{results} will
        # not contain this work item, as job processing will have already
        # deleted it, so we need to make sure the max priority calculation
        # includes this one.
        if results:
            workIDs, jobIDs, priorities = zip(*results)
            maxPriority = max(priorities + (self.pushPriority, ))

            # Delete the work items and jobs we selected - deleting the job
            # will ensure that there are no "orphaned" jobs left in the job
            # queue which would otherwise get to run at some later point,
            # though not do anything because there is no related work item.
            yield Delete(
                From=self.table,
                Where=self.table.WORK_ID.In(Parameter("workIDs", len(workIDs)))
            ).on(self.transaction, workIDs=workIDs)
            yield Delete(
                From=JobItem.table,  # @UndefinedVariable
                Where=JobItem.jobID.In(Parameter("jobIDs", len(jobIDs)))  # @UndefinedVariable
            ).on(self.transaction, jobIDs=jobIDs)

        pushDistributor = self.transaction._pushDistributor
        if pushDistributor is not None:
            # Convert the integer priority value back into a constant
            priority = PushPriority.lookupByValue(maxPriority)
            yield pushDistributor.enqueue(self.transaction, self.pushID, priority=priority)
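# Illustrative sketch (not part of the original module): enqueueing a push
# notification for a changed resource. The pushID value and the use of
# PushPriority.high are assumptions for the example; doWork() above coalesces
# multiple pending pushes for the same pushID into a single notification at
# the highest requested priority.
@inlineCallbacks
def _exampleEnqueuePush(txn, pushID):
    yield txn.enqueue(
        PushNotificationWork,
        pushID=pushID,
        pushPriority=PushPriority.high.value,  # assumed constant name
    )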
class GroupAttendeeRecord(SerializableRecord, fromTable(schema.GROUP_ATTENDEE)):
    """
    @DynamicAttrs
    L{Record} for L{schema.GROUP_ATTENDEE}.
    """

    @classmethod
    @inlineCallbacks
    def groupAttendeesForObjects(cls, txn, cobjs):
        """
        Get delegator/group pairs for each of the specified calendar objects.
        """

        # Do a join to get what we need
        rows = yield Select(
            list(GroupAttendeeRecord.table) + list(GroupsRecord.table),
            From=GroupAttendeeRecord.table.join(
                GroupsRecord.table,
                GroupAttendeeRecord.groupID == GroupsRecord.groupID),
            Where=(GroupAttendeeRecord.resourceID.In(Parameter("cobjs", len(cobjs))))
        ).on(txn, cobjs=cobjs)

        results = []
        groupAttendeeNames = [
            GroupAttendeeRecord.__colmap__[column]
            for column in list(GroupAttendeeRecord.table)
        ]
        groupsNames = [
            GroupsRecord.__colmap__[column]
            for column in list(GroupsRecord.table)
        ]
        split_point = len(groupAttendeeNames)
        for row in rows:
            groupAttendeeRow = row[:split_point]
            groupAttendeeRecord = GroupAttendeeRecord()
            groupAttendeeRecord._attributesFromRow(zip(groupAttendeeNames, groupAttendeeRow))
            groupAttendeeRecord.transaction = txn

            groupsRow = row[split_point:]
            groupsRecord = GroupsRecord()
            groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
            groupsRecord.transaction = txn

            results.append((groupAttendeeRecord, groupsRecord,))

        returnValue(results)
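# Illustrative usage sketch (not part of the original module): fetching the
# (groupAttendee, group) record pairs for a set of calendar object resource
# ids. The resource ids are hypothetical, and the attribute names used in the
# log line are assumed from the GROUP_ATTENDEE/GROUPS column maps.
@inlineCallbacks
def _exampleGroupAttendees(txn):
    pairs = yield GroupAttendeeRecord.groupAttendeesForObjects(txn, [201, 202])
    for groupAttendee, group in pairs:
        log.debug(
            "Resource {rid} has group attendee {uid}",
            rid=groupAttendee.resourceID, uid=group.groupUID,
        )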
class InboxRemoveWork(WorkItem, fromTable(schema.INBOX_REMOVE_WORK)):

    group = property(
        lambda self: (self.table.HOME_ID == self.homeID).And(
            self.table.RESOURCE_NAME == self.resourceName))

    @inlineCallbacks
    def doWork(self):
        # Some of the resources may no longer exist by the time this work
        # item runs, so simply ignore that and let the work complete without
        # doing anything
        home = yield self.transaction.calendarHomeWithResourceID(self.homeID)
        if home is not None:
            inbox = yield home.childWithName("inbox")
            if inbox is not None:
                item = yield inbox.objectResourceWithName(self.resourceName)
                if item is not None:
                    yield item.remove()
class TestWork(WorkItem, fromTable(schema.TEST_WORK)):
    """
    This work item is used solely for testing purposes: it allows us to
    simulate different types of work with varying priority, weight, and
    notBefore values, taking a variable amount of time to complete. This lets
    us load test the job queue.
    """

    @classmethod
    def schedule(cls, store, delay, priority, weight, runtime):
        """
        Create a new L{TestWork} item.

        @param store: the L{CommonStore} to use
        @type store: L{CommonStore}
        @param delay: seconds before the work executes
        @type delay: L{int}
        @param priority: priority to use for this work
        @type priority: L{int}
        @param weight: weight to use for this work
        @type weight: L{int}
        @param runtime: amount of time this work should take to execute, in
            milliseconds
        @type runtime: L{int}
        """
        def _enqueue(txn):
            return TestWork.reschedule(
                txn, delay, priority=priority, weight=weight, delay=runtime)

        return store.inTransaction("TestWork.schedule", _enqueue)

    @inlineCallbacks
    def doWork(self):
        """
        All this work does is wait for the specified amount of time.
        """

        log.debug("TestWork started: {jobid}", jobid=self.jobID)
        if self.delay != 0:
            wait = Deferred()

            def _timedDeferred():
                wait.callback(True)

            reactor.callLater(self.delay / 1000.0, _timedDeferred)
            yield wait
        log.debug("TestWork done: {jobid}", jobid=self.jobID)
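# Illustrative usage sketch (not part of the original module): enqueue a high
# priority test job that starts after one second and simulates half a second
# of execution. WORK_PRIORITY_HIGH and WORK_WEIGHT_1 are the same constants
# used by PushNotificationWork above; the numeric values are arbitrary.
def _exampleScheduleTestWork(store):
    return TestWork.schedule(
        store,
        delay=1,                        # start 1 second from now
        priority=WORK_PRIORITY_HIGH,
        weight=WORK_WEIGHT_1,
        runtime=500,                    # simulated execution time in ms
    )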
class PrincipalPurgeCheckWork(WorkItem, fromTable(schema.PRINCIPAL_PURGE_CHECK_WORK)):
    """
    Work item for checking for the existence of a UID in the directory. This
    work item is created by L{PrincipalPurgePollingWork} - one for each unique
    user UID to check.
    """

    group = property(lambda self: (self.table.UID == self.uid))

    @inlineCallbacks
    def doWork(self):

        # Delete any other work items for this UID
        yield Delete(
            From=self.table,
            Where=self.group,
        ).on(self.transaction)

        # If not enabled, punt here
        if not config.AutomaticPurging.Enabled:
            returnValue(None)

        log.debug("Checking for existence of {uid} in directory", uid=self.uid)
        directory = self.transaction.store().directoryService()
        record = yield directory.recordWithUID(self.uid)

        if record is None:
            # Schedule purge of this UID once the configured purge interval
            # has elapsed
            notBefore = (
                datetime.datetime.utcnow() +
                datetime.timedelta(seconds=config.AutomaticPurging.PurgeIntervalSeconds)
            )
            log.warn(
                "Principal {uid} is no longer in the directory; scheduling clean-up at {when}",
                uid=self.uid, when=notBefore
            )
            yield self.transaction.enqueue(
                PrincipalPurgeWork, uid=self.uid, notBefore=notBefore
            )
        else:
            log.debug("{uid} is still in the directory", uid=self.uid)
class NamedLock(Record, fromTable(LockSchema.NAMED_LOCK)):
    """
    An L{AcquiredLock} lock against a shared data store that the current
    process holds via the referenced transaction.
    """

    @classmethod
    def acquire(cls, txn, name):
        """
        Acquire a lock with the given name.

        @param name: The name of the lock to acquire. Against the same store,
            no two locks with the same name may be acquired.
        @type name: L{unicode}

        @return: a L{Deferred} that fires with an L{AcquiredLock} when the
            lock has been acquired, or fails with L{LockTimeout} when it has
            not.
        """
        def autoRelease(self):
            txn.preCommit(lambda: self.release(True))
            return self

        def lockFailed(f):
            raise LockTimeout(name)

        d = cls.create(txn, lockName=name)
        d.addCallback(autoRelease)
        d.addErrback(lockFailed)
        return d

    def release(self, ignoreAlreadyUnlocked=False):
        """
        Release this lock.

        @param ignoreAlreadyUnlocked: If you don't care about the current
            status of this lock, and just want to release it if it is still
            acquired, pass this parameter as L{True}. Otherwise this method
            will raise an exception if it is invoked when the lock has
            already been released.

        @raise: L{AlreadyUnlocked}

        @return: A L{Deferred} that fires with L{None} when the lock has been
            unlocked.
        """
        return self.delete()
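# Illustrative usage sketch (not part of the original module): serializing a
# critical section across processes by acquiring a named lock inside a
# transaction. The lock name is hypothetical; the lock is released
# automatically at commit time via the preCommit hook set up in acquire().
@inlineCallbacks
def _exampleWithNamedLock(txn):
    yield NamedLock.acquire(txn, u"example-critical-section")
    # ... do work that must not run concurrently in another process ...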
class PrincipalPurgeWork(WorkItem, fromTable(schema.PRINCIPAL_PURGE_WORK)):
    """
    Work item for purging a UID's data.
    """

    group = property(lambda self: (self.table.UID == self.uid))

    @inlineCallbacks
    def doWork(self):

        # Delete any other work items for this UID
        yield Delete(
            From=self.table,
            Where=self.group,
        ).on(self.transaction)

        # If not enabled, punt here
        if not config.AutomaticPurging.Enabled:
            returnValue(None)

        # Check for the UID in the directory again
        log.debug("One last existence check for {uid}", uid=self.uid)
        directory = self.transaction.store().directoryService()
        record = yield directory.recordWithUID(self.uid)

        if record is None:
            # Time to go
            service = PurgePrincipalService(self.transaction.store())
            log.warn(
                "Cleaning up future events for principal {uid} since they are no longer in directory",
                uid=self.uid
            )
            yield service.purgeUIDs(
                self.transaction.store(), directory,
                [self.uid], proxies=True, when=None
            )
        else:
            log.debug("{uid} has re-appeared in the directory", uid=self.uid)
class IMIPPollingWork(RegeneratingWorkItem, fromTable(schema.IMIP_POLLING_WORK)):

    # FIXME: purge all old tokens here
    group = "imip_polling"

    def regenerateInterval(self):
        """
        Return the interval in seconds between regenerating instances.
        """
        mailRetriever = self.transaction._mailRetriever
        return mailRetriever.settings["seconds"]

    @inlineCallbacks
    def doWork(self):
        mailRetriever = self.transaction._mailRetriever
        if mailRetriever is not None:
            yield mailRetriever.fetchMail()
class GroupRefreshWork(AggregatedWorkItem, fromTable(schema.GROUP_REFRESH_WORK)):

    group = property(lambda self: (self.table.GROUP_UID == self.groupUID))

    @inlineCallbacks
    def doWork(self):
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:
            try:
                yield groupCacher.refreshGroup(
                    self.transaction, self.groupUID.decode("utf-8"))
            except Exception, e:
                log.error(
                    "Failed to refresh group {group} {err}",
                    group=self.groupUID, err=e)
        else:
            # No group cacher is assigned to this transaction, so there is
            # nothing to refresh (minimal assumed completion of a truncated
            # branch)
            log.debug(
                "Skipping group refresh for {group}: no group cacher",
                group=self.groupUID)
class CleanupOneInboxWork(WorkItem, fromTable(schema.CLEANUP_ONE_INBOX_WORK)):

    group = property(lambda self: (self.table.HOME_ID == self.homeID))

    @inlineCallbacks
    def doWork(self):
        # No need to delete other work items. They are unique

        # Get old item names
        if float(config.InboxCleanup.ItemLifetimeDays) >= 0:  # use -1 to disable; 0 is test case
            cutoff = datetime.datetime.utcnow() - datetime.timedelta(
                days=float(config.InboxCleanup.ItemLifetimeDays))
            oldItemNames = set((
                yield self.transaction.listInboxItemsInHomeCreatedBefore(self.homeID, cutoff)
            ))
            if oldItemNames:
                home = yield self.transaction.calendarHomeWithResourceID(self.homeID)
                log.info(
                    "Inbox cleanup work in home: {homeUID}, deleting old items: {oldItemNames}",
                    homeUID=home.uid(), oldItemNames=oldItemNames,
                )

                # If the number to delete is below our threshold then delete
                # right away, otherwise queue up more work items to delete these
                if len(oldItemNames) < config.InboxCleanup.InboxRemoveWorkThreshold:
                    inbox = yield home.childWithName("inbox")
                    for item in (yield inbox.objectResourcesWithNames(oldItemNames)):
                        yield item.remove()
                else:
                    seconds = config.InboxCleanup.RemovalStaggerSeconds
                    for item in oldItemNames:
                        yield InboxRemoveWork.reschedule(
                            self.transaction, seconds=seconds,
                            homeID=self.homeID, resourceName=item)
                        seconds += config.InboxCleanup.RemovalStaggerSeconds
class GroupDelegateChangesWork(AggregatedWorkItem, fromTable(schema.GROUP_DELEGATE_CHANGES_WORK)):

    group = property(lambda self: (self.table.DELEGATOR_UID == self.delegatorUID))

    @inlineCallbacks
    def doWork(self):
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:
            try:
                yield groupCacher.applyExternalAssignments(
                    self.transaction,
                    self.delegatorUID.decode("utf-8"),
                    self.readDelegateUID.decode("utf-8"),
                    self.writeDelegateUID.decode("utf-8"))
            except Exception, e:
                log.error(
                    "Failed to apply external delegates for {uid} {err}",
                    uid=self.delegatorUID, err=e)
class GroupCacherPollingWork(RegeneratingWorkItem, fromTable(schema.GROUP_CACHER_POLLING_WORK)):

    group = "group_cacher_polling"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return GroupCacherPollingWork.reschedule(txn, seconds)

        if config.GroupCaching.Enabled:
            return store.inTransaction(
                "GroupCacherPollingWork.initialSchedule", _enqueue)
        else:
            return succeed(None)

    def regenerateInterval(self):
        """
        Return the interval in seconds between regenerating instances.
        """
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        return groupCacher.updateSeconds if groupCacher else 10

    @inlineCallbacks
    def doWork(self):
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:
            startTime = time.time()
            try:
                yield groupCacher.update(self.transaction)
            except Exception, e:
                log.error(
                    "Failed to update the group membership cache ({error})",
                    error=e)
            endTime = time.time()
            log.debug(
                "GroupCacher polling took {duration:0.2f} seconds",
                duration=(endTime - startTime))