def removeNotificationsForUUID(self, uuid):
    """
    Remove the notification home owned by the given UUID, together with
    its NOTIFICATION rows, honoring the "dry-run" option.
    """
    # Look up NOTIFICATION_HOME.RESOURCE_ID for this owner.
    nh = schema.NOTIFICATION_HOME
    homeRows = (yield Select(
        [
            nh.RESOURCE_ID,
        ],
        From=nh,
        Where=(nh.OWNER_UID == Parameter("UUID")),
    ).on(self.txn, UUID=uuid))

    if homeRows:
        homeResourceID = homeRows[0][0]

        # Delete the NOTIFICATION rows attached to that home.
        if not self.options["dry-run"]:
            no = schema.NOTIFICATION
            yield Delete(
                From=no,
                Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")),
            ).on(self.txn, ResourceID=homeResourceID)

    # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
    if not self.options["dry-run"]:
        yield Delete(
            From=nh,
            Where=(nh.OWNER_UID == Parameter("UUID")),
        ).on(self.txn, UUID=uuid)
def removeAttachments(self, resourceID):
    """
    Record the attachment paths of a calendar home and delete the
    corresponding ATTACHMENT rows, honoring the "dry-run" option.

    Returns (via returnValue) the number of attachment rows found.
    """
    # Collect the ATTACHMENT paths for this calendar home.
    at = schema.ATTACHMENT
    rows = (yield Select(
        [
            at.PATH,
        ],
        From=at,
        Where=(at.CALENDAR_HOME_RESOURCE_ID == Parameter("resourceID")),
    ).on(self.txn, resourceID=resourceID))

    if rows:
        self.attachments.update(row[0] for row in rows)

    # Delete the ATTACHMENT rows (no-op when none were found).
    if not self.options["dry-run"]:
        yield Delete(
            From=at,
            Where=(at.CALENDAR_HOME_RESOURCE_ID == Parameter("resourceID")),
        ).on(self.txn, resourceID=resourceID)

    returnValue(len(rows) if rows else 0)
def getData(self, txn, uid, name):
    """
    Fetch event data for the calendar identified by owner UID and
    calendar resource name.
    """
    ch = schema.CALENDAR_HOME
    cb = schema.CALENDAR_BIND
    whereClause = (ch.OWNER_UID == Parameter("UID")).And(
        cb.CALENDAR_RESOURCE_NAME == Parameter("NAME"))
    queryArgs = {
        "UID": uid,
        "NAME": name,
    }
    return self.getEventData(txn, whereClause, queryArgs)
def _completelyNewRevisionQuery(cls):
    """
    DAL statement creating a brand new revision row for a named child
    resource, returning the freshly assigned revision number.
    """
    rev = cls._revisionsSchema
    columns = {
        rev.HOME_RESOURCE_ID: Parameter("homeID"),
        # rev.RESOURCE_ID: Parameter("resourceID"),
        rev.RESOURCE_NAME: Parameter("name"),
        rev.REVISION: schema.REVISION_SEQ,
        rev.DELETED: False,
    }
    return Insert(columns, Return=rev.REVISION)
def update(self, txn):
    """
    Refresh the group cache: push externally-sourced (directory-based)
    delegate assignments into the delegate db when configured, prune
    old or unused GROUPS rows, and enqueue one refresh work item per
    remaining relevant group.

    @param txn: the transaction used for all database work
    """
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource()
        externalAssignments = {}
        for record in recordsWithDirectoryBasedDelegates:
            # Records may lack either proxy attribute entirely, so treat
            # a missing attribute the same as "no proxy assigned".
            try:
                readWriteProxy = record.readWriteProxy
            except AttributeError:
                readWriteProxy = None
            try:
                readOnlyProxy = record.readOnlyProxy
            except AttributeError:
                readOnlyProxy = None
            if readOnlyProxy or readWriteProxy:
                externalAssignments[record.uid] = (readOnlyProxy, readWriteProxy)
        yield self.scheduleExternalAssignments(txn, externalAssignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)
    # self.log.debug(
    #     "Groups to refresh: {g}", g=groupUIDs
    # )

    gr = schema.GROUPS
    if config.AutomaticPurging.Enabled and groupUIDs:
        # remove unused groups and groups that have not been seen in a while
        dateLimit = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=float(
                config.AutomaticPurging.GroupPurgeIntervalSeconds)))
        # NOTE(review): the trailing "if groupUIDs else None" below is
        # redundant — this branch already guarantees groupUIDs is non-empty.
        rows = yield Delete(
            From=gr,
            Where=((gr.EXTANT == 0).And(gr.MODIFIED < dateLimit)).Or(
                gr.GROUP_UID.NotIn(Parameter("groupUIDs", len(groupUIDs))))
            if groupUIDs else None,
            Return=[gr.GROUP_UID]).on(txn, groupUIDs=groupUIDs)
    else:
        # remove unused groups
        rows = yield Delete(
            From=gr,
            Where=gr.GROUP_UID.NotIn(Parameter(
                "groupUIDs", len(groupUIDs))) if groupUIDs else None,
            Return=[gr.GROUP_UID]).on(txn, groupUIDs=groupUIDs)
    deletedGroupUIDs = [row[0] for row in rows]
    if deletedGroupUIDs:
        self.log.debug("Deleted old or unused groups {d}", d=deletedGroupUIDs)

    # For each of those groups, create a per-group refresh work item
    for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
        self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
        yield GroupRefreshWork.reschedule(txn, 0, groupUid=groupUID)
def _updateBumpTokenQuery(cls):
    """
    DAL statement to bump the revision (and modification time) of one
    named child row, returning the new revision number.
    """
    rev = cls._revisionsSchema
    matchRow = (rev.RESOURCE_ID == Parameter("resourceID")).And(
        rev.RESOURCE_NAME == Parameter("name"))
    return Update(
        {
            rev.REVISION: schema.REVISION_SEQ,
            rev.MODIFIED: utcNowSQL,
        },
        Where=matchRow,
        Return=rev.REVISION,
    )
def _deleteSyncTokenQuery(cls):
    """
    DAL query to remove all child revision information. The revision
    for the collection itself is not touched.
    """
    rev = cls._revisionsSchema
    childRows = (rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
        rev.RESOURCE_ID == Parameter("resourceID")).And(
        rev.COLLECTION_NAME == None)
    return Delete(From=rev, Where=childRows)
def _oneNotificationQuery(cls):
    """
    DAL query loading the metadata columns of a single notification,
    keyed by notification UID and home resource id.
    """
    no = cls._objectSchema
    columns = [
        no.RESOURCE_ID,
        no.MD5,
        Len(no.NOTIFICATION_DATA),
        no.NOTIFICATION_TYPE,
        no.CREATED,
        no.MODIFIED,
    ]
    matchRow = (no.NOTIFICATION_UID == Parameter("uid")).And(
        no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID"))
    return Select(columns, From=no, Where=matchRow)
def getData(self, txn, homeName, calendarName, resourceName):
    """
    Fetch event data for one calendar object, identified by home owner
    UID, calendar name, and object resource name.
    """
    ch = schema.CALENDAR_HOME
    cb = schema.CALENDAR_BIND
    co = schema.CALENDAR_OBJECT
    whereClause = (ch.OWNER_UID == Parameter("HOME")).And(
        cb.CALENDAR_RESOURCE_NAME == Parameter("CALENDAR")).And(
        co.RESOURCE_NAME == Parameter("RESOURCE"))
    queryArgs = {
        "HOME": homeName,
        "CALENDAR": calendarName,
        "RESOURCE": resourceName,
    }
    return self.getEventData(txn, whereClause, queryArgs)
def _newNotificationQuery(cls):
    """
    DAL statement inserting one notification row, returning its new
    resource id and created/modified timestamps.
    """
    no = cls._objectSchema
    values = {
        no.NOTIFICATION_HOME_RESOURCE_ID: Parameter("homeID"),
        no.NOTIFICATION_UID: Parameter("uid"),
        no.NOTIFICATION_TYPE: Parameter("notificationType"),
        no.NOTIFICATION_DATA: Parameter("notificationData"),
        no.MD5: Parameter("md5"),
    }
    return Insert(values, Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED])
def _updateNotificationQuery(cls):
    """
    DAL statement updating an existing notification row (matched by
    home resource id and notification UID), returning its new modified
    timestamp.
    """
    no = cls._objectSchema
    values = {
        no.NOTIFICATION_TYPE: Parameter("notificationType"),
        no.NOTIFICATION_DATA: Parameter("notificationData"),
        no.MD5: Parameter("md5"),
    }
    matchRow = (no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")).And(
        no.NOTIFICATION_UID == Parameter("uid"))
    return Update(values, Where=matchRow, Return=no.MODIFIED)
def deleteBind(self, homeID, resourceID):
    """
    Remove the CALENDAR_BIND row joining the given home and calendar,
    unless running in dry-run mode.
    """
    if self.options["dry-run"]:
        return
    cb = schema.CALENDAR_BIND
    yield Delete(
        From=cb,
        Where=((cb.CALENDAR_HOME_RESOURCE_ID == Parameter("HomeID")).And(
            cb.CALENDAR_RESOURCE_ID == Parameter("ResourceID"))),
    ).on(self.txn, HomeID=homeID, ResourceID=resourceID)
def _objectNamesSinceRevisionQuery(cls, deleted=True):
    """
    DAL query for (resource, deleted-flag) pairs changed after a given
    revision; tombstoned rows are excluded when C{deleted} is false.
    """
    rev = cls._revisionsSchema
    criteria = (rev.REVISION > Parameter("revision")).And(
        rev.RESOURCE_ID == Parameter("resourceID"))
    if not deleted:
        # DAL expression syntax requires "== False" here (not "is False").
        criteria = criteria.And(rev.DELETED == False)
    return Select([rev.RESOURCE_NAME, rev.DELETED], From=rev, Where=criteria)
def _sharedRemovalQuery(cls):
    """
    DAL query to indicate a shared collection has been deleted.
    """
    rev = cls._revisionsSchema
    matchRow = (rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
        rev.RESOURCE_ID == Parameter("resourceID")).And(
        rev.RESOURCE_NAME == None)
    tombstone = {
        rev.RESOURCE_ID: None,
        rev.REVISION: schema.REVISION_SEQ,
        rev.DELETED: True,
        rev.MODIFIED: utcNowSQL,
    }
    return Update(tombstone, Where=matchRow)
def _renameSyncTokenQuery(cls):
    """
    DAL query to change sync token for a rename (increment and adjust
    resource name).
    """
    rev = cls._revisionsSchema
    matchRow = (rev.RESOURCE_ID == Parameter("resourceID")).And(
        rev.RESOURCE_NAME == None)
    newValues = {
        rev.REVISION: schema.REVISION_SEQ,
        rev.COLLECTION_NAME: Parameter("name"),
        rev.MODIFIED: utcNowSQL,
    }
    return Update(newValues, Where=matchRow, Return=rev.REVISION)
def deleteremotes(cls, txn, homeid, remotes):
    """
    Delete the rows for the given remote resource ids within one
    calendar home.
    """
    matchRows = (cls.calendarHomeResourceID == homeid).And(
        cls.remoteResourceID.In(Parameter("remotes", len(remotes))))
    return Delete(From=cls.table, Where=matchRows).on(txn, remotes=remotes)
def test_query(self):
    """
    Basic query test - no time range. Builds a CalDAV component filter
    for VEVENT/VFREEBUSY/VAVAILABILITY inside VCALENDAR and checks the
    exact SQL, bound arguments, and time-range-usage flag produced by
    CalDAVSQLQueryGenerator.
    """
    # Build the raw caldavxml filter, then wrap it in the store-level
    # Filter and give it a timezone.
    filter = caldavxml.Filter(
        caldavxml.ComponentFilter(
            *[
                caldavxml.ComponentFilter(
                    **{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")})
            ],
            **{"name": "VCALENDAR"}))
    filter = Filter(filter)
    filter.child.settzinfo(Timezone(tzid="America/New_York"))

    # Generate the SQL from the filter expression.
    expression = buildExpression(filter, self._queryFields)
    sql = CalDAVSQLQueryGenerator(expression, self, 1234)
    select, args, usedtimerange = sql.generate()

    # The component-type restriction should appear as a 3-element IN
    # clause, and no time-range join should have been used.
    self.assertEqual(
        select.toSQL(),
        SQLFragment(
            "select distinct RESOURCE_NAME, ICALENDAR_UID, ICALENDAR_TYPE from CALENDAR_OBJECT where CALENDAR_RESOURCE_ID = ? and ICALENDAR_TYPE in (?, ?, ?)",
            [1234, Parameter('arg1', 3)]))
    self.assertEqual(args, {"arg1": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")})
    self.assertEqual(usedtimerange, False)
def test_old_queued(self): """ Verify that old inbox items are removed """ # Patch to force remove work items self.patch(config.InboxCleanup, "InboxRemoveWorkThreshold", 0) # Predate some inbox items inbox = yield self.calendarUnderTest(home="user01", name="inbox") oldDate = datetime.datetime.utcnow() - datetime.timedelta(days=float(config.InboxCleanup.ItemLifetimeDays), seconds=10) itemsToPredate = ["cal2.ics", "cal3.ics"] co = schema.CALENDAR_OBJECT yield Update( {co.CREATED: oldDate}, Where=co.RESOURCE_NAME.In(Parameter("itemsToPredate", len(itemsToPredate))).And( co.CALENDAR_RESOURCE_ID == inbox._resourceID) ).on(self.transactionUnderTest(), itemsToPredate=itemsToPredate) # do cleanup yield self.transactionUnderTest().enqueue(CleanupOneInboxWork, homeID=inbox.ownerHome()._resourceID, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # check that old items are deleted inbox = yield self.calendarUnderTest(home="user01", name="inbox") items = yield inbox.objectResources() names = [item.name() for item in items] self.assertEqual(set(names), set(["cal1.ics"]))
def test_inboxCleanupWorkQueueing(self):
    """
    Verify that InboxCleanupWork queues one CleanupOneInboxBoxWork per home
    """
    self.patch(config.InboxCleanup, "CleanupPeriodDays", -1)

    # Fake work item that records the home IDs it was scheduled for
    # instead of doing any actual cleanup.
    class FakeCleanupOneInboxWork(WorkItem):
        # collected home IDs, shared on the class
        scheduledHomeIDs = []

        @classmethod
        def reschedule(cls, txn, seconds, homeID):
            cls.scheduledHomeIDs.append(homeID)
            pass

    self.patch(CleanupOneInboxWork, "reschedule", FakeCleanupOneInboxWork.reschedule)

    # do cleanup, then wait for the job queue to drain
    yield InboxCleanupWork.reschedule(self.transactionUnderTest(), 0)
    yield self.commit()
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    # Map the recorded home resource IDs back to owner UIDs.
    ch = schema.CALENDAR_HOME
    workRows = yield Select(
        [ch.OWNER_UID],
        From=ch,
        Where=ch.RESOURCE_ID.In(Parameter("scheduledHomeIDs", len(FakeCleanupOneInboxWork.scheduledHomeIDs))),
    ).on(self.transactionUnderTest(), scheduledHomeIDs=FakeCleanupOneInboxWork.scheduledHomeIDs)
    homeUIDs = [workRow[0] for workRow in workRows]
    self.assertEqual(set(homeUIDs), set(['user01', 'user02']))  # two homes
def removeHomeForResourceID(self, resourceID):
    """
    Delete the CALENDAR_HOME row with the given resource id, unless
    running in dry-run mode.
    """
    if self.options["dry-run"]:
        return
    ch = schema.CALENDAR_HOME
    yield Delete(
        From=ch,
        Where=(ch.RESOURCE_ID == Parameter("ResourceID")),
    ).on(self.txn, ResourceID=resourceID)
def removePropertiesForResourceID(self, resourceID):
    """
    Delete all RESOURCE_PROPERTY rows of the given resource, unless
    running in dry-run mode.
    """
    if self.options["dry-run"]:
        return
    props = schema.RESOURCE_PROPERTY
    yield Delete(
        From=props,
        Where=(props.RESOURCE_ID == Parameter("ResourceID")),
    ).on(self.txn, ResourceID=resourceID)
def removeRevisionsForCalendarResourceID(self, resourceID):
    """
    Delete all CALENDAR_OBJECT_REVISIONS rows for the given calendar,
    unless running in dry-run mode.
    """
    if self.options["dry-run"]:
        return
    rev = schema.CALENDAR_OBJECT_REVISIONS
    yield Delete(
        From=rev,
        Where=(rev.CALENDAR_RESOURCE_ID == Parameter("ResourceID")),
    ).on(self.txn, ResourceID=resourceID)
def _addNewRevision(cls):
    """
    DAL statement inserting the initial revision row for a new
    collection (no child resource name yet), returning the revision.
    """
    rev = cls._revisionsSchema
    values = {
        rev.HOME_RESOURCE_ID: Parameter("homeID"),
        rev.RESOURCE_ID: Parameter("resourceID"),
        rev.COLLECTION_NAME: Parameter("collectionName"),
        rev.RESOURCE_NAME: None,
        # Always starts false; may be updated to be a tombstone later.
        rev.DELETED: False,
    }
    return Insert(values, Return=[rev.REVISION])
def _childSyncTokenQuery(cls):
    """
    DAL query for retrieving the sync token of a L{CommonHomeChild}
    based on its resource ID.
    """
    rev = cls._revisionsSchema
    return Select(
        [Max(rev.REVISION)],
        From=rev,
        Where=rev.RESOURCE_ID == Parameter("resourceID"),
    )
def _revisionsForResourceIDs(cls, resourceIDs):
    """
    DAL query returning the highest revision per resource id for the
    given ids, skipping rows that are pure tombstones (no resource name
    and marked deleted).
    """
    rev = cls._revisionsSchema
    matchRows = rev.RESOURCE_ID.In(
        Parameter("resourceIDs", len(resourceIDs))).And(
        (rev.RESOURCE_NAME != None).Or(rev.DELETED == False))
    return Select(
        [rev.RESOURCE_ID, Max(rev.REVISION)],
        From=rev,
        Where=matchRows,
        GroupBy=rev.RESOURCE_ID,
    )
def _getModified():
    # Helper closure: read the MODIFIED column of the revision row for
    # "1.vcf" in user01's default address book, committing afterwards so
    # the enclosing test can re-open fresh transactions.
    home = yield self.addressbookHomeUnderTest(name="user01")
    addressbook = yield self.addressbookUnderTest(home="user01", name="addressbook")
    rev = addressbook._revisionsSchema
    modified = yield Select(
        [
            rev.MODIFIED,
        ],
        From=rev,
        Where=(rev.ADDRESSBOOK_HOME_RESOURCE_ID == Parameter("homeID")
               ).And(
            rev.RESOURCE_NAME == Parameter("resourceName"))).on(
        home._txn,
        homeID=home.id(),
        resourceName="1.vcf",
    )
    yield self.commit()
    # Single row, single column: the MODIFIED value itself.
    returnValue(modified[0][0])
def remove(self):
    """
    Remove DB rows corresponding to this notification home.
    """
    # First drop the NOTIFICATION rows belonging to this home...
    no = schema.NOTIFICATION
    yield Delete(
        From=no,
        Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")),
    ).on(self._txn, ResourceID=self._resourceID)

    # ...then the NOTIFICATION_HOME row itself (will cascade to
    # NOTIFICATION_OBJECT_REVISIONS).
    nh = schema.NOTIFICATION_HOME
    yield Delete(
        From=nh,
        Where=(nh.RESOURCE_ID == Parameter("ResourceID")),
    ).on(self._txn, ResourceID=self._resourceID)
def _allColumnsByHomeIDQuery(cls):
    """
    DAL query to load all columns by home ID.
    """
    obj = cls._objectSchema
    columns = [
        obj.RESOURCE_ID,
        obj.NOTIFICATION_UID,
        obj.MD5,
        Len(obj.NOTIFICATION_DATA),
        obj.NOTIFICATION_TYPE,
        obj.CREATED,
        obj.MODIFIED,
    ]
    return Select(
        columns,
        From=obj,
        Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")),
    )
def doWork(self):
    """
    Coalesce all queued push work items sharing this item's push ID:
    delete the duplicates (and their jobs) and send a single push at
    the highest priority seen among them.
    """
    # Find all work items with the same push ID and find the highest
    # priority. Delete matching work items.
    results = (yield Select(
        [self.table.WORK_ID, self.table.JOB_ID, self.table.PUSH_PRIORITY],
        From=self.table,
        Where=self.table.PUSH_ID == self.pushID).on(self.transaction))

    maxPriority = self.pushPriority

    # If there are other enqueued work items for this push ID, find the
    # highest priority one and use that value. Note that L{results} will
    # not contain this work item as job processing behavior will have already
    # deleted it. So we need to make sure the max priority calculation includes
    # this one.
    if results:
        workIDs, jobIDs, priorities = zip(*results)
        maxPriority = max(priorities + (self.pushPriority, ))

        # Delete the work items and jobs we selected - deleting the job will ensure that there are no
        # orphaned" jobs left in the job queue which would otherwise get to run at some later point,
        # though not do anything because there is no related work item.
        yield Delete(
            From=self.table,
            Where=self.table.WORK_ID.In(
                Parameter("workIDs", len(workIDs)))).on(self.transaction, workIDs=workIDs)
        yield Delete(
            From=JobItem.table,  # @UndefinedVariable
            Where=JobItem.jobID.In(Parameter(
                "jobIDs", len(jobIDs)))  # @UndefinedVariable
        ).on(self.transaction, jobIDs=jobIDs)

    # Hand the (deduplicated) push off to the distributor, if one is
    # configured on this transaction.
    pushDistributor = self.transaction._pushDistributor
    if pushDistributor is not None:
        # Convert the integer priority value back into a constant
        priority = PushPriority.lookupByValue(maxPriority)
        yield pushDistributor.enqueue(self.transaction, self.pushID, priority=priority)
def resourcesForCalendar(self, rid):
    """
    Return (via returnValue) the RESOURCE_NAME rows of every object in
    the calendar with the given resource id.
    """
    co = schema.CALENDAR_OBJECT
    nameRows = (yield Select(
        [
            co.RESOURCE_NAME,
        ],
        From=co,
        Where=(co.CALENDAR_RESOURCE_ID == Parameter("RID")),
    ).on(self.txn, RID=rid))
    returnValue(nameRows)