def test_notificationObjectRevisions(self): """ Verify that all extra notification object revisions are deleted by FindMinValidRevisionWork and RevisionCleanupWork """ # get sync token home = yield self.homeUnderTest(name="user01") token = yield home.syncToken() # make notification changes as side effect of sharing yield self._createCalendarShare() # Get object revisions rev = schema.NOTIFICATION_OBJECT_REVISIONS revisionRows = yield Select( [rev.REVISION], From=rev, ).on(self.transactionUnderTest()) self.assertNotEqual(len(revisionRows), 0) # do FindMinValidRevisionWork yield self.transactionUnderTest().enqueue( FindMinValidRevisionWork, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # Get the minimum valid revision and check it minValidRevision = yield self.transactionUnderTest( ).calendarserverValue("MIN-VALID-REVISION") self.assertEqual(int(minValidRevision), max([row[0] for row in revisionRows]) + 1) # do RevisionCleanupWork yield self.transactionUnderTest().enqueue( RevisionCleanupWork, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # Get group1 object revision rev = schema.NOTIFICATION_OBJECT_REVISIONS revisionRows = yield Select( [rev.REVISION], From=rev, ).on(self.transactionUnderTest()) self.assertEqual(len(revisionRows), 0) # old sync token fails home = yield self.homeUnderTest(name="user01") yield self.failUnlessFailure(home.resourceNamesSinceToken(token, "1"), SyncTokenValidException) yield self.failUnlessFailure( home.resourceNamesSinceToken(token, "infinity"), SyncTokenValidException)
def _runAllJobs(self):
    """
    Drain the job queue, running each outstanding job in turn until the
    queue is empty, committing after each job.
    """
    while True:
        pending = yield JobItem.all(self.transactionUnderTest())
        if not pending:
            break
        yield pending[0].run()
        yield self.commit()
    yield self.commit()
def action_refreshgroups(self, j):
    # Force the directory to re-read group data, then reschedule the group
    # cacher polling work to run immediately.
    txn = self._store.newTransaction(label="ControlAPIResource.action_refreshgroups")
    yield txn.directoryService().flush()
    work = yield GroupCacherPollingWork.reschedule(txn, 0, force=True)
    jobID = work.jobID
    yield txn.commit()

    # Optionally block until the polling job and all follow-on group work
    # items have completed.
    if "wait" in j and j["wait"]:
        yield JobItem.waitJobDone(self._store.newTransaction, reactor, 60.0, jobID)
        yield JobItem.waitWorkDone(self._store.newTransaction, reactor, 60.0, (
            GroupRefreshWork,
            GroupAttendeeReconciliationWork,
            GroupDelegateChangesWork,
            GroupShareeReconciliationWork,
        ))
    returnValue(self._ok("ok", "Group refresh scheduled"))
def action_refreshgroups(self, j):
    # Force the directory to re-read group data, then reschedule the group
    # cacher polling work to run immediately.
    # Label the transaction for diagnostics, consistent with the other
    # ControlAPIResource actions.
    txn = self._store.newTransaction(label="ControlAPIResource.action_refreshgroups")
    yield txn.directoryService().flush()
    work = yield GroupCacherPollingWork.reschedule(txn, 0, force=True)
    jobID = work.jobID
    yield txn.commit()

    # Optionally block until the polling job and all follow-on group work
    # items have completed.
    if "wait" in j and j["wait"]:
        yield JobItem.waitJobDone(self._store.newTransaction, reactor, 60.0, jobID)
        yield JobItem.waitWorkDone(self._store.newTransaction, reactor, 60.0, (
            GroupRefreshWork,
            GroupAttendeeReconciliationWork,
            GroupDelegateChangesWork,
            GroupShareeReconciliationWork,
        ))
    returnValue(self._ok("ok", "Group refresh scheduled"))
def test_cascade_delete_cleanup(self):
    """
    Test that when work associated with L{txdav.caldav.datastore.scheduling.work.ScheduleWork}
    is removed with the L{ScheduleWork} item being removed, the associated
    L{JobItem} runs and removes itself and the L{ScheduleWork}.
    """

    ScheduleWorkMixin._queued = 0
    txn = self.transactionUnderTest()
    home = yield self.homeUnderTest(name="user01")
    yield ScheduleOrganizerWork.schedule(
        txn, "12345-67890", "create", home, None, None,
        self.calendar_new, "urn:uuid:user01", 2, True,
    )
    yield self.commit()
    self.assertEqual(ScheduleWorkMixin._queued, 1)

    # Delete the concrete work item only; job and base ScheduleWork remain
    jobs = yield JobItem.all(self.transactionUnderTest())
    work = yield jobs[0].workItem()
    yield WorkItem.delete(work)
    yield self.commit()

    # Job and base ScheduleWork are still present, but the job can no longer
    # resolve its work item
    jobs = yield JobItem.all(self.transactionUnderTest())
    self.assertEqual(len(jobs), 1)
    baseWork = yield ScheduleWork.all(self.transactionUnderTest())
    self.assertEqual(len(baseWork), 1)
    self.assertEqual(baseWork[0].jobID, jobs[0].jobID)
    work = yield jobs[0].workItem()
    self.assertTrue(work is None)
    yield self.commit()

    # Running the queue cleans up the orphaned job and ScheduleWork rows
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    jobs = yield JobItem.all(self.transactionUnderTest())
    self.assertEqual(len(jobs), 0)
    work = yield ScheduleOrganizerWork.all(self.transactionUnderTest())
    self.assertEqual(len(work), 0)
    baseWork = yield ScheduleWork.all(self.transactionUnderTest())
    self.assertEqual(len(baseWork), 0)
def test_addressbookObjectRevisions(self): """ Verify that all extra addressbook object revisions are deleted by FindMinValidRevisionWork and RevisionCleanupWork """ # get sync token addressbook = yield self.addressbookUnderTest(home="user01", name="addressbook") token = yield addressbook.syncToken() # make changes card1Object = yield self.addressbookObjectUnderTest(self.transactionUnderTest(), name="card1.vcf", addressbook_name="addressbook", home="user01") yield card1Object.remove() card2Object = yield self.addressbookObjectUnderTest(self.transactionUnderTest(), name="card2.vcf", addressbook_name="addressbook", home="user01") yield card2Object.remove() # Get object revisions rev = schema.ADDRESSBOOK_OBJECT_REVISIONS revisionRows = yield Select( [rev.REVISION], From=rev, ).on(self.transactionUnderTest()) self.assertNotEqual(len(revisionRows), 0) # do FindMinValidRevisionWork yield self.transactionUnderTest().enqueue(FindMinValidRevisionWork, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # Get the minimum valid revision and check it minValidRevision = yield self.transactionUnderTest().calendarserverValue("MIN-VALID-REVISION") self.assertEqual(int(minValidRevision), max([row[0] for row in revisionRows]) + 1) # do RevisionCleanupWork yield self.transactionUnderTest().enqueue(RevisionCleanupWork, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # Get group1 object revision rev = schema.ADDRESSBOOK_OBJECT_REVISIONS revisionRows = yield Select( [rev.REVISION], From=rev, ).on(self.transactionUnderTest()) self.assertEqual(len(revisionRows), 0) # old sync token fails addressbook = yield self.addressbookUnderTest(home="user01", name="addressbook") yield self.failUnlessFailure(addressbook.resourceNamesSinceToken(token), SyncTokenValidException)
def stopIt():
    # Capture any jobs still queued before shutting the job pool down, so
    # leftover work can be reported as a test failure below.
    if enableJobProcessing:
        txn = store.newTransaction()
        jobs = yield JobItem.all(txn)
        yield txn.commit()
        yield pool.stopService()
    else:
        jobs = ()

    # active transactions should have been shut down.
    wasBusy = len(cp._busy)
    busyText = repr(cp._busy)
    result = yield cp.stopService()
    if deriveValue(testCase, _SPECIAL_TXN_CLEAN, lambda tc: False):
        if wasBusy:
            testCase.fail("Outstanding Transactions: " + busyText)
        returnValue(result)
    if len(jobs):
        testCase.fail(
            "Jobs left in job queue {}: {}".format(testCase, ",".join([job.workType for job in jobs]))
        )
    returnValue(result)
def test_work(self):
    # Enqueue an iMIP reply work item and verify the queue drains cleanly.
    calendar = """BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:user01
ATTENDEE:mailto:[email protected];PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
"""
    txn = self.store.newTransaction()
    yield txn.enqueue(
        IMIPReplyWork,
        organizer="urn:x-uid:user01",
        attendee="mailto:[email protected]",
        icalendarText=calendar
    )
    yield txn.commit()
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def test_processReply(self):
    # Make sure an unknown token in an older email is deleted
    msg = email.message_from_string(self.dataFile('good_reply_past'))
    result = (yield self.receiver.processReply(msg))
    self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN_OLD)

    # Make sure an unknown token is not processed
    msg = email.message_from_string(self.dataFile('good_reply_future'))
    result = (yield self.receiver.processReply(msg))
    self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN)

    # Make sure a known token *is* processed; create the token first so the
    # reply message can be matched against it
    txn = self.store.newTransaction()
    yield txn.imipCreateToken(
        "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
        "mailto:[email protected]",
        "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
        token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
    )
    yield txn.commit()
    result = (yield self.receiver.processReply(msg))
    self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def test_referenceOldEvent(self): """ Verify that inbox items references old events are removed """ # events are already too old, so make one event end now calendar = yield self.calendarUnderTest(home="user01", name="calendar") cal3Event = yield calendar.objectResourceWithName("cal3.ics") tr = schema.TIME_RANGE yield Update( { tr.END_DATE: datetime.datetime.utcnow() }, Where=tr.CALENDAR_OBJECT_RESOURCE_ID == cal3Event._resourceID).on( self.transactionUnderTest()) # do cleanup yield self.transactionUnderTest().enqueue( CleanupOneInboxWork, homeID=calendar.ownerHome()._resourceID, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # check that old items are deleted inbox = yield self.calendarUnderTest(home="user01", name="inbox") items = yield inbox.objectResources() names = [item.name() for item in items] self.assertEqual(set(names), set(["cal3.ics"]))
def test_old_queued(self): """ Verify that old inbox items are removed """ # Patch to force remove work items self.patch(config.InboxCleanup, "InboxRemoveWorkThreshold", 0) # Predate some inbox items inbox = yield self.calendarUnderTest(home="user01", name="inbox") oldDate = datetime.datetime.utcnow() - datetime.timedelta(days=float(config.InboxCleanup.ItemLifetimeDays), seconds=10) itemsToPredate = ["cal2.ics", "cal3.ics"] co = schema.CALENDAR_OBJECT yield Update( {co.CREATED: oldDate}, Where=co.RESOURCE_NAME.In(Parameter("itemsToPredate", len(itemsToPredate))).And( co.CALENDAR_RESOURCE_ID == inbox._resourceID) ).on(self.transactionUnderTest(), itemsToPredate=itemsToPredate) # do cleanup yield self.transactionUnderTest().enqueue(CleanupOneInboxWork, homeID=inbox.ownerHome()._resourceID, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # check that old items are deleted inbox = yield self.calendarUnderTest(home="user01", name="inbox") items = yield inbox.objectResources() names = [item.name() for item in items] self.assertEqual(set(names), set(["cal1.ics"]))
def test_orphans(self):
    """
    Verify that orphaned Inbox items are removed
    """

    # Disable age-based removal so only orphan detection applies
    self.patch(config.InboxCleanup, "ItemLifetimeDays", -1)
    self.patch(config.InboxCleanup, "ItemLifeBeyondEventEndDays", -1)

    # create orphans by deleting events
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    for item in (yield cal.objectResourcesWithNames(["cal1.ics", "cal3.ics"])):
        yield item.purge()

    # do cleanup
    yield self.transactionUnderTest().enqueue(
        CleanupOneInboxWork, homeID=cal.ownerHome()._resourceID,
        notBefore=datetime.datetime.utcnow())
    yield self.commit()
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    # check that orphans are deleted; the inbox item whose event still
    # exists is kept
    inbox = yield self.calendarUnderTest(home="user01", name="inbox")
    items = yield inbox.objectResources()
    names = [item.name() for item in items]
    self.assertEqual(set(names), set(["cal2.ics"]))
def test_create(self):
    """
    Test that jobs associated with L{txdav.caldav.datastore.scheduling.work.ScheduleOrganizerSendWork}
    can be created and correctly removed.
    """

    txn = self.transactionUnderTest()
    home = yield self.homeUnderTest(name="user01")
    yield ScheduleOrganizerSendWork.schedule(
        txn, "create", home, None, "urn:x-uid:user01", "urn:x-uid:user02",
        self.itip_new, True, 1000,
    )

    jobs = yield JobItem.all(self.transactionUnderTest())
    self.assertEqual(len(jobs), 1)

    # Run the work item directly and verify the attendee received the event
    work = yield jobs[0].workItem()
    yield work.doWork()

    home2 = yield self.calendarUnderTest(home="user02", name="calendar")
    cobjs = yield home2.calendarObjects()
    self.assertEqual(len(cobjs), 1)
    # cal2 = yield cobjs[0].component()

    yield work.delete()
    yield jobs[0].delete()
    yield self.commit()
def afterWork(self):
    """
    A hook that gets called after the L{WorkItem} does its real work. This
    can be used for common clean-up behaviors. The base implementation does
    nothing.
    """
    yield super(ScheduleWorkMixin, self).afterWork()

    # Find the next item and schedule to run immediately after this. We only
    # coalesce ScheduleOrganizerSendWork.
    if self.workType() == ScheduleOrganizerSendWork.workType():
        # Look for the next queued work item (lowest workID) for the same
        # iCalendar UID, excluding this one.
        all = yield self.baseWork.query(
            self.transaction,
            (ScheduleWork.icalendarUID == self.icalendarUID).And(
                ScheduleWork.workID != self.workID),
            order=ScheduleWork.workID,
            limit=1,
        )
        if all:
            work = all[0]
            # Only promote items of the same work type
            if work.workType == self.workType():
                job = yield JobItem.load(self.transaction, work.jobID)
                yield job.update(notBefore=datetime.datetime.utcnow())
                log.debug(
                    "ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'",
                    id=work.workID, uid=self.icalendarUID)
def stopIt():
    # Snapshot any still-queued jobs before the job pool goes away so that
    # leftover work can be reported as a failure below.
    if enableJobProcessing:
        txn = store.newTransaction()
        remaining = yield JobItem.all(txn)
        yield txn.commit()
        yield pool.stopService()
    else:
        remaining = ()

    # active transactions should have been shut down.
    busyCount = len(cp._busy)
    busyDescription = repr(cp._busy)
    result = yield cp.stopService()
    if deriveValue(testCase, _SPECIAL_TXN_CLEAN, lambda tc: False):
        if busyCount:
            testCase.fail("Outstanding Transactions: " + busyDescription)
        returnValue(result)
    if len(remaining):
        leftover = ",".join([job.workType for job in remaining])
        testCase.fail("Jobs left in job queue {}: {}".format(
            testCase, leftover))
    returnValue(result)
def test_old(self):
    """
    Verify that old inbox items are removed
    """

    # Disable event-end based removal so only item age applies
    self.patch(config.InboxCleanup, "ItemLifeBeyondEventEndDays", -1)

    # Predate some inbox items so they fall past the item lifetime
    inbox = yield self.calendarUnderTest(home="user01", name="inbox")
    oldDate = datetime.datetime.utcnow() - datetime.timedelta(days=float(config.InboxCleanup.ItemLifetimeDays), seconds=10)
    itemsToPredate = ["cal2.ics", "cal3.ics"]
    co = schema.CALENDAR_OBJECT
    yield Update(
        {co.CREATED: oldDate},
        Where=co.RESOURCE_NAME.In(Parameter("itemsToPredate", len(itemsToPredate))).And(
            co.CALENDAR_RESOURCE_ID == inbox._resourceID)
    ).on(self.transactionUnderTest(), itemsToPredate=itemsToPredate)

    # do cleanup
    yield self.transactionUnderTest().enqueue(CleanupOneInboxWork, homeID=inbox.ownerHome()._resourceID, notBefore=datetime.datetime.utcnow())
    yield self.commit()
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    # check that old items are deleted; only the unpredated item remains
    inbox = yield self.calendarUnderTest(home="user01", name="inbox")
    items = yield inbox.objectResources()
    names = [item.name() for item in items]
    self.assertEqual(set(names), set(["cal1.ics"]))
def test_inboxCleanupWorkQueueing(self):
    """
    Verify that InboxCleanupWork queues one CleanupOneInboxBoxWork per home
    """
    self.patch(config.InboxCleanup, "CleanupPeriodDays", -1)

    # Replace CleanupOneInboxWork.reschedule with a fake that just records
    # the home IDs it was asked to schedule for.
    class FakeCleanupOneInboxWork(WorkItem):
        scheduledHomeIDs = []

        @classmethod
        def reschedule(cls, txn, seconds, homeID):
            cls.scheduledHomeIDs.append(homeID)
            pass

    self.patch(CleanupOneInboxWork, "reschedule", FakeCleanupOneInboxWork.reschedule)

    # do cleanup
    yield InboxCleanupWork.reschedule(self.transactionUnderTest(), 0)
    yield self.commit()
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    # Map the recorded home resource IDs back to owner UIDs
    ch = schema.CALENDAR_HOME
    workRows = yield Select(
        [ch.OWNER_UID],
        From=ch,
        Where=ch.RESOURCE_ID.In(Parameter("scheduledHomeIDs", len(FakeCleanupOneInboxWork.scheduledHomeIDs))),
    ).on(self.transactionUnderTest(), scheduledHomeIDs=FakeCleanupOneInboxWork.scheduledHomeIDs)
    homeUIDs = [workRow[0] for workRow in workRows]
    self.assertEqual(set(homeUIDs), set(['user01', 'user02']))  # two homes
def makeJob(cls, transaction, **kwargs):
    """
    A new work item needs to be created. First we create a Job record, then
    we create the actual work item related to the job.

    @param transaction: the transaction to use
    @type transaction: L{IAsyncTransaction}
    """
    jobargs = {"workType": cls.workType()}

    def _transferArg(name):
        # Move a job-related kwarg into jobargs; fall back to a class-level
        # "default_<name>" attribute when not supplied.
        arg = kwargs.pop(name, None)
        if arg is not None:
            jobargs[name] = arg
        elif hasattr(cls, "default_{}".format(name)):
            jobargs[name] = getattr(cls, "default_{}".format(name))

    _transferArg("jobID")
    _transferArg("priority")
    _transferArg("weight")
    _transferArg("notBefore")
    _transferArg("pause")

    # Always need a notBefore
    if "notBefore" not in jobargs:
        # NOTE(review): assumes "datetime" here is the class itself
        # (i.e. "from datetime import datetime") — confirm the module's
        # imports, since other code in this project uses
        # datetime.datetime.utcnow().
        jobargs["notBefore"] = datetime.utcnow()

    job = yield JobItem.create(transaction, **jobargs)

    # The work row references the job it belongs to
    kwargs["jobID"] = job.jobID
    work = yield cls.create(transaction, **kwargs)
    work.__dict__["job"] = job
    returnValue(work)
def test_processDSN(self): template = """BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN METHOD:REQUEST PRODID:-//example Inc.//iCal 3.0//EN BEGIN:VTIMEZONE TZID:US/Pacific BEGIN:DAYLIGHT DTSTART:20070311T020000 RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU TZNAME:PDT TZOFFSETFROM:-0800 TZOFFSETTO:-0700 END:DAYLIGHT BEGIN:STANDARD DTSTART:20071104T020000 RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU TZNAME:PST TZOFFSETFROM:-0700 TZOFFSETTO:-0800 END:STANDARD END:VTIMEZONE BEGIN:VEVENT UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C DTSTART;TZID=US/Pacific:20080812T094500 DTEND;TZID=US/Pacific:20080812T104500 ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam ple.com ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A CTION;[email protected]:mailto:[email protected] CREATED:20080812T191857Z DTSTAMP:20080812T191932Z ORGANIZER;CN=User 01:mailto:xyzzy+%[email protected] SEQUENCE:2 SUMMARY:New Event TRANSP:OPAQUE END:VEVENT END:VCALENDAR """ # Make sure an unknown token is not processed calBody = template % "bogus_token" self.assertEquals((yield self.receiver.processDSN(calBody, "xyzzy")), MailReceiver.UNKNOWN_TOKEN) # Make sure a known token *is* processed txn = self.store.newTransaction() record = (yield txn.imipCreateToken( "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500", "mailto:[email protected]", "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C")) yield txn.commit() calBody = template % record.token result = (yield self.receiver.processDSN(calBody, "xyzzy")) self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED) yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def test_notificationObjectRevisions(self): """ Verify that all extra notification object revisions are deleted by FindMinValidRevisionWork and RevisionCleanupWork """ # get sync token home = yield self.homeUnderTest(name="user01") token = yield home.syncToken() # make notification changes as side effect of sharing yield self._createCalendarShare() # Get object revisions rev = schema.NOTIFICATION_OBJECT_REVISIONS revisionRows = yield Select( [rev.REVISION], From=rev, ).on(self.transactionUnderTest()) self.assertNotEqual(len(revisionRows), 0) # do FindMinValidRevisionWork yield self.transactionUnderTest().enqueue(FindMinValidRevisionWork, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # Get the minimum valid revision and check it minValidRevision = yield self.transactionUnderTest().calendarserverValue("MIN-VALID-REVISION") self.assertEqual(int(minValidRevision), max([row[0] for row in revisionRows]) + 1) # do RevisionCleanupWork yield self.transactionUnderTest().enqueue(RevisionCleanupWork, notBefore=datetime.datetime.utcnow()) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60) # Get group1 object revision rev = schema.NOTIFICATION_OBJECT_REVISIONS revisionRows = yield Select( [rev.REVISION], From=rev, ).on(self.transactionUnderTest()) self.assertEqual(len(revisionRows), 0) # old sync token fails home = yield self.homeUnderTest(name="user01") yield self.failUnlessFailure(home.resourceNamesSinceToken(token, "1"), SyncTokenValidException) yield self.failUnlessFailure(home.resourceNamesSinceToken(token, "infinity"), SyncTokenValidException)
def updateWorkTypes(cls, updates):
    """
    Update the priority and weight values of each specified work type.

    @param updates: a dict whose keys are work class names, and whose values
        are dicts containing one or both of "weight" and "priority" keys and
        numeric values to change to.
    @type updates: L{dict}
    """
    for workType, settings in updates.items():
        try:
            workItem = JobItem.workItemForType(workType)
        except KeyError:
            log.error(
                "updateWorkTypes: '{workType}' is not a valid work type",
                workType=workType,
            )
            continue

        # Apply the priority change only if it parses as an int in range
        if "priority" in settings:
            priority = settings["priority"]
            try:
                priority = int(priority)
                if not (WORK_PRIORITY_LOW <= priority <= WORK_PRIORITY_HIGH):
                    raise ValueError
            except ValueError:
                # Fixed message typo: "is not value" -> "is not valid"
                log.error(
                    "updateWorkTypes: '{workType}' priority '{priority}' is not valid",
                    workType=workType, priority=priority,
                )
            else:
                workItem.default_priority = priority
        else:
            priority = "unchanged"

        # Apply the weight change only if it parses as an int in range
        if "weight" in settings:
            weight = settings["weight"]
            try:
                weight = int(weight)
                if not (WORK_WEIGHT_0 <= weight <= WORK_WEIGHT_10):
                    raise ValueError
            except ValueError:
                # Fixed message typo: "is not value" -> "is not valid"
                log.error(
                    "updateWorkTypes: '{workType}' weight '{weight}' is not valid",
                    workType=workType, weight=weight,
                )
            else:
                workItem.default_weight = weight
        else:
            weight = "unchanged"

        log.info(
            "updateWorkTypes: '{workType}' priority: '{priority}' weight: '{weight}' ",
            workType=workType, priority=priority, weight=weight,
        )
def action_schedulingdone(self, j):
    """
    Wait for all schedule queue items to complete.
    """
    scheduleWorkTypes = (
        ScheduleOrganizerWork,
        ScheduleOrganizerSendWork,
        ScheduleReplyWork,
        ScheduleRefreshWork,
        ScheduleAutoReplyWork,
    )
    yield JobItem.waitWorkDone(
        self._store.newTransaction, reactor, 120.0, scheduleWorkTypes,
    )
    returnValue(self._ok("ok", "Scheduling done"))
def data_jobcount(self):
    """
    Return a count of job types.

    @return: the JSON result.
    @rtype: L{int}
    """
    count = JobItem.numberOfWorkTypes()
    return succeed(count)
def executeJobHere(self, job):
    """
    This is where it's time to actually do the job. The controller process
    has instructed this worker to do it; so, look up the data in the row,
    and do it.
    """
    performing = JobItem.ultimatelyPerform(self.transactionFactory, job)
    # Callers expect an empty dict as the result payload
    performing.addCallback(lambda _ignored: {})
    return performing
def test_basicWork(self): """ Verify that an L{TestWork} item can be enqueued and executed. """ # do FindMinValidRevisionWork yield TestWork.schedule(self.storeUnderTest(), 0, 1, 2, 3) work = yield TestWork.all(self.transactionUnderTest()) self.assertEqual(len(work), 1) self.assertEqual(work[0].delay, 3) job = yield JobItem.querysimple(self.transactionUnderTest(), jobID=work[0].jobID) self.assertEqual(len(job), 1) self.assertEqual(job[0].priority, 1) self.assertEqual(job[0].weight, 2) yield self.commit() yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
def remove(self): """ Remove this L{WorkItem} and the associated L{JobItem}. Typically work is not removed directly, but goes away when processed, but in some cases (e.g., pod-2-pod migration) old work needs to be removed along with the job (which is in a pause state and would otherwise never run). """ # Delete the job, then self yield JobItem.deletesome(self.transaction, JobItem.jobID == self.jobID) yield self.delete()
def _runOneJob(self):
    """
    Run the first outstanding job, if there is one, then commit.
    """
    pending = yield JobItem.all(self.transactionUnderTest())
    if pending:
        yield pending[0].run()
    yield self.commit()
def test_create(self):
    """
    Test that jobs associated with L{txdav.caldav.datastore.scheduling.work.ScheduleOrganizerWork}
    can be created and correctly removed.
    """

    ScheduleWorkMixin._queued = 0
    txn = self.transactionUnderTest()
    home = yield self.homeUnderTest(name="user01")
    yield ScheduleOrganizerWork.schedule(
        txn, "12345-67890", "create", home, None, None,
        self.calendar_new, "urn:uuid:user01", 2, True,
    )
    yield self.commit()
    self.assertEqual(ScheduleWorkMixin._queued, 1)

    # Exactly one job, whose work item is the organizer work just scheduled
    jobs = yield JobItem.all(self.transactionUnderTest())
    self.assertEqual(len(jobs), 1)
    work = yield jobs[0].workItem()
    self.assertTrue(isinstance(work, ScheduleOrganizerWork))
    self.assertEqual(work.icalendarUID, "12345-67890")
    self.assertEqual(scheduleActionFromSQL[work.scheduleAction], "create")

    # Deleting the work and job removes all related rows
    yield work.delete()
    yield jobs[0].delete()
    yield self.commit()

    jobs = yield JobItem.all(self.transactionUnderTest())
    self.assertEqual(len(jobs), 0)
    work = yield ScheduleOrganizerWork.all(self.transactionUnderTest())
    self.assertEqual(len(work), 0)
    baseWork = yield ScheduleWork.all(self.transactionUnderTest())
    self.assertEqual(len(baseWork), 0)
def _runOneJob(self, work_type=None):
    """
    Run the first outstanding job, optionally restricted to jobs of the
    given work type, then commit.
    """
    pending = yield JobItem.all(self.transactionUnderTest())
    candidate = next(
        (job for job in pending
         if work_type is None or job.workType == work_type),
        None,
    )
    if candidate is not None:
        yield candidate.run()
    yield self.commit()
def test_workFailure(self):
    # Force the SMTP sender to fail; the queued work should still drain
    # without hanging the queue.
    self.sender.smtpSender.shouldSucceed = False

    txn = self.store.newTransaction()
    yield txn.enqueue(
        IMIPInvitationWork,
        fromAddr=ORGANIZER,
        toAddr=ATTENDEE,
        icalendarText=initialInviteText.replace("\n", "\r\n"),
    )
    yield txn.commit()
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def data_jobs(self):
    """
    Return a summary of the job queue.

    @return: a string containing the JSON result.
    @rtype: L{str}
    """
    if self.factory.store:
        # Label the transaction for diagnostics, consistent with the other
        # DashboardProtocol data methods.
        txn = self.factory.store.newTransaction("DashboardProtocol.data_jobs")
        records = (yield JobItem.histogram(txn))
        yield txn.commit()
    else:
        records = {}
    returnValue(records)
def test_processReplyMissingAttendee(self):
    msg = email.message_from_string(
        self.dataFile('reply_missing_attendee'))

    # Create the matching iMIP token so the reply can be resolved
    txn = self.store.newTransaction()
    yield txn.imipCreateToken(
        "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
        "mailto:[email protected]",
        "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
        token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f")
    yield txn.commit()

    # A reply missing the attendee is still injected for processing
    result = (yield self.receiver.processReply(msg))
    self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def data_jobs(self):
    """
    Return a summary of the job queue.

    @return: a string containing the JSON result.
    @rtype: L{str}
    """
    if self.factory.store:
        txn = self.factory.store.newTransaction("DashboardProtocol.data_jobs")
        records = (yield JobItem.histogram(txn))
        yield txn.commit()
    else:
        # No store available (e.g. not yet started) - report an empty summary
        records = {}
    returnValue(records)
def test_processReplyMissingAttendee(self):
    msg = email.message_from_string(self.dataFile('reply_missing_attendee'))

    # Create the matching iMIP token so the reply can be resolved
    txn = self.store.newTransaction()
    yield txn.imipCreateToken(
        "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
        "mailto:[email protected]",
        "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
        token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
    )
    yield txn.commit()

    # A reply missing the attendee is still injected for processing
    result = (yield self.receiver.processReply(msg))
    self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def action_revisioncleanup(self, j):
    """
    Wait for all schedule queue items to complete.
    """
    from txdav.common.datastore.work.revision_cleanup import _triggerRevisionCleanup
    from txdav.common.datastore.work.revision_cleanup import RevisionCleanupWork

    # Label the transaction for diagnostics, consistent with the other
    # ControlAPIResource actions.
    txn = self._store.newTransaction(
        label="ControlAPIResource.action_revisioncleanup")
    yield _triggerRevisionCleanup(txn, 60)
    yield txn.commit()
    yield JobItem.waitWorkDone(self._store.newTransaction, reactor, 120.0, (
        RevisionCleanupWork,
    ))
    returnValue(self._ok("ok", "RevisionCleanupWork done"))
def checkTemporaryFailure(self, results):
    """
    Check to see whether a temporary failure should be raised as opposed to
    continuing on with a permanent failure.

    @param results: set of results gathered in L{extractSchedulingResponse}
    @type results: L{list}
    """
    # Only treat as temporary when every recipient is still pending
    if all([result[1] == iTIPRequestStatus.MESSAGE_PENDING_CODE for result in results]):
        job = yield JobItem.load(self.transaction, self.jobID)
        if job.failed >= config.Scheduling.Options.WorkQueues.MaxTemporaryFailures:
            # Too many retries: give up and mark every result as
            # service-unavailable (permanent failure)
            for ctr, result in enumerate(results):
                results[ctr] = (result[0], iTIPRequestStatus.SERVICE_UNAVAILABLE_CODE,)
            returnValue(None)
        else:
            # Retry later after the configured delay
            raise JobTemporaryError(config.Scheduling.Options.WorkQueues.TemporaryFailureDelay)
def action_revisioncleanup(self, j):
    """
    Wait for all schedule queue items to complete.
    """
    # Imported here to avoid a module-level import cycle
    from txdav.common.datastore.work.revision_cleanup import _triggerRevisionCleanup
    from txdav.common.datastore.work.revision_cleanup import RevisionCleanupWork

    txn = self._store.newTransaction(
        label="ControlAPIResource.action_revisioncleanup")
    yield _triggerRevisionCleanup(txn, 60)
    yield txn.commit()
    yield JobItem.waitWorkDone(self._store.newTransaction, reactor, 120.0, (RevisionCleanupWork, ))
    returnValue(self._ok("ok", "RevisionCleanupWork done"))
def test_update_delete_old_nonextant(self):
    """
    Verify that old missing groups are deleted from group cache
    """
    oldGroupPurgeIntervalSeconds = config.AutomaticPurging.GroupPurgeIntervalSeconds
    store = self.storeUnderTest()

    for uid in (
        u"testgroup",
        u"emptygroup",
    ):
        # Cache the group and give it a delegate so it is referenced
        config.AutomaticPurging.GroupPurgeIntervalSeconds = oldGroupPurgeIntervalSeconds
        txn = store.newTransaction()
        group = yield txn.groupByUID(uid)
        yield txn.addDelegateGroup(delegator=u"sagen", delegateGroupID=group.groupID, readWrite=True)
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        self.assertNotEqual(group, None)
        self.assertTrue(group.extant)

        # Remove the group, still cached
        yield self.directory.removeRecords([uid])
        txn = store.newTransaction()
        yield self.groupCacher.update(txn)
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        yield JobItem.waitEmpty(store.newTransaction, reactor, 60)

        # Group row remains but is marked non-extant
        txn = store.newTransaction()
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        self.assertNotEqual(group, None)
        self.assertFalse(group.extant)

        # delete the group: with a zero purge interval the next update
        # removes the stale row entirely
        config.AutomaticPurging.GroupPurgeIntervalSeconds = "0.0"
        txn = store.newTransaction()
        yield self.groupCacher.update(txn)
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        self.assertEqual(group, None)
def test_ImportComponentOrganizer(self):
    # Import a collection whose component has an organizer and several
    # attendees, then let scheduling work drain before verifying.
    component = Component.allFromString(DATA_WITH_ORGANIZER)
    yield importCollectionComponent(self.store, component)
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)

    txn = self.store.newTransaction()
    home = yield txn.calendarHomeWithUID("user01")
    collection = yield home.childWithName("calendar")

    # Verify properties have been set
    collectionProperties = collection.properties()
    for element, value in (
        (davxml.DisplayName, "I'm the organizer"),
        (customxml.CalendarColor, "#0000FFFF"),
    ):
        self.assertEquals(
            value,
            collectionProperties[PropertyName.fromElement(element)]
        )

    # Verify the organizer's child objects
    objects = yield collection.listObjectResources()
    self.assertEquals(len(objects), 1)

    # Verify the attendees' child objects
    home = yield txn.calendarHomeWithUID("user02")
    collection = yield home.childWithName("calendar")
    objects = yield collection.listObjectResources()
    self.assertEquals(len(objects), 1)

    home = yield txn.calendarHomeWithUID("user03")
    collection = yield home.childWithName("calendar")
    objects = yield collection.listObjectResources()
    self.assertEquals(len(objects), 1)

    home = yield txn.calendarHomeWithUID("mercury")
    collection = yield home.childWithName("calendar")
    objects = yield collection.listObjectResources()
    self.assertEquals(len(objects), 1)

    yield txn.commit()
def test_update_delete_old_nonextant(self):
    """
    Verify that old missing groups are deleted from group cache
    """
    oldGroupPurgeIntervalSeconds = config.AutomaticPurging.GroupPurgeIntervalSeconds
    store = self.storeUnderTest()

    for uid in (u"testgroup", u"emptygroup",):
        # Cache the group and give it a delegate so it is referenced
        config.AutomaticPurging.GroupPurgeIntervalSeconds = oldGroupPurgeIntervalSeconds
        txn = store.newTransaction()
        group = yield txn.groupByUID(uid)
        yield txn.addDelegateGroup(delegator=u"sagen", delegateGroupID=group.groupID, readWrite=True)
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        self.assertNotEqual(group, None)
        self.assertTrue(group.extant)

        # Remove the group, still cached
        yield self.directory.removeRecords([uid])
        txn = store.newTransaction()
        yield self.groupCacher.update(txn)
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        yield JobItem.waitEmpty(store.newTransaction, reactor, 60)

        # Group row remains but is marked non-extant
        txn = store.newTransaction()
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        self.assertNotEqual(group, None)
        self.assertFalse(group.extant)

        # delete the group: with a zero purge interval the next update
        # removes the stale row entirely
        config.AutomaticPurging.GroupPurgeIntervalSeconds = "0.0"
        txn = store.newTransaction()
        yield self.groupCacher.update(txn)
        group = yield txn.groupByUID(uid, create=False)
        yield txn.commit()
        self.assertEqual(group, None)
def test_iMIP_delivery(self):
    """
    Delivering a scheduling REQUEST to a remote (email-only) attendee via
    iMIP queues work that hands the message to the mail sender.
    """
    data = """BEGIN:VCALENDAR
VERSION:2.0
METHOD:REQUEST
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""

    sent = []

    class StubSender(object):
        # Capture outbound (from, to) pairs instead of sending real mail
        def outbound(self, txn, fromAddr, toAddr, calendar):
            sent.append((fromAddr, toAddr))
            return succeed(None)

    self.patch(IMIPInvitationWork, "mailSender", StubSender())

    organizer = LocalCalendarUser("mailto:[email protected]", None)
    scheduler = iMIPProcessing.FakeSchedule(organizer, Component.fromString(data))
    scheduler.txn = self.transactionUnderTest()

    recipients = (RemoteCalendarUser("mailto:[email protected]"),)
    responses = ScheduleResponseQueue("REQUEST", responsecode.OK)
    delivery = ScheduleViaIMip(scheduler, recipients, responses, False)
    yield delivery.generateSchedulingResponses()

    # One response, reporting that the message was sent
    self.assertEqual(len(responses.responses), 1)
    self.assertEqual(str(responses.responses[0].reqstatus), iTIPRequestStatus.MESSAGE_SENT)

    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)

    # Exactly one message went out, organizer -> attendee
    self.assertEqual(len(sent), 1)
    self.assertEqual(sent[0], ("mailto:[email protected]", "mailto:[email protected]",))
def test_processReplyMissingAttachment(self):
    """
    A reply message whose calendar attachment is missing is forwarded on
    to the organizer rather than processed as an iTIP reply.
    """
    message = email.message_from_string(
        self.dataFile('reply_missing_attachment')
    )

    # The iMIP token must already exist in the database for the reply to
    # be matched up
    txn = self.store.newTransaction()
    yield txn.imipCreateToken(
        "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
        "mailto:[email protected]",
        "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
        token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
    )
    yield txn.commit()

    outcome = yield self.receiver.processReply(message)
    self.assertEquals(outcome, MailReceiver.REPLY_FORWARDED_TO_ORGANIZER)

    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def test_orphans(self):
    """
    Verify that orphaned Inbox items are removed
    """
    self.patch(config.InboxCleanup, "ItemLifetimeDays", -1)
    self.patch(config.InboxCleanup, "ItemLifeBeyondEventEndDays", -1)

    # Deleting the underlying events orphans their inbox items
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    events = yield calendar.objectResourcesWithNames(["cal1.ics", "cal3.ics"])
    for event in events:
        yield event.purge()

    # Schedule the cleanup work and wait for it to run
    yield self.transactionUnderTest().enqueue(
        CleanupOneInboxWork,
        homeID=calendar.ownerHome()._resourceID,
        notBefore=datetime.datetime.utcnow(),
    )
    yield self.commit()
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    # Only the non-orphaned item should remain
    inbox = yield self.calendarUnderTest(home="user01", name="inbox")
    remaining = yield inbox.objectResources()
    self.assertEqual({item.name() for item in remaining}, set(["cal2.ics"]))
def afterWork(self):
    """
    A hook that gets called after the L{WorkItem} does its real work. Used
    here to coalesce L{ScheduleOrganizerSendWork} items: if another queued
    work item exists for the same iCalendar UID, promote its job so it runs
    immediately after this one.
    """
    yield super(ScheduleWorkMixin, self).afterWork()

    # Find the next item and schedule to run immediately after this.
    # We only coalesce ScheduleOrganizerSendWork.
    if self.workType() == ScheduleOrganizerSendWork.workType():
        # Oldest other piece of work for the same UID (ordered by workID).
        # NB: renamed from "all" so the builtin is not shadowed.
        matches = yield self.baseWork.query(
            self.transaction,
            (ScheduleWork.icalendarUID == self.icalendarUID).And(ScheduleWork.workID != self.workID),
            order=ScheduleWork.workID,
            limit=1,
        )
        if matches:
            work = matches[0]
            # Only promote work of the same type as this item
            if work.workType == self.workType():
                job = yield JobItem.load(self.transaction, work.jobID)
                yield job.update(notBefore=datetime.datetime.utcnow())
                log.debug("ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'", id=work.workID, uid=self.icalendarUID)
def test_referenceOldEvent(self):
    """
    Verify that inbox items referencing old events are removed
    """
    # All events in the fixture are already too old, so make one event
    # end right now — its inbox item should then survive the cleanup
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    event = yield calendar.objectResourceWithName("cal3.ics")
    timeRange = schema.TIME_RANGE
    yield Update(
        {timeRange.END_DATE: datetime.datetime.utcnow()},
        Where=timeRange.CALENDAR_OBJECT_RESOURCE_ID == event._resourceID
    ).on(self.transactionUnderTest())

    # Schedule the cleanup work and wait for it to run
    yield self.transactionUnderTest().enqueue(
        CleanupOneInboxWork,
        homeID=calendar.ownerHome()._resourceID,
        notBefore=datetime.datetime.utcnow(),
    )
    yield self.commit()
    yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

    # Only the item for the still-current event should remain
    inbox = yield self.calendarUnderTest(home="user01", name="inbox")
    remaining = yield inbox.objectResources()
    self.assertEqual({item.name() for item in remaining}, set(["cal3.ics"]))
def test_work(self):
    """
    Enqueued L{IMIPInvitationWork} sends the invitation and records an
    iMIP token for the organizer/attendee/UID combination.
    """
    txn = self.store.newTransaction()
    yield txn.enqueue(
        IMIPInvitationWork,
        fromAddr=ORGANIZER,
        toAddr=ATTENDEE,
        icalendarText=initialInviteText.replace("\n", "\r\n"),
    )
    yield txn.commit()
    yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)

    # The work item should have created a token for this invitation
    txn = self.store.newTransaction()
    tokenRecord = yield txn.imipGetToken(ORGANIZER, ATTENDEE, ICALUID)
    self.assertTrue(tokenRecord is not None)
    tokenRecord = (yield txn.imipLookupByToken(tokenRecord.token))[0]
    yield txn.commit()

    # Looking the token back up yields the same organizer/attendee/UID
    self.assertEquals(tokenRecord.organizer, ORGANIZER)
    self.assertEquals(tokenRecord.attendee, ATTENDEE)
    self.assertEquals(tokenRecord.icaluid, ICALUID)
def postCheck(self):
    """
    Checks after migration is done.

    Verifies the full post-migration state across both pods: home status
    flags, notification collections, shared-calendar relationships,
    delegate assignments, attachments, and final removal of migration
    bookkeeping data and the old (disabled) home.
    """
    # Check that the home has been moved: on pod 0 the default lookup now
    # resolves to an external home; normal/migrating lookups find nothing,
    # while external and disabled variants still exist.
    home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01")
    self.assertTrue(home.external())
    home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
    self.assertTrue(home is None)
    home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
    self.assertTrue(home is not None)
    home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(home is not None)
    home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_MIGRATING)
    self.assertTrue(home is None)
    yield self.commitTransaction(0)

    # On pod 1 the mirror-image holds: the home is now normal there.
    home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01")
    self.assertTrue(home.normal())
    home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_NORMAL)
    self.assertTrue(home is not None)
    home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_EXTERNAL)
    self.assertTrue(home is None)
    home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(home is not None)
    home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
    self.assertTrue(home is None)
    yield self.commitTransaction(1)

    # Check that the notifications have been moved: pod 0 keeps only the
    # disabled copy; pod 1 now has the live (normal) collection.
    notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
    self.assertTrue(notifications is None)
    notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
    self.assertTrue(notifications is None)
    notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(notifications is not None)
    yield self.commitTransaction(0)

    notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_NORMAL)
    self.assertTrue(notifications is not None)
    notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_EXTERNAL)
    self.assertTrue(notifications is None)
    notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(notifications is not None)
    yield self.commitTransaction(1)

    # New pod data: local users are normal homes, old-pod users appear
    # external.
    homes = {}
    homes["user01"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01")
    homes["user02"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user02")
    self.assertTrue(homes["user02"].external())
    homes["user03"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user03")
    self.assertTrue(homes["user03"].external())
    homes["puser01"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser01")
    self.assertTrue(homes["puser01"].normal())
    homes["puser02"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser02")
    self.assertTrue(homes["puser02"].normal())
    homes["puser03"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser03")
    self.assertTrue(homes["puser03"].normal())

    # Check calendar data on new pod: default collections plus both
    # shared-to-user01 calendars stashed during setup.
    calendars = yield homes["user01"].loadChildren()
    calnames = dict([(calendar.name(), calendar) for calendar in calendars])
    self.assertEqual(
        set(calnames.keys()),
        set(("calendar", "tasks", "inbox", self.stash["sharename_user02_to_user01"], self.stash["sharename_puser02_to_user01"],))
    )

    # Check shared-by user01 on new pod: both sharees' invites survived the
    # move and point at the correct (possibly external) sharee homes.
    shared = calnames["calendar"]
    invitations = yield shared.sharingInvites()
    by_sharee = dict([(invitation.shareeUID, invitation) for invitation in invitations])
    self.assertEqual(len(invitations), 2)
    self.assertEqual(set(by_sharee.keys()), set(("user03", "puser03",)))
    self.assertEqual(by_sharee["user03"].shareeHomeID, homes["user03"].id())
    self.assertEqual(by_sharee["puser03"].shareeHomeID, homes["puser03"].id())

    # Check shared-to user01 on new pod
    shared = calnames[self.stash["sharename_user02_to_user01"]]
    self.assertEqual(shared.ownerHome().uid(), "user02")
    self.assertEqual(shared.ownerHome().id(), homes["user02"].id())

    shared = calnames[self.stash["sharename_puser02_to_user01"]]
    self.assertEqual(shared.ownerHome().uid(), "puser02")
    self.assertEqual(shared.ownerHome().id(), homes["puser02"].id())

    shared = yield homes["puser02"].calendarWithName("calendar")
    invitations = yield shared.sharingInvites()
    self.assertEqual(len(invitations), 1)
    self.assertEqual(invitations[0].shareeHomeID, homes["user01"].id())
    yield self.commitTransaction(1)

    # Old pod data: local users still normal, new-pod users external, and
    # puser01 (never shared with anyone here) absent entirely.
    homes = {}
    homes["user01"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01")
    homes["user02"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user02")
    self.assertTrue(homes["user02"].normal())
    homes["user03"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user03")
    self.assertTrue(homes["user03"].normal())
    homes["puser01"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser01")
    self.assertTrue(homes["puser01"] is None)
    homes["puser02"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser02")
    self.assertTrue(homes["puser02"].external())
    homes["puser03"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser03")
    self.assertTrue(homes["puser03"].external())

    # Check shared-by user01 on old pod
    shared = yield homes["user03"].calendarWithName(self.stash["sharename_user01_to_user03"])
    self.assertEqual(shared.ownerHome().uid(), "user01")
    self.assertEqual(shared.ownerHome().id(), homes["user01"].id())

    # Check shared-to user01 on old pod
    shared = yield homes["user02"].calendarWithName("calendar")
    invitations = yield shared.sharingInvites()
    self.assertEqual(len(invitations), 1)
    self.assertEqual(invitations[0].shareeHomeID, homes["user01"].id())
    yield self.commitTransaction(0)

    # Delegates on each pod: both pods must agree on user01's read-write
    # and read-only delegates, direct and group-expanded.
    for pod in range(self.numberOfStores):
        txn = self.theTransactionUnderTest(pod)
        records = {}
        for ctr in range(10):
            uid = u"user{:02d}".format(ctr + 1)
            records[uid] = yield txn.directoryService().recordWithUID(uid)
        for ctr in range(10):
            uid = u"puser{:02d}".format(ctr + 1)
            records[uid] = yield txn.directoryService().recordWithUID(uid)
        for ctr in range(10):
            uid = u"group{:02d}".format(ctr + 1)
            records[uid] = yield txn.directoryService().recordWithUID(uid)

        # Read-write delegates, without and with group expansion
        delegates = yield Delegates.delegatesOf(txn, records["user01"], True, False)
        self.assertTrue(records["user02"] in delegates)
        self.assertTrue(records["group02"] in delegates)
        delegates = yield Delegates.delegatesOf(txn, records["user01"], True, True)
        self.assertTrue(records["user02"] in delegates)
        self.assertTrue(records["user06"] in delegates)
        self.assertTrue(records["user07"] in delegates)
        self.assertTrue(records["user08"] in delegates)

        # Read-only delegates, without and with group expansion
        delegates = yield Delegates.delegatesOf(txn, records["user01"], False, False)
        self.assertTrue(records["user03"] in delegates)
        self.assertTrue(records["group03"] in delegates)
        self.assertTrue(records["puser01"] in delegates)
        delegates = yield Delegates.delegatesOf(txn, records["user01"], False, True)
        self.assertTrue(records["user03"] in delegates)
        self.assertTrue(records["user07"] in delegates)
        self.assertTrue(records["user08"] in delegates)
        self.assertTrue(records["user09"] in delegates)
        self.assertTrue(records["puser01"] in delegates)

    # Attachments: the managed attachment migrated intact
    obj = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), name="01_3.ics", calendar_name="calendar", home="user01")
    attachment = yield obj.attachmentWithManagedID(self.stash["user01_attachment_mid"])
    self.assertTrue(attachment is not None)
    self.assertEqual(attachment.md5(), self.stash["user01_attachment_md5"])
    data = yield self.attachmentToString(attachment)
    self.assertEqual(data, "Here is some text #1.")

    # Check removal of data from new pod

    # Make sure all jobs are done
    yield JobItem.waitEmpty(self.theStoreUnderTest(1).newTransaction, reactor, 60)

    # No migration state data left
    txn = self.theTransactionUnderTest(1)
    for migrationType in (CalendarMigrationRecord, CalendarObjectMigrationRecord, AttachmentMigrationRecord,):
        records = yield migrationType.all(txn)
        self.assertEqual(len(records), 0, msg=migrationType.__name__)
    yield self.commitTransaction(1)

    # No disabled homes left on the new pod
    txn = self.theTransactionUnderTest(1)
    oldhome = yield txn.calendarHomeWithUID("user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(oldhome is None)
    oldhome = yield txn.notificationsWithUID("user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(oldhome is None)

    # Check removal of data from old pod

    # Make sure all jobs are done
    yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60)

    # No disabled homes left on the old pod either
    txn = self.theTransactionUnderTest(0)
    oldhome = yield txn.calendarHomeWithUID("user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(oldhome is None)
    oldhome = yield txn.notificationsWithUID("user01", status=_HOME_STATUS_DISABLED)
    self.assertTrue(oldhome is None)

    # No delegates referencing user01 remain on the old pod
    for delegateType in (DelegateRecord, DelegateGroupsRecord, ExternalDelegateGroupsRecord):
        records = yield delegateType.query(txn, delegateType.delegator == "user01")
        self.assertEqual(len(records), 0, msg=delegateType.__name__)

    # No scheduling work items tied to the old (pod 0) home remain
    for workType in allScheduleWork:
        records = yield workType.query(txn, workType.homeResourceID == self.stash["user01_pod0_home_id"])
        self.assertEqual(len(records), 0, msg=workType.__name__)