Example #1
    @inlineCallbacks
    def doWork(self):

        # exit if not done with last delete:
        coiw = schema.CLEANUP_ONE_INBOX_WORK
        queuedCleanupOneInboxWorkItems = (yield Select(
            [Count(coiw.HOME_ID)],
            From=coiw,
        ).on(self.transaction))[0][0]

        if queuedCleanupOneInboxWorkItems:
            log.error(
                "Inbox cleanup work: Can't schedule per home cleanup because {} work items still queued.",
                queuedCleanupOneInboxWorkItems
            )
        else:
            # enumerate provisioned normal calendar homes
            ch = schema.CALENDAR_HOME
            homeRows = yield Select(
                [ch.RESOURCE_ID],
                From=ch,
                Where=ch.STATUS == _HOME_STATUS_NORMAL,
            ).on(self.transaction)

            for homeRow in homeRows:
                yield CleanupOneInboxWork.reschedule(self.transaction, seconds=0, homeID=homeRow[0])
Example #2
    @inlineCallbacks
    def doWork(self):

        # exit if not done with last delete:
        coiw = schema.CLEANUP_ONE_INBOX_WORK
        queuedCleanupOneInboxWorkItems = (yield Select(
            [Count(coiw.HOME_ID)],
            From=coiw,
        ).on(self.transaction))[0][0]

        if queuedCleanupOneInboxWorkItems:
            log.error(
                "Inbox cleanup work: Can't schedule per home cleanup because {} work items still queued.",
                queuedCleanupOneInboxWorkItems
            )
        else:
            # enumerate provisioned normal calendar homes
            ch = schema.CALENDAR_HOME
            homeRows = yield Select(
                [ch.RESOURCE_ID],
                From=ch,
                Where=ch.STATUS == _HOME_STATUS_NORMAL,
            ).on(self.transaction)

            # Delay the first work item by a fixed amount, then stagger each
            # subsequent item by a further offset
            seconds = config.InboxCleanup.StartDelaySeconds
            for homeRow in homeRows:
                yield CleanupOneInboxWork.reschedule(self.transaction, seconds=seconds, homeID=homeRow[0])
                seconds += config.InboxCleanup.StaggerSeconds
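Example #2 differs from Example #1 only in how the per-home work items are scheduled: instead of every item firing immediately (seconds=0), the first item is delayed by config.InboxCleanup.StartDelaySeconds and each subsequent item is pushed back by a further config.InboxCleanup.StaggerSeconds, spreading the cleanup load over time. A minimal standalone sketch of that delay computation (plain Python; the numeric defaults are illustrative, not the real config values):

def staggeredDelays(homeCount, startDelay=300, stagger=5):
    """Return the delay, in seconds, at which each home's work item fires."""
    # startDelay/stagger stand in for the StartDelaySeconds/StaggerSeconds config
    return [startDelay + i * stagger for i in range(homeCount)]

print(staggeredDelays(4))  # [300, 305, 310, 315]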
Example #3
@inlineCallbacks
def doToEachHomeNotAtVersion(store, homeSchema, version, doIt, logStr, filterOwnerUID=None, processExternal=False):
    """
    Apply an operation to each home whose version column indicates it is
    older than the specified version. Work in batches, since there may be
    many homes to process, and allow filtering by owner UID so that several
    upgrade processes can run in parallel.
    """

    txn = store.newTransaction("updateDataVersion")
    where = homeSchema.DATAVERSION < version
    if filterOwnerUID:
        where = where.And(homeSchema.OWNER_UID.StartsWith(filterOwnerUID))
    total = (yield Select(
        [Count(homeSchema.RESOURCE_ID), ],
        From=homeSchema,
        Where=where,
    ).on(txn))[0][0]
    yield txn.commit()
    count = 0

    while True:

        logUpgradeStatus(logStr, count, total)

        # Get the next home with an old version
        txn = store.newTransaction("updateDataVersion")
        try:
            rows = yield Select(
                [homeSchema.RESOURCE_ID, homeSchema.OWNER_UID, homeSchema.STATUS, ],
                From=homeSchema,
                Where=where,
                OrderBy=homeSchema.OWNER_UID,
                Limit=1,
            ).on(txn)

            if len(rows) == 0:
                yield txn.commit()
                logUpgradeStatus("End {}".format(logStr), count, total)
                returnValue(None)

            # Apply to the home if not external
            homeResourceID, _ignore_owner_uid, homeStatus = rows[0]
            if homeStatus != _HOME_STATUS_EXTERNAL or processExternal:
                yield doIt(txn, homeResourceID)

            # Update the home to the current version
            yield Update(
                {homeSchema.DATAVERSION: version},
                Where=homeSchema.RESOURCE_ID == homeResourceID,
            ).on(txn)
            yield txn.commit()
        except RuntimeError as e:
            f = Failure()
            logUpgradeError(
                logStr,
                "Failed to upgrade {} to {}: {}".format(homeSchema, version, e)
            )
            yield txn.abort()
            f.raiseException()

        count += 1
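The loop above handles one stale home per transaction: select the next row still below the target version, apply doIt, bump its DATAVERSION, and commit, so a failure rolls back only the current home and the WHERE clause naturally resumes where it left off. A minimal sketch of the same batch pattern using sqlite3 as a stand-in (the real code runs against the CalendarServer store; table and column names here are invented):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE home (id INTEGER PRIMARY KEY, dataversion INTEGER)")
conn.executemany("INSERT INTO home VALUES (?, ?)", [(1, 1), (2, 1), (3, 2)])

TARGET_VERSION = 2
while True:
    # Claim the next home still below the target version
    row = conn.execute(
        "SELECT id FROM home WHERE dataversion < ? ORDER BY id LIMIT 1",
        (TARGET_VERSION,)).fetchone()
    if row is None:
        break
    try:
        # ... per-home upgrade work would go here ...
        conn.execute("UPDATE home SET dataversion = ? WHERE id = ?",
                     (TARGET_VERSION, row[0]))
        conn.commit()
    except Exception:
        conn.rollback()  # leave the home stale so a later run retries it
        raise

print(conn.execute("SELECT COUNT(*) FROM home WHERE dataversion < ?",
                   (TARGET_VERSION,)).fetchone()[0])  # 0: all homes upgraded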
Example #4
    @inlineCallbacks
    def getTableSize(self, txn, dbtable):
        rows = (yield Select(
            [
                Count(ALL_COLUMNS),
            ],
            From=dbtable,
        ).on(txn))
        returnValue(rows[0][0])
Example #5
    @inlineCallbacks
    def getTableSize(self, txn, dbtable):
        rows = (yield Select(
            [
                Count(Constant(1)),
            ],
            From=dbtable,
        ).on(txn))
        returnValue(rows[0][0])
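Examples #4 and #5 are two spellings of the same row count: Count(ALL_COLUMNS) renders as (roughly) COUNT(*) and Count(Constant(1)) as COUNT(1), and both count every row. The distinction that matters is against COUNT(column), which skips NULLs. A small self-contained demonstration using sqlite3 (a stand-in for the DAL, with a made-up table):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (home_id INTEGER)")  # illustrative table
conn.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (None,)])

print(conn.execute("SELECT COUNT(*) FROM t").fetchone()[0])        # 3: all rows
print(conn.execute("SELECT COUNT(1) FROM t").fetchone()[0])        # 3: all rows
print(conn.execute("SELECT COUNT(home_id) FROM t").fetchone()[0])  # 2: NULLs skipped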
Example #6
    @classmethod
    @inlineCallbacks
    def histogram(cls, txn):
        """
        Generate a histogram of work items currently in the queue.
        """
        from twext.enterprise.jobs.queue import WorkerConnectionPool

        # Fill out an empty set of results for all the known work types. The SQL
        # query will only return work types that are currently queued, but we want
        # results for all possible work.
        results = {}
        now = datetime.utcnow()
        for workItemType in cls.workTypes():
            workType = workItemType.workType()
            results.setdefault(workType, {
                "queued": 0,
                "assigned": 0,
                "late": 0,
                "failed": 0,
                "completed": WorkerConnectionPool.completed.get(workType, 0),
                "time": WorkerConnectionPool.timing.get(workType, 0.0)
            })

        # Use an aggregate query to get the results for each currently queued
        # work type.
        jobs = yield cls.queryExpr(
            expr=None,
            attributes=(
                cls.workType,
                Count(cls.workType),
                Count(cls.assigned),
                Count(Case((cls.assigned == None).And(cls.notBefore < now), Constant(1), None)),
                Sum(cls.failed),
            ),
            group=cls.workType
        ).on(txn)

        for workType, queued, assigned, late, failed in jobs:
            results[workType].update({
                "queued": queued,
                "assigned": assigned,
                "late": late,
                "failed": failed,
            })

        returnValue(results)
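The aggregate query above leans on the fact that COUNT skips NULLs: Count(cls.assigned) counts only jobs with a non-NULL assignment, and the Case expression yields 1 for overdue unassigned jobs and NULL otherwise, so wrapping it in Count tallies just the "late" rows. A minimal sqlite3 sketch of that conditional-count trick (table and column names are illustrative, not the real JOB schema):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE job (assigned TEXT, not_before INTEGER)")
conn.executemany("INSERT INTO job VALUES (?, ?)",
                 [(None, 5), (None, 50), ("w1", 5)])

now = 10
late = conn.execute(
    "SELECT COUNT(CASE WHEN assigned IS NULL AND not_before < ? "
    "THEN 1 ELSE NULL END) FROM job", (now,)).fetchone()[0]
print(late)  # 1: only the unassigned job whose not_before is in the past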
Example #7
@inlineCallbacks
def countProperty(txn, propelement):
    pname = PropertyName.fromElement(propelement)

    rp = schema.RESOURCE_PROPERTY
    count = (yield Select(
        [Count(rp.RESOURCE_ID), ],
        From=rp,
        Where=rp.NAME == pname.toString(),
    ).on(txn))[0][0]

    returnValue(count)
Example #8
    @classmethod
    @inlineCallbacks
    def count(cls, transaction, where=None):
        """
        Count the number of rows in the table that corresponds to C{cls}.
        """
        rows = yield Select(
            [
                Count(ALL_COLUMNS),
            ],
            From=cls.table,
            Where=where,
        ).on(transaction)
        returnValue(rows[0][0])
Example #9
    @inlineCallbacks
    def test_upgradeOrphanedAttachment(self):
        """
        Test L{attachment_migration.doUpgrade} when an orphaned attachment is present.
        """
        def _hasDropboxAttachments(_self, txn):
            return succeed(True)

        self.patch(CalendarStoreFeatures, "hasDropboxAttachments",
                   _hasDropboxAttachments)

        # Create orphaned attachment
        dropboxID = "ABCD.dropbox"
        attachmentName = "test.txt"
        home = yield self.homeUnderTest(name="user01")
        at = schema.ATTACHMENT
        yield Insert({
            at.CALENDAR_HOME_RESOURCE_ID: home._resourceID,
            at.DROPBOX_ID: dropboxID,
            at.CONTENT_TYPE: "text/plain",
            at.SIZE: 10,
            at.MD5: "abcd",
            at.PATH: attachmentName,
        }).on(self.transactionUnderTest())
        yield self.commit()

        hasheduid = hashlib.md5(dropboxID).hexdigest()
        fp = self._sqlCalendarStore.attachmentsPath.child(
            hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
        fp.makedirs()
        fp = fp.child(attachmentName)
        fp.setContent("1234567890")

        self.assertTrue(os.path.exists(fp.path))

        upgrader = UpgradeDatabaseOtherStep(self._sqlCalendarStore)
        yield attachment_migration.doUpgrade(upgrader)

        txn = upgrader.sqlStore.newTransaction()
        managed = (yield txn.calendarserverValue("MANAGED-ATTACHMENTS",
                                                 raiseIfMissing=False))
        count = (yield Select(
            [
                Count(at.DROPBOX_ID),
            ],
            From=at,
        ).on(txn))[0][0]
        yield txn.commit()
        self.assertEqual(count, 1)
        self.assertNotEqual(managed, None)

        self.assertTrue(os.path.exists(fp.path))
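The test reconstructs the attachment's on-disk location by fanning directories out over the md5 hex digest of the dropbox ID (two two-character shard directories, then the full hash), so no single directory accumulates every attachment. A sketch of that path scheme (the helper name is made up for illustration):

import hashlib

def attachmentDirSegments(dropboxID):
    """Return the two shard directories plus the full-hash leaf directory."""
    # hypothetical helper mirroring the hasheduid[0:2]/[2:4]/hasheduid layout above
    h = hashlib.md5(dropboxID.encode("utf-8")).hexdigest()
    return h[0:2], h[2:4], h

print("/".join(attachmentDirSegments("ABCD.dropbox")))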
Example #10
    @inlineCallbacks
    def getCalendars(self, txn):
        ch = schema.CALENDAR_HOME
        cb = schema.CALENDAR_BIND
        co = schema.CALENDAR_OBJECT
        rows = (yield Select(
            [
                ch.OWNER_UID,
                cb.CALENDAR_RESOURCE_NAME,
                Count(co.RESOURCE_ID),
            ],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN)).join(
                co, type="left", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID)),
            GroupBy=(ch.OWNER_UID, cb.CALENDAR_RESOURCE_NAME)
        ).on(txn))
        returnValue(tuple(rows))
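The left join is what keeps empty calendars in the result: an inner join to CALENDAR_OBJECT would drop any calendar with no objects, whereas the left join keeps those calendars with NULL object columns, and Count(co.RESOURCE_ID) skips the NULLs to report 0. A small sqlite3 sketch of that join shape (schema names invented for the demo):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
CREATE TABLE calendar (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE obj (id INTEGER PRIMARY KEY, calendar_id INTEGER);
INSERT INTO calendar VALUES (1, 'work'), (2, 'home');
INSERT INTO obj VALUES (10, 1), (11, 1);
""")

rows = conn.execute("""
    SELECT c.name, COUNT(o.id)
    FROM calendar c LEFT JOIN obj o ON o.calendar_id = c.id
    GROUP BY c.name ORDER BY c.name
""").fetchall()
print(rows)  # [('home', 0), ('work', 2)]: the empty calendar still appears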
Example #11
    @inlineCallbacks
    def countResources(self, uuid):
        ch = schema.CALENDAR_HOME
        cb = schema.CALENDAR_BIND
        co = schema.CALENDAR_OBJECT
        kwds = {"UUID": uuid}
        rows = (yield Select(
            [
                Count(co.RESOURCE_ID),
            ],
            From=ch.join(
                cb, type="inner", on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID).And(
                    cb.BIND_MODE == _BIND_MODE_OWN)).join(
                co, type="left", on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID)),
            Where=(
                ch.OWNER_UID == Parameter("UUID")
            ),
        ).on(self.txn, **kwds))

        returnValue(rows[0][0] if rows else 0)
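Here the owner UID is passed as a bound parameter (Parameter("UUID") in the query, paired with the "UUID" key in kwds) rather than spliced into the SQL text. The equivalent shape with sqlite3 named parameters, as a stand-in for the DAL:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE home (owner_uid TEXT)")  # illustrative table
conn.executemany("INSERT INTO home VALUES (?)", [("user01",), ("user02",)])

count = conn.execute(
    "SELECT COUNT(*) FROM home WHERE owner_uid = :UUID",
    {"UUID": "user01"}).fetchone()[0]
print(count)  # 1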