Example 1
    def doWork(self):

        # exit if not done with last delete:
        coiw = schema.CLEANUP_ONE_INBOX_WORK
        queuedCleanupOneInboxWorkItems = (yield Select(
            [Count(coiw.HOME_ID)],
            From=coiw,
        ).on(self.transaction))[0][0]

        if queuedCleanupOneInboxWorkItems:
            log.error(
                "Inbox cleanup work: Can't schedule per home cleanup because {} work items still queued.",
                queuedCleanupOneInboxWorkItems
            )
        else:
            # enumerate provisioned normal calendar homes
            ch = schema.CALENDAR_HOME
            homeRows = yield Select(
                [ch.RESOURCE_ID],
                From=ch,
                Where=ch.STATUS == _HOME_STATUS_NORMAL,
            ).on(self.transaction)

            # Add an initial delay to the start of the first work item, then add an offset between each item
            seconds = config.InboxCleanup.StartDelaySeconds
            for homeRow in homeRows:
                yield CleanupOneInboxWork.reschedule(self.transaction, seconds=seconds, homeID=homeRow[0])
                seconds += config.InboxCleanup.StaggerSeconds
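
For concreteness (the numbers are illustrative, not the configured defaults): with StartDelaySeconds=300 and StaggerSeconds=0.5, the N-th home returned has its CleanupOneInboxWork scheduled 300 + 0.5 * N seconds from now, so 200 homes are spread over the 100 seconds that follow the initial five-minute delay.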
Example 2
    def forMultipleResourcesWithResourceIDs(cls, defaultUser, txn, resourceIDs):
        """
        Load all property stores for all specified resources.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query. Note that the caller of this
        method must make sure that the number of items being queried for is
        within a reasonable batch size. If the caller is itself batching
        related queries, that will take care of itself.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be exposed.

        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.

        @type txn: L{IAsyncTransaction}

        @param resourceIDs: the set of resource IDs to query.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
            value from C{resourceIDs}) to a L{PropertyStore} for that ID.
        """
        query = Select([
            prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop,
            Where=prop.RESOURCE_ID.In(Parameter("resourceIDs", len(resourceIDs)))
        )
        rows = yield query.on(txn, resourceIDs=resourceIDs)
        stores = cls._createMultipleStores(defaultUser, txn, rows)
        returnValue(stores)
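
The DAL idiom worth noting here is Parameter("resourceIDs", len(resourceIDs)): it expands to an IN clause with one bind placeholder per element, and the matching values are then passed by keyword to .on(). A minimal sketch of the same pattern, assuming it runs inside an inlineCallbacks method with txn and the prop table syntax in scope (the ID values are illustrative):

    # Hypothetical batched lookup reusing the Select/Parameter idiom above.
    ids = [101, 102, 103]
    rows = yield Select(
        [prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
        From=prop,
        # One placeholder is generated for each element of "ids".
        Where=prop.RESOURCE_ID.In(Parameter("ids", len(ids))),
    ).on(txn, ids=ids)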
Example 3
    def doWork(self):

        # exit if not done with last delete:
        coiw = schema.CLEANUP_ONE_INBOX_WORK
        queuedCleanupOneInboxWorkItems = (yield Select(
            [Count(coiw.HOME_ID)],
            From=coiw,
        ).on(self.transaction))[0][0]

        if queuedCleanupOneInboxWorkItems:
            log.error(
                "Inbox cleanup work: Can't schedule per home cleanup because {} work items still queued.",
                queuedCleanupOneInboxWorkItems
            )
        else:
            # enumerate provisioned normal calendar homes
            ch = schema.CALENDAR_HOME
            homeRows = yield Select(
                [ch.RESOURCE_ID],
                From=ch,
                Where=ch.STATUS == _HOME_STATUS_NORMAL,
            ).on(self.transaction)

            for homeRow in homeRows:
                yield CleanupOneInboxWork.reschedule(self.transaction, seconds=0, homeID=homeRow[0])
Example 4
def doToEachHomeNotAtVersion(store, homeSchema, version, doIt, logStr, filterOwnerUID=None, processExternal=False):
    """
    Do something to each home whose version column indicates it is older
    than the specified version. Do this in batches as there may be a lot of work to do. Also,
    allow filtering by owner UID prefix to support a parallel mode of operation.
    """

    txn = store.newTransaction("updateDataVersion")
    where = homeSchema.DATAVERSION < version
    if filterOwnerUID:
        where = where.And(homeSchema.OWNER_UID.StartsWith(filterOwnerUID))
    total = (yield Select(
        [Count(homeSchema.RESOURCE_ID), ],
        From=homeSchema,
        Where=where,
    ).on(txn))[0][0]
    yield txn.commit()
    count = 0

    while True:

        logUpgradeStatus(logStr, count, total)

        # Get the next home with an old version
        txn = store.newTransaction("updateDataVersion")
        try:
            rows = yield Select(
                [homeSchema.RESOURCE_ID, homeSchema.OWNER_UID, homeSchema.STATUS, ],
                From=homeSchema,
                Where=where,
                OrderBy=homeSchema.OWNER_UID,
                Limit=1,
            ).on(txn)

            if len(rows) == 0:
                yield txn.commit()
                logUpgradeStatus("End {}".format(logStr), count, total)
                returnValue(None)

            # Apply to the home if not external
            homeResourceID, _ignore_owner_uid, homeStatus = rows[0]
            if homeStatus != _HOME_STATUS_EXTERNAL or processExternal:
                yield doIt(txn, homeResourceID)

            # Update the home to the current version
            yield Update(
                {homeSchema.DATAVERSION: version},
                Where=homeSchema.RESOURCE_ID == homeResourceID,
            ).on(txn)
            yield txn.commit()
        except RuntimeError as e:
            f = Failure()
            logUpgradeError(
                logStr,
                "Failed to upgrade {} to {}: {}".format(homeSchema, version, e)
            )
            yield txn.abort()
            f.raiseException()

        count += 1
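
For context, a hedged sketch of how a caller might drive this helper. The per-home step inside doIt and the target version number are illustrative; store.newTransaction, txn.calendarHomeWithResourceID, and schema.CALENDAR_HOME appear elsewhere in these examples:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def upgradeCalendarHomes(store):

        @inlineCallbacks
        def doIt(txn, homeResourceID):
            # Hypothetical per-home upgrade step run inside the batch transaction.
            home = yield txn.calendarHomeWithResourceID(homeResourceID)
            yield home.somePerHomeUpgradeStep()  # illustrative method name

        yield doToEachHomeNotAtVersion(
            store, schema.CALENDAR_HOME, 5, doIt, "Calendar home upgrade",
        )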
Example 5
    def test_notificationObjectRevisions(self):
        """
        Verify that all extra notification object revisions are deleted by FindMinValidRevisionWork and RevisionCleanupWork
        """

        # get sync token
        home = yield self.homeUnderTest(name="user01")
        token = yield home.syncToken()

        # make notification changes as side effect of sharing
        yield self._createCalendarShare()

        # Get object revisions
        rev = schema.NOTIFICATION_OBJECT_REVISIONS
        revisionRows = yield Select(
            [rev.REVISION],
            From=rev,
        ).on(self.transactionUnderTest())
        self.assertNotEqual(len(revisionRows), 0)

        # do FindMinValidRevisionWork
        yield self.transactionUnderTest().enqueue(
            FindMinValidRevisionWork, notBefore=datetime.datetime.utcnow())
        yield self.commit()
        yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor,
                                60)

        # Get the minimum valid revision and check it
        minValidRevision = yield self.transactionUnderTest(
        ).calendarserverValue("MIN-VALID-REVISION")
        self.assertEqual(int(minValidRevision),
                         max([row[0] for row in revisionRows]) + 1)

        # do RevisionCleanupWork
        yield self.transactionUnderTest().enqueue(
            RevisionCleanupWork, notBefore=datetime.datetime.utcnow())
        yield self.commit()
        yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor,
                                60)

        # Get notification object revisions again
        rev = schema.NOTIFICATION_OBJECT_REVISIONS
        revisionRows = yield Select(
            [rev.REVISION],
            From=rev,
        ).on(self.transactionUnderTest())
        self.assertEqual(len(revisionRows), 0)

        # old sync token fails
        home = yield self.homeUnderTest(name="user01")
        yield self.failUnlessFailure(home.resourceNamesSinceToken(token, "1"),
                                     SyncTokenValidException)
        yield self.failUnlessFailure(
            home.resourceNamesSinceToken(token, "infinity"),
            SyncTokenValidException)
Example 6
    def doWork(self):

        # Delete any other work items for this UID
        yield Delete(
            From=self.table,
            Where=self.group,
        ).on(self.transaction)

        # NB We do not check config.AutomaticPurging.Enabled here because if this work
        # item was enqueued we always need to complete it

        # Check for pending scheduling operations
        sow = schema.SCHEDULE_ORGANIZER_WORK
        sosw = schema.SCHEDULE_ORGANIZER_SEND_WORK
        srw = schema.SCHEDULE_REPLY_WORK
        srcw = schema.SCHEDULE_REPLY_CANCEL_WORK
        rows = yield Select(
            [sow.HOME_RESOURCE_ID],
            From=sow,
            Where=(sow.HOME_RESOURCE_ID == self.homeResourceID),
            SetExpression=Union(
                Select(
                    [sosw.HOME_RESOURCE_ID],
                    From=sosw,
                    Where=(sosw.HOME_RESOURCE_ID == self.homeResourceID),
                    SetExpression=Union(
                        Select(
                            [srw.HOME_RESOURCE_ID],
                            From=srw,
                            Where=(
                                srw.HOME_RESOURCE_ID == self.homeResourceID),
                            SetExpression=Union(
                                Select(
                                    [srcw.HOME_RESOURCE_ID],
                                    From=srcw,
                                    Where=(srcw.HOME_RESOURCE_ID ==
                                           self.homeResourceID),
                                )),
                        )),
                )),
        ).on(self.transaction)

        if rows:
            # Regenerate this job
            notBefore = (datetime.datetime.utcnow() + datetime.timedelta(
                seconds=config.AutomaticPurging.HomePurgeDelaySeconds))
            yield self.transaction.enqueue(PrincipalPurgeHomeWork,
                                           homeResourceID=self.homeResourceID,
                                           notBefore=notBefore)
        else:
            # Get the home and remove it - only if properly marked as being purged
            home = yield self.transaction.calendarHomeWithResourceID(
                self.homeResourceID)
            if home.purging():
                yield home.remove()
Example 7
    def groupsToRefresh(self, txn):
        delegatedUIDs = set((yield txn.allGroupDelegates()))
        self.log.debug("There are {count} group delegates",
                       count=len(delegatedUIDs))

        # Also get group delegates from other pods
        if (txn.directoryService().serversDB() is not None and
                len(txn.directoryService().serversDB().allServersExceptThis(filter_v5=True)) != 0):
            results = yield DeferredList(
                [
                    txn.store().conduit.send_all_group_delegates(txn, server)
                    for server in txn.directoryService().serversDB().allServersExceptThis(filter_v5=True)
                ],
                consumeErrors=True,
            )
            for result in results:
                if result and result[0]:
                    delegatedUIDs.update(result[1])
            self.log.debug(
                "There are {count} group delegates on this and other pods",
                count=len(delegatedUIDs))

        # Get groupUIDs for all group attendees
        groups = yield GroupsRecord.query(
            txn,
            GroupsRecord.groupID.In(
                GroupAttendeeRecord.queryExpr(
                    expr=None,
                    attributes=(GroupAttendeeRecord.groupID, ),
                    distinct=True,
                )))
        attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
        self.log.debug("There are {count} group attendees",
                       count=len(attendeeGroupUIDs))

        # Get groupUIDs for all group shares
        gs = schema.GROUP_SHAREE
        gr = schema.GROUPS
        rows = yield Select([gr.GROUP_UID],
                            From=gr,
                            Where=gr.GROUP_ID.In(
                                Select([gs.GROUP_ID], From=gs,
                                       Distinct=True))).on(txn)
        shareeGroupUIDs = frozenset([row[0] for row in rows])
        self.log.debug("There are {count} group sharees",
                       count=len(shareeGroupUIDs))

        returnValue(
            frozenset(delegatedUIDs | attendeeGroupUIDs | shareeGroupUIDs))
Example 8
    def groupsToRefresh(self, txn):
        delegatedUIDs = set((yield txn.allGroupDelegates()))
        self.log.info("There are {count} group delegates",
                      count=len(delegatedUIDs))

        # Also get group delegates from other pods
        if (txn.directoryService().serversDB() is not None and
                len(txn.directoryService().serversDB().allServersExceptThis()) != 0):
            results = yield DeferredList(
                [
                    txn.store().conduit.send_all_group_delegates(txn, server)
                    for server in txn.directoryService().serversDB().allServersExceptThis()
                ],
                consumeErrors=True,
            )
            for result in results:
                if result and result[0]:
                    delegatedUIDs.update(result[1])
            self.log.info(
                "There are {count} group delegates on this and other pods",
                count=len(delegatedUIDs))

        # Get groupUIDs for all group attendees
        ga = schema.GROUP_ATTENDEE
        gr = schema.GROUPS
        rows = yield Select([gr.GROUP_UID],
                            From=gr,
                            Where=gr.GROUP_ID.In(
                                Select([ga.GROUP_ID], From=ga,
                                       Distinct=True))).on(txn)
        attendeeGroupUIDs = frozenset([row[0] for row in rows])
        self.log.info("There are {count} group attendees",
                      count=len(attendeeGroupUIDs))

        # Get groupUIDs for all group shares
        gs = schema.GROUP_SHAREE
        gr = schema.GROUPS
        rows = yield Select([gr.GROUP_UID],
                            From=gr,
                            Where=gr.GROUP_ID.In(
                                Select([gs.GROUP_ID], From=gs,
                                       Distinct=True))).on(txn)
        shareeGroupUIDs = frozenset([row[0] for row in rows])
        self.log.info("There are {count} group sharees",
                      count=len(shareeGroupUIDs))

        returnValue(
            frozenset(delegatedUIDs | attendeeGroupUIDs | shareeGroupUIDs))
Example 9
    def scheduleEventReconciliations(self, txn, groupID):
        """
        Find all events that have this groupID as an attendee and create
        work items for them.
        returns: a tuple of the scheduled work items
        """
        ga = schema.GROUP_ATTENDEE
        rows = yield Select(
            [
                ga.RESOURCE_ID,
            ],
            From=ga,
            Where=ga.GROUP_ID == groupID,
        ).on(txn)

        wps = []
        for [eventID] in rows:
            wp = yield GroupAttendeeReconciliationWork.reschedule(
                txn,
                seconds=float(
                    config.GroupAttendees.ReconciliationDelaySeconds),
                resourceID=eventID,
                groupID=groupID,
            )
            wps.append(wp)
        returnValue(tuple(wps))
Example 10
    def generate(self):
        """
        Generate the actual SQL statement from the passed in expression tree.

        @return: a C{tuple} of (L{Select}, C{dict}), where the L{Select} is the generated query
            and the C{dict} maps parameter names to the argument values to use when executing it.
        """

        # Init state
        self.arguments = {}
        self.argcount = 0
        obj = self.collection._objectSchema

        columns = [obj.RESOURCE_NAME, obj.UID]

        # For SQL data DB we need to restrict the query to just the targeted collection resource-id if provided
        if self.whereid:
            # AND the whole thing
            test = expression.isExpression(obj.PARENT_RESOURCE_ID,
                                           self.whereid, True)
            self.expression = test if isinstance(
                self.expression, expression.allExpression) else test.andWith(
                    self.expression)

        # Generate ' where ...' partial statement
        where = self.generateExpression(self.expression)

        select = Select(
            columns,
            From=obj,
            Where=where,
            Distinct=True,
        )

        return select, self.arguments
Example 11
 def test_normalizeColumnUUIDs(self):
     """
     L{_normalizeColumnUUIDs} upper-cases only UUIDs in a given column.
     """
     rp = schema.RESOURCE_PROPERTY
     txn = self.transactionUnderTest()
     # setup
     yield Insert({
         rp.RESOURCE_ID: 1,
         rp.NAME: "asdf",
         rp.VALUE: "property-value",
         rp.VIEWER_UID: "not-a-uuid"
     }).on(txn)
     yield Insert({
         rp.RESOURCE_ID: 2,
         rp.NAME: "fdsa",
         rp.VALUE: "another-value",
         rp.VIEWER_UID: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
     }).on(txn)
     # test
     from txdav.common.datastore.sql import _normalizeColumnUUIDs
     yield _normalizeColumnUUIDs(txn, rp.VIEWER_UID)
     self.assertEqual((yield Select(
         [rp.RESOURCE_ID, rp.NAME, rp.VALUE, rp.VIEWER_UID],
         From=rp,
         OrderBy=rp.RESOURCE_ID,
         Ascending=True,
     ).on(txn)), [[1, "asdf", "property-value", "not-a-uuid"],
                  [
                      2, "fdsa", "another-value",
                      "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA"
                  ]])
Example 12
    def doWork(self):

        # Get the minimum valid revision
        minValidRevision = int(
            (yield self.transaction.calendarserverValue("MIN-VALID-REVISION")))

        # get max revision on table rows before dateLimit
        dateLimit = self.dateCutoff()
        maxRevOlderThanDate = 0

        # TODO: Use one Select statement
        for table in (
                schema.CALENDAR_OBJECT_REVISIONS,
                schema.NOTIFICATION_OBJECT_REVISIONS,
                schema.ADDRESSBOOK_OBJECT_REVISIONS,
                schema.ABO_MEMBERS,
        ):
            revisionRows = yield Select(
                [Max(table.REVISION)],
                From=table,
                Where=(table.MODIFIED < dateLimit),
            ).on(self.transaction)

            if revisionRows:
                tableMaxRevision = revisionRows[0][0]
                if tableMaxRevision > maxRevOlderThanDate:
                    maxRevOlderThanDate = tableMaxRevision

        if maxRevOlderThanDate > minValidRevision:
            # save new min valid revision
            yield self.transaction.updateCalendarserverValue(
                "MIN-VALID-REVISION", maxRevOlderThanDate + 1)

            # Schedule revision cleanup
            yield RevisionCleanupWork.reschedule(self.transaction, seconds=0)
Example 13
    def trylock(self, where=None):
        """
        Try to lock with a select for update no wait. If it fails, rollback to
        a savepoint and return L{False}, else return L{True}.

        @param where: SQL expression used to match the rows to lock, by default this is just an expression
            that matches the primary key of this L{Record}, but it can be used to lock multiple L{Records}
            matching the expression in one go. If it is an L{str}, then all rows will be matched.
        @type where: L{SQLExpression} or L{None}
        @return: a L{Deferred} that fires with L{True} if the lock was
            acquired, or L{False} if it was not.
        """

        if where is None:
            where = self._primaryKeyComparison(self._primaryKeyValue())
        elif isinstance(where, str):
            where = None
        savepoint = SavepointAction("Record_trylock_{}".format(self.__class__.__name__))
        yield savepoint.acquire(self.transaction)
        try:
            yield Select(
                list(self.table),
                From=self.table,
                Where=where,
                ForUpdate=True,
                NoWait=True,
            ).on(self.transaction)
        except:
            yield savepoint.rollback(self.transaction)
            returnValue(False)
        else:
            yield savepoint.release(self.transaction)
            returnValue(True)
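
A minimal usage sketch, assuming record is a L{Record} instance already bound to a transaction; the skip-if-contended policy shown here is illustrative rather than taken from the original:

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def processRecordIfFree(record):
        locked = yield record.trylock()
        if not locked:
            # Another worker holds the row lock; skip rather than block.
            returnValue(False)
        # ... safe to mutate the row within this transaction ...
        returnValue(True)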
Example 14
 def deletesome(cls, transaction, where, returnCols=None):
     """
     Delete all rows matching the where expression from the table that corresponds to C{cls}.
     """
     if transaction.dbtype.dialect == ORACLE_DIALECT and returnCols is not None:
         # Oracle cannot return multiple rows in the RETURNING clause so
         # we have to split this into a SELECT followed by a DELETE
         if not isinstance(returnCols, (tuple, list)):
             returnCols = [
                 returnCols,
             ]
         result = yield Select(
             returnCols,
             From=cls.table,
             Where=where,
         ).on(transaction)
         yield Delete(
             From=cls.table,
             Where=where,
         ).on(transaction)
     else:
         result = yield Delete(
             From=cls.table,
             Where=where,
             Return=returnCols,
         ).on(transaction)
     returnValue(result)
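
An illustrative call, assuming a L{Record} subclass with ownerID and recordID columns (both names are hypothetical, as is ownerUID):

    # Delete every row owned by a principal and collect the deleted IDs.
    deletedIDs = yield MyRecord.deletesome(
        txn,
        MyRecord.ownerID == ownerUID,
        returnCols=MyRecord.recordID,
    )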
Example 15
 def getAllHomeUIDs(self, txn):
     ch = schema.CALENDAR_HOME
     rows = (yield Select(
         [ch.OWNER_UID, ],
         From=ch,
     ).on(txn))
     returnValue(tuple([row[0] for row in rows]))
Example 16
    def doWork(self):

        # If not enabled, punt here
        if not config.AutomaticPurging.Enabled:
            returnValue(None)

        # Do the scan
        allUIDs = set()
        for home in (schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME):
            for [uid] in (
                yield Select(
                    [home.OWNER_UID],
                    From=home,
                    Where=(home.STATUS == _HOME_STATUS_NORMAL),
                ).on(self.transaction)
            ):
                allUIDs.add(uid)

        # Spread out the per-uid checks, staggering them by the configured interval
        seconds = 0
        for uid in allUIDs:
            notBefore = (
                datetime.datetime.utcnow() +
                datetime.timedelta(seconds=seconds)
            )
            seconds += config.AutomaticPurging.CheckStaggerSeconds
            yield self.transaction.enqueue(
                PrincipalPurgeCheckWork,
                uid=uid,
                notBefore=notBefore
            )
Example 17
    def delegatorGroups(cls, txn, delegator):
        """
        Get delegator/group pairs for the specified delegator.
        """

        # Do a join to get what we need
        rows = yield Select(
            list(DelegateGroupsRecord.table) + list(GroupsRecord.table),
            From=DelegateGroupsRecord.table.join(GroupsRecord.table, DelegateGroupsRecord.groupID == GroupsRecord.groupID),
            Where=(DelegateGroupsRecord.delegator == delegator.encode("utf-8"))
        ).on(txn)

        results = []
        delegatorNames = [DelegateGroupsRecord.__colmap__[column] for column in list(DelegateGroupsRecord.table)]
        groupsNames = [GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table)]
        split_point = len(delegatorNames)
        for row in rows:
            delegatorRow = row[:split_point]
            delegatorRecord = DelegateGroupsRecord()
            delegatorRecord._attributesFromRow(zip(delegatorNames, delegatorRow))
            delegatorRecord.transaction = txn
            groupsRow = row[split_point:]
            groupsRecord = GroupsRecord()
            groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
            groupsRecord.transaction = txn
            results.append((delegatorRecord, groupsRecord,))

        returnValue(results)
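
An illustrative use of the returned (delegator record, group record) pairs; groupUID is a GroupsRecord attribute used elsewhere in these examples, while delegatorUID is a hypothetical UID string:

    # Collect the UIDs of all groups this delegator has delegated to.
    pairs = yield DelegateGroupsRecord.delegatorGroups(txn, delegatorUID)
    groupUIDs = set(groups.groupUID for _delegator, groups in pairs)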
Example 18
    def test_subtransactionAbortOuterTransaction(self):
        """
        If an outer transaction that is holding a subtransaction open is
        aborted, then the L{Deferred} returned by L{subtransaction} raises
        L{AllRetriesFailed}.
        """
        txn = self.transactionUnderTest()
        cs = schema.CALENDARSERVER
        yield Select([cs.VALUE], From=cs).on(txn)
        waitAMoment = Deferred()

        @inlineCallbacks
        def later(subtxn):
            yield waitAMoment
            value = yield Select([cs.VALUE], From=cs).on(subtxn)
            returnValue(value)

        started = txn.subtransaction(later)
        txn.abort()
        waitAMoment.callback(True)
        try:
            result = yield started
        except AllRetriesFailed:
            self.flushLoggedErrors()
        else:
            self.fail("AllRetriesFailed not raised, %r returned instead" %
                      (result,))
Example 19
def _normalizeColumnUUIDs(txn, column):
    """
    Upper-case the UUIDs in the given SQL DAL column.

    @param txn: The transaction.
    @type txn: L{CommonStoreTransaction}

    @param column: the column, which may contain UIDs, to normalize.
    @type column: L{ColumnSyntax}

    @return: A L{Deferred} that will fire when the UUID normalization of the
        given column has completed.
    """
    tableModel = column.model.table
    # Get a primary key made of column syntax objects for querying and
    # comparison later.
    pkey = [ColumnSyntax(columnModel) for columnModel in tableModel.primaryKey]
    for row in (yield Select([column] + pkey,
                             From=TableSyntax(tableModel)).on(txn)):
        before = row[0]
        pkeyparts = row[1:]
        after = normalizeUUIDOrNot(before)
        if after != before:
            where = _AndNothing
            # Build a where clause out of the primary key and the parts of the
            # primary key that were found.
            for pkeypart, pkeycol in zip(pkeyparts, pkey):
                where = where.And(pkeycol == pkeypart)
            yield Update({column: after}, Where=where).on(txn)
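
The test in Example 11 pins down what normalizeUUIDOrNot is expected to do here: upper-case values that look like UUIDs and pass everything else (such as "not-a-uuid") through unchanged. A hedged sketch of an equivalent helper, not the original implementation:

    import re

    _UUID_PATTERN = re.compile(
        r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$",
        re.IGNORECASE,
    )

    def normalizeUUIDOrNot(value):
        # Upper-case only strings that look like UUIDs; anything else is returned as-is.
        return value.upper() if _UUID_PATTERN.match(value) else value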
Example 20
    def refreshAttendees(cls, txn, organizer_resource, organizer_calendar, attendees, pause=0):
        # See if there is already a pending refresh and merge current attendees into that list,
        # otherwise just mark all attendees as pending
        sra = schema.SCHEDULE_REFRESH_ATTENDEES
        pendingAttendees = (yield Select(
            [sra.ATTENDEE, ],
            From=sra,
            Where=sra.RESOURCE_ID == organizer_resource.id(),
        ).on(txn))
        pendingAttendees = [row[0] for row in pendingAttendees]
        attendeesToRefresh = set(attendees) - set(pendingAttendees)
        for attendee in attendeesToRefresh:
            yield Insert(
                {
                    sra.RESOURCE_ID: organizer_resource.id(),
                    sra.ATTENDEE: attendee,
                }
            ).on(txn)

        # Always queue up new work - coalescing happens when work is executed
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchDelaySeconds)
        work = (yield txn.enqueue(
            cls,
            icalendarUID=organizer_resource.uid(),
            homeResourceID=organizer_resource._home.id(),
            resourceID=organizer_resource.id(),
            attendeeCount=len(attendees),
            notBefore=notBefore,
            pause=pause,
        ))
        cls._enqueued()
        log.debug("ScheduleRefreshWork - enqueued for ID: {id}, UID: {uid}, attendees: {att}", id=work.workID, uid=organizer_resource.uid(), att=",".join(attendeesToRefresh))
Example 21
 def hasWork(cls, txn):
     sch = cls.table
     rows = (yield Select(
         (sch.WORK_ID,),
         From=sch,
     ).on(txn))
     returnValue(len(rows) > 0)
Example 22
    def initFromStore(self):
        """
        Execute necessary SQL queries to retrieve attributes.

        @return: this attachment (C{self}) if it exists in the store, C{None} otherwise.
        """
        att = self._attachmentSchema
        if self._dropboxID and self._dropboxID != ".":
            where = (att.DROPBOX_ID == self._dropboxID).And(
                att.PATH == self._name)
        else:
            where = (att.ATTACHMENT_ID == self._attachmentID)
        rows = (yield Select(self._allColumns(), From=att,
                             Where=where).on(self._txn))

        if not rows:
            returnValue(None)

        for attr, value in zip(self._rowAttributes(), rows[0]):
            setattr(self, attr, value)
        self._created = parseSQLTimestamp(self._created)
        self._modified = parseSQLTimestamp(self._modified)
        self._contentType = MimeType.fromString(self._contentType)

        returnValue(self)
Example 23
    def load(cls, txn, referencedID, managedID, attachmentID=None):
        """
        Load a ManagedAttachment via either its managedID or attachmentID.
        """

        if managedID:
            attco = cls._attachmentLinkSchema
            where = (attco.MANAGED_ID == managedID)
            if referencedID is not None:
                where = where.And(
                    attco.CALENDAR_OBJECT_RESOURCE_ID == referencedID)
            rows = (yield Select(
                [
                    attco.ATTACHMENT_ID,
                ],
                From=attco,
                Where=where,
            ).on(txn))
            if len(rows) == 0:
                returnValue(None)
            elif referencedID is not None and len(rows) != 1:
                raise AttachmentStoreValidManagedID
            attachmentID = rows[0][0]

        attachment = cls(txn, attachmentID, None, None)
        attachment = (yield attachment.initFromStore())
        attachment._managedID = managedID
        attachment._objectResourceID = referencedID
        returnValue(attachment)
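
An illustrative lookup by managed ID alone, with no referencing calendar object; the logging call mirrors the style used elsewhere in these examples and the managedID value is hypothetical:

    attachment = yield ManagedAttachment.load(txn, None, managedID)
    if attachment is None:
        # No attachment is linked to this managed ID.
        log.debug("Unknown managed ID: {mid}", mid=managedID)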
Example 24
    def removeNotificationsForUUID(self, uuid):

        # Get NOTIFICATION_HOME.RESOURCE_ID
        nh = schema.NOTIFICATION_HOME
        kwds = {"UUID": uuid}
        rows = (yield Select(
            [
                nh.RESOURCE_ID,
            ],
            From=nh,
            Where=(nh.OWNER_UID == Parameter("UUID")),
        ).on(self.txn, **kwds))

        if rows:
            resourceID = rows[0][0]

            # Delete NOTIFICATION rows
            if not self.options["dry-run"]:
                no = schema.NOTIFICATION
                kwds = {"ResourceID": resourceID}
                yield Delete(
                    From=no,
                    Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter(
                        "ResourceID")),
                ).on(self.txn, **kwds)

            # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
            if not self.options["dry-run"]:
                kwds = {"UUID": uuid}
                yield Delete(
                    From=nh,
                    Where=(nh.OWNER_UID == Parameter("UUID")),
                ).on(self.txn, **kwds)
Example 25
    def removeAttachments(self, resourceID):

        # Get ATTACHMENT paths
        at = schema.ATTACHMENT
        kwds = {"resourceID": resourceID}
        rows = (yield Select(
            [
                at.PATH,
            ],
            From=at,
            Where=(at.CALENDAR_HOME_RESOURCE_ID == Parameter("resourceID")),
        ).on(self.txn, **kwds))

        if rows:
            self.attachments.update([row[0] for row in rows])

            # Delete ATTACHMENT rows
            if not self.options["dry-run"]:
                at = schema.ATTACHMENT
                kwds = {"resourceID": resourceID}
                yield Delete(
                    From=at,
                    Where=(at.CALENDAR_HOME_RESOURCE_ID == Parameter(
                        "resourceID")),
                ).on(self.txn, **kwds)

        returnValue(len(rows) if rows else 0)
Example 26
    def scheduleGroupShareeReconciliations(self, txn, groupID):
        """
        Find all calendars that have been shared to this groupID and create
        work items for them.
        returns: a tuple of the scheduled work items
        """
        gs = schema.GROUP_SHAREE
        rows = yield Select(
            [
                gs.CALENDAR_ID,
            ],
            From=gs,
            Where=gs.GROUP_ID == groupID,
        ).on(txn)

        workItems = []
        for [calendarID] in rows:
            work = yield GroupShareeReconciliationWork.reschedule(
                txn,
                seconds=float(config.Sharing.Calendars.Groups.
                              ReconciliationDelaySeconds),
                calendarID=calendarID,
                groupID=groupID,
            )
            workItems.append(work)
        returnValue(tuple(workItems))
Example 27
 def getEventData(self, txn, whereClause, whereParams):
     ch = schema.CALENDAR_HOME
     cb = schema.CALENDAR_BIND
     co = schema.CALENDAR_OBJECT
     rows = (yield Select(
         [
             ch.OWNER_UID,
             cb.CALENDAR_RESOURCE_NAME,
             co.RESOURCE_ID,
             co.RESOURCE_NAME,
             co.CREATED,
             co.MODIFIED,
             co.ICALENDAR_TEXT,
         ],
         From=ch.join(
             cb,
             type="inner",
             on=(ch.RESOURCE_ID == cb.CALENDAR_HOME_RESOURCE_ID).And(
                 cb.BIND_MODE == _BIND_MODE_OWN)).join(
                     co,
                     type="inner",
                     on=(cb.CALENDAR_RESOURCE_ID == co.CALENDAR_RESOURCE_ID
                         )),
         Where=whereClause,
     ).on(txn, **whereParams))
     returnValue(tuple(rows))
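
An illustrative call, assumed to run as another method on the same class, that binds the where clause to a named parameter; it also assumes the CALENDAR_OBJECT table exposes an ICALENDAR_UID column, and the UID value is hypothetical:

    co = schema.CALENDAR_OBJECT
    rows = yield self.getEventData(
        txn,
        co.ICALENDAR_UID == Parameter("uid"),
        {"uid": "event-uid-1"},
    )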
Example 28
    def test_inboxCleanupWorkQueueing(self):
        """
        Verify that InboxCleanupWork queues one CleanupOneInboxWork per home
        """
        self.patch(config.InboxCleanup, "CleanupPeriodDays", -1)

        class FakeCleanupOneInboxWork(WorkItem):
            scheduledHomeIDs = []

            @classmethod
            def reschedule(cls, txn, seconds, homeID):
                cls.scheduledHomeIDs.append(homeID)
                pass

        self.patch(CleanupOneInboxWork, "reschedule", FakeCleanupOneInboxWork.reschedule)

        # do cleanup
        yield InboxCleanupWork.reschedule(self.transactionUnderTest(), 0)
        yield self.commit()
        yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)

        ch = schema.CALENDAR_HOME
        workRows = yield Select(
            [ch.OWNER_UID],
            From=ch,
            Where=ch.RESOURCE_ID.In(Parameter("scheduledHomeIDs", len(FakeCleanupOneInboxWork.scheduledHomeIDs))),
        ).on(self.transactionUnderTest(), scheduledHomeIDs=FakeCleanupOneInboxWork.scheduledHomeIDs)
        homeUIDs = [workRow[0] for workRow in workRows]
        self.assertEqual(set(homeUIDs), set(['user01', 'user02']))  # two homes
Example 29
 def jobIDsQueryJoin(cls, homeID, other):
     return Select(
         [
             cls.jobID,
         ],
         From=cls.table.join(other.table, on=(cls.workID == other.workID)),
         Where=other.homeResourceID == homeID,
     )
Example 30
 def getTableSize(self, txn, dbtable):
     rows = (yield Select(
         [
             Count(ALL_COLUMNS),
         ],
         From=dbtable,
     ).on(txn))
     returnValue(rows[0][0])
Example 31
 def allHomeUIDs(self, table=schema.CALENDAR_HOME):
     """
     Get a listing of all UIDs in the current store.
     """
     results = yield (Select([table.OWNER_UID], From=table)
                      .on(self.transactionUnderTest()))
     yield self.commit()
     returnValue(results)
Example 32
    def test_removeAddressBookPropertiesOnDelete(self):
        """
        L{IAddressBookHome.removeAddressBookWithName} clears an address book that already
        exists and makes sure added properties are also removed.
        """

        prop = schema.RESOURCE_PROPERTY
        _allWithID = Select(
            [prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop,
            Where=prop.RESOURCE_ID == Parameter("resourceID")
        )

        # Create address book and add a property
        home = yield self.homeUnderTest()
        addressbook = home.addressbook()
        resourceID = home._addressbookPropertyStoreID

        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 0)

        addressbookProperties = addressbook.properties()
        prop = carddavxml.AddressBookDescription.fromString("Address Book prop to be removed")
        addressbookProperties[PropertyName.fromElement(prop)] = prop
        yield self.commit()

        # Check that the added property is present
        home = yield self.homeUnderTest()
        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 1)
        yield self.commit()

        # Remove address book and check for no properties
        home = yield self.homeUnderTest()
        yield home.removeAddressBookWithName(addressbook.name())
        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 0)
        yield self.commit()

        # Recheck it
        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 0)
        yield self.commit()
Example 33
def _needsNormalizationUpgrade(txn):
    """
    Determine whether a given store requires a UUID normalization data upgrade.

    @param txn: the transaction to use
    @type txn: L{CommonStoreTransaction}

    @return: a L{Deferred} that fires with C{True} or C{False} depending on
        whether we need the normalization upgrade or not.
    """
    for x in [schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
              schema.NOTIFICATION_HOME]:
        slct = Select([x.OWNER_UID], From=x,
                      Where=x.OWNER_UID != Upper(x.OWNER_UID))
        rows = yield slct.on(txn)
        if rows:
            for [uid] in rows:
                if normalizeUUIDOrNot(uid) != uid:
                    returnValue(True)
    returnValue(False)
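
A hedged sketch of a caller, using the store.newTransaction pattern from Example 4; the upgrade entry point invoked at the end is hypothetical:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def maybeNormalizeUUIDs(store):
        txn = store.newTransaction("normalizationCheck")
        needed = yield _needsNormalizationUpgrade(txn)
        yield txn.commit()
        if needed:
            yield runUUIDNormalization(store)  # hypothetical upgrade entry point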
Example 34
    def forMultipleResources(cls, defaultUser, shareeUser, proxyUser, txn,
                             childColumn, parentColumn, parentID):
        """
        Load all property stores for all objects in a collection.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be exposed.

        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.

        @type txn: L{IAsyncTransaction}

        @param childColumn: The resource ID column for the child resources, i.e.
            the resources of the type for which this method will be loading the
            property stores.

        @param parentColumn: The resource ID column for the parent resources.
            e.g. if childColumn is addressbook object's resource ID, then this
            should be addressbook's resource ID.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
            value taken from C{childColumn}) to a L{PropertyStore} for that ID.
        """
        childTable = TableSyntax(childColumn.model.table)
        query = Select([
            childColumn,
            # XXX is that column necessary?  as per the 'on' clause it has to be
            # the same as prop.RESOURCE_ID anyway.
            prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop.join(childTable, prop.RESOURCE_ID == childColumn,
                           'right'),
            Where=parentColumn == parentID
        )
        rows = yield query.on(txn)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser, txn, rows)
        returnValue(stores)
Example 35
    def test_removeAddressBookObjectPropertiesOnDelete(self):
        """
        Removing an address book object that already exists also removes its
        properties (which is always the case, as right now address book
        objects never have properties).
        """

        # Create address book object
        adbk1 = yield self.addressbookUnderTest()
        name = "4.vcf"
        component = VComponent.fromString(vcard4_text)
        addressobject = yield adbk1.createAddressBookObjectWithName(name, component, options={})
        resourceID = addressobject._resourceID

        prop = schema.RESOURCE_PROPERTY
        _allWithID = Select(
            [prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop,
            Where=prop.RESOURCE_ID == Parameter("resourceID")
        )

        # No properties on existing address book object
        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 0)

        yield self.commit()

        # Remove address book object and check for no properties
        adbk1 = yield self.addressbookUnderTest()
        obj1 = yield adbk1.addressbookObjectWithName(name)
        yield obj1.remove()
        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 0)
        yield self.commit()

        # Recheck it
        rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
        self.assertEqual(len(tuple(rows)), 0)
        yield self.commit()