Example 1
    @inlineCallbacks
    def test_normalizeColumnUUIDs(self):
        """
        L{_normalizeColumnUUIDs} upper-cases only UUIDs in a given column.
        """
        rp = schema.RESOURCE_PROPERTY
        txn = self.transactionUnderTest()
        # setup
        yield Insert({
            rp.RESOURCE_ID: 1,
            rp.NAME: "asdf",
            rp.VALUE: "property-value",
            rp.VIEWER_UID: "not-a-uuid"
        }).on(txn)
        yield Insert({
            rp.RESOURCE_ID: 2,
            rp.NAME: "fdsa",
            rp.VALUE: "another-value",
            rp.VIEWER_UID: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
        }).on(txn)
        # test
        from txdav.common.datastore.sql import _normalizeColumnUUIDs
        yield _normalizeColumnUUIDs(txn, rp.VIEWER_UID)
        self.assertEqual((yield Select(
            [rp.RESOURCE_ID, rp.NAME, rp.VALUE, rp.VIEWER_UID],
            From=rp,
            OrderBy=rp.RESOURCE_ID,
            Ascending=True,
        ).on(txn)), [[1, "asdf", "property-value", "not-a-uuid"],
                     [2, "fdsa", "another-value",
                      "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA"]])
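
All of these examples share one core pattern: build an Insert from a column-to-value map and run it against a transaction with .on(), which returns a Deferred and is therefore yielded inside an @inlineCallbacks function. A minimal hedged sketch of that pattern (addProperty is hypothetical; schema and the transaction come from the surrounding txdav code):

    from twisted.internet.defer import inlineCallbacks
    from twext.enterprise.dal.syntax import Insert

    @inlineCallbacks
    def addProperty(txn, resourceID, name, value, viewerUID):
        # Keys of the map are column syntax objects taken from the schema.
        rp = schema.RESOURCE_PROPERTY
        yield Insert({
            rp.RESOURCE_ID: resourceID,
            rp.NAME: name,
            rp.VALUE: value,
            rp.VIEWER_UID: viewerUID,
        }).on(txn)
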
Example 2
    @inlineCallbacks
    def insert(self, transaction):
        """
        Insert a new a row for an existing record that was not initially created in the database.
        """

        # Cannot do this if a transaction has already been assigned because that means
        # the record already exists in the DB.

        if self.transaction is not None:
            raise ReadOnly(self.__class__.__name__, "Cannot insert")

        colmap = {}
        attrtocol = self.__attrmap__
        needsCols = []
        needsAttrs = []

        for attr in attrtocol:
            col = attrtocol[attr]
            v = getattr(self, attr)
            if not isinstance(v, ColumnSyntax):
                colmap[col] = v
            else:
                if col.model.needsValue():
                    raise TypeError(
                        "required attribute {0!r} not passed".format(attr))
                else:
                    needsCols.append(col)
                    needsAttrs.append(attr)

        result = yield (Insert(
            colmap, Return=needsCols if needsCols else None).on(transaction))
        if needsCols:
            self._attributesFromRow(zip(needsAttrs, result[0]))

        self.transaction = transaction
Example 3
    @classmethod
    @inlineCallbacks
    def create(cls, txn, managedID, ownerHomeID, referencedBy):
        """
        Create a new Attachment object and reference it.

        @param txn: The transaction to use
        @type txn: L{CommonStoreTransaction}
        @param managedID: the identifier for the attachment
        @type managedID: C{str}
        @param ownerHomeID: the resource-id of the home collection of the attachment owner
        @type ownerHomeID: C{int}
        @param referencedBy: the resource-id of the calendar object referencing the attachment
        @type referencedBy: C{int}
        """

        # Now create the DB entry
        attachment = (yield cls._create(txn, managedID, ownerHomeID))
        attachment._objectResourceID = referencedBy

        # Create the attachment<->calendar object relationship for managed attachments
        attco = cls._attachmentLinkSchema
        yield Insert({
            attco.ATTACHMENT_ID: attachment._attachmentID,
            attco.MANAGED_ID: attachment._managedID,
            attco.CALENDAR_OBJECT_RESOURCE_ID: attachment._objectResourceID,
        }).on(txn)

        returnValue(attachment)
Example 4
    @inlineCallbacks
    def test_delete_returning(self):
        """
        Delete with a Return clause returns the values of the deleted rows;
        deleting again when no rows match returns an empty result.
        """

        txn = self.transactionUnderTest()
        cs = schema.CALENDARSERVER
        yield Insert({cs.NAME: "TEST", cs.VALUE: "Value"}).on(txn)
        yield self.commit()

        txn = self.transactionUnderTest()
        value = yield Delete(
            From=cs,
            Where=(cs.NAME == "TEST"),
            Return=cs.VALUE,
        ).on(txn)
        self.assertEqual(list(value), [["Value"]])

        txn = self.transactionUnderTest()
        value = yield Delete(
            From=cs,
            Where=(cs.NAME == "TEST"),
            Return=cs.VALUE,
        ).on(txn)
        self.assertEqual(list(value), [])
Example 5
    @classmethod
    @inlineCallbacks
    def refreshAttendees(cls, txn, organizer_resource, organizer_calendar, attendees, pause=0):
        # See if there is already a pending refresh and merge current attendees into that list,
        # otherwise just mark all attendees as pending
        sra = schema.SCHEDULE_REFRESH_ATTENDEES
        pendingAttendees = (yield Select(
            [sra.ATTENDEE, ],
            From=sra,
            Where=sra.RESOURCE_ID == organizer_resource.id(),
        ).on(txn))
        pendingAttendees = [row[0] for row in pendingAttendees]
        attendeesToRefresh = set(attendees) - set(pendingAttendees)
        for attendee in attendeesToRefresh:
            yield Insert(
                {
                    sra.RESOURCE_ID: organizer_resource.id(),
                    sra.ATTENDEE: attendee,
                }
            ).on(txn)

        # Always queue up new work - coalescing happens when work is executed
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchDelaySeconds)
        work = (yield txn.enqueue(
            cls,
            icalendarUID=organizer_resource.uid(),
            homeResourceID=organizer_resource._home.id(),
            resourceID=organizer_resource.id(),
            attendeeCount=len(attendees),
            notBefore=notBefore,
            pause=pause,
        ))
        cls._enqueued()
        log.debug("ScheduleRefreshWork - enqueued for ID: {id}, UID: {uid}, attendees: {att}", id=work.workID, uid=organizer_resource.uid(), att=",".join(attendeesToRefresh))
Example 6
    @classproperty
    def _completelyNewRevisionQuery(cls):
        rev = cls._revisionsSchema
        return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
                       # rev.RESOURCE_ID: Parameter("resourceID"),
                       rev.RESOURCE_NAME: Parameter("name"),
                       rev.REVISION: schema.REVISION_SEQ,
                       rev.DELETED: False},
                      Return=rev.REVISION)
Example 7
    def insert(self):
        """
        Insert the object.
        """

        row = dict([(column, getattr(self, attr))
                    for column, attr in itertools.izip(self._allColumns(),
                                                       self._rowAttributes())])
        return Insert(row).on(self._txn)
Example 8
    @classproperty
    def _newNotificationQuery(cls):
        no = cls._objectSchema
        return Insert(
            {
                no.NOTIFICATION_HOME_RESOURCE_ID: Parameter("homeID"),
                no.NOTIFICATION_UID: Parameter("uid"),
                no.NOTIFICATION_TYPE: Parameter("notificationType"),
                no.NOTIFICATION_DATA: Parameter("notificationData"),
                no.MD5: Parameter("md5"),
            },
            Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED])
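
Queries built with Parameter placeholders, as above, are constructed once and executed many times; the placeholders are bound by passing keyword arguments to .on(). A hedged usage sketch (writeNotificationRow is hypothetical, and query stands for the Insert built above):

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def writeNotificationRow(txn, query, home, uid, ntype, ndata, md5):
        # Keyword arguments bind the named Parameters at execution time.
        rows = yield query.on(
            txn,
            homeID=home.id(),
            uid=uid,
            notificationType=ntype,
            notificationData=ndata,
            md5=md5,
        )
        # One row comes back, with the Return columns in declaration order.
        resourceID, created, modified = rows[0]
        returnValue((resourceID, created, modified))
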
Example 9
    @inlineCallbacks
    def test_upgradeOrphanedAttachment(self):
        """
        Test L{attachment_migration.doUpgrade} when an orphaned attachment is present.
        """
        def _hasDropboxAttachments(_self, txn):
            return succeed(True)

        self.patch(CalendarStoreFeatures, "hasDropboxAttachments",
                   _hasDropboxAttachments)

        # Create orphaned attachment
        dropboxID = "ABCD.dropbox"
        attachmentName = "test.txt"
        home = yield self.homeUnderTest(name="user01")
        at = schema.ATTACHMENT
        yield Insert({
            at.CALENDAR_HOME_RESOURCE_ID: home._resourceID,
            at.DROPBOX_ID: dropboxID,
            at.CONTENT_TYPE: "text/plain",
            at.SIZE: 10,
            at.MD5: "abcd",
            at.PATH: attachmentName,
        }).on(self.transactionUnderTest())
        yield self.commit()

        hasheduid = hashlib.md5(dropboxID).hexdigest()
        fp = self._sqlCalendarStore.attachmentsPath.child(
            hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
        fp.makedirs()
        fp = fp.child(attachmentName)
        fp.setContent("1234567890")

        self.assertTrue(os.path.exists(fp.path))

        upgrader = UpgradeDatabaseOtherStep(self._sqlCalendarStore)
        yield attachment_migration.doUpgrade(upgrader)

        txn = upgrader.sqlStore.newTransaction()
        managed = (yield txn.calendarserverValue("MANAGED-ATTACHMENTS",
                                                 raiseIfMissing=False))
        count = (yield Select(
            [
                Count(at.DROPBOX_ID),
            ],
            From=at,
        ).on(txn))[0][0]
        yield txn.commit()
        self.assertEqual(count, 1)
        self.assertNotEqual(managed, None)

        self.assertTrue(os.path.exists(fp.path))
Example 10
    @classmethod
    @inlineCallbacks
    def create(cls, txn, dropboxID, name, ownerHomeID):
        """
        Create a new Attachment object.

        @param txn: The transaction to use
        @type txn: L{CommonStoreTransaction}
        @param dropboxID: the identifier for the attachment (dropbox id or managed id)
        @type dropboxID: C{str}
        @param name: the name of the attachment
        @type name: C{str}
        @param ownerHomeID: the resource-id of the home collection of the attachment owner
        @type ownerHomeID: C{int}
        """

        # If store has already migrated to managed attachments we will prevent creation of dropbox attachments
        dropbox = (yield txn.store().dropboxAllowed(txn))
        if not dropbox:
            raise AttachmentDropboxNotAllowed

        # Now create the DB entry
        att = cls._attachmentSchema
        rows = (yield Insert(
            {
                att.CALENDAR_HOME_RESOURCE_ID: ownerHomeID,
                att.DROPBOX_ID: dropboxID,
                att.CONTENT_TYPE: "",
                att.SIZE: 0,
                att.MD5: "",
                att.PATH: name,
            },
            Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))

        row_iter = iter(rows[0])
        a_id = row_iter.next()
        created = parseSQLTimestamp(row_iter.next())
        modified = parseSQLTimestamp(row_iter.next())

        attachment = cls(txn, a_id, dropboxID, name, ownerHomeID, True)
        attachment._created = created
        attachment._modified = modified

        # File system paths need to exist
        try:
            attachment._path.parent().makedirs()
        except:
            pass

        returnValue(attachment)
Example 11
    @classproperty
    def _addNewRevision(cls):
        rev = cls._revisionsSchema
        return Insert(
            {
                rev.HOME_RESOURCE_ID: Parameter("homeID"),
                rev.RESOURCE_ID: Parameter("resourceID"),
                rev.COLLECTION_NAME: Parameter("collectionName"),
                rev.RESOURCE_NAME: None,
                # Always starts false; may be updated to be a tombstone
                # later.
                rev.DELETED: False
            },
            Return=[rev.REVISION])
Example 12
    @inlineCallbacks
    def _calendarTranspUpgrade_setup(self):

        # Set dead property on inbox
        for user in ("user01", "user02",):
            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
            inbox.properties()[PropertyName.fromElement(CalendarFreeBusySet)] = CalendarFreeBusySet(HRef.fromString("/calendars/__uids__/%s/calendar_1" % (user,)))

            # Force current to transparent
            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
            yield calendar.setUsedForFreeBusy(False)
            calendar.properties()[PropertyName.fromElement(ScheduleCalendarTransp)] = ScheduleCalendarTransp(Opaque() if user == "user01" else Transparent())

            # Force data version to previous
            home = (yield self.homeUnderTest(name=user))
            ch = home._homeSchema
            yield Update(
                {ch.DATAVERSION: 3},
                Where=ch.RESOURCE_ID == home._resourceID,
            ).on(self.transactionUnderTest())

        yield self.commit()

        for user in ("user01", "user02",):
            calendar = (yield self.calendarUnderTest(name="calendar_1", home=user))
            self.assertFalse(calendar.isUsedForFreeBusy())
            self.assertTrue(PropertyName.fromElement(ScheduleCalendarTransp) in calendar.properties())
            inbox = (yield self.calendarUnderTest(name="inbox", home=user))
            self.assertTrue(PropertyName.fromElement(CalendarFreeBusySet) in inbox.properties())
        yield self.commit()

        # Create "fake" entry for non-existent share
        txn = self.transactionUnderTest()
        calendar = (yield self.calendarUnderTest(name="calendar_1", home="user01"))
        rp = schema.RESOURCE_PROPERTY
        yield Insert(
            {
                rp.RESOURCE_ID: calendar._resourceID,
                rp.NAME: PropertyName.fromElement(ScheduleCalendarTransp).toString(),
                rp.VALUE: ScheduleCalendarTransp(Opaque()).toxml(),
                rp.VIEWER_UID: "user03",
            }
        ).on(txn)
        yield self.commit()
Example 13
    @classmethod
    @inlineCallbacks
    def _create(cls, txn, managedID, ownerHomeID):
        """
        Create a new managed Attachment object.

        @param txn: The transaction to use
        @type txn: L{CommonStoreTransaction}
        @param managedID: the identifier for the attachment
        @type managedID: C{str}
        @param ownerHomeID: the resource-id of the home collection of the attachment owner
        @type ownerHomeID: C{int}
        """

        # Now create the DB entry
        att = cls._attachmentSchema
        rows = (yield Insert(
            {
                att.CALENDAR_HOME_RESOURCE_ID: ownerHomeID,
                att.DROPBOX_ID: ".",
                att.CONTENT_TYPE: "",
                att.SIZE: 0,
                att.MD5: "",
                att.PATH: "",
            },
            Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))

        row_iter = iter(rows[0])
        a_id = row_iter.next()
        created = parseSQLTimestamp(row_iter.next())
        modified = parseSQLTimestamp(row_iter.next())

        attachment = cls(txn, a_id, ".", None, ownerHomeID, True)
        attachment._managedID = managedID
        attachment._created = created
        attachment._modified = modified

        # File system paths need to exist
        try:
            attachment._path.parent().makedirs()
        except:
            pass

        returnValue(attachment)
Example 14
    @classmethod
    @inlineCallbacks
    def copyManagedID(cls, txn, managedID, referencedBy):
        """
        Associate an existing attachment with the new resource.
        """

        # Find the associated attachment-id and insert new reference
        attco = cls._attachmentLinkSchema
        aid = (yield Select(
            [
                attco.ATTACHMENT_ID,
            ],
            From=attco,
            Where=(attco.MANAGED_ID == managedID),
        ).on(txn))[0][0]

        yield Insert({
            attco.ATTACHMENT_ID: aid,
            attco.MANAGED_ID: managedID,
            attco.CALENDAR_OBJECT_RESOURCE_ID: referencedBy,
        }).on(txn)
Example 15
    @classmethod
    @inlineCallbacks
    def create(cls, transaction, **k):
        """
        Create a row.

        Used like this::

            MyRecord.create(transaction, column1=1, column2=u"two")
        """
        self = cls()
        colmap = {}
        attrtocol = cls.__attrmap__
        needsCols = []
        needsAttrs = []

        for attr in attrtocol:
            col = attrtocol[attr]
            if attr in k:
                setattr(self, attr, k[attr])
                colmap[col] = k.pop(attr)
            else:
                if col.model.needsValue():
                    raise TypeError(
                        "required attribute {0!r} not passed"
                        .format(attr)
                    )
                else:
                    needsCols.append(col)
                    needsAttrs.append(attr)

        if k:
            raise TypeError("received unknown attribute{0}: {1}".format(
                "s" if len(k) > 1 else "", ", ".join(sorted(k))
            ))
        result = yield (Insert(colmap, Return=needsCols if needsCols else None)
                        .on(transaction))
        if needsCols:
            self._attributesFromRow(zip(needsAttrs, result[0]))

        self.transaction = transaction

        returnValue(self)
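
For context, Record classes like this are declared against a schema table, and create() uses the Insert's Return clause to read back any columns the caller omitted and the database defaulted (sequences, timestamps). A hedged sketch, assuming the Record/fromTable helpers from twext.enterprise.dal.record used by the surrounding codebase (Thing, the THING table, and its column-to-attribute mapping are hypothetical):

    from twisted.internet.defer import inlineCallbacks, returnValue
    from twext.enterprise.dal.record import Record, fromTable

    class Thing(Record, fromTable(schema.THING)):
        """
        Record type for rows of the hypothetical THING table.
        """

    @inlineCallbacks
    def makeThing(txn):
        # NAME is supplied; a sequence-backed ID would be filled in from the
        # Insert's Return clause and mapped back onto the instance.
        thing = yield Thing.create(txn, name=u"example")
        returnValue(thing)
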
Example 16
    @inlineCallbacks
    def newReference(self, resourceID):
        """
        Create a new reference of this attachment to the supplied calendar object resource id, and
        return a ManagedAttachment for the new reference.

        @param resourceID: the resource id to reference
        @type resourceID: C{int}

        @return: the new managed attachment
        @rtype: L{ManagedAttachment}
        """

        attco = self._attachmentLinkSchema
        yield Insert({
            attco.ATTACHMENT_ID: self._attachmentID,
            attco.MANAGED_ID: self._managedID,
            attco.CALENDAR_OBJECT_RESOURCE_ID: resourceID,
        }).on(self._txn)

        mattach = (yield ManagedAttachment.load(self._txn, resourceID,
                                                self._managedID))
        returnValue(mattach)
Example 17
    @inlineCallbacks
    def _calendarTimezoneUpgrade_setup(self):

        TimezoneCache.create()
        self.addCleanup(TimezoneCache.clear)

        tz1 = Component(None, pycalendar=readVTZ("Etc/GMT+1"))
        tz2 = Component(None, pycalendar=readVTZ("Etc/GMT+2"))
        tz3 = Component(None, pycalendar=readVTZ("Etc/GMT+3"))

        # Share user01 calendar with user03
        calendar = (yield self.calendarUnderTest(name="calendar_1",
                                                 home="user01"))
        home3 = yield self.homeUnderTest(name="user03")
        shared_name = yield calendar.shareWith(home3, _BIND_MODE_WRITE)

        user_details = (
            ("user01", "calendar_1", tz1),
            ("user02", "calendar_1", tz2),
            ("user03", "calendar_1", None),
            ("user03", shared_name, tz3),
        )

        # Set dead properties on calendars
        for user, calname, tz in user_details:
            calendar = (yield self.calendarUnderTest(name=calname, home=user))
            if tz:
                calendar.properties()[PropertyName.fromElement(
                    caldavxml.CalendarTimeZone
                )] = caldavxml.CalendarTimeZone.fromString(str(tz))

            # Force data version to previous
            home = (yield self.homeUnderTest(name=user))
            ch = home._homeSchema
            yield Update(
                {
                    ch.DATAVERSION: 4
                },
                Where=ch.RESOURCE_ID == home._resourceID,
            ).on(self.transactionUnderTest())

        yield self.commit()

        for user, calname, tz in user_details:
            calendar = (yield self.calendarUnderTest(name=calname, home=user))
            self.assertEqual(calendar.getTimezone(), None)
            self.assertEqual(
                PropertyName.fromElement(caldavxml.CalendarTimeZone)
                in calendar.properties(), tz is not None)
        yield self.commit()

        # Create "fake" entry for non-existent share
        txn = self.transactionUnderTest()
        calendar = (yield self.calendarUnderTest(name="calendar_1",
                                                 home="user01"))
        rp = schema.RESOURCE_PROPERTY
        yield Insert({
            rp.RESOURCE_ID: calendar._resourceID,
            rp.NAME: PropertyName.fromElement(caldavxml.CalendarTimeZone).toString(),
            rp.VALUE: caldavxml.CalendarTimeZone.fromString(str(tz3)).toxml(),
            rp.VIEWER_UID: "user04",
        }).on(txn)
        yield self.commit()

        returnValue(user_details)
Example 18
    @inlineCallbacks
    def _calendarTimezoneUpgrade_setup(self):

        tz1 = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//calendarserver.org//Zonal//EN
BEGIN:VTIMEZONE
TZID:Etc/GMT+1
X-LIC-LOCATION:Etc/GMT+1
BEGIN:STANDARD
DTSTART:18000101T000000
RDATE:18000101T000000
TZNAME:GMT+1
TZOFFSETFROM:-0100
TZOFFSETTO:-0100
END:STANDARD
END:VTIMEZONE
END:VCALENDAR
""")
        tz2 = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//calendarserver.org//Zonal//EN
BEGIN:VTIMEZONE
TZID:Etc/GMT+2
X-LIC-LOCATION:Etc/GMT+2
BEGIN:STANDARD
DTSTART:18000101T000000
RDATE:18000101T000000
TZNAME:GMT+2
TZOFFSETFROM:-0200
TZOFFSETTO:-0200
END:STANDARD
END:VTIMEZONE
END:VCALENDAR
""")
        tz3 = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//calendarserver.org//Zonal//EN
BEGIN:VTIMEZONE
TZID:Etc/GMT+3
X-LIC-LOCATION:Etc/GMT+3
BEGIN:STANDARD
DTSTART:18000101T000000
RDATE:18000101T000000
TZNAME:GMT+3
TZOFFSETFROM:-0300
TZOFFSETTO:-0300
END:STANDARD
END:VTIMEZONE
END:VCALENDAR
""")

        # Share user01 calendar with user03
        calendar = (yield self.calendarUnderTest(name="calendar_1",
                                                 home="user01"))
        home3 = yield self.homeUnderTest(name="user03")
        shared_name = yield calendar.shareWith(home3, _BIND_MODE_WRITE)

        user_details = (
            ("user01", "calendar_1", tz1),
            ("user02", "calendar_1", tz2),
            ("user03", "calendar_1", None),
            ("user03", shared_name, tz3),
        )

        # Set dead properties on calendars
        for user, calname, tz in user_details:
            calendar = (yield self.calendarUnderTest(name=calname, home=user))
            if tz:
                calendar.properties()[PropertyName.fromElement(
                    caldavxml.CalendarTimeZone
                )] = caldavxml.CalendarTimeZone.fromString(str(tz))

            # Force data version to previous
            home = (yield self.homeUnderTest(name=user))
            ch = home._homeSchema
            yield Update(
                {
                    ch.DATAVERSION: 4
                },
                Where=ch.RESOURCE_ID == home._resourceID,
            ).on(self.transactionUnderTest())

        yield self.commit()

        for user, calname, tz in user_details:
            calendar = (yield self.calendarUnderTest(name=calname, home=user))
            self.assertEqual(calendar.getTimezone(), None)
            self.assertEqual(
                PropertyName.fromElement(caldavxml.CalendarTimeZone)
                in calendar.properties(), tz is not None)
        yield self.commit()

        # Create "fake" entry for non-existent share
        txn = self.transactionUnderTest()
        calendar = (yield self.calendarUnderTest(name="calendar_1",
                                                 home="user01"))
        rp = schema.RESOURCE_PROPERTY
        yield Insert({
            rp.RESOURCE_ID: calendar._resourceID,
            rp.NAME: PropertyName.fromElement(caldavxml.CalendarTimeZone).toString(),
            rp.VALUE: caldavxml.CalendarTimeZone.fromString(str(tz3)).toxml(),
            rp.VIEWER_UID: "user04",
        }).on(txn)
        yield self.commit()

        returnValue(user_details)
Example 19
class PropertyStore(AbstractPropertyStore):
    """
    We are going to use memcache to cache properties per-resource/per-user. However, we
    need to be able to invalidate on a per-resource basis, in addition to per-resource/per-user.
    So we will also track in memcache which resource/uid tokens are valid. That way we can remove
    the tracking entry to completely invalidate all the per-resource/per-user pairs.
    """

    _cacher = Memcacher("SQL.props", pickle=True, key_normalization=False)

    def __init__(self, *a, **kw):
        raise NotImplementedError(
            "do not construct directly, call PropertyStore.load()")

    _allWithID = Select([prop.NAME, prop.VIEWER_UID, prop.VALUE],
                        From=prop,
                        Where=prop.RESOURCE_ID == Parameter("resourceID"))

    _allWithIDViewer = Select(
        [prop.NAME, prop.VALUE],
        From=prop,
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.VIEWER_UID == Parameter("viewerID")))

    def _cacheToken(self, userid):
        return "{0!s}/{1}".format(self._resourceID, userid)

    @inlineCallbacks
    def _refresh(self, txn):
        """
        Load, or re-load, this object with the given transaction; first from
        memcache, then pulling from the database again.
        """
        # Cache existing properties in this object
        # Look for memcache entry first

        @inlineCallbacks
        def _cache_user_props(uid):

            # First check whether uid already has a valid cached entry
            rows = None
            if self._cacher is not None:
                valid_cached_users = yield self._cacher.get(
                    str(self._resourceID))
                if valid_cached_users is None:
                    valid_cached_users = set()

                # Fetch cached user data if valid and present
                if uid in valid_cached_users:
                    rows = yield self._cacher.get(self._cacheToken(uid))

            # If no cached data, fetch from SQL DB and cache
            if rows is None:
                rows = yield self._allWithIDViewer.on(
                    txn,
                    resourceID=self._resourceID,
                    viewerID=uid,
                )
                if self._cacher is not None:
                    yield self._cacher.set(self._cacheToken(uid),
                                           rows if rows is not None else ())

                    # Mark this uid as valid
                    valid_cached_users.add(uid)
                    yield self._cacher.set(str(self._resourceID),
                                           valid_cached_users)

            for name, value in rows:
                self._cached[(name, uid)] = value

        # Cache for the owner first, then the sharee if different
        yield _cache_user_props(self._defaultUser)
        if self._perUser != self._defaultUser:
            yield _cache_user_props(self._perUser)
        if self._proxyUser != self._perUser:
            yield _cache_user_props(self._proxyUser)

    @classmethod
    @inlineCallbacks
    def load(cls,
             defaultuser,
             shareUser,
             proxyUser,
             txn,
             resourceID,
             created=False,
             notifyCallback=None):
        """
        @param notifyCallback: a callable used to trigger notifications when the
            property store changes.
        """
        self = cls.__new__(cls)
        super(PropertyStore, self).__init__(defaultuser, shareUser, proxyUser)
        self._txn = txn
        self._resourceID = resourceID
        if not self._txn.store().queryCachingEnabled():
            self._cacher = None
        self._cached = {}
        if not created:
            yield self._refresh(txn)
        self._notifyCallback = notifyCallback
        returnValue(self)

    @classmethod
    @inlineCallbacks
    def forMultipleResources(cls, defaultUser, shareeUser, proxyUser, txn,
                             childColumn, parentColumn, parentID):
        """
        Load all property stores for all objects in a collection.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be exposed.

        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.

        @type txn: L{IAsyncTransaction}

        @param childColumn: The resource ID column for the child resources,
            i.e. the resources of the type for which this method will be
            loading the property stores.

        @param parentColumn: The resource ID column for the parent resources.
            e.g. if childColumn is addressbook object's resource ID, then this
            should be addressbook's resource ID.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
            value taken from C{childColumn}) to a L{PropertyStore} for that ID.
        """
        childTable = TableSyntax(childColumn.model.table)
        query = Select(
            [
                childColumn,
                # XXX is that column necessary?  as per the 'on' clause it has to be
                # the same as prop.RESOURCE_ID anyway.
                prop.RESOURCE_ID,
                prop.NAME,
                prop.VIEWER_UID,
                prop.VALUE
            ],
            From=prop.join(childTable, prop.RESOURCE_ID == childColumn,
                           'right'),
            Where=parentColumn == parentID)
        rows = yield query.on(txn)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser,
                                           txn, rows)
        returnValue(stores)

    @classmethod
    @inlineCallbacks
    def forMultipleResourcesWithResourceIDs(cls, defaultUser, shareeUser,
                                            proxyUser, txn, resourceIDs):
        """
        Load all property stores for all specified resources.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query. Note that the caller of this
        method must make sure that the number of items being queried for is
        within a reasonable batch size. If the caller is itself batching
        related queries, that will take care of itself.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be exposed.

        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.

        @type txn: L{IAsyncTransaction}

        @param resourceIDs: The set of resource IDs to query.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
            value from C{resourceIDs}) to a L{PropertyStore} for that ID.
        """
        query = Select(
            [prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop,
            Where=prop.RESOURCE_ID.In(
                Parameter("resourceIDs", len(resourceIDs))))
        rows = yield query.on(txn, resourceIDs=resourceIDs)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser,
                                           txn, rows)

        # Make sure we have a store for each resourceID even if no properties exist
        for resourceID in resourceIDs:
            if resourceID not in stores:
                store = cls.__new__(cls)
                super(PropertyStore, store).__init__(defaultUser, shareeUser,
                                                     proxyUser)
                store._txn = txn
                store._resourceID = resourceID
                store._cached = {}
                stores[resourceID] = store

        returnValue(stores)

    @classmethod
    def _createMultipleStores(cls, defaultUser, shareeUser, proxyUser, txn,
                              rows):
        """
        Create a set of stores for the set of rows passed in.
        """

        createdStores = {}
        for row in rows:
            if len(row) == 5:
                object_resource_id, resource_id, name, view_uid, value = row
            else:
                object_resource_id = None
                resource_id, name, view_uid, value = row
            if resource_id:
                if resource_id not in createdStores:
                    store = cls.__new__(cls)
                    super(PropertyStore,
                          store).__init__(defaultUser, shareeUser, proxyUser)
                    store._txn = txn
                    store._resourceID = resource_id
                    store._cached = {}
                    createdStores[resource_id] = store
                createdStores[resource_id]._cached[(name, view_uid)] = value
            elif object_resource_id:
                store = cls.__new__(cls)
                super(PropertyStore, store).__init__(defaultUser, shareeUser,
                                                     proxyUser)
                store._txn = txn
                store._resourceID = object_resource_id
                store._cached = {}
                createdStores[object_resource_id] = store

        return createdStores

    def _getitem_uid(self, key, uid):
        validKey(key)

        try:
            value = self._cached[(key.toString(), uid)]
        except KeyError:
            raise KeyError(key)

        return WebDAVDocument.fromString(value).root_element

    _updateQuery = Update(
        {prop.VALUE: Parameter("value")},
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.NAME == Parameter("name")).And(
                prop.VIEWER_UID == Parameter("uid")))

    _insertQuery = Insert({
        prop.VALUE: Parameter("value"),
        prop.RESOURCE_ID: Parameter("resourceID"),
        prop.NAME: Parameter("name"),
        prop.VIEWER_UID: Parameter("uid")
    })

    def _setitem_uid(self, key, value, uid):
        validKey(key)

        key_str = key.toString()
        value_str = value.toxml()

        tried = []

        wasCached = [(key_str, uid) in self._cached]
        self._cached[(key_str, uid)] = value_str

        @inlineCallbacks
        def trySetItem(txn):
            if tried:
                yield self._refresh(txn)
                wasCached[:] = [(key_str, uid) in self._cached]
            tried.append(True)
            if wasCached[0]:
                yield self._updateQuery.on(txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)
            else:
                yield self._insertQuery.on(txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)
            if self._cacher is not None:
                self._cacher.delete(self._cacheToken(uid))

        # Call the registered notification callback - we need to do this as a preCommit since it involves
        # a bunch of deferred operations, but this propstore api is not deferred. preCommit will execute
        # the deferreds properly, and it is fine to wait until everything else is done before sending the
        # notifications.
        if getattr(self, "_notifyCallback", None) is not None:
            self._txn.preCommit(self._notifyCallback)

        def justLogIt(f):
            f.trap(AllRetriesFailed)
            self.log.error("setting a property failed; probably nothing.")

        self._txn.subtransaction(trySetItem).addErrback(justLogIt)

    _deleteQuery = Delete(
        prop,
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.NAME == Parameter("name")).And(
                prop.VIEWER_UID == Parameter("uid")))

    def _delitem_uid(self, key, uid):
        validKey(key)

        key_str = key.toString()
        del self._cached[(key_str, uid)]

        @inlineCallbacks
        def doIt(txn):
            yield self._deleteQuery.on(txn,
                                       lambda: KeyError(key),
                                       resourceID=self._resourceID,
                                       name=key_str,
                                       uid=uid)
            if self._cacher is not None:
                self._cacher.delete(self._cacheToken(uid))

        # Call the registered notification callback - we need to do this as a preCommit since it involves
        # a bunch of deferred operations, but this propstore api is not deferred. preCommit will execute
        # the deferreds properly, and it is fine to wait until everything else is done before sending the
        # notifications.
        if getattr(self, "_notifyCallback", None) is not None:
            self._txn.preCommit(self._notifyCallback)

        def justLogIt(f):
            f.trap(AllRetriesFailed)
            self.log.error("deleting a property failed; probably nothing.")

        self._txn.subtransaction(doIt).addErrback(justLogIt)

    def _keys_uid(self, uid):
        for cachedKey, cachedUID in self._cached.keys():
            if cachedUID == uid:
                yield PropertyName.fromString(cachedKey)

    _deleteResourceQuery = Delete(
        prop, Where=(prop.RESOURCE_ID == Parameter("resourceID")))

    @inlineCallbacks
    def _removeResource(self):

        self._cached = {}
        yield self._deleteResourceQuery.on(self._txn,
                                           resourceID=self._resourceID)

        # Invalidate entire set of cached per-user data for this resource
        if self._cacher is not None:
            self._cacher.delete(str(self._resourceID))

    @inlineCallbacks
    def copyAllProperties(self, other):
        """
        Copy all the properties from another store into this one. This needs to be done
        independently of the UID.
        """

        rows = yield other._allWithID.on(other._txn,
                                         resourceID=other._resourceID)
        for key_str, uid, value_str in rows:
            wasCached = [(key_str, uid) in self._cached]
            if wasCached[0]:
                yield self._updateQuery.on(self._txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)
            else:
                yield self._insertQuery.on(self._txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)

        # Invalidate entire set of cached per-user data for this resource and reload
        self._cached = {}
        if self._cacher is not None:
            self._cacher.delete(str(self._resourceID))
        yield self._refresh(self._txn)
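
The class docstring above describes a two-level memcache layout worth spelling out: one key per resource (str(resourceID)) holds the set of user IDs whose cached rows are valid, and one key per resource/user pair (the _cacheToken value, "resourceID/uid") holds the rows themselves. Deleting just the per-resource key therefore invalidates every per-user entry at once, because _refresh() only trusts a per-user entry when its uid appears in the token set. A hedged sketch of that invalidation step (invalidateResource is hypothetical):

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def invalidateResource(cacher, resourceID):
        # Dropping the token set leaves the per-user "resourceID/uid" entries
        # in memcache, but _refresh() will no longer treat them as valid and
        # will re-read the properties from SQL.
        yield cacher.delete(str(resourceID))
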
Example 20
class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
    log = Logger()

    implements(INotificationCollection)

    compareAttributes = (
        "_ownerUID",
        "_resourceID",
    )

    _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
    _homeSchema = schema.NOTIFICATION_HOME

    _externalClass = None

    @classmethod
    def makeClass(cls, transaction, homeData):
        """
        Build the actual home class taking into account the possibility that we might need to
        switch in the external version of the class.

        @param transaction: transaction
        @type transaction: L{CommonStoreTransaction}
        @param homeData: home table column data
        @type homeData: C{list}
        """

        status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
        if status == _HOME_STATUS_EXTERNAL:
            home = cls._externalClass(transaction, homeData)
        else:
            home = cls(transaction, homeData)
        return home.initFromStore()

    @classmethod
    def homeColumns(cls):
        """
        Return a list of column names to retrieve when doing an ownerUID->home lookup.
        """

        # Common behavior is to have created and modified

        return (
            cls._homeSchema.RESOURCE_ID,
            cls._homeSchema.OWNER_UID,
            cls._homeSchema.STATUS,
        )

    @classmethod
    def homeAttributes(cls):
        """
        Return a list of attributes names to map L{homeColumns} to.
        """

        # Common behavior is to have created and modified

        return (
            "_resourceID",
            "_ownerUID",
            "_status",
        )

    def __init__(self, txn, homeData):

        self._txn = txn

        for attr, value in zip(self.homeAttributes(), homeData):
            setattr(self, attr, value)

        self._dataVersion = None
        self._notifications = {}
        self._notificationNames = None
        self._syncTokenRevision = None

        # Make sure we have push notifications setup to push on this collection
        # as well as the home it is in
        self._notifiers = dict([(
            factory_name,
            factory.newNotifier(self),
        ) for factory_name, factory in txn._notifierFactories.items()])

    @inlineCallbacks
    def initFromStore(self):
        """
        Initialize this object from the store.
        """

        yield self._loadPropertyStore()
        returnValue(self)

    @property
    def _home(self):
        """
        L{NotificationCollection} serves as its own C{_home} for the purposes of
        working with L{_SharedSyncLogic}.
        """
        return self

    @classmethod
    def notificationsWithUID(cls, txn, uid, status=None, create=False):
        return cls.notificationsWith(txn,
                                     None,
                                     uid,
                                     status=status,
                                     create=create)

    @classmethod
    def notificationsWithResourceID(cls, txn, rid):
        return cls.notificationsWith(txn, rid, None)

    @classmethod
    @inlineCallbacks
    def notificationsWith(cls, txn, rid, uid, status=None, create=False):
        """
        @param uid: I'm going to assume uid is utf-8 encoded bytes
        """
        if rid is not None:
            query = cls._homeSchema.RESOURCE_ID == rid
        elif uid is not None:
            query = cls._homeSchema.OWNER_UID == uid
            if status is not None:
                query = query.And(cls._homeSchema.STATUS == status)
            else:
                statusSet = (
                    _HOME_STATUS_NORMAL,
                    _HOME_STATUS_EXTERNAL,
                )
                if txn._allowDisabled:
                    statusSet += (_HOME_STATUS_DISABLED, )
                query = query.And(cls._homeSchema.STATUS.In(statusSet))
        else:
            raise AssertionError("One of rid or uid must be set")

        results = yield Select(
            cls.homeColumns(),
            From=cls._homeSchema,
            Where=query,
        ).on(txn)

        if len(results) > 1:
            # Pick the best one in order: normal, disabled and external
            byStatus = dict([
                (result[cls.homeColumns().index(cls._homeSchema.STATUS)],
                 result) for result in results
            ])
            result = byStatus.get(_HOME_STATUS_NORMAL)
            if result is None:
                result = byStatus.get(_HOME_STATUS_DISABLED)
            if result is None:
                result = byStatus.get(_HOME_STATUS_EXTERNAL)
        elif results:
            result = results[0]
        else:
            result = None

        if result:
            # Return object that already exists in the store
            homeObject = yield cls.makeClass(txn, result)
            returnValue(homeObject)
        else:
            # Can only create when uid is specified
            if not create or uid is None:
                returnValue(None)

            # Determine if the user is local or external
            record = yield txn.directoryService().recordWithUID(
                uid.decode("utf-8"))
            if record is None:
                raise DirectoryRecordNotFoundError(
                    "Cannot create home for UID since no directory record exists: {}"
                    .format(uid))

            if status is None:
                createStatus = (_HOME_STATUS_NORMAL if record.thisServer()
                                else _HOME_STATUS_EXTERNAL)
            elif status == _HOME_STATUS_MIGRATING:
                if record.thisServer():
                    raise RecordNotAllowedError(
                        "Cannot migrate user data for a user already hosted on this server"
                    )
                createStatus = status
            elif status in (
                    _HOME_STATUS_NORMAL,
                    _HOME_STATUS_EXTERNAL,
            ):
                createStatus = status
            else:
                raise RecordNotAllowedError(
                    "Cannot create home with status {}: {}".format(
                        status, uid))

            # Use savepoint so we can do a partial rollback if there is a race
            # condition where this row has already been inserted
            savepoint = SavepointAction("notificationsWithUID")
            yield savepoint.acquire(txn)

            try:
                resourceid = (yield Insert(
                    {
                        cls._homeSchema.OWNER_UID: uid,
                        cls._homeSchema.STATUS: createStatus,
                    },
                    Return=cls._homeSchema.RESOURCE_ID).on(txn))[0][0]
            except Exception:
                # FIXME: Really want to trap the pg.DatabaseError but in a non-
                # DB specific manner
                yield savepoint.rollback(txn)

                # Retry the query - row may exist now, if not re-raise
                results = yield Select(
                    cls.homeColumns(),
                    From=cls._homeSchema,
                    Where=query,
                ).on(txn)
                if results:
                    homeObject = yield cls.makeClass(txn, results[0])
                    returnValue(homeObject)
                else:
                    raise
            else:
                yield savepoint.release(txn)

                # Note that we must not cache the owner_uid->resource_id
                # mapping in the query cacher when creating as we don't want that to appear
                # until AFTER the commit
                results = yield Select(
                    cls.homeColumns(),
                    From=cls._homeSchema,
                    Where=cls._homeSchema.RESOURCE_ID == resourceid,
                ).on(txn)
                homeObject = yield cls.makeClass(txn, results[0])
                if homeObject.normal():
                    yield homeObject._initSyncToken()
                    yield homeObject.notifyChanged()
                returnValue(homeObject)

    @inlineCallbacks
    def _loadPropertyStore(self):
        self._propertyStore = yield PropertyStore.load(
            self._ownerUID,
            self._ownerUID,
            None,
            self._txn,
            self._resourceID,
            notifyCallback=self.notifyChanged)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._resourceID)

    def id(self):
        """
        Retrieve the store identifier for this collection.

        @return: store identifier.
        @rtype: C{int}
        """
        return self._resourceID

    @classproperty
    def _dataVersionQuery(cls):
        nh = cls._homeSchema
        return Select([nh.DATAVERSION],
                      From=nh,
                      Where=nh.RESOURCE_ID == Parameter("resourceID"))

    @inlineCallbacks
    def dataVersion(self):
        if self._dataVersion is None:
            self._dataVersion = (yield self._dataVersionQuery.on(
                self._txn, resourceID=self._resourceID))[0][0]
        returnValue(self._dataVersion)

    def name(self):
        return "notification"

    def uid(self):
        return self._ownerUID

    def status(self):
        return self._status

    @inlineCallbacks
    def setStatus(self, newStatus):
        """
        Mark this home as being purged.
        """
        # Only if different
        if self._status != newStatus:
            yield Update(
                {
                    self._homeSchema.STATUS: newStatus
                },
                Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
            ).on(self._txn)
            self._status = newStatus

    def normal(self):
        """
        Is this a normal (internal) home.

        @return: a L{bool}.
        """
        return self._status == _HOME_STATUS_NORMAL

    def external(self):
        """
        Is this an external home.

        @return: a L{bool}.
        """
        return self._status == _HOME_STATUS_EXTERNAL

    def owned(self):
        return True

    def ownerHome(self):
        return self._home

    def viewerHome(self):
        return self._home

    def notificationObjectRecords(self):
        return NotificationObjectRecord.querysimple(
            self._txn, notificationHomeResourceID=self.id())

    @inlineCallbacks
    def notificationObjects(self):
        results = (yield NotificationObject.loadAllObjects(self))
        for result in results:
            self._notifications[result.uid()] = result
        self._notificationNames = sorted([result.name() for result in results])
        returnValue(results)

    _notificationUIDsForHomeQuery = Select(
        [schema.NOTIFICATION.NOTIFICATION_UID],
        From=schema.NOTIFICATION,
        Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID == Parameter(
            "resourceID"))

    @inlineCallbacks
    def listNotificationObjects(self):
        """
        List the names of all notification objects in this collection. Note that the name
        is actually the UID value with ".xml" appended, as per L{NotificationObject.name}.
        """
        if self._notificationNames is None:
            rows = yield self._notificationUIDsForHomeQuery.on(
                self._txn, resourceID=self._resourceID)
            self._notificationNames = sorted([row[0] + ".xml" for row in rows])
        returnValue(self._notificationNames)

    # used by _SharedSyncLogic.resourceNamesSinceRevision()
    def listObjectResources(self):
        return self.listNotificationObjects()

    def _nameToUID(self, name):
        """
        Based on the file-backed implementation, the 'name' is just uid +
        ".xml".
        """
        return name.rsplit(".", 1)[0]

    def notificationObjectWithName(self, name):
        return self.notificationObjectWithUID(self._nameToUID(name))

    @memoizedKey("uid", "_notifications")
    @inlineCallbacks
    def notificationObjectWithUID(self, uid):
        """
        Create an empty notification object first then have it initialize itself
        from the store.
        """
        no = NotificationObject(self, uid)
        no = (yield no.initFromStore())
        returnValue(no)

    @inlineCallbacks
    def writeNotificationObject(self, uid, notificationtype, notificationdata):

        inserting = False
        notificationObject = yield self.notificationObjectWithUID(uid)
        if notificationObject is None:
            notificationObject = NotificationObject(self, uid)
            inserting = True
        yield notificationObject.setData(uid,
                                         notificationtype,
                                         notificationdata,
                                         inserting=inserting)
        if inserting:
            yield self._insertRevision(notificationObject.name())
            if self._notificationNames is not None:
                self._notificationNames.append(notificationObject.name())
        else:
            yield self._updateRevision(notificationObject.name())
        yield self.notifyChanged()
        returnValue(notificationObject)

    def removeNotificationObjectWithName(self, name):
        if self._notificationNames is not None:
            self._notificationNames.remove(name)
        return self.removeNotificationObjectWithUID(self._nameToUID(name))

    _removeByUIDQuery = Delete(
        From=schema.NOTIFICATION,
        Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter("uid")).And(
            schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID == Parameter(
                "resourceID")))

    @inlineCallbacks
    def removeNotificationObjectWithUID(self, uid):
        yield self._removeByUIDQuery.on(self._txn,
                                        uid=uid,
                                        resourceID=self._resourceID)
        self._notifications.pop(uid, None)
        yield self._deleteRevision("%s.xml" % (uid, ))
        yield self.notifyChanged()

    _initSyncTokenQuery = Insert(
        {
            _revisionsSchema.HOME_RESOURCE_ID: Parameter("resourceID"),
            _revisionsSchema.RESOURCE_NAME: None,
            _revisionsSchema.REVISION: schema.REVISION_SEQ,
            _revisionsSchema.DELETED: False
        },
        Return=_revisionsSchema.REVISION)

    @inlineCallbacks
    def _initSyncToken(self):
        self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
            self._txn, resourceID=self._resourceID))[0][0]

    _syncTokenQuery = Select(
        [Max(_revisionsSchema.REVISION)],
        From=_revisionsSchema,
        Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter("resourceID"))

    @inlineCallbacks
    def syncToken(self):
        if self._syncTokenRevision is None:
            self._syncTokenRevision = yield self.syncTokenRevision()
        returnValue("%s_%s" % (self._resourceID, self._syncTokenRevision))

    @inlineCallbacks
    def syncTokenRevision(self):
        revision = (yield
                    self._syncTokenQuery.on(self._txn,
                                            resourceID=self._resourceID))[0][0]
        if revision is None:
            revision = int(
                (yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
        returnValue(revision)

    def properties(self):
        return self._propertyStore

    def addNotifier(self, factory_name, notifier):
        self._notifiers[factory_name] = notifier

    def getNotifier(self, factory_name):
        return self._notifiers.get(factory_name)

    def notifierID(self):
        return (
            self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix,
            "%s/notification" % (self.ownerHome().uid(), ),
        )

    def parentNotifierID(self):
        return (
            self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix,
            "%s" % (self.ownerHome().uid(), ),
        )

    @inlineCallbacks
    def notifyChanged(self, category=ChangeCategory.default):
        """
        Send notifications, change sync token and bump last modified because
        the resource has changed.  We ensure we only do this once per object
        per transaction.
        """
        if self._txn.isNotifiedAlready(self):
            returnValue(None)
        self._txn.notificationAddedForObject(self)

        # Send notifications
        if self._notifiers:
            # cache notifiers run in post commit
            notifier = self._notifiers.get("cache", None)
            if notifier:
                self._txn.postCommit(notifier.notify)
            # push notifiers add their work items immediately
            notifier = self._notifiers.get("push", None)
            if notifier:
                yield notifier.notify(self._txn, priority=category.value)

        returnValue(None)

    @classproperty
    def _completelyNewRevisionQuery(cls):
        rev = cls._revisionsSchema
        return Insert(
            {
                rev.HOME_RESOURCE_ID: Parameter("homeID"),
                # rev.RESOURCE_ID: Parameter("resourceID"),
                rev.RESOURCE_NAME: Parameter("name"),
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: False
            },
            Return=rev.REVISION)

    def _maybeNotify(self):
        """
        Emit a push notification after C{_changeRevision}.
        """
        return self.notifyChanged()

    @inlineCallbacks
    def remove(self):
        """
        Remove DB rows corresponding to this notification home.
        """
        # Delete NOTIFICATION rows
        no = schema.NOTIFICATION
        kwds = {"ResourceID": self._resourceID}
        yield Delete(
            From=no,
            Where=(
                no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")),
        ).on(self._txn, **kwds)

        # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
        nh = schema.NOTIFICATION_HOME
        yield Delete(
            From=nh,
            Where=(nh.RESOURCE_ID == Parameter("ResourceID")),
        ).on(self._txn, **kwds)

    purge = remove
Example No. 21
    @classmethod
    @inlineCallbacks
    def notificationsWith(cls, txn, rid, uid, status=None, create=False):
        """
        @param uid: I'm going to assume uid is utf-8 encoded bytes
        """
        if rid is not None:
            query = cls._homeSchema.RESOURCE_ID == rid
        elif uid is not None:
            query = cls._homeSchema.OWNER_UID == uid
            if status is not None:
                query = query.And(cls._homeSchema.STATUS == status)
            else:
                statusSet = (
                    _HOME_STATUS_NORMAL,
                    _HOME_STATUS_EXTERNAL,
                )
                if txn._allowDisabled:
                    statusSet += (_HOME_STATUS_DISABLED, )
                query = query.And(cls._homeSchema.STATUS.In(statusSet))
        else:
            raise AssertionError("One of rid or uid must be set")

        results = yield Select(
            cls.homeColumns(),
            From=cls._homeSchema,
            Where=query,
        ).on(txn)

        if len(results) > 1:
            # Pick the best one in this order: normal, disabled, and external
            byStatus = dict([
                (result[cls.homeColumns().index(cls._homeSchema.STATUS)],
                 result) for result in results
            ])
            result = byStatus.get(_HOME_STATUS_NORMAL)
            if result is None:
                result = byStatus.get(_HOME_STATUS_DISABLED)
            if result is None:
                result = byStatus.get(_HOME_STATUS_EXTERNAL)
        elif results:
            result = results[0]
        else:
            result = None

        if result:
            # Return object that already exists in the store
            homeObject = yield cls.makeClass(txn, result)
            returnValue(homeObject)
        else:
            # Can only create when uid is specified
            if not create or uid is None:
                returnValue(None)

            # Determine if the user is local or external
            record = yield txn.directoryService().recordWithUID(
                uid.decode("utf-8"))
            if record is None:
                raise DirectoryRecordNotFoundError(
                    "Cannot create home for UID since no directory record exists: {}"
                    .format(uid))

            if status is None:
                createStatus = (
                    _HOME_STATUS_NORMAL if record.thisServer()
                    else _HOME_STATUS_EXTERNAL)
            elif status == _HOME_STATUS_MIGRATING:
                if record.thisServer():
                    raise RecordNotAllowedError(
                        "Cannot migrate data for a user already hosted on this server"
                    )
                createStatus = status
            elif status in (
                    _HOME_STATUS_NORMAL,
                    _HOME_STATUS_EXTERNAL,
            ):
                createStatus = status
            else:
                raise RecordNotAllowedError(
                    "Cannot create home with status {}: {}".format(
                        status, uid))

            # Use savepoint so we can do a partial rollback if there is a race
            # condition where this row has already been inserted
            savepoint = SavepointAction("notificationsWithUID")
            yield savepoint.acquire(txn)

            try:
                resourceid = (yield Insert(
                    {
                        cls._homeSchema.OWNER_UID: uid,
                        cls._homeSchema.STATUS: createStatus,
                    },
                    Return=cls._homeSchema.RESOURCE_ID).on(txn))[0][0]
            except Exception:
                # FIXME: Really want to trap the pg.DatabaseError but in a non-
                # DB specific manner
                yield savepoint.rollback(txn)

                # Retry the query - row may exist now, if not re-raise
                results = yield Select(
                    cls.homeColumns(),
                    From=cls._homeSchema,
                    Where=query,
                ).on(txn)
                if results:
                    homeObject = yield cls.makeClass(txn, results[0])
                    returnValue(homeObject)
                else:
                    raise
            else:
                yield savepoint.release(txn)

                # Note that we must not cache the owner_uid->resource_id
                # mapping in the query cacher when creating as we don't want that to appear
                # until AFTER the commit
                results = yield Select(
                    cls.homeColumns(),
                    From=cls._homeSchema,
                    Where=cls._homeSchema.RESOURCE_ID == resourceid,
                ).on(txn)
                homeObject = yield cls.makeClass(txn, results[0])
                if homeObject.normal():
                    yield homeObject._initSyncToken()
                    yield homeObject.notifyChanged()
                returnValue(homeObject)
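
The savepoint dance above is a general recipe for racing INSERTs: attempt the
insert inside a savepoint, roll back only the savepoint on failure, then
re-select in case a concurrent transaction won the race. A minimal sketch of
that pattern, with hypothetical insertRow/selectRow callables standing in for
the real queries (SavepointAction, inlineCallbacks, and returnValue as used in
the example above):

    @inlineCallbacks
    def insertOrFetch(txn, insertRow, selectRow):
        savepoint = SavepointAction("insertOrFetch")
        yield savepoint.acquire(txn)
        try:
            row = yield insertRow(txn)
        except Exception:
            # Unwind just the savepoint; the enclosing transaction survives.
            yield savepoint.rollback(txn)
            rows = yield selectRow(txn)
            if not rows:
                raise
            row = rows[0]
        else:
            yield savepoint.release(txn)
        returnValue(row)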
Example No. 22
    @inlineCallbacks
    def test_upgrade_SCHEDULE_REPLY(self):

        cal1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:1234-5678
DTSTART:20071114T010000Z
DURATION:PT1H
DTSTAMP:20071114T000000Z
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ORGANIZER:mailto:[email protected]
SUMMARY:Test
END:VEVENT
END:VCALENDAR
"""

        # Load old schema and populate with data
        schema = yield self._loadOldSchema(self.upgradePath.child("v49.sql"))

        txn = self.store.newTransaction("loadData")
        yield Insert({
            schema.CALENDAR_HOME.RESOURCE_ID: 1,
            schema.CALENDAR_HOME.OWNER_UID: "abcdefg",
        }).on(txn)
        yield Insert({
            schema.CALENDAR.RESOURCE_ID: 2,
        }).on(txn)
        yield Insert({
            schema.CALENDAR_OBJECT.RESOURCE_ID: 3,
            schema.CALENDAR_OBJECT.CALENDAR_RESOURCE_ID: 2,
            schema.CALENDAR_OBJECT.RESOURCE_NAME: "1.ics",
            schema.CALENDAR_OBJECT.ICALENDAR_TEXT: cal1,
            schema.CALENDAR_OBJECT.ICALENDAR_UID: "1234-5678",
            schema.CALENDAR_OBJECT.ICALENDAR_TYPE: "VEVENT",
            schema.CALENDAR_OBJECT.MD5: "md5-1234567890",
        }).on(txn)
        yield Insert({
            schema.JOB.JOB_ID: 1,
            schema.JOB.WORK_TYPE: "SCHEDULE_REPLY_WORK",
            schema.JOB.NOT_BEFORE: datetime.utcnow(),
        }).on(txn)
        yield Insert({
            schema.SCHEDULE_WORK.WORK_ID: 1,
            schema.SCHEDULE_WORK.JOB_ID: 1,
            schema.SCHEDULE_WORK.ICALENDAR_UID: "1234-5678",
            schema.SCHEDULE_WORK.WORK_TYPE: "SCHEDULE_REPLY_WORK",
        }).on(txn)
        yield Insert({
            schema.SCHEDULE_REPLY_WORK.WORK_ID: 1,
            schema.SCHEDULE_REPLY_WORK.HOME_RESOURCE_ID: 1,
            schema.SCHEDULE_REPLY_WORK.RESOURCE_ID: 3,
            schema.SCHEDULE_REPLY_WORK.CHANGED_RIDS: None,
        }).on(txn)
        yield txn.commit()

        # Try to upgrade and verify new version afterwards
        upgrader = UpgradeDatabaseSchemaStep(self.store)
        yield upgrader.databaseUpgrade()

        new_version = yield self._loadVersion()
        self.assertEqual(new_version, self.currentVersion)

        txn = self.store.newTransaction("loadData")
        jobs = yield Select(From=schema.JOB).on(txn)
        schedules = yield Select(From=schema.SCHEDULE_WORK).on(txn)
        replies = yield Select(From=schema.SCHEDULE_REPLY_WORK).on(txn)

        self.assertEqual(len(jobs), 1)
        self.assertEqual(len(schedules), 1)
        self.assertEqual(len(replies), 1)

        self.assertEqual(list(replies[0]), [1, 1, 3, None])

        jobs = yield JobItem.all(txn)
        self.assertEqual(len(jobs), 1)
        work = yield jobs[0].workItem()
        self.assertTrue(isinstance(work, ScheduleReplyWork))

        workers = yield ScheduleWork.all(txn)
        self.assertEqual(len(workers), 1)
        self.assertEqual(workers[0].workType, "SCHEDULE_REPLY_WORK")

        yield txn.commit()
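
A small follow-on sketch using only the APIs the test exercises (JobItem.all()
and workItem(); the helper name is hypothetical): enumerate the work item
types still queued after an upgrade.

    @inlineCallbacks
    def pendingWorkTypes(txn):
        types = []
        for job in (yield JobItem.all(txn)):
            work = yield job.workItem()
            types.append(type(work).__name__)
        returnValue(types)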
Example No. 23
def _translateSchema(out, schema=schema):
    """
    When run as a script, translate the schema to another dialect.  Currently
    only postgres and oracle are supported, and native format is postgres, so
    emit in oracle format.
    """
    for sequence in schema.model.sequences:
        out.write('create sequence %s;\n' % (sequence.name, ))
    for table in schema:
        # Table names that exceed Oracle's length limit are rejected outright.
        # The only name which actually exceeds it right now is
        # CALENDAR_OBJECT_ATTACHMENTS_MODE, which isn't actually _used_
        # anywhere.
        if len(table.model.name) > ORACLE_TABLE_NAME_MAX:
            raise SchemaBroken("Table name too long: %s" %
                               (table.model.name, ))
        out.write('create table %s (\n' %
                  (table.model.name[:ORACLE_TABLE_NAME_MAX], ))
        first = True
        for column in table:
            if first:
                first = False
            else:
                out.write(",\n")

            if len(column.model.name) > ORACLE_TABLE_NAME_MAX:
                raise SchemaBroken("Column name too long: %s" %
                                   (column.model.name, ))

            typeName = column.model.type.name
            typeName = _translatedTypes.get(typeName, typeName)
            out.write('    "%s" %s' % (column.model.name, typeName))
            if column.model.type.length:
                out.write("(%s)" % (column.model.type.length, ))
            if [column.model] == table.model.primaryKey:
                out.write(' primary key')
            default = column.model.default
            if default is not NO_DEFAULT:
                # Can't do default sequence types in Oracle, so don't bother.
                if not isinstance(default, Sequence):
                    out.write(' default')
                    if default is None:
                        out.write(' null')
                    elif isinstance(default, ProcedureCall):
                        # Cheating, because there are currently no other
                        # functions being used.
                        out.write(" CURRENT_TIMESTAMP at time zone 'UTC'")
                    else:
                        if default is True:
                            default = 1
                        elif default is False:
                            default = 0
                        out.write(" " + repr(default))
            if ((not column.model.canBeNull())
                    # Oracle treats empty strings as NULLs, so we have to accept
                    # NULL values in columns of a string type.  Other types should
                    # be okay though.
                    and typeName not in ('varchar', 'nclob', 'char', 'nchar',
                                         'nvarchar', 'nvarchar2')):
                out.write(' not null')
            if [column.model] in list(table.model.uniques()):
                out.write(' unique')
            if column.model.references is not None:
                out.write(" references %s" % (column.model.references.name, ))
            if column.model.deleteAction is not None:
                out.write(" on delete %s" % (column.model.deleteAction, ))

        def writeConstraint(name, cols):
            out.write(", \n")  # the table has to have some preceding columns
            out.write("    %s(%s)" % (name, ", ".join('"' + col.name + '"'
                                                      for col in cols)))

        pk = table.model.primaryKey
        if pk is not None and len(pk) > 1:
            writeConstraint("primary key ", pk)

        for uniqueColumns in table.model.uniques():
            if len(uniqueColumns) == 1:
                continue  # already done inline, skip
            writeConstraint("unique ", uniqueColumns)

        for checkConstraint in table.model.constraints:
            if checkConstraint.type == 'CHECK':
                out.write(", \n    ")
                if checkConstraint.name is not None:
                    out.write('constraint "%s" ' % (checkConstraint.name, ))
                out.write("check (%s)" %
                          (_staticSQL(checkConstraint.expression, True)))

        out.write('\n);\n\n')

        for row in table.model.schemaRows:
            cmap = dict([(getattr(table, cmodel.name), val)
                         for (cmodel, val) in row.items()])
            out.write(_staticSQL(Insert(cmap)))
            out.write(";\n")

    for index in schema.model.indexes:
        # Index names combine and repeat multiple table names and column names,
        # so several of them conflict once oracle's length limit is applied.
        # To keep them unique within the limit we truncate and append 8 characters
        # of the md5 hash of the full name.
        shortIndexName = "%s_%s" % (
            index.name[:21],
            str(hashlib.md5(index.name).hexdigest())[:8],
        )
        shortTableName = index.table.name[:30]
        out.write('create index %s on %s (\n    ' %
                  (shortIndexName, shortTableName))
        out.write(',\n    '.join(
            ["\"{}\"".format(column.name) for column in index.columns]))
        out.write('\n);\n\n')
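
    # The truncate-plus-hash scheme above, as a standalone helper for clarity
    # (hypothetical name; the loop above inlines it and always appends the
    # hash suffix, even for short names):
    #
    #     def shortOracleName(name, limit=30, hashLen=8):
    #         digest = hashlib.md5(name).hexdigest()[:hashLen]
    #         return "%s_%s" % (name[:limit - hashLen - 1], digest)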

    # Functions are skipped as they likely use dialect-specific syntax. Functions
    # for other dialects must instead be written in an "extras" file, which will
    # be appended to the output.
    for function in schema.model.functions:
        out.write("-- Skipped Function {}\n".format(function.name))