class ScheduleAddressMapper(object):
    """
    Class that maps a calendar user address into a delivery service type.
    """

    def __init__(self):

        # We are going to cache mappings whilst running
        self.cache = Memcacher("ScheduleAddressMapper", no_invalidation=True)


    @inlineCallbacks
    def getCalendarUser(self, cuaddr, principal):

        # If we have a principal always treat the user as local or partitioned
        if principal:
            returnValue(calendarUserFromPrincipal(cuaddr, principal))

        # Get the type
        cuaddr_type = (yield self.getCalendarUserServiceType(cuaddr))
        if cuaddr_type == DeliveryService.serviceType_caldav:
            # A CalDAV (local) service type with no matching principal means
            # the address cannot be delivered to, so treat it as invalid
            returnValue(InvalidCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_ischedule:
            returnValue(RemoteCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_imip:
            returnValue(EmailCalendarUser(cuaddr))
        else:
            returnValue(InvalidCalendarUser(cuaddr))


    @inlineCallbacks
    def getCalendarUserServiceType(self, cuaddr):

        # Try cache first
        cuaddr_type = (yield self.cache.get(str(cuaddr)))
        if cuaddr_type is None:

            serviceTypes = (ScheduleViaCalDAV,)
            if config.Scheduling[DeliveryService.serviceType_ischedule]["Enabled"]:
                serviceTypes += (ScheduleViaISchedule,)
            if config.Scheduling[DeliveryService.serviceType_imip]["Enabled"]:
                serviceTypes += (ScheduleViaIMip,)
            for service in serviceTypes:
                matched = (yield service.matchCalendarUserAddress(cuaddr))
                if matched:
                    yield self.cache.set(str(cuaddr), service.serviceType())
                    returnValue(service.serviceType())

        returnValue(cuaddr_type)


    def isCalendarUserInMyDomain(self, cuaddr):

        # Check whether it is a possible local address
        def _gotResult(serviceType):
            return serviceType == DeliveryService.serviceType_caldav

        d = self.getCalendarUserServiceType(cuaddr)
        d.addCallback(_gotResult)
        return d
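
For orientation, a minimal usage sketch (the helper and its caller are illustrative additions, not part of the original module): resolve an address, then branch on the calendar-user class it maps to.

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def classifyAddress(cuaddr, principal=None):
    # Illustrative helper: RemoteCalendarUser implies iSchedule delivery,
    # EmailCalendarUser implies iMIP, and InvalidCalendarUser means the
    # address matched nothing deliverable.
    mapper = ScheduleAddressMapper()
    cuser = yield mapper.getCalendarUser(cuaddr, principal)
    returnValue(cuser)
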
    @inlineCallbacks
    def test_missingget(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
class ScheduleAddressMapper(object):
    """
    Class that maps a calendar user address into a delivery service type.
    """

    def __init__(self):

        # We are going to cache mappings whilst running
        self.cache = Memcacher("ScheduleAddressMapper", no_invalidation=True)


    @inlineCallbacks
    def getCalendarUser(self, cuaddr):

        # Get the type
        cuaddr_type = (yield self.getCalendarUserServiceType(cuaddr))
        if cuaddr_type == DeliveryService.serviceType_caldav:
            returnValue(InvalidCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_ischedule:
            returnValue(RemoteCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_imip:
            returnValue(EmailCalendarUser(cuaddr))
        else:
            returnValue(InvalidCalendarUser(cuaddr))


    @inlineCallbacks
    def getCalendarUserServiceType(self, cuaddr):

        # Try cache first
        cuaddr_type = (yield self.cache.get(str(cuaddr)))
        if cuaddr_type is None:

            serviceTypes = (ScheduleViaCalDAV,)
            if config.Scheduling[DeliveryService.serviceType_ischedule]["Enabled"]:
                serviceTypes += (ScheduleViaISchedule,)
            if config.Scheduling[DeliveryService.serviceType_imip]["Enabled"]:
                serviceTypes += (ScheduleViaIMip,)
            for service in serviceTypes:
                matched = (yield service.matchCalendarUserAddress(cuaddr))
                if matched:
                    yield self.cache.set(str(cuaddr), service.serviceType())
                    returnValue(service.serviceType())

        returnValue(cuaddr_type)


    def isCalendarUserInMyDomain(self, cuaddr):

        # Check whether it is a possible local address
        def _gotResult(serviceType):
            return serviceType == DeliveryService.serviceType_caldav

        d = self.getCalendarUserServiceType(cuaddr)
        d.addCallback(_gotResult)
        return d
    @inlineCallbacks
    def test_checkAndSet(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing")

        result = yield cacher.set("akey", "avalue")
        self.assertTrue(result)

        value, identifier = yield cacher.get("akey", withIdentifier=True)
        self.assertEquals("avalue", value)
        self.assertEquals(identifier, "0")

        # Make sure cas identifier changes (we know the test implementation increases
        # by 1 each time)
        result = yield cacher.set("akey", "anothervalue")
        value, identifier = yield cacher.get("akey", withIdentifier=True)
        self.assertEquals("anothervalue", value)
        self.assertEquals(identifier, "1")

        # Should not work because key doesn't exist:
        self.assertFalse((yield cacher.checkAndSet("missingkey", "val", "0")))
        # Should not work because identifier doesn't match:
        self.assertFalse((yield cacher.checkAndSet("akey", "yetanother", "0")))
        # Should work because identifier does match:
        self.assertTrue((yield cacher.checkAndSet("akey", "yetanother", "1")))
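
The identifier returned by get(..., withIdentifier=True) is the CAS token that checkAndSet compares against. A sketch (not part of the original tests) of the read-modify-write loop this enables:

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def casUpdate(cacher, key, transform):
    # Hypothetical helper: retry whenever another writer bumped the CAS
    # identifier between our get() and our checkAndSet().
    while True:
        value, identifier = yield cacher.get(key, withIdentifier=True)
        newValue = transform(value)
        if (yield cacher.checkAndSet(key, newValue, identifier)):
            returnValue(newValue)
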
    @inlineCallbacks
    def test_setget(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            result = yield cacher.set("akey", "avalue")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            if isinstance(cacher._memcacheProtocol, Memcacher.nullCacher):
                self.assertEquals(None, result)
            else:
                self.assertEquals("avalue", result)
    def test_keynormalization(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            self.assertTrue(len(cacher._normalizeKey("A" * 100)) <= 250)
            self.assertTrue(len(cacher._normalizeKey("A" * 512)) <= 250)

            key = cacher._normalizeKey(" \n\t\r" * 20)
            self.assertTrue(" " not in key)
            self.assertTrue("\n" not in key)
            self.assertTrue("\t" not in key)
            self.assertTrue("\r" not in key)
    @inlineCallbacks
    def _doBatchRefresh(self):
        """
        Do refresh of attendees in batches until the batch list is empty.
        """

        # Need to lock whilst manipulating the batch list
        log.debug("ImplicitProcessing - batch refresh for UID: '%s'" % (self.uid,))
        lock = MemcacheLock(
            "BatchRefreshUIDLock",
            self.uid,
            timeout=config.Scheduling.Options.UIDLockTimeoutSeconds,
            expire_time=config.Scheduling.Options.UIDLockExpirySeconds,
        )
        try:
            yield lock.acquire()
        except MemcacheLockTimeoutError:
            # If we could not lock then just fail the refresh - not sure what else to do
            returnValue(None)

        try:
            # Get the batch list
            cache = Memcacher("BatchRefreshAttendees", pickle=True)
            pendingAttendees = yield cache.get(self.uid)
            if pendingAttendees:

                # Get the next batch of attendees to process and update the cache value or remove it if
                # no more processing is needed
                attendeesToProcess = pendingAttendees[:config.Scheduling.Options.AttendeeRefreshBatch]
                pendingAttendees = pendingAttendees[config.Scheduling.Options.AttendeeRefreshBatch:]
                if pendingAttendees:
                    yield cache.set(self.uid, pendingAttendees)
                else:
                    yield cache.delete(self.uid)

                # Make sure we release this here to avoid potential deadlock when grabbing the ImplicitUIDLock in the next call
                yield lock.release()

                # Now do the batch refresh
                yield self._doDelayedRefresh(attendeesToProcess)

                # Queue the next refresh if needed
                if pendingAttendees:
                    self._enqueueBatchRefresh()
            else:
                yield cache.delete(self.uid)
                yield lock.release()
        finally:
            yield lock.clean()
    @inlineCallbacks
    def test_keyValueLimits(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing", key_normalization=False)

        result = yield cacher.set("*", "*")
        self.assertTrue(result)

        # Key limits
        result = yield cacher.set("*" * (Memcacher.MEMCACHE_KEY_LIMIT + 10), "*")
        self.assertFalse(result)
        value = yield cacher.get("*" * (Memcacher.MEMCACHE_KEY_LIMIT + 10), withIdentifier=True)
        self.assertEquals(value, (None, "",))

        # Value limits
        result = yield cacher.set("*", "*" * (Memcacher.MEMCACHE_VALUE_LIMIT + 10))
        self.assertFalse(result)
    @inlineCallbacks
    def test_all_noinvalidation(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing", pickle=True, no_invalidation=True)

            result = yield cacher.set("akey", ["1", "2", "3", ])
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(["1", "2", "3", ], result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example #14
    def test_key_value_str(self):

        config.ProcessType = "Single"

        cacher = Memcacher("testing", pickle=False)
        self.failUnlessRaises(ValueError, cacher.set, "akey", ["1", "2", "3", ])
        self.failUnlessRaises(ValueError, cacher.set, "akey", u"abc")
    @inlineCallbacks
    def test_all_pickled(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing", pickle=True)

            result = yield cacher.set("akey", ["1", "2", "3", ])
            self.assertTrue(result)

            result = yield cacher.get("akey")
            if isinstance(cacher._memcacheProtocol, Memcacher.nullCacher):
                self.assertEquals(None, result)
            else:
                self.assertEquals(["1", "2", "3", ], result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
    @inlineCallbacks
    def test_expiration(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing")

        # Expire this key in 10 seconds
        result = yield cacher.set("akey", "avalue", 10)
        self.assertTrue(result)

        result = yield cacher.get("akey")
        self.assertEquals("avalue", result)

        # Advance time 9 seconds, key still there
        cacher._memcacheProtocol.advanceClock(9)
        result = yield cacher.get("akey")
        self.assertEquals("avalue", result)

        # Advance time 1 more second, key expired
        cacher._memcacheProtocol.advanceClock(1)
        result = yield cacher.get("akey")
        self.assertEquals(None, result)
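
Taken together, these tests pin down the Memcacher constructor options; as a summary sketch (glosses inferred from the tests above, not from separate documentation):

# pickle=True             - values are pickled, so non-str objects such as lists round-trip
# no_invalidation=True    - per test_all_noinvalidation, values survive set()/get() in every
#                           process type, i.e. a memory-backed cacher is used where the
#                           nullCacher would otherwise apply
# key_normalization=False - keys pass through untouched, so the caller must stay within
#                           Memcacher.MEMCACHE_KEY_LIMIT (see test_keyValueLimits)
cacher = Memcacher("testing", pickle=True, no_invalidation=True)
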
Example #19
class FBCacheEntry(object):

    CACHE_DAYS_FLOATING_ADJUST = 1

    fbcacher = Memcacher("FBCache", pickle=True)

    def __init__(self, key, token, timerange, fbresults):
        self.key = key
        self.token = token
        self.timerange = timerange.getText()
        self.fbresults = fbresults

    @classmethod
    @inlineCallbacks
    def getCacheEntry(cls, calresource, useruid, timerange):

        key = str(calresource.id()) + "/" + useruid
        token = (yield calresource.syncToken())
        entry = (yield cls.fbcacher.get(key))

        if entry:

            # Offset one day at either end to account for floating
            entry_timerange = Period.parseText(entry.timerange)
            cached_start = entry_timerange.getStart() + Duration(
                days=cls.CACHE_DAYS_FLOATING_ADJUST)
            cached_end = entry_timerange.getEnd() - Duration(
                days=cls.CACHE_DAYS_FLOATING_ADJUST)

            # Verify that the requested time range lies within the cache time range
            if compareDateTime(timerange.getEnd(),
                               cached_end) <= 0 and compareDateTime(
                                   timerange.getStart(), cached_start) >= 0:

                # Verify that cached entry is still valid
                if token == entry.token:
                    returnValue(entry.fbresults)

        returnValue(None)

    @classmethod
    @inlineCallbacks
    def makeCacheEntry(cls, calresource, useruid, timerange, fbresults):

        key = str(calresource.id()) + "/" + useruid
        token = (yield calresource.syncToken())
        entry = cls(key, token, timerange, fbresults)
        yield cls.fbcacher.set(key, entry)
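
A sketch of how the two classmethods pair up around a free-busy computation (computeFreebusy is an assumed placeholder, not a real API in this codebase):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def cachedFreebusy(calresource, useruid, timerange):
    # Serve from the cache while the resource's sync token and the cached
    # time range still cover the request; otherwise recompute and re-cache.
    fbresults = yield FBCacheEntry.getCacheEntry(calresource, useruid, timerange)
    if fbresults is None:
        fbresults = yield computeFreebusy(calresource, useruid, timerange)  # assumed helper
        yield FBCacheEntry.makeCacheEntry(calresource, useruid, timerange, fbresults)
    returnValue(fbresults)
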
Example #22
class PropertyStore(AbstractPropertyStore):
    """
    We are going to use memcache to cache properties per-resource/per-user. However, we
    need to be able to invalidate on a per-resource basis, in addition to per-resource/per-user.
    So we will also track in memcache which resource/uid tokens are valid. That way we can remove
    the tracking entry to completely invalidate all the per-resource/per-user pairs.
    """

    _cacher = Memcacher("SQL.props", pickle=True, key_normalization=False)

    def __init__(self, *a, **kw):
        raise NotImplementedError(
            "do not construct directly, call PropertyStore.load()")

    _allWithID = Select([prop.NAME, prop.VIEWER_UID, prop.VALUE],
                        From=prop,
                        Where=prop.RESOURCE_ID == Parameter("resourceID"))

    _allWithIDViewer = Select(
        [prop.NAME, prop.VALUE],
        From=prop,
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.VIEWER_UID == Parameter("viewerID")))

    def _cacheToken(self, userid):
        return "{0!s}/{1}".format(self._resourceID, userid)

    @inlineCallbacks
    def _refresh(self, txn):
        """
        Load, or re-load, this object with the given transaction; first from
        memcache, then pulling from the database again.
        """
        # Cache existing properties in this object
        # Look for memcache entry first

        @inlineCallbacks
        def _cache_user_props(uid):

            # First check whether uid already has a valid cached entry
            rows = None
            if self._cacher is not None:
                valid_cached_users = yield self._cacher.get(
                    str(self._resourceID))
                if valid_cached_users is None:
                    valid_cached_users = set()

                # Fetch cached user data if valid and present
                if uid in valid_cached_users:
                    rows = yield self._cacher.get(self._cacheToken(uid))

            # If no cached data, fetch from SQL DB and cache
            if rows is None:
                rows = yield self._allWithIDViewer.on(
                    txn,
                    resourceID=self._resourceID,
                    viewerID=uid,
                )
                if self._cacher is not None:
                    yield self._cacher.set(self._cacheToken(uid),
                                           rows if rows is not None else ())

                    # Mark this uid as valid
                    valid_cached_users.add(uid)
                    yield self._cacher.set(str(self._resourceID),
                                           valid_cached_users)

            for name, value in rows:
                self._cached[(name, uid)] = value

        # Cache for the owner first, then the sharee if different
        yield _cache_user_props(self._defaultUser)
        if self._perUser != self._defaultUser:
            yield _cache_user_props(self._perUser)
        if self._proxyUser != self._perUser:
            yield _cache_user_props(self._proxyUser)

    @classmethod
    @inlineCallbacks
    def load(cls,
             defaultuser,
             shareUser,
             proxyUser,
             txn,
             resourceID,
             created=False,
             notifyCallback=None):
        """
        @param notifyCallback: a callable used to trigger notifications when the
            property store changes.
        """
        self = cls.__new__(cls)
        super(PropertyStore, self).__init__(defaultuser, shareUser, proxyUser)
        self._txn = txn
        self._resourceID = resourceID
        if not self._txn.store().queryCachingEnabled():
            self._cacher = None
        self._cached = {}
        if not created:
            yield self._refresh(txn)
        self._notifyCallback = notifyCallback
        returnValue(self)

    @classmethod
    @inlineCallbacks
    def forMultipleResources(cls, defaultUser, shareeUser, proxyUser, txn,
                             childColumn, parentColumn, parentID):
        """
        Load all property stores for all objects in a collection.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be exposed.

        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.

        @type txn: L{IAsyncTransaction}

        @param childColumn: The resource ID column for the child resources, i.e.
            the resources of the type for which this method will be loading the
            property stores.

        @param parentColumn: The resource ID column for the parent resources.
            e.g. if childColumn is addressbook object's resource ID, then this
            should be addressbook's resource ID.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
            value taken from C{childColumn}) to a L{PropertyStore} for that ID.
        """
        childTable = TableSyntax(childColumn.model.table)
        query = Select(
            [
                childColumn,
                # XXX is that column necessary?  as per the 'on' clause it has to be
                # the same as prop.RESOURCE_ID anyway.
                prop.RESOURCE_ID,
                prop.NAME,
                prop.VIEWER_UID,
                prop.VALUE
            ],
            From=prop.join(childTable, prop.RESOURCE_ID == childColumn,
                           'right'),
            Where=parentColumn == parentID)
        rows = yield query.on(txn)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser,
                                           txn, rows)
        returnValue(stores)

    @classmethod
    @inlineCallbacks
    def forMultipleResourcesWithResourceIDs(cls, defaultUser, shareeUser,
                                            proxyUser, txn, resourceIDs):
        """
        Load all property stores for all specified resources.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query. Note that the caller of this
        method must make sure that the number of items being queried for is
        within a reasonable batch size. If the caller is itself batching
        related queries, that will take care of itself.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be exposed.

        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.

        @type txn: L{IAsyncTransaction}

        @param resourceIDs: The set of resource ID's to query.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
            value from C{resourceIDs}) to a L{PropertyStore} for that ID.
        """
        query = Select(
            [prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop,
            Where=prop.RESOURCE_ID.In(
                Parameter("resourceIDs", len(resourceIDs))))
        rows = yield query.on(txn, resourceIDs=resourceIDs)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser,
                                           txn, rows)

        # Make sure we have a store for each resourceID even if no properties exist
        for resourceID in resourceIDs:
            if resourceID not in stores:
                store = cls.__new__(cls)
                super(PropertyStore, store).__init__(defaultUser, shareeUser,
                                                     proxyUser)
                store._txn = txn
                store._resourceID = resourceID
                store._cached = {}
                stores[resourceID] = store

        returnValue(stores)

    @classmethod
    def _createMultipleStores(cls, defaultUser, shareeUser, proxyUser, txn,
                              rows):
        """
        Create a set of stores for the set of rows passed in.
        """

        createdStores = {}
        for row in rows:
            if len(row) == 5:
                object_resource_id, resource_id, name, view_uid, value = row
            else:
                object_resource_id = None
                resource_id, name, view_uid, value = row
            if resource_id:
                if resource_id not in createdStores:
                    store = cls.__new__(cls)
                    super(PropertyStore,
                          store).__init__(defaultUser, shareeUser, proxyUser)
                    store._txn = txn
                    store._resourceID = resource_id
                    store._cached = {}
                    createdStores[resource_id] = store
                createdStores[resource_id]._cached[(name, view_uid)] = value
            elif object_resource_id:
                store = cls.__new__(cls)
                super(PropertyStore, store).__init__(defaultUser, shareeUser,
                                                     proxyUser)
                store._txn = txn
                store._resourceID = object_resource_id
                store._cached = {}
                createdStores[object_resource_id] = store

        return createdStores

    def _getitem_uid(self, key, uid):
        validKey(key)

        try:
            value = self._cached[(key.toString(), uid)]
        except KeyError:
            raise KeyError(key)

        return WebDAVDocument.fromString(value).root_element

    _updateQuery = Update(
        {prop.VALUE: Parameter("value")},
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.NAME == Parameter("name")).And(
                prop.VIEWER_UID == Parameter("uid")))

    _insertQuery = Insert({
        prop.VALUE: Parameter("value"),
        prop.RESOURCE_ID: Parameter("resourceID"),
        prop.NAME: Parameter("name"),
        prop.VIEWER_UID: Parameter("uid")
    })

    def _setitem_uid(self, key, value, uid):
        validKey(key)

        key_str = key.toString()
        value_str = value.toxml()

        tried = []

        wasCached = [(key_str, uid) in self._cached]
        self._cached[(key_str, uid)] = value_str

        @inlineCallbacks
        def trySetItem(txn):
            if tried:
                yield self._refresh(txn)
                wasCached[:] = [(key_str, uid) in self._cached]
            tried.append(True)
            if wasCached[0]:
                yield self._updateQuery.on(txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)
            else:
                yield self._insertQuery.on(txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)
            if self._cacher is not None:
                self._cacher.delete(self._cacheToken(uid))

        # Call the registered notification callback - we need to do this as a preCommit since it involves
        # a bunch of deferred operations, but this propstore api is not deferred. preCommit will execute
        # the deferreds properly, and it is fine to wait until everything else is done before sending the
        # notifications.
        if hasattr(self,
                   "_notifyCallback") and self._notifyCallback is not None:
            self._txn.preCommit(self._notifyCallback)

        def justLogIt(f):
            f.trap(AllRetriesFailed)
            self.log.error("setting a property failed; probably nothing.")

        self._txn.subtransaction(trySetItem).addErrback(justLogIt)

    _deleteQuery = Delete(
        prop,
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.NAME == Parameter("name")).And(
                prop.VIEWER_UID == Parameter("uid")))

    def _delitem_uid(self, key, uid):
        validKey(key)

        key_str = key.toString()
        del self._cached[(key_str, uid)]

        @inlineCallbacks
        def doIt(txn):
            yield self._deleteQuery.on(txn,
                                       lambda: KeyError(key),
                                       resourceID=self._resourceID,
                                       name=key_str,
                                       uid=uid)
            if self._cacher is not None:
                self._cacher.delete(self._cacheToken(uid))

        # Call the registered notification callback - we need to do this as a preCommit since it involves
        # a bunch of deferred operations, but this propstore api is not deferred. preCommit will execute
        # the deferreds properly, and it is fine to wait until everything else is done before sending the
        # notifications.
        if hasattr(self,
                   "_notifyCallback") and self._notifyCallback is not None:
            self._txn.preCommit(self._notifyCallback)

        def justLogIt(f):
            f.trap(AllRetriesFailed)
            self.log.error("setting a property failed; probably nothing.")

        self._txn.subtransaction(doIt).addErrback(justLogIt)

    def _keys_uid(self, uid):
        for cachedKey, cachedUID in self._cached.keys():
            if cachedUID == uid:
                yield PropertyName.fromString(cachedKey)

    _deleteResourceQuery = Delete(
        prop, Where=(prop.RESOURCE_ID == Parameter("resourceID")))

    @inlineCallbacks
    def _removeResource(self):

        self._cached = {}
        yield self._deleteResourceQuery.on(self._txn,
                                           resourceID=self._resourceID)

        # Invalidate entire set of cached per-user data for this resource
        if self._cacher is not None:
            self._cacher.delete(str(self._resourceID))

    @inlineCallbacks
    def copyAllProperties(self, other):
        """
        Copy all the properties from another store into this one. This needs to be done
        independently of the UID.
        """

        rows = yield other._allWithID.on(other._txn,
                                         resourceID=other._resourceID)
        for key_str, uid, value_str in rows:
            wasCached = [(key_str, uid) in self._cached]
            if wasCached[0]:
                yield self._updateQuery.on(self._txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)
            else:
                yield self._insertQuery.on(self._txn,
                                           resourceID=self._resourceID,
                                           value=value_str,
                                           name=key_str,
                                           uid=uid)

        # Invalidate entire set of cached per-user data for this resource and reload
        self._cached = {}
        if self._cacher is not None:
            self._cacher.delete(str(self._resourceID))
        yield self._refresh(self._txn)
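
The docstring's two-level scheme in miniature, read side only (a sketch; cacher stands in for PropertyStore._cacher):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def getCachedUserProps(cacher, resourceID, uid):
    # Per-user rows are cached under "resourceID/uid"; the entry under
    # str(resourceID) is the tracking set of uids whose per-user entries are
    # valid. Deleting just that tracking entry therefore invalidates every
    # per-resource/per-user pair at once, as _removeResource() does above.
    valid = (yield cacher.get(str(resourceID))) or set()
    if uid in valid:
        returnValue((yield cacher.get("{0!s}/{1}".format(resourceID, uid))))
    returnValue(None)  # fall back to SQL and repopulate both levels
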
Example #24
from pycalendar.duration import Duration

from twext.python.log import Logger

from twisted.internet.defer import inlineCallbacks, returnValue

from twistedcaldav.instance import InstanceList
from twistedcaldav.memcacher import Memcacher

from txdav.caldav.datastore.query.filter import Filter
from txdav.caldav.icalendarstore import QueryMaxResources
from txdav.common.icommondatastore import IndexedSearchException, \
    InternalDataStoreError

import uuid

log = Logger()

fbtype_mapper = {"BUSY": 0, "BUSY-TENTATIVE": 1, "BUSY-UNAVAILABLE": 2}
fbtype_index_mapper = {'B': 0, 'T': 1, 'U': 2}

fbcacher = Memcacher("FBCache", pickle=True)


class FBCacheEntry(object):

    CACHE_DAYS_FLOATING_ADJUST = 1

    def __init__(self, key, token, timerange, fbresults):
        self.key = key
        self.token = token
        self.timerange = timerange
        self.fbresults = fbresults

    @classmethod
    @inlineCallbacks
    def getCacheEntry(cls, calresource, useruid, timerange):

        key = str(calresource.id()) + "/" + useruid
        token = (yield calresource.syncToken())
        entry = (yield fbcacher.get(key))

        if entry:

            # Offset one day at either end to account for floating
            cached_start = entry.timerange.getStart() + Duration(days=cls.CACHE_DAYS_FLOATING_ADJUST)
            cached_end = entry.timerange.getEnd() - Duration(days=cls.CACHE_DAYS_FLOATING_ADJUST)

            # Verify that the requested time range lies within the cached range
            # and that the cached entry is still valid for the current sync token
            if timerange.getStart() >= cached_start and timerange.getEnd() <= cached_end:
                if token == entry.token:
                    returnValue(entry.fbresults)

        returnValue(None)
    def _getFakeMemcacheProtocol(self):
        result = super(MemcacheLock, self)._getMemcacheProtocol()
        if isinstance(result, Memcacher.nullCacher):
            result = self._memcacheProtocol = Memcacher.memoryCacher()
        return result
    @inlineCallbacks
    def queueAttendeeUpdate(self, exclude_attendees):
        """
        Queue up an update to attendees and use a memcache lock to ensure we don't update too frequently.

        @param exclude_attendees: list of attendees who should not be refreshed (e.g., the one that triggered the refresh)
        @type exclude_attendees: C{list}
        """

        # When doing auto-processing of replies, only refresh attendees when the last auto-accept is done.
        # Note that when we do this we also need to refresh the attendee that is generating the reply because they
        # are no longer up to date with changes of other auto-accept attendees.
        if hasattr(self.request, "auto_reply_processing_count") and self.request.auto_reply_processing_count > 1:
            self.request.auto_reply_suppressed = True
            returnValue(None)
        if hasattr(self.request, "auto_reply_suppressed"):
            exclude_attendees = ()

        self.uid = self.recipient_calendar.resourceUID()

        # Check for batched refreshes
        if config.Scheduling.Options.AttendeeRefreshBatch:

            # Need to lock whilst manipulating the batch list
            lock = MemcacheLock(
                "BatchRefreshUIDLock",
                self.uid,
                timeout=config.Scheduling.Options.UIDLockTimeoutSeconds,
                expire_time=config.Scheduling.Options.UIDLockExpirySeconds,
            )
            try:
                yield lock.acquire()
            except MemcacheLockTimeoutError:
                # If we could not lock then just fail the refresh - not sure what else to do
                returnValue(None)

            try:
                # Get all attendees to refresh
                allAttendees = sorted(list(self.recipient_calendar.getAllUniqueAttendees()))
                allAttendees = filter(lambda x: x not in exclude_attendees, allAttendees)

                if allAttendees:
                    # See if there is already a pending refresh and merge current attendees into that list,
                    # otherwise just mark all attendees as pending
                    cache = Memcacher("BatchRefreshAttendees", pickle=True)
                    pendingAttendees = yield cache.get(self.uid)
                    firstTime = False
                    if pendingAttendees:
                        for attendee in allAttendees:
                            if attendee not in pendingAttendees:
                                pendingAttendees.append(attendee)
                    else:
                        firstTime = True
                        pendingAttendees = allAttendees
                    yield cache.set(self.uid, pendingAttendees)

                    # Now start the first batch off
                    if firstTime:
                        self._enqueueBatchRefresh()
            finally:
                yield lock.clean()

        else:
            yield self._doRefresh(self.organizer_calendar_resource, exclude_attendees)
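
Condensed, the producer side above and the consumer _doBatchRefresh (shown earlier) share one cache-plus-lock protocol; a sketch of the producer half (the helper name is hypothetical):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def mergePendingAttendees(cache, lock, uid, newAttendees):
    # Merge attendees into the pending list under the UID lock; report whether
    # this was the first write so the caller knows to enqueue the first batch.
    yield lock.acquire()
    try:
        pending = (yield cache.get(uid)) or []
        first = not pending
        pending.extend(a for a in newAttendees if a not in pending)
        yield cache.set(uid, pending)
    finally:
        yield lock.clean()
    returnValue(first)
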