Example no. 1
    def test_checkAndSet(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing")

        result = yield cacher.set("akey", "avalue")
        self.assertTrue(result)

        value, identifier = yield cacher.get("akey", withIdentifier=True)
        self.assertEquals("avalue", value)
        self.assertEquals(identifier, "0")

        # Make sure cas identifier changes (we know the test implementation increases
        # by 1 each time)
        result = yield cacher.set("akey", "anothervalue")
        value, identifier = yield cacher.get("akey", withIdentifier=True)
        self.assertEquals("anothervalue", value)
        self.assertEquals(identifier, "1")

        # Should not work because key doesn't exist:
        self.assertFalse((yield cacher.checkAndSet("missingkey", "val", "0")))
        # Should not work because identifier doesn't match:
        self.assertFalse((yield cacher.checkAndSet("akey", "yetanother", "0")))
        # Should work because identifier does match:
        self.assertTrue((yield cacher.checkAndSet("akey", "yetanother", "1")))
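
The test above exercises the CAS identifier returned by get(..., withIdentifier=True): checkAndSet only writes when the supplied identifier still matches the stored one. A minimal sketch, not part of the original suite, of an optimistic read-modify-write retry built on that API (the optimisticUpdate helper and the retry count are assumptions; the Memcacher calls and Twisted imports are taken from the examples):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def optimisticUpdate(cacher, key, computeNewValue, attempts=3):
    # Retry until the CAS identifier is still current at write time,
    # or the attempts run out.
    for _ in range(attempts):
        oldValue, identifier = yield cacher.get(key, withIdentifier=True)
        newValue = computeNewValue(oldValue)
        if (yield cacher.checkAndSet(key, newValue, identifier)):
            returnValue(True)
    returnValue(False)
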
Example no. 2
    def test_all_pickled(self):

        for processType in (
                "Single",
                "Combined",
        ):
            config.ProcessType = processType

            cacher = Memcacher("testing", pickle=True)

            result = yield cacher.set("akey", [
                "1",
                "2",
                "3",
            ])
            self.assertTrue(result)

            result = yield cacher.get("akey")
            if isinstance(cacher._memcacheProtocol, Memcacher.nullCacher):
                self.assertEquals(None, result)
            else:
                self.assertEquals([
                    "1",
                    "2",
                    "3",
                ], result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 3
    def test_all_noinvalidation(self):

        for processType in (
                "Single",
                "Combined",
        ):
            config.ProcessType = processType

            cacher = Memcacher("testing", no_invalidation=True)

            result = yield cacher.set("akey", [
                "1",
                "2",
                "3",
            ])
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals([
                "1",
                "2",
                "3",
            ], result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 4
    def test_checkAndSet(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing")

        result = yield cacher.set("akey", "avalue")
        self.assertTrue(result)

        value, identifier = yield cacher.get("akey", withIdentifier=True)
        self.assertEquals("avalue", value)
        self.assertEquals(identifier, "0")

        # Make sure cas identifier changes (we know the test implementation increases
        # by 1 each time)
        result = yield cacher.set("akey", "anothervalue")
        value, identifier = yield cacher.get("akey", withIdentifier=True)
        self.assertEquals("anothervalue", value)
        self.assertEquals(identifier, "1")

        # Should not work because key doesn't exist:
        self.assertFalse((yield cacher.checkAndSet("missingkey", "val", "0")))
        # Should not work because identifier doesn't match:
        self.assertFalse((yield cacher.checkAndSet("akey", "yetanother", "0")))
        # Should work because identifier does match:
        self.assertTrue((yield cacher.checkAndSet("akey", "yetanother", "1")))
Example no. 5
class ScheduleAddressMapper(object):
    """
    Class that maps a calendar user address into a delivery service type.
    """

    def __init__(self):

        # We are going to cache mappings whilst running
        self.cache = Memcacher("ScheduleAddressMapper", no_invalidation=True)


    @inlineCallbacks
    def getCalendarUser(self, cuaddr, principal):

        # If we have a principal always treat the user as local or partitioned
        if principal:
            returnValue(calendarUserFromPrincipal(cuaddr, principal))

        # Get the type
        cuaddr_type = (yield self.getCalendarUserServiceType(cuaddr))
        if cuaddr_type == DeliveryService.serviceType_caldav:
            returnValue(InvalidCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_ischedule:
            returnValue(RemoteCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_imip:
            returnValue(EmailCalendarUser(cuaddr))
        else:
            returnValue(InvalidCalendarUser(cuaddr))


    @inlineCallbacks
    def getCalendarUserServiceType(self, cuaddr):

        # Try cache first
        cuaddr_type = (yield self.cache.get(str(cuaddr)))
        if cuaddr_type is None:

            serviceTypes = (ScheduleViaCalDAV,)
            if config.Scheduling[DeliveryService.serviceType_ischedule]["Enabled"]:
                serviceTypes += (ScheduleViaISchedule,)
            if config.Scheduling[DeliveryService.serviceType_imip]["Enabled"]:
                serviceTypes += (ScheduleViaIMip,)
            for service in serviceTypes:
                matched = (yield service.matchCalendarUserAddress(cuaddr))
                if matched:
                    yield self.cache.set(str(cuaddr), service.serviceType())
                    returnValue(service.serviceType())

        returnValue(cuaddr_type)


    def isCalendarUserInMyDomain(self, cuaddr):

        # Check whether it is a possible local address
        def _gotResult(serviceType):
            return serviceType == DeliveryService.serviceType_caldav

        d = self.getCalendarUserServiceType(cuaddr)
        d.addCallback(_gotResult)
        return d
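
The mapper caches the per-address service type in the "ScheduleAddressMapper" Memcacher (created with no_invalidation=True because, as its comment notes, mappings are cached whilst running), so only the first lookup for a given address walks the enabled delivery services. A short usage sketch, assuming a configured scheduling environment; the address string and the _classifyAddress helper are illustrative:

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def _classifyAddress(cuaddr):
    mapper = ScheduleAddressMapper()
    # First call walks the enabled delivery services and caches the result;
    # a repeated call for the same address is answered from the Memcacher.
    serviceType = yield mapper.getCalendarUserServiceType(cuaddr)
    isLocal = yield mapper.isCalendarUserInMyDomain(cuaddr)
    returnValue((serviceType, isLocal))
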
Example no. 6
    def test_all_noinvalidation(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing", pickle=True, no_invalidation=True)

            result = yield cacher.set("akey", ["1", "2", "3", ])
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(["1", "2", "3", ], result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 7
    def test_missingget(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 8
class ScheduleAddressMapper(object):
    """
    Class that maps a calendar user address into a delivery service type.
    """

    def __init__(self):

        # We are going to cache mappings whilst running
        self.cache = Memcacher("ScheduleAddressMapper", no_invalidation=True)


    @inlineCallbacks
    def getCalendarUser(self, cuaddr):

        # Get the type
        cuaddr_type = (yield self.getCalendarUserServiceType(cuaddr))
        if cuaddr_type == DeliveryService.serviceType_caldav:
            returnValue(InvalidCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_ischedule:
            returnValue(RemoteCalendarUser(cuaddr))
        elif cuaddr_type == DeliveryService.serviceType_imip:
            returnValue(EmailCalendarUser(cuaddr))
        else:
            returnValue(InvalidCalendarUser(cuaddr))


    @inlineCallbacks
    def getCalendarUserServiceType(self, cuaddr):

        # Try cache first
        cuaddr_type = (yield self.cache.get(str(cuaddr)))
        if cuaddr_type is None:

            serviceTypes = (ScheduleViaCalDAV,)
            if config.Scheduling[DeliveryService.serviceType_ischedule]["Enabled"]:
                serviceTypes += (ScheduleViaISchedule,)
            if config.Scheduling[DeliveryService.serviceType_imip]["Enabled"]:
                serviceTypes += (ScheduleViaIMip,)
            for service in serviceTypes:
                matched = (yield service.matchCalendarUserAddress(cuaddr))
                if matched:
                    yield self.cache.set(str(cuaddr), service.serviceType())
                    returnValue(service.serviceType())

        returnValue(cuaddr_type)


    def isCalendarUserInMyDomain(self, cuaddr):

        # Check whether it is a possible local address
        def _gotResult(serviceType):
            return serviceType == DeliveryService.serviceType_caldav

        d = self.getCalendarUserServiceType(cuaddr)
        d.addCallback(_gotResult)
        return d
Example no. 9
    def test_all_pickled(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing", pickle=True)

            result = yield cacher.set("akey", ["1", "2", "3", ])
            self.assertTrue(result)

            result = yield cacher.get("akey")
            if isinstance(cacher._memcacheProtocol, Memcacher.nullCacher):
                self.assertEquals(None, result)
            else:
                self.assertEquals(["1", "2", "3", ], result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 10
    def test_delete(self):

        for processType in ("Single", "Combined",):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            result = yield cacher.set("akey", "avalue")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            if isinstance(cacher._memcacheProtocol, Memcacher.nullCacher):
                self.assertEquals(None, result)
            else:
                self.assertEquals("avalue", result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 11
    def test_expiration(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing")

        # Expire this key in 10 seconds
        result = yield cacher.set("akey", "avalue", 10)
        self.assertTrue(result)

        result = yield cacher.get("akey")
        self.assertEquals("avalue", result)

        # Advance time 9 seconds, key still there
        cacher._memcacheProtocol.advanceClock(9)
        result = yield cacher.get("akey")
        self.assertEquals("avalue", result)

        # Advance time 1 more second, key expired
        cacher._memcacheProtocol.advanceClock(1)
        result = yield cacher.get("akey")
        self.assertEquals(None, result)
Example no. 12
    def test_missingget(self):

        for processType in (
                "Single",
                "Combined",
        ):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 13
    def test_expiration(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing")

        # Expire this key in 10 seconds
        result = yield cacher.set("akey", "avalue", 10)
        self.assertTrue(result)

        result = yield cacher.get("akey")
        self.assertEquals("avalue", result)

        # Advance time 9 seconds, key still there
        cacher._memcacheProtocol.advanceClock(9)
        result = yield cacher.get("akey")
        self.assertEquals("avalue", result)

        # Advance time 1 more second, key expired
        cacher._memcacheProtocol.advanceClock(1)
        result = yield cacher.get("akey")
        self.assertEquals(None, result)
Example no. 14
    def test_delete(self):

        for processType in (
                "Single",
                "Combined",
        ):
            config.ProcessType = processType

            cacher = Memcacher("testing")

            result = yield cacher.set("akey", "avalue")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            if isinstance(cacher._memcacheProtocol, Memcacher.nullCacher):
                self.assertEquals(None, result)
            else:
                self.assertEquals("avalue", result)

            result = yield cacher.delete("akey")
            self.assertTrue(result)

            result = yield cacher.get("akey")
            self.assertEquals(None, result)
Example no. 15
    def _doBatchRefresh(self):
        """
        Do refresh of attendees in batches until the batch list is empty.
        """

        # Need to lock whilst manipulating the batch list
        log.debug("ImplicitProcessing - batch refresh for UID: '%s'" % (self.uid,))
        lock = MemcacheLock(
            "BatchRefreshUIDLock",
            self.uid,
            timeout=config.Scheduling.Options.UIDLockTimeoutSeconds,
            expire_time=config.Scheduling.Options.UIDLockExpirySeconds,
        )
        try:
            yield lock.acquire()
        except MemcacheLockTimeoutError:
            # If we could not lock then just fail the refresh - not sure what else to do
            returnValue(None)

        try:
            # Get the batch list
            cache = Memcacher("BatchRefreshAttendees", pickle=True)
            pendingAttendees = yield cache.get(self.uid)
            if pendingAttendees:

                # Get the next batch of attendees to process and update the cache value or remove it if
                # no more processing is needed
                attendeesToProcess = pendingAttendees[:config.Scheduling.Options.AttendeeRefreshBatch]
                pendingAttendees = pendingAttendees[config.Scheduling.Options.AttendeeRefreshBatch:]
                if pendingAttendees:
                    yield cache.set(self.uid, pendingAttendees)
                else:
                    yield cache.delete(self.uid)

                # Make sure we release this here to avoid potential deadlock when grabbing the ImplicitUIDLock in the next call
                yield lock.release()

                # Now do the batch refresh
                yield self._doDelayedRefresh(attendeesToProcess)

                # Queue the next refresh if needed
                if pendingAttendees:
                    self._enqueueBatchRefresh()
            else:
                yield cache.delete(self.uid)
                yield lock.release()
        finally:
            yield lock.clean()
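
The refresh above pulls config.Scheduling.Options.AttendeeRefreshBatch attendees off the front of the cached pending list and writes the remainder back, deleting the cache key once nothing is left. A minimal standalone sketch of that slicing step, with an assumed batch size of 5 and made-up attendee addresses:

BATCH_SIZE = 5  # stands in for config.Scheduling.Options.AttendeeRefreshBatch
pendingAttendees = ["mailto:user%02d@example.com" % (i,) for i in range(1, 13)]

# Take the next batch to process and keep the rest pending.
attendeesToProcess = pendingAttendees[:BATCH_SIZE]
pendingAttendees = pendingAttendees[BATCH_SIZE:]

assert len(attendeesToProcess) == 5
assert len(pendingAttendees) == 7   # a further batch refresh would be queued for these
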
Example no. 16
    def test_keyValueLimits(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing", key_normalization=False)

        result = yield cacher.set("*", "*")
        self.assertTrue(result)

        # Key limits
        result = yield cacher.set("*" * (Memcacher.MEMCACHE_KEY_LIMIT + 10), "*")
        self.assertFalse(result)
        value = yield cacher.get("*" * (Memcacher.MEMCACHE_KEY_LIMIT + 10), "*")
        self.assertEquals(value, (None, "",))

        # Value limits
        result = yield cacher.set("*", "*" * (Memcacher.MEMCACHE_VALUE_LIMIT + 10))
        self.assertFalse(result)
Example no. 17
    def test_keyValueLimits(self):

        config.ProcessType = "Single"
        cacher = Memcacher("testing", key_normalization=False)

        result = yield cacher.set("*", "*")
        self.assertTrue(result)

        # Key limits
        result = yield cacher.set("*" * (Memcacher.MEMCACHE_KEY_LIMIT + 10),
                                  "*")
        self.assertFalse(result)
        value = yield cacher.get("*" * (Memcacher.MEMCACHE_KEY_LIMIT + 10),
                                 "*")
        self.assertEquals(value, (
            None,
            "",
        ))

        # Value limits
        result = yield cacher.set("*",
                                  "*" * (Memcacher.MEMCACHE_VALUE_LIMIT + 10))
        self.assertFalse(result)
Example no. 18
    def queueAttendeeUpdate(self, exclude_attendees):
        """
        Queue up an update to attendees and use a memcache lock to ensure we don't update too frequently.

        @param exclude_attendees: list of attendees who should not be refreshed (e.g., the one that triggered the refresh)
        @type exclude_attendees: C{list}
        """

        # When doing auto-processing of replies, only refresh attendees when the last auto-accept is done.
        # Note that when we do this we also need to refresh the attendee that is generating the reply because they
        # are no longer up to date with changes of other auto-accept attendees.
        if hasattr(self.request, "auto_reply_processing_count") and self.request.auto_reply_processing_count > 1:
            self.request.auto_reply_suppressed = True
            returnValue(None)
        if hasattr(self.request, "auto_reply_suppressed"):
            exclude_attendees = ()

        self.uid = self.recipient_calendar.resourceUID()

        # Check for batched refreshes
        if config.Scheduling.Options.AttendeeRefreshBatch:

            # Need to lock whilst manipulating the batch list
            lock = MemcacheLock(
                "BatchRefreshUIDLock",
                self.uid,
                timeout=config.Scheduling.Options.UIDLockTimeoutSeconds,
                expire_time=config.Scheduling.Options.UIDLockExpirySeconds,
            )
            try:
                yield lock.acquire()
            except MemcacheLockTimeoutError:
                # If we could not lock then just fail the refresh - not sure what else to do
                returnValue(None)

            try:
                # Get all attendees to refresh
                allAttendees = sorted(list(self.recipient_calendar.getAllUniqueAttendees()))
                allAttendees = filter(lambda x: x not in exclude_attendees, allAttendees)

                if allAttendees:
                    # See if there is already a pending refresh and merge current attendees into that list,
                    # otherwise just mark all attendees as pending
                    cache = Memcacher("BatchRefreshAttendees", pickle=True)
                    pendingAttendees = yield cache.get(self.uid)
                    firstTime = False
                    if pendingAttendees:
                        for attendee in allAttendees:
                            if attendee not in pendingAttendees:
                                pendingAttendees.append(attendee)
                    else:
                        firstTime = True
                        pendingAttendees = allAttendees
                    yield cache.set(self.uid, pendingAttendees)

                    # Now start the first batch off
                    if firstTime:
                        self._enqueueBatchRefresh()
            finally:
                yield lock.clean()

        else:
            yield self._doRefresh(self.organizer_calendar_resource, exclude_attendees)
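
Both of the last two snippets serialise access to the shared batch list with a MemcacheLock keyed on the calendar UID and always clean the lock up in a finally block. A condensed sketch of that pattern, assuming the same MemcacheLock / MemcacheLockTimeoutError API and config values used above (the _withBatchRefreshLock helper and its doUpdate callable are placeholders, not part of the original code):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def _withBatchRefreshLock(uid, doUpdate):
    lock = MemcacheLock(
        "BatchRefreshUIDLock",
        uid,
        timeout=config.Scheduling.Options.UIDLockTimeoutSeconds,
        expire_time=config.Scheduling.Options.UIDLockExpirySeconds,
    )
    try:
        yield lock.acquire()
    except MemcacheLockTimeoutError:
        # Mirror the original code: if the lock cannot be taken, give up.
        returnValue(None)
    try:
        result = yield doUpdate()
    finally:
        # As in the snippets above, the lock is always cleaned up here.
        yield lock.clean()
    returnValue(result)
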