def test_simple_lru_expulsion_maxsize_1(self):
        # With maxsize 1, every distinct key expels the previous entry.
        self.lru = lru.AsyncLRUCache(self.short_miss_fn, 1)
        d = defer.succeed(None)

        def queue_get(key, expected, hits, misses):
            # chain a get() for `key` followed by a result/stats check
            d.addCallback(lambda _: self.lru.get(key))
            d.addCallback(self.check_result, expected, hits, misses)

        queue_get('a', short('a'), 0, 1)  # first access: miss
        queue_get('a', short('a'), 1, 1)  # cached: hit
        queue_get('b', short('b'), 1, 2)  # miss; expels 'a'

        # now try 'a' again - it should be a miss
        self.lru.miss_fn = self.long_miss_fn
        queue_get('a', long('a'), 1, 3)

        # ..and that expelled B
        queue_get('b', long('b'), 1, 4)
        return d
Example #2
0
    def do_fuzz(self, endTime):
        """
        Stress the cache with many overlapping C{get()} calls until
        C{endTime} (a C{reactor.seconds()} timestamp), then drain pending
        delayed calls and verify the cache invariants held throughout.

        Yields Deferreds, so it must be driven as an
        C{inlineCallbacks}-style generator.
        """
        # reset the module-level flag that lru sets when an invariant breaks
        lru.inv_failed = False

        def delayed_miss_fn(key):
            # resolve to set([key + 1000]) after a small random delay,
            # so many requests are in flight at once
            return deferUntilLater(random.uniform(0.001, 0.002),
                                   set([key + 1000]))

        self.lru = lru.AsyncLRUCache(delayed_miss_fn, 50)

        keys = range(250)
        errors = []  # bail out early in the event of an error
        results = []  # keep references to (most) results

        # fire off as many requests as we can in one second, with lots of
        # overlap.
        while not errors and reactor.seconds() < endTime:
            key = random.choice(keys)

            d = self.lru.get(key)

            def check(result, key):
                # the miss function always produces set([key + 1000])
                self.assertEqual(result, set([key + 1000]))
                if random.uniform(0, 1.0) < 0.9:
                    results.append(result)
                    # keep only the ~100 most recent references alive
                    results[:-100] = []

            d.addCallback(check, key)

            def eb(f):
                errors.append(f)
                return f  # unhandled error -> in the logs

            d.addErrback(eb)

            # give the reactor some time to process pending events
            if random.uniform(0, 1.0) < 0.5:
                yield deferUntilLater(0)

        # now wait until all of the pending calls have cleared, noting that
        # this method will be counted as one delayed call, in the current
        # implementation
        while len(reactor.getDelayedCalls()) > 1:
            # give the reactor some time to process pending events
            yield deferUntilLater(0.001)

        self.assertFalse(lru.inv_failed, "invariant failed; see logs")
        log.msg("hits: %d; misses: %d; refhits: %d" %
                (self.lru.hits, self.lru.misses, self.lru.refhits))
Example #3
0
    def test_simple_lru_expulsion_maxsize_1_null_result(self):
        # a regression test for #2011: a None result must not expel entries
        def miss_fn(key):
            # 'b' deliberately resolves to None; everything else to short(key)
            value = None if key == 'b' else short(key)
            return defer.succeed(value)

        self.lru = lru.AsyncLRUCache(miss_fn, 1)

        self.check_result((yield self.lru.get('a')), short('a'), 0, 1)
        self.check_result((yield self.lru.get('b')), None, 0, 2)

        # 'a' was not expelled since 'b' was None
        self.lru.miss_fn = self.long_miss_fn
        self.check_result((yield self.lru.get('a')), short('a'), 1, 2)
Example #4
0
    def test_fuzz(self):
        """
        Fuzz the cache for C{self.FUZZ_TIME} seconds with overlapping,
        randomly-delayed C{get()} calls, then wait for every pending
        delayed call to drain.

        Yields Deferreds, so it must be driven as an
        C{inlineCallbacks}-style generator.
        """
        started = reactor.seconds()

        def delayed_miss_fn(key):
            # resolve to set([key + 1000]) after a small random delay,
            # so many requests are in flight at once
            return deferUntilLater(random.uniform(0.001, 0.002),
                                   set([key + 1000]))

        self.lru = lru.AsyncLRUCache(delayed_miss_fn, 50)

        keys = range(250)
        errors = []  # bail out early in the event of an error
        results = []  # keep references to (most) results

        # fire off as many requests as we can in the time alotted
        while not errors and reactor.seconds() - started < self.FUZZ_TIME:
            key = random.choice(keys)

            d = self.lru.get(key)

            def check(result, key):
                # the miss function always produces set([key + 1000])
                self.assertEqual(result, set([key + 1000]))
                if random.uniform(0, 1.0) < 0.9:
                    results.append(result)
                    # keep only the ~100 most recent references alive
                    results[:-100] = []

            d.addCallback(check, key)

            def eb(f):
                errors.append(f)
                return f  # unhandled error -> in the logs

            d.addErrback(eb)

            # give the reactor some time to process pending events
            if random.uniform(0, 1.0) < 0.5:
                yield deferUntilLater(0)

        # now wait until all of the pending calls have cleared, noting that
        # this method will be counted as one delayed call, in the current
        # implementation
        while len(reactor.getDelayedCalls()) > 1:
            # give the reactor some time to process pending events
            yield deferUntilLater(0.001)
Example #5
0
    def get_cache(self, cache_name, miss_fn):
        """
        Get the L{AsyncLRUCache} object with the given name, creating it on
        first use.  Since the cache is permanent, this method can be called
        only once, e.g., in C{startService}, and its value stored
        indefinitely.

        @param cache_name: name of the cache (usually the name of the type of
        object it stores)
        @param miss_fn: miss function for the cache; see L{AsyncLRUCache}
        constructor.
        @returns: L{AsyncLRUCache} instance
        """
        cache = self._caches.get(cache_name)
        if cache is None:
            # first request for this name: size it from the configuration,
            # falling back to the class-level default
            max_size = self.config.get(cache_name, self.DEFAULT_CACHE_SIZE)
            assert max_size >= 1
            cache = lru.AsyncLRUCache(miss_fn, max_size)
            self._caches[cache_name] = cache
        return cache
Example #6
0
    def test_simple_lru_expulsion_maxsize_1_null_result(self):
        # a regression test for #2011: a None result must not expel entries
        def miss_fn(k):
            # 'b' deliberately resolves to None; everything else to short(k)
            return defer.succeed(None if k == 'b' else short(k))

        self.lru = lru.AsyncLRUCache(miss_fn, 1)
        d = defer.succeed(None)

        def queue_get(key, expected, hits, misses):
            # chain a get() for `key` followed by a result/stats check
            d.addCallback(lambda _: self.lru.get(key))
            d.addCallback(self.check_result, expected, hits, misses)

        queue_get('a', short('a'), 0, 1)
        queue_get('b', None, 0, 2)

        # 'a' was not expelled since 'b' was None
        self.lru.miss_fn = self.long_miss_fn
        queue_get('a', short('a'), 1, 2)

        return d
Example #7
0
    def test_simple_lru_expulsion_maxsize_1(self):
        # With maxsize 1, every distinct key expels the previous entry.
        self.lru = lru.AsyncLRUCache(self.short_miss_fn, 1)

        self.check_result((yield self.lru.get('a')), short('a'), 0, 1)
        self.check_result((yield self.lru.get('a')), short('a'), 1, 1)
        self.check_result((yield self.lru.get('b')), short('b'), 1, 2)

        # force a collection pass before re-fetching (presumably so the
        # expelled entry cannot linger via a weak reference)
        gc.collect()

        # now try 'a' again - it should be a miss
        self.lru.miss_fn = self.long_miss_fn
        self.check_result((yield self.lru.get('a')), long('a'), 1, 3)

        gc.collect()

        # ..and that expelled B
        self.check_result((yield self.lru.get('b')), long('b'), 1, 4)
Example #8
0
 def setUp(self):
     # reset the module-level flag that lru sets when an invariant breaks,
     # so this test starts from a clean slate
     lru.inv_failed = False
     # cache under test: synchronous short_miss_fn, capacity 3
     self.lru = lru.AsyncLRUCache(self.short_miss_fn, 3)
Example #9
0
 def setUp(self):
     # NOTE(review): the other snippets construct AsyncLRUCache with
     # (miss_fn, max_size); here only a single argument (3) is passed.
     # Confirm whether this targets a different constructor signature
     # (e.g. a synchronous LRUCache) or is missing the miss function.
     self.lru = lru.AsyncLRUCache(3)
Example #10
0
 def setUp(self):
     # cache under test: synchronous short_miss_fn, capacity 3
     self.lru = lru.AsyncLRUCache(self.short_miss_fn, 3)