# NOTE: import paths below are assumed from upstream lazyflow and may differ
# between versions; OpEnlarge, NonRegisteredCache and
# OpArrayPiperWithAccessCount are helpers defined elsewhere in this test
# module.
import gc
import weakref

import numpy as np
import vigra

from lazyflow.graph import Graph
from lazyflow.request import Request
from lazyflow.utility import BigRequestStreamer, Memory
from lazyflow.operators import OpBlockedArrayCache
from lazyflow.operators.opSplitRequestsBlockwise import OpSplitRequestsBlockwise
from lazyflow.operators.cacheMemoryManager import _CacheMemoryManager, default_refresh_interval


class TestCacheMemoryManager:
    def teardown_method(self, method):
        # reset cleanup frequency to a sane value
        # reset max memory
        Memory.setAvailableRamCaches(-1)
        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(default_refresh_interval)
        mgr.enable()
        Request.reset_thread_pool()

    def testAPIConformity(self):
        c = NonRegisteredCache("c")
        mgr = _CacheMemoryManager()

        # don't clean up while we are testing
        mgr.disable()

        # sanity check: cache objects must be weak-referenceable and must
        # actually expire once the last strong reference is gone (see the
        # standalone WeakSet sketch after this class)
        d = NonRegisteredCache("testwr")
        s = weakref.WeakSet()
        s.add(d)
        del d
        gc.collect()
        leftover = list(s)
        assert len(leftover) == 0, leftover[0].name

        c1 = NonRegisteredCache("c1")
        c1a = c1
        c2 = NonRegisteredCache("c2")

        mgr.addFirstClassCache(c)
        mgr.addCache(c1)
        mgr.addCache(c1a)
        mgr.addCache(c2)

        fcc = mgr.getFirstClassCaches()
        assert len(fcc) == 1, "too many first class caches"
        assert c in fcc, "did not register fcc correctly"
        del fcc

        cs = mgr.getCaches()
        assert len(cs) == 3, "wrong number of caches"
        refcs = [c, c1, c2]
        for cache in refcs:
            assert cache in cs, "{} not stored".format(cache.name)
        del cs
        del refcs
        del cache

        del c1a
        gc.collect()
        cs = mgr.getCaches()
        assert c1 in cs
        assert len(cs) == 3, str([x.name for x in cs])
        del cs

        del c2
        gc.collect()
        cs = mgr.getCaches()
        assert len(cs) == 2, str([x.name for x in cs])

    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test demonstrates the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, under certain memory constraints, the cache
        cleanup strategy currently in use is inefficient; a more advanced
        strategy should pass this test.
        """

        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including the
        # fudge factor of 2 applied by BigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)
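        # with shape (999, 999) and the uint8 volume allocated below,
        # np.prod(shape) counts one byte per element: cacheMem =
        # 999 ** 2 = 998001 bytes for the caches, plus
        # 2 * 333 ** 2 = 221778 bytes of headroom for one in-flight block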

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)
        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
        # in the best case, we have 9
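        # (3x3 grid of 333**2 blocks: assuming OpEnlarge requests a
        # one-block halo, each of the 4 corner blocks touches 4 blocks,
        # each of the 4 edge blocks touches 6, and the center block touches
        # all 9, i.e. 4*4 + 4*6 + 9 = 49 pipe requests without caching;
        # an ideal cache computes each block exactly once, i.e. 9)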
        np.testing.assert_equal(pipe.accessCount, 9)
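

# The reference-counting assertions in testAPIConformity above rely on the
# manager tracking caches weakly. A minimal, self-contained sketch of that
# behaviour with a plain weakref.WeakSet (a stand-in for the real manager's
# bookkeeping, not lazyflow code):
class _DummyCache:
    """Any weak-referenceable object works here."""


def _weakset_sketch():
    tracked = weakref.WeakSet()
    c1 = _DummyCache()
    c1a = c1  # alias: a second strong reference to the same object
    tracked.add(c1)

    del c1a
    gc.collect()
    assert len(tracked) == 1  # c1 is still strongly referenced

    del c1
    gc.collect()
    assert len(tracked) == 0  # last strong reference gone -> entry expired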


# pytest fixture (a separate example on the source page) that swaps in a
# fresh _CacheMemoryManager so each test sees an isolated manager instance;
# the @pytest.fixture decorator and the imports are assumed from the
# surrounding conftest.
import pytest

import lazyflow.operators.cacheMemoryManager


@pytest.fixture
def cacheMemoryManager(monkeypatch):
    mem_manager = _CacheMemoryManager()
    monkeypatch.setattr(lazyflow.operators.cacheMemoryManager,
                        "_cache_memory_manager", mem_manager)
    return mem_manager
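

# A hypothetical usage of the fixture (test name and assertion are
# illustrative, not part of the original suite): pytest injects the fixture's
# return value, which is the same instance that was patched into the module.
def test_manager_is_isolated(cacheMemoryManager):
    assert (
        lazyflow.operators.cacheMemoryManager._cache_memory_manager
        is cacheMemoryManager
    )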