Example #1
    def testBlockedCacheHandling(self):
        n, k = 10, 5
        vol = np.zeros((n,) * 5, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags='txyzc')

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        cache = OpBlockedArrayCache(graph=g)

        mgr = CacheMemoryManager()

        # restrict cache memory to 0 bytes
        Memory.setAvailableRamCaches(0)

        # set to frequent cleanup
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        cache.outerBlockShape.setValue((k,) * 5)
        cache.Input.connect(pipe.Output)
        pipe.Input.setValue(vol)

        a = pipe.accessCount
        cache.Output[...].wait()
        b = pipe.accessCount
        assert b > a, "did not cache"

        # let the manager clean up
        mgr.enable()
        time.sleep(0.5)
        gc.collect()

        cache.Output[...].wait()
        c = pipe.accessCount
        assert c > b, "did not clean up"
Example #2
    def testBlockedCacheHandling(self):
        n, k = 10, 5
        vol = np.zeros((n,) * 5, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags="txyzc")

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        cache = OpBlockedArrayCache(graph=g)

        mgr = CacheMemoryManager()

        # restrict cache memory to 0 bytes
        Memory.setAvailableRamCaches(0)

        # set to frequent cleanup
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        cache.BlockShape.setValue((k,) * 5)
        cache.Input.connect(pipe.Output)
        pipe.Input.setValue(vol)

        a = pipe.accessCount
        cache.Output[...].wait()
        b = pipe.accessCount
        assert b > a, "did not cache"

        # let the manager clean up
        mgr.enable()
        time.sleep(0.5)
        gc.collect()

        cache.Output[...].wait()
        c = pipe.accessCount
        assert c > b, "did not clean up"
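
Examples #1 and #2 are the same test taken from two different lazyflow versions: the cache's block-shape slot is named outerBlockShape in the first and BlockShape in the second. Neither snippet shows its imports; a minimal sketch of what they assume follows. The module paths are assumptions and have moved between lazyflow versions, and OpArrayPiperWithAccessCount is a test helper (an OpArrayPiper that counts execute calls) from lazyflow's test utilities rather than part of the public API.

    # Presumed imports for Examples #1 and #2; module paths are assumptions.
    import gc
    import time

    import numpy as np
    import vigra

    from lazyflow.graph import Graph
    from lazyflow.request import Request
    from lazyflow.utility import Memory
    from lazyflow.operators.opBlockedArrayCache import OpBlockedArrayCache
    from lazyflow.operators.cacheMemoryManager import CacheMemoryManager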
Example #3
    def tearDown(self):
        # reset max memory
        Memory.setAvailableRamCaches(-1)
        # reset cleanup frequency to a sane value
        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(default_refresh_interval)
        mgr.enable()
        Request.reset_thread_pool()
Example #4
    def teardown_method(self, method):
        # reset max memory
        Memory.setAvailableRamCaches(-1)
        # reset cleanup frequency to a sane value
        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(default_refresh_interval)
        mgr.enable()
        Request.reset_thread_pool()
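
The name default_refresh_interval used by both teardown hooks is defined elsewhere in the test module. A minimal sketch of the assumed module-level companion is below; the value is a placeholder, not lazyflow's documented default. CacheMemoryManager behaves as a singleton here, which is why the hooks can construct a fresh-looking instance and still affect the shared manager.

    # Assumed module-level definition; 120 s is a placeholder cadence,
    # not a value taken from lazyflow.
    default_refresh_interval = 120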
Example #5
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = 'xy'

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including the
        # fudge factor 2 of BigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.outerBlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)

        streamer = BigRequestStreamer(split.Output,
                                      [(0, ) * len(shape), shape])
        streamer.execute()

        # the (999, 999) volume splits into 3x3 = 9 blocks of shape (333, 333);
        # in the best case each block is computed exactly once (9 requests to
        # pipe), in the worst case we get 4*4 + 4*6 + 9 = 49 requests
        np.testing.assert_equal(pipe.accessCount, 9)
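
OpEnlarge is another test-local helper that the snippet does not define. A sketch of the kind of operator the test needs is shown below: it pads every incoming ROI by a small halo before forwarding it upstream, so a single block-sized request spills into neighbouring cache blocks. The class body, the halo width, and the exact slot usage are assumptions modelled on common lazyflow operators, not the test suite's actual implementation.

    import numpy as np
    from lazyflow.graph import Operator, InputSlot, OutputSlot

    class OpEnlarge(Operator):
        # pads each request by a fixed halo, then crops back to the caller's ROI
        Input = InputSlot()
        Output = OutputSlot()

        delta = 3  # halo width in pixels (assumed value)

        def setupOutputs(self):
            self.Output.meta.assignFrom(self.Input.meta)

        def execute(self, slot, subindex, roi, result):
            shape = np.asarray(self.Input.meta.shape)
            start = np.maximum(np.asarray(roi.start) - self.delta, 0)
            stop = np.minimum(np.asarray(roi.stop) + self.delta, shape)
            # fetch the enlarged region from upstream ...
            data = self.Input(start, stop).wait()
            # ... and copy only the originally requested window into result
            crop = tuple(slice(a - s, b - s) for a, b, s in zip(roi.start, roi.stop, start))
            result[...] = data[crop]

        def propagateDirty(self, slot, subindex, roi):
            self.Output.setDirty(roi)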
Example #6
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including the
        # fudge factor 2 of BigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)

        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # the (999, 999) volume splits into 3x3 = 9 blocks of shape (333, 333);
        # in the best case each block is computed exactly once (9 requests to
        # pipe), in the worst case we get 4*4 + 4*6 + 9 = 49 requests
        np.testing.assert_equal(pipe.accessCount, 9)