def testNoFullBlocks(self):
        op = OpSplitRequestsBlockwise(False, graph=self.g)
        op.Input.connect(self.piper.Output)
        op.BlockShape.setValue((10, 15, 20))

        op.Output[0:20, 30:45, 10:30].wait()
        slot = self.piper.Output
        expected = [SubRegion(slot, (0, 30, 10), (10, 45, 20)),
                    SubRegion(slot, (0, 30, 20), (10, 45, 30)),
                    SubRegion(slot, (10, 30, 10), (20, 45, 20)),
                    SubRegion(slot, (10, 30, 20), (20, 45, 30))]

        for req in self.piper.requests:
            print(req)

        for roi in expected:
            filtered = [x for x in self.piper.requests if x == roi]
            assert len(filtered) == 1, "missing roi {}".format(roi)

        self.piper.requests = []

        op.Output[5:14, 32:44, 17:21].wait()
        expected = [SubRegion(slot, (5, 32, 17), (10, 44, 20)),
                    SubRegion(slot, (5, 32, 20), (10, 44, 21)),
                    SubRegion(slot, (10, 32, 17), (14, 44, 20)),
                    SubRegion(slot, (10, 32, 20), (14, 44, 21))]
        for roi in expected:
            filtered = [x for x in self.piper.requests if x == roi]
            assert len(filtered) == 1, "missing roi {}".format(roi)
    def testCorrectData(self):
        """The split operator must deliver data identical to the input volume,
        both without (False) and with (True) expansion to full blocks.
        """
        # Same two ROIs (one block-aligned, one unaligned) for both modes.
        for expand_full_blocks in (False, True):
            split_op = OpSplitRequestsBlockwise(expand_full_blocks, graph=self.g)
            split_op.Input.connect(self.piper.Output)
            split_op.BlockShape.setValue((10, 15, 20))

            # Block-aligned ROI.
            result = split_op.Output[0:20, 30:45, 10:30].wait()
            assert_array_equal(result, self.vol[0:20, 30:45, 10:30].view(np.ndarray))

            # Unaligned ROI.
            result = split_op.Output[5:14, 32:44, 17:21].wait()
            assert_array_equal(result, self.vol[5:14, 32:44, 17:21].view(np.ndarray))
# Example 3
    def testFullBlocks(self):
        """With full-block expansion enabled (constructor arg True), every
        sub-request must be expanded to a whole block, even when the requested
        ROI covers only part of it.
        """
        op = OpSplitRequestsBlockwise(True, graph=self.g)
        op.Input.connect(self.piper.Output)
        op.BlockShape.setValue((10, 15, 20))

        op.Output[0:20, 30:45, 10:30].wait()
        slot = self.piper.Output
        expected = [SubRegion(slot, (0, 30, 0), (10, 45, 20)),
                    SubRegion(slot, (0, 30, 20), (10, 45, 40)),
                    SubRegion(slot, (10, 30, 0), (20, 45, 20)),
                    SubRegion(slot, (10, 30, 20), (20, 45, 40))]

        # Use a list comprehension, not filter(): in Python 3 filter() returns
        # an iterator, so len() on it would raise TypeError.
        for roi in expected:
            filtered = [x for x in self.piper.requests if x == roi]
            assert len(filtered) == 1, "missing roi {}".format(roi)

        self.piper.requests = []

        # A smaller, unaligned ROI must still expand to the same full blocks.
        op.Output[5:14, 32:44, 17:21].wait()
        for roi in expected:
            filtered = [x for x in self.piper.requests if x == roi]
            assert len(filtered) == 1, "missing roi {}".format(roi)
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        # Fast-polling cache memory manager so cleanup happens during the test.
        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        # 2-D volume, axes "xy".
        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        # Piper counts how many execute() requests reach the source.
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        # OpEnlarge sits between cache and consumer so that downstream
        # requests overlap multiple cache blocks.
        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)
        # Stream the full volume through the pipeline block by block.
        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
        # in the best case, we have 9
        np.testing.assert_equal(pipe.accessCount, 9)