Example 1
0
    def testFailedProcessing(self):
        """An exception raised inside a result handler must escape RoiRequestBatch.execute()."""
        op = OpArrayPiper(graph=Graph())
        data = numpy.indices((100, 100)).sum(0)
        op.Input.setValue(data)

        # One roi per 10x10 block covering the full 100x100 volume.
        rois = [
            getBlockBounds([100, 100], [10, 10], start)
            for start in getIntersectingBlocks([10, 10], ([0, 0], [100, 100]))
        ]

        class SpecialException(Exception):
            pass

        def handleResult(roi, result):
            raise SpecialException(
                "Intentional Exception: raised while handling the result")

        volume = numpy.prod(data.shape)
        batch = RoiRequestBatch(op.Output,
                                iter(rois),
                                volume,
                                batchSize=10,
                                allowParallelResults=False)
        batch.resultSignal.subscribe(handleResult)

        # FIXME: There are multiple places where the RoiRequestBatch tool should be prepared to handle exceptions.
        #        This only tests one of them (in the notify_finished() handler)
        propagated = False
        try:
            batch.execute()
        except SpecialException:
            propagated = True
        assert propagated, "Expected exception to be propagated out of the RoiRequestBatch."
Example 2
0
    def testBasic(self):
        """Request the whole volume in 10x10 blocks and verify results and progress reporting."""
        op = OpArrayPiper(graph=Graph())
        data = numpy.indices((100, 100)).sum(0)
        op.Input.setValue(data)

        # One roi per 10x10 block covering the full 100x100 volume.
        rois = [
            getBlockBounds([100, 100], [10, 10], start)
            for start in getIntersectingBlocks([10, 10], ([0, 0], [100, 100]))
        ]

        collected = numpy.zeros((100, 100), dtype=numpy.int32)
        collected_lock = threading.Lock()

        # Mutable cell so the closure can count results.
        result_count = [0]

        def handleResult(roi, result):
            # Non-blocking acquire: if the lock is already held, results are
            # being delivered in parallel, which allowParallelResults=False forbids.
            acquired = collected_lock.acquire(False)
            assert acquired, "resultslock is contested! Access to callback is supposed to be automatically serialized."
            collected[roiToSlice(*roi)] = result
            logger.debug("Got result for {}".format(roi))
            collected_lock.release()
            result_count[0] += 1

        progress_updates = []

        def handleProgress(progress):
            progress_updates.append(progress)
            logger.debug("Progress update: {}".format(progress))

        volume = numpy.prod(data.shape)
        batch = RoiRequestBatch(op.Output,
                                iter(rois),
                                volume,
                                batchSize=10,
                                allowParallelResults=False)
        batch.resultSignal.subscribe(handleResult)
        batch.progressSignal.subscribe(handleProgress)

        batch.execute()
        logger.debug("Got {} results".format(result_count[0]))
        assert (collected == data).all()

        # Progress reporting MUST start with 0 and end with 100
        assert progress_updates[0] == 0, "Invalid progress reporting."
        assert progress_updates[-1] == 100, "Invalid progress reporting."

        # There should be some intermediate progress reporting, but exactly how much is unspecified.
        assert len(progress_updates) >= 10

        logger.debug("FINISHED")
    def __init__(self, outputSlot, roi, minBlockShape, batchSize=None):
        """
        Constructor.

        :param outputSlot: The slot to request data from.
        :param roi: The roi `(start, stop)` of interest.  Will be broken up and requested via smaller requests.
        :param minBlockShape: The minimum amount of data to request in each request.
                              Note: The current implementation breaks the big request into smaller
                              requests of exactly ``minBlockShape`` size. Future implementations could
                              concatenate smaller requests if it appears the system is not being overloaded by the smaller requests.
        :param batchSize: The maximum number of requests to launch in parallel.
                          Defaults to 2 when omitted.
        """
        self._outputSlot = outputSlot
        self._bigRoi = roi
        self._minBlockShape = minBlockShape

        if batchSize is None:
            batchSize = 2

        # Align the blocking with the start of the roi
        offsetRoi = ([0] * len(roi[0]), numpy.subtract(roi[1], roi[0]))
        self._minBlockStarts = getIntersectingBlocks(minBlockShape, offsetRoi)
        self._minBlockStarts += roi[0]  # Un-offset

        totalVolume = numpy.prod(numpy.subtract(roi[1], roi[0]))

        # For now, simply iterate over the min blocks
        # TODO: Auto-dialate block sizes based on CPU/RAM usage.
        def roiGen():
            # FIX: was ``while True: block_start = block_iter.next()`` --
            # ``.next()`` is Python-2-only (AttributeError on Python 3), and
            # letting StopIteration escape a generator is illegal since
            # PEP 479.  A plain ``for`` loop terminates the generator cleanly.
            for block_start in self._minBlockStarts:
                # Use offset blocking
                offset_block_start = block_start - self._bigRoi[0]
                offset_data_shape = numpy.subtract(self._bigRoi[1],
                                                   self._bigRoi[0])
                offset_block_bounds = getBlockBounds(offset_data_shape,
                                                     minBlockShape,
                                                     offset_block_start)

                # Un-offset
                block_bounds = (offset_block_bounds[0] + self._bigRoi[0],
                                offset_block_bounds[1] + self._bigRoi[0])
                logger.debug("Requesting Roi: {}".format(block_bounds))
                yield block_bounds

        self._requestBatch = RoiRequestBatch(self._outputSlot, roiGen(),
                                             totalVolume, batchSize)
Example 4
0
    def run_export(self) -> None:
        """Export an image from Input to Filepath."""
        # Remove any stale output file before writing a fresh one.
        target = pathlib.Path(self.Filepath.value)
        if target.exists():
            target.unlink()

        out_shape = self._opReorderAxes.Output.meta.shape

        # Buffer keyed by every axis except the final two (the page plane).
        self._page_buf = _NdBuf(out_shape[:-2])

        batch = RoiRequestBatch(
            outputSlot=self._opReorderAxes.Output,
            roiIterator=_page_rois(*out_shape),
            totalVolume=np.prod(out_shape),
            batchSize=self._batch_size,
        )
        batch.progressSignal.subscribe(self.progressSignal)
        batch.resultSignal.subscribe(self._write_buffered_pages)
        batch.execute()
Example 5
0
    def testPropagatesProcessingException(self, op_raising_at_3):
        """An exception raised while processing a request must propagate out of execute()."""
        # Four overlapping rois; the fixture raises on the third request.
        rois = [((i, i, i), (4, 4, 4)) for i in range(4)]

        volume = numpy.prod(op_raising_at_3.Output.meta.shape)
        batch = RoiRequestBatch(op_raising_at_3.Output,
                                iter(rois),
                                volume,
                                batchSize=1,
                                allowParallelResults=False)

        with pytest.raises(ProcessingException):
            batch.execute()
Example 6
0
    def __init__(self,
                 outputSlot,
                 roi,
                 blockshape=None,
                 batchSize=None,
                 blockAlignment='absolute',
                 allowParallelResults=False):
        """
        Constructor.

        :param outputSlot: The slot to request data from.
        :param roi: The roi `(start, stop)` of interest.  Will be broken up and requested via smaller requests.
        :param blockshape: The amount of data to request in each request. If omitted, a default blockshape is chosen by inspecting the metadata of the given slot.
        :param batchSize: The maximum number of requests to launch in parallel.  This should not be necessary if the blockshape is small enough that you won't run out of RAM.
        :param blockAlignment: Determines how block the requests. Choices are 'absolute' or 'relative'.
        :param allowParallelResults: If False, The resultSignal will not be called in parallel.
                                     In that case, your handler function has no need for locks.
        """
        self._outputSlot = outputSlot
        self._bigRoi = roi

        totalVolume = numpy.prod(numpy.subtract(roi[1], roi[0]))

        if batchSize is None:
            batchSize = 1000

        if blockshape is None:
            blockshape = self._determine_blockshape(outputSlot)

        assert blockAlignment in ['relative', 'absolute']
        if blockAlignment == 'relative':
            # Align the blocking with the start of the roi
            offsetRoi = ([0] * len(roi[0]), numpy.subtract(roi[1], roi[0]))
            block_starts = getIntersectingBlocks(blockshape, offsetRoi)
            block_starts += roi[0]  # Un-offset

            # For now, simply iterate over the min blocks
            # TODO: Auto-dialate block sizes based on CPU/RAM usage.
            def roiGen():
                # FIX: was ``while True: block_start = block_iter.next()`` --
                # ``.next()`` is Python-2-only (AttributeError on Python 3),
                # and letting StopIteration escape a generator is illegal
                # since PEP 479.  A ``for`` loop ends the generator cleanly.
                for block_start in block_starts:
                    # Use offset blocking
                    offset_block_start = block_start - self._bigRoi[0]
                    offset_data_shape = numpy.subtract(self._bigRoi[1],
                                                       self._bigRoi[0])
                    offset_block_bounds = getBlockBounds(
                        offset_data_shape, blockshape, offset_block_start)

                    # Un-offset
                    block_bounds = (offset_block_bounds[0] + self._bigRoi[0],
                                    offset_block_bounds[1] + self._bigRoi[0])
                    logger.debug("Requesting Roi: {}".format(block_bounds))
                    yield block_bounds

        else:
            # Absolute blocking.
            # Blocks are simply relative to (0,0,0,...)
            # But we still clip the requests to the overall roi bounds.
            block_starts = getIntersectingBlocks(blockshape, roi)

            def roiGen():
                # FIX: same Python-2 ``block_iter.next()`` defect as above.
                for block_start in block_starts:
                    block_bounds = getBlockBounds(outputSlot.meta.shape,
                                                  blockshape, block_start)
                    block_intersecting_portion = getIntersection(
                        block_bounds, roi)

                    logger.debug("Requesting Roi: {}".format(block_bounds))
                    yield block_intersecting_portion

        self._requestBatch = RoiRequestBatch(self._outputSlot, roiGen(),
                                             totalVolume, batchSize,
                                             allowParallelResults)