Example #1
class BigRequestStreamer(object):
    """
    Execute a big request by breaking it up into smaller requests.
    
    This class encapsulates the logic for dividing big rois into smaller ones to be executed separately.
    It relies on a RoiRequestBatch object, which is responsible for creating and scheduling the request for each roi.
    """
    def __init__(self, outputSlot, roi, minBlockShape):
        self._outputSlot = outputSlot
        self._bigRoi = roi
        self._minBlockShape = minBlockShape

        # Align the blocking with the start of the roi
        offsetRoi = ([0] * len(roi[0]), numpy.subtract(roi[1], roi[0]))
        self._minBlockStarts = getIntersectingBlocks(minBlockShape, offsetRoi)
        self._minBlockStarts += roi[0] # Un-offset

        self._requestedBlocks = numpy.zeros( self._minBlockStarts.shape[:-1], dtype=bool )
        
        totalVolume = numpy.prod( numpy.subtract(roi[1], roi[0]) )
        # For now, simply iterate over the min blocks
        # TODO: Auto-dilate block sizes based on CPU/RAM usage.
        def roiGen():
            # Iterate over the precomputed block starts; the generator ends
            # once they are exhausted.
            for block_start in self._minBlockStarts:
                # Use offset blocking
                offset_block_start = block_start - self._bigRoi[0]
                offset_data_shape = numpy.subtract(self._bigRoi[1], self._bigRoi[0])
                offset_block_bounds = getBlockBounds( offset_data_shape, minBlockShape, offset_block_start )

                # Un-offset
                block_bounds = ( offset_block_bounds[0] + self._bigRoi[0],
                                 offset_block_bounds[1] + self._bigRoi[0] )
                logger.debug( "Requesting Roi: {}".format( block_bounds ) )
                yield block_bounds
        
        # Note: the trailing 2 appears to be the batch size (cf. batchSize=10 in Example #3),
        # i.e. how many block requests are kept in flight at once.
        self._requestBatch = RoiRequestBatch( self._outputSlot, roiGen(), totalVolume, 2 )

    @property
    def progressSignal(self):
        return self._requestBatch.progressSignal

    @property
    def resultSignal(self):
        return self._requestBatch.resultSignal

    def execute(self):
        self._requestBatch.execute()
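
The class above only wires the pieces together: a caller subscribes to resultSignal and progressSignal, then calls execute(). Below is a minimal usage sketch modeled on the test in Example #3; OpArrayPiper, Graph, and roiToSlice are the same helpers used there, and the concrete data shape and block size are illustrative assumptions rather than part of the original class.

# Hypothetical driver for BigRequestStreamer, mirroring the setup in Example #3.
inputData = numpy.indices( (100,100) ).sum(0)
op = OpArrayPiper( graph=Graph() )
op.Input.setValue( inputData )

results = numpy.zeros( (100,100), dtype=numpy.int32 )
def collectResult(roi, result):
    # Same (roi, result) callback signature as RoiRequestBatch.resultSignal
    results[ roiToSlice(*roi) ] = result

streamer = BigRequestStreamer( op.Output, ([0,0], [100,100]), (10,10) )
streamer.resultSignal.subscribe( collectResult )
streamer.progressSignal.subscribe( lambda p: logger.debug( "progress: {}".format(p) ) )
streamer.execute()
assert (results == inputData).all()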
Example #2
    def __init__(self, outputSlot, roi, minBlockShape):
        self._outputSlot = outputSlot
        self._bigRoi = roi
        self._minBlockShape = minBlockShape

        # Align the blocking with the start of the roi
        offsetRoi = ([0] * len(roi[0]), numpy.subtract(roi[1], roi[0]))
        self._minBlockStarts = getIntersectingBlocks(minBlockShape, offsetRoi)
        self._minBlockStarts += roi[0] # Un-offset

        self._requestedBlocks = numpy.zeros( self._minBlockStarts.shape[:-1], dtype=bool )
        
        totalVolume = numpy.prod( numpy.subtract(roi[1], roi[0]) )
        # For now, simply iterate over the min blocks
        # TODO: Auto-dilate block sizes based on CPU/RAM usage.
        def roiGen():
            # Iterate over the precomputed block starts; the generator ends
            # once they are exhausted.
            for block_start in self._minBlockStarts:
                # Use offset blocking
                offset_block_start = block_start - self._bigRoi[0]
                offset_data_shape = numpy.subtract(self._bigRoi[1], self._bigRoi[0])
                offset_block_bounds = getBlockBounds( offset_data_shape, minBlockShape, offset_block_start )

                # Un-offset
                block_bounds = ( offset_block_bounds[0] + self._bigRoi[0],
                                 offset_block_bounds[1] + self._bigRoi[0] )
                logger.debug( "Requesting Roi: {}".format( block_bounds ) )
                yield block_bounds
        
        self._requestBatch = RoiRequestBatch( self._outputSlot, roiGen(), totalVolume, 2 )
Example #3
    def testBasic(self):
        op = OpArrayPiper( graph=Graph() )
        inputData = numpy.indices( (100,100) ).sum(0)
        op.Input.setValue( inputData )
        roiList = []
        block_starts = getIntersectingBlocks( [10,10], ([0,0], [100, 100]) )
        for block_start in block_starts:
            roiList.append( getBlockBounds( [100,100], [10,10], block_start ) )    
    
        results = numpy.zeros( (100,100), dtype=numpy.int32 )
        resultslock = threading.Lock()

        resultsCount = [0]
        
        def handleResult(roi, result):
            assert resultslock.acquire(False), "resultslock is contested! Access to callback is supposed to be automatically serialized."
            results[ roiToSlice( *roi ) ] = result
            logger.debug( "Got result for {}".format(roi) )
            resultslock.release()
            resultsCount[0] += 1

        progressList = []
        def handleProgress( progress ):
            progressList.append( progress )
            logger.debug( "Progress update: {}".format(progress) )
        
        totalVolume = numpy.prod( inputData.shape )
        batch = RoiRequestBatch(op.Output, iter(roiList), totalVolume, batchSize=10)
        batch.resultSignal.subscribe( handleResult )
        batch.progressSignal.subscribe( handleProgress )
        
        batch.execute()
        logger.debug( "Got {} results".format( resultsCount[0] ) )
        assert (results == inputData).all()

        # Progress reporting MUST start with 0 and end with 100        
        assert progressList[0] == 0, "Invalid progress reporting."
        assert progressList[-1] == 100, "Invalid progress reporting."
        
        # There should be some intermediate progress reporting, but exactly how much is unspecified.
        assert len(progressList) >= 10
        
        logger.debug( "FINISHED" )