Example #1
    def _transferData(self, roi, array_data, read):
        """
        Read or write data from/to the fileset.

        :param roi: The region of interest.
        :param array_data: If ``read`` is True, ``array_data`` is the destination array for the read data.  If ``read`` is False, array_data contains the data to write to disk.
        :param read: If True, read data from the fileset into ``array_data``.  Otherwise, write data from ``array_data`` into the fileset on disk.
        :type read: bool
        """
        entire_dataset_roi = ([0] * len(self._description.view_shape), self._description.view_shape)
        clipped_roi = getIntersection(roi, entire_dataset_roi)
        assert (
            numpy.array(clipped_roi) == numpy.array(roi)
        ).all(), "Roi {} does not fit within dataset bounds: {}".format(roi, self._description.view_shape)

        block_starts = getIntersectingBlocks(self._description.block_shape, roi)

        # TODO: Parallelize this loop?
        for block_start in block_starts:
            entire_block_roi = self.getEntireBlockRoi(block_start)  # Roi of this whole block within the whole dataset
            transfer_block_roi = getIntersection(
                entire_block_roi, roi
            )  # Roi of data needed from this block within the whole dataset
            block_relative_roi = (
                transfer_block_roi[0] - block_start,
                transfer_block_roi[1] - block_start,
            )  # Roi of needed data from this block, relative to the block itself
            array_data_roi = (
                transfer_block_roi[0] - roi[0],
                transfer_block_roi[1] - roi[0],
            )  # Roi of data needed from this block within array_data

            array_slicing = roiToSlice(*array_data_roi)
            self._transferBlockData(entire_block_roi, block_relative_roi, array_data, array_slicing, read)
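All of these snippets revolve around the same handful of roi helpers from lazyflow.roi: getIntersection, getIntersectingBlocks, getBlockBounds, and roiToSlice. As a reading aid, here is a minimal, self-contained sketch of what those helpers compute, matching the behaviour observed in the examples below; it is an illustration, not the lazyflow implementation.

import numpy

def roiToSlice(start, stop):
    # Turn a (start, stop) roi into a tuple of slices for numpy indexing.
    return tuple(slice(int(a), int(b)) for a, b in zip(start, stop))

def getIntersection(roiA, roiB, assertIntersect=True):
    # Element-wise max of the starts and min of the stops.
    start = numpy.maximum(roiA[0], roiB[0])
    stop = numpy.minimum(roiA[1], roiB[1])
    if (start >= stop).any():
        assert not assertIntersect, "Rois don't intersect: {} and {}".format(roiA, roiB)
        return None
    return [tuple(start), tuple(stop)]

def getBlockBounds(dataset_shape, blockshape, block_start):
    # Roi of the whole block beginning at block_start, clipped to the dataset bounds.
    start = numpy.array(block_start)
    stop = numpy.minimum(start + blockshape, dataset_shape)
    return [tuple(start), tuple(stop)]

def getIntersectingBlocks(blockshape, roi):
    # Starts of every block (on a grid of 'blockshape') touched by 'roi'.
    first = numpy.array(roi[0]) // blockshape * blockshape
    axes = [numpy.arange(lo, hi, step) for lo, hi, step in zip(first, roi[1], blockshape)]
    mesh = numpy.meshgrid(*axes, indexing="ij")
    return numpy.stack([m.ravel() for m in mesh], axis=-1)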
Example #2
    def _copyData(self, roi, destination, block_starts):
        """
        Copy data from each block into the destination array.
        For blocks that aren't currently stored, just write zeros.
        """
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        block_starts = map( tuple, block_starts )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            destination_relative_intersection_slicing = roiToSlice(*destination_relative_intersection)
            block_relative_intersection_slicing = roiToSlice(*block_relative_intersection)

            if block_start in self._cacheFiles:
                # Copy from block to destination
                dataset = self._getBlockDataset( entire_block_roi )

                if self.Output.meta.has_mask:
                    destination[ destination_relative_intersection_slicing ] = dataset["data"][ block_relative_intersection_slicing ]
                    destination.mask[ destination_relative_intersection_slicing ] = dataset["mask"][ block_relative_intersection_slicing ]
                    destination.fill_value = dataset["fill_value"][()]
                else:
                    destination[ destination_relative_intersection_slicing ] = dataset[ block_relative_intersection_slicing ]
            else:
                # Not stored yet.  Overwrite with zeros.
                destination[ destination_relative_intersection_slicing ] = 0
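The destination-relative and block-relative bookkeeping above is the pattern that every _copyData-style method in this listing repeats. A tiny sketch with made-up numbers (a 2D request against one 20x20 block), just to make the arithmetic concrete:

import numpy

roi = (numpy.array([5, 5]), numpy.array([25, 25]))      # requested region: [5,25) x [5,25)
block_start = numpy.array([20, 0])                      # start of one intersecting 20x20 block
entire_block_roi = (block_start, block_start + 20)      # [20,40) x [0,20)

# Intersection of the request with this block
intersecting_roi = (numpy.maximum(roi[0], entire_block_roi[0]),   # [20, 5]
                    numpy.minimum(roi[1], entire_block_roi[1]))   # [25, 20]

destination_relative = numpy.subtract(intersecting_roi, roi[0])   # [[15, 0], [20, 15]]
block_relative = numpy.subtract(intersecting_roi, block_start)    # [[ 0, 5], [ 5, 20]]

# So the copy above amounts to: destination[15:20, 0:15] = block_dataset[0:5, 5:20]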
Example #3
    def _executePredictionImage(self, roi, destination):
        # Determine intersecting blocks
        block_shape = self._getFullShape( self.BlockShape3dDict.value )
        block_starts = getIntersectingBlocks( block_shape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))  # list(): iterated more than once below

        # Ensure that block pipelines exist (create first if necessary)
        for block_start in block_starts:
            self._ensurePipelineExists(block_start)

        # Retrieve result from each block, and write into the appropriate region of the destination
        # TODO: Parallelize this loop
        for block_start in block_starts:
            opBlockPipeline = self._blockPipelines[block_start]
            block_roi = opBlockPipeline.block_roi
            block_intersection = getIntersection( block_roi, (roi.start, roi.stop) )
            block_relative_intersection = numpy.subtract(block_intersection, block_roi[0])
            destination_relative_intersection = numpy.subtract(block_intersection, roi.start)
            
            destination_slice = roiToSlice( *destination_relative_intersection )
            req = opBlockPipeline.PredictionImage( *block_relative_intersection )
            req.writeInto( destination[destination_slice] )
            req.wait()

        return destination
Example #4
    def _copyData(self, roi, destination, block_starts):
        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            destination_relative_intersection_slicing = roiToSlice(*destination_relative_intersection)
            block_relative_intersection_slicing = roiToSlice( *block_relative_intersection )
            
            # Copy from block to destination
            dataset = self._getBlockDataset( entire_block_roi )
            if self.Output.meta.has_mask:
                destination.data[ destination_relative_intersection_slicing ] = dataset["data"][ block_relative_intersection_slicing ]
                destination.mask[ destination_relative_intersection_slicing ] = dataset["mask"][ block_relative_intersection_slicing ]
                destination.fill_value = dataset["fill_value"][()]
            else:
                destination[ destination_relative_intersection_slicing ] = dataset[ block_relative_intersection_slicing ]
            self._last_access_times[block_start] = time.time()
Example #5
    def _calculate_probabilities(self, roi):
        classifier = self.Classifier.value

        assert isinstance(
            classifier, LazyflowPixelwiseClassifierABC
        ), f"Classifier {classifier} must be sublcass of {LazyflowPixelwiseClassifierABC}"

        upstream_roi = (roi.start, roi.stop)
        # Ask for the halo needed by the classifier
        axiskeys = self.Image.meta.getAxisKeys()
        halo_shape = classifier.get_halo_shape(axiskeys)
        assert len(halo_shape) == len(upstream_roi[0])
        assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

        # Expand block by halo, then clip to image bounds
        upstream_roi = numpy.array(upstream_roi)
        upstream_roi[0] -= halo_shape
        upstream_roi[1] += halo_shape
        upstream_roi = getIntersection(upstream_roi, roiFromShape(self.Image.meta.shape))
        upstream_roi = numpy.asarray(upstream_roi)

        # Determine how to extract the data from the result (without the halo)
        downstream_roi = numpy.array((roi.start, roi.stop))
        predictions_roi = downstream_roi[:, :-1] - upstream_roi[0, :-1]

        # Request all upstream channels
        input_channels = self.Image.meta.shape[-1]
        upstream_roi[:, -1] = [0, input_channels]

        input_data = self.Image(*upstream_roi).wait()
        axistags = self.Image.meta.axistags
        probabilities = classifier.predict_probabilities_pixelwise(input_data, predictions_roi, axistags)
        return probabilities
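The halo handling above, illustrated with made-up numbers (a hypothetical 100x100, 3-channel yxc image and a 16-pixel halo), to show how upstream_roi and predictions_roi relate:

import numpy

image_shape = (100, 100, 3)
roi = (numpy.array([10, 10, 0]), numpy.array([40, 40, 3]))   # requested block, all channels
halo_shape = numpy.array([16, 16, 0])                        # whatever the classifier reports

upstream_roi = numpy.array(roi)
upstream_roi[0] -= halo_shape                                # [-6, -6, 0]
upstream_roi[1] += halo_shape                                # [56, 56, 3]

# Clip to the image bounds (the getIntersection(..., roiFromShape(...)) step)
upstream_roi[0] = numpy.maximum(upstream_roi[0], 0)          # [0, 0, 0]
upstream_roi[1] = numpy.minimum(upstream_roi[1], image_shape)

# Where the originally requested block sits inside the halo-expanded result
# (channel axis dropped, exactly like predictions_roi above)
predictions_roi = numpy.array(roi)[:, :-1] - upstream_roi[0, :-1]
print(predictions_roi.tolist())                              # [[10, 10], [40, 40]]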
Example #6
    def _copyData(self, roi, destination, block_starts):
        """
        Copy data from each block into the destination array.
        For blocks that aren't currently stored, just write zeros.
        """
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        block_starts = map(tuple, block_starts)
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape,
                                              self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop),
                                               entire_block_roi)

            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(
                intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(
                intersecting_roi, block_start)

            if block_start in self._cacheFiles:
                # Copy from block to destination
                dataset = self._getBlockDataset(entire_block_roi)
                destination[roiToSlice(
                    *destination_relative_intersection)] = dataset[roiToSlice(
                        *block_relative_intersection)]
            else:
                # Not stored yet.  Overwrite with zeros.
                destination[roiToSlice(*destination_relative_intersection)] = 0
Example #7
    def _calculate_probabilities(self, roi):
        classifier = self.Classifier.value

        assert isinstance(
            classifier, LazyflowPixelwiseClassifierABC
        ), f"Classifier {classifier} must be sublcass of {LazyflowPixelwiseClassifierABC}"

        upstream_roi = (roi.start, roi.stop)
        # Ask for the halo needed by the classifier
        axiskeys = self.Image.meta.getAxisKeys()
        halo_shape = classifier.get_halo_shape(axiskeys)
        assert len(halo_shape) == len(upstream_roi[0])
        assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

        # Expand block by halo, then clip to image bounds
        upstream_roi = numpy.array(upstream_roi)
        upstream_roi[0] -= halo_shape
        upstream_roi[1] += halo_shape
        upstream_roi = getIntersection(upstream_roi, roiFromShape(self.Image.meta.shape))
        upstream_roi = numpy.asarray(upstream_roi)

        # Determine how to extract the data from the result (without the halo)
        downstream_roi = numpy.array((roi.start, roi.stop))
        predictions_roi = downstream_roi[:, :-1] - upstream_roi[0, :-1]

        # Request all upstream channels
        input_channels = self.Image.meta.shape[-1]
        upstream_roi[:, -1] = [0, input_channels]

        input_data = self.Image(*upstream_roi).wait()
        axistags = self.Image.meta.axistags
        probabilities = classifier.predict_probabilities_pixelwise(input_data, predictions_roi, axistags)
        return probabilities
Example #8
    def _executeOutput(self, roi, destination):
        assert len(roi.stop) == len(self.Input.meta.shape), "roi: {} has the wrong number of dimensions for Input shape: {}".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), "roi: {} is out-of-bounds for Input shape: {}".format( roi, self.Input.meta.shape )
        
        block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))  # list(): len() and re-iteration below

        # Ensure all block cache files are up-to-date
        reqPool = RequestPool() # (Do the work in parallel.)
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )
            f = partial( self._ensureCached, entire_block_roi)
            reqPool.add( Request(f) )
        logger.debug( "Waiting for {} blocks...".format( len(block_starts) ) )
        reqPool.wait()

        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            # Copy from block to destination
            dataset = self._getBlockDataset( entire_block_roi )
            destination[ roiToSlice(*destination_relative_intersection) ] = dataset[ roiToSlice( *block_relative_intersection ) ]
        return destination
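The two-phase structure above is worth noting: block preparation happens in parallel via a RequestPool, while the copy loop stays serial because h5py serializes those reads anyway. A hedged sketch of just the parallel phase, using lazyflow's request framework and assuming a hypothetical per-block ensure_cached(block_roi) callable:

from functools import partial
from lazyflow.request import Request, RequestPool

def prefetch_blocks(block_rois, ensure_cached):
    # Submit one request per block, then wait for all of them to finish.
    pool = RequestPool()
    for block_roi in block_rois:
        pool.add(Request(partial(ensure_cached, block_roi)))
    pool.wait()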
Example #9
    def _setInSlotInput(self, slot, subindex, roi, value):
        assert len(roi.stop) == len(self.Input.meta.shape), "roi: {} has the wrong number of dimensions for Input shape: {}".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), "roi: {} is out-of-bounds for Input shape: {}".format( roi, self.Input.meta.shape )
        
        block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))  # list(): len() is taken below

        # Copy data to each block
        logger.debug( "Copying data INTO {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within source array and slicing within this block
            source_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            # Copy from source to block
            dataset = self._getBlockDataset( entire_block_roi )
            dataset[ roiToSlice( *block_relative_intersection ) ] = value[ roiToSlice(*source_relative_intersection) ]

            # Here, we assume that if this function is used to update ANY PART of a 
            #  block, the caller is responsible for updating the ENTIRE block.
            # Therefore, this block is no longer 'dirty'
            self._dirtyBlocks.discard( block_start )
Example #10
    def _executePredictionImage(self, roi, destination):
        # Determine intersecting blocks
        block_shape = self._getFullShape(self.BlockShape3dDict.value)
        block_starts = getIntersectingBlocks(block_shape,
                                             (roi.start, roi.stop))
        block_starts = list(map(tuple, block_starts))  # list(): iterated twice below

        # Ensure that block pipelines exist (create first if necessary)
        for block_start in block_starts:
            self._ensurePipelineExists(block_start)

        # Retrieve result from each block, and write into the appropriate region of the destination
        # TODO: Parallelize this loop
        for block_start in block_starts:
            opBlockPipeline = self._blockPipelines[block_start]
            block_roi = opBlockPipeline.block_roi
            block_intersection = getIntersection(block_roi,
                                                 (roi.start, roi.stop))
            block_relative_intersection = numpy.subtract(
                block_intersection, block_roi[0])
            destination_relative_intersection = numpy.subtract(
                block_intersection, roi.start)

            destination_slice = roiToSlice(*destination_relative_intersection)
            req = opBlockPipeline.PredictionImage(*block_relative_intersection)
            req.writeInto(destination[destination_slice])
            req.wait()

        return destination
Example #11
 def propagateDirty(self, dirtySlot, subindex, input_dirty_roi):
     input_dirty_roi = (input_dirty_roi.start, input_dirty_roi.stop)
     intersection = getIntersection(input_dirty_roi, self._roi, False)
     if intersection:
         output_dirty_roi = numpy.array(intersection)
         output_dirty_roi -= self._roi[0]
         output_dirty_roi = map(tuple, output_dirty_roi)
         self.Output.setDirty(*output_dirty_roi)
Example #12
 def propagateDirty(self, dirtySlot, subindex, input_dirty_roi):
     input_dirty_roi = ( input_dirty_roi.start, input_dirty_roi.stop )
     intersection = getIntersection( input_dirty_roi, self._roi, False )
     if intersection:
         output_dirty_roi = numpy.array(intersection)
         output_dirty_roi -= self._roi[0]
         output_dirty_roi = map( tuple, output_dirty_roi )
         self.Output.setDirty( *output_dirty_roi )
Example #13
    def read(self, view_roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
             roi should be relative to the view
        """
        output_axes = self.description.output_axes
        roi_transposed = list(zip(*view_roi))
        roi_dict = dict( zip(output_axes, roi_transposed) )
        view_roi = list(zip( *(roi_dict['z'], roi_dict['y'], roi_dict['x']) ))

        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*'zyx')
        
        assert numpy.array(view_roi).shape == (2,3), "Invalid roi for 3D volume: {}".format( view_roi )
        view_roi = numpy.array(view_roi)
        assert (result_out.shape == (view_roi[1] - view_roi[0])).all()
        
        # User gave roi according to the view output.
        # Now offset it to find the global roi.
        roi = view_roi + self.description.view_origin_zyx
        
        tile_blockshape = (1,) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks( tile_blockshape, roi )

        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds( self.description.bounds_zyx, tile_blockshape, tile_start )
            tile_roi_in = numpy.array(tile_roi_in)

            # This tile's portion of the roi
            intersecting_roi = getIntersection( roi, tile_roi_in )
            intersecting_roi = numpy.array( intersecting_roi )

            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]
            
            # Get a view to the output slice
            result_region = result_out[roiToSlice(*destination_relative_intersection)]
            
            rest_args = self._get_rest_args(tile_blockshape, tile_roi_in)
            if self.description.tile_url_format.startswith('http'):
                retrieval_fn = partial( self._retrieve_remote_tile, rest_args, tile_relative_intersection, result_region )
            else:
                retrieval_fn = partial( self._retrieve_local_tile, rest_args, tile_relative_intersection, result_region )            

            PARALLEL_REQ = True
            if PARALLEL_REQ:
                pool.add( Request( retrieval_fn ) )
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()

        if PARALLEL_REQ:
            with Timer() as timer:
                pool.wait()
            logger.info("Loading {} tiles took a total of {}".format( len(tile_starts), timer.seconds() ))
Example #14
    def read(self, view_roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
             roi should be relative to the view
        """
        output_axes = self.description.output_axes
        roi_transposed = list(zip(*view_roi))
        roi_dict = dict(list(zip(output_axes, roi_transposed)))
        view_roi = list(zip(*(roi_dict["z"], roi_dict["y"], roi_dict["x"])))

        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*"zyx")

        assert numpy.array(view_roi).shape == (2, 3), "Invalid roi for 3D volume: {}".format(view_roi)
        view_roi = numpy.array(view_roi)
        assert (result_out.shape == (view_roi[1] - view_roi[0])).all()

        # User gave roi according to the view output.
        # Now offset it to find the global roi.
        roi = view_roi + self.description.view_origin_zyx

        tile_blockshape = (1,) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks(tile_blockshape, roi)

        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds(self.description.bounds_zyx, tile_blockshape, tile_start)
            tile_roi_in = numpy.array(tile_roi_in)

            # This tile's portion of the roi
            intersecting_roi = getIntersection(roi, tile_roi_in)
            intersecting_roi = numpy.array(intersecting_roi)

            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]

            # Get a view to the output slice
            result_region = result_out[roiToSlice(*destination_relative_intersection)]

            rest_args = self._get_rest_args(tile_blockshape, tile_roi_in)
            if self.description.tile_url_format.startswith("http"):
                retrieval_fn = partial(self._retrieve_remote_tile, rest_args, tile_relative_intersection, result_region)
            else:
                retrieval_fn = partial(self._retrieve_local_tile, rest_args, tile_relative_intersection, result_region)

            PARALLEL_REQ = True
            if PARALLEL_REQ:
                pool.add(Request(retrieval_fn))
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()

        if PARALLEL_REQ:
            with Timer() as timer:
                pool.wait()
            logger.info("Loading {} tiles took a total of {}".format(len(tile_starts), timer.seconds()))
Example #15
 def testAssertNonIntersect(self):
     roiA = [(10,10,10), (20,20,20)]
     roiB = [(15,26,27), (16,30,30)]
     try:
         intersection = getIntersection( roiA, roiB )
     except AssertionError:
         pass
     else: 
         assert False, "getIntersection() was supposed to assert because the parameters don't intersect!"
Example #16
         def roiGen():
             # Plain iteration replaces the Python-2-only block_iter.next() call
             # and lets the generator end cleanly (no StopIteration escapes, per PEP 479).
             for block_start in block_starts:
                 block_bounds = getBlockBounds( outputSlot.meta.shape, blockshape, block_start )
                 block_intersecting_portion = getIntersection( block_bounds, roi )

                 logger.debug( "Requesting Roi: {}".format( block_bounds ) )
                 yield block_intersecting_portion
Example #17
         def roiGen():
             # Plain iteration replaces the Python-2-only block_iter.next() call
             # and lets the generator end cleanly (no StopIteration escapes, per PEP 479).
             for block_start in block_starts:
                 block_bounds = getBlockBounds( outputSlot.meta.shape, blockshape, block_start )
                 block_intersecting_portion = getIntersection( block_bounds, roi )

                 logger.debug( "Requesting Roi: {}".format( block_bounds ) )
                 yield block_intersecting_portion
Example #18
 def testAssertNonIntersect(self):
     roiA = [(10, 10, 10), (20, 20, 20)]
     roiB = [(15, 26, 27), (16, 30, 30)]
     try:
         intersection = getIntersection(roiA, roiB)
     except AssertionError:
         pass
     else:
         assert False, "getIntersection() was supposed to assert because the parameters don't intersect!"
Example #19
    def _setInSlotInput(self,
                        slot,
                        subindex,
                        roi,
                        value,
                        store_zero_blocks=True):
        """
        Write the data in the array 'value' into the cache.
        If the optional store_zero_blocks param is False, then don't bother 
        creating cache blocks for blocks that are totally zero.
        """
        assert len(roi.stop) == len(self.Input.meta.shape), \
            "roi: {} has the wrong number of dimensions for Input shape: {}"\
            "".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), \
            "roi: {} is out-of-bounds for Input shape: {}"\
            "".format( roi, self.Input.meta.shape )

        block_starts = getIntersectingBlocks(self._blockshape,
                                             (roi.start, roi.stop))
        block_starts = list(map(tuple, block_starts))  # list(): len() is taken below

        # Copy data to each block
        logger.debug("Copying data INTO {} blocks...".format(
            len(block_starts)))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape,
                                              self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop),
                                               entire_block_roi)

            # Compute slicing within source array and slicing within this block
            source_relative_intersection = numpy.subtract(
                intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(
                intersecting_roi, block_start)

            new_block_data = value[roiToSlice(*source_relative_intersection)]
            if (not store_zero_blocks and new_block_data.sum() == 0
                    and block_start not in self._cacheFiles):
                # Special fast-path: If this block doesn't exist yet,
                #  don't bother creating if we're just going to fill it with zeros
                # (Used by the OpCompressedUserLabelArray)
                pass
            else:
                # Copy from source to block
                dataset = self._getBlockDataset(entire_block_roi)
                dataset[roiToSlice(
                    *block_relative_intersection)] = new_block_data

            # Here, we assume that if this function is used to update ANY PART of a
            #  block, the caller is responsible for updating the ENTIRE block.
            # Therefore, this block is no longer 'dirty'
            self._dirtyBlocks.discard(block_start)
Example #20
def normalize_synapse_ids(current_slice, current_roi, previous_slice, previous_roi, maxLabel):
    current_roi = numpy.array(current_roi)
    intersection_roi = None
    if previous_roi is not None:
        previous_roi = numpy.array(previous_roi)
        current_roi_2d = current_roi[:, :-1]
        previous_roi_2d = previous_roi[:, :-1]
        intersection_roi = getIntersection( current_roi_2d, previous_roi_2d, assertIntersect=False )

    if intersection_roi is None or previous_slice is None or abs(int(current_roi[0,2]) - int(previous_roi[0,2])) > 1:
        # We want our synapse ids to be consecutive, so we do a proper relabeling.
        # If we could guarantee that the input slice was already consecutive, we could do this:
        # relabeled_current = numpy.where( current_slice, current_slice+maxLabel, 0 )
        # ... but that's not the case.

        current_unique_labels = numpy.unique(current_slice)
        assert current_unique_labels[0] == 0, "This function assumes that not all pixels belong to detections."
        if len(current_unique_labels) == 1:
            # No objects in this slice.
            return current_slice, maxLabel
        max_current_label = current_unique_labels[-1]
        relabel = numpy.zeros( (max_current_label+1,), dtype=numpy.uint32 )
        new_max = maxLabel + len(current_unique_labels)-1
        relabel[(current_unique_labels[1:],)] = numpy.arange( maxLabel+1, new_max+1, dtype=numpy.uint32 )
        return relabel[current_slice], new_max
    
    # Extract the intersecting region from the current/prev slices,
    #  so it's easy to compare corresponding pixels
    current_intersection_roi = numpy.subtract(intersection_roi, current_roi_2d[0])
    prev_intersection_roi = numpy.subtract(intersection_roi, previous_roi_2d[0])    
    current_intersection_slice = current_slice[roiToSlice(*current_intersection_roi)]
    prev_intersection_slice = previous_slice[roiToSlice(*prev_intersection_roi)]

    # omit label 0
    previous_slice_objects = numpy.unique(previous_slice)[1:]
    current_slice_objects = numpy.unique(current_slice)[1:]
    max_current_object = max(0, *current_slice_objects)
    relabel = numpy.zeros((max_current_object+1,), dtype=numpy.uint32)
    
    for cc in previous_slice_objects:
        current_labels = numpy.unique(current_intersection_slice[prev_intersection_slice==cc].flat)
        for cur_label in current_labels:
            relabel[cur_label] = cc
    
    for cur_object in current_slice_objects:
        if relabel[cur_object]==0:
            relabel[cur_object] = maxLabel+1
            maxLabel=maxLabel+1

    relabel[0] = 0

    # Relabel the entire current slice
    relabeled_slice_objects = relabel[current_slice]
    return relabeled_slice_objects, maxLabel
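The final relabeling step above (relabel[current_slice]) is a plain lookup-table gather: index an array of new labels with the old label image. A tiny sketch with made-up labels:

import numpy

labels = numpy.array([[0, 1, 1],
                      [0, 3, 0]], dtype=numpy.uint32)

relabel = numpy.zeros(labels.max() + 1, dtype=numpy.uint32)
relabel[1] = 7   # old label 1 -> 7
relabel[3] = 8   # old label 3 -> 8

print(relabel[labels])
# [[0 7 7]
#  [0 8 0]]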
Example #21
 def _handleDirtyPrediction(self, slot, roi):
     """
     Forward dirty notifications from our internal output slot to the external one,
     but first discard the halo and offset the roi to compensate for the halo.
     """
     # Discard halo.  dirtyRoi is in internal coordinates (i.e. relative to halo start)
     dirtyRoi = getIntersection( (roi.start, roi.stop), self._output_roi, assertIntersect=False )
     if dirtyRoi is not None:
         halo_offset = numpy.subtract(self.block_roi[0], self._halo_roi[0])
         adjusted_roi = dirtyRoi - halo_offset # adjusted_roi is in output coordinates (relative to output block start)
         self.PredictionImage.setDirty( *adjusted_roi )
Example #22
 def _handleDirtyPrediction(self, slot, roi):
     """
     Forward dirty notifications from our internal output slot to the external one,
     but first discard the halo and offset the roi to compensate for the halo.
     """
     # Discard halo.  dirtyRoi is in internal coordinates (i.e. relative to halo start)
     dirtyRoi = getIntersection( (roi.start, roi.stop), self._output_roi, assertIntersect=False )
     if dirtyRoi is not None:
         halo_offset = numpy.subtract(self.block_roi[0], self._halo_roi[0])
         adjusted_roi = dirtyRoi - halo_offset # adjusted_roi is in output coordinates (relative to output block start)
         self.PredictionImage.setDirty( *adjusted_roi )
Example #23
File: generic.py Project: ilastik/lazyflow
 def propagateDirty(self, dirtySlot, subindex, input_dirty_roi):
     input_dirty_roi = (input_dirty_roi.start, input_dirty_roi.stop)
     if len(input_dirty_roi[0]) != len(self._roi[0]):
         # The dimensionality of the data is changing.
         # The whole workflow must be updating, so don't bother with dirty notifications.
         return
     intersection = getIntersection(input_dirty_roi, self._roi, False)
     if intersection:
         output_dirty_roi = numpy.array(intersection)
         output_dirty_roi -= self._roi[0]
         output_dirty_roi = map(tuple, output_dirty_roi)
         self.Output.setDirty(*output_dirty_roi)
Example #24
 def propagateDirty(self, dirtySlot, subindex, input_dirty_roi):
     input_dirty_roi = (input_dirty_roi.start, input_dirty_roi.stop)
     if len(input_dirty_roi[0]) != len(self._roi[0]):
         # The dimensionality of the data is changing.
         # The whole workflow must be updating, so don't bother with dirty notifications.
         return
     intersection = getIntersection(input_dirty_roi, self._roi, False)
     if intersection:
         output_dirty_roi = numpy.array(intersection)
         output_dirty_roi -= self._roi[0]
         output_dirty_roi = map(tuple, output_dirty_roi)
         self.Output.setDirty(*output_dirty_roi)
Example #25
            def roiGen():
                block_iter = block_starts.__iter__()
                while True:
                    try:
                        block_start = next(block_iter)
                    except StopIteration:
                        # As of Python 3.7, not allowed to let StopIteration exceptions escape a generator
                        # https://www.python.org/dev/peps/pep-0479
                        break
                    else:
                        block_bounds = getBlockBounds(outputSlot.meta.shape, blockshape, block_start)
                        block_intersecting_portion = getIntersection(block_bounds, roi)

                        logger.debug("Requesting Roi: {}".format(block_bounds))
                        yield block_intersecting_portion
Example #26
    def _copyData(self, roi, destination, block_starts):
        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            # Copy from block to destination
            dataset = self._getBlockDataset( entire_block_roi )
            destination[ roiToSlice(*destination_relative_intersection) ] = dataset[ roiToSlice( *block_relative_intersection ) ]
Example #27
    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert issubclass(type(classifier_factory), LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )
        
        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                block_label_roi = sliceToRoi( block_slicing, image_slot.meta.shape )

                # Ask for the halo needed by the classifier
                axiskeys = image_slot.meta.getAxisKeys()
                halo_shape = classifier_factory.get_halo_shape(axiskeys)
                assert len(halo_shape) == len( block_label_roi[0] )
                assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

                # Expand block by halo, then clip to image bounds
                block_label_roi = numpy.array( block_label_roi )
                block_label_roi[0] -= halo_shape
                block_label_roi[1] += halo_shape
                block_label_roi = getIntersection( block_label_roi, roiFromShape(image_slot.meta.shape) )

                block_image_roi = numpy.array( block_label_roi )
                assert (block_image_roi[:, -1] == [0,1]).all()
                num_channels = image_slot.meta.shape[-1]
                block_image_roi[:, -1] = [0, num_channels]

                # Ensure the results are plain ndarray, not VigraArray, 
                #  which some classifiers might have trouble with.
                block_label_data = numpy.asarray( label_slot(*block_label_roi).wait() )
                block_image_data = numpy.asarray( image_slot(*block_image_roi).wait() )
                
                label_data_blocks.append( block_label_data )
                image_data_blocks.append( block_image_data )
                
        logger.debug("Training new classifier: {}".format( classifier_factory.description ))
        classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks )
        assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )
        result[0] = classifier
        return result
Example #28
    def propagateDirty(self, slot, subindex, roi):
        dirty_roi = self._standardize_roi( roi.start, roi.stop )
        maximum_roi = roiFromShape(self.Input.meta.shape)
        maximum_roi = self._standardize_roi( *maximum_roi )
        
        if dirty_roi == maximum_roi:
            # Optimize the common case:
            # Everything is dirty, so no need to loop
            self._resetBlocks()
        else:
            # FIXME: This is O(N) for now.
            #        We should speed this up by maintaining a bookkeeping data structure in execute().
            for block_roi in list(self._block_data.keys()):  # list() copy: freeBlock() may modify the dict
                if getIntersection(block_roi, dirty_roi, assertIntersect=False):
                    self.freeBlock(block_roi)

        self.Output.setDirty( roi.start, roi.stop )
Example #29
            def roiGen():
                block_iter = block_starts.__iter__()
                while True:
                    try:
                        block_start = next(block_iter)
                    except StopIteration:
                        # As of Python 3.7, not allowed to let StopIteration exceptions escape a generator
                        # https://www.python.org/dev/peps/pep-0479
                        break
                    else:
                        block_bounds = getBlockBounds(outputSlot.meta.shape,
                                                      blockshape, block_start)
                        block_intersecting_portion = getIntersection(
                            block_bounds, roi)

                        logger.debug("Requesting Roi: {}".format(block_bounds))
                        yield block_intersecting_portion
Example #30
    def propagateDirty(self, slot, subindex, roi):
        dirty_roi = self._standardize_roi(roi.start, roi.stop)
        maximum_roi = roiFromShape(self.Input.meta.shape)
        maximum_roi = self._standardize_roi(*maximum_roi)

        if dirty_roi == maximum_roi:
            # Optimize the common case:
            # Everything is dirty, so no need to loop
            self._resetBlocks()
        else:
            # FIXME: This is O(N) for now.
            #        We should speed this up by maintaining a bookkeeping data structure in execute().
            for block_roi in list(self._block_data.keys()):  # list() copy: freeBlock() may modify the dict
                if getIntersection(block_roi, dirty_roi,
                                   assertIntersect=False):
                    self.freeBlock(block_roi)

        self.Output.setDirty(roi.start, roi.stop)
Example #31
    def _setInSlotInput(self, slot, subindex, roi, value, store_zero_blocks=True):
        """
        Write the data in the array 'value' into the cache.
        If the optional store_zero_blocks param is False, then don't bother 
        creating cache blocks for blocks that are totally zero.
        """
        assert len(roi.stop) == len(self.Input.meta.shape), \
            "roi: {} has the wrong number of dimensions for Input shape: {}"\
            "".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), \
            "roi: {} is out-of-bounds for Input shape: {}"\
            "".format( roi, self.Input.meta.shape )
        
        block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))  # list(): len() is taken below

        # Copy data to each block
        logger.debug( "Copying data INTO {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within source array and slicing within this block
            source_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            new_block_data = value[ roiToSlice(*source_relative_intersection) ]
            if not store_zero_blocks and new_block_data.sum() == 0 and block_start not in self._cacheFiles:
                # Special fast-path: If this block doesn't exist yet, 
                #  don't bother creating if we're just going to fill it with zeros
                # (Used by the OpCompressedUserLabelArray)
                pass
            else:
                # Copy from source to block
                dataset = self._getBlockDataset( entire_block_roi )
                dataset[ roiToSlice( *block_relative_intersection ) ] = new_block_data
    
            # Here, we assume that if this function is used to update ANY PART of a 
            #  block, the caller is responsible for updating the ENTIRE block.
            # Therefore, this block is no longer 'dirty'
            self._dirtyBlocks.discard( block_start )
Example #32
    def _executePredictionImage(self, slot, roi, destination):
        roi_one_channel = numpy.array((roi.start, roi.stop))
        roi_one_channel[..., -1] = (0, 1)
        # Determine intersecting blocks
        block_shape = self._getFullShape(self.BlockShape3dDict.value)
        block_starts = getIntersectingBlocks(block_shape, roi_one_channel)
        block_starts = list(map(tuple, block_starts))  # list(): iterated twice below

        # Ensure that block pipelines exist (create first if necessary)
        for block_start in block_starts:
            self._ensurePipelineExists(block_start)

        # Retrieve result from each block, and write into the appropriate region of the destination
        pool = RequestPool()
        for block_start in block_starts:
            opBlockPipeline = self._blockPipelines[block_start]
            block_roi = opBlockPipeline.block_roi
            block_intersection = getIntersection(block_roi, roi_one_channel)
            block_relative_intersection = numpy.subtract(
                block_intersection, block_roi[0])
            destination_relative_intersection = numpy.subtract(
                block_intersection, roi_one_channel[0])

            block_slot = opBlockPipeline.PredictionImage
            if slot == self.ProbabilityChannelImage:
                block_slot = opBlockPipeline.ProbabilityChannelImage
                # Add channels back to roi
                # request all channels
                block_relative_intersection[..., -1] = (
                    0, opBlockPipeline.ProbabilityChannelImage.meta.shape[-1])
                # But only write the ones that were specified in the original roi
                destination_relative_intersection[..., -1] = (roi.start[-1],
                                                              roi.stop[-1])

            # Request the data
            destination_slice = roiToSlice(*destination_relative_intersection)
            req = block_slot(*block_relative_intersection)
            req.writeInto(destination[destination_slice])
            pool.add(req)
        pool.wait()

        return destination
Example #33
    def _executePredictionImage(self, slot, roi, destination):
        roi_one_channel = numpy.array( (roi.start, roi.stop) )
        roi_one_channel[...,-1] = (0,1)
        # Determine intersecting blocks
        block_shape = self._getFullShape( self.BlockShape3dDict.value )
        block_starts = getIntersectingBlocks( block_shape, roi_one_channel )
        block_starts = list(map( tuple, block_starts ))  # list(): iterated twice below

        # Ensure that block pipelines exist (create first if necessary)
        for block_start in block_starts:
            self._ensurePipelineExists(block_start)

        # Retrieve result from each block, and write into the appropriate region of the destination
        pool = RequestPool()
        for block_start in block_starts:
            opBlockPipeline = self._blockPipelines[block_start]
            block_roi = opBlockPipeline.block_roi
            block_intersection = getIntersection( block_roi, roi_one_channel )
            block_relative_intersection = numpy.subtract(block_intersection, block_roi[0])
            destination_relative_intersection = numpy.subtract(block_intersection, roi_one_channel[0])

            block_slot = opBlockPipeline.PredictionImage            
            if slot == self.ProbabilityChannelImage:
                block_slot = opBlockPipeline.ProbabilityChannelImage
                # Add channels back to roi
                # request all channels
                block_relative_intersection[...,-1] = (0, opBlockPipeline.ProbabilityChannelImage.meta.shape[-1])
                # But only write the ones that were specified in the original roi
                destination_relative_intersection[...,-1] = ( roi.start[-1], roi.stop[-1] )

            # Request the data
            destination_slice = roiToSlice( *destination_relative_intersection )
            req = block_slot( *block_relative_intersection )
            req.writeInto( destination[destination_slice] )
            pool.add( req )
        pool.wait()

        return destination
Example #34
    def _copyData(self, roi, destination, block_starts):
        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug("Copying data from {} blocks...".format(
            len(block_starts)))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape,
                                              self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop),
                                               entire_block_roi)

            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(
                intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(
                intersecting_roi, block_start)

            # Copy from block to destination
            dataset = self._getBlockDataset(entire_block_roi)
            destination[roiToSlice(
                *destination_relative_intersection)] = dataset[roiToSlice(
                    *block_relative_intersection)]
Example #35
    def _setInSlotInput(self,
                        slot,
                        subindex,
                        roi,
                        value,
                        store_zero_blocks=True):
        """
        Write the data in the array 'value' into the cache.
        If the optional store_zero_blocks param is False, then don't bother
        creating cache blocks for blocks that are totally zero.
        """
        assert len(roi.stop) == len(self.Input.meta.shape), \
            "roi: {} has the wrong number of dimensions for Input shape: {}".format(
                roi, self.Input.meta.shape)
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), \
            "roi: {} is out-of-bounds for Input shape: {}".format(
                roi, self.Input.meta.shape)

        block_starts = getIntersectingBlocks(self._blockshape,
                                             (roi.start, roi.stop))
        block_starts = list(map(tuple, block_starts))

        # Copy data to each block
        logger.debug("Copying data INTO {} blocks...".format(
            len(block_starts)))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape,
                                              self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop),
                                               entire_block_roi)

            # Compute slicing within source array and slicing within this block
            source_relative_intersection = numpy.subtract(
                intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(
                intersecting_roi, block_start)
            source_relative_intersection_slicing = roiToSlice(
                *source_relative_intersection)
            block_relative_intersection_slicing = roiToSlice(
                *block_relative_intersection)

            new_block_data = value[source_relative_intersection_slicing]
            new_block_sum = new_block_data.sum()
            if not store_zero_blocks and new_block_sum == 0 and block_start not in self._cacheFiles:
                # Special fast-path: If this block doesn't exist yet,
                #  don't bother creating if we're just going to fill it with zeros.
                # (This feature is used by the OpCompressedUserLabelArray)
                pass
            else:
                # Copy from source to block
                dataset = self._getBlockDataset(entire_block_roi)
                if self.Output.meta.has_mask:
                    dataset["data"][
                        block_relative_intersection_slicing] = new_block_data.data
                    dataset["mask"][
                        block_relative_intersection_slicing] = new_block_data.mask
                    dataset["fill_value"][()] = new_block_data.fill_value

                    # Untested. Write a test to use this.
                    # # If we can, remove this block entirely.
                    # if not store_zero_blocks and new_block_sum == 0 and (dataset["data"][:] == 0).all() and (dataset["mask"]).any() and (dataset["fill_value"] == 0).all():
                    #     with self._lock:
                    #         with self._blockLocks[block_start]:
                    #            self._cacheFiles[block_start].close()
                    #            del self._cacheFiles[block_start]
                    #         del self._blockLocks[block_start]
                else:
                    dataset[
                        block_relative_intersection_slicing] = new_block_data

                    # If we can, remove this block entirely.
                    if not store_zero_blocks and new_block_sum == 0 and (
                            dataset[:] == 0).all():
                        with self._lock:
                            with self._blockLocks[block_start]:
                                self._cacheFiles[block_start].close()
                                del self._cacheFiles[block_start]
                            del self._blockLocks[block_start]

            # Here, we assume that if this function is used to update ANY PART of a
            #  block, the caller is responsible for updating the ENTIRE block.
            # Therefore, this block is no longer 'dirty'
            self._dirtyBlocks.discard(block_start)
Example #36
    def read(self, roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
        """
        output_axes = self.description.output_axes
        roi_transposed = list(zip(*roi))
        roi_dict = dict(zip(output_axes, roi_transposed))
        roi = list(zip(*(roi_dict['z'], roi_dict['y'], roi_dict['x'])))

        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*'zyx')

        assert numpy.array(roi).shape == (
            2, 3), "Invalid roi for 3D volume: {}".format(roi)
        roi = numpy.array(roi)
        assert (result_out.shape == (roi[1] - roi[0])).all()

        tile_blockshape = (1, ) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks(tile_blockshape, roi)

        # We use a fresh tmp dir for each read to avoid conflicts between parallel reads
        tmpdir = tempfile.mkdtemp()

        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds(self.description.shape_zyx,
                                         tile_blockshape, tile_start)
            tile_roi_in = numpy.array(tile_roi_in)

            # This tile's portion of the roi
            intersecting_roi = getIntersection(roi, tile_roi_in)
            intersecting_roi = numpy.array(intersecting_roi)

            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(
                intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]

            # Get a view to the output slice
            result_region = result_out[roiToSlice(
                *destination_relative_intersection)]

            # Special feature:
            # Some slices are missing, in which case we provide fake data from a different slice.
            # Overwrite the rest args to pull data from an alternate source tile.
            z_start = tile_roi_in[0][0]
            if z_start in self._slice_remapping:
                new_source_slice = self._slice_remapping[z_start]
                tile_roi_in[0][0] = new_source_slice
                tile_roi_in[1][0] = new_source_slice + 1

            tile_index = numpy.array(tile_roi_in[0]) // tile_blockshape  # integer division keeps indices integral
            rest_args = {
                'z_start': tile_roi_in[0][0],
                'z_stop': tile_roi_in[1][0],
                'y_start': tile_roi_in[0][1],
                'y_stop': tile_roi_in[1][1],
                'x_start': tile_roi_in[0][2],
                'x_stop': tile_roi_in[1][2],
                'z_index': tile_index[0],
                'y_index': tile_index[1],
                'x_index': tile_index[2]
            }

            # Quick sanity check
            assert rest_args['z_index'] == rest_args['z_start']

            retrieval_fn = partial(self._retrieve_tile, tmpdir, rest_args,
                                   tile_relative_intersection, result_region)

            PARALLEL_REQ = True
            if PARALLEL_REQ:
                pool.add(Request(retrieval_fn))
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()

        pool.wait()

        # Clean up our temp files.
        shutil.rmtree(tmpdir)
Example #37
 def is_in_block(self, block_start, coord):
     block_roi = self.get_block_roi(block_start)
     coord_roi = (coord, TinyVector(coord) + 1)
     intersection = getIntersection(block_roi, coord_roi, False)
     return (intersection is not None)
Example #38
 def is_in_block(self, block_start, coord):
     block_roi = self.get_block_roi(block_start)
     coord_roi = (coord, TinyVector( coord ) + 1)
     intersection = getIntersection(block_roi, coord_roi, False)
     return (intersection is not None)
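The membership test above works by turning the coordinate into a degenerate one-voxel roi and intersecting it with the block; a short sketch (again assuming lazyflow.roi's getIntersection, whose third positional argument is assertIntersect):

from lazyflow.roi import getIntersection

block_roi = [(0, 0, 0), (10, 10, 10)]
coord = (3, 7, 9)
coord_roi = (coord, tuple(c + 1 for c in coord))   # ((3, 7, 9), (4, 8, 10))

print(getIntersection(block_roi, coord_roi, False) is not None)   # True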
Example #39
    def execute(self, slot, subindex, roi, result):
        classifier = self.Classifier.value

        # Training operator may return 'None' if there was no data to train with
        skip_prediction = (classifier is None)

        # Shortcut: If the mask is totally zero, skip this request entirely
        if not skip_prediction and self.PredictionMask.ready():
            mask_roi = numpy.array((roi.start, roi.stop))
            mask_roi[:, -1:] = [[0], [1]]
            start, stop = map(tuple, mask_roi)
            mask = self.PredictionMask(start, stop).wait()
            skip_prediction = not numpy.any(mask)

        if skip_prediction:
            result[:] = 0.0
            return result

        assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )

        upstream_roi = (roi.start, roi.stop)
        # Ask for the halo needed by the classifier
        axiskeys = self.Image.meta.getAxisKeys()
        halo_shape = classifier.get_halo_shape(axiskeys)
        assert len(halo_shape) == len(upstream_roi[0])
        assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

        # Expand block by halo, then clip to image bounds
        upstream_roi = numpy.array(upstream_roi)
        upstream_roi[0] -= halo_shape
        upstream_roi[1] += halo_shape
        upstream_roi = getIntersection(upstream_roi,
                                       roiFromShape(self.Image.meta.shape))
        upstream_roi = numpy.asarray(upstream_roi)

        # Determine how to extract the data from the result (without the halo)
        downstream_roi = numpy.array((roi.start, roi.stop))
        downstream_channels = self.PMaps.meta.shape[-1]
        roi_within_result = downstream_roi - upstream_roi[0]
        roi_within_result[:, -1] = [0, downstream_channels]

        # Request all upstream channels
        input_channels = self.Image.meta.shape[-1]
        upstream_roi[:, -1] = [0, input_channels]

        # Request the data
        input_data = self.Image(*upstream_roi).wait()
        axistags = self.Image.meta.axistags
        probabilities = classifier.predict_probabilities_pixelwise(
            input_data, axistags)

        # We're expecting a channel for each label class.
        # If we didn't provide at least one sample for each label,
        #  we may get back fewer channels.
        if probabilities.shape[-1] != self.PMaps.meta.shape[-1]:
            # Copy to an array of the correct shape
            # This is slow, but it's an unusual case
            assert probabilities.shape[-1] == len(classifier.known_classes)
            full_probabilities = numpy.zeros(probabilities.shape[:-1] +
                                             (self.PMaps.meta.shape[-1], ),
                                             dtype=numpy.float32)
            for i, label in enumerate(classifier.known_classes):
                full_probabilities[..., label - 1] = probabilities[..., i]

            probabilities = full_probabilities

        # Extract requested region (discard halo)
        probabilities = probabilities[roiToSlice(*roi_within_result)]

        # Copy only the prediction channels the client requested.
        result[...] = probabilities[..., roi.start[-1]:roi.stop[-1]]
        return result
    def _executeProjection2D(self, roi, destination):
        assert sum(TinyVector(destination.shape) > 1) <= 2, "Projection result must be exactly 2D"
        
        # First, we have to determine which axis we are projecting along.
        # We infer this from the shape of the roi.
        # For example, if the roi is of shape 
        #  zyx = (1,256,256), then we know we're projecting along Z
        # If more than one axis has a width of 1, then we choose an 
        #  axis according to the following priority order: zyxt
        tagged_input_shape = self.Input.meta.getTaggedShape()
        tagged_result_shape = collections.OrderedDict( zip( tagged_input_shape.keys(),
                                                            destination.shape ) )
        nonprojection_axes = []
        for key in tagged_input_shape.keys():
            if (key == 'c' or tagged_input_shape[key] == 1 or tagged_result_shape[key] > 1):
                nonprojection_axes.append( key )
            
        possible_projection_axes = set(tagged_input_shape) - set(nonprojection_axes)
        if len(possible_projection_axes) == 0:
            # If the image is 2D to begin with, 
            #   then the projection is simply the same as the normal output,
            #   EXCEPT it is made binary
            self.Output(roi.start, roi.stop).writeInto(destination).wait()
            
            # make binary
            numpy.greater(destination, 0, out=destination)
            return
        
        for k in 'zyxt':
            if k in possible_projection_axes:
                projection_axis_key = k
                break

        # Now we know which axis we're projecting along.
        # Proceed with the projection, working blockwise to avoid unnecessary work in unlabeled blocks
        
        projection_axis_index = self.Input.meta.getAxisKeys().index(projection_axis_key)
        projection_length = tagged_input_shape[projection_axis_key]
        input_roi = roi.copy()
        input_roi.start[projection_axis_index] = 0
        input_roi.stop[projection_axis_index] = projection_length

        destination[:] = 0.0

        # Get the logical blocking.
        block_starts = getIntersectingBlocks( self._blockshape, (input_roi.start, input_roi.stop) )

        # (Parallelism wouldn't help here: h5py will serialize these requests anyway)
        block_starts = map( tuple, block_starts )
        for block_start in block_starts:
            if block_start not in self._cacheFiles:
                # No label data in this block.  Move on.
                continue

            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (input_roi.start, input_roi.stop), entire_block_roi )
            
            # Compute slicing within the deep array and slicing within this block
            deep_relative_intersection = numpy.subtract(intersecting_roi, input_roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
                        
            deep_data = self._getBlockDataset( entire_block_roi )[roiToSlice(*block_relative_intersection)]

            # make binary and convert to float
            deep_data_float = numpy.where( deep_data, numpy.float32(1.0), numpy.float32(0.0) )
            
            # multiply by slice-index
            deep_data_view = numpy.rollaxis(deep_data_float, projection_axis_index, 0)

            min_deep_slice_index = deep_relative_intersection[0][projection_axis_index]
            max_deep_slice_index = deep_relative_intersection[1][projection_axis_index]
            
            def calc_color_value(slice_index):
                # Note 1: We assume that the colortable has at least 256 entries in it,
                #           so, we try to ensure that all colors are above 1/256 
                #           (we don't want colors in low slices to be rounded to 0)
                # Note 2: Ideally, we'd use a min projection in the code below, so that 
                #           labels in the "back" slices would appear occluded.  But the 
                #           min projection would favor 0.0.  Instead, we invert the 
                #           relationship between color and slice index, do a max projection, 
                #           and then re-invert the colors after everything is done.
                #           Hence, this function starts with (1.0 - ...)
                return (1.0 - (float(slice_index) / projection_length)) * (1.0 - 1.0/255) + 1.0/255.0
            min_color_value = calc_color_value(min_deep_slice_index)
            max_color_value = calc_color_value(max_deep_slice_index)
            
            num_slices = max_deep_slice_index - min_deep_slice_index
            deep_data_view *= numpy.linspace( min_color_value, max_color_value, num=num_slices )\
                              [ (slice(None),) + (None,)*(deep_data_view.ndim-1) ]

            # Take the max projection of this block's data.
            block_max_projection = numpy.amax(deep_data_float, axis=projection_axis_index, keepdims=True)

            # Merge this block's projection into the overall projection.
            destination_relative_intersection = numpy.array(deep_relative_intersection)
            destination_relative_intersection[:, projection_axis_index] = (0,1)            
            destination_subview = destination[roiToSlice(*destination_relative_intersection)]            
            numpy.maximum(block_max_projection, destination_subview, out=destination_subview)
            
            # Invert the nonzero pixels so increasing colors correspond to increasing slices.
            # See comment in calc_color_value(), above.
            destination_subview[:] = numpy.where(destination_subview, 
                                                 numpy.float32(1.0) - destination_subview, 
                                                 numpy.float32(0.0))
        return
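The depth-coded projection above leans on a small trick: invert the color/slice relationship, take a max projection so that nearer labels win, then re-invert the nonzero results. A tiny standalone sketch of that trick (pure numpy, made-up label data, and a copy of the calc_color_value formula from above):

import numpy

projection_length = 4
labels = numpy.array([[0, 0],
                      [1, 0],
                      [0, 1],
                      [1, 0]], dtype=numpy.float32)   # shape (z=4, x=2), binary labels

def calc_color_value(slice_index):
    # Same inversion as above: low slice index -> high intermediate color.
    return (1.0 - float(slice_index) / projection_length) * (1.0 - 1.0 / 255) + 1.0 / 255

colors = numpy.array([calc_color_value(z) for z in range(projection_length)],
                     dtype=numpy.float32)
depth_coded = labels * colors[:, None]        # weight each slice by its (inverted) color
projection = depth_coded.max(axis=0)          # max projection, so nearer (lower-z) labels win
projection = numpy.where(projection, 1.0 - projection, 0.0)   # re-invert nonzero pixels
# Column 0 is labeled in slices 1 and 3 -> slice 1 wins; column 1 only in slice 2.
# After re-inversion the stored value increases with slice index, as intended above.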
Example #41
0
    def _setInSlotInput(self, slot, subindex, roi, value, store_zero_blocks=True):
        """
        Write the data in the array 'value' into the cache.
        If the optional store_zero_blocks param is False, then don't bother
        creating cache blocks for blocks that are totally zero.
        """
        assert len(roi.stop) == len(
            self.Input.meta.shape
        ), "roi: {} has the wrong number of dimensions for Input shape: {}".format(roi, self.Input.meta.shape)
        assert numpy.less_equal(
            roi.stop, self.Input.meta.shape
        ).all(), "roi: {} is out-of-bounds for Input shape: {}".format(roi, self.Input.meta.shape)

        block_starts = getIntersectingBlocks(self._blockshape, (roi.start, roi.stop))
        block_starts = list(map(tuple, block_starts))

        # Copy data to each block
        logger.debug("Copying data INTO {} blocks...".format(len(block_starts)))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape, self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop), entire_block_roi)

            # Compute slicing within source array and slicing within this block
            source_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            source_relative_intersection_slicing = roiToSlice(*source_relative_intersection)
            block_relative_intersection_slicing = roiToSlice(*block_relative_intersection)

            new_block_data = value[source_relative_intersection_slicing]
            new_block_sum = new_block_data.sum()
            if not store_zero_blocks and new_block_sum == 0 and block_start not in self._cacheFiles:
                # Special fast-path: If this block doesn't exist yet,
                #  don't bother creating if we're just going to fill it with zeros.
                # (This feature is used by the OpCompressedUserLabelArray)
                pass
            else:
                # Copy from source to block
                dataset = self._getBlockDataset(entire_block_roi)
                if self.Output.meta.has_mask:
                    dataset["data"][block_relative_intersection_slicing] = new_block_data.data
                    dataset["mask"][block_relative_intersection_slicing] = new_block_data.mask
                    dataset["fill_value"][()] = new_block_data.fill_value

                    # Untested. Write a test to use this.
                    # # If we can, remove this block entirely.
                    # if not store_zero_blocks and new_block_sum == 0 and (dataset["data"][:] == 0).all() and (dataset["mask"]).any() and (dataset["fill_value"] == 0).all():
                    #     with self._lock:
                    #         with self._blockLocks[block_start]:
                    #            self._cacheFiles[block_start].close()
                    #            del self._cacheFiles[block_start]
                    #         del self._blockLocks[block_start]
                else:
                    dataset[block_relative_intersection_slicing] = new_block_data

                    # If we can, remove this block entirely.
                    if not store_zero_blocks and new_block_sum == 0 and (dataset[:] == 0).all():
                        with self._lock:
                            with self._blockLocks[block_start]:
                                self._cacheFiles[block_start].close()
                                del self._cacheFiles[block_start]
                            del self._blockLocks[block_start]

            # Here, we assume that if this function is used to update ANY PART of a
            #  block, the caller is responsible for updating the ENTIRE block.
            # Therefore, this block is no longer 'dirty'.
            self._dirtyBlocks.discard(block_start)
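_setInSlotInput above relies on getIntersectingBlocks to enumerate every block touched by the written roi. A hedged stand-in for intuition only (it assumes a regular grid of block_shape blocks anchored at the origin; it is not lazyflow's implementation):

import itertools

def intersecting_block_starts(block_shape, roi):
    # Yield the start coordinate of every grid-aligned block touching [start, stop).
    start, stop = roi
    first = [(s // b) * b for s, b in zip(start, block_shape)]     # round start down to the grid
    ranges = [range(f, st, b) for f, st, b in zip(first, stop, block_shape)]
    return list(itertools.product(*ranges))

print(intersecting_block_starts((10, 10), [(5, 0), (15, 25)]))
# -> [(0, 0), (0, 10), (0, 20), (10, 0), (10, 10), (10, 20)]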
Example #42
0
    def read(self, roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
        """
        output_axes = self.description.output_axes
        roi_transposed = zip(*roi)
        roi_dict = dict( zip(output_axes, roi_transposed) )
        roi = list(zip( *(roi_dict['z'], roi_dict['y'], roi_dict['x']) ))  # list() so numpy.array(roi) below sees the values (zip is lazy in Python 3)

        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*'zyx')
        
        assert numpy.array(roi).shape == (2,3), "Invalid roi for 3D volume: {}".format( roi )
        roi = numpy.array(roi)
        assert (result_out.shape == (roi[1] - roi[0])).all()
        
        tile_blockshape = (1,) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks( tile_blockshape, roi )

        # We use a fresh tmp dir for each read to avoid conflicts between parallel reads
        tmpdir = tempfile.mkdtemp()
        
        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds( self.description.shape_zyx, tile_blockshape, tile_start )
            tile_roi_in = numpy.array(tile_roi_in)

            # This tile's portion of the roi
            intersecting_roi = getIntersection( roi, tile_roi_in )
            intersecting_roi = numpy.array( intersecting_roi )

            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]
            
            # Get a view to the output slice
            result_region = result_out[roiToSlice(*destination_relative_intersection)]
            
            # Special feature: 
            # Some slices are missing, in which case we provide fake data from a different slice.
            # Overwrite the rest args to pull data from an alternate source tile.
            z_start = tile_roi_in[0][0]
            if z_start in self._slice_remapping:
                new_source_slice = self._slice_remapping[z_start]
                tile_roi_in[0][0] = new_source_slice
                tile_roi_in[1][0] = new_source_slice+1

            tile_index = numpy.array(tile_roi_in[0]) // tile_blockshape  # integer grid index of this tile (floor division)
            rest_args = { 'z_start' : tile_roi_in[0][0],
                          'z_stop'  : tile_roi_in[1][0],
                          'y_start' : tile_roi_in[0][1],
                          'y_stop'  : tile_roi_in[1][1],
                          'x_start' : tile_roi_in[0][2],
                          'x_stop'  : tile_roi_in[1][2],
                          'z_index' : tile_index[0],
                          'y_index' : tile_index[1],
                          'x_index' : tile_index[2] }

            # Quick sanity check
            assert rest_args['z_index'] == rest_args['z_start']

            retrieval_fn = partial( self._retrieve_tile, tmpdir, rest_args, tile_relative_intersection, result_region )

            PARALLEL_REQ = True
            if PARALLEL_REQ:
                pool.add( Request( retrieval_fn ) )
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()
        
        pool.wait()
        
        # Clean up our temp files.
        shutil.rmtree(tmpdir)
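The first few lines of read() reorder the (start, stop) roi from the description's output_axes order into zyx. A standalone sketch of that reordering with made-up axes and coordinates (note the final zip is materialized with list(), since Python 3's zip is lazy):

output_axes = 'xyz'
roi = [(1, 2, 3), (10, 20, 30)]              # (start, stop) in x, y, z order

roi_transposed = list(zip(*roi))             # [(1, 10), (2, 20), (3, 30)]: per-axis (start, stop)
roi_dict = dict(zip(output_axes, roi_transposed))
roi_zyx = list(zip(*(roi_dict['z'], roi_dict['y'], roi_dict['x'])))
# roi_zyx == [(3, 2, 1), (30, 20, 10)]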
Example #43
0
    def execute(self, slot, subindex, roi, result):
        classifier = self.Classifier.value
        
        # Training operator may return 'None' if there was no data to train with
        skip_prediction = (classifier is None)

        # Shortcut: If the mask is totally zero, skip this request entirely
        if not skip_prediction and self.PredictionMask.ready():
            mask_roi = numpy.array((roi.start, roi.stop))
            mask_roi[:,-1:] = [[0],[1]]
            start, stop = map(tuple, mask_roi)
            mask = self.PredictionMask( start, stop ).wait()
            skip_prediction = not numpy.any(mask)

        if skip_prediction:
            result[:] = 0.0
            return result

        assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )

        upstream_roi = (roi.start, roi.stop)
        # Ask for the halo needed by the classifier
        axiskeys = self.Image.meta.getAxisKeys()
        halo_shape = classifier.get_halo_shape(axiskeys)
        assert len(halo_shape) == len( upstream_roi[0] )
        assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

        # Expand block by halo, then clip to image bounds
        upstream_roi = numpy.array( upstream_roi )
        upstream_roi[0] -= halo_shape
        upstream_roi[1] += halo_shape
        upstream_roi = getIntersection( upstream_roi, roiFromShape(self.Image.meta.shape) )
        upstream_roi = numpy.asarray( upstream_roi )

        # Determine how to extract the data from the result (without the halo)
        downstream_roi = numpy.array((roi.start, roi.stop))
        predictions_roi = downstream_roi[:,:-1] - upstream_roi[0,:-1]

        # Request all upstream channels
        input_channels = self.Image.meta.shape[-1]
        upstream_roi[:,-1] = [0, input_channels]

        # Request the data
        input_data = self.Image(*upstream_roi).wait()
        axistags = self.Image.meta.axistags
        probabilities = classifier.predict_probabilities_pixelwise( input_data, predictions_roi, axistags )
        
        # We're expecting a channel for each label class.
        # If we didn't provide at least one sample for each label,
        #  we may get back fewer channels.
        if probabilities.shape[-1] != self.PMaps.meta.shape[-1]:
            # Copy to an array of the correct shape
            # This is slow, but it's an unusual case
            assert probabilities.shape[-1] == len(classifier.known_classes)
            full_probabilities = numpy.zeros( probabilities.shape[:-1] + (self.PMaps.meta.shape[-1],), dtype=numpy.float32 )
            for i, label in enumerate(classifier.known_classes):
                full_probabilities[..., label-1] = probabilities[..., i]
            
            probabilities = full_probabilities

        # Copy only the prediction channels the client requested.
        result[...] = probabilities[..., roi.start[-1]:roi.stop[-1]]
        return result
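Both execute() variants on this page expand the requested roi by the classifier's halo, clip the expanded roi to the image bounds, and then work out where the originally requested region sits inside the halo-padded result. A small numeric sketch of that bookkeeping (plain numpy; the image shape, halo, and roi are made up):

import numpy

image_shape = (100, 100, 100, 3)            # zyxc, made-up
halo_shape = numpy.array([10, 10, 10, 0])   # no halo on the channel axis

roi_start = numpy.array([0, 40, 40, 0])
roi_stop = numpy.array([5, 60, 60, 2])

upstream_roi = numpy.array([roi_start - halo_shape, roi_stop + halo_shape])
upstream_roi[0] = numpy.maximum(upstream_roi[0], 0)            # clip to image bounds
upstream_roi[1] = numpy.minimum(upstream_roi[1], image_shape)

# Where the originally requested region sits inside the halo-padded result:
roi_within_result = numpy.array([roi_start, roi_stop]) - upstream_roi[0]
# upstream_roi      == [[0, 30, 30, 0], [15, 70, 70, 2]]
# roi_within_result == [[0, 10, 10, 0], [ 5, 30, 30, 2]]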
Example #44
0
    def read(self, view_roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
             roi should be relative to the view
        """
        output_axes = self.description.output_axes
        roi_transposed = zip(*view_roi)
        roi_dict = dict( zip(output_axes, roi_transposed) )
        view_roi = list(zip( *(roi_dict['z'], roi_dict['y'], roi_dict['x']) ))  # list() so numpy.array(view_roi) below sees the values (zip is lazy in Python 3)

        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*'zyx')
        
        assert numpy.array(view_roi).shape == (2,3), "Invalid roi for 3D volume: {}".format( view_roi )
        view_roi = numpy.array(view_roi)
        assert (result_out.shape == (view_roi[1] - view_roi[0])).all()
        
        # User gave roi according to the view output.
        # Now offset it find global roi.
        roi = view_roi + self.description.view_origin_zyx
        
        tile_blockshape = (1,) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks( tile_blockshape, roi )

        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds( self.description.bounds_zyx, tile_blockshape, tile_start )
            tile_roi_in = numpy.array(tile_roi_in)

            # This tile's portion of the roi
            intersecting_roi = getIntersection( roi, tile_roi_in )
            intersecting_roi = numpy.array( intersecting_roi )

            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]
            
            # Get a view to the output slice
            result_region = result_out[roiToSlice(*destination_relative_intersection)]
            
            # Special feature: 
            # Some slices are missing, in which case we provide fake data from a different slice.
            # Overwrite the rest args to pull data from an alternate source tile.
            z_start = tile_roi_in[0][0]
            if z_start in self._slice_remapping:
                new_source_slice = self._slice_remapping[z_start]
                tile_roi_in[0][0] = new_source_slice
                tile_roi_in[1][0] = new_source_slice+1

            tile_index = numpy.array(tile_roi_in[0]) // tile_blockshape  # integer grid index of this tile (floor division)
            rest_args = { 'z_start' : tile_roi_in[0][0],
                          'z_stop'  : tile_roi_in[1][0],
                          'y_start' : tile_roi_in[0][1],
                          'y_stop'  : tile_roi_in[1][1],
                          'x_start' : tile_roi_in[0][2],
                          'x_stop'  : tile_roi_in[1][2],
                          'z_index' : tile_index[0],
                          'y_index' : tile_index[1],
                          'x_index' : tile_index[2] }

            # Apply special z_translation_function
            if self.description.z_translation_function is not None:
                z_update_func = eval(self.description.z_translation_function)
                rest_args['z_index'] = rest_args['z_start'] = z_update_func(rest_args['z_index'])
                rest_args['z_stop'] = 1 + rest_args['z_start']

            # Quick sanity check
            assert rest_args['z_index'] == rest_args['z_start']

            if self.description.tile_url_format.startswith('http'):
                retrieval_fn = partial( self._retrieve_remote_tile, rest_args, tile_relative_intersection, result_region )
            else:
                retrieval_fn = partial( self._retrieve_local_tile, rest_args, tile_relative_intersection, result_region )            

            PARALLEL_REQ = True
            if PARALLEL_REQ:
                pool.add( Request( retrieval_fn ) )
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()

        if PARALLEL_REQ:
            with Timer() as timer:
                pool.wait()
            logger.info("Loading {} tiles took a total of {}".format( len(tile_starts), timer.seconds() ))
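This read() variant additionally offsets the view-relative roi by the view origin before tiling, and can rewrite the z index via a z_translation_function stored as a string in the dataset description. A short hedged sketch of both steps with invented values:

import numpy

view_origin_zyx = numpy.array([100, 0, 0])            # made-up view origin
view_roi = numpy.array([(0, 0, 0), (10, 256, 256)])   # roi relative to the view
roi = view_roi + view_origin_zyx                      # global roi used for tiling
# roi == [[100, 0, 0], [110, 256, 256]]

z_translation_function = "lambda z: z + 5"            # e.g. taken from the description
z_update_func = eval(z_translation_function)
print(z_update_func(roi[0][0]))                       # 105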
Example #45
0
 def testNoAssertNonIntersect(self):
     roiA = [(10, 10, 10), (20, 20, 20)]
     roiB = [(15, 26, 27), (16, 30, 30)]
     intersection = getIntersection(roiA, roiB, assertIntersect=False)
     assert intersection is None, "Expected None because {} doesn't intersect with {}".format(roiA, roiB)
Example #47
0
 def testBasic(self):
     roiA = [(10, 10, 10), (20, 20, 20)]
     roiB = [(15, 16, 17), (25, 25, 25)]
     intersection = getIntersection(roiA, roiB)
     assert (numpy.array(intersection) == ([15, 16, 17], [20, 20, 20])).all()
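The two tests above pin down the behaviour relied on everywhere else: the intersection of two (start, stop) rois is the element-wise max of the starts and min of the stops, and with assertIntersect=False a disjoint pair yields None. A pure-numpy stand-in that reproduces the tested behaviour (a sketch, not lazyflow's code, and the keyword name below is illustrative):

import numpy

def get_intersection_sketch(roi_a, roi_b, assert_intersect=True):
    start = numpy.maximum(roi_a[0], roi_b[0])
    stop = numpy.minimum(roi_a[1], roi_b[1])
    if (start >= stop).any():
        assert not assert_intersect, "Rois do not intersect: {} vs {}".format(roi_a, roi_b)
        return None   # disjoint on at least one axis
    return (start, stop)

print(get_intersection_sketch([(10, 10, 10), (20, 20, 20)],
                              [(15, 16, 17), (25, 25, 25)]))
# -> (array([15, 16, 17]), array([20, 20, 20]))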