Example #1
    def test_basic(self):
        _ = 0
        #                  0 1 2 3 4  5 6 7 8 9
        complete_mask = [[
            [_, _, _, _, _, _, _, _, _, _],  # 0
            [_, 1, _, _, _, _, 1, 1, _, _],  # 1
            [_, 1, 1, _, _, 1, 1, 1, _, _],  # 2
            [_, _, 1, _, _, _, _, 1, _, _],  # 3
            [_, _, _, 1, _, _, _, 1, _, _],  # 4
            [_, _, _, 1, 1, _, 1, 1, _, _],  # 5
            [_, _, _, _, _, 1, 1, 1, _, _],  # 6
            [_, _, _, _, _, 1, _, _, 1, _],  # 7
            [_, _, _, _, _, 1, _, _, _, _]  # 8
        ]]

        complete_mask = np.asarray(complete_mask, dtype=bool)

        boxes = []
        boxes.append(((0, 1, 1), (1, 5, 4)))
        boxes.append(((0, 1, 5), (1, 5, 8)))
        boxes.append(((0, 5, 3), (1, 6, 5)))
        boxes.append(((0, 5, 5), (1, 9, 9)))

        masks = [complete_mask[bb_to_slicing(*box)] for box in boxes]

        combined_bounding_box, combined_mask, downsample_factor = assemble_masks(
            boxes, masks, downsample_factor=1, minimum_object_size=1)
        assert (combined_bounding_box == ((0, 1, 1), (1, 9, 9))).all()
        assert (combined_mask == complete_mask[bb_to_slicing(
            *combined_bounding_box)]).all()
        assert downsample_factor == 1
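A note on the slicing helper used throughout these examples: a minimal sketch of what bb_to_slicing presumably does, assuming it only converts a (start, stop) bounding box into a tuple of slice objects (hypothetical reimplementation for illustration, not the library's actual code):

def bb_to_slicing_sketch(start, stop):
    # Turn ((z0, y0, x0), (z1, y1, x1)) into (slice(z0, z1), slice(y0, y1), slice(x0, x1)),
    # so that volume[bb_to_slicing_sketch(start, stop)] selects exactly that box.
    return tuple(slice(int(a), int(b)) for a, b in zip(start, stop))

# For instance, complete_mask[bb_to_slicing_sketch((0, 1, 1), (1, 5, 4))] has shape (1, 4, 3).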
Example #2
    def test_compressed_output(self):
        label_ids_and_masks = object_masks_for_labels(
            self.segmentation,
            box=None,
            minimum_object_size=1,
            always_keep_border_objects=False,
            compress_masks=True)

        # Result isn't necessarily sorted
        masks_dict = dict(label_ids_and_masks)
        assert set(masks_dict.keys()) == set([2, 3, 4])

        for label in (2, 3, 4):
            full_mask = (self.segmentation == label)
            bb_start = np.transpose(full_mask.nonzero()).min(axis=0)
            bb_stop = np.transpose(full_mask.nonzero()).max(axis=0) + 1

            box, compressed_mask, count = masks_dict[label]
            assert isinstance(compressed_mask, CompressedNumpyArray)
            mask = compressed_mask.deserialize()

            assert (np.asarray(box) == (bb_start, bb_stop)).all()
            assert (mask == full_mask[bb_to_slicing(bb_start, bb_stop)]).all(), \
                "Incorrect mask for label {}: \n {}".format( label, full_mask )
            assert count == full_mask[bb_to_slicing(bb_start, bb_stop)].sum()
Example #3
    def roi_coords_for_box(self, roi_map, subvol_start_px, subvol_stop_px):
        from DVIDSparkServices.util import bb_to_slicing, RoiMap
        assert isinstance(roi_map, RoiMap)

        # Subvol bounding box in block coords
        subvol_blocks_start = subvol_start_px // self.roi_blocksize
        subvol_blocks_stop = (subvol_stop_px + self.roi_blocksize -
                              1) // self.roi_blocksize
        subvol_blocks_shape = subvol_blocks_stop - subvol_blocks_start

        # Where does this subvolume start within roi_map.block_mask?
        # Offset, since the ROI didn't necessarily start at (0,0,0)
        subvol_blocks_offset = subvol_blocks_start - roi_map.blocks_start

        # Clip the extracted region, since subvol may extend outside of ROI and therefore outside of roi_map.block_mask
        subvol_blocks_clipped_start = np.maximum(subvol_blocks_offset,
                                                 (0, 0, 0))
        subvol_blocks_clipped_stop = np.minimum(
            roi_map.blocks_shape,
            (subvol_blocks_start + subvol_blocks_shape) - roi_map.blocks_start)

        # Extract the portion of the mask for this subvol
        subvol_blocks_mask = roi_map.block_mask[bb_to_slicing(
            subvol_blocks_clipped_start, subvol_blocks_clipped_stop)]
        subvol_block_coords = np.transpose(subvol_blocks_mask.nonzero())

        # Un-offset.
        subvol_block_coords += (subvol_blocks_clipped_start +
                                roi_map.blocks_start)
        return subvol_block_coords
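The block-coordinate conversion above rounds the subvolume start down and the stop up to whole ROI blocks. A small illustration of that rounding, assuming a hypothetical block size of 32:

import numpy as np

roi_blocksize = 32  # assumed block size, for illustration only
subvol_start_px = np.array([0, 40, 100])
subvol_stop_px = np.array([64, 100, 130])

# Floor-divide the start; the "+ blocksize - 1" idiom ceiling-divides the stop.
subvol_blocks_start = subvol_start_px // roi_blocksize                       # [0, 1, 3]
subvol_blocks_stop = (subvol_stop_px + roi_blocksize - 1) // roi_blocksize   # [2, 4, 5]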
Example #4
    def test_with_downsampling_and_pad(self):
        _ = 0
        #                  0 1 2 3 4  5 6 7 8 9
        complete_mask = [[
            [_, _, _, _, _, _, _, _, _, _],  # 0
            [_, 1, _, _, _, _, 1, 1, _, _],  # 1
            [_, 1, 1, _, _, 1, 1, 1, _, _],  # 2
            [_, _, 1, _, _, _, _, 1, _, _],  # 3
            [_, _, _, 1, _, _, _, 1, _, _],  # 4
            [_, _, _, 1, 1, _, 1, 1, _, _],  # 5
            [_, _, _, _, _, 1, 1, 1, _, _],  # 6
            [_, _, _, _, _, 1, _, _, 1, _],  # 7
            [_, _, _, _, _, 1, _, _, _, _]  # 8
        ]]

        complete_mask = np.asarray(complete_mask, dtype=bool)

        boxes = []
        boxes.append(((0, 1, 1), (1, 5, 4)))
        boxes.append(((0, 1, 5), (1, 5, 8)))
        boxes.append(((0, 5, 3), (1, 6, 5)))
        boxes.append(((0, 5, 5), (1, 9, 9)))

        masks = [complete_mask[bb_to_slicing(*box)] for box in boxes]

        pad = 2
        combined_bounding_box, combined_mask, downsample_factor = assemble_masks(
            boxes,
            masks,
            downsample_factor=2,
            minimum_object_size=1,
            suppress_zero=False,
            pad=pad)

        expected_downsampled_mask = [[[1, _, _, 1, _],
                                      [0, _, _, 1, _],
                                      [0, 1, 1, 1, _],
                                      [0, _, 1, _, _],
                                      [0, _, 1, _, _]]]

        expected_downsampled_mask = np.pad(expected_downsampled_mask,
                                           pad,
                                           'constant',
                                           constant_values=0)

        expected_downsampled_mask = np.asarray(expected_downsampled_mask)

        combined_box_without_pad = np.array([(0, 1, 1), (1, 9, 9)])
        padding_in_full_res_space = [(-4, -4, -4), (4, 4, 4)]
        assert (combined_bounding_box == (combined_box_without_pad +
                                          padding_in_full_res_space)).all()
        assert (combined_mask == expected_downsampled_mask).all()
        assert downsample_factor == 2

        downsampled_box = downsample_box(combined_bounding_box,
                                         np.array((2, 2, 2)))
        assert (combined_mask.shape == (downsampled_box[1] - downsampled_box[0])).all(), \
            "Output mask shape is not consistent with the combined box"
Example #5
    def test_always_keep_border_objects(self):
        # Object 4 is too small, but it's kept anyway because it touches the border.
        label_ids_and_masks = object_masks_for_labels(
            self.segmentation,
            box=None,
            minimum_object_size=3,
            always_keep_border_objects=True,  # Keep border objects
            compress_masks=False)

        # Result isn't necessarily sorted
        masks_dict = dict(label_ids_and_masks)
        assert set(masks_dict.keys()) == set([2, 3, 4])

        for label in (2, 3, 4):
            full_mask = (self.segmentation == label)
            bb_start = np.transpose(full_mask.nonzero()).min(axis=0)
            bb_stop = np.transpose(full_mask.nonzero()).max(axis=0) + 1

            box, mask, count = masks_dict[label]

            assert (np.asarray(box) == (bb_start, bb_stop)).all()
            assert (mask == full_mask[bb_to_slicing(bb_start, bb_stop)]).all(), \
                "Incorrect mask for label {}: \n {}".format( label, full_mask )
            assert count == full_mask[bb_to_slicing(bb_start, bb_stop)].sum()
Example #6
    def test_downsample_labels_3d_WITH_OFFSET(self):
        # Take a subset of the data, and tell the downsampling function where it came from.
        data_box = [(0, 1, 2),
                    (1, 10, 9)]
        offset_data = self.data[bb_to_slicing(*data_box)]
        downsampled, box = downsample_labels_3d_suppress_zero(offset_data, (1,3,3), data_box)
        assert (box == [(0,0,0), (1,4,3)]).all()
        
        _ = 0
        expected = [[[2,4,5],
                     [6,8,9],
                     [_,_,_],
                     [_,4,6]]]

        assert downsampled.shape == (1,4,3)
        assert (downsampled == expected).all()
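The returned box is the input data_box expressed in downsampled coordinates: the start is floor-divided by the block shape and the stop is ceiling-divided. A sketch of that calculation (hypothetical helper, shown only to reproduce the asserted box; not necessarily how downsample_labels_3d_suppress_zero computes it internally):

import numpy as np

def downsample_box_sketch(box, block_shape):
    # Floor-divide the start, ceiling-divide the stop.
    box = np.asarray(box)
    block_shape = np.asarray(block_shape)
    start = box[0] // block_shape
    stop = -(-box[1] // block_shape)  # ceiling division via negation
    return np.array([start, stop])

# downsample_box_sketch([(0, 1, 2), (1, 10, 9)], (1, 3, 3)) -> [[0, 0, 0], [1, 4, 3]]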
Example #7
    def test_with_downsampling(self):
        _ = 0
        #                  0 1 2 3 4  5 6 7 8 9
        complete_mask = [[
            [_, _, _, _, _, _, _, _, _, _],  # 0
            [_, 1, _, _, _, _, 1, 1, _, _],  # 1
            [_, 1, 1, _, _, 1, 1, 1, _, _],  # 2
            [_, _, 1, _, _, _, _, 1, _, _],  # 3
            [_, _, _, 1, _, _, _, 1, _, _],  # 4
            [_, _, _, 1, 1, _, 1, 1, _, _],  # 5
            [_, _, _, _, _, 1, 1, 1, _, _],  # 6
            [_, _, _, _, _, 1, _, _, 1, _],  # 7
            [_, _, _, _, _, 1, _, _, _, _]  # 8
        ]]

        complete_mask = np.asarray(complete_mask, dtype=bool)

        boxes = []
        boxes.append(((0, 1, 1), (1, 5, 4)))
        boxes.append(((0, 1, 5), (1, 5, 8)))
        boxes.append(((0, 5, 3), (1, 6, 5)))
        boxes.append(((0, 5, 5), (1, 9, 9)))

        masks = [complete_mask[bb_to_slicing(*box)] for box in boxes]

        combined_bounding_box, combined_mask, downsample_factor = assemble_masks(
            boxes,
            masks,
            downsample_factor=2,
            minimum_object_size=1,
            suppress_zero=False)

        expected_downsampled_mask = [[[1, _, _, 1, _],
                                      [0, _, _, 1, _],
                                      [0, 1, 1, 1, _],
                                      [0, _, 1, _, _],
                                      [0, _, 1, _, _]]]
        expected_downsampled_mask = np.asarray(expected_downsampled_mask)

        assert (combined_bounding_box == ((0, 1, 1), (1, 9, 9))).all()
        assert (combined_mask == expected_downsampled_mask).all()
        assert downsample_factor == 2
Example #8
    def roi_coords_for_box(self, roi_map, subvol_start_px, subvol_stop_px):
        from DVIDSparkServices.util import bb_to_slicing, RoiMap
        assert isinstance(roi_map, RoiMap)

        # Subvol bounding box in block coords
        subvol_blocks_start = subvol_start_px // self.roi_blocksize
        subvol_blocks_stop = (subvol_stop_px + self.roi_blocksize-1) // self.roi_blocksize
        subvol_blocks_shape = subvol_blocks_stop - subvol_blocks_start

        # Where does this subvolume start within roi_map.block_mask?
        # Offset, since the ROI didn't necessarily start at (0,0,0)
        subvol_blocks_offset = subvol_blocks_start - roi_map.blocks_start
        
        # Clip the extracted region, since subvol may extend outside of ROI and therefore outside of roi_map.block_mask
        subvol_blocks_clipped_start = np.maximum(subvol_blocks_offset, (0,0,0))
        subvol_blocks_clipped_stop = np.minimum(roi_map.blocks_shape, (subvol_blocks_start + subvol_blocks_shape) - roi_map.blocks_start)
        
        # Extract the portion of the mask for this subvol
        subvol_blocks_mask = roi_map.block_mask[bb_to_slicing(subvol_blocks_clipped_start, subvol_blocks_clipped_stop)]
        subvol_block_coords = np.transpose( subvol_blocks_mask.nonzero() )
        
        # Un-offset.
        subvol_block_coords += (subvol_blocks_clipped_start + roi_map.blocks_start)
        return subvol_block_coords
Example #9
import os
import sys
import glob

import yaml
import numpy as np
import z5py

from DVIDSparkServices.util import bb_to_slicing

dirpath = sys.argv[1]

configs = glob.glob(dirpath + "/temp_data/config.*")
assert len(configs) == 1, "Why does the temp_dir have more than one config.* file?"

with open(configs[0], 'r') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)


n5_file = z5py.File(dirpath + '/../resources/volume-256.n5')

dset_name = config['input']['n5']['dataset-name']
input_bb_xyz = config['input']['geometry']['bounding-box']
input_bb_zyx = np.array(input_bb_xyz)[:,::-1]
input_shape = input_bb_zyx[1] - input_bb_zyx[0]

input_volume = n5_file[dset_name][bb_to_slicing(*input_bb_zyx.tolist())]
assert (input_volume.shape == input_shape).all(), "Error reading reference N5 volume -- bad shape??"

os.chdir(f'{dirpath}/temp_data')

import vigra
for z in range(input_bb_zyx[0,0], input_bb_zyx[1,0]):
    z_slice = vigra.impex.readImage(config['output']['slice-files']['slice-path-format'].format(z))
    assert (z_slice.withAxes('yx') == input_volume[z-input_bb_zyx[0]]).all()

print("DEBUG: ExportSlices test passed.")
sys.exit(0)
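The [:, ::-1] above reverses each bounding-box coordinate from XYZ order (as stored in the config) to ZYX order (as used for array indexing). With a hypothetical box, for example:

import numpy as np

input_bb_xyz = [[10, 20, 30], [110, 220, 330]]   # hypothetical values
input_bb_zyx = np.array(input_bb_xyz)[:, ::-1]   # [[30, 20, 10], [330, 220, 110]]
input_shape = input_bb_zyx[1] - input_bb_zyx[0]  # [300, 200, 100]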
Example #10
def assemble_masks(boxes,
                   masks,
                   downsample_factor=0,
                   minimum_object_size=1,
                   max_combined_mask_size=1e9,
                   suppress_zero=True,
                   pad=0):
    """
    Given a list of bounding boxes and corresponding binary mask arrays,
    assemble the superset of those masks in a larger array.
    To save RAM, the entire result can be optionally downsampled.
    
    boxes:
        List of bounding box tuples [(z0,y0,x0), (z1,y1,x1), ...]
    
    masks:
        Iterable of binary mask arrays.
    
    downsample_factor:
        How much to downsample the result:
            0 - Choose the factor automatically, based on max_combined_mask_size
            1 - No downsampling (if possible, considering max_combined_mask_size)
            2+ - Downsample the result by at least 2x, 3x, etc.

    minimum_object_size:
        If the final result is smaller than this number (as measured in NON-downsampled pixels),
        return 'None' instead of an actual mask.
    
    max_combined_mask_size:
        The maximum allowed size for the combined downsampled array.
        If the given downsample_factor would result in an array that exceeds max_combined_mask_size,
        then a new downsample_factor is automatically chosen.
    
    suppress_zero:
        ("Maximal value downsampling") In the downsampled mask result, output voxels
        will be 1 if *any* of their input voxels were 1 (even if they were outnumbered by 0s).
    
    pad:
        If non-zero, leave some padding (a halo of blank voxels) on all sides of the final volume.
        (This is useful for mesh-generation algorithms, which require a boundary between on/off pixels.)
        
        Note: The padding is applied AFTER downsampling, so the returned combined_bounding_box and combined_mask
              will be expanded by pad*downsample_factor before and after each axis.
    
    Returns: (combined_bounding_box, combined_mask, downsample_factor)

        where:
            combined_bounding_box:
                the bounding box of the returned mask,
                in NON-downsampled coordinates: ((z0,y0,x0), (z1,y1,x1)).
                Note: If you specified a 'pad', then this will be
                      reflected in the combined_bounding_box.
            
            combined_mask:
                the full downsampled combined mask, including any padding.
            
            downsample_factor:
                The chosen downsampling factor if using 'auto' downsampling,
                otherwise equal to the downsample_factor you passed in.
    """
    boxes = np.asarray(boxes)

    combined_box = np.zeros((2, 3), dtype=np.int64)
    combined_box[0] = boxes[:, 0, :].min(axis=0)
    combined_box[1] = boxes[:, 1, :].max(axis=0)

    # Auto-choose a downsample factor that will result in a
    # combined downsampled array no larger than max_combined_mask_size
    full_size = np.prod(combined_box[1] - combined_box[0])
    auto_downsample_factor = 1 + int(
        np.power(full_size / max_combined_mask_size, (1. / 3)))
    chosen_downsample_factor = max(downsample_factor, auto_downsample_factor)

    # Leave room for padding.
    combined_box[:] += chosen_downsample_factor * np.array((-pad, pad))[:, None]

    block_shape = np.array((chosen_downsample_factor,) * 3)
    combined_downsampled_box = downsample_box(combined_box, block_shape)
    combined_downsampled_box_shape = combined_downsampled_box[1] - combined_downsampled_box[0]

    combined_mask_downsampled = np.zeros(combined_downsampled_box_shape, dtype=bool)

    if suppress_zero:
        downsample_func = downsample_binary_3d_suppress_zero
    else:
        downsample_func = downsample_binary_3d

    for box_global, mask in zip(boxes, masks):
        box_global = np.asarray(box_global)
        mask_downsampled, downsampled_box = downsample_func(
            mask, chosen_downsample_factor, box_global)
        downsampled_box[:] -= combined_downsampled_box[0]
        combined_mask_downsampled[bb_to_slicing(*downsampled_box)] |= mask_downsampled

    if combined_mask_downsampled.sum() * chosen_downsample_factor**3 < minimum_object_size:
        # 'None' results will be filtered out. See below.
        combined_mask_downsampled = None

    return (combined_box, combined_mask_downsampled, chosen_downsample_factor)
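A sketch of what "suppress-zero" downsampling means for a binary mask, assuming the mask shape is an exact multiple of the factor along every axis (hypothetical reimplementation for illustration, not the actual downsample_binary_3d_suppress_zero): every output voxel is 1 if any voxel in its block was 1.

import numpy as np

def downsample_binary_suppress_zero_sketch(mask, factor):
    # Any-pooling over factor x factor x factor blocks.
    mask = np.asarray(mask, dtype=bool)
    blocked_shape = []
    for s in mask.shape:
        blocked_shape += [s // factor, factor]
    blocked = mask.reshape(blocked_shape)
    # Reduce over the within-block axes (1, 3, 5, ...).
    return blocked.any(axis=tuple(range(1, blocked.ndim, 2)))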
Example #11
def object_masks_for_labels(segmentation,
                            box=None,
                            minimum_object_size=1,
                            always_keep_border_objects=True,
                            compress_masks=False):
    """
    Given a segmentation containing N unique label values (excluding label 0),
    return N binary masks and their bounding boxes.

    Note: Result is *not* sorted by label ID.
    
    segmentation:
        label image, any dtype
    
    box:
        Bounding box of the segmentation in global coordinates.
        If the segmentation was extracted from a larger (global) coordinate space,
        this parameter can be used to ensure that the returned mask bounding boxes use global coordinates.
    
    minimum_object_size:
        Extracted objects with fewer pixels than the minimum size are discarded.
    
    always_keep_border_objects:
        Ignore the `minimum_object_size` constraint for objects that touch the edge of the segmentation volume.
        (Useful if you plan to merge the object masks with neighboring segmentation blocks.)

    compress_masks:
        Return masks as a CompressedNumpyArray instead of an ordinary np.ndarray
    
    Returns:
        List of tuples: [(label_id, (mask_bounding_box, mask, count)),
                         (label_id, (mask_bounding_box, mask, count)), ...]
        
        ...where: `mask_bounding_box` is of the form ((z0, y0, x0), (z1, y1, x1)),
                  `mask` is either a np.ndarray or CompressedNumpyArray, depending on the compress_masks argument, and
                   `count` is the count of nonzero pixels in the mask        

        Note: Result is *not* sorted by label ID.
    """
    if box is None:
        box = [(0, ) * segmentation.ndim, segmentation.shape]
    sv_start, sv_stop = box

    segmentation = vigra.taggedView(segmentation, 'zyx')
    consecutive_seg = np.empty_like(segmentation, dtype=np.uint32)
    _, maxlabel, bodies_to_consecutive = vigra.analysis.relabelConsecutive(
        segmentation, out=consecutive_seg)  # preserves zeros by default
    consecutive_to_bodies = {v: k for k, v in bodies_to_consecutive.items()}
    del segmentation

    # We don't care what the 'image' parameter is, but we have to give something
    image = consecutive_seg.view(np.float32)
    acc = vigra.analysis.extractRegionFeatures(
        image,
        consecutive_seg,
        features=['Coord<Minimum >', 'Coord<Maximum >', 'Count'])

    body_ids_and_masks = []
    for label in range(1, maxlabel + 1):  # Skip 0
        count = acc['Count'][label]
        min_coord = acc['Coord<Minimum >'][label].astype(int)
        max_coord = acc['Coord<Maximum >'][label].astype(int)
        box_local = np.array((min_coord, 1 + max_coord))

        mask = (consecutive_seg[bb_to_slicing(*box_local)] == label)
        if compress_masks:
            assert mask.dtype == bool  # CompressedNumpyArray has special support for boolean masks.
            mask = CompressedNumpyArray(mask)

        body_id = consecutive_to_bodies[label]
        box_global = box_local + sv_start

        # Only keep segments that are big enough OR touch the subvolume border.
        if count >= minimum_object_size \
        or (always_keep_border_objects and (   (box_global[0] == sv_start).any()
                                            or (box_global[1] == sv_stop).any())):
            body_ids_and_masks.append(
                (body_id, (bb_as_tuple(box_global), mask, count)))

    return body_ids_and_masks
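A hedged sketch of how the documented return value might be consumed, pasting the per-object masks back into a full-size label volume (assumes compress_masks=False and boxes expressed in the volume's own coordinates; the helper name is hypothetical):

import numpy as np

def paste_masks_into_volume(body_ids_and_masks, volume_shape):
    # body_ids_and_masks: [(label_id, (box, mask, count)), ...] as documented above,
    # where box == ((z0, y0, x0), (z1, y1, x1)) and mask is an ordinary boolean ndarray.
    out = np.zeros(volume_shape, dtype=np.uint64)
    for label_id, (box, mask, count) in body_ids_and_masks:
        (z0, y0, x0), (z1, y1, x1) = box
        view = out[z0:z1, y0:y1, x0:x1]
        view[mask] = label_id
        assert count == mask.sum()
    return out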
Example #12
def assemble_masks( boxes, masks, downsample_factor=0, minimum_object_size=1, max_combined_mask_size=1e9, suppress_zero=True, pad=0 ):
    """
    Given a list of bounding boxes and corresponding binary mask arrays,
    assemble the superset of those masks in a larger array.
    To save RAM, the entire result can be optionally downsampled.
    
    boxes:
        List of bounding box tuples [(z0,y0,x0), (z1,y1,x1), ...]
    
    masks:
        Iterable of binary mask arrays.
    
    downsample_factor:
        How much to downsample the result:
            0 - Choose the factor automatically, based on max_combined_mask_size
            1 - No downsampling (if possible, considering max_combined_mask_size)
            2+ - Downsample the result by at least 2x, 3x, etc.

    minimum_object_size:
        If the final result is smaller than this number (as measured in NON-downsampled pixels),
        return 'None' instead of an actual mask.
    
    max_combined_mask_size:
        The maximum allowed size for the combined downsampled array.
        If the given downsample_factor would result in an array that exceeds max_combined_mask_size,
        then a new downsample_factor is automatically chosen.
    
    suppress_zero:
        ("Maximal value downsampling") In the downsampled mask result, output voxels
        will be 1 if *any* of their input voxels were 1 (even if they were outnumbered by 0s).
    
    pad:
        If non-zero, leave some padding (a halo of blank voxels) on all sides of the final volume.
        (This is useful for mesh-generation algorithms, which require a boundary between on/off pixels.)
        
        Note: The padding is applied AFTER downsampling, so the returned combined_bounding_box and combined_mask
              will be expanded by pad*downsample_factor before and after each axis.
    
    Returns: (combined_bounding_box, combined_mask, downsample_factor)

        where:
            combined_bounding_box:
                the bounding box of the returned mask,
                in NON-downsampled coordinates: ((z0,y0,x0), (z1,y1,x1)).
                Note: If you specified a 'pad', then this will be
                      reflected in the combined_bounding_box.
            
            combined_mask:
                the full downsampled combined mask, including any padding.
            
            downsample_factor:
                The chosen downsampling factor if using 'auto' downsampling,
                otherwise equal to the downsample_factor you passed in.
    """
    boxes = np.asarray(boxes)
    
    combined_box = np.zeros((2,3), dtype=np.int64)
    combined_box[0] = boxes[:, 0, :].min(axis=0)
    combined_box[1] = boxes[:, 1, :].max(axis=0)
    
    # Auto-choose a downsample factor that will result in a
    # combined downsampled array no larger than max_combined_mask_size
    full_size = np.prod(combined_box[1] - combined_box[0])
    auto_downsample_factor = 1 + int(np.power(full_size / max_combined_mask_size, (1./3)))
    chosen_downsample_factor = max(downsample_factor, auto_downsample_factor)

    # Leave room for padding.
    combined_box[:] += chosen_downsample_factor * np.array((-pad, pad))[:,None]

    block_shape = np.array((chosen_downsample_factor,)*3)
    combined_downsampled_box = downsample_box( combined_box, block_shape )
    combined_downsampled_box_shape = combined_downsampled_box[1] - combined_downsampled_box[0]

    combined_mask_downsampled = np.zeros( combined_downsampled_box_shape, dtype=bool )

    if suppress_zero:
        downsample_func = downsample_binary_3d_suppress_zero
    else:
        downsample_func = downsample_binary_3d

    for box_global, mask in zip(boxes, masks):
        box_global = np.asarray(box_global)
        mask_downsampled, downsampled_box = downsample_func(mask, chosen_downsample_factor, box_global)
        downsampled_box[:] -= combined_downsampled_box[0]
        combined_mask_downsampled[ bb_to_slicing(*downsampled_box) ] |= mask_downsampled

    if combined_mask_downsampled.sum() * chosen_downsample_factor**3 < minimum_object_size:
        # 'None' results will be filtered out. See below.
        combined_mask_downsampled = None

    return ( combined_box, combined_mask_downsampled, chosen_downsample_factor )
Example #13
def object_masks_for_labels( segmentation, box=None, minimum_object_size=1, always_keep_border_objects=True, compress_masks=False ):
    """
    Given a segmentation containing N unique label values (excluding label 0),
    return N binary masks and their bounding boxes.

    Note: Result is *not* sorted by label ID.
    
    segmentation:
        label image, any dtype
    
    box:
        Bounding box of the segmentation in global coordinates.
        If the segmentation was extracted from a larger (global) coordinate space,
        this parameter can be used to ensure that the returned mask bounding boxes use global coordinates.
    
    minimum_object_size:
        Extracted objects with fewer pixels than the minimum size are discarded.
    
    always_keep_border_objects:
        Ignore the `minimum_object_size` constraint for objects that touch the edge of the segmentation volume.
        (Useful if you plan to merge the object masks with neighboring segmentation blocks.)

    compress_masks:
        Return masks as a CompressedNumpyArray instead of an ordinary np.ndarray
    
    Returns:
        List of tuples: [(label_id, (mask_bounding_box, mask, count)),
                         (label_id, (mask_bounding_box, mask, count)), ...]
        
        ...where: `mask_bounding_box` is of the form ((z0, y0, x0), (z1, y1, x1)),
                  `mask` is either a np.ndarray or CompressedNumpyArray, depending on the compress_masks argument, and
                   `count` is the count of nonzero pixels in the mask        

        Note: Result is *not* sorted by label ID.
    """
    if box is None:
        box = [ (0,)*segmentation.ndim, segmentation.shape ]
    sv_start, sv_stop = box

    segmentation = vigra.taggedView(segmentation, 'zyx')
    consecutive_seg = np.empty_like(segmentation, dtype=np.uint32)
    _, maxlabel, bodies_to_consecutive = vigra.analysis.relabelConsecutive(segmentation, out=consecutive_seg) # preserves zeros by default
    consecutive_to_bodies = { v:k for k,v in bodies_to_consecutive.items() }
    del segmentation
    
    # We don't care what the 'image' parameter is, but we have to give something
    image = consecutive_seg.view(np.float32)
    acc = vigra.analysis.extractRegionFeatures(image, consecutive_seg, features=['Coord<Minimum >', 'Coord<Maximum >', 'Count'])

    body_ids_and_masks = []
    for label in range(1, maxlabel+1): # Skip 0
        count = acc['Count'][label]
        min_coord = acc['Coord<Minimum >'][label].astype(int)
        max_coord = acc['Coord<Maximum >'][label].astype(int)
        box_local = np.array((min_coord, 1+max_coord))
        
        mask = (consecutive_seg[bb_to_slicing(*box_local)] == label)
        if compress_masks:
            assert mask.dtype == bool # CompressedNumpyArray has special support for boolean masks.
            mask = CompressedNumpyArray(mask)

        body_id = consecutive_to_bodies[label]
        box_global = box_local + sv_start

        # Only keep segments that are big enough OR touch the subvolume border.
        if count >= minimum_object_size \
        or (always_keep_border_objects and (   (box_global[0] == sv_start).any()
                                            or (box_global[1] == sv_stop).any())):
            body_ids_and_masks.append( (body_id, (bb_as_tuple(box_global), mask, count)) )
    
    return body_ids_and_masks