def test_copysegmentation_from_hdf5_to_dvid_input_mask(
        setup_hdf5_segmentation_input, disable_auto_retry):
    template_dir, config, volume, dvid_address, repo_uuid, _output_segmentation_name = setup_hdf5_segmentation_input

    # make sure we get a fresh output
    output_segmentation_name = 'copyseg-with-input-mask'
    config["output"]["dvid"]["segmentation-name"] = output_segmentation_name

    # Select only even IDs
    all_labels = pd.unique(volume.reshape(-1))
    even_labels = all_labels[all_labels % 2 == 0]
    config["copysegmentation"]["input-mask-labels"] = even_labels.tolist()

    # Add an offset, which is added to both the input volume AND the mask labels
    offset = 2000
    config["copysegmentation"]["add-offset-to-ids"] = offset

    input_box = np.array(config["input"]["geometry"]["bounding-box"])[:, ::-1]
    volume = np.where((volume % 2) == 0, volume + offset, 0)
    expected_vol = np.zeros_like(volume)
    overwrite_subvol(expected_vol, input_box,
                     extract_subvol(volume, input_box))

    setup = template_dir, config, expected_vol, dvid_address, repo_uuid, output_segmentation_name
    _box_zyx, _expected_vol, _output_vol = _run_to_dvid(setup)
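
To make the expected output concrete: a minimal sketch of the masking logic this test exercises, using plain numpy only. Voxels whose IDs pass the input mask (even IDs here) are kept and shifted by the offset; everything else becomes 0. Note that the background ID 0 is itself even, so the np.where() line above shifts it too.

import numpy as np

demo = np.array([[0, 1], [2, 3]], dtype=np.uint64)
offset = 2000
masked = np.where(demo % 2 == 0, demo + offset, 0)
assert (masked == [[2000, 0], [2002, 0]]).all()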


def test_copysegmentation_from_dvid_to_dvid_input_mask(
        setup_dvid_segmentation_input, disable_auto_retry):
    template_dir, config, volume, dvid_address, repo_uuid, _output_segmentation_name = setup_dvid_segmentation_input

    # make sure we get a fresh output
    output_segmentation_name = 'copyseg-with-input-mask-from-dvid'
    config["output"]["dvid"]["segmentation-name"] = output_segmentation_name

    # Add an offset, which is added to both the input volume AND the mask labels
    offset = 2000
    config["copysegmentation"]["add-offset-to-ids"] = offset

    # Select some labels that don't extend throughout the whole volume
    selected_labels = pd.unique(volume[150, 64:128, 64:128].reshape(-1))
    assert 0 not in selected_labels
    selected_coords = np.array(
        mask_for_labels(volume, selected_labels).nonzero()).transpose()
    selected_box = np.array(
        [selected_coords.min(axis=0), 1 + selected_coords.max(axis=0)])

    input_box = np.array(config["input"]["geometry"]["bounding-box"])[:, ::-1]

    subvol_box = box_intersection(input_box, selected_box)
    selected_subvol = extract_subvol(volume, subvol_box).copy()
    selected_subvol = apply_mask_for_labels(selected_subvol, selected_labels)
    config["copysegmentation"]["input-mask-labels"] = selected_labels.tolist()

    selected_subvol = np.where(selected_subvol, selected_subvol + offset, 0)
    expected_vol = np.zeros(volume.shape, np.uint64)
    overwrite_subvol(expected_vol, subvol_box, selected_subvol)

    setup = template_dir, config, expected_vol, dvid_address, repo_uuid, output_segmentation_name
    _box_zyx, _expected_vol, _output_vol = _run_to_dvid(setup)


def test_dvid_volume_service_grayscale(setup_dvid_repo, disable_auto_retry):
    server, uuid = setup_dvid_repo
    instance_name = 'test-dvs-grayscale'

    volume = np.random.randint(100, size=(256, 192, 128), dtype=np.uint8)
    max_scale = 2
    voxel_dimensions = [4.0, 4.0, 32.0]

    config_text = textwrap.dedent(f"""\
        dvid:
          server: {server}
          uuid: {uuid}
          grayscale-name: {instance_name}
          
          create-if-necessary: true
          creation-settings:
            max-scale: {max_scale}
            voxel-size: {voxel_dimensions}
       
        geometry:
          bounding-box: [[0,0,0], {list(volume.shape[::-1])}]
    """)

    yaml = YAML()
    with StringIO(config_text) as f:
        volume_config = yaml.load(f)

    assert instance_name not in fetch_repo_instances(server, uuid)

    service = VolumeService.create_from_config(volume_config)

    repo_instances = fetch_repo_instances(server, uuid)

    info = fetch_instance_info(server, uuid, instance_name)
    assert info["Extended"]["VoxelSize"] == voxel_dimensions

    scaled_volumes = {}
    for scale in range(max_scale + 1):
        if scale == 0:
            assert instance_name in repo_instances
            assert repo_instances[instance_name] == 'uint8blk'
        else:
            assert f"{instance_name}_{scale}" in repo_instances
            assert repo_instances[f"{instance_name}_{scale}"] == 'uint8blk'

        vol = downsample(volume, 2**scale,
                         'label')  # label downsampling is easier to test with
        aligned_shape = (np.ceil(np.array(vol.shape) / 64) * 64).astype(int)
        aligned_vol = np.zeros(aligned_shape, np.uint8)
        # Copy the downsampled data into the 64-aligned buffer
        overwrite_subvol(aligned_vol, [(0, 0, 0), vol.shape], vol)
        service.write_subvolume(aligned_vol, (0, 0, 0), scale)
        scaled_volumes[scale] = aligned_vol

    box = np.array([[40, 80, 40], [240, 160, 100]])
    for scale in range(max_scale + 1):
        scaled_box = box // 2**scale
        vol = service.get_subvolume(scaled_box, scale)
        assert (vol == extract_subvol(scaled_volumes[scale], scaled_box)).all()
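
The 64-alignment arithmetic used when writing each scale can be checked in isolation. This is just the numpy expression from the loop above, with hand-picked shapes (nothing DVID-specific is assumed):

import numpy as np

shape = np.array([100, 65, 64])
aligned_shape = (np.ceil(shape / 64) * 64).astype(int)
assert (aligned_shape == [128, 128, 64]).all()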
Example #4
def assemble_brick_fragments( fragments ):
    """
    Given a list of Bricks with identical logical_boxes, splice their volumes
    together into a final Brick that contains a full volume containing all of
    the fragments.
    
    Note: Brick 'fragments' are also just Bricks, whose physical_box does
          not cover the entire logical_box for the brick.
    
    Each fragment's physical_box indicates where that fragment's data
    should be located within the final returned Brick.
    
    Returns: A Brick containing the data from all fragments,
            UNLESS the fully assembled fragments would not intersect
            with the Brick's own logical_box (i.e. all fragments fall
            within the halo), in which case None is returned.
    
    Note: If the fragment physical_boxes are not disjoint, the results
          are undefined.
    """
    fragments = list(fragments)

    # All logical boxes must be the same
    logical_boxes = np.asarray([frag.logical_box for frag in fragments])
    assert (logical_boxes == logical_boxes[0]).all(), \
        "Cannot assemble brick fragments from different logical boxes. "\
        "They belong to different bricks!"
    final_logical_box = logical_boxes[0]

    # The final physical box is the min/max of all fragment physical extents.
    physical_boxes = np.array([frag.physical_box for frag in fragments])
    assert physical_boxes.ndim == 3 # (N, 2, Dim)
    assert physical_boxes.shape == ( len(fragments), 2, final_logical_box.shape[1] )
    
    final_physical_box = np.asarray( ( np.min( physical_boxes[:,0,:], axis=0 ),
                                       np.max( physical_boxes[:,1,:], axis=0 ) ) )

    interior_box = box_intersection(final_physical_box, final_logical_box)
    if (interior_box[1] - interior_box[0] < 1).any():
        # All fragments lie completely within the halo
        return None

    final_volume_shape = final_physical_box[1] - final_physical_box[0]
    dtype = fragments[0].volume.dtype

    final_volume = np.zeros(final_volume_shape, dtype)

    for frag in fragments:
        internal_box = frag.physical_box - final_physical_box[0]
        overwrite_subvol(final_volume, internal_box, frag.volume)

        # Destroy original to save RAM
        frag.destroy()

    brick = Brick( final_logical_box, final_physical_box, final_volume )
    brick.compress()
    return brick
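
A minimal sketch of the splicing step, assuming overwrite_subvol(dest, box, src) copies src into dest[box[0]:box[1]] (consistent with its usage above; the stand-in below is hypothetical). Two 1-D "fragments" land side by side inside the final physical box:

import numpy as np

def _overwrite_subvol(dest, box, src):
    # Hypothetical stand-in for the real helper.
    dest[tuple(slice(a, b) for a, b in np.transpose(box))] = src

final_physical_box = np.array([[0], [6]])  # min/max over both fragments
final_volume = np.zeros(6, np.uint64)
for frag_box, frag_vol in [(np.array([[0], [3]]), np.array([1, 1, 1])),
                           (np.array([[3], [6]]), np.array([2, 2, 2]))]:
    _overwrite_subvol(final_volume, frag_box - final_physical_box[0], frag_vol)
assert (final_volume == [1, 1, 1, 2, 2, 2]).all()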
Example #5
def zero_fill(vol, box, full_box):
    """
    Given a volume, its corresponding box, and a 'full box' that encompasses it,
    return a volume that fills the full box, padding with zeros where necessary.
    """
    if (box == full_box).all():
        return vol
    else:
        full_vol = np.zeros(full_box[1] - full_box[0], vol.dtype)
        overwrite_subvol(full_vol, box - full_box[0], vol)
        return full_vol
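
Quick usage sketch, assuming overwrite_subvol is available (these examples import it from neuclease.util): pad a slab out to its enclosing box.

import numpy as np

vol = np.ones((1, 2), np.uint8)
box = np.array([[1, 0], [2, 2]])       # where vol sits in global coordinates
full_box = np.array([[0, 0], [3, 2]])  # the enclosing box
full = zero_fill(vol, box, full_box)
assert full.shape == (3, 2) and full.sum() == 2 and (full[1] == 1).all()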
Example #6
def download(bounding_box_zyx, output_path):
    shape = bounding_box_zyx[1] - bounding_box_zyx[0]

    with h5py.File(output_path, 'w') as f:
        gray_dset = f.create_dataset('grayscale',
                                     shape=shape,
                                     dtype=np.uint8,
                                     chunks=True)
        seg_dset = f.create_dataset('segmentation',
                                    shape=shape,
                                    dtype=np.uint64,
                                    chunks=True,
                                    compression='gzip')

        print("Downloading grayscale...")
        block_shape = (256, 256, 256)
        block_boxes = boxes_from_grid(bounding_box_zyx,
                                      block_shape,
                                      clipped=True)
        for block_box in tqdm(block_boxes):
            relative_box = block_box - bounding_box_zyx[0]
            block_gray = fetch_raw(*GRAYSCALE, block_box)
            overwrite_subvol(gray_dset, relative_box, block_gray)

        print("")
        print("Downloading segmentation...")
        block_boxes = boxes_from_grid(bounding_box_zyx,
                                      block_shape,
                                      clipped=True)
        for block_box in tqdm(block_boxes):
            relative_box = block_box - bounding_box_zyx[0]
            block_seg = fetch_labelmap_voxels(*SEGMENTATION, block_box)
            overwrite_subvol(seg_dset, relative_box, block_seg)

    print("")
    print("DONE")
Example #7
def setup_dvid_segmentation_input(setup_dvid_repo, random_segmentation):
    dvid_address, repo_uuid = setup_dvid_repo

    input_segmentation_name = 'labelmapcopy-segmentation-input'
    output_segmentation_name = 'labelmapcopy-segmentation-output'

    partial_output_segmentation_name = 'labelmapcopy-segmentation-partial-output'

    max_scale = 3
    already_exists = False

    try:
        create_labelmap_instance(dvid_address,
                                 repo_uuid,
                                 input_segmentation_name,
                                 max_scale=max_scale)
        create_labelmap_instance(dvid_address,
                                 repo_uuid,
                                 partial_output_segmentation_name,
                                 max_scale=max_scale)
    except HTTPError as ex:
        if ex.response is not None and 'already exists' in ex.response.content.decode(
                'utf-8'):
            already_exists = True

    expected_vols = {}
    for scale in range(1 + max_scale):
        if scale == 0:
            scaled_vol = random_segmentation
        else:
            scaled_vol = downsample(scaled_vol, 2, 'labels-numba')
        expected_vols[scale] = scaled_vol

        if not already_exists:
            scaled_box = round_box([(0, 0, 0), scaled_vol.shape], 64, 'out')
            aligned_vol = np.zeros(scaled_box[1], np.uint64)
            overwrite_subvol(aligned_vol, [(0, 0, 0), scaled_vol.shape],
                             scaled_vol)
            post_labelmap_voxels(dvid_address,
                                 repo_uuid,
                                 input_segmentation_name, (0, 0, 0),
                                 aligned_vol,
                                 scale=scale)

    if not already_exists:
        # Create a 'partial' output volume that is the same (bitwise) as the input except for some blocks.
        scaled_box = np.array([(0, 0, 0), random_segmentation.shape])
        scaled_box[1, -1] = 192
        for scale in range(1 + max_scale):
            scaled_box = round_box(scaled_box // (2**scale), 64, 'out')
            raw_blocks = fetch_labelmap_voxels(dvid_address,
                                               repo_uuid,
                                               input_segmentation_name,
                                               scaled_box,
                                               scale,
                                               supervoxels=True,
                                               format='raw-response')
            post_labelmap_blocks(dvid_address,
                                 repo_uuid,
                                 partial_output_segmentation_name, [(0, 0, 0)],
                                 raw_blocks,
                                 scale,
                                 is_raw=True)

        block = np.random.randint(1_000_000,
                                  1_000_010,
                                  size=(64, 64, 64),
                                  dtype=np.uint64)
        post_labelmap_voxels(dvid_address,
                             repo_uuid,
                             partial_output_segmentation_name, (0, 128, 64),
                             block,
                             0,
                             downres=True)

    partial_vol = fetch_labelmap_voxels(dvid_address,
                                        repo_uuid,
                                        partial_output_segmentation_name,
                                        [(0, 0, 0), random_segmentation.shape],
                                        supervoxels=True)

    template_dir = tempfile.mkdtemp(suffix="labelmapcopy-template")

    config_text = textwrap.dedent(f"""\
        workflow-name: labelmapcopy
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [512,64,64]
            available-scales: [0,1,2,3]
 
        output:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {output_segmentation_name}
            supervoxels: true
            disable-indexing: true
            create-if-necessary: true
        
        labelmapcopy:
          slab-shape: [512,128,64]
          dont-overwrite-identical-blocks: true
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, expected_vols, partial_vol, dvid_address, repo_uuid, output_segmentation_name, partial_output_segmentation_name
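
This fixture leans on round_box(box, 64, 'out'), which expands a box outward so both corners land on 64-voxel boundaries. A minimal re-implementation sketch of that semantic (the real helper lives in neuclease.util):

import numpy as np

def _round_box_out(box, grid):
    # Hypothetical stand-in: floor the start, ceil the stop.
    box = np.asarray(box)
    return np.array([box[0] // grid * grid,
                     -(-box[1] // grid) * grid])

assert (_round_box_out([(0, 0, 0), (100, 64, 65)], 64)
        == [(0, 0, 0), (128, 64, 128)]).all()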
Example #8
def pad_brick_data_from_volume_source( padding_grid, volume_accessor_func, brick ):
    """
    Expand the given Brick's data until its physical_box is aligned with the given padding_grid.
    The data in the expanded region will be sourced from the given volume_accessor_func.
    
    Note: padding_grid need not be identical to the grid the Brick was created with,
          but it must divide evenly into that grid. 
    
    For instance, if padding_grid happens to be the same as the brick's own native grid,
    then the physical_box is expanded to align perfectly with the logical_box on all sides: 
    
        +-------------+      +-------------+
        | physical |  |      |     same    |
        |__________|  |      |   physical  |
        |             |  --> |     and     |
        |   logical   |      |   logical   |
        |_____________|      |_____________|
    
    Args:
        brick: Brick
        padding_grid: Grid
        volume_accessor_func: Callable with signature: f(box) -> ndarray

    Returns: Brick
    
    Note: It is not legal to call this function unless the Brick's physical_box
          lies completely within the logical_box (i.e. no halos allowed).
          Furthermore, the padding_grid is not permitted to use a halo, either.
          (These restrictions could be fixed, but the current version of this
          function has these requirements.)

    Note: If no padding is necessary, then the original Brick is returned (no copy is made).
    """
    assert isinstance(padding_grid, Grid)
    assert not padding_grid.halo_shape.any()
    block_shape = padding_grid.block_shape
    assert ((brick.logical_box - padding_grid.offset) % block_shape == 0).all(), \
        f"Padding grid {padding_grid.offset} must be aligned with brick logical_box: {brick.logical_box}"
    
    # Subtract offset to calculate the needed padding
    offset_physical_box = brick.physical_box - padding_grid.offset

    if (offset_physical_box % block_shape == 0).all():
        # Internal data is already aligned to the padding_grid.
        return brick
    
    offset_padded_box = np.array([offset_physical_box[0] // block_shape * block_shape,
                                  (offset_physical_box[1] + block_shape - 1) // block_shape * block_shape])
    
    # Re-add offset
    padded_box = offset_padded_box + padding_grid.offset
    assert (padded_box[0] >= brick.logical_box[0]).all()
    assert (padded_box[1] <= brick.logical_box[1]).all()

    # Initialize a new volume of the fully-padded shape
    padded_volume_shape = padded_box[1] - padded_box[0]
    padded_volume = np.zeros(padded_volume_shape, dtype=brick.volume.dtype)

    # Overwrite the previously existing data in the new padded volume
    orig_box = brick.physical_box
    orig_box_within_padded = orig_box - padded_box[0]
    overwrite_subvol(padded_volume, orig_box_within_padded, brick.volume)
    
    # Check for a non-zero-volume halo on all six sides.
    halo_boxes = []
    for axis in range(padded_volume.ndim):
        if orig_box[0,axis] != padded_box[0,axis]:
            leading_halo_box = padded_box.copy()
            leading_halo_box[1, axis] = orig_box[0,axis]
            halo_boxes.append(leading_halo_box)

        if orig_box[1,axis] != padded_box[1,axis]:
            trailing_halo_box = padded_box.copy()
            trailing_halo_box[0, axis] = orig_box[1,axis]
            halo_boxes.append(trailing_halo_box)

    assert halo_boxes, \
        "How could halo_boxes be empty if there was padding needed?"

    for halo_box in halo_boxes:
        # Retrieve padding data for one halo side
        halo_volume = volume_accessor_func(halo_box)
        
        # Overwrite in the final padded volume
        halo_box_within_padded = halo_box - padded_box[0]
        overwrite_subvol(padded_volume, halo_box_within_padded, halo_volume)

    return Brick( brick.logical_box, padded_box, padded_volume )
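
The floor/ceil padding arithmetic above, checked in isolation with plain numpy: the lower corner is rounded down to a block boundary and the upper corner rounded up.

import numpy as np

block_shape = np.array([64, 64, 64])
physical_box = np.array([[10, 64, 70], [100, 128, 130]])
padded = np.array([physical_box[0] // block_shape * block_shape,
                   (physical_box[1] + block_shape - 1) // block_shape * block_shape])
assert (padded == [[0, 64, 64], [128, 128, 192]]).all()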
Example #9
    def _init_mask(self):
        """
        - read the mask ROI as a volume
        - dilate/erode it if necessary
        - invert it if necessary
        - save to .h5 (just for offline debug)
        - return the scale-5 mask and its scale-5 bounding-box
        """
        options = self.config["masksegmentation"]
        roi = options["mask-roi"]
        invert_mask = options["invert-mask"]
        max_scale = options["max-pyramid-scale"]
        roi_dilation = options["dilate-roi"]
        roi_erosion = options["erode-roi"]
        seg_dilation = options["dilate-segmentation"]

        block_width = self.output_service.block_width

        # Select a mask_box that's large enough to divide evenly into the
        # block width even when reduced to the highest scale we'll be processing.
        seg_box = round_box(self.input_service.bounding_box_zyx,
                            block_width * 2**max_scale)
        seg_box_s5 = round_box(seg_box, 2**5) // (2**5)

        with Timer(f"Loading ROI '{roi}'", logger):
            roi_mask, _ = fetch_roi(self.input_service.server,
                                    self.input_service.uuid,
                                    roi,
                                    format='mask',
                                    mask_box=seg_box_s5)

        with h5py.File('roi-mask.h5', 'w') as f:
            f.create_dataset('mask',
                             data=roi_mask.view(np.uint8),
                             chunks=(128, 128, 128))

        assert not (roi_dilation and roi_erosion)

        if roi_dilation > 0:
            with Timer(f"Dilating ROI by {roi_dilation}", logger):
                roi_mask = vigra.filters.multiBinaryDilation(
                    roi_mask, roi_dilation)
            with h5py.File('dilated-roi-mask.h5', 'w') as f:
                f.create_dataset('mask',
                                 data=roi_mask.view(np.uint8),
                                 chunks=(128, 128, 128))

        if roi_erosion > 0:
            with Timer(f"Eroding ROI by {roi_erosion}", logger):
                roi_mask = vigra.filters.multiBinaryErosion(
                    roi_mask, roi_erosion)
            with h5py.File('eroded-roi-mask.h5', 'w') as f:
                f.create_dataset('mask',
                                 data=roi_mask.view(np.uint8),
                                 chunks=(128, 128, 128))

        assert not seg_dilation or invert_mask, \
            "Can't use 'dilate-segmentation'. The segmentation isn't downloaded unless 'invert-mask' is used."

        if invert_mask:
            with Timer("Inverting mask", logger):
                # Initialize the mask with entire segmentation at scale 5,
                # then subtract the roi from it.
                boxes = [
                    *boxes_from_grid(seg_box_s5, (64, 64, 2048), clipped=True)
                ]

                input_service = self.input_service

                def fetch_seg_mask_s5(box_s5):
                    seg_s5 = input_service.get_subvolume(box_s5, scale=5)
                    return box_s5, (seg_s5 != 0)

                boxes_and_mask = dask.bag.from_sequence(
                    boxes, 1).map(fetch_seg_mask_s5).compute()

                seg_mask = np.zeros(box_shape(seg_box_s5), bool)
                for box_s5, box_mask in boxes_and_mask:
                    overwrite_subvol(seg_mask, box_s5, box_mask)

                if seg_dilation == 0:
                    with h5py.File('segmentation-mask.h5', 'w') as f:
                        f.create_dataset('mask',
                                         data=seg_mask.view(np.uint8),
                                         chunks=(128, 128, 128))
                else:
                    with Timer(f"Dilating segmentation by {seg_dilation}",
                               logger):
                        seg_mask = vigra.filters.multiBinaryDilation(
                            seg_mask, seg_dilation)

                    with h5py.File('dilated-segmentation-mask.h5', 'w') as f:
                        f.create_dataset('mask',
                                         data=seg_mask.view(np.uint8),
                                         chunks=(128, 128, 128))

                seg_mask[roi_mask] = False
                roi_mask = seg_mask

        with h5py.File('final-mask.h5', 'w') as f:
            f.create_dataset('mask',
                             data=roi_mask.view(np.uint8),
                             chunks=(128, 128, 128))

        # Downsample the roi_mask to dvid-block resolution, just to see how many blocks it touches.
        block_mask = view_as_blocks(roi_mask, (2, 2, 2)).any(axis=(3, 4, 5))
        blocks_touched = block_mask.sum()
        voxel_total = blocks_touched * (block_width**3)
        logger.info(
            f"Mask touches {blocks_touched} blocks ({voxel_total / 1e9:.1f} Gigavoxels)"
        )

        return roi_mask, seg_box_s5
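
The block-occupancy count at the end uses skimage's view_as_blocks, which requires the array shape to divide evenly into the block shape. A tiny self-contained check of that reduction:

import numpy as np
from skimage.util import view_as_blocks

mask = np.zeros((4, 4, 4), bool)
mask[0, 0, 0] = True  # a single voxel touches exactly one 2x2x2 block
block_mask = view_as_blocks(mask, (2, 2, 2)).any(axis=(3, 4, 5))
assert block_mask.shape == (2, 2, 2) and block_mask.sum() == 1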
Example #10
def assemble_brick_fragments(fragments, output_accessor_fn=None):
    """
    Given a list of Bricks with identical logical_boxes, splice their volumes
    together into a final Brick that contains a full volume containing all of
    the fragments.

    Note:
        Brick 'fragments' are also just Bricks, whose physical_box does
        not cover the entire logical_box for the brick.
        Each fragment's physical_box indicates where that fragment's data
        should be located within the final returned Brick.

    Args:
        fragments:
            TODO: docs.

        output_accessor_fn:
            Callable with signature: f(box) -> ndarray
            TODO: docs.

    Returns:
        A Brick containing the data from all fragments,
        UNLESS the fully assembled fragments would not intersect
        with the Brick's own logical_box (i.e. all fragments fall
        within the halo), in which case None is returned.

    Note:
        If the fragment physical_boxes are not disjoint, the results
        are undefined.  That is, if two fragments overlap, there's
        no guarantee about which one "wins" for the overlapping region.
    """
    fragments = list(fragments)

    # All logical boxes must be the same
    logical_boxes = np.asarray([frag.logical_box for frag in fragments])
    assert (logical_boxes == logical_boxes[0]).all(), \
        "Cannot assemble brick fragments from different logical boxes. "\
        "They belong to different bricks!"
    final_logical_box = fragments[0].logical_box
    final_location_id = fragments[0].location_id

    # The final physical box is the min/max of all fragment physical extents.
    physical_boxes = np.array([frag.physical_box for frag in fragments])
    assert physical_boxes.ndim == 3  # (N, 2, Dim)
    assert physical_boxes.shape == (len(fragments), 2,
                                    final_logical_box.shape[1])

    final_physical_box = np.asarray(
        (np.min(physical_boxes[:, 0, :],
                axis=0), np.max(physical_boxes[:, 1, :], axis=0)))

    intersects_interior = False
    for frag_pbox in physical_boxes:
        interior_box = box_intersection(frag_pbox, final_logical_box)
        if (interior_box[1] - interior_box[0] > 0).all():
            intersects_interior = True

    if not intersects_interior:
        # All fragments lie completely within the halo;
        # none intersect with the interior logical_box,
        # so we don't bother keeping this brick.
        return None

    final_volume_shape = final_physical_box[1] - final_physical_box[0]
    dtype = fragments[0].volume.dtype

    # If the physical boxes don't completely fill the final_box,
    # then we will need to use the output_accessor_fn (if given).
    if output_accessor_fn is None or is_box_coverage_complete(
            physical_boxes, final_physical_box):
        final_volume = np.zeros(final_volume_shape, dtype)
    else:
        final_volume = output_accessor_fn(final_physical_box)

    for frag in fragments:
        internal_box = frag.physical_box - final_physical_box[0]
        overwrite_subvol(final_volume, internal_box, frag.volume)

        # Recompress fragment now that we're done with it.
        frag.compress()

        ## It's tempting to destroy the fragment to save RAM,
        ## but the fragment might be needed by more than one final brick.
        ## (Also, it might be needed twice if a Worker gets restarted.)
        # frag.destroy()

    compression = fragments[0].compression
    brick = Brick(final_logical_box,
                  final_physical_box,
                  final_volume,
                  location_id=final_location_id,
                  compression=compression)
    brick.compress()
    return brick
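
A minimal sketch of the interior test above, assuming box_intersection() returns the overlapping box (with a non-positive extent on some axis when the boxes don't overlap, which is exactly what the extent check detects). The stand-in below is hypothetical:

import numpy as np

def _box_intersection(box_a, box_b):
    return np.array([np.maximum(box_a[0], box_b[0]),
                     np.minimum(box_a[1], box_b[1])])

logical_box = np.array([[0, 0], [64, 64]])
halo_fragment = np.array([[-8, 0], [0, 64]])  # lies entirely in the halo
inner = _box_intersection(halo_fragment, logical_box)
assert not (inner[1] - inner[0] > 0).all()  # no interior overlap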
Example #11
def pad_brick_data_from_volume_source(padding_grid, volume_accessor_func,
                                      brick):
    """
    Expand the given Brick's data until its physical_box is aligned with the given padding_grid.
    The data in the expanded region will be sourced from the given volume_accessor_func.

    Note: padding_grid need not be identical to the grid the Brick was created with,
          but it must divide evenly into that grid.

    For instance, if padding_grid happens to be the same as the brick's own native grid,
    then the physical_box is expanded to align perfectly with the logical_box on all sides:

        +-------------+      +-------------+
        | physical |  |      |     same    |
        |__________|  |      |   physical  |
        |             |  --> |     and     |
        |   logical   |      |   logical   |
        |_____________|      |_____________|

    Args:
        brick: Brick
        padding_grid: Grid
        volume_accessor_func: Callable with signature: f(box) -> ndarray

    Returns: Brick

    Note: It is not legal to call this function unless the Brick's physical_box
          lies completely within the logical_box (i.e. no halos allowed).
          Furthermore, the padding_grid is not permitted to use a halo, either.
          (These restrictions could be fixed, but the current version of this
          function has these requirements.)

    Note: If no padding is necessary, then the original Brick is returned (no copy is made).
    """
    assert isinstance(padding_grid, Grid)
    assert not padding_grid.halo_shape.any()
    block_shape = padding_grid.block_shape
    assert ((brick.logical_box - padding_grid.offset) % block_shape == 0).all(), \
        f"Padding grid {padding_grid.offset} must be aligned with brick logical_box: {brick.logical_box}"

    # Subtract offset to calculate the needed padding
    offset_physical_box = brick.physical_box - padding_grid.offset

    if (offset_physical_box % block_shape == 0).all():
        # Internal data is already aligned to the padding_grid.
        return brick

    offset_padded_box = np.array([
        offset_physical_box[0] // block_shape * block_shape,
        (offset_physical_box[1] + block_shape - 1) // block_shape * block_shape
    ])

    # Re-add offset
    padded_box = offset_padded_box + padding_grid.offset
    assert (padded_box[0] >= brick.logical_box[0]).all()
    assert (padded_box[1] <= brick.logical_box[1]).all()

    # Initialize a new volume of the fully-padded shape
    padded_volume_shape = padded_box[1] - padded_box[0]
    padded_volume = np.zeros(padded_volume_shape, dtype=brick.volume.dtype)

    # Overwrite the previously existing data in the new padded volume
    orig_box = brick.physical_box
    orig_box_within_padded = orig_box - padded_box[0]
    overwrite_subvol(padded_volume, orig_box_within_padded, brick.volume)

    # Check for a non-zero-volume halo on all six sides.
    halo_boxes = []
    for axis in range(padded_volume.ndim):
        if orig_box[0, axis] != padded_box[0, axis]:
            leading_halo_box = padded_box.copy()
            leading_halo_box[1, axis] = orig_box[0, axis]
            halo_boxes.append(leading_halo_box)

        if orig_box[1, axis] != padded_box[1, axis]:
            trailing_halo_box = padded_box.copy()
            trailing_halo_box[0, axis] = orig_box[1, axis]
            halo_boxes.append(trailing_halo_box)

    assert halo_boxes, \
        "How could halo_boxes be empty if there was padding needed?"

    for halo_box in halo_boxes:
        # Retrieve padding data for one halo side
        halo_volume = volume_accessor_func(halo_box)

        # Overwrite in the final padded volume
        halo_box_within_padded = halo_box - padded_box[0]
        overwrite_subvol(padded_volume, halo_box_within_padded, halo_volume)

    new_brick = Brick(brick.logical_box,
                      padded_box,
                      padded_volume,
                      location_id=brick.location_id,
                      compression=brick.compression)
    brick.compress()
    return new_brick


def test_dvid_volume_service_labelmap(setup_dvid_repo, random_segmentation,
                                      disable_auto_retry):
    server, uuid = setup_dvid_repo
    instance_name = 'test-dvs-labelmap'

    volume = random_segmentation[:256, :192, :128]
    max_scale = 2
    voxel_dimensions = [4.0, 4.0, 32.0]

    config_text = textwrap.dedent(f"""\
        dvid:
          server: {server}
          uuid: {uuid}
          segmentation-name: {instance_name}
          supervoxels: true
          
          create-if-necessary: true
          creation-settings:
            max-scale: {max_scale}
            voxel-size: {voxel_dimensions}
       
        geometry:
          bounding-box: [[0,0,0], {list(volume.shape[::-1])}]
          message-block-shape: [64,64,64]
    """)

    yaml = YAML()
    with StringIO(config_text) as f:
        volume_config = yaml.load(f)

    assert instance_name not in fetch_repo_instances(server, uuid)

    service = VolumeService.create_from_config(volume_config)

    repo_instances = fetch_repo_instances(server, uuid)

    assert instance_name in repo_instances
    assert repo_instances[instance_name] == 'labelmap'

    info = fetch_instance_info(server, uuid, instance_name)
    assert info["Extended"]["VoxelSize"] == voxel_dimensions

    scaled_volumes = {}
    for scale in range(max_scale + 1):
        vol = downsample(volume, 2**scale, 'label')
        aligned_shape = (np.ceil(np.array(vol.shape) / 64) * 64).astype(int)
        aligned_vol = np.zeros(aligned_shape, np.uint64)
        overwrite_subvol(aligned_vol, [(0, 0, 0), vol.shape], vol)

        service.write_subvolume(aligned_vol, (0, 0, 0), scale)
        scaled_volumes[scale] = aligned_vol

    box = np.array([[40, 80, 40], [240, 160, 100]])
    for scale in range(max_scale + 1):
        scaled_box = box // 2**scale
        vol = service.get_subvolume(scaled_box, scale)
        assert (vol == extract_subvol(scaled_volumes[scale], scaled_box)).all()

    #
    # Check sparse coords function
    #
    labels = list({*pd.unique(volume.reshape(-1))} - {0})
    brick_coords_df = service.sparse_brick_coords_for_labels(labels)

    assert brick_coords_df.columns.tolist() == ['z', 'y', 'x', 'label']
    assert set(brick_coords_df['label'].values) == set(labels), \
        "Some labels were missing from the sparse brick coords!"

    def ndi(shape):
        return np.indices(shape).reshape(len(shape), -1).transpose()

    expected_df = pd.DataFrame(ndi(volume.shape), columns=[*'zyx'])

    expected_df['label'] = volume.reshape(-1)
    expected_df['z'] //= 64
    expected_df['y'] //= 64
    expected_df['x'] //= 64
    expected_df = expected_df.drop_duplicates()
    expected_df['z'] *= 64
    expected_df['y'] *= 64
    expected_df['x'] *= 64

    expected_df = expected_df.query('label != 0')

    expected_df.sort_values(['z', 'y', 'x', 'label'], inplace=True)
    brick_coords_df.sort_values(['z', 'y', 'x', 'label'], inplace=True)

    expected_df.reset_index(drop=True, inplace=True)
    brick_coords_df.reset_index(drop=True, inplace=True)

    assert expected_df.shape == brick_coords_df.shape
    assert (brick_coords_df == expected_df).all().all()

    #
    # Check sample_labels()
    #
    # Sample coordinates from the full-resolution volume
    # (not the last downsampled 'vol' from the loop above).
    points = [np.random.randint(d, size=(10, )) for d in volume.shape]
    points = np.transpose(points)
    labels = service.sample_labels(points)
    assert (labels == volume[(*points.transpose(), )]).all()
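
The expected_df construction above reduces voxel coordinates to brick corners: integer-divide by the brick width, deduplicate, then scale back up. The same reduction in miniature:

import pandas as pd

df = pd.DataFrame({'z': [0, 1, 70], 'y': [0, 0, 0], 'x': [0, 0, 0],
                   'label': [5, 5, 5]})
df[['z', 'y', 'x']] //= 64
df = df.drop_duplicates()
df[['z', 'y', 'x']] *= 64
assert df[['z', 'y', 'x']].values.tolist() == [[0, 0, 0], [64, 0, 0]]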