def setup_dvid_segmentation_input(setup_dvid_repo):
    dvid_address, repo_uuid = setup_dvid_repo

    # Start with a low-res map of the test data
    # and scale it up 16x to produce a (64, 128, 128) volume.

    _ = 0  # shorthand for background voxels in the layout below
    #                  0 1 2 3  4 5 6 7
    volume_layout = [[
        [_, _, _, _, _, 6, 6, 6],  # 0
        [_, 1, 1, 2, 2, _, 6, _],  # 1
        [_, 1, 1, 2, 2, _, _, _],  # 2
        [_, 1, 1, 2, 8, _, 7, 7],  # 3
        [_, 1, 1, 2, 8, _, 7, 7],  # 4
        [_, _, _, _, _, _, _, _],  # 5
        [_, 4, 4, 4, _, _, _, _],  # 6
        [_, 3, 3, 3, _, 5, 9, _],  # 7
    ]]
    #                  0 1 2 3  4 5 6 7

    lowres_volume = np.zeros((4, 8, 8), np.uint64)
    lowres_volume[:] = volume_layout

    volume = upsample(lowres_volume, 16)
    assert volume.shape == (64, 128, 128)

    input_segmentation_name = 'findadjacencies-input'
    create_labelmap_instance(dvid_address, repo_uuid, input_segmentation_name)
    post_labelmap_voxels(dvid_address, repo_uuid, input_segmentation_name,
                         (0, 0, 0), volume)

    template_dir = tempfile.mkdtemp(suffix="findadjacencies-from-dvid")

    config_text = textwrap.dedent(f"""\
        workflow-name: findadjacencies
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
        
          geometry:
            message-block-shape: [128,64,64]
 
        findadjacencies:
          output-table: output.csv
          find-closest-using-scale: 0
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

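    # Load the same config text into a dict, so tests can tweak it before launching the workflow.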
    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, volume, dvid_address, repo_uuid
def setup_tsv_input(setup_dvid_repo):
    dvid_address, repo_uuid = setup_dvid_repo

    input_segmentation_name = 'segmentation-decimatemeshes-input'
    test_volume, object_boxes, object_sizes = create_test_segmentation()

    create_labelmap_instance(dvid_address, repo_uuid, input_segmentation_name, max_scale=3)
    post_labelmap_voxels(dvid_address, repo_uuid, input_segmentation_name, (0,0,0), test_volume, downres=True, noindexing=False)
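    # (downres=True asks DVID to update the downsampled pyramid scales, too.)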

    tsv_name = 'segmentation-decimatemeshes-tsv'
    create_tarsupervoxel_instance(dvid_address, repo_uuid, tsv_name, input_segmentation_name, '.drc')
 
    # Post supervoxel meshes
    meshes = Mesh.from_label_volume(test_volume, progress=False)
    meshes_data = {f"{label}.drc": mesh.serialize(fmt='drc') for label, mesh in meshes.items()}
    post_load(dvid_address, repo_uuid, tsv_name, meshes_data)
    
    # Merge two of the objects (100 and 300)
    post_merge(dvid_address, repo_uuid, input_segmentation_name, 100, [300])
    object_boxes[100] = box_union(object_boxes[100], object_boxes[300])
    del object_boxes[300]
    
    object_sizes[100] += object_sizes[300]
    del object_sizes[300]
    
    meshes[100] = Mesh.concatenate_meshes((meshes[100], meshes[300]))
    del meshes[300]
    
    return dvid_address, repo_uuid, tsv_name, object_boxes, object_sizes, meshes
def copy_vnc_subvolume(box_zyx,
                       copy_grayscale=True,
                       copy_segmentation=True,
                       chunk_shape=(64, 64, 2048)):
    import logging
    import numpy as np
    from neuclease.util import boxes_from_grid, tqdm_proxy, round_box
    from neuclease.dvid import find_master, fetch_raw, post_raw, fetch_subvol, post_labelmap_voxels

    logger = logging.getLogger(__name__)

    box_zyx = np.asarray(box_zyx)
    assert not (box_zyx % 64).any(), \
        "Only 64px block-aligned volumes can be copied."

    # Copy from the current master node of the VNC repo.
    vnc_master = ('emdata4:8200', find_master('emdata4:8200'))

    NUM_SCALES = 8
    num_voxels = np.prod(box_zyx[1] - box_zyx[0])

    if copy_grayscale:
        logger.info(
            f"Copying grayscale from box {box_zyx[:,::-1].tolist()} ({num_voxels/1e6:.1f} Mvox) for {NUM_SCALES} scales"
        )
        for scale in tqdm_proxy(range(NUM_SCALES)):
            if scale == 0:
                input_name = 'grayscalejpeg'
                output_name = 'local-grayscalejpeg'
            else:
                input_name = f'grayscalejpeg_{scale}'
                output_name = f'local-grayscalejpeg_{scale}'

            scaled_box_zyx = np.maximum(box_zyx // 2**scale, 1)
            scaled_box_zyx = round_box(scaled_box_zyx, 64, 'out')
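            # Expand the scaled box to 64-px block alignment before copying.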

            for chunk_box in tqdm_proxy(boxes_from_grid(scaled_box_zyx,
                                                        chunk_shape,
                                                        clipped=True),
                                        leave=False):
                chunk = fetch_subvol(*vnc_master,
                                     input_name,
                                     chunk_box,
                                     progress=False)
                post_raw(*vnc_master, output_name, chunk_box[0], chunk)

    if copy_segmentation:
        logger.info(
            f"Copying segmentation from box {box_zyx[:,::-1].tolist()} ({num_voxels/1e6:.2f} Mvox)"
        )
        for chunk_box in tqdm_proxy(
                boxes_from_grid(box_zyx, chunk_shape, clipped=True)):
            chunk = fetch_raw(*vnc_master,
                              'segmentation',
                              chunk_box,
                              dtype=np.uint64)
            post_labelmap_voxels(*vnc_master,
                                 'local-segmentation',
                                 chunk_box[0],
                                 chunk,
                                 downres=True)

        # TODO: Update label indexes?

    logger.info("DONE")
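A minimal usage sketch (not from the original source): the box below is a hypothetical region, which must be 64-px block-aligned and given in ZYX order, per the assertion at the top of the function.

import numpy as np
box_zyx = np.array([[0, 0, 0], [256, 256, 256]])  # hypothetical region
copy_vnc_subvolume(box_zyx, copy_grayscale=True, copy_segmentation=False)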
def test_post_hierarchical_cleaves(labelmap_setup):
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    uuid = post_branch(dvid_server, dvid_repo,
                       'segmentation-post_hierarchical_cleaves', '')
    instance_info = dvid_server, uuid, 'segmentation-post_hierarchical_cleaves'
    create_labelmap_instance(*instance_info)

    svs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    groups = [1, 1, 2, 2, 3, 3, 3, 3, 3, 4]

    svs = np.asarray(svs, np.uint64)

    # Post some supervoxels in multiple blocks, just to prove that post_hierarchical_cleaves()
    # doesn't assume that the labelindex has the same length as the mapping.
    sv_vol = np.zeros((128, 64, 64), np.uint64)
    sv_vol[0, 0, :len(svs)] = svs
    sv_vol[64, 0, 0:len(svs):2] = svs[::2]

    post_labelmap_voxels(*instance_info, (0, 0, 0), sv_vol)

    post_merge(*instance_info, 1, svs[1:])

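    # Split body 1 back apart: assign each supervoxel to a cleave group.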
    group_mapping = pd.Series(index=svs, data=groups)
    final_table = post_hierarchical_cleaves(*instance_info, 1, group_mapping)

    assert (fetch_mapping(*instance_info, svs) == final_table['body'].values).all()
    assert (final_table.drop_duplicates(['group'])
            == final_table.drop_duplicates(['group', 'body'])).all().all()
    assert (final_table.drop_duplicates(['body'])
            == final_table.drop_duplicates(['group', 'body'])).all().all()

    # Since the mapping included all supervoxels in the body,
    # the last group is left with the original label.
    assert final_table.iloc[-1]['body'] == 1

    # Now merge them all together and try again, but leave
    # two supervoxels out of the groups this time.
    merges = set(pd.unique(final_table['body'].values)) - set([1])
    post_merge(*instance_info, 1, list(merges))

    group_mapping = pd.Series(index=svs[:-2], data=groups[:-2])
    final_table = post_hierarchical_cleaves(*instance_info, 1, group_mapping)

    assert len(final_table.query('body == 1')) == 0, \
        "Did not expect any of the groups to retain the original body ID!"
    assert (fetch_mapping(*instance_info, svs[:-2]) == final_table['body'].values).all()
    assert (final_table.drop_duplicates(['group'])
            == final_table.drop_duplicates(['group', 'body'])).all().all()
    assert (final_table.drop_duplicates(['body'])
            == final_table.drop_duplicates(['group', 'body'])).all().all()
    assert (fetch_mapping(*instance_info, svs[-2:]) == 1).all()
def test_labelindex(labelmap_setup):
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    # Need an unlocked node to test these posts
    uuid = post_branch(dvid_server, dvid_repo, 'test_labelindex',
                       'test_labelindex')
    instance_info = (dvid_server, uuid, 'segmentation-scratch')

    # Write some random data
    sv = 99
    vol = sv * np.random.randint(2, size=(128, 128, 128), dtype=np.uint64)
    offset = np.array((64, 64, 64))

    # DVID will generate the index.
    post_labelmap_voxels(*instance_info, offset, vol)

    # Compute labelindex table from scratch
    rows = []
    for block_coord in ndrange(offset, offset + vol.shape, (64, 64, 64)):
        block_coord = np.array(block_coord)
        block_box = np.array((block_coord, block_coord + 64))
        block = extract_subvol(vol, block_box - offset)

        count = (block == sv).sum()
        rows.append([*block_coord, sv, count])

    index_df = pd.DataFrame(rows, columns=['z', 'y', 'x', 'sv', 'count'])

    # Check DVID's generated labelindex table against expected
    labelindex_tuple = fetch_labelindex(*instance_info, sv, format='pandas')
    assert labelindex_tuple.label == sv

    labelindex_tuple.blocks.sort_values(['z', 'y', 'x', 'sv'], inplace=True)
    labelindex_tuple.blocks.reset_index(drop=True, inplace=True)
    assert (labelindex_tuple.blocks == index_df).all().all()

    # Check our protobuf against DVID's
    index_tuple = PandasLabelIndex(index_df, sv, 1,
                                   datetime.datetime.now().isoformat(),
                                   'someuser')
    labelindex = create_labelindex(index_tuple)

    # Since labelindex block entries are not required to be sorted,
    # dvid might return them in a different order.
    # Hence this comparison function which sorts them first.
    def compare_proto_blocks(left, right):
        left_blocks = sorted(left.blocks.items())
        right_blocks = sorted(right.blocks.items())
        return left_blocks == right_blocks

    dvid_labelindex = fetch_labelindex(*instance_info, sv, format='protobuf')
    assert compare_proto_blocks(labelindex, dvid_labelindex)

    # Check post/get roundtrip
    post_labelindex(*instance_info, sv, labelindex)
    dvid_labelindex = fetch_labelindex(*instance_info, sv, format='protobuf')
    assert compare_proto_blocks(labelindex, dvid_labelindex)
def test_fetch_sparsevol_coarse_via_labelindex(labelmap_setup):
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    # Create a labelmap volume whose labels span 3 blocks.
    #
    # Supervoxels are arranged like this:
    #
    #   | 1 2 | 3 4 | 5 6 |
    #
    # After merging [3,4,5] into body 2, the bodies will be:
    #
    #   | 1 2 | 2 2 | 2 6 |
    #
    vol_shape = (64, 64, 256)
    sv_vol = np.zeros(vol_shape, np.uint64)
    sv_vol[:, :, 0:32] = 1
    sv_vol[:, :, 32:64] = 2
    sv_vol[:, :, 64:96] = 3
    sv_vol[:, :, 96:128] = 4
    sv_vol[:, :, 128:160] = 5
    sv_vol[:, :, 160:192] = 6

    instance_info = dvid_server, dvid_repo, 'segmentation-test-sparsevol-coarse'
    create_labelmap_instance(*instance_info)
    post_labelmap_voxels(*instance_info, (0, 0, 0), sv_vol)

    post_merge(*instance_info, 2, [3, 4, 5])
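    # Body 2 now contains supervoxels {2, 3, 4, 5}.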

    body_svc = fetch_sparsevol_coarse_via_labelindex(*instance_info,
                                                     2,
                                                     method='protobuf')
    expected_body_svc = fetch_sparsevol_coarse(*instance_info, 2)
    assert sorted(body_svc.tolist()) == sorted(expected_body_svc.tolist())

    body_svc = fetch_sparsevol_coarse_via_labelindex(*instance_info,
                                                     2,
                                                     method='pandas')
    expected_body_svc = fetch_sparsevol_coarse(*instance_info, 2)
    assert sorted(body_svc.tolist()) == sorted(expected_body_svc.tolist())

    sv_svc = fetch_sparsevol_coarse_via_labelindex(*instance_info,
                                                   3,
                                                   supervoxels=True,
                                                   method='protobuf')
    expected_sv_svc = fetch_sparsevol_coarse(*instance_info,
                                             3,
                                             supervoxels=True)
    assert sorted(sv_svc.tolist()) == sorted(expected_sv_svc.tolist())

    sv_svc = fetch_sparsevol_coarse_via_labelindex(*instance_info,
                                                   3,
                                                   supervoxels=True,
                                                   method='pandas')
    expected_sv_svc = fetch_sparsevol_coarse(*instance_info,
                                             3,
                                             supervoxels=True)
    assert sorted(sv_svc.tolist()) == sorted(expected_sv_svc.tolist())
def setup_hdf5_segmentation_input(setup_dvid_repo, write_hdf5_volume):
    volume_path, random_segmentation = write_hdf5_volume
    dvid_address, repo_uuid = setup_dvid_repo
    template_dir = tempfile.mkdtemp(
        suffix="copysegmentation-from-hdf5-template")

    output_segmentation_name = 'segmentation-output-from-hdf5'

    # Make sure the output is empty (if it exists)
    if output_segmentation_name in fetch_repo_instances(
            dvid_address, repo_uuid):
        z = np.zeros((256, 256, 256), np.uint64)
        post_labelmap_voxels(dvid_address, repo_uuid, output_segmentation_name,
                             (0, 0, 0), z, 0, True)

    config_text = textwrap.dedent(f"""\
        workflow-name: copysegmentation
        cluster-type: {CLUSTER_TYPE}
        
        input:
          hdf5:
            path: {volume_path}
            dataset: volume
          
          geometry:
            message-block-shape: [64,64,256] # note: this is weird because normally we stripe in the X direction...
            bounding-box: [[0,0,100], [256,200,256]]

        output:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {output_segmentation_name}
            supervoxels: true
            disable-indexing: true
            create-if-necessary: true
                        
          geometry: {{}} # Auto-set from input
        
        copysegmentation:
          pyramid-depth: 1
          slab-depth: 128
          download-pre-downsampled: false
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, random_segmentation, dvid_address, repo_uuid, output_segmentation_name
def test_fetch_labelindices(labelmap_setup):
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    # Need an unlocked node to test these posts
    uuid = post_branch(dvid_server, dvid_repo, 'test_labelindices',
                       'test_labelindices')
    instance_info = (dvid_server, uuid, 'segmentation-scratch')

    # Write some random data
    vol = np.random.randint(1, 10, size=(128, 128, 128), dtype=np.uint64)
    offset = np.array((64, 64, 64))

    # DVID will generate the index.
    post_labelmap_voxels(*instance_info, offset, vol)

    labelindices = fetch_labelindices(*instance_info, list(range(1, 10)))
    for sv, li in zip(range(1, 10), labelindices.indices):
        # This function is already tested elsewhere, so we'll use it as a reference
        li2 = fetch_labelindex(*instance_info, sv)
        assert li == li2

    labelindices = fetch_labelindices(*instance_info,
                                      list(range(1, 10)),
                                      format='list-of-protobuf')
    for sv, li in zip(range(1, 10), labelindices):
        # This function is already tested elsewhere, so we'll use it as a reference
        li2 = fetch_labelindex(*instance_info, sv)
        assert li == li2

    labelindices = fetch_labelindices(*instance_info,
                                      list(range(1, 10)),
                                      format='pandas')
    for sv, li in zip(range(1, 10), labelindices):
        # This function is already tested elsewhere, so we'll use it as a reference
        li2 = fetch_labelindex(*instance_info, sv, format='pandas')
        li_df = li.blocks.sort_values(['z', 'y', 'x']).reset_index(drop=True)
        li2_df = li2.blocks.sort_values(['z', 'y', 'x']).reset_index(drop=True)
        assert (li_df == li2_df).all().all()

    # Test the copy function (just do a round-trip -- hopefully I didn't swap src and dest anywhere...)
    copy_labelindices(instance_info,
                      instance_info,
                      list(range(1, 10)),
                      batch_size=2)
    copy_labelindices(instance_info,
                      instance_info,
                      list(range(1, 10)),
                      batch_size=2,
                      processes=2)
def test_post_labelmap_voxels(labelmap_setup):
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup
    instance_info = DvidInstanceInfo(dvid_server, dvid_repo,
                                     'segmentation-scratch')

    # Write some random data and read it back.
    vol = np.random.randint(10, size=(128, 128, 128), dtype=np.uint64)
    offset = (64, 64, 64)

    post_labelmap_voxels(dvid_server, dvid_repo, 'segmentation-scratch',
                         offset, vol, 0)
    complete_voxels = fetch_labelmap_voxels(*instance_info, [(0, 0, 0),
                                                             (256, 256, 256)],
                                            supervoxels=True)

    assert (complete_voxels[64:192, 64:192, 64:192] == vol).all()
def setup_segmentation_input(setup_dvid_repo):
    dvid_address, repo_uuid = setup_dvid_repo
    input_segmentation_name = 'segmentation-createmeshes-input'
    test_volume, object_boxes, object_sizes = create_test_segmentation()

    create_labelmap_instance(dvid_address,
                             repo_uuid,
                             input_segmentation_name,
                             max_scale=3)
    post_labelmap_voxels(dvid_address,
                         repo_uuid,
                         input_segmentation_name, (0, 0, 0),
                         test_volume,
                         downres=True,
                         noindexing=False)
    return dvid_address, repo_uuid, input_segmentation_name, object_boxes, object_sizes
def process_point(seg_src, seg_dst, point, radius, src_body, dst_body):
    """
    Generate a neighborhood segment around a particular point.
    Upload the voxels for the segment and the corresponding mesh.
    """
    r = radius
    src_box = np.asarray((point - r, point + r + 1))
    src_vol = fetch_labelmap_voxels(*seg_src, src_box)

    if src_body is None:
        src_body = src_vol[r, r, r]

    if dst_body is None:
        # Generate a neighborhood segment ID from the coordinate.
        # Divide by 4 to ensure the coordinates fit within 2^53.
        # (The segment ID will not retain the full resolution of
        # the coordinate, but that's usually OK for our purposes.)
        dst_body = encode_point_to_uint64(point // 4, 17)
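        # (Three 17-bit fields pack into 51 bits, comfortably below the
        # 53-bit limit for integers that survive JSON/JavaScript floats.)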

    mask = (src_vol == src_body) & sphere_mask(r)

    dst_box = round_box(src_box, 64, 'out')
    dst_vol = fetch_labelmap_voxels(*seg_dst, dst_box)

    dst_view = dst_vol[b2s(*(src_box - dst_box[0]))]
    dst_view[mask] = dst_body

    post_labelmap_voxels(*seg_dst, dst_box[0], dst_vol, downres=True)

    # Mesh needs to be written in nm, hence 8x
    mesh = Mesh.from_binary_vol(mask, 8 * src_box, smoothing_rounds=2)
    mesh.simplify(0.05, in_memory=True)
    post_key(*seg_dst[:2], f'{seg_dst[2]}_meshes', f'{dst_body}.ngmesh',
             mesh.serialize(fmt='ngmesh'))

    centroid = src_box[0] + mask_centroid(mask, True)
    top_z = mask.sum(axis=(1, 2)).nonzero()[0][0]
    top_coords = np.transpose(mask[top_z].nonzero())
    top_point = src_box[0] + (top_z, *top_coords[len(top_coords) // 2])

    return point, centroid, top_point, src_body, dst_body, mask.sum()
def test_fetch_mutations(labelmap_setup):
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    uuid = post_branch(dvid_server, dvid_repo, 'segmentation-fetch_mutations',
                       '')

    instance = 'segmentation-fetch_mutations'
    create_labelmap_instance(dvid_server, uuid, instance)

    voxels = np.zeros((64, 64, 64), dtype=np.uint64)
    voxels[0, 0, :10] = [*range(1, 11)]

    post_labelmap_voxels(dvid_server, uuid, instance, (0, 0, 0), voxels)

    post_merge(dvid_server, uuid, instance, 1, [2, 3, 4])
    post_merge(dvid_server, uuid, instance, 5, [6, 7, 8])

    post_commit(dvid_server, uuid, '')
    uuid2 = post_newversion(dvid_server, uuid, '')

    post_merge(dvid_server, uuid2, instance, 9, [10])
    post_merge(dvid_server, uuid2, instance, 1, [5, 10])

    mut_df = fetch_mutations(dvid_server,
                             uuid2,
                             instance,
                             dag_filter='leaf-only')
    assert len(mut_df) == 2
    assert (mut_df['uuid'] == uuid2).all()
    assert (mut_df['action'] == 'merge').all()
    assert (mut_df['target_body'] == [9, 1]).all()

    mut_df = fetch_mutations(dvid_server,
                             uuid2,
                             instance,
                             dag_filter='leaf-and-parents')
    assert len(mut_df) == 4
    assert (mut_df['uuid'] == [uuid, uuid, uuid2, uuid2]).all()
    assert (mut_df['action'] == 'merge').all()
    assert (mut_df['target_body'] == [1, 5, 9, 1]).all()
def test_copysegmentation_from_hdf5_to_dvid_output_mask(
        setup_hdf5_segmentation_input, disable_auto_retry):
    template_dir, config, input_volume, dvid_address, repo_uuid, _output_segmentation_name = setup_hdf5_segmentation_input

    # make sure we get a fresh output
    output_segmentation_name = 'copyseg-with-output-mask'
    config["output"]["dvid"]["segmentation-name"] = output_segmentation_name

    output_volume = np.zeros((256, 256, 256), np.uint64)
    mask = np.zeros((256, 256, 256), dtype=bool)

    masked_labels = [5, 10, 15, 20]

    # Start with an output that is striped (but not aligned to block boundaries)
    for label, (z_start, z_stop) in enumerate(zip(range(0, 250, 10), range(10, 260, 10))):
        output_volume[z_start:z_stop] = label
        if label in masked_labels:
            mask[z_start:z_stop] = True

    # We expect the output to remain unchanged except in the masked voxels.
    expected_vol = np.where(mask, input_volume, output_volume)

    config["copysegmentation"]["output-mask-labels"] = masked_labels

    max_scale = config["copysegmentation"]["pyramid-depth"]
    create_labelmap_instance(dvid_address,
                             repo_uuid,
                             output_segmentation_name,
                             max_scale=max_scale)
    post_labelmap_voxels(dvid_address, repo_uuid, output_segmentation_name,
                         (0, 0, 0), output_volume)

    setup = template_dir, config, expected_vol, dvid_address, repo_uuid, output_segmentation_name
    _box_zyx, _expected_vol, _output_vol = _run_to_dvid(setup)
def setup_dvid_segmentation_input(setup_dvid_repo, random_segmentation):
    dvid_address, repo_uuid = setup_dvid_repo

    input_segmentation_name = 'segmentation-input'
    output_segmentation_name = 'segmentation-output-from-dvid'

    try:
        create_labelmap_instance(dvid_address, repo_uuid,
                                 input_segmentation_name)
    except HTTPError as ex:
        # Tolerate an instance that already exists; re-raise anything else.
        if ex.response is None or 'already exists' not in ex.response.content.decode('utf-8'):
            raise

    post_labelmap_voxels(dvid_address, repo_uuid, input_segmentation_name,
                         (0, 0, 0), random_segmentation)

    # Make sure the output is empty (if it exists)
    if output_segmentation_name in fetch_repo_instances(
            dvid_address, repo_uuid):
        z = np.zeros((256, 256, 256), np.uint64)
        post_labelmap_voxels(dvid_address, repo_uuid, output_segmentation_name,
                             (0, 0, 0), z, 0, True)

    template_dir = tempfile.mkdtemp(
        suffix="copysegmentation-from-dvid-template")

    config_text = textwrap.dedent(f"""\
        workflow-name: copysegmentation
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [64,64,512]
            bounding-box: [[0,0,100], [256,200,256]]
          
          adapters: {{}}
 
        output:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {output_segmentation_name}
            supervoxels: true
            disable-indexing: true
            create-if-necessary: true
           
          geometry: {{}} # Auto-set from input
 
        copysegmentation:
          pyramid-depth: 1
          slab-depth: 128
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, random_segmentation, dvid_address, repo_uuid, output_segmentation_name
def setup_dvid_to_zarr(setup_dvid_repo, random_segmentation):
    dvid_address, repo_uuid = setup_dvid_repo

    input_segmentation_name = 'segmentation-input'

    try:
        create_labelmap_instance(dvid_address, repo_uuid,
                                 input_segmentation_name)
    except HTTPError as ex:
        # Tolerate an instance that already exists; re-raise anything else.
        if ex.response is None or 'already exists' not in ex.response.content.decode('utf-8'):
            raise

    post_labelmap_voxels(dvid_address, repo_uuid, input_segmentation_name,
                         (0, 0, 0), random_segmentation)

    template_dir = tempfile.mkdtemp(
        suffix="copysegmentation-from-dvid-template")

    output_path = 'output.zarr'

    config_text = textwrap.dedent(f"""\
        workflow-name: copysegmentation
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [64,64,512]
            bounding-box: [[0,0,100], [256,200,256]]
          
          adapters: {{}}
 
        output:
          zarr:
            path: {output_path}
            dataset: s0
            create-if-necessary: true
            creation-settings:
              dtype: uint64
              chunk-shape: [64,64,64]
           
          geometry: {{}} # Auto-set from input
 
        copysegmentation:
          pyramid-depth: 1
          slab-depth: 128
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, random_segmentation, dvid_address, repo_uuid, output_path
def load_roi_label_volume(server,
                          uuid,
                          rois_or_neuprint,
                          box_s5=[None, None],
                          export_path=None,
                          export_labelmap=None):
    """
    Fetch several ROIs from DVID and combine them into a single label volume or mask.
    The label values in the returned volume correspond to the order in which the ROI
    names were passed in, starting at label 1.
    
    This function is essentially a convenience function around fetch_combined_roi_volume(),
    but in this case it will optionally auto-fetch the ROI list, and auto-export the volume.
    
    Args:
        server:
            DVID server

        uuid:
            DVID uuid

        rois_or_neuprint:
            Either a list of ROIs or a neuprint server from which to obtain the roi list.

        box_s5:
            If you want to restrict the ROIs to a particular subregion,
            you may pass your own bounding box (at scale 5).
            Alternatively, you may pass the name of a segmentation
            instance from DVID whose bounding box will be used.

        export_path:
            If you want the ROI volume to be exported to disk,
            provide a path name ending with .npy or .h5.
        
        export_labelmap:
            If you want the ROI volume to be exported to a DVID labelmap instance,
            provide the instance name, or a tuple of (server, uuid, instance).
    
    Returns:
        (roi_vol, roi_box, rois), containing the fetched label volume, the
        bounding box it corresponds to (in DVID scale-5 coordinates), and
        the list of ROI names that were fetched.

    Note:
      If you have a list of (full-res) points to extract from the returned volume,
      pass a DataFrame with columns ['z','y','x'] to the following function.
      If you already downloaded the roi_vol (above), provide it.
      Otherwise, leave out those args and it will be fetched first.
      Adds columns to the input DF (in-place) for 'roi' (str) and 'roi_label' (int).
    
        >>> from neuclease.dvid import determine_point_rois
        >>> determine_point_rois(*master, rois, point_df, roi_vol, roi_box)
    """
    if isinstance(box_s5, str):
        # Assume that this is a segmentation instance whose dimensions should be used
        # Fetch the maximum extents of the segmentation,
        # and rescale it for scale-5.
        seg_box = fetch_volume_box(server, uuid, box_s5)
        box_s5 = round_box(seg_box, (2**5), 'out') // 2**5
        box_s5[0] = (0, 0, 0)

    if export_labelmap:
        assert isinstance(box_s5, np.ndarray)
        assert not (box_s5 % 64).any(), \
            ("If exporting to a labelmap instance, please supply "
             "an explicit box and make sure it is block-aligned.")

    if isinstance(rois_or_neuprint, (str, neuprint.Client)):
        if isinstance(rois_or_neuprint, str):
            npclient = neuprint.Client(rois_or_neuprint)
        else:
            npclient = rois_or_neuprint

        # Fetch ROI names from neuprint
        q = "MATCH (m: Meta) RETURN m.superLevelRois as rois"
        rois = npclient.fetch_custom(q)['rois'].iloc[0]
        rois = sorted(rois)
        # # Remove '.*ACA' ROIs. Apparently there is some
        # # problem with them. (They overlap with other ROIs.)
        # rois = [*filter(lambda r: 'ACA' not in r, rois)]
    else:
        assert isinstance(rois_or_neuprint, collections.abc.Iterable)
        rois = rois_or_neuprint

    # Fetch each ROI and write it into a volume
    with Timer(f"Fetching combined ROI volume for {len(rois)} ROIs", logger):
        roi_vol, roi_box, overlap_stats = fetch_combined_roi_volume(
            server, uuid, rois, box_zyx=box_s5)

    if len(overlap_stats) > 0:
        logger.warning(
            f"Some ROIs overlap! Here's an incomplete list of overlapping pairs:\n{overlap_stats}"
        )

    # Export to npy/h5py for external use
    if export_path:
        with Timer(f"Exporting to {export_path}", logger):
            if export_path.endswith('.npy'):
                np.save(export_path, roi_vol)
            elif export_path.endswith('.h5'):
                with h5py.File(export_path, 'w') as f:
                    f.create_dataset('rois_scale_5', data=roi_vol, chunks=True)

    if export_labelmap:
        if isinstance(export_labelmap, str):
            export_labelmap = (server, uuid, export_labelmap)

        assert len(export_labelmap) == 3
        with Timer(f"Exporting to {export_labelmap[2]}", logger):
            if export_labelmap[2] not in fetch_repo_instances(
                    server, uuid, 'labelmap'):
                create_labelmap_instance(
                    *export_labelmap, voxel_size=8 * (2**5),
                    max_scale=6)  # FIXME: hard-coded voxel size

            # It's really important to use this block shape.
            # See https://github.com/janelia-flyem/dvid/issues/342
            boxes = boxes_from_grid(roi_box, (256, 256, 256), clipped=True)
            for box in tqdm_proxy(boxes):
                block = extract_subvol(roi_vol, box - roi_box[0])
                post_labelmap_voxels(*export_labelmap,
                                     box[0],
                                     block,
                                     scale=0,
                                     downres=True)

    return roi_vol, roi_box, rois
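A minimal usage sketch based on the docstring's Note above (not from the original source); the server, uuid, and ROI names below are placeholders.

import pandas as pd
from neuclease.dvid import determine_point_rois

server, uuid = 'emdata4:8900', 'abc123'  # hypothetical server and uuid
rois = ['EB', 'FB', 'NO']                # hypothetical ROI names
roi_vol, roi_box, rois = load_roi_label_volume(server, uuid, rois)

# Adds 'roi' and 'roi_label' columns to point_df, in-place:
point_df = pd.DataFrame({'z': [1000], 'y': [2000], 'x': [3000]})
determine_point_rois(server, uuid, rois, point_df, roi_vol, roi_box)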
def setup_connectedcomponents_dvid(setup_dvid_repo):
    dvid_address, repo_uuid = setup_dvid_repo

    _ = 0  # shorthand for background voxels in the layout below
    volume_layout = [[_, _, _, _, _, _, _, _],
                     [_, _, _, _, _, 4, _, _],
                     [_, 1, 1, 1, _, _, 1, 1],
                     [_, 1, _, _, _, _, _, _],
                     [_, 1, _, _, _, _, _, _],
                     [_, 1, _, 2, 2, 2, 2, _],
                     [_, _, _, _, _, _, _, _],
                     [_, 3, _, _, _, 3, _, 1]]

    lowres_volume = np.zeros((4, 8, 8), np.uint64)
    lowres_volume[:] = volume_layout

    volume = upsample(lowres_volume, 16)
    assert volume.shape == (64, 128, 128)

    input_segmentation_name = 'cc-input'
    output_segmentation_name = 'cc-output'

    create_labelmap_instance(dvid_address, repo_uuid, input_segmentation_name)
    post_labelmap_voxels(dvid_address, repo_uuid, input_segmentation_name,
                         (0, 0, 0), volume)

    # Post data to the output -- don't leave it empty,
    # or we run into 'maxlabel' issues related to dvid issue #284
    # https://github.com/janelia-flyem/dvid/issues/284
    create_labelmap_instance(dvid_address, repo_uuid, output_segmentation_name)
    post_labelmap_voxels(dvid_address, repo_uuid, output_segmentation_name,
                         (0, 0, 0), volume)

    config_text = textwrap.dedent(f"""\
        workflow-name: connectedcomponents
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [64,64,64]
            bounding-box: [[0,0,0], [128,128,64]]
 
        output:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {output_segmentation_name}
            supervoxels: true
            disable-indexing: true
            create-if-necessary: true
           
          geometry: {{}} # Auto-set from input
 
        connectedcomponents:
          orig-max-label: 4
          halo: 1
          subset-labels: [1,2,4] # Not 3
          compute-block-statistics: true
          log-relabeled-objects: true
    """)

    template_dir = tempfile.mkdtemp(suffix="connectedcomponents-template")

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    yaml = YAML()
    yaml.default_flow_style = False
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    return template_dir, config, volume, dvid_address, repo_uuid, output_segmentation_name
def setup_dvid_segmentation_input(setup_dvid_repo, random_segmentation):
    dvid_address, repo_uuid = setup_dvid_repo

    # Since the same UUID is re-used for each test case,
    # this counter is a little hack used to make sure the segmentation
    # has a unique name each time, so that previous test cases don't
    # affect subsequent test cases.
    global test_case_counter
    test_case_counter += 1

    # Normally the MaskSegmentation workflow is used to update
    # a segmentation instance from a parent uuid to a child uuid.
    # But for this test, we'll simulate that by writing to two
    # different instances in the same uuid.
    input_segmentation_name = f'masksegmentation-input-{test_case_counter}'
    output_segmentation_name = f'masksegmentation-output-from-dvid-{test_case_counter}'

    # Agglomerate some supervoxels into bodies
    # Choose supervoxels that intersect three Z-planes at 64, 128, 192
    svs_1 = np.unique(random_segmentation[64])
    svs_2 = np.unique(random_segmentation[128])
    svs_3 = np.unique(random_segmentation[192])

    for instance in (input_segmentation_name, output_segmentation_name):
        create_labelmap_instance(dvid_address,
                                 repo_uuid,
                                 instance,
                                 max_scale=MAX_SCALE)

        # Start with an empty mapping (the repo/instance are re-used for each test case)
        post_labelmap_voxels(dvid_address,
                             repo_uuid,
                             instance, (0, 0, 0),
                             random_segmentation,
                             downres=True)
        post_merge(dvid_address, repo_uuid, instance, svs_1[0], svs_1[1:])
        post_merge(dvid_address, repo_uuid, instance, svs_2[0], svs_2[1:])
        post_merge(dvid_address, repo_uuid, instance, svs_3[0], svs_3[1:])

    # Create an ROI to test with -- a sphere with scale-5 resolution
    shape_s5 = np.array(random_segmentation.shape) // 2**5
    midpoint_s5 = shape_s5 / 2
    radius = midpoint_s5.min()

    coords_s5 = ndindex_array(*shape_s5)
    distances = np.sqrt(np.sum((coords_s5 - midpoint_s5)**2, axis=1))
    keep = (distances < radius)
    coords_s5 = coords_s5[keep, :]

    roi_ranges = runlength_encode_to_ranges(coords_s5)
    roi_name = 'masksegmentation-test-roi'

    try:
        create_instance(dvid_address, repo_uuid, roi_name, 'roi')
    except HTTPError as ex:
        # Tolerate an ROI that already exists; re-raise anything else.
        if ex.response is None or 'already exists' not in ex.response.content.decode('utf-8'):
            raise

    post_roi(dvid_address, repo_uuid, roi_name, roi_ranges)

    roi_mask_s5 = np.zeros(shape_s5, dtype=bool)
    roi_mask_s5[(*coords_s5.transpose(), )] = True
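    # Dense boolean version of the ROI, returned for validation in the tests.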

    template_dir = tempfile.mkdtemp(
        suffix="masksegmentation-from-dvid-template")

    config_text = textwrap.dedent(f"""\
        workflow-name: masksegmentation
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            # Choose a brick that doesn't cleanly divide into the bounding box
            message-block-shape: [192,64,64]
 
        output:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {output_segmentation_name}
            supervoxels: true
            disable-indexing: true
 
        masksegmentation:
          mask-roi: {roi_name}
          batch-size: 5
          block-statistics-file: erased-block-statistics.h5
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, random_segmentation, dvid_address, repo_uuid, roi_mask_s5, input_segmentation_name, output_segmentation_name
def setup_sparseblockstats(setup_dvid_repo):
    dvid_address, repo_uuid = setup_dvid_repo

    _ = 0  # shorthand for background voxels in the layout below
    volume_layout = [[_, _, _, _, _, _, _, _],
                     [_, _, _, _, _, 4, _, _],
                     [_, 1, 1, 1, _, _, 1, 1],
                     [_, 1, _, _, _, _, _, _],
                     [_, 1, _, _, _, _, _, _],
                     [_, 1, _, 2, 2, 2, 2, _],
                     [_, _, _, _, _, _, _, _],
                     [_, 3, _, _, _, 3, _, 1]]

    lowres_volume = np.zeros((4, 8, 8), np.uint64)
    lowres_volume[:] = volume_layout

    volume = upsample(lowres_volume, 16)
    assert volume.shape == (64, 128, 128)

    input_segmentation_name = 'sparseblockstats-input'
    create_labelmap_instance(dvid_address, repo_uuid, input_segmentation_name)
    post_labelmap_voxels(dvid_address, repo_uuid, input_segmentation_name,
                         (0, 0, 0), volume)

    # The mask volume is the same as the input, but multiplied by 10
    mask_volume = volume * 10
    mask_segmentation_name = 'sparseblockstats-mask'
    create_labelmap_instance(dvid_address, repo_uuid, mask_segmentation_name)
    post_labelmap_voxels(dvid_address, repo_uuid, mask_segmentation_name,
                         (0, 0, 0), mask_volume)

    config_text = textwrap.dedent(f"""\
        workflow-name: sparseblockstats
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [64,64,64]
            bounding-box: [[0,0,0], [128,128,64]]
 
        mask-input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {mask_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [64,64,64]
            bounding-box: [[0,0,0], [128,128,64]]
 
        sparseblockstats:
          mask-labels: [20,40] # Avoids the top-left block
    """)

    template_dir = tempfile.mkdtemp(suffix="sparseblockstats-template")

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    yaml = YAML()
    yaml.default_flow_style = False
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    return template_dir, config, volume, mask_volume, dvid_address, repo_uuid
def setup_dvid_segmentation_input(setup_dvid_repo, random_segmentation):
    dvid_address, repo_uuid = setup_dvid_repo

    input_segmentation_name = 'labelmapcopy-segmentation-input'
    output_segmentation_name = 'labelmapcopy-segmentation-output'

    partial_output_segmentation_name = 'labelmapcopy-segmentation-partial-output'

    max_scale = 3
    already_exists = False

    try:
        create_labelmap_instance(dvid_address,
                                 repo_uuid,
                                 input_segmentation_name,
                                 max_scale=max_scale)
        create_labelmap_instance(dvid_address,
                                 repo_uuid,
                                 partial_output_segmentation_name,
                                 max_scale=max_scale)
    except HTTPError as ex:
        # If a previous test run already created the instances, skip the posts below;
        # re-raise any other error.
        if ex.response is not None and 'already exists' in ex.response.content.decode('utf-8'):
            already_exists = True
        else:
            raise

    expected_vols = {}
    for scale in range(1 + max_scale):
        if scale == 0:
            scaled_vol = random_segmentation
        else:
            scaled_vol = downsample(scaled_vol, 2, 'labels-numba')
        expected_vols[scale] = scaled_vol

        if not already_exists:
            scaled_box = round_box([(0, 0, 0), scaled_vol.shape], 64, 'out')
            aligned_vol = np.zeros(scaled_box[1], np.uint64)
            overwrite_subvol(aligned_vol, [(0, 0, 0), scaled_vol.shape],
                             scaled_vol)
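            # (aligned_vol is zero-padded out to 64-px block alignment before posting.)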
            post_labelmap_voxels(dvid_address,
                                 repo_uuid,
                                 input_segmentation_name, (0, 0, 0),
                                 aligned_vol,
                                 scale=scale)

    if not already_exists:
        # Create a 'partial' output volume that is the same (bitwise) as the input except for some blocks.
        scaled_box = np.array([(0, 0, 0), random_segmentation.shape])
        scaled_box[1, -1] = 192  # Limit the box's X extent to 192
        for scale in range(1 + max_scale):
            scaled_box = round_box(scaled_box // (2**scale), 64, 'out')
            raw_blocks = fetch_labelmap_voxels(dvid_address,
                                               repo_uuid,
                                               input_segmentation_name,
                                               scaled_box,
                                               scale,
                                               supervoxels=True,
                                               format='raw-response')
            post_labelmap_blocks(dvid_address,
                                 repo_uuid,
                                 partial_output_segmentation_name, [(0, 0, 0)],
                                 raw_blocks,
                                 scale,
                                 is_raw=True)

        block = np.random.randint(1_000_000,
                                  1_000_010,
                                  size=(64, 64, 64),
                                  dtype=np.uint64)
        post_labelmap_voxels(dvid_address,
                             repo_uuid,
                             partial_output_segmentation_name, (0, 128, 64),
                             block,
                             0,
                             downres=True)

    partial_vol = fetch_labelmap_voxels(dvid_address,
                                        repo_uuid,
                                        partial_output_segmentation_name,
                                        [(0, 0, 0), random_segmentation.shape],
                                        supervoxels=True)

    template_dir = tempfile.mkdtemp(suffix="labelmapcopy-template")

    config_text = textwrap.dedent(f"""\
        workflow-name: labelmapcopy
        cluster-type: {CLUSTER_TYPE}
         
        input:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {input_segmentation_name}
            supervoxels: true
           
          geometry:
            message-block-shape: [512,64,64]
            available-scales: [0,1,2,3]
 
        output:
          dvid:
            server: {dvid_address}
            uuid: {repo_uuid}
            segmentation-name: {output_segmentation_name}
            supervoxels: true
            disable-indexing: true
            create-if-necessary: true
        
        labelmapcopy:
          slab-shape: [512,128,64]
          dont-overwrite-identical-blocks: true
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    yaml = YAML()
    with StringIO(config_text) as f:
        config = yaml.load(f)

    return template_dir, config, expected_vols, partial_vol, dvid_address, repo_uuid, output_segmentation_name, partial_output_segmentation_name
def init_labelmap_nodes():
    # Five supervoxels are each 1x3x3, arranged in a single row like this:
    # [[[1 1 1 2 2 2 3 3 3 4 4 4 5 5 5]
    #   [1 1 1 2 2 2 3 3 3 4 4 4 5 5 5]
    #   [1 1 1 2 2 2 3 3 3 4 4 4 5 5 5]]]
    supervoxel_vol = np.zeros((1,3,15), np.uint64)
    supervoxel_vol[:] = (np.arange(15, dtype=np.uint64) // 3).reshape(1,1,15)
    supervoxel_vol += 1

    # Merge table: Merge them all together
    id_a = np.array([1, 2, 3, 4], np.uint64)
    id_b = np.array([2, 3, 4, 5], np.uint64)

    xa = np.array([2, 5, 8, 11], np.uint32)
    ya = np.array([1, 1, 1, 1], np.uint32)
    za = np.array([0, 0, 0, 0], np.uint32)

    xb = np.array([3, 6, 9, 12], np.uint32)
    yb = np.array([1, 1, 1, 1], np.uint32)
    zb = np.array([0, 0, 0, 0], np.uint32)

    # Weak edge between 3 and 4
    score = np.array([0.4, 0.4, 0.8, 0.4], np.float32)

    merge_table = pd.DataFrame({'id_a': id_a, 'id_b': id_b,
                                'xa': xa, 'ya': ya, 'za': za,
                                'xb': xb, 'yb': yb, 'zb': zb,
                                'score': score})
    merge_table = merge_table[['id_a', 'id_b', 'xa', 'ya', 'za', 'xb', 'yb', 'zb', 'score']]

    merge_table_path = f'{TEST_DATA_DIR}/merge-table.npy'
    np.save(merge_table_path, merge_table.to_records(index=False))
    
    create_labelmap_instance(TEST_SERVER, TEST_REPO, 'segmentation', max_scale=2)
    create_labelmap_instance(TEST_SERVER, TEST_REPO, 'segmentation-scratch', max_scale=2)

    # Expand to 64**3
    supervoxel_block = np.zeros((64,64,64), np.uint64)
    supervoxel_block[:1,:3,:15] = supervoxel_vol
    post_labelmap_voxels(TEST_SERVER, TEST_REPO, 'segmentation', (0,0,0), supervoxel_block)
    post_labelmap_voxels(TEST_SERVER, TEST_REPO, 'segmentation-scratch', (0,0,0), supervoxel_block)

    post_commit(TEST_SERVER, TEST_REPO, 'supervoxels')

#     # Create a child node for agglo mappings
#     r = requests.post(f'http://{TEST_SERVER}/api/node/{TEST_REPO}/newversion', json={'note': 'agglo'})
#     r.raise_for_status()
#     agglo_uuid = r.json["child"]

    # Merge everything
    agglo_uuid = TEST_REPO
    post_merge(TEST_SERVER, agglo_uuid, 'segmentation', 1, [2, 3, 4, 5])

    mapping = np.array([[1,1],[2,1],[3,1],[4,1],[5,1]], np.uint64)
    #mapping = pd.DataFrame(mapping, columns=['sv', 'body']).set_index('sv')['body']
    
    mapping_path = f'{TEST_DATA_DIR}/mapping.npy'
    np.save(mapping_path, mapping)

    return merge_table_path, mapping_path, supervoxel_vol