Example #1
0
def test_maxlabel_and_friends(labelmap_setup):
    """Exercise the maxlabel/nextlabel GET and POST endpoints together."""
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    # These endpoints mutate the node, so branch off an unlocked one first.
    uuid = post_branch(dvid_server, dvid_repo,
                       'test_maxlabel_and_friends', 'test_maxlabel_and_friends')
    seg_info = (dvid_server, uuid, 'segmentation-scratch')

    # Invariant: nextlabel is always maxlabel + 1
    max_label = fetch_maxlabel(*seg_info)
    next_label = fetch_nextlabel(*seg_info)
    assert next_label == max_label + 1

    # Reserve a block of 5 labels and check the returned range.
    first, last = post_nextlabel(*seg_info, 5)
    assert first == max_label + 1
    assert last == first + 4

    # The reservation should have advanced both endpoints.
    max_label = fetch_maxlabel(*seg_info)
    next_label = fetch_nextlabel(*seg_info)
    assert next_label == max_label + 1 == last + 1

    # Explicitly bump the max label and confirm it sticks.
    bumped = next_label + 10
    post_maxlabel(*seg_info, bumped)

    max_label = fetch_maxlabel(*seg_info)
    assert max_label == bumped

    next_label = fetch_nextlabel(*seg_info)
    assert next_label == max_label + 1
Example #2
0
def _setup_test_append_edges_for_split(labelmap_setup, branch_name):
    """
    Shared fixture helper: split supervoxel 3 by carving off its first
    column of pixels, and return (uuid, split_sv, remain_sv).
    """
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, supervoxel_vol = labelmap_setup
    uuid = post_branch(dvid_server, dvid_repo, branch_name, '')

    # Sanity-check the fixture volume (see conftest.init_labelmap_nodes):
    # supervoxel 3 begins in column x == 6.
    assert (supervoxel_vol == 3).nonzero()[2][0] == 6

    # One RLE span per row: (x, y, z, runlength) — remove the x == 6 column.
    spans = np.array([[6, y, 0, 1] for y in range(3)], np.uint32)

    # Assemble the DVID split payload: header, voxel count, span count, spans.
    header = np.array([0, 3, 0, 0], np.uint8)
    voxel_count = np.array([0], np.uint32)
    span_count = np.array([len(spans)], np.uint32)
    payload = bytes(header) + bytes(voxel_count) + bytes(span_count) + bytes(spans)

    split_sv, remain_sv = post_split_supervoxel(dvid_server, uuid,
                                                'segmentation', 3, payload)
    return uuid, split_sv, remain_sv
Example #3
0
def test_post_hierarchical_cleaves(labelmap_setup):
    """
    Merge several supervoxels into one body, then cleave them apart
    again according to a group mapping, and verify the resulting
    group/body correspondence in DVID.
    """
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    uuid = post_branch(dvid_server, dvid_repo,
                       'segmentation-post_hierarchical_cleaves', '')
    instance_info = dvid_server, uuid, 'segmentation-post_hierarchical_cleaves'
    create_labelmap_instance(*instance_info)

    svs = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], np.uint64)
    groups = [1, 1, 2, 2, 3, 3, 3, 3, 3, 4]

    # Scatter the supervoxels across two 64px blocks, just to prove that
    # post_hierarchical_cleaves() doesn't assume the labelindex has the
    # same length as the mapping.
    sv_vol = np.zeros((128, 64, 64), np.uint64)
    sv_vol[0, 0, :len(svs)] = svs
    sv_vol[64, 0, 0:len(svs):2] = svs[::2]

    post_labelmap_voxels(*instance_info, (0, 0, 0), sv_vol)

    post_merge(*instance_info, 1, svs[1:])

    def _assert_groups_and_bodies_correspond(table):
        # Groups and bodies must be in one-to-one correspondence:
        # deduplicating by either key alone must equal deduplicating by both.
        assert (table.drop_duplicates(['group'])
                == table.drop_duplicates(['group', 'body'])).all().all()
        assert (table.drop_duplicates(['body'])
                == table.drop_duplicates(['group', 'body'])).all().all()

    group_mapping = pd.Series(index=svs, data=groups)
    final_table = post_hierarchical_cleaves(*instance_info, 1, group_mapping)

    assert (fetch_mapping(*instance_info, svs) == final_table['body'].values).all()
    _assert_groups_and_bodies_correspond(final_table)

    # Since the mapping included all supervoxels in the body,
    # the last group is left with the original label.
    assert final_table.iloc[-1]['body'] == 1

    # Now merge them all together and try again, but leave
    # two supervoxels out of the groups this time.
    merges = set(pd.unique(final_table['body'].values)) - set([1])
    post_merge(*instance_info, 1, list(merges))

    group_mapping = pd.Series(index=svs[:-2], data=groups[:-2])
    final_table = post_hierarchical_cleaves(*instance_info, 1, group_mapping)

    assert len(
        final_table.query('body == 1')
    ) == 0, "Did not expect any of the groups to retain the original body ID!"
    assert (fetch_mapping(*instance_info, svs[:-2]) == final_table['body'].values).all()
    _assert_groups_and_bodies_correspond(final_table)

    # The two leftover supervoxels keep the original body ID.
    assert (fetch_mapping(*instance_info, svs[-2:]) == 1).all()
Example #4
0
def test_labelindex(labelmap_setup):
    """
    Post random voxel data, compute the expected labelindex from scratch,
    and verify it against DVID's generated index (pandas and protobuf),
    including a post/get round-trip.
    """
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    # Need an unlocked node to test these posts
    uuid = post_branch(dvid_server, dvid_repo, 'test_labelindex',
                       'test_labelindex')
    instance_info = (dvid_server, uuid, 'segmentation-scratch')

    # Write some random data
    sv = 99
    vol = sv * np.random.randint(2, size=(128, 128, 128), dtype=np.uint64)
    offset = np.array((64, 64, 64))

    # DVID will generate the index.
    post_labelmap_voxels(*instance_info, offset, vol)

    def _expected_rows():
        # One row per 64px block: (z, y, x, sv, voxel count)
        for corner in ndrange(offset, offset + vol.shape, (64, 64, 64)):
            corner = np.array(corner)
            block_box = np.array((corner, corner + 64))
            block = extract_subvol(vol, block_box - offset)
            yield [*corner, sv, (block == sv).sum()]

    index_df = pd.DataFrame(list(_expected_rows()),
                            columns=['z', 'y', 'x', 'sv', 'count'])

    # Check DVID's generated labelindex table against expected
    labelindex_tuple = fetch_labelindex(*instance_info, sv, format='pandas')
    assert labelindex_tuple.label == sv

    labelindex_tuple.blocks.sort_values(['z', 'y', 'x', 'sv'], inplace=True)
    labelindex_tuple.blocks.reset_index(drop=True, inplace=True)
    assert (labelindex_tuple.blocks == index_df).all().all()

    # Check our protobuf against DVID's
    index_tuple = PandasLabelIndex(index_df, sv, 1,
                                   datetime.datetime.now().isoformat(),
                                   'someuser')
    labelindex = create_labelindex(index_tuple)

    def compare_proto_blocks(left, right):
        # Labelindex block entries are not required to be sorted, so DVID
        # might return them in a different order — sort before comparing.
        return sorted(left.blocks.items()) == sorted(right.blocks.items())

    dvid_labelindex = fetch_labelindex(*instance_info, sv, format='protobuf')
    assert compare_proto_blocks(labelindex, dvid_labelindex)

    # Check post/get roundtrip
    post_labelindex(*instance_info, sv, labelindex)
    dvid_labelindex = fetch_labelindex(*instance_info, sv, format='protobuf')
    assert compare_proto_blocks(labelindex, dvid_labelindex)
Example #5
0
def test_fetch_labelindices(labelmap_setup):
    """
    Verify the batch labelindex fetch in all three output formats against
    the single-label fetch, then round-trip copy_labelindices().
    """
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    # Need an unlocked node to test these posts
    uuid = post_branch(dvid_server, dvid_repo, 'test_labelindices',
                       'test_labelindices')
    instance_info = (dvid_server, uuid, 'segmentation-scratch')

    # Write some random data; DVID will generate the index.
    vol = np.random.randint(1, 10, size=(128, 128, 128), dtype=np.uint64)
    offset = np.array((64, 64, 64))
    post_labelmap_voxels(*instance_info, offset, vol)

    svs = list(range(1, 10))

    # Default format: a single LabelIndices protobuf.
    # fetch_labelindex() is tested elsewhere, so it serves as the reference.
    labelindices = fetch_labelindices(*instance_info, svs)
    for sv, li in zip(svs, labelindices.indices):
        assert li == fetch_labelindex(*instance_info, sv)

    # list-of-protobuf format: one LabelIndex message per supervoxel.
    labelindices = fetch_labelindices(*instance_info, svs,
                                      format='list-of-protobuf')
    for sv, li in zip(svs, labelindices):
        assert li == fetch_labelindex(*instance_info, sv)

    # pandas format: compare block tables, sorted for a stable order.
    labelindices = fetch_labelindices(*instance_info, svs, format='pandas')
    for sv, li in zip(svs, labelindices):
        reference = fetch_labelindex(*instance_info, sv, format='pandas')
        li_df = li.blocks.sort_values(['z', 'y', 'x']).reset_index(drop=True)
        ref_df = reference.blocks.sort_values(['z', 'y', 'x']).reset_index(drop=True)
        assert (li_df == ref_df).all().all()

    # Test the copy function (just do a round-trip -- hopefully I didn't swap src and dest anywhere...)
    copy_labelindices(instance_info, instance_info, svs, batch_size=2)
    copy_labelindices(instance_info, instance_info, svs, batch_size=2,
                      processes=2)
Example #6
0
def test_fetch_mutations(labelmap_setup):
    """
    Perform merges in a parent node and its child, then verify that
    fetch_mutations() returns the right rows under both DAG filters.
    """
    dvid_server, dvid_repo, _merge_table_path, _mapping_path, _supervoxel_vol = labelmap_setup

    uuid = post_branch(dvid_server, dvid_repo, 'segmentation-fetch_mutations', '')

    instance = 'segmentation-fetch_mutations'
    create_labelmap_instance(dvid_server, uuid, instance)

    # Seed supervoxels 1..10 along one row of a single block.
    voxels = np.zeros((64, 64, 64), dtype=np.uint64)
    voxels[0, 0, :10] = np.arange(1, 11)

    post_labelmap_voxels(dvid_server, uuid, instance, (0, 0, 0), voxels)

    # Two merges in the parent node.
    post_merge(dvid_server, uuid, instance, 1, [2, 3, 4])
    post_merge(dvid_server, uuid, instance, 5, [6, 7, 8])

    post_commit(dvid_server, uuid, '')
    uuid2 = post_newversion(dvid_server, uuid, '')

    # Two more merges in the child node.
    post_merge(dvid_server, uuid2, instance, 9, [10])
    post_merge(dvid_server, uuid2, instance, 1, [5, 10])

    # 'leaf-only' should return only the child node's mutations.
    mut_df = fetch_mutations(dvid_server, uuid2, instance, dag_filter='leaf-only')
    assert len(mut_df) == 2
    assert (mut_df['uuid'] == uuid2).all()
    assert (mut_df['action'] == 'merge').all()
    assert (mut_df['target_body'] == [9, 1]).all()

    # 'leaf-and-parents' should include the parent's mutations, too.
    mut_df = fetch_mutations(dvid_server, uuid2, instance, dag_filter='leaf-and-parents')
    assert len(mut_df) == 4
    assert (mut_df['uuid'] == [uuid, uuid, uuid2, uuid2]).all()
    assert (mut_df['action'] == 'merge').all()
    assert (mut_df['target_body'] == [1, 5, 9, 1]).all()
Example #7
0
def test_extract_edges_with_large_gap(labelmap_setup):
    """
    If a large gap exists between a supervoxel and the rest of the body,
    we won't find an edge for it, but there should be no crash.
    """
    dvid_server, dvid_repo, merge_table_path, mapping_path, _supervoxel_vol = labelmap_setup
    orig_merge_table = load_merge_table(merge_table_path,
                                        mapping_path,
                                        normalize=True)

    merge_graph = LabelmapMergeGraph(merge_table_path)
    merge_graph.apply_mapping(mapping_path)

    # Fixed: was an f-string with no placeholders (ruff F541).
    uuid = post_branch(dvid_server, dvid_repo, 'test_extract_edges_large_gap',
                       '')

    # Exercise a corner case:
    # Add a new supervoxel to the body, far away from the others.
    # (No edge will be added for that supervoxel.)
    block_99 = 99 * np.ones((64, 64, 64), np.uint64)
    DVIDNodeService(dvid_server, uuid).put_labels3D('segmentation', block_99,
                                                    (128, 0, 0))
    post_merge(dvid_server, uuid, 'segmentation', 1, [99])

    root_logger = logging.getLogger()
    oldlevel = root_logger.level
    try:
        # Hide warnings for this call; they're intentional.
        # (Use the already-bound root_logger rather than re-fetching it.)
        root_logger.setLevel(logging.ERROR)
        _mutid, dvid_supervoxels, edges, _scores = merge_graph.extract_edges(
            dvid_server, uuid, 'segmentation', 1)
    finally:
        # Always restore the original level, even if extract_edges() raises.
        root_logger.setLevel(oldlevel)

    # The isolated supervoxel (99) appears in the body's supervoxel list,
    # but the edge table is unchanged from the original merge table.
    assert (dvid_supervoxels == [1, 2, 3, 4, 5, 99]).all()
    assert (orig_merge_table[['id_a', 'id_b']].values == edges).all().all(), \
        f"Original merge table doesn't match fetched:\n{orig_merge_table}\n\n{edges}\n"
Example #8
0
def _test_extract_edges(labelmap_setup, force_dirty_mapping):
    """
    Implementation for testing extract_edges(), starting either with a "clean" mapping
    (in which the body column is already correct beforehand),
    or a "dirty" mapping (in which the body column is not correct beforehand).

    Args:
        labelmap_setup:
            The session fixture tuple:
            (dvid_server, dvid_repo, merge_table_path, mapping_path, supervoxel_vol)
        force_dirty_mapping:
            If True, corrupt the in-memory mapping/body columns before each
            extract_edges() call to verify that the cache recovers.
    """
    dvid_server, dvid_repo, merge_table_path, mapping_path, _supervoxel_vol = labelmap_setup
    instance_info = DvidInstanceInfo(dvid_server, dvid_repo, 'segmentation')

    # Reference copy of the merge table, loaded directly from disk.
    orig_merge_table = load_merge_table(merge_table_path,
                                        mapping_path,
                                        normalize=True)

    merge_graph = LabelmapMergeGraph(merge_table_path)
    merge_graph.apply_mapping(mapping_path)

    if force_dirty_mapping:
        # A little white-box manipulation here to ensure that the mapping is dirty
        merge_graph.merge_table_df['body'] = np.uint64(0)
        merge_graph.mapping[:] = np.uint64(0)

    # First test: If nothing has changed in DVID, we get all rows.
    # We should be able to repeat this with the same results
    # (Make sure the cache is repopulated correctly.)
    _mutid, dvid_supervoxels, edges, _scores = merge_graph.extract_edges(
        *instance_info, 1)
    assert (dvid_supervoxels == [1, 2, 3, 4, 5]).all()
    assert (orig_merge_table[['id_a', 'id_b']].values == edges).all().all(), \
        f"Original merge table doesn't match fetched:\n{orig_merge_table}\n\n{edges}\n"

    # Now change the mapping in DVID and verify it is reflected in the extracted rows.
    # For this test, we'll cleave supervoxels [4,5] from the rest of the body.
    uuid = post_branch(dvid_server, dvid_repo,
                       f'extract-rows-test-{force_dirty_mapping}', '')

    cleaved_body = post_cleave(dvid_server, uuid, 'segmentation', 1, [4, 5])
    cleaved_mutid = fetch_mutation_id(dvid_server, uuid, 'segmentation', 1)

    if force_dirty_mapping:
        # A little white-box manipulation here to ensure that the mapping is dirty
        merge_graph.mapping.loc[2] = 0
        merge_graph.merge_table_df['body'].values[0:2] = np.uint64(0)

    # Extract edges for the remaining body (supervoxels 1-3 only).
    mutid, dvid_supervoxels, edges, _scores = merge_graph.extract_edges(
        dvid_server, uuid, 'segmentation', 1)
    assert (dvid_supervoxels == [1, 2, 3]).all()
    # NOTE: the query strings below reference this local via '@_cleaved_svs',
    # so its name must match exactly.
    _cleaved_svs = set([4, 5])
    assert (edges == orig_merge_table[[
        'id_a', 'id_b'
    ]].query('id_a not in @_cleaved_svs and id_b not in @_cleaved_svs')
            ).all().all()
    assert mutid == cleaved_mutid, "Expected cached mutation ID to match DVID"

    cleaved_mutid = fetch_mutation_id(dvid_server, uuid, 'segmentation',
                                      cleaved_body)

    # Check the other body
    mutid, dvid_supervoxels, edges, _scores = merge_graph.extract_edges(
        dvid_server, uuid, 'segmentation', cleaved_body)

    assert (edges == orig_merge_table[[
        'id_a', 'id_b'
    ]].query('id_a in @_cleaved_svs and id_b in @_cleaved_svs')).all().all()
    assert mutid == cleaved_mutid, "Expected cached mutation ID to match DVID"