def _impl_test_findadjacencies_closest_approach(template_dir, config):
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)

    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["findadjacencies"]["output-table"]}')

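    # Label 0 (background) should never appear in the output pairs.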
    label_pairs = output_df[['label_a', 'label_b']].values
    assert 0 not in label_pairs.flat

    label_pairs = list(map(tuple, label_pairs))
    assert (1, 2) not in label_pairs
    assert (3, 4) in label_pairs
    assert (6, 7) in label_pairs

    assert (output_df.query('label_a == 3')[['za', 'zb']].values[0] == 0).all()
    assert (output_df.query('label_a == 3')[['ya', 'yb']].values[0] == (6, 5)).all()  # not 'forward'
    assert (output_df.query('label_a == 3')[['xa', 'xb']].values[0] == 2).all()

    assert (output_df.query('label_a == 6')[['za', 'zb']].values[0] == 0).all()
    assert (output_df.query('label_a == 6')[['ya', 'yb']].values[0] == (1, 3)).all()  # not 'forward'
    assert (output_df.query('label_a == 6')[['xa', 'xb']].values[0] == 6).all()

    return output_df
def test_findadjacencies_subset_edges(setup_findadjacencies):
    template_dir, config, _volume = setup_findadjacencies

    subset_edges = pd.DataFrame([[4, 3]], columns=['label_a', 'label_b'])
    subset_edges.to_csv(f'{template_dir}/subset-edges.csv',
                        index=False,
                        header=True)

    # Overwrite config with updated settings.
    config = copy.deepcopy(config)
    config["findadjacencies"]["subset-edges"] = 'subset-edges.csv'

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)

    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["findadjacencies"]["output-table"]}')

    label_pairs = output_df[['label_a', 'label_b']].values
    assert 0 not in label_pairs.flat

    label_pairs = list(map(tuple, label_pairs))
    assert (1, 2) not in label_pairs
    assert (3, 4) in label_pairs

    assert (output_df.query('label_a == 3')[['za', 'zb']].values[0] == (0, 0)).all()
    assert (output_df.query('label_a == 3')[['ya', 'yb']].values[0] == (6, 5)).all()  # not 'forward'
    assert (output_df.query('label_a == 3')[['xa', 'xb']].values[0] == (2, 2)).all()
def test_copysegmentation_dvid_to_zarr(setup_dvid_to_zarr):
    template_dir, config, volume, dvid_address, repo_uuid, output_file = setup_dvid_to_zarr

    # Modify the config from above to compute pyramid scales,
    # and choose a bounding box that is aligned with the bricks even at scale 2
    # (just for easier testing).
    box_zyx = [[0, 0, 0], [256, 256, 256]]
    config["input"]["geometry"]["bounding-box"] = box_zyx
    config["copysegmentation"]["pyramid-depth"] = 2

    yaml = YAML()
    yaml.default_flow_style = False
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, _workflow = launch_flow(template_dir, 1)

    box_zyx = np.array(box_zyx)

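    # Compute the expected volume at each pyramid scale by downsampling locally.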
    scale_0_vol = volume[box_to_slicing(*box_zyx)]
    scale_1_vol = downsample_labels(scale_0_vol, 2, True)
    scale_2_vol = downsample_labels(scale_1_vol, 2, True)

    store = zarr.NestedDirectoryStore(f"{execution_dir}/{output_file}")
    f = zarr.open(store, 'r')
    output_0_vol = f['s0'][box_to_slicing(*(box_zyx // 1))]
    output_1_vol = f['s1'][box_to_slicing(*(box_zyx // 2))]
    output_2_vol = f['s2'][box_to_slicing(*(box_zyx // 4))]

    assert (output_0_vol == scale_0_vol).all(), \
        "Scale 0: Written vol does not match expected"
    assert (output_1_vol == scale_1_vol).all(), \
        "Scale 1: Written vol does not match expected"
    assert (output_2_vol == scale_2_vol).all(), \
        "Scale 2: Written vol does not match expected"
def _run_to_dvid(setup, check_scale_0=True):
    template_dir, config, volume, dvid_address, repo_uuid, output_segmentation_name = setup

    yaml = YAML()
    yaml.default_flow_style = False

    # re-dump config in case it's been changed by a specific test
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    _execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config

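    # The config stores bounding-boxes in XYZ order; reverse to ZYX for numpy.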
    input_box_xyz = np.array(final_config['input']['geometry']['bounding-box'])
    input_box_zyx = input_box_xyz[:, ::-1]

    expected_vol = extract_subvol(volume, input_box_zyx)

    output_box_xyz = np.array(
        final_config['output']['geometry']['bounding-box'])
    output_box_zyx = output_box_xyz[:, ::-1]
    output_vol = fetch_raw(dvid_address,
                           repo_uuid,
                           output_segmentation_name,
                           output_box_zyx,
                           dtype=np.uint64)

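    # Save copies to /tmp for manual inspection if the assertion below fails.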
    np.save('/tmp/output_vol.npy', output_vol)
    np.save('/tmp/expected_vol.npy', expected_vol)

    if check_scale_0:
        assert (output_vol == expected_vol).all(), \
            "Written vol does not match expected"

    return input_box_zyx, expected_vol, output_vol
def test_samplepoints_rescale(setup_samplepoints):
    template_dir, config, volume, points_df = setup_samplepoints
    config = copy.deepcopy(config)  # deep copy, since nested keys are modified below
    config["input"]["adapters"]["rescale-level"] = 2
    config["samplepoints"]["rescale-points-to-level"] = 2

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)

    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["samplepoints"]["output-table"]}')

    # Should appear in sorted order
    points_df.sort_values(['z', 'y', 'x'], inplace=True)
    sorted_coords = points_df[['z', 'y', 'x']].values

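    # Attach the original full-resolution coordinates as extra (capitalized) columns.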
    output_df['Z'] = points_df['z'].values
    output_df['Y'] = points_df['y'].values
    output_df['X'] = points_df['x'].values

    assert (sorted_coords == output_df[['z', 'y', 'x']].values).all()

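    # The points were rescaled to level 2, so sample from the 4x-downsampled volume.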
    downsampled_vol = downsample(volume, 2**2, 'labels')
    labels = downsampled_vol[(*(sorted_coords // 2**2).transpose(), )]
    assert (labels == output_df['label'].values).all()

    # 'extra' columns should be preserved, even
    # though they weren't used in the computation.
    input_extra = points_df.sort_values(['z', 'y', 'x'])['extra'].values
    output_extra = output_df.sort_values(['z', 'y', 'x'])['extra'].values
    assert (output_extra == input_extra).all(), \
        "Extra column was not correctly preserved"
def test_findadjacencies_solid_volume():
    """
    If the volume is solid or empty, an error is raised at the end of the workflow.
    """
    template_dir = tempfile.mkdtemp(suffix="findadjacencies-template")

    # Create solid volume
    volume = 99 * np.ones((256, 256, 256), np.uint64)
    volume_path = f"{template_dir}/volume.h5"

    with h5py.File(volume_path, 'w') as f:
        f['volume'] = volume

    config = {
        "workflow-name": "findadjacencies",
        "cluster-type": CLUSTER_TYPE,
        "input": {
            "hdf5": {
                "path": volume_path,
                "dataset": "volume"
            },
            "geometry": {
                "message-block-shape": [64, 64, 64]
            },
        },
        "findadjacencies": {
            "output-table": "output.csv"
        }
    }

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    with pytest.raises(RuntimeError):
        _execution_dir, _workflow = launch_flow(template_dir, 1)
def _run_to_dvid(setup, check_scale_0=True):
    template_dir, config, volume, dvid_address, repo_uuid, output_grayscale_name = setup

    yaml = YAML()
    yaml.default_flow_style = False

    # re-dump config in case it's been changed by a specific test
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    _execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config

    box_xyz = np.array(final_config['input']['geometry']['bounding-box'])
    box_zyx = box_xyz[:, ::-1]

    output_vol = fetch_raw(dvid_address, repo_uuid, output_grayscale_name,
                           box_zyx)
    expected_vol = volume[box_to_slicing(*box_zyx)]

    if check_scale_0:
        assert (output_vol == expected_vol).all(), \
            "Written vol does not match expected"

    return box_zyx, expected_vol
def test_contingencytable(setup_hdf5_inputs):
    template_dir, _config, left_vol, right_vol = setup_hdf5_inputs
    expected_table = contingency_table(left_vol, right_vol).reset_index()

    execution_dir, _workflow = launch_flow(template_dir, 1)

    output_table = pd.DataFrame(
        np.load(f"{execution_dir}/contingency_table.npy"))
    assert (output_table == expected_table).all().all()
def test_decimatemeshes_max_vertices(setup_decimatemeshes_config, disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_decimatemeshes_config
    max_vertices = 1000
    config['decimatemeshes']['max-vertices'] = max_vertices
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)
    #print(execution_dir)
    check_outputs(execution_dir, object_boxes, max_vertices=max_vertices)
def test_createmeshes_rescale_isotropic(setup_createmeshes_config,
                                        disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config
    config['createmeshes']['rescale-before-write'] = 2
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)

    scaled_boxes = {label: box * 2 for label, box in object_boxes.items()}
    check_outputs(execution_dir, scaled_boxes)
def test_maskedcopy(setup_hdf5_inputs):
    template_dir, config, input_vol, mask_vol, output_path = setup_hdf5_inputs
    expected_vol = np.where(mask_vol, input_vol, 0)

    execution_dir, _workflow = launch_flow(template_dir, 1)

    with h5py.File(output_path, 'r') as f:
        output_vol = f['volume'][:]

    assert (output_vol == expected_vol).all()
def test_masksegmentation_resume(setup_dvid_segmentation_input,
                                 disable_auto_retry):
    template_dir, config, volume, dvid_address, repo_uuid, roi_mask_s5, _input_segmentation_name, output_segmentation_name = setup_dvid_segmentation_input

    brick_shape = config["input"]["geometry"]["message-block-shape"]
    batch_size = config["masksegmentation"]["batch-size"]

    # This is the total number of bricks in the volume -- not necessarily
    # the number of *processed* bricks, but close enough.
    total_bricks = np.ceil(np.prod(np.array(volume.shape) / brick_shape)).astype(int)
    total_batches = int(np.ceil(total_bricks / batch_size))

    # Skip over half of the original bricks.
    config["masksegmentation"]["resume-at"] = {
        "scale": 0,
        "batch-index": 1 + (total_batches // 2)
    }

    # re-dump config
    yaml = YAML()
    yaml.default_flow_style = False
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    _execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config

    input_box_xyz = np.array(final_config['input']['geometry']['bounding-box'])
    input_box_zyx = input_box_xyz[:, ::-1]

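    # The ROI mask is given at scale 5; upsample by 2**5 back to full resolution.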
    roi_mask = upsample(roi_mask_s5, 2**5)
    roi_mask = extract_subvol(roi_mask, input_box_zyx)

    masked_vol = extract_subvol(volume.copy(), input_box_zyx)
    masked_vol[roi_mask] = 0

    output_box_xyz = np.array(
        final_config['output']['geometry']['bounding-box'])
    output_box_zyx = output_box_xyz[:, ::-1]
    output_vol = fetch_labelmap_voxels(dvid_address,
                                       repo_uuid,
                                       output_segmentation_name,
                                       output_box_zyx,
                                       scale=0,
                                       supervoxels=True)

    #np.save('/tmp/original.npy', volume)
    #np.save('/tmp/output.npy', output_vol)

    # First part was untouched
    assert (output_vol[:128] == volume[:128]).all()

    # Last part was touched somewhere
    assert (output_vol[128:] != volume[128:]).any()
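
# A minimal sketch of the upsample helper used above (assumed behavior):
# repeat each voxel `factor` times along every axis, e.g. to bring a
# scale-5 ROI mask back to scale-0 resolution.
import numpy as np

def _upsample_sketch(vol, factor):
    for axis in range(vol.ndim):
        vol = np.repeat(vol, factor, axis=axis)
    return vol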
def test_createmeshes_from_body_source(setup_createmeshes_config,
                                       disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config
    config["input"]["dvid"]["supervoxels"] = False
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)
    #print(execution_dir)

    # In this test, each supervoxel is its own body anyway.
    check_outputs(execution_dir, object_boxes)
def test_copygrayscale_from_hdf5_to_slices(disable_auto_retry):
    template_dir = tempfile.mkdtemp(suffix="copygrayscale-from-hdf5-template")

    # Create volume, write to HDF5
    volume = np.random.randint(10, size=TESTVOL_SHAPE, dtype=np.uint8)
    volume_path = f"{template_dir}/volume.h5"
    with h5py.File(volume_path, 'w') as f:
        f['volume'] = volume

    SLICE_FMT = 'slices/{:04d}.png'

    config_text = textwrap.dedent(f"""\
        workflow-name: copygrayscale
        cluster-type: {CLUSTER_TYPE}
        
        input:
          hdf5:
            path: {volume_path}
            dataset: volume
          
          geometry:
            message-block-shape: [64,64,256]
            bounding-box: [[0,0,100], [256,200,256]]

          adapters:
            # Enable multi-scale support, since otherwise
            # Hdf5VolumeService doesn't support it out-of-the-box.
            rescale-level: 0

        output:
          slice-files:
            slice-path-format: "{SLICE_FMT}"
            dtype: uint8
        
        copygrayscale:
          max-pyramid-scale: 0
          slab-depth: 128
    """)

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        f.write(config_text)

    _execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config

    box_xyz = np.array(final_config['input']['geometry']['bounding-box'])
    box_zyx = box_xyz[:, ::-1]

    output_vol = SliceFilesVolumeService(final_config['output']).get_subvolume(
        [[100, 0, 0], [256, 200, 256]])
    expected_vol = volume[box_to_slicing(*box_zyx)]

    assert (output_vol == expected_vol).all(), \
        "Written vol does not match expected"
def test_createmeshes_subset_bodies(setup_createmeshes_config,
                                    disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config
    config['createmeshes']['subset-bodies'] = [100, 300]
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)

    df = pd.DataFrame(np.load(f'{execution_dir}/final-mesh-stats.npy'))
    assert 200 not in df['sv'].values

    check_outputs(execution_dir, object_boxes, subset_labels=[100, 300])
def test_createmeshes_max_svs_per_brick(setup_createmeshes_config,
                                        disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config
    config['createmeshes']['max-svs-per-brick'] = 1
    # Use just one brick, but it should end up getting split for each object.
    config["input"]["geometry"].update(
        {"message-block-shape": [128, 128, 128]})
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)
    #print(execution_dir)
    check_outputs(execution_dir, object_boxes)
def test_mitostats(setup_hdf5_inputs):
    template_dir, config, seg_vol, mask_vol, output_path = setup_hdf5_inputs
    execution_dir, _workflow = launch_flow(template_dir, 1)

    stats_df = pickle.load(open(f'{execution_dir}/stats_df.pkl', 'rb'))

    assert (stats_df.loc[1, ['z', 'y', 'x']] == [32, 32, 32]).all()
    assert (stats_df.loc[2, ['z', 'y', 'x']] == [128, 128, 128]).all()

    # These classes should be symmetric. See above.
    assert (stats_df.loc[2, 'class_3'] == stats_df.loc[2, 'class_4'])

    assert stats_df.loc[1, 'total_size'] == (seg_vol == 1).sum()
    assert stats_df.loc[2, 'total_size'] == (seg_vol == 2).sum()
def test_decimatemeshes_skip_existing(setup_decimatemeshes_config, disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_decimatemeshes_config
    config['decimatemeshes']['skip-existing'] = True
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    # Create an empty file for mesh 200
    os.makedirs(f"{template_dir}/meshes")
    open(f"{template_dir}/meshes/200.ngmesh", 'wb').close()
    execution_dir, _workflow = launch_flow(template_dir, 1)
 
    # The file should have been left alone (still empty).
    assert open(f"{execution_dir}/meshes/200.ngmesh", 'rb').read() == b''

    # But other meshes were generated.
    check_outputs(execution_dir, object_boxes, subset_labels=[100], skipped_labels=[200])
def _impl_findadjacencies_different_dvid_blocks_sparse_edges(
        template_dir, config):
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["findadjacencies"]["output-table"]}')

    label_pairs = output_df[['label_a', 'label_b']].values
    assert 0 not in label_pairs.flat

    label_pairs = list(map(tuple, label_pairs))
    assert (1, 6) in label_pairs
def test_createmeshes_subset_bodies_in_batches(setup_createmeshes_config,
                                               disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config
    config['createmeshes']['subset-bodies'] = [100, 200, 300]
    config['createmeshes']['subset-batch-size'] = 2
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)

    #print(execution_dir)
    check_outputs(execution_dir,
                  object_boxes,
                  subset_labels=[100, 200],
                  stats_dir=f'{execution_dir}/batch-00')
    check_outputs(execution_dir,
                  object_boxes,
                  subset_labels=[300],
                  stats_dir=f'{execution_dir}/batch-01')
def test_createmeshes_filter_supervoxels(setup_createmeshes_config,
                                         disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, object_sizes = setup_createmeshes_config

    # Set size filter to exclude the largest SV (100) and smallest SV (300),
    # leaving only the middle object (SV 200).
    assert object_sizes[300] < object_sizes[200] < object_sizes[100]
    config['createmeshes']['size-filters']['minimum-supervoxel-size'] = object_sizes[300] + 1
    config['createmeshes']['size-filters']['maximum-supervoxel-size'] = object_sizes[100] - 1
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)

    df = pd.DataFrame(np.load(f'{execution_dir}/final-mesh-stats.npy'))
    assert 100 not in df['sv'].values
    assert 300 not in df['sv'].values

    check_outputs(execution_dir, object_boxes, subset_labels=[200])
def test_sparsemeshes(setup_dvid_segmentation_input):
    template_dir, _config, _dvid_address, _repo_uuid = setup_dvid_segmentation_input

    execution_dir, _workflow = launch_flow(template_dir, 1)
    #final_config = workflow.config

    assert os.path.exists(f"{execution_dir}/meshes/100.obj")

    # Here's where our test mesh ended up:
    #print(f"{execution_dir}/meshes/100.obj")

    df = pd.read_csv(f'{execution_dir}/mesh-stats.csv')
    assert len(df) == 2
    assert df.loc[0, 'body'] == 100
    assert df.loc[0, 'scale'] == 1
    assert df.loc[0, 'result'] == 'success'

    # The second body didn't exist, so it fails (but doesn't kill the workflow)
    assert df.loc[1, 'body'] == 200
    assert df.loc[1, 'scale'] == 0
    assert df.loc[1, 'result'] == 'error-sparsevol-coarse'
def _impl_test_findadjacencies_from_dvid_sparse(template_dir, config):
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["findadjacencies"]["output-table"]}')

    label_pairs = output_df[['label_a', 'label_b']].values
    assert 0 not in label_pairs.flat

    assert output_df[['label_a', 'label_b', 'group']].duplicated().sum() == 0

    label_pairs = list(map(tuple, label_pairs))
    assert (1, 2) in label_pairs
    assert (3, 4) in label_pairs
    assert (6, 7) in label_pairs
    assert (2, 8) in label_pairs
    assert (1, 6) not in label_pairs
    assert (1, 7) not in label_pairs

    assert (output_df.query('label_a == 3')[['za', 'zb']].values[0] == 31).all()
    assert (output_df.query('label_a == 3')[['ya', 'yb']].values[0] == (7 * 16, 7 * 16 - 1)).all()  # not 'forward'
    assert (output_df.query('label_a == 3')[['xa', 'xb']].values[0] == 2.5 * 16 - 1).all()

    # The Z and X locations here are a little hard to test, since several voxels are tied.
    #assert (output_df.query('label_a == 6')[['za', 'zb']].values[0] == 31).all()
    assert (output_df.query('label_a == 6')[['ya', 'yb']].values[0] == (2 * 16 - 1, 3 * 16)).all()  # not 'forward'
    assert (output_df.query('label_a == 6')[['xa', 'xb']].values[0] == 6 * 16).all()

    return execution_dir, workflow
def test_labelmapcopy_partial(setup_dvid_segmentation_input,
                              disable_auto_retry):
    template_dir, config, expected_vols, partial_vol, dvid_address, repo_uuid, _output_segmentation_name, partial_output_segmentation_name = setup_dvid_segmentation_input

    config = copy.deepcopy(config)
    config["output"]["dvid"][
        "segmentation-name"] = partial_output_segmentation_name

    yaml = YAML()
    yaml.default_flow_style = False
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config

    output_box_xyz = np.array(
        final_config['output']['geometry']['bounding-box'])
    output_box_zyx = output_box_xyz[:, ::-1]

    max_scale = final_config['labelmapcopy']['max-scale']
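    # Verify the written volume at every scale, from 0 through max-scale.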
    for scale in range(1 + max_scale):
        scaled_box = output_box_zyx // (2**scale)
        output_vol = fetch_labelmap_voxels(dvid_address,
                                           repo_uuid,
                                           partial_output_segmentation_name,
                                           scaled_box,
                                           scale=scale)
        assert (output_vol == expected_vols[scale]).all(), \
            f"Written vol does not match expected for scale {scale}"

    # Any labels NOT in the partial vol had to be written.
    written_labels = pd.unique(
        expected_vols[0][expected_vols[0] != partial_vol])
    assert len(written_labels) > 0, \
        "This test data was chosen poorly -- there's no difference between the partial and full labels!"

    svs = pd.read_csv(f'{execution_dir}/recorded-labels.csv')['sv']
    assert set(svs) == set(written_labels)
def test_findadjacencies_subset_bodies(setup_findadjacencies):
    template_dir, config, _volume = setup_findadjacencies

    # Overwrite config with updated settings.
    config = copy.deepcopy(config)
    config["findadjacencies"]["subset-labels"] = [3]
    config["findadjacencies"]["subset-labels-requirement"] = 1

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    execution_dir, workflow = launch_flow(template_dir, 1)

    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["findadjacencies"]["output-table"]}')

    label_pairs = output_df[['label_a', 'label_b']].values
    assert 0 not in label_pairs.flat

    label_pairs = list(map(tuple, label_pairs))
    assert (1, 2) not in label_pairs
    assert (3, 4) in label_pairs

    assert (output_df.query('label_a == 3')[['za', 'zb']].values[0] == (0, 0)).all()
    assert (output_df.query('label_a == 3')[['ya', 'yb']].values[0] == (6, 5)).all()  # not 'forward'
    assert (output_df.query('label_a == 3')[['xa', 'xb']].values[0] == (2, 2)).all()

    # Check CC groups
    cc_sets = set()
    for _cc, cc_df in output_df.groupby('group_cc'):
        cc_sets.add(frozenset(cc_df[['label_a', 'label_b']].values.flat))
    assert cc_sets == {frozenset({3, 4})}
def test_createmeshes_bad_subset_bodies(setup_createmeshes_config,
                                        disable_auto_retry):
    """
    If one (or more) of the listed bodies no longer exists,
    DVID returns a 404 when we try to fetch its supervoxels.
    The job should complete anyway, for the other bodies.
    """
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config
    config['createmeshes']['subset-bodies'] = [100, 999]  # 999 doesn't exist
    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)

    assert pd.read_csv(
        f'{execution_dir}/missing-bodies.csv')['body'].tolist() == [999]

    df = pd.DataFrame(np.load(f'{execution_dir}/final-mesh-stats.npy'))
    assert 100 in df['sv'].values
    assert 200 not in df['sv'].values
    assert 300 not in df['sv'].values
    assert 999 not in df['sv'].values

    check_outputs(execution_dir, object_boxes, subset_labels=[100])
def test_contingencytable(setup_hdf5_inputs):
    """
    TODO: Test the filtering options (left-subset-labels, min-overlap-size)
    """
    template_dir, _config, left_vol, right_vol = setup_hdf5_inputs
    expected_table = contingency_table(left_vol, right_vol).sort_index().reset_index()

    expected_left_sizes = expected_table.groupby('left')['voxel_count'].sum()
    expected_right_sizes = expected_table.groupby('right')['voxel_count'].sum()

    execution_dir, _workflow = launch_flow(template_dir, 1)

    with open(f"{execution_dir}/contingency_table.pkl", "rb") as f:
        output_table = pickle.load(f)
    with open(f"{execution_dir}/left_sizes.pkl", "rb") as f:
        left_sizes = pickle.load(f)
    with open(f"{execution_dir}/right_sizes.pkl", "rb") as f:
        right_sizes = pickle.load(f)

    assert (output_table == expected_table).all().all()
    assert (left_sizes == expected_left_sizes).all().all()
    assert (right_sizes == expected_right_sizes).all().all()
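
# A minimal sketch of what contingency_table is assumed to compute, inferred
# from the assertions above: the number of overlapping voxels for every
# (left, right) label pair, as a Series named 'voxel_count'.
import pandas as pd

def _contingency_table_sketch(left_vol, right_vol):
    df = pd.DataFrame({'left': left_vol.reshape(-1),
                       'right': right_vol.reshape(-1)})
    return df.groupby(['left', 'right']).size().rename('voxel_count')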
def test_labelmapcopy(setup_dvid_segmentation_input, disable_auto_retry):
    template_dir, _config, expected_vols, partial_vol, dvid_address, repo_uuid, output_segmentation_name, _partial_output_segmentation_name = setup_dvid_segmentation_input

    execution_dir, workflow = launch_flow(template_dir, 1)
    final_config = workflow.config

    output_box_xyz = np.array(
        final_config['output']['geometry']['bounding-box'])
    output_box_zyx = output_box_xyz[:, ::-1]

    max_scale = final_config['labelmapcopy']['max-scale']
    for scale in range(1 + max_scale):
        scaled_box = output_box_zyx // (2**scale)
        output_vol = fetch_labelmap_voxels(dvid_address,
                                           repo_uuid,
                                           output_segmentation_name,
                                           scaled_box,
                                           scale=scale)
        assert (output_vol == expected_vols[scale]).all(), \
            f"Written vol does not match expected for scale {scale}"

    svs = pd.read_csv(f'{execution_dir}/recorded-labels.csv')['sv']
    assert set(svs) == set(np.unique(expected_vols[0].reshape(-1)))
def test_samplepoints(setup_samplepoints):
    template_dir, _config, volume, points_df = setup_samplepoints

    execution_dir, workflow = launch_flow(template_dir, 1)

    final_config = workflow.config
    output_df = pd.read_csv(
        f'{execution_dir}/{final_config["samplepoints"]["output-table"]}')

    # Should appear in sorted order
    sorted_coords = points_df[['z', 'y', 'x']].sort_values(['z', 'y', 'x']).values
    assert (sorted_coords == output_df[['z', 'y', 'x']].values).all()

    labels = volume[(*sorted_coords.transpose(), )]
    assert (labels == output_df['label']).all()

    # 'extra' columns should be preserved, even
    # though they weren't used in the computation.
    input_extra = points_df.sort_values(['z', 'y', 'x'])['extra'].values
    output_extra = output_df.sort_values(['z', 'y', 'x'])['extra'].values
    assert (output_extra == input_extra).all(), \
        "Extra column was not correctly preserved"
def test_createmeshes_from_body_source_subset_bodies(setup_createmeshes_config,
                                                     disable_auto_retry):
    template_dir, config, _dvid_address, _repo_uuid, object_boxes, _object_sizes = setup_createmeshes_config

    config["input"]["dvid"].update({"supervoxels": False})

    config["input"]["geometry"].update(
        {"message-block-shape": [128, 128, 128]})

    config["input"]["adapters"] = {"rescale-level": 1}

    config["createmeshes"].update({
        "subset-bodies": [100, 300],
        "rescale-before-write": 2.0
    })

    YAML().dump(config, open(f"{template_dir}/workflow.yaml", 'w'))

    execution_dir, _workflow = launch_flow(template_dir, 1)
    #print(execution_dir)

    # In this test, each supervoxel is its own body anyway.
    check_outputs(execution_dir, object_boxes, [100, 300])