Example #1
def fib_single_skeleton(bb=np.s_[:], method='teasar'):
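    # Load a FIB25 training block (raw + groundtruth), relabel the groundtruth into
    # connected components, crop to the largest component and skeletonize it; the
    # skeleton nodes are then overlaid on the raw data and the object mask.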
    path = '/g/kreshuk/data/FIB25/training_blocks/raw/raw_block1.h5'
    with h5py.File(path, 'r') as f:
        raw = f['data'][bb]
    print(raw.shape)

    path = '/g/kreshuk/data/FIB25/training_blocks/gt/gt_block1.h5'
    with h5py.File(path, 'r') as f:
        seg = f['data'][bb]
    print(seg.shape)

    seg = vigra.analysis.labelVolume(seg)
    ids, sizes = np.unique(seg, return_counts=True)
    obj_id = ids[np.argmax(sizes)]
    print("Extract object", obj_id, "...")
    obj = seg == obj_id
    bb = np.where(obj)
    bb = tuple(slice(int(b.min()), int(b.max()) + 1) for b in bb)
    seg = seg[bb]
    obj = obj[bb]
    raw = raw[bb]
    # view([seg.astype('uint32'), obj.astype('uint32')])

    print("Skeletonize ...")
    resolution = 8
    nodes, edges = skeletor.skeletonize(obj,
                                        resolution=resolution,
                                        method=method)

    node_coords = tuple(np.array([n[i] for n in nodes]) for i in range(3))
    vol = np.zeros_like(obj, dtype='uint32')
    vol[node_coords] = obj_id

    view([raw, obj.astype('uint32'), vol], ['raw', 'obj', 'skeleton'])
Example #2
def view_block(lauritzen_block_id,
               block_id,
               block_shape=(52, 512, 512),
               halo=[0, 0, 0]):
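    # Load raw data, the multicut segmentation and both masks for one block of a
    # Lauritzen workspace (optionally with halo) and display them side by side.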
    path = '/home/papec/mnt/nrs/lauritzen/0%i/workspace.n5/raw' % lauritzen_block_id
    f = z5py.File(path)

    ds_raw = f['gray']
    shape = ds_raw.shape

    blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
                                    roiEnd=list(shape),
                                    blockShape=list(block_shape))
    if sum(halo) > 0:
        block = blocking.getBlockWithHalo(block_id, halo).outerBlock
    else:
        block = blocking.getBlock(block_id)

    bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))

    raw = ds_raw[bb]
    seg = f['segmentations/mc_glia_affs'][bb]

    mask1 = f['masks/initial_mask'][bb]
    mask2 = f['masks/minfilter_mask'][bb]

    view([raw, seg, mask1.astype('uint32'),
          mask2.astype('uint32')],
         ['raw', 'seg', 'initial mask', 'minfilter mask'])
Example #3
def view_block(block_id):
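    # Load a sponge training block (raw, instance and semantic labels), crop to the
    # labeled region and compare it with the multicut and lifted multicut segmentations.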
    gt_path = '/g/kreshuk/data/arendt/sponge/nn_train_data/train_data_0%i.h5' % block_id
    with h5py.File(gt_path, 'r') as f:
        raw = f['volumes/raw'][:]
        gt = f['volumes/labels/instances'][:]
        sem = f['volumes/labels/semantic'][:]
    mask = np.where(gt != -1)
    bb = tuple(slice(int(ma.min()), int(ma.max() + 1)) for ma in mask)

    raw = raw[bb]
    gt = gt[bb]
    sem = sem[bb]
    assert raw.shape == gt.shape == sem.shape
    print(raw.shape, gt.shape)

    global_bb = get_bb(block_id)
    print(global_bb)

    path = '/g/kreshuk/data/arendt/sponge/data.n5'
    f = z5py.File(path)
    ds_mc = f['volumes/segmentation/for_eval/multicut']
    seg_mc = vigra.analysis.labelVolume(ds_mc[global_bb].astype('uint32'))
    assert seg_mc.shape == gt.shape

    ds_lmc = f['volumes/segmentation/for_eval/lifted_multicut']
    seg_lmc = vigra.analysis.labelVolume(ds_lmc[global_bb].astype('uint32'))
    assert seg_lmc.shape == gt.shape

    view([raw, gt, sem, seg_mc, seg_lmc],
         ['raw', 'gt', 'sem', 'seg-mc', 'seg-lmc'])
Example #4
def extract_middle_cutout():
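    # Cut a (shifted) central region out of the FIB25 raw data and affinities
    # and write it to a new n5 container.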
    path = '/g/kreshuk/data/FIB25/data.n5'
    f = z5py.File(path)

    ds = f['volumes/raw/s0']
    ds.n_threads = 8

    shape = ds.shape
    halo = [50, 512, 512]
    shift = [0, -512, -512]
    bb = tuple(
        slice(sh // 2 - ha - si, sh // 2 + ha - si)
        for sh, ha, si in zip(shape, halo, shift))
    raw = ds[bb]
    chunks = ds.chunks

    ds = f['volumes/affinities']
    ds.n_threads = 8
    affs = ds[(slice(None), ) + bb]

    if False:
        view([raw, affs.transpose((1, 2, 3, 0))])

    out_path = '/g/kreshuk/data/FIB25/cutout.n5'
    f = z5py.File(out_path)
    f.create_dataset('volumes/raw',
                     data=raw,
                     compression='gzip',
                     chunks=chunks,
                     n_threads=8)
    f.create_dataset('volumes/affinities',
                     data=affs,
                     compression='gzip',
                     chunks=(1, ) + chunks,
                     n_threads=8)
Example #5
def view_skeletons(path):
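    # Load raw data, the multicut segmentation and the skeleton stored for each
    # segment id, paint the skeleton nodes into a volume and view everything.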
    from cremi_tools.viewer.volumina import view
    input_key = 'segmentation/multicut'
    output_key = 'skeletons'

    f = z5py.File(path)

    raw_key = 'raw/s0'
    ds = f[raw_key]
    ds.n_threads = 8
    raw = ds[:]

    ds = f[input_key]
    ds.n_threads = 8
    seg = ds[:]

    skel_vol = np.zeros_like(seg, dtype='uint32')
    ds_skels = f[output_key]
    seg_ids = np.unique(seg)
    for seg_id in seg_ids:
        nodes, _ = skeletor.io.read_n5(ds_skels, seg_id)
        if nodes is None:
            continue

        coords = tuple(np.array([n[i] for n in nodes])
                       for i in range(3))
        skel_vol[coords] = seg_id

    view([raw, seg, skel_vol])
Example #6
def debug_subresult(block_id=1):
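    # Load the sub-graph of one block and its subproblem result, merge all edges that
    # are neither cut by the result nor outer edges and project the merged node
    # labeling back to pixels for inspection.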
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'
    block_prefix = os.path.join(example_path, 's0', 'sub_graphs', 'block_')

    graph = ndist.Graph(os.path.join(example_path, 'graph'))
    block_path = block_prefix + str(block_id)
    nodes = ndist.loadNodes(block_path)
    inner_edges, outer_edges, sub_uvs = graph.extractSubgraphFromNodes(nodes)

    block_res_path = './tmp/subproblem_results/s0_block%i.npy' % block_id
    res = np.load(block_res_path)

    merge_edges = np.ones(graph.numberOfEdges, dtype='bool')
    merge_edges[res] = False
    merge_edges[outer_edges] = False

    uv_ids = graph.uvIds()
    n_nodes = int(uv_ids.max()) + 1
    ufd = nifty.ufd.ufd(n_nodes)
    ufd.merge(uv_ids[merge_edges])
    node_labels = ufd.elementLabeling()

    ws = z5py.File(example_path)['volumes/watersheds'][:]
    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)
    view([ws, seg])
Example #7
def agglomerate_wsdt(thresh=.1, size_thresh=500):
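    # Run a distance transform watershed on the xy-affinities, then merge segments
    # across z where the z-edge feature is below `thresh` and the edge size
    # exceeds `size_thresh`.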
    f = z5py.File('/home/papec/Work/neurodata_hdd/scotts_blocks/data_test_small.n5')
    affs = 1. - f['predictions/full_affs'][:3, :]
    affs_xy = np.mean(affs[1:3], axis=0)
    affs_z = affs[0]

    wsdt = cseg.DTWatershed(0.2, 1.6)
    ws, max_id = wsdt(affs_xy)
    rag = nrag.gridRagStacked2D(ws.astype('uint32'),
                                numberOfLabels=int(max_id + 1),
                                dtype='uint32')
    features_z = nrag.accumulateEdgeStandardFeatures(rag, affs_z, keepZOnly=True, zDirection=2)[1]
    features_z = features_z[:, 0]
    edge_offset = rag.totalNumberOfInSliceEdges
    edge_sizes = rag.edgeLengths()[edge_offset:]

    uvs = rag.uvIds()[edge_offset:]
    assert len(features_z) == len(uvs)
    # TODO filter by edge overlap as well !
    merge_edges = np.logical_and(features_z < thresh, edge_sizes > size_thresh)
    merge_nodes = uvs[merge_edges]

    ufd = nifty.ufd.ufd(rag.numberOfNodes)
    ufd.merge(merge_nodes)
    node_labels = ufd.elementLabeling()
    ws_merged = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    raw = f['gray'][:]
    view([raw, affs.transpose((1, 2, 3, 0)), ws, ws_merged],
         ['raw', 'affs', 'ws', 'ws-merged'])
Example #8
def check_skeleton(path_to_segmentation,
                   raw_path,
                   swc_folder,
                   obj_id,
                   invert_coordinates=False):
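    # Paint the skeleton from the given swc file into volumes matching the
    # segmentation and view it next to the object mask and the raw data.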
    seg = vigra.readHDF5(path_to_segmentation, 'z/0/data')
    resolution = [35., 4., 4.]
    swc_file = os.path.join(swc_folder, '%04d.swc' % obj_id)
    skel_marked = paint_in_swc(swc_file,
                               seg.shape,
                               resolution,
                               invert_coordinates=invert_coordinates)
    skel_points = convert_swc_to_volume(swc_file,
                                        seg.shape,
                                        resolution,
                                        dtype='uint32',
                                        invert_coordinates=invert_coordinates)
    mask = (seg == obj_id).astype('uint32')
    assert mask.shape == skel_points.shape, "%s, %s" % (str(
        mask.shape), str(skel_points.shape))
    with h5py.File(raw_path, 'r') as f:
        raw = f['data'][:mask.shape[0]].astype('float32')
        assert raw.shape == mask.shape
    view([raw, mask, skel_marked, skel_points],
         ['raw', 'obj_mask', 'skeleton-marked', 'skeleton-points'])
Example #9
def ws_example(shebang):
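    # Write the global config with the given shebang and run the WatershedWorkflow
    # on the example data; view affinities and watershed if the workflow succeeds.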
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'

    input_key = 'volumes/affinities'
    output_key = 'volumes/ws'

    max_jobs = 8

    global_conf = WatershedLocal.default_global_config()
    global_conf.update({'shebang': shebang})
    try:
        os.mkdir('configs')
    except OSError:
        pass

    with open('./configs/global.config', 'w') as f:
        json.dump(global_conf, f)

    ret = luigi.build([
        WatershedWorkflow(input_path=example_path,
                          input_key=input_key,
                          output_path=example_path,
                          output_key=output_key,
                          config_dir='./configs',
                          tmp_folder='./tmp',
                          target='local',
                          max_jobs=max_jobs)
    ],
                      local_scheduler=True)
    if ret:
        from cremi_tools.viewer.volumina import view
        with z5py.File(example_path) as f:
            affs = f[input_key][:3].transpose((1, 2, 3, 0))
            ws = f[output_key][:]
        view([affs, ws])
Example #10
def edge_costs_block(block_id):
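    # Load the sub-graph and edge probabilities of one block, rebuild the rag from
    # the watershed inside the block's roi and visualize edge ids as well as
    # attractive and repulsive edges on top of raw data and watershed.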
    block_path = '/home/papec/mnt/papec/Work/neurodata_hdd/cache/cremi_A+/tmp_files/features.n5/blocks'
    graph_path = '/home/papec/mnt/papec/Work/neurodata_hdd/cache/cremi_A+/tmp_files/graph.n5/sub_graphs/s0'

    graph_ds = z5py.File(graph_path)['block_%i' % block_id]
    roi_begin = graph_ds.attrs['roiBegin']
    roi_end = graph_ds.attrs['roiEnd']
    probs = 1. - z5py.File(block_path)['block_%i' % block_id][:, 0:1]

    path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cremi_warped/sampleA+.n5'
    ws_key = 'segmentations/watershed'
    raw_key = 'raw'

    bb = tuple(slice(rb, re) for rb, re in zip(roi_begin, roi_end))
    ws = z5py.File(path)[ws_key][bb].astype('uint32')

    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    edges = graph_ds['edges'][:]
    assert len(edges) == rag.numberOfEdges
    assert (rag.uvIds() == edges).all()

    edge_id_vol, edge_vol_att, edge_vol_rep = get_edge_costs(rag, probs)

    raw = z5py.File(path)[raw_key][bb]
    view([raw, ws, edge_id_vol, edge_vol_att, edge_vol_rep],
         ['raw', 'ws', 'edge-ids', 'attractive edges', 'repulsive edges'],
         layer_types=[
             "Grayscale", "RandomColors", "RandomColors", "Blue", "Red"
         ])
Example #11
def ms_fs_affs_cremi():
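    # Compute full-scale multiscale affinities for CREMI groundtruth and view them
    # together with the resized raw data and the mask.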

    path = '/home/papec/Work/neurodata_hdd/cremi/sample_A_20160501.hdf'
    bb = np.s_[:60, :512, :512]
    # bb = np.s_[:50, :256, :256]
    with h5py.File(path, 'r') as f:
        gt = f['volumes/labels/neuron_ids'][bb]

    sampling_factors = [3, 27, 27]
    # sampling_factors = [1, 3, 3]

    t1 = time.time()
    ms_affs, mask = compute_fullscale_multiscale_affinities(
        gt, sampling_factors)
    print("Ms affs in", time.time() - t1)

    with h5py.File(path, 'r') as f:
        raw = f['volumes/raw'][bb].astype('float32')

    new_shape = ms_affs.shape[1:]
    mask = (1. - mask).astype('uint32')
    raw = vigra.sampling.resize(raw, new_shape)
    view([
        raw, gt,
        ms_affs.transpose((1, 2, 3, 0)),
        mask.transpose((1, 2, 3, 0))
    ], ['raw', 'gt', 'ms-affs', 'mask'])
Example #12
def debug_subresult(block_id=1):
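    # Load the sub-graph of one block of the platynereis validation volume, apply the
    # block's subproblem result via union-find and map the node labels onto the watershed.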
    from cremi_tools.viewer.volumina import view
    path = '/g/kreshuk/data/arendt/platyneris_v1/membrane_training_data/validation/segmentation/val_block_01.n5'
    tmp_folder = './tmp_plat_val'
    block_prefix = os.path.join(path, 's0', 'sub_graphs', 'block_')

    graph = ndist.Graph(os.path.join(path, 'graph'))
    block_path = block_prefix + str(block_id)
    nodes = ndist.loadNodes(block_path)
    nodes = nodes[1:]
    inner_edges, outer_edges, sub_uvs = graph.extractSubgraphFromNodes(nodes)

    block_res_path = os.path.join(
        tmp_folder, 'subproblem_results/s0_block%i.npy' % block_id)
    res = np.load(block_res_path)

    merge_edges = np.ones(graph.numberOfEdges, dtype='bool')
    merge_edges[res] = False
    merge_edges[outer_edges] = False

    uv_ids = graph.uvIds()
    n_nodes = int(uv_ids.max()) + 1
    ufd = nufd.ufd(n_nodes)
    ufd.merge(uv_ids[merge_edges])
    node_labels = ufd.elementLabeling()

    ws = z5py.File(path)['volumes/watershed'][:]
    seg = nt.take(node_labels, ws)
    view([ws, seg])
Example #13
def ms_affs_cremi():
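    # Compute multiscale affinities for CREMI groundtruth with the given sampling
    # factors and view them together with the resized raw data and the mask.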

    path = '/home/papec/Work/neurodata_hdd/cremi/sample_A_20160501.hdf'
    bb = np.s_[:50, :512, :512]
    with h5py.File(path, 'r') as f:
        gt = f['volumes/labels/neuron_ids'][bb]

    # sampling_factors = [3, 9, 9]
    sampling_factors = [1, 3, 3]

    # t0 = time.time()
    # ms_affs_py = ms_single_scale_dense(gt, sampling_factors)
    # print('Ms affs python in', time.time() - t0)

    t1 = time.time()
    ms_affs, mask = compute_multiscale_affinities(gt, sampling_factors)
    print("Ms affs in", time.time() - t1)

    # assert ms_affs.shape == ms_affs_py.shape, "%s, %s" % (str(ms_affs.shape), str(ms_affs_py.shape))

    with h5py.File(path, 'r') as f:
        raw = f['volumes/raw'][bb].astype('float32')

    new_shape = ms_affs.shape[1:]
    mask = (1. - mask).astype('uint32')
    raw = vigra.sampling.resize(raw, new_shape)
    # view([raw, ms_affs.transpose((1, 2, 3, 0)), ms_affs_py.transpose((1, 2, 3, 0))],
    #      ['raw', 'ms-affs-cpp', 'ms-affs-py'])
    view([raw,
          ms_affs.transpose((1, 2, 3, 0)),
          mask.transpose((1, 2, 3, 0))], ['raw', 'ms-affs-cpp', 'mask'])
Example #14
def test_tp():
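    # Load the full test affinities and run the two-pass agglomeration with the mws
    # agglomerator; view one affinity channel next to the resulting segmentation.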
    path = '/home/pape/Work/data/cluster_tools_test_data/test_data.n5'
    aff_key = '/volumes/full_affinities'

    f = z5py.File(path)
    ds_affs = f[aff_key]
    ds_affs.n_threads = 8
    affs = ds_affs[:]

    # affs = affs[:, :10, :256]
    # affs = affs[:, :20, :256]
    print(affs.shape)

    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-1, -1, -1], [-1, 1, 1],
               [-1, -1, 1], [-1, 1, -1], [0, -9, 0], [0, 0, -9], [0, -9, -9],
               [0, 9, -9], [0, -9, -4], [0, -4, -9], [0, 4, -9], [0, 9, -4],
               [0, -27, 0], [0, 0, -27]]

    block_shape = [10, 256, 256]
    halo = [2, 32, 32]

    print("Start agglomeration")
    agglomerator = partial(mws_agglomerator,
                           strides=[2, 10, 10],
                           randomize_strides=True)
    seg = two_pass_agglomeration(affs, offsets, agglomerator, block_shape,
                                 halo, 4)
    print(seg.shape)

    view([affs[1], seg])
Example #16
def downscale_volume(roi_name, max_jobs=250, target='slurm'):
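    # Write global and task configs and run the DownscalingWorkflow on the
    # platynereis raw data, optionally restricted to the roi given by `roi_name`.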
    config_dir = './configs'

    if roi_name is None:
        roi_begin, roi_end = None, None
        tmp_folder = './tmp'
    else:
        roi_begin, roi_end = get_roi(roi_name)
        tmp_folder = './tmp_%s' % roi_name

    try:
        os.mkdir(config_dir)
    except OSError:
        pass

    config = DownscalingWorkflow.get_config()
    global_config = config['global']
    global_config.update({'shebang': '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env/bin/python',
                          'roi_begin': roi_begin,
                          'roi_end': roi_end})
    with open(os.path.join(config_dir, 'global.config'), 'w') as f:
        json.dump(global_config, f)

    task_config = config['downscaling']
    task_config.update({'time_limit': 120,
                        'mem_limit': 3})
    with open(os.path.join(config_dir, 'downscaling.config'), 'w') as f:
        json.dump(task_config, f)

    scale_factors = [[1, 2, 2], 2, 2, 2, 2]
    halos = [[0, 10, 10], [10, 10, 10], [10, 10, 10], [10, 10, 10], [10, 10, 10]]
    # scale_factors = [[1, 2, 2]]
    # halos = [[0, 10, 10]]

    path = '/g/kreshuk/data/arendt/platyneris_v1/data.n5'

    task = DownscalingWorkflow(tmp_folder=tmp_folder,
                               max_jobs=max_jobs,
                               config_dir=config_dir,
                               target=target,
                               input_path=path,
                               input_key='volumes/raw/s0',
                               output_key_prefix='volumes/raw',
                               scale_factors=scale_factors,
                               halos=halos)
    success = luigi.build([task], local_scheduler=True)
    view_ = False
    if view_ and success and target == 'local':
        sfa = [2, 4, 4]
        roi_begin = tuple(roib // sf for roib, sf in zip(roi_begin, sfa))
        roi_end = tuple(roie // sf for roie, sf in zip(roi_end, sfa))
        bb = tuple(slice(roib, roie) for roib, roie in zip(roi_begin, roi_end))
        print(bb)
        with z5py.File(path) as f:
            ds = f['volumes/raw/s2']
            ds.n_threads = 8
            data = ds[bb]
            print(data.shape)
        view([data])
Example #17
def run(shebang, with_rf=False):
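    # Configure and run the MulticutSegmentationWorkflow on the ISBI example data,
    # either with filter-based edge features for a random forest or with plain
    # affinity offsets; view the results if the workflow succeeds.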
    input_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'
    example_path = './isbi_exp.n5'
    input_key = 'volumes/affinities'

    max_jobs = 8
    # make sure the config folder exists before writing configs into it
    os.makedirs('./configs', exist_ok=True)
    configs = MulticutSegmentationWorkflow.get_config()

    global_conf = configs['global']
    global_conf.update({'shebang': shebang,
                        'block_shape': (25, 256, 256)})
    with open('./configs/global.config', 'w') as f:
        json.dump(global_conf, f)

    ws_conf = configs['watershed']
    ws_conf.update({'sigma_weights': 0, 'channel_begin': 1, 'channel_end': 3})
    with open('./configs/watershed.config', 'w') as f:
        json.dump(ws_conf, f)

    if with_rf:
        feat_config = configs['block_edge_features']
        feat_config.update({'filters': ['gaussianSmoothing', 'laplacianOfGaussian'],
                            'sigmas': [1., 2., 4.], 'apply_in_2d': True})
        rf_path = './rf.pkl'
    else:
        feat_config = configs['block_edge_features']
        feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]})
        rf_path = ''

    with open('./configs/block_edge_features.config', 'w') as f:
        json.dump(feat_config, f)

    ret = luigi.build([MulticutSegmentationWorkflow(input_path=input_path,
                                                    input_key='volumes/affinities',
                                                    ws_path=example_path,
                                                    ws_key='volumes/watersheds',
                                                    problem_path=example_path,
                                                    node_labels_path=example_path,
                                                    node_labels_key='node_labels',
                                                    output_path=example_path,
                                                    output_key='volumes/segmentation',
                                                    rf_path=rf_path,
                                                    n_scales=1,
                                                    config_dir='./configs',
                                                    tmp_folder='./tmp',
                                                    target='local',
                                                    max_jobs=max_jobs)], local_scheduler=True)
    if ret:
        from cremi_tools.viewer.volumina import view
        with z5py.File(input_path) as f:
            affs = f['volumes/affinities'][:3].transpose((1, 2, 3, 0))
        with z5py.File(example_path) as f:
            ws = f['volumes/watersheds'][:]
            data = [affs, ws]
            if 'volumes/segmentation' in f:
                seg = f['volumes/segmentation'][:]
                data.append(seg)
        view(data)
Example #18
def toy_euclid_dist_small():
    x = np.zeros((3, 10, 10), dtype='bool')
    x[:, 2:8, 2:8] = 1

    voxel_size = [1, 1, 1]
    root = [1, 5, 5]
    dist = nskel.euclidean_distance(x, root, voxel_size)

    view([x.astype('uint8'), dist])
Example #19
def toy_euclid_dist_big():
    x = np.zeros((10, 100, 100), dtype='bool')
    x[2:8, 20:80, 20:80] = 1

    voxel_size = [10, 1, 1]
    root = [5, 50, 50]
    dist = nskel.euclidean_distance(x, root, voxel_size)

    view([x.astype('uint8'), dist])
Example #20
def check_cremi():
    folder = '/home/papec/mnt/papec/Work/neurodata_hdd/cremi_warped/sampleA+.n5'
    bb = np.s_[50:100, 512:1024, 512:1024]
    f = z5py.File(folder)
    mc1 = f['segmentations/multicut_rf'][bb]
    mc2 = f['segmentations/multicut'][bb]
    ws = f['segmentations/watershed'][bb]
    raw = f['raw'][bb]
    view([raw, ws, mc1, mc2])
Example #21
def view_result():
    from cremi_tools.viewer.volumina import view
    raw = z5py.File('./raw.n5')['data'][:]
    # ws = z5py.File('./ws.n5')['data'][:]
    ws = z5py.File(
        '/home/papec/mnt/papec/Work/neurodata_hdd/cluster_test_data/ws.n5'
    )['data'][:]
    affs = z5py.File('./affs.n5')['affs_xy'][:]
    view([raw, affs, ws])
Example #22
def skeletons(sample, max_jobs=8, target='local'):
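    # Configure and run the SkeletonWorkflow for the multicut segmentation of a
    # CREMI sample; view skeletons, raw and segmentation when running locally.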
    input_path = '/g/kreshuk/data/cremi/realigned/sample%s_small.n5' % sample
    input_prefix = 'segmentation/multicut'
    output_prefix = 'skeletons/multicut'

    config_dir = './config_skeletons'
    tmp_folder = './tmp_skeletons_%s' % sample

    try:
        os.mkdir(config_dir)
    except OSError:
        pass

    config = SkeletonWorkflow.get_config()
    global_config = config['global']
    shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env/bin/python'
    global_config.update({'shebang': shebang})
    with open(os.path.join(config_dir, 'global.config'), 'w') as f:
        json.dump(global_config, f)

    ds_config = config['skeletonize']
    ds_config.update({'threads_per_job': 8})
    with open(os.path.join(config_dir, 'skeletonize.config'), 'w') as f:
        json.dump(ds_config, f)

    task = SkeletonWorkflow(tmp_folder=tmp_folder,
                            max_jobs=1,
                            config_dir=config_dir,
                            target='local',
                            input_path=input_path,
                            output_path=input_path,
                            input_prefix=input_prefix,
                            output_prefix=output_prefix,
                            work_scale=2)
    success = luigi.build([task], local_scheduler=True)

    # view the results when running locally
    if success and target == 'local':
        with z5py.File(input_path) as f:
            # skeletons at scale 2
            ds = f['skeletons/multicut/s2']
            ds.n_threads = 8
            skels = ds[:]

            # raw data at scale 2
            ds = f['raw/s2']
            ds.n_threads = 8
            raw = ds[:]

            # multicut segmentation at scale 2
            ds = f['segmentation/multicut/s2']
            ds.n_threads = 8
            seg = ds[:]

        view([raw, seg, skels], ['raw', 'seg', 'skels'])
Example #23
def view_mc():
    path = '/home/papec/Work/neurodata_hdd/testdata.n5'
    f = z5py.File(path)
    raw = f['raw'][:]
    ws = f['watershed'][:]
    # affs = f['affs_z'][:]
    mc = f['multicut'][:]

    # view([raw, affs, ws, mc])
    view([raw, ws, mc])
Example #24
def view_backaligned(sample):
    path = '/g/kreshuk/data/cremi/original/sample_%s_20160601.hdf' % sample
    with h5py.File(path, 'r') as f:
        raw = f['volumes/raw'][:]

    with h5py.File('./%s_tmp.h5' % sample, 'r') as f:
        seg = f['volumes/labels/neuron_ids'][:]
    assert raw.shape == seg.shape

    view([raw, seg])
Example #25
def debug_reduce_problem():
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'

    node_labels = z5py.File(example_path)['s1']['node_labeling'][:]
    node_labels = np.concatenate((np.zeros(1, dtype='uint64'), node_labels))
    n_nodes = len(node_labels)

    ws = z5py.File(example_path)['volumes/watersheds'][:]
    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    view([ws, seg])
Example #26
def _test_wslr(self):
    bb_aff = (slice(None),) + self.bb
    affs = z5py.File(self.path)['full_affs'][bb_aff]
    segmenter = cseg.LRAffinityWatershed(0.1, 0.25, 2.,
                                         return_seeds=True,
                                         invert_input=True)
    print("Start watershed...")
    ws, seeds, _ = segmenter(affs)
    print("... done")
    raw = z5py.File(self.path)['raw'][self.bb]
    view([raw, 1. - affs.transpose((1, 2, 3, 0)), seeds, ws],
         ['raw', 'affs', 'seeds', 'ws'])
Example #27
def view_res():
    raw_path = '/home/cpape/Work/data/isbi2012/isbi2012_test_volume.h5'
    with h5py.File(raw_path, 'r') as f:
        raw = f['volumes/raw'][:]

    seg_path = './results/mws.h5'
    with h5py.File(seg_path, 'r') as f:
        seg = f['data'][:]

    edges = make_edges(seg)

    view([raw, seg, edges], ['raw', 'mws', 'edges'])
Example #28
def view_skeletons(block_id, skeleton_postfix=''):
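    # Load the stored skeletons for this block, restrict them to a central bounding
    # box, paint them into a volume and view them on top of the raw data.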
    import z5py
    from cremi_tools.viewer.volumina import view
    from cremi_tools.skeletons import visualize_skeletons

    # skeletons = extract_skeletons(block_id)
    skel_path = '/home/papec/mnt/nrs/lauritzen/0%i/workspace.n5/skeletons' % block_id
    assert os.path.exists(skel_path), skel_path

    f_skel = z5py.File(skel_path)
    # if we don't have the post-fix, these are
    # the initial neurons of interest
    if skeleton_postfix == '':
        fg = f_skel['neurons_of_interest']
    # otherwise, these are for evaluation
    else:
        fg = f_skel['for_eval_%s' % skeleton_postfix]

    print("Loading skeletons...")
    skeletons = {}
    for skel_id in fg.keys():
        if not skel_id.isdigit():
            continue
        g = fg[skel_id]
        coords = g['coordinates'][:]
        node_ids = coords[:, 0]
        coords = coords[:, 1:]
        edges = g['edges'][:]
        skeletons[int(skel_id)] = {
            'coordinates': coords.astype('uint64'),
            'node_ids': node_ids,
            'edges': edges
        }
    print("... done")

    path = '/home/papec/mnt/nrs/lauritzen/0%i/workspace.n5' % block_id
    f = z5py.File(path)
    ds = f['filtered/gray']

    shape = ds.shape
    central = tuple(sh // 2 for sh in shape)
    offset = (100, 1000, 1000)
    bb = tuple(slice(c - off, c + off) for c, off in zip(central, offset))

    print("Visualizing skeletons ...")
    bb_shape = tuple(b.stop - b.start for b in bb)
    skeletons = intersect_skeletons_with_bb(skeletons, bb)
    skeleton_vol = visualize_skeletons(bb_shape, skeletons)
    print("... done")

    print("Have skeletons, loading raw from bb", bb)
    raw = ds[bb]
    view([raw, skeleton_vol])
Example #29
def visualize_cc():
    import h5py
    import vigra
    from cremi_tools.viewer.volumina import view
    thresh = .9
    with h5py.File('/home/cpape/Work/data/isbi2012/isbi_train_offsetsV4_3d_meantda_damws2deval_final.h5', 'r') as f:
        affs = f['data'][:, :5, :256, :256]
    print(affs.shape)
    affs = 1. - affs
    thresholded = (np.mean(affs, axis=0) >= thresh).astype('uint8')
    # cs = vigra.analysis.labelMultiArrayWithBackground(thresholded)
    cs = vigra.analysis.labelMultiArray(thresholded)
    view([affs.transpose((1, 2, 3, 0)), thresholded, cs])
Example #30
def multicut_from_n5(path,
                     raw_key,
                     affinity_key,
                     offsets,
                     bounding_box=(slice(None), ),
                     out_key=None):
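    # Load the affinities for the given bounding box, run multicut segmentation and
    # view the result next to raw data and affinities if no out_key is given.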
    import z5py
    affs = z5py.File(path)[affinity_key][(slice(None), ) + bounding_box]
    segmentation = multicut(affs, offsets)
    # either save or view the segmentation
    if out_key is None:
        raw = z5py.File(path)[raw_key][bounding_box]
        view([raw, affs.transpose((1, 2, 3, 0)), segmentation])
Example #31
def visualize_mc():
    import h5py
    from cremi_tools.viewer.volumina import view
    from neurofire.metrics.arand import ArandErrorFromMulticut
    with h5py.File('/home/cpape/Work/data/isbi2012/isbi_train_offsetsV4_3d_meantda_damws2deval_final.h5', 'r') as f:
        affs = f['data'][:, :10, :256, :256]
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
    use_2d_ws = True
    metrics = ArandErrorFromMulticut(use_2d_ws=use_2d_ws, offsets=offsets)
    beta = .5
    mc_seg = metrics.input_to_segmentation(affs[None], beta).numpy().squeeze()
    assert mc_seg.shape == affs.shape[1:]
    view([affs.transpose((1, 2, 3, 0)), mc_seg])