Example #1
def segment_block(block_id, weight_edges=False, cached=False):
    import cremi_tools.segmentation as cseg
    raw_path = '/home/papec/Work/neurodata_hdd/fib25/raw/raw_block%i.h5' % block_id
    pmap_path = '/home/papec/Work/neurodata_hdd/fib25/pmaps/probs_squeezed_block%i.h5' % block_id
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_agglomerated_0.075000_block%i.h5' % block_id

    # load pmap and watersheds
    raw = vigra.readHDF5(raw_path, 'data').astype('float32')
    pmap = vigra.readHDF5(pmap_path, 'data')
    ws = vigra.readHDF5(ws_path, 'data')

    if cached:
        edge_probs = vigra.readHDF5('edge_probs_%i.h5' % block_id, 'data')
        rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
        # TODO load edge sizes as well; without them, edge weighting
        # is not available in the cached branch
        edge_sizes = None
    else:
        # feature extractor and multicut
        feature_extractor = cseg.RandomForestFeatures('./rf.pkl', True)
        # make graph and costs
        rag, edge_probs, _, edge_sizes = feature_extractor(pmap, ws, raw=raw)
        vigra.writeHDF5(edge_probs, 'edge_probs_%i.h5' % block_id, 'data')
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())

    mc = cseg.Multicut('kernighan-lin', weight_edges=weight_edges)
    if weight_edges and edge_sizes is not None:
        costs = mc.probabilities_to_costs(edge_probs, edge_sizes)
    else:
        costs = mc.probabilities_to_costs(edge_probs)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
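Several of these examples convert edge probabilities into signed multicut costs via probabilities_to_costs. Below is a minimal sketch of the usual transform, assuming the standard log-odds mapping with a boundary bias beta and optional edge-size weighting; the actual cremi_tools implementation may differ in details:

import numpy as np

def probabilities_to_costs_sketch(probs, beta=0.5, edge_sizes=None, eps=1e-6):
    # clip to keep the log finite
    p = np.clip(probs, eps, 1. - eps)
    # log-odds: positive (attractive) for p < 0.5, negative (repulsive) for p > 0.5
    costs = np.log((1. - p) / p) + np.log((1. - beta) / beta)
    if edge_sizes is not None:
        # weight by normalized edge size so large boundaries dominate the objective
        costs = (edge_sizes / edge_sizes.max()) * costs
    return costs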
Example #2
    def _compute_mc(self, input_, feat_function, beta):
        # watershed and region adjacency graph
        ws, n_labels = self._compute_ws(input_)

        rag = nrag.gridRag(ws,
                           numberOfLabels=n_labels,
                           numberOfThreads=self.n_threads)
        if rag.numberOfEdges == 0:
            return np.zeros_like(ws)

        # features and features to costs
        feats = feat_function(rag, input_)
        probs, edge_len = feats[:, 0], feats[:, -1]
        costs = self._probs_to_costs(probs, edge_len, beta)

        # graph and multicut solver
        graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
        graph.insertEdges(rag.uvIds())
        objective = nmc.multicutObjective(graph, costs)
        solver = objective.kernighanLinFactory(
            warmStartGreedy=True).create(objective)

        # solve multicut and project back to segmentation
        # TODO time limit
        node_labels = solver.optimize()
        return nrag.projectScalarNodeDataToPixels(
            rag, node_labels, numberOfThreads=self.n_threads)
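The objective/solver chain used here runs on any nifty undirected graph, not just a rag. A self-contained toy run, assuming nmc aliases nifty.graph.opt.multicut:

import numpy as np
import nifty.graph
import nifty.graph.opt.multicut as nmc

# 4-cycle with two attractive (positive) and two repulsive (negative) costs
graph = nifty.graph.undirectedGraph(4)
graph.insertEdges(np.array([[0, 1], [1, 2], [2, 3], [3, 0]], dtype='uint64'))
costs = np.array([1.5, -0.5, 1.0, -2.0])

objective = nmc.multicutObjective(graph, costs)
solver = objective.kernighanLinFactory(warmStartGreedy=True).create(objective)
node_labels = solver.optimize()  # one label per node; cut edges connect different labels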
Example #3
def agglomerate_wsdt(thresh=.1, size_thresh=500):
    f = z5py.File('/home/papec/Work/neurodata_hdd/scotts_blocks/data_test_small.n5')
    affs = 1. - f['predictions/full_affs'][:3, :]
    affs_xy = np.mean(affs[1:3], axis=0)
    affs_z = affs[0]

    wsdt = cseg.DTWatershed(0.2, 1.6)
    ws, max_id = wsdt(affs_xy)
    rag = nrag.gridRagStacked2D(ws.astype('uint32'),
                                numberOfLabels=int(max_id + 1),
                                dtype='uint32')
    features_z = nrag.accumulateEdgeStandardFeatures(rag, affs_z, keepZOnly=True, zDirection=2)[1]
    features_z = features_z[:, 0]
    edge_offset = rag.totalNumberOfInSliceEdges
    edge_sizes = rag.edgeLengths()[edge_offset:]

    uvs = rag.uvIds()[edge_offset:]
    assert len(features_z) == len(uvs)
    # TODO filter by edge overlap as well !
    merge_edges = np.logical_and(features_z < thresh, edge_sizes > size_thresh)
    merge_nodes = uvs[merge_edges]

    ufd = nifty.ufd.ufd(rag.numberOfNodes)
    ufd.merge(merge_nodes)
    node_labels = ufd.elementLabeling()
    ws_merged = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    raw = f['gray'][:]
    view([raw, affs.transpose((1, 2, 3, 0)), ws, ws_merged],
         ['raw', 'affs', 'ws', 'ws-merged'])
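The union-find pattern above (ufd.merge followed by elementLabeling) recurs in several of the examples below. A minimal standalone sketch with a made-up toy graph:

import numpy as np
import nifty.ufd

# five nodes; merge the pairs (0, 1) and (1, 2)
ufd = nifty.ufd.ufd(5)
ufd.merge(np.array([[0, 1], [1, 2]], dtype='uint64'))
# one label per node; merged nodes share a label
node_labels = ufd.elementLabeling()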
Example #4
def debug_subresult(block_id=1):
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'
    block_prefix = os.path.join(example_path, 's0', 'sub_graphs', 'block_')

    graph = ndist.Graph(os.path.join(example_path, 'graph'))
    block_path = block_prefix + str(block_id)
    nodes = ndist.loadNodes(block_path)
    inner_edges, outer_edges, sub_uvs = graph.extractSubgraphFromNodes(nodes)

    block_res_path = './tmp/subproblem_results/s0_block%i.npy' % block_id
    res = np.load(block_res_path)

    merge_edges = np.ones(graph.numberOfEdges, dtype='bool')
    merge_edges[res] = False
    merge_edges[outer_edges] = False

    uv_ids = graph.uvIds()
    n_nodes = int(uv_ids.max()) + 1
    ufd = nifty.ufd.ufd(n_nodes)
    ufd.merge(uv_ids[merge_edges])
    node_labels = ufd.elementLabeling()

    ws = z5py.File(example_path)['volumes/watersheds'][:]
    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)
    view([ws, seg])
Example #5
def mc(rag, probs):
    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())
    costs = mc.probabilities_to_costs(probs)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #6
def segment_lmc(ws, affs, n_labels, offsets, return_merged_nodes=False):
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    lifted_uvs, local_features, lifted_features = nrag.computeFeaturesAndNhFromAffinities(
        rag, affs, offsets, numberOfThreads=1)
    uv_ids = rag.uvIds()

    lmc = cseg.LiftedMulticut('kernighan-lin', weight_edges=False)
    local_costs = lmc.probabilities_to_costs(local_features[:, 0])
    local_ignore = (uv_ids == 0).any(axis=1)
    local_costs[local_ignore] = 5 * local_costs.min()

    # we might not have lifted edges -> just solve multicut
    if len(lifted_uvs) == 1 and (lifted_uvs[0] == -1).any():
        mc = cseg.Multicut('kernighan-lin', weight_edges=False)
        graph = nifty.graph.undirectedGraph(n_labels)
        graph.insertEdges(uv_ids)
        node_labels = mc(graph, local_costs)

    else:
        lifted_costs = lmc.probabilities_to_costs(lifted_features[:, 0])
        lifted_ignore = (lifted_uvs == 0).any(axis=1)
        lifted_costs[lifted_ignore] = 5 * lifted_costs.min()
        node_labels = lmc(uv_ids, lifted_uvs, local_costs, lifted_costs)

    if return_merged_nodes:
        return get_merged_nodes(uv_ids, node_labels)
    else:
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
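The ignore-label trick above, which also appears in several later examples, makes every edge touching the background node 0 strongly repulsive so the ignore region is never merged into a real segment. As a standalone helper (the name and factor are illustrative):

import numpy as np

def set_ignore_edges_repulsive(uv_ids, costs, ignore_label=0, factor=5):
    # edges with at least one endpoint in the ignore label
    ignore_edges = (uv_ids == ignore_label).any(axis=1)
    # costs.min() is the most repulsive (most negative) cost present;
    # scaling it makes the ignore edges even more repulsive
    costs[ignore_edges] = factor * costs.min()
    return costs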
Example #7
def compute_mcrf_segments(ws, affs, n_labels, glia, offsets, rf, block_id):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    uv_ids = rag.uvIds()

    # a (small border) block can contain only a single over-segment id,
    # resulting in a rag with zero edges
    if uv_ids.shape[0] == 0:
        print("WARNING: block", block_id, "contains only a single id, but is not masked")
        print("This may be caused by an incorrect mask")
        return ws

    # TODO split features over different affinity ranges ?
    feats = np.concatenate([nrag.accumulateAffinityStandartFeatures(rag,
                                                                    affs,
                                                                    offsets,
                                                                    numberOfThreads=1),
                            glia_features(rag, ws, glia)], axis=1)
    probs = rf.predict_proba(feats)[:, 1]

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #8
    def __call__(self, input_):
        fragments = self.oversegmenter(input_)
        rag, probs, node_sizes, edge_sizes = self.extractor(input_, fragments)
        graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
        graph.insertEdges(rag.uvIds())
        costs = self.segmenter.probabilities_to_costs(probs, edge_sizes)
        node_labels = self.segmenter(graph, costs, node_sizes, edge_sizes)
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #9
    def _project_result_to_segmentation(self, rag, mc_nodes, out):
        assert mc_nodes.shape[0] == rag.numberOfNodes
        mc_nodes, _, _ = vigra.analysis.relabelConsecutive(mc_nodes,
                                                           start_label=1,
                                                           keep_zeros=False)
        # if we have an ignore label, set its node value to zero
        if PipelineParameter().ignoreSegLabel != -1:
            workflow_logger.info(
                "SegmentationWorkflow: Setting node values for ignore seg value: %i to 0."
                % PipelineParameter().ignoreSegLabel)
            mc_nodes[PipelineParameter().ignoreSegLabel] = 0

        if np.dtype(self.dtype) != np.dtype(mc_nodes.dtype):
            self.dtype = mc_nodes.dtype
        nrag.projectScalarNodeDataToPixels(rag, mc_nodes,
                                           out.get(self.saveKey),
                                           PipelineParameter().nThreads)
Example #10
def merge_fully_enclosed(segmentation, n_threads=8):
    rag = nrag.gridRag(segmentation, numberOfLabels=int(segmentation.max() + 1),
                       numberOfThreads=n_threads)
    ufd = nifty.ufd.ufd(rag.numberOfNodes)
    nodes = np.unique(segmentation)
    for node in nodes:
        adjacency = [adj for adj in rag.nodeAdjacency(node)]
        if len(adjacency) == 1:
            ufd.merge(node, adjacency[0][0])
    labeling = ufd.elementLabeling()
    return nrag.projectScalarNodeDataToPixels(rag, labeling)
Example #11
def debug_reduce_problem():
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'

    node_labels = z5py.File(example_path)['s1']['node_labeling'][:]
    node_labels = np.concatenate((np.zeros(1, dtype='uint64'), node_labels))
    n_nodes = len(node_labels)

    ws = z5py.File(example_path)['volumes/watersheds'][:]
    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    view([ws, seg])
Example #12
def export_object(project, name, ws=None, rag=None):

    # TODO check for consistency in ws and rag arguments
    if ws is None:
        ws, rag = load_watershed_and_rag(project)

    with h5py.File(project, 'r') as f:
        fg = f['carving/objects/%s/sv' % name][:].squeeze()
    node_labels = np.zeros(rag.numberOfNodes, dtype='uint32')
    node_labels[fg] = 1
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    return seg
Example #13
def preagglomerate(affs, ws, n_labels, thresh=.95):
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
               [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
               [0, -27, 0], [0, 0, -27]]
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    probs = nrag.accumulateAffinityStandartFeatures(rag,
                                                    affs,
                                                    offsets,
                                                    numberOfThreads=1)[:, 0]
    agglomerator = cseg.MalaClustering(thresh)
    g = nifty.graph.undirectedGraph(rag.numberOfNodes)
    g.insertEdges(rag.uvIds())
    node_labels = agglomerator(g, probs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels), int(
        node_labels.max())
Example #14
def project_node_labels_to_pixels(rag, node_labels, n_threads=None):
    """ Project label values for graph nodes back to pixels to obtain segmentation.

    Arguments:
        rag [RegionAdjacencyGraph] - region adjacency graph
        node_labels [np.ndarray] - array with node labels
        n_threads [int] - number of threads used, set to cpu count by default. (default: None)
    """
    n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads
    if len(node_labels) != rag.numberOfNodes:
        raise ValueError("Incompatible number of node labels: %i, %i" %
                         (len(node_labels), rag.numberOfNodes))
    seg = nrag.projectScalarNodeDataToPixels(rag,
                                             node_labels,
                                             numberOfThreads=n_threads)
    return seg
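A hypothetical toy usage of this helper; the 2x4 oversegmentation and the merge pattern are made up for illustration:

import numpy as np
import nifty.graph.rag as nrag

# toy 2D oversegmentation with labels 0..3
ws = np.array([[0, 0, 1, 1],
               [2, 2, 3, 3]], dtype='uint32')
rag = nrag.gridRag(ws, numberOfLabels=4)
# merge 0 with 1 and 2 with 3 by giving them the same node label
node_labels = np.array([0, 0, 1, 1], dtype='uint64')
seg = project_node_labels_to_pixels(rag, node_labels, n_threads=1)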
Example #15
def agglomerate_sp(ws_path, prob_path, out_path, threshold):
    probs = vigra.readHDF5(prob_path, 'data')

    ws = vigra.readHDF5(ws_path, 'data')
    n_nodes = int(ws.max()) + 1

    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(rag.uvIds())

    agglomerator = cseg.MalaClustering(threshold)
    node_labeling = agglomerator(graph, probs)
    vigra.analysis.relabelConsecutive(node_labeling, out=node_labeling)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
    view([ws, seg])

    vigra.writeHDF5(seg, out_path, 'data', compression='gzip')
Example #16
def debug_costs():
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'

    costs = z5py.File(example_path)['costs'][:]
    edges = z5py.File(example_path)['graph/edges'][:]
    n_nodes = int(edges.max()) + 1
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(edges)

    assert graph.numberOfEdges == len(costs)
    node_labels = multicut_kernighan_lin(graph, costs)

    ws = z5py.File(example_path)['volumes/watersheds'][:]
    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    view([ws, seg])
Example #17
def segment_sample(sample):

    aff_path = '%s' % sample

    print("Load affinities")
    affs = 1. - z5py.File(aff_path)['predictions/full_affs'][:]
    # affs = 1. - vigra.readHDF5('./sampleB+_affs_cut.h5', 'data')
    print("done")

    # TODO multi-threaded
    print("making oversegmentation")
    seg = make_oversegmentation(affs, 8)
    print("done")

    # for z in range(seg.shape[0]):
    #     print(seg[z].min(), seg[z].max(), seg[z].max() - seg[z].min())
    # quit()

    print("computing features")
    rag, lr_uvs, local_prob, lr_prob = compute_features(seg, affs)
    print("done")
    assert rag.numberOfEdges == len(local_prob)
    assert len(lr_uvs) == len(lr_prob)

    uvs = rag.uvIds()
    n_nodes = rag.numberOfNodes
    assert lr_uvs.max() + 1 == n_nodes

    print("compute mutex clustering")
    # TODO do I need to invert the lr weights ?!
    lr_prob = 1. - lr_prob
    t0 = time.time()
    node_labeling = nmws.computeMwsClustering(n_nodes, uvs.astype('uint32'),
                                              lr_uvs.astype('uint32'),
                                              local_prob, lr_prob)
    assert len(node_labeling) == n_nodes
    print("done in", time.time() - t0, "s")

    # get segmentation
    mws_seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
    out_path = '' % sample
    vigra.writeHDF5(mws_seg,
                    out_path,
                    'volumes/labels/neuron_ids',
                    compression='gzip')
Example #18
def check(segment_id, bb):
    path = '/g/arendt/pape/proofreading_fib/data.n5'
    key_raw = 'volumes/raw/s1'
    key_merged = 'volumes/merged/v1'
    key_frag = 'volumes/segmentation/data/s0'

    f = z5py.File(path)

    ds = f[key_frag]
    ds.n_threads = 8
    frag = ds[bb]

    with open('./res.pkl', 'rb') as fres:
        res = pickle.load(fres)

    fragment_ids = res['ids']
    assignments = res['assignments']

    n_nodes = int(frag.max()) + 1
    node_labels = np.zeros(n_nodes, dtype='uint64')
    node_labels[fragment_ids] = assignments

    print("Compute rag ...")
    rag = nrag.gridRag(frag, numberOfLabels=n_nodes, numberOfThreads=8)
    print("done")
    new_seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    seed_fragments = [3523531, 3980164]
    mask_seeds = (frag == seed_fragments[0]).astype('uint32')
    mask_seeds[frag == seed_fragments[1]] = 2

    ds = f[key_merged]
    ds.n_threads = 8
    seg = ds[bb]

    mask_seg = (seg == segment_id).astype('uint32')

    ds = f[key_raw]
    ds.n_threads = 8
    raw = ds[bb].astype('float32')

    view([raw, frag, seg, mask_seeds, mask_seg, new_seg],
         ['raw', 'fragments', 'segments', 'seed-mask', 'seg-mask', 'curated'])
Example #19
def mc_from_costs(sample, out_key=None):
    path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cremi_warped/sampleA+.n5'
    # path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cluster_test_data/testdata1.n5'
    ws_key = 'segmentations/watershed'
    # data_key = 'predictions/full_affs'
    raw_key = 'raw'

    # first we calculate the graph and features
    ws = z5py.File(path)[ws_key][:].astype('uint32')
    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)

    feature_path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cache/cremi_A+/tmp_files/features.n5'
    # feature_path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cluster_test_data/aff_features.n5'
    probs = 1. - z5py.File(feature_path)['features'][:, 0:1]
    probs = probs.squeeze()
    assert rag.numberOfEdges == len(probs), "%i, %i" % (rag.numberOfEdges,
                                                        len(probs))

    costs = cseg.transform_probabilities_to_costs(probs, edge_sizes=None)
    uv_ids = rag.uvIds()
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    # finally, we run multicut
    cutter = cseg.Multicut("kernighan-lin")

    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(uv_ids)
    node_labels = cutter(graph, costs)
    segmentation = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    if out_key is not None:
        f = z5py.File('./mc_%s.n5' % sample, use_zarr_format=False)
        ds = f.create_dataset(out_key,
                              dtype='uint32',
                              compression='gzip',
                              shape=segmentation.shape,
                              chunks=(64, 64, 64))
        ds[:] = segmentation.astype('uint32')
    else:
        raw = z5py.File(path)[raw_key][:]
        view([raw, ws, segmentation])
Example #20
def segment_mcrf(ws, affs, n_labels, offsets, rf, return_merged_nodes=False):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    feats = nrag.accumulateAffinityStandartFeatures(rag,
                                                    affs,
                                                    offsets,
                                                    numberOfThreads=1)
    probs = rf.predict_proba(feats)[:, 1]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    if return_merged_nodes:
        return get_merged_nodes(uv_ids, node_labels)
    else:
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #21
def agglomerate_sp_eval(ws_path, gt_path, prob_path):

    probs = vigra.readHDF5(prob_path, 'data')

    ws = vigra.readHDF5(ws_path, 'data')
    n_nodes = int(ws.max()) + 1

    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    # _, node_sizes = np.unique(ws, return_counts=True)
    # edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, np.zeros(rag.shape, dtype='float32'))[:, 1]
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(rag.uvIds())

    gt = Volume(vigra.readHDF5(gt_path, 'data'))

    # node_factor = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][::-1]
    node_factor = [.025, .05, .075, .1, .15, .2, .25, .4, .5]

    for nf in node_factor:
        # FIXME agglomerative clustering segfaults
        # n_target_nodes = int(nf * n_nodes)
        # agglomerator = cseg.AgglomerativeClustering(n_target_nodes)
        # node_labeling = agglomerator(graph, probs, edge_sizes=edge_sizes, node_sizes=node_sizes)

        agglomerator = cseg.MalaClustering(nf)
        node_labeling = agglomerator(graph, probs)
        vigra.analysis.relabelConsecutive(node_labeling, out=node_labeling)

        seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
        seg = Volume(seg)
        metrics = NeuronIds(gt)
        vi_s, vi_m = metrics.voi(seg)
        are = metrics.adapted_rand(seg)
        print("Evaluation for reduction", nf)
        print("Voi - Split ", vi_s)
        print("Voi - Merge ", vi_m)
        print("Adapted Rand", are)
        print("N-Nodes:", int(node_labeling.max() + 1), '/', n_nodes)
Example #22
def gt_projection(block_id):
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_block%i.h5' % block_id
    ws = vigra.readHDF5(ws_path, 'data')
    ws = vigra.analysis.labelVolume(ws.astype('uint32'))
    gt = vigra.readHDF5('/home/papec/Work/neurodata_hdd/fib25/gt/gt_block%i.h5' % block_id,
                        'data')

    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    labeling = nrag.gridRagAccumulateLabels(rag, gt)

    projected = Volume(nrag.projectScalarNodeDataToPixels(rag, labeling))

    metrics = NeuronIds(Volume(gt))
    vi_s, vi_m = metrics.voi(projected)
    are = metrics.adapted_rand(projected)

    print(vi_s)
    print(vi_m)
    print(are)
    print()

    os.remove(ws_path)
    vigra.writeHDF5(ws, ws_path, 'data', compression='gzip')
Example #23
def _agglomerate_block(blocking, block_id, ds_in, ds_out, config):
    fu.log("start processing block %i" % block_id)
    have_ignore_label = config['have_ignore_label']
    use_mala_agglomeration = config.get('use_mala_agglomeration', True)
    threshold = config.get('threshold', 0.9)
    size_regularizer = config.get('size_regularizer', .5)
    invert_inputs = config.get('invert_inputs', False)
    offsets = config.get('offsets', None)

    bb = vu.block_to_bb(blocking.getBlock(block_id))
    # load the segmentation / output
    seg = ds_out[bb]

    # check if this block is empty
    if np.sum(seg) == 0:
        fu.log_block_success(block_id)
        return

    # load the input data
    ndim_in = ds_in.ndim
    if ndim_in == 4:
        assert offsets is not None
        assert len(offsets) <= ds_in.shape[0]
        bb_in = (slice(0, len(offsets)),) + bb
        input_ = vu.normalize(ds_in[bb_in])
    else:
        assert offsets is None
        input_ = vu.normalize(ds_in[bb])

    if invert_inputs:
        input_ = 1. - input_

    id_offset = int(seg[seg != 0].min())

    # relabel the segmentation
    _, max_id, _ = relabelConsecutive(seg, out=seg, keep_zeros=True, start_label=1)
    seg = seg.astype('uint32')

    # construct rag
    rag = nrag.gridRag(seg, numberOfLabels=max_id + 1,
                       numberOfThreads=1)

    # extract edge features
    if offsets is None:
        edge_features = nrag.accumulateEdgeMeanAndLength(rag, input_, numberOfThreads=1)
    else:
        edge_features = nrag.accumulateAffinityStandartFeatures(rag, input_, offsets,
                                                                numberOfThreads=1)
    edge_features, edge_sizes = edge_features[:, 0], edge_features[:, -1]
    uv_ids = rag.uvIds()
    # set edges to ignore label to be maximally repulsive
    if have_ignore_label:
        ignore_mask = (uv_ids == 0).any(axis=1)
        edge_features[ignore_mask] = 1

    # build undirected graph
    n_nodes = rag.numberOfNodes
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(uv_ids)

    if use_mala_agglomeration:
        node_labels = mala_clustering(graph, edge_features,
                                      edge_sizes, threshold)
    else:
        node_ids, node_sizes = np.unique(seg, return_counts=True)
        if node_ids[0] != 0:
            node_sizes = np.concatenate([np.array([0]), node_sizes])
        n_stop = int(threshold * n_nodes)
        node_labels = agglomerative_clustering(graph, edge_features,
                                               node_sizes, edge_sizes,
                                               n_stop, size_regularizer)

    # relabel the node labels consecutively
    node_labels, max_id, _ = relabelConsecutive(node_labels, start_label=1, keep_zeros=True)

    fu.log("reduced number of labels from %i to %i" % (n_nodes, max_id + 1))

    # project node labels back to segmentation
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels, numberOfThreads=1)
    seg = seg.astype('uint64')
    # add offset back to segmentation
    seg[seg != 0] += id_offset

    ds_out[bb] = seg
    # log block success
    fu.log_block_success(block_id)
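For orientation: the two branches above differ in their stopping rule; MALA-style clustering keeps merging while edge weights stay below the threshold, whereas agglomerative clustering merges down to a target node count (n_stop) with a size regularizer. A toy call, assuming the same mala_clustering helper imported by this module:

import numpy as np
import nifty.graph

graph = nifty.graph.undirectedGraph(3)
graph.insertEdges(np.array([[0, 1], [1, 2]], dtype='uint64'))
edge_features = np.array([0.1, 0.95])  # low values favor merging
edge_sizes = np.array([10., 10.])
# with threshold 0.9, nodes 0 and 1 merge; the 0.95 edge stays cut
node_labels = mala_clustering(graph, edge_features, edge_sizes, 0.9)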
Example #24
def merge_along_skeletons(block_id, in_key, out_key, n_threads):
    path = '/nrs/saalfeld/lauritzen/0%i/workspace.n5' % block_id
    key1 = '/'.join(('filtered', 'segmentations', in_key))
    label_file = os.path.join(path, key1)

    # find false splits according to skeletons and the nodes that have to
    # be merged to fix it
    skeleton_file = os.path.join(path, 'skeletons')
    metrics = build_skeleton_metrics(label_file, skeleton_file, n_threads)
    skeleton_merges = metrics.mergeFalseSplitNodes(n_threads)

    n_labels = z5py.File(label_file).attrs['maxId'] + 1

    # get new node labeling with ufd
    ufd = nifty.ufd.ufd(n_labels)
    for _, merge_nodes in skeleton_merges.items():
        merge_nodes = np.array([mn for mn in merge_nodes])
        ufd.merge(merge_nodes)
    node_labels = ufd.elementLabeling()
    # TODO make sure 0 is mapped to zero
    vigra.analysis.relabelConsecutive(node_labels,
                                      out=node_labels,
                                      keep_zeros=True,
                                      start_label=1)

    labels = nz5.datasetWrapper('uint64', label_file)
    block_shape = [25, 256, 256]
    rag_file = './rag.npy'
    if not os.path.exists(rag_file):
        print("Computing RAG...")
        rag = nrag.gridRagZ5(labels,
                             numberOfLabels=int(n_labels),
                             numberOfThreads=n_threads,
                             dtype='uint64',
                             blockShape=block_shape)
        np.save(rag_file, rag.serialize())
        print("... done")
    else:
        ragser = np.load(rag_file)
        rag = nrag.gridRagZ5(labels,
                             numberOfLabels=int(n_labels),
                             serialization=ragser,
                             dtype='uint64')

    f_out = z5py.File(path)
    key2 = '/'.join(('filtered', 'segmentations', out_key))
    if key2 not in f_out:
        f_out.create_dataset(key2,
                             dtype='uint64',
                             compression='gzip',
                             shape=z5py.File(path)[key1].shape,
                             chunks=z5py.File(path)[key1].chunks)

    out_file = os.path.join(path, key2)
    out = nz5.datasetWrapper('uint64', out_file)

    print("Projecting to pixels...")
    nrag.projectScalarNodeDataToPixels(graph=rag,
                                       nodeData=node_labels,
                                       pixelData=out,
                                       blockShape=block_shape,
                                       numberOfThreads=n_threads)
    print("... done")
    z5py.File(path)[key2].attrs['maxId'] = n_labels - 1
Example #25
    # Agglomeration:
    print("Agglomerating...")
    c_graph = nifty.graph.edgeContractionGraph(
        rag, nifty.graph.EdgeContractionGraphCallback())
    uv_ids_to_merge = uv_ids[edges_to_be_merged, :]
    for u, v in uv_ids_to_merge:
        repr_u = c_graph.findRepresentativeNode(u)
        repr_v = c_graph.findRepresentativeNode(v)
        if repr_u != repr_v:
            repr_edge = c_graph.findRepresentativeEdge(
                rag.findEdge(u, v))
            c_graph.contractEdge(repr_edge)
    node_labels = np.array([
        c_graph.findRepresentativeNode(u) for u in rag.nodes()
    ])
    segm = nrag.projectScalarNodeDataToPixels(rag, node_labels)
else:
    postfix = "_relabeled"

assert gt_segm.shape == segm.shape

# Find connected components
gt_ignore_mask = gt_segm == 0
segm += 1
segm[gt_ignore_mask] = 0
segm = vigra.analysis.labelVolumeWithBackground(
    segm.astype('uint32'))

if COMPUTE_NEW_SCORES:

    from segmfriends.utils.various import cremi_score
Example #26
def lmc(rag, lifted_uvs, local_features, lifted_features):
    lmc = cseg.LiftedMulticut('kernighan-lin', weight_edges=False)
    local_costs = lmc.probabilities_to_costs(local_features)
    lifted_costs = lmc.probabilities_to_costs(lifted_features)
    node_labels = lmc(rag.uvIds(), lifted_uvs, local_costs, lifted_costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #27
def mws_clustering(rag, lifted_uvs, local_features, lifted_features):
    import nifty.mws as nmws
    node_labels = nmws.computeMwsClustering(rag.numberOfNodes, rag.uvIds(), lifted_uvs,
                                            local_features[:, 0], 1. - lifted_features[:, 0])
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)