Example #1
def get_rag(segmentation, nb_threads):
    """
    If the segmentation has values equal to -1, those are interpreted as background pixels.

    When this rag is built, the node IDs are taken from the segmentation and the background node
    is assigned the ID previous_max_label + 1.

    In `build_lifted_graph_from_rag`, the background node and all the edges connecting to it are ignored while creating
    the new (possibly lifted) undirected graph.
    """

    # Check if the segmentation has a background label that should be ignored in the graph:
    min_label = segmentation.min()
    if min_label >= 0:
        out_dict = {'has_background_label': False}
        return nrag.gridRag(segmentation.astype(np.uint32),
                            numberOfThreads=nb_threads), out_dict
    else:
        assert min_label == -1, "The only accepted background label is -1"
        max_valid_label = segmentation.max()
        assert max_valid_label >= 0, "A label image with only background label was passed!"
        mod_segmentation = segmentation.copy()
        background_mask = segmentation == min_label
        mod_segmentation[background_mask] = max_valid_label + 1

        # Build rag including background:
        out_dict = {
            'has_background_label': True,
            'updated_segmentation': mod_segmentation,
            'background_label': max_valid_label + 1
        }
        return nrag.gridRag(mod_segmentation.astype(np.uint32),
                            numberOfThreads=nb_threads), out_dict
Example #2
def get_rag(segmentation, nb_threads):
    # Check if the segmentation has a background label that should be ignored in the graph:
    min_label = segmentation.min()
    if min_label >= 0:
        return nrag.gridRag(segmentation.astype(np.uint32), numberOfThreads=nb_threads), False
    else:
        assert min_label == -1, "The only accepted background label is -1"
        max_valid_label = segmentation.max()
        assert max_valid_label >= 0, "A label image with only background label was passed!"
        mod_segmentation = segmentation.copy()
        background_mask = segmentation == min_label
        mod_segmentation[background_mask] = max_valid_label + 1

        # Build rag including background:
        return nrag.gridRag(mod_segmentation.astype(np.uint32), numberOfThreads=nb_threads), True
Example #3
def visualize_probabilities_for_subvolume(sub_ws,
                                          probs,
                                          uv_ids,
                                          edge_direction=2):
    rag = nrag.gridRag(sub_ws, numberOfLabels=int(sub_ws.max()) + 1)

    edge_builder = nrag.ragCoordinates(rag)
    edge_map_att = np.zeros(rag.numberOfEdges, dtype='uint32')
    edge_map_rep = np.zeros(rag.numberOfEdges, dtype='uint32')

    rag_uvs = rag.uvIds()
    indices = find_matching_row_indices(rag_uvs, uv_ids)[:, 0]

    sub_probs = probs[indices]

    # build attractive edges
    edge_map_att[sub_probs <= .1] = 3
    edge_map_att[np.logical_and(sub_probs > .1, sub_probs <= .3)] = 2
    edge_map_att[np.logical_and(sub_probs > .3, sub_probs <= .5)] = 1
    edge_vol_att = edge_builder.edgesToVolume(edge_map_att,
                                              edgeDirection=edge_direction)

    # build repulsive edges
    edge_map_rep[np.logical_and(sub_probs > .5, sub_probs <= .7)] = 1
    edge_map_rep[np.logical_and(sub_probs > .7, sub_probs <= .9)] = 2
    edge_map_rep[sub_probs > .9] = 3
    edge_vol_rep = edge_builder.edgesToVolume(edge_map_rep,
                                              edgeDirection=edge_direction)

    # build edge ids
    edge_ids = np.arange(rag.numberOfEdges).astype('uint32')
    edge_id_vol = edge_builder.edgesToVolume(edge_ids,
                                             edgeDirection=edge_direction)
    return edge_id_vol, edge_vol_att, edge_vol_rep
Example #4
def merge_label(segmentation, merge_id, n_threads=8):
    """
    Merge all instances of a given label id into the surrounding labels.
    """
    merge_map = segmentation == merge_id
    relabeled = vigra.analysis.labelMultiArrayWithBackground(segmentation)
    merge_ids = np.unique(relabeled[merge_map])

    n_labels = int(relabeled.max() + 1)
    rag = nrag.gridRag(relabeled, numberOfLabels=n_labels,
                       numberOfThreads=n_threads)
    # accumulate over a dummy array to get the edge sizes (second column)
    fake = np.zeros(rag.shape, dtype='float32')
    edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, fake)[:, 1]

    for merge in merge_ids:
        adjacency = [adj for adj in rag.nodeAdjacency(merge)]
        if len(adjacency) == 1:
            node = adjacency[0][0]
        else:
            node = 0
            size = 0
            for adj in adjacency:
                curr_node, edge = adj
                if edge_sizes[edge] > size and curr_node != 0:
                    node = curr_node
                    size = edge_sizes[edge]
        relabeled[relabeled == merge] = node
    relabeled = vigra.analysis.labelMultiArrayWithBackground(relabeled)
    return relabeled
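A minimal usage sketch of merge_label on a hypothetical toy volume (assuming numpy, vigra and nifty.graph.rag as nrag are importable): the block labelled 2 is absorbed by the non-background neighbour sharing the largest boundary.

import numpy as np

seg = np.zeros((4, 4, 4), dtype='uint32')
seg[:, :, 2:] = 1           # large neighbouring segment
seg[1:3, 1:3, 1:3] = 2      # instance to be merged away
merged = merge_label(seg, merge_id=2, n_threads=1)
# after merging and relabeling, only background and one segment remain
assert len(np.unique(merged)) == 2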
Example #5
def learn_rf():
    import cremi_tools.segmentation as cseg
    raw_path = '/home/papec/Work/neurodata_hdd/fib25/traintest/raw_train_normalized.h5'
    pmap_path = '/home/papec/Work/neurodata_hdd/fib25/traintest/probabilities_train.h5'
    assert os.path.exists(pmap_path), pmap_path
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/traintest/overseg_train.h5'
    assert os.path.exists(ws_path), ws_path

    # load pmap and watersheds
    raw = vigra.readHDF5(raw_path, 'data').astype('float32')
    pmap = vigra.readHDF5(pmap_path, 'data')
    ws = vigra.readHDF5(ws_path, 'data').astype('uint64')
    assert ws.shape == pmap.shape

    # compute the region adjacency graph
    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max() + 1))
    # feature extractor
    feature_extractor = cseg.FeatureExtractor(True)
    features = feature_extractor(rag, pmap, ws, raw)

    gt_path = '/home/papec/Work/neurodata_hdd/fib25/traintest/gt_train.h5'
    gt = vigra.readHDF5(gt_path, 'data')
    node_labels = nrag.gridRagAccumulateLabels(rag, gt)
    uv_ids = rag.uvIds()
    labels = node_labels[uv_ids[:, 0]] != node_labels[uv_ids[:, 1]]
    assert len(labels) == len(features), "%i, %i" % (len(labels), len(features))

    print("learning rf from features", features.shape)
    rf = RandomForestClassifier(n_jobs=40, n_estimators=500)
    rf.fit(features, labels)
    with open('./rf.pkl', 'wb') as f:
        pickle.dump(rf, f)
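The edge labels above follow a simple rule: an edge is a positive (cut) example exactly when its two nodes carry different ground-truth ids. A tiny numeric sketch with hypothetical values:

node_labels = np.array([7, 7, 8])
uv_ids = np.array([[0, 1], [1, 2]])
labels = node_labels[uv_ids[:, 0]] != node_labels[uv_ids[:, 1]]
# -> [False, True]: edge (0, 1) lies inside a segment, edge (1, 2) is a true boundary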
Example #6
def compute_mcrf_segments(ws, affs, n_labels, glia, offsets, rf, block_id):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    uv_ids = rag.uvIds()

    # we can have a single over-segment id for (small border) blocks
    # resulting in 0 edges
    if uv_ids.shape[0] == 0:
        print("WARNING: block", block_id, "contains only a single id, but is not masked")
        print("This may be caused by an incorrect mask")
        return ws

    # TODO split features over different affinity ranges ?
    feats = np.concatenate([nrag.accumulateAffinityStandartFeatures(rag,
                                                                    affs,
                                                                    offsets,
                                                                    numberOfThreads=1),
                            glia_features(rag, ws, glia)], axis=1)
    probs = rf.predict_proba(feats)[:, 1]

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #7
def compute_mc_learned(ws, affs, offsets, n_labels, weight_multicut_edges,
                       weighting_exponent, rf):
    assert len(rf) == 2
    # compute the region adjacency graph
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()
    if uv_ids.size == 0:
        return None, None, None

    # TODO add glia features ?
    rf_xy, rf_z = rf
    features, sizes, z_edges = feat.edge_features(rag, ws, n_labels, uv_ids,
                                                  affs[:3])

    probs = np.zeros(len(features))
    xy_edges = np.logical_not(z_edges)
    if np.sum(xy_edges) > 0:
        probs[xy_edges] = rf_xy.predict_proba(features[xy_edges])[:, 1]
    if np.sum(z_edges) > 0:
        probs[z_edges] = rf_z.predict_proba(features[z_edges])[:, 1]

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    # compute multicut edge results
    _, merge_indicator = run_mc(
        graph,
        probs,
        uv_ids,
        with_ignore_edges=True,
        edge_sizes=sizes if weight_multicut_edges else None,
        weighting_exponent=weighting_exponent)

    return uv_ids, merge_indicator, sizes
Example #8
def compute_state(affs, seg, offsets, n_attractive):

    # with affogato TODO debug this
    # FIXME the uv ids don't make sense!
    # grid_graph = compute_grid_graph(segmentation.shape)
    # uvs, weights, attractive = grid_graph.compute_state_for_segmentation(affs, segmentation, offsets,
    #                                                                      n_attractive_channels=3,
    #                                                                      ignore_label=False)
    # weights[np.logical_not(attractive)] *= -1
    # state = (uvs, weights)

    # with nifty
    rag = nrag.gridRag(seg, numberOfLabels=int(seg.max() + 1),
                       numberOfThreads=1)
    uv_ids = rag.uvIds()

    affs_attractive = affs[:n_attractive]
    # column -2 of the feature matrix corresponds to the max value
    weights_attractive = nrag.accumulateAffinityStandartFeatures(rag, affs_attractive, offsets,
                                                                 numberOfThreads=1)[:, -2]

    affs_repulsive = np.require(affs[n_attractive:], requirements='C')
    weights_repulsive = nrag.accumulateAffinityStandartFeatures(rag, affs_repulsive, offsets,
                                                                numberOfThreads=1)[:, -2]

    weights = weights_attractive
    repulsive = weights_repulsive > weights_attractive
    weights[repulsive] = -1 * weights_repulsive[repulsive]
    return uv_ids, weights
Example #9
def segment_lmc(ws, affs, n_labels, offsets, return_merged_nodes=False):
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    lifted_uvs, local_features, lifted_features = nrag.computeFeaturesAndNhFromAffinities(
        rag, affs, offsets, numberOfThreads=1)
    uv_ids = rag.uvIds()

    lmc = cseg.LiftedMulticut('kernighan-lin', weight_edges=False)
    local_costs = lmc.probabilities_to_costs(local_features[:, 0])
    local_ignore = (uv_ids == 0).any(axis=1)
    local_costs[local_ignore] = 5 * local_costs.min()

    # we might not have lifted edges -> just solve multicut
    if len(lifted_uvs) == 1 and (lifted_uvs[0] == -1).any():
        mc = cseg.Multicut('kernighan-lin', weight_edges=False)
        graph = nifty.graph.undirectedGraph(n_labels)
        graph.insertEdges(uv_ids)
        node_labels = mc(graph, local_costs)

    else:
        lifted_costs = lmc.probabilities_to_costs(lifted_features[:, 0])
        lifted_ignore = (lifted_uvs == 0).any(axis=1)
        lifted_costs[lifted_ignore] = 5 * lifted_costs.min()
        node_labels = lmc(uv_ids, lifted_uvs, local_costs, lifted_costs)

    if return_merged_nodes:
        return get_merged_nodes(uv_ids, node_labels)
    else:
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #10
def _mc_impl(ws,
             affs,
             offsets,
             weight_multicut_edges=False,
             weighting_exponent=1):

    ws = ws.astype('uint32')
    n_labels = int(ws.max()) + 1
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=8)
    uv_ids = rag.uvIds()

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=8)
    probs = features[:, 0]
    sizes = features[:, -1]

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    # compute multicut edge results
    node_labels, _ = multicut.run_mc(
        graph,
        probs,
        uv_ids,
        with_ignore_edges=True,
        edge_sizes=sizes if weight_multicut_edges else None,
        weighting_exponent=weighting_exponent)
    return node_labels
Example #11
def compute_mc(ws, affs, offsets, n_labels, weight_multicut_edges,
               weighting_exponent):
    # compute the region adjacency graph
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()
    if uv_ids.size == 0:
        return None, None, None

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=1)
    probs = features[:, 0]
    sizes = features[:, -1]

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    # compute multicut edge results
    _, merge_indicator = run_mc(
        graph,
        probs,
        uv_ids,
        with_ignore_edges=True,
        edge_sizes=sizes if weight_multicut_edges else None,
        weighting_exponent=weighting_exponent)

    return uv_ids, merge_indicator, sizes
Example #12
def segment_block(block_id, weight_edges=False, cached=False):
    import cremi_tools.segmentation as cseg
    raw_path = '/home/papec/Work/neurodata_hdd/fib25/raw/raw_block%i.h5' % block_id
    pmap_path = '/home/papec/Work/neurodata_hdd/fib25/pmaps/probs_squeezed_block%i.h5' % block_id
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_agglomerated_0.075000_block%i.h5' % block_id

    # load pmap and watersheds
    raw = vigra.readHDF5(raw_path, 'data').astype('float32')
    pmap = vigra.readHDF5(pmap_path, 'data')
    ws = vigra.readHDF5(ws_path, 'data')

    if cached:
        edge_probs = vigra.readHDF5('edge_probs_%i.h5' % block_id, 'data')
        rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
        # TODO edge sizes
    else:
        # feature extractor and multicut
        feature_extractor = cseg.RandomForestFeatures('./rf.pkl', True)
        # make graph and costs
        rag, edge_probs, _, edge_sizes = feature_extractor(pmap, ws, raw=raw)
        vigra.writeHDF5(edge_probs, 'edge_probs_%i.h5' % block_id, 'data')
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())

    mc = cseg.Multicut('kernighan-lin', weight_edges=weight_edges)
    if weight_edges:
        costs = mc.probabilities_to_costs(edge_probs, edge_sizes)
    else:
        costs = mc.probabilities_to_costs(edge_probs)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #13
def load_watershed_and_rag(project):
    with h5py.File(project, 'r') as f:
        # load the watershed; we need to transpose due to axis-tags
        ws = f['preprocessing/graph/labels'][:].T
    n_labels = int(ws.max() + 1)
    rag = nrag.gridRag(ws, numberOfLabels=n_labels)
    return ws, rag
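A short usage sketch (hypothetical 'project.ilp' path; assumes numpy as np and nifty.graph.rag as nrag are in scope): data attached to the rag nodes can be projected back onto the watershed grid, as done throughout these examples.

ws, rag = load_watershed_and_rag('project.ilp')
node_data = np.arange(rag.numberOfNodes, dtype='float32')
projected = nrag.projectScalarNodeDataToPixels(rag, node_data)
assert projected.shape == ws.shape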
Example #14
def get_confidence_scores(instance_labels,
                          affinities,
                          offsets,
                          size_thresh=256,
                          minimum_score=0.4):
    assert (instance_labels.ndim == 3 and affinities.ndim == 4
            and offsets.shape[1] == 3), "Expect 3D data here"
    rag = nrag.gridRag(instance_labels.astype('uint32'))

    confidence_scores, sizes, max_aff = nrag.accumulateAffinitiesMeanAndLengthOnNodes(
        rag,
        instance_labels.astype('int'),
        np.rollaxis(affinities, axis=0, start=4),
        offsets,
        np.ones(offsets.shape[0], dtype='float32'),
        numberOfThreads=1)

    # Set background confidence to zero:
    confidence_scores[0] = 0.
    assert confidence_scores.shape[0] == instance_labels.max() + 1
    if sizes.shape[0] > 1:
        assert (sizes[1:] > 20).all(), "Unexpectedly small instances: %s" % str(sizes)

        # # Get rid of tiny instances:
        # node_sizes = nrag.accumulateMeanAndLength(rag, instance_labels*0.)[1][:, 1]
        # node_sizes[0] = 10000 # mod for ignoring background
        # size_mask = node_sizes < size_thresh
        # score_mask = confidence_scores[1:] > minimum_score
        # # Find instances to keep: (big instances or
        # np.argwhere(size_mask)
        # confidence_scores = np.delete(confidence_scores, size_mask)

    return confidence_scores
Example #15
def lifted_graph(image, seg, lifted_ids, out_path, lifted_weights=None, edge_weights=None,
                 exclude_nodes=[], exclude_edges=[], edge_threshold=.5, node_style=get_node_style(),
                 edge_style=get_edge_style(), lifted_edge_style=get_lifted_edge_style()):
    if image.ndim == 3:
        assert image.shape[:-1] == seg.shape, "%s, %s" % (str(image.shape), str(seg.shape))
    else:
        assert image.shape == seg.shape, "%s, %s" % (str(image.shape), str(seg.shape))
    shape = seg.shape
    assert shape[0] == shape[1], "Only works for square shapes"
    rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)

    # generate the nodes
    node_str = get_nodes(seg, exclude_nodes,
                         style_to_string(node_style))

    # generate the edges
    edge_str = get_edges(rag, edge_weights, exclude_nodes, exclude_edges, edge_threshold,
                         style_to_string(edge_style, return_color=True))

    # generate the lifted edges
    lifted_edge_str = get_lifted_edges(lifted_ids, lifted_weights, edge_threshold,
                                       style_to_string(lifted_edge_style, return_color=True))

    # write the tex files
    os.makedirs('./tmp_tex', exist_ok=True)
    with open("tmp_tex/nodes.tex", "w") as text_file:
        text_file.write(node_str)
    with open("tmp_tex/edges.tex", "w") as text_file:
        text_file.write(edge_str)
    with open("tmp_tex/lifted_edges.tex", "w") as text_file:
        text_file.write(lifted_edge_str)

    # save the image
    vigra.impex.writeImage(image, 'tmp_tex/im.png')
    compile_tikz('lifted_graph', out_path)
Example #16
    def __call__(self, input_, fragments, **kwargs):
        self.rag = nrag.gridRag(fragments,
                                numberOfLabels=int(fragments.max() + 1))
        probs = self._compute_edge_probabilities(input_, fragments, **kwargs)
        node_sizes, edge_sizes = self.compute_node_and_edge_sizes(
            fragments, self.rag)
        return self.rag, probs, node_sizes, edge_sizes
Example #17
def edge_costs_block(block_id):
    block_path = '/home/papec/mnt/papec/Work/neurodata_hdd/cache/cremi_A+/tmp_files/features.n5/blocks'
    graph_path = '/home/papec/mnt/papec/Work/neurodata_hdd/cache/cremi_A+/tmp_files/graph.n5/sub_graphs/s0'

    graph_ds = z5py.File(graph_path)['block_%i' % block_id]
    roi_begin = graph_ds.attrs['roiBegin']
    roi_end = graph_ds.attrs['roiEnd']
    probs = 1. - z5py.File(block_path)['block_%i' % block_id][:, 0:1]

    path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cremi_warped/sampleA+.n5'
    ws_key = 'segmentations/watershed'
    raw_key = 'raw'

    bb = tuple(slice(rb, re) for rb, re in zip(roi_begin, roi_end))
    ws = z5py.File(path)[ws_key][bb].astype('uint32')

    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    edges = graph_ds['edges'][:]
    assert len(edges) == rag.numberOfEdges
    assert (rag.uvIds() == edges).all()

    edge_id_vol, edge_vol_att, edge_vol_rep = get_edge_costs(rag, probs)

    raw = z5py.File(path)[raw_key][bb]
    view([raw, ws, edge_id_vol, edge_vol_att, edge_vol_rep],
         ['raw', 'ws', 'edge-ids', 'attractive edges', 'repulsive edges'],
         layer_types=[
             "Grayscale", "RandomColors", "RandomColors", "Blue", "Red"
         ])
Example #18
    def _compute_mc(self, input_, feat_function, beta):
        # watershed and region adjacency graph
        ws, n_labels = self._compute_ws(input_)

        rag = nrag.gridRag(ws,
                           numberOfLabels=n_labels,
                           numberOfThreads=self.n_threads)
        if rag.numberOfEdges == 0:
            return np.zeros_like(ws)

        # features and features to costs
        feats = feat_function(rag, input_)
        probs, edge_len = feats[:, 0], feats[:, -1]
        costs = self._probs_to_costs(probs, edge_len, beta)

        # graph and multicut solver
        graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
        graph.insertEdges(rag.uvIds())
        objective = nmc.multicutObjective(graph, costs)
        solver = objective.kernighanLinFactory(
            warmStartGreedy=True).create(objective)

        # solve multicut and project back to segmentation
        # TODO time limit
        node_labels = solver.optimize()
        return nrag.projectScalarNodeDataToPixels(
            rag, node_labels, numberOfThreads=self.n_threads)
Example #19
def check_graph_blocks():
    path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cremi_warped/sampleA+.n5'
    ws_key = 'segmentations/watershed'

    ws = z5py.File(path)[ws_key][:].astype('uint32')
    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    uv_ids = rag.uvIds()

    n_blocks = 1680
    graph_path = '/home/papec/mnt/papec/Work/neurodata_hdd/cache/cremi_A+/tmp_files/graph.n5/sub_graphs/s0'

    for block_id in range(n_blocks):
        graph_ds = z5py.File(graph_path)['block_%i' % block_id]
        if 'edges' not in graph_ds:
            continue
        print("Checking block", block_id)
        edges = graph_ds['edges'][:]
        edge_ids = graph_ds['edgeIds'][:]
        assert len(edges) == len(edge_ids)
        # rag_edge_ids = find_matching_row_indices(edges, uv_ids)[:, 0]
        # assert len(edge_ids) == len(rag_edge_ids), "%i, %i" % len(edge_ids) == len(rag_edge_ids)
        # for e, ei in zip(edges, edge_ids):
        #     print(e, uv_ids[ei])
        # assert (rag_edge_ids == edge_ids).all()
        assert (edges == uv_ids[edge_ids]).all()
    print("All passed")
Example #20
    def check_rag(self, labels, graph):
        uvs = graph.uvIds()
        rag = nrag.gridRag(labels.astype('uint32'),
                           numberOfLabels=int(labels.max() + 1))
        uvs_rag = rag.uvIds()
        self.assertEqual(uvs.shape, uvs_rag.shape)
        self.assertTrue((uvs == uvs_rag).all())
Example #21
    def _check_fullresults(self):
        f = z5py.File(self.input_path)
        ds_inp = f[self.input_key]
        ds_inp.n_threads = 8
        ds_ws = f[self.ws_key]
        ds_ws.n_threads = 8

        seg = ds_ws[:]
        rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
        inp = ds_inp[:]

        # compute nifty features
        features_nifty = nrag.accumulateEdgeStandartFeatures(rag, inp, 0., 1.)
        # load features
        features = z5py.File(self.output_path)[self.output_key][:]
        self.assertEqual(len(features_nifty), len(features))
        self.assertEqual(features_nifty.shape[1], features.shape[1] - 1)

        # we can only assert equality for mean, std, min, max and len
        print(features_nifty[:10, 0])
        print(features[:10, 0])
        # -> mean
        self.assertTrue(np.allclose(features_nifty[:, 0], features[:, 0]))
        # -> std
        self.assertTrue(np.allclose(features_nifty[:, 1], features[:, 1]))
        # -> min
        self.assertTrue(np.allclose(features_nifty[:, 2], features[:, 2]))
        # -> max
        self.assertTrue(np.allclose(features_nifty[:, 8], features[:, 8]))
        self.assertFalse(np.allclose(features[:, 3:8], 0))
        # check that the edge-lens agree
        len_nifty = nrag.accumulateEdgeMeanAndLength(rag, inp)[:, 1]
        self.assertTrue(np.allclose(len_nifty, features[:, -1]))
Example #22
def map_edge_features_to_image(offsets, edge_features, rag=None, label_image=None, contractedRag=None,
                               channel_affs=-2, fillValue=0., number_of_threads=8):
    """
    Label image or rag should be passed. Using nifty rag.
    """
    raise DeprecationWarning()
    assert label_image is not None or rag is not None
    if contractedRag is not None:
        assert rag is not None

    if rag is None:
        rag = nrag.gridRag(label_image.astype(np.uint32))

    if contractedRag is None:
        image_map = nrag.mapFeaturesToBoundaries(rag, edge_features.astype(np.float32),
                                                 offsets.astype(np.int32), fillValue,
                                                 number_of_threads)
    else:
        assert number_of_threads == 1, "Multiple threads are currently not supported with a contracted graph!"
        image_map = nrag.mapFeaturesToBoundaries(rag, contractedRag,
                                                 edge_features.astype(np.float32),
                                                 offsets.astype(np.int32), fillValue)

    if channel_affs == 0:
        ndim = image_map.ndim - 2
        dims = tuple(range(ndim))
        return np.transpose(image_map, (ndim,) + dims + (ndim + 1,))
    elif channel_affs != -2:
        raise NotImplementedError()

    return image_map
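The axis shuffle for channel_affs == 0 above simply moves the channel axis to the front; the arithmetic implies the boundary map carries its spatial axes first, followed by two non-spatial axes (an assumption inferred from ndim = image_map.ndim - 2). A standalone sketch of that transpose with a made-up shape:

import numpy as np

image_map = np.zeros((10, 20, 30, 12, 2))  # (z, y, x, channels, 2), assumed layout
ndim = image_map.ndim - 2                  # number of spatial axes: 3
dims = tuple(range(ndim))                  # (0, 1, 2)
moved = np.transpose(image_map, (ndim,) + dims + (ndim + 1,))
assert moved.shape == (12, 10, 20, 30, 2)  # channel axis moved to the front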
Example #23
def compute_state(affs, seg, offsets, n_attractive):

    # with affogato TODO debug this
    # FIXME the uv ids don't make sense!
    # grid_graph = compute_grid_graph(segmentation.shape)
    # uvs, weights, attractive = grid_graph.compute_state_for_segmentation(affs, segmentation, offsets,
    #                                                                      n_attractive_channels=3,
    #                                                                      ignore_label=False)
    # weights[np.logical_not(attractive)] *= -1
    # state = (uvs, weights)

    # with nifty
    rag = nrag.gridRag(seg,
                       numberOfLabels=int(seg.max() + 1),
                       numberOfThreads=1)
    uv_ids = rag.uvIds()

    affs_attractive = affs[:n_attractive]
    # column -2 of the feature matrix corresponds to the max value
    weights_attractive = nrag.accumulateAffinityStandartFeatures(
        rag, affs_attractive, offsets, numberOfThreads=1)[:, -2]

    affs_repulsive = np.require(affs[n_attractive:], requirements='C')
    weights_repulsive = nrag.accumulateAffinityStandartFeatures(
        rag, affs_repulsive, offsets, numberOfThreads=1)[:, -2]

    weights = weights_attractive
    repulsive = weights_repulsive > weights_attractive
    weights[repulsive] = -1 * weights_repulsive[repulsive]
    return uv_ids, weights
Example #24
def debug_subresult(block_id=1):
    example_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'
    block_prefix = os.path.join(example_path, 's0', 'sub_graphs', 'block_')

    graph = ndist.Graph(os.path.join(example_path, 'graph'))
    block_path = block_prefix + str(block_id)
    nodes = ndist.loadNodes(block_path)
    inner_edges, outer_edges, sub_uvs = graph.extractSubgraphFromNodes(nodes)

    block_res_path = './tmp/subproblem_results/s0_block%i.npy' % block_id
    res = np.load(block_res_path)

    merge_edges = np.ones(graph.numberOfEdges, dtype='bool')
    merge_edges[res] = False
    merge_edges[outer_edges] = False

    uv_ids = graph.uvIds()
    n_nodes = int(uv_ids.max()) + 1
    ufd = nifty.ufd.ufd(n_nodes)
    ufd.merge(uv_ids[merge_edges])
    node_labels = ufd.elementLabeling()

    ws = z5py.File(example_path)['volumes/watersheds'][:]
    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels)
    view([ws, seg])
Example #25
def segment_block(ds_ws, ds_affs, blocking, block_id, offsets):

    # load the segmentation
    block = blocking.getBlock(block_id)
    bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
    ws = ds_ws[bb]

    # if this block only contains a single segment id (usually 0 = ignore label) continue
    ws_ids = np.unique(ws)
    if len(ws_ids) == 1:
        return None
    max_id = ws_ids[-1]

    # TODO should we do this ?
    # map to a consecutive segmentation to speed up graph computations
    # ws, max_id, mapping = vigra.analysis.relabelConsecutive(ws, keep_zeros=True, start_label=1)

    # load the affinities
    n_channels = len(offsets)
    bb_affs = (slice(0, n_channels), ) + bb
    affs = ds_affs[bb_affs]
    # convert affinities to float and invert them
    # to get boundary probabilities
    if affs.dtype == np.dtype('uint8'):
        affs = affs.astype('float32') / 255.
    affs = 1. - affs

    # compute the region adjacency graph
    n_labels = int(max_id) + 1
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=1)
    probs = features[:, 0]
    sizes = features[:, -1].astype('uint64')

    # compute multicut
    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    # transform probabilities to costs
    costs = mc.probabilities_to_costs(probs)
    # set edges connecting to 0 (= ignore label) to repulsive
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100
    # solve the mc problem
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)

    # get indicators for merged edges
    # and return uv-ids, edge indicators and edge sizes
    edge_indicator = (
        node_labels[uv_ids[:, 0]] == node_labels[uv_ids[:, 1]]).astype('uint8')

    return uv_ids, edge_indicator, sizes
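cseg.Multicut and its probabilities_to_costs come from cremi_tools and are not shown here. As a hedged sketch, multicut pipelines typically map boundary probabilities to signed costs via the log-odds transform below (assuming that wrapper follows the usual convention; beta is the boundary bias). Consistent with the snippets above, strongly negative costs are repulsive and strongly positive ones attractive.

import numpy as np

def probabilities_to_costs_sketch(probs, beta=0.5, eps=1e-6):
    # clip to keep the logarithm finite, then map boundary probabilities
    # to additive multicut costs: p -> 1 gives strongly negative
    # (repulsive) costs, p -> 0 strongly positive (attractive) ones
    p = np.clip(probs, eps, 1. - eps)
    return np.log((1. - p) / p) + np.log((1. - beta) / beta)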
Example #26
def compute_features(seg, affs):
    import nifty.graph.rag as nrag
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
               [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
               [0, -27, 0], [0, 0, -27]]
    rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
    lr_uvs, local_features, lr_features = nrag.computeFeaturesAndNhFromAffinities(
        rag, affs, offsets)
    return rag, lr_uvs, local_features[:, 0], lr_features[:, 0]
Example #27
    def pass2(block_id):
        # load affinities and segmentation from pass1 from the current block with halo
        block = blocking.getBlockWithHalo(block_id, list(halo))
        bb = tuple(
            slice(beg, end)
            for beg, end in zip(block.outerBlock.begin, block.outerBlock.end))
        seg = segmentation[bb]
        aff_bb = (slice(None), ) + bb
        affs = affinities[aff_bb]

        # get the state of the segmentation from pass 1
        # TODO maybe there is a better option than doing this with the rag
        rag = nrag.gridRag(seg,
                           numberOfLabels=int(seg.max() + 1),
                           numberOfThreads=1)
        prev_uv_ids = rag.uvIds()
        prev_uv_ids = prev_uv_ids[(prev_uv_ids != 0).all(axis=1)]
        edge_ids = graph.findEdges(prev_uv_ids)
        assert len(edge_ids) == len(prev_uv_ids), "%i, %i" % (len(edge_ids),
                                                              len(prev_uv_ids))

        # TODO for some reason we can get edges here that are not part of the serialized state.
        # I don't fully get why, but it means that we have seeds from different pass 1
        # blocks touching.
        # For now, we just drop these edges (findEdges returns -1 for them):
        # assert (edge_ids != -1).all()
        valid_edges = edge_ids != -1
        edge_ids = edge_ids[valid_edges]
        prev_uv_ids = prev_uv_ids[valid_edges]
        prev_weights = weights[edge_ids]
        assert len(prev_uv_ids) == len(prev_weights)

        # call the agglomerator with state
        new_seg = agglomerator(affs,
                               offsets,
                               previous_segmentation=seg,
                               previous_edges=prev_uv_ids,
                               previous_weights=prev_weights)

        # offset the segmentation with the lowest block coordinate to
        # make segmentation ids unique
        id_offset = block_id * block_size
        new_seg += id_offset

        # find the assignments to seed ids
        assignments = get_assignments(new_seg, seg)

        # write out the segmentation
        inner_bb = tuple(
            slice(beg, end)
            for beg, end in zip(block.innerBlock.begin, block.innerBlock.end))
        local_bb = tuple(
            slice(beg, end) for beg, end in zip(block.innerBlockLocal.begin,
                                                block.innerBlockLocal.end))
        segmentation[inner_bb] = new_seg[local_bb]

        return assignments
Example #28
    def testFlatAccumulation(self):
        seg, val = self.makeToyData()
        rag = nrag.gridRag(seg, numberOfLabels=seg.max() + 1)

        # test the different z accumulations
        for zDir in (0, 1, 2):
            feats = nrag.accumulateEdgeFeaturesFlat(rag, val, val.min(), val.max(), zDir, 1)
            self.assertEqual(len(feats), rag.numberOfEdges)
            self.checkToyFeats(feats, zDir)
Example #29
    def __init__(self, base_segmentation, n_threads=8):
        assert isinstance(base_segmentation, np.ndarray)
        self.base_segmentation = base_segmentation
        self.n_threads = n_threads
        self.rag = nrag.gridRag(self.base_segmentation,
                                numberOfLabels=int(base_segmentation.max()) + 1,
                                numberOfThreads=self.n_threads)
        self.uv_ids = self.rag.uvIds()
        self.volume_builder = nrag.ragCoordinates(self.rag, self.n_threads)
Example #30
    def testFlatAccumulation(self):
        seg, val = self.makeToyData()
        rag = nrag.gridRag(seg, numberOfLabels=seg.max() + 1)

        # test the different z accumulations
        for zDir in (0, 1, 2):
            feats = nrag.accumulateEdgeFeaturesFlat(rag, val, val.min(),
                                                    val.max(), zDir, 1)
            self.assertEqual(len(feats), rag.numberOfEdges)
            self.checkToyFeats(feats, zDir)
Example #31
    def pass2(block_id):
        # load segmentation from pass1 from the current block with halo
        block = blocking.getBlockWithHalo(block_id, list(halo))
        bb = tuple(
            slice(beg, end)
            for beg, end in zip(block.outerBlock.begin, block.outerBlock.end))
        seg = segmentation[bb]
        # mask the corners, because these are not part of the seeds, and could already be written by pass 2
        seg = mask_corners(seg, halo)

        # load affinities
        aff_bb = (slice(None), ) + bb
        # mutex watershed changes the affs, so we need to copy here
        affs = affinities[aff_bb].copy()

        # get the state of the segmentation from pass 1
        # TODO maybe there is a better option than doing this with the rag
        rag = nrag.gridRag(seg,
                           numberOfLabels=int(seg.max() + 1),
                           numberOfThreads=1)
        prev_uv_ids = rag.uvIds()
        prev_uv_ids = prev_uv_ids[(prev_uv_ids != 0).all(axis=1)]
        edge_ids = graph.findEdges(prev_uv_ids)
        assert len(edge_ids) == len(prev_uv_ids), "%i, %i" % (len(edge_ids),
                                                              len(prev_uv_ids))
        assert (edge_ids != -1).all()
        prev_weights = weights[edge_ids]
        assert len(prev_uv_ids) == len(prev_weights)

        # call the agglomerator with state
        new_seg = agglomerator(affs,
                               offsets,
                               previous_segmentation=seg,
                               previous_edges=prev_uv_ids,
                               previous_weights=prev_weights)

        # offset the segmentation with the lowest block coordinate to
        # make segmentation ids unique
        id_offset = block_id * block_size
        new_seg += id_offset

        # find the assignments to seed ids
        assignments = get_assignments(new_seg, seg)

        # write out the segmentation
        inner_bb = tuple(
            slice(beg, end)
            for beg, end in zip(block.innerBlock.begin, block.innerBlock.end))
        local_bb = tuple(
            slice(beg, end) for beg, end in zip(block.innerBlockLocal.begin,
                                                block.innerBlockLocal.end))
        segmentation[inner_bb] = new_seg[local_bb]

        return assignments
Example #32
    def _compute_and_check_expected(self, ws, inp, res, exclude=None):
        self.assertFalse((res == 0).all())
        rag = nrag.gridRag(ws, numberOfLabels=int(ws.max() + 1))
        expected = nrag.gridRagAccumulateLabels(rag, inp)

        if exclude is not None:
            res = res[exclude]
            expected = expected[exclude]

        self.assertEqual(res.shape, expected.shape)
        self.assertTrue(np.allclose(res, expected))
Example #33
    def _test_carving(self, ndim):
        from nifty.carving import carvingSegmenter
        x = self.make_labels(ndim)
        rag = nrag.gridRag(x, numberOfLabels=int(x.max()) + 1)
        edgeWeights = 128 * np.random.rand(rag.numberOfEdges).astype('float32')
        segmenter = carvingSegmenter(rag, edgeWeights)

        noBoundaryBelow = 64.
        for bias in (.9, .95, 1.):
            seeds = np.random.randint(0, 3, size=rag.numberOfNodes)
            out = segmenter(seeds, bias, noBoundaryBelow)
            self.assertEqual(len(out), rag.numberOfNodes)
            self.assertTrue(np.allclose(np.unique(out), [1, 2]))
Example #34
    def pass2(block_id):
        # load segmentation from pass1 from the current block with halo
        block = blocking.getBlockWithHalo(block_id, list(halo))
        bb = tuple(slice(beg, end) for beg, end in zip(block.outerBlock.begin, block.outerBlock.end))
        seg = segmentation[bb]
        # mask the corners, because these are not part of the seeds, and could already be written by pass 2
        seg = mask_corners(seg, halo)

        # load affinities
        aff_bb = (slice(None),) + bb
        # mutex watershed changes the affs, so we need to copy here
        affs = affinities[aff_bb].copy()

        # get the state of the segmentation from pass 1
        # TODO maybe there is a better option than doing this with the rag
        rag = nrag.gridRag(seg, numberOfLabels=int(seg.max() + 1), numberOfThreads=1)
        prev_uv_ids = rag.uvIds()
        prev_uv_ids = prev_uv_ids[(prev_uv_ids != 0).all(axis=1)]
        edge_ids = graph.findEdges(prev_uv_ids)
        assert len(edge_ids) == len(prev_uv_ids), "%i, %i" % (len(edge_ids), len(prev_uv_ids))
        assert (edge_ids != -1).all()
        prev_weights = weights[edge_ids]
        assert len(prev_uv_ids) == len(prev_weights)

        # call the agglomerator with state
        new_seg = agglomerator(affs, offsets, previous_segmentation=seg,
                               previous_edges=prev_uv_ids, previous_weights=prev_weights)

        # offset the segmentation with the lowest block coordinate to
        # make segmentation ids unique
        id_offset = block_id * block_size
        new_seg += id_offset

        # find the assignments to seed ids
        assignments = get_assignments(new_seg, seg)

        # write out the segmentation
        inner_bb = tuple(slice(beg, end) for beg, end in zip(block.innerBlock.begin, block.innerBlock.end))
        local_bb = tuple(slice(beg, end) for beg, end in zip(block.innerBlockLocal.begin, block.innerBlockLocal.end))
        segmentation[inner_bb] = new_seg[local_bb]

        return assignments