Example #1
def segment_volume_mc(pmaps,
                      threshold=0.4,
                      sigma=2.0,
                      beta=0.6,
                      ws=None,
                      sp_min_size=100):
    if ws is None:
        ws = distance_transform_watershed(pmaps,
                                          threshold,
                                          sigma,
                                          min_size=sp_min_size)[0]

    rag = compute_rag(ws, 1)
    features = nrag.accumulateEdgeMeanAndLength(rag, pmaps, numberOfThreads=1)
    probs = features[:, 0]  # mean edge prob
    edge_sizes = features[:, 1]
    costs = transform_probabilities_to_costs(probs,
                                             edge_sizes=edge_sizes,
                                             beta=beta)
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())

    node_labels = multicut_kernighan_lin(graph, costs)

    return nifty.tools.take(node_labels, ws)
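A hypothetical usage sketch, assuming the helpers above (distance_transform_watershed, compute_rag, transform_probabilities_to_costs, multicut_kernighan_lin, nifty, nrag) are importable, e.g. from elf.segmentation; the random input only stands in for a real boundary-probability volume:

import numpy as np

# fake boundary probabilities; in practice these come from a network prediction
pmaps = np.random.rand(32, 128, 128).astype('float32')
segmentation = segment_volume_mc(pmaps, threshold=0.4, beta=0.6)
print(segmentation.shape, int(segmentation.max()))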
Example #2
def probs_to_costs(probs,
                   beta=.5,
                   weighting_scheme=None,
                   rag=None,
                   segmentation=None,
                   weight=16.):
    """
    :param probs: probability map (0.0 -> merge, 1.0 -> split)
    :param beta: boundary bias (1.0 makes every edge repulsive, 0.0 makes every edge attractive)
    """
    p_min = 0.001
    p_max = 1. - p_min
    # rescale probabilities into [p_min, p_max] to avoid log(0) below
    costs = (p_max - p_min) * probs + p_min

    # probabilities to energies, second term is the boundary bias;
    # resulting costs are positive (attractive, merge) for low probabilities
    # and negative (repulsive, split) for high ones
    costs = np.log((1. - costs) / costs) + np.log((1. - beta) / beta)

    if weighting_scheme is not None:
        assert rag is not None
        assert weighting_scheme in ('xyz', 'z', 'all')
        assert segmentation is not None
        shape = segmentation.shape
        fake_data = np.zeros(shape, dtype='float32')
        edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, fake_data)[:, 1]

        if weighting_scheme == 'all':
            w = weight * edge_sizes / edge_sizes.max()
        else:
            raise NotImplementedError("Weighting scheme not implemented")
        costs *= w

    return costs
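A self-contained numpy check of the transform (illustrative values only): low probabilities become positive, attractive costs; high probabilities become negative, repulsive ones.

import numpy as np

p = np.array([0.1, 0.5, 0.9])
p_min, p_max, beta = 0.001, 0.999, 0.5
scaled = (p_max - p_min) * p + p_min
costs = np.log((1. - scaled) / scaled) + np.log((1. - beta) / beta)
print(costs)  # approx. [ 2.19  0.  -2.19] for beta = 0.5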
Example #3
    def segment_volume(self, pmaps):
        if self.ws_2D:
            # WS in 2D
            ws = self.ws_dt_2D(pmaps)
        else:
            # WS in 3D
            ws, _ = distance_transform_watershed(pmaps,
                                                 self.ws_threshold,
                                                 self.ws_sigma,
                                                 sigma_weights=self.ws_w_sigma,
                                                 min_size=self.ws_minsize)

        rag = compute_rag(ws, 1)
        # Computing edge features
        features = nrag.accumulateEdgeMeanAndLength(
            rag, pmaps, numberOfThreads=1)  # DO NOT CHANGE numberOfThreads
        probs = features[:, 0]  # mean edge prob
        edge_sizes = features[:, 1]
        # Prob -> edge costs
        costs = transform_probabilities_to_costs(probs,
                                                 edge_sizes=edge_sizes,
                                                 beta=self.beta)
        # Creating graph
        graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
        graph.insertEdges(rag.uvIds())
        # Solving Multicut
        node_labels = multicut_kernighan_lin(graph, costs)
        return nifty.tools.take(node_labels, ws)
Example #4
    def _check_fullresults(self):
        f = z5py.File(self.input_path)
        ds_inp = f[self.input_key]
        ds_inp.n_threads = 8
        ds_ws = f[self.ws_key]
        ds_ws.n_threads = 8

        seg = ds_ws[:]
        rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
        inp = ds_inp[:]

        # compute nifty features
        features_nifty = nrag.accumulateEdgeStandartFeatures(rag, inp, 0., 1.)
        # load features
        features = z5py.File(self.output_path)[self.output_key][:]
        self.assertEqual(len(features_nifty), len(features))
        self.assertEqual(features_nifty.shape[1], features.shape[1] - 1)

        # we can only assert equality for mean, std, min, max and len
        print(features_nifty[:10, 0])
        print(features[:10, 0])
        # -> mean
        self.assertTrue(np.allclose(features_nifty[:, 0], features[:, 0]))
        # -> std
        self.assertTrue(np.allclose(features_nifty[:, 1], features[:, 1]))
        # -> min
        self.assertTrue(np.allclose(features_nifty[:, 2], features[:, 2]))
        # -> max
        self.assertTrue(np.allclose(features_nifty[:, 8], features[:, 8]))
        # quantile features (columns 3:8) may differ, but should not all be zero
        self.assertFalse(np.allclose(features[:, 3:8], 0))
        # check that the edge-lens agree
        len_nifty = nrag.accumulateEdgeMeanAndLength(rag, inp)[:, 1]
        self.assertTrue(np.allclose(len_nifty, features[:, -1]))
Example #5
def merge_label(segmentation, merge_id, n_threads=8):
    """
    Merge all instances of a given label id into the surrounding labels.
    """
    merge_map = segmentation == merge_id
    relabeled = vigra.analysis.labelMultiArrayWithBackground(segmentation)
    merge_ids = np.unique(relabeled[merge_map])

    n_labels = int(relabeled.max() + 1)
    rag = nrag.gridRag(relabeled, numberOfLabels=n_labels,
                       numberOfThreads=n_threads)
    fake = np.zeros(rag.shape, dtype='float32')
    edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, fake)[:, 1]

    for merge in merge_ids:
        adjacency = [adj for adj in rag.nodeAdjacency(merge)]
        if len(adjacency) == 1:
            node = adjacency[0][0]
        else:
            node = 0
            size = 0
            for adj in adjacency:
                curr_node, edge = adj
                if edge_sizes[edge] > size and curr_node != 0:
                    node = curr_node
                    size = edge_sizes[edge]
        relabeled[relabeled == merge] = node
    relabeled = vigra.analysis.labelMultiArrayWithBackground(relabeled)
    return relabeled
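A hypothetical toy usage (numpy, vigra and nifty.graph.rag as nrag must be importable): dissolve all instances of id 3 into the neighbour sharing the largest boundary.

import numpy as np

seg = np.zeros((8, 16, 16), dtype='uint32')
seg[:, :8] = 1
seg[:, 8:] = 2
seg[2:4, 6:10, 6:10] = 3  # small region touching both 1 and 2
merged = merge_label(seg, merge_id=3)
print(np.unique(merged))  # ids are relabeled consecutively afterwards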
Example #6
    def check_results(self, in_key, feat_func):
        f = z5py.File(self.input_path)
        ds_inp = f[in_key]
        ds_inp.n_threads = 8
        ds_ws = f[self.ws_key]
        ds_ws.n_threads = 8

        # load features
        features = z5py.File(self.output_path)[self.output_key][:]

        # load seg and input, make rag
        seg = ds_ws[:]
        inp = normalize(ds_inp[:]) if ds_inp.ndim == 3 else normalize(ds_inp[:3])
        rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)

        # compute nifty lens
        if inp.ndim == 3:
            len_nifty = nrag.accumulateEdgeMeanAndLength(rag, inp)[:, 1]
        else:
            len_nifty = nrag.accumulateEdgeMeanAndLength(rag, inp[0])[:, 1]
        self.assertTrue(np.allclose(len_nifty, features[:, -1]))

        # compute nifty features
        features_nifty = feat_func(rag, inp)

        # check feature shape
        self.assertEqual(len(features_nifty), len(features))
        self.assertEqual(features_nifty.shape[1], features.shape[1] - 1)

        # debugging: check closeness of the min values
        # close = np.isclose(features_nifty[:, 2], features[:, 2])
        # print(close.sum(), '/', len(close))
        # not_close = ~close
        # print(np.where(not_close)[:10])
        # print(features[:, 2][not_close][:10])
        # print(features_nifty[:, 2][not_close][:10])

        # we can only assert equality for mean, std, min, max and len
        # -> mean
        self.assertTrue(np.allclose(features_nifty[:, 0], features[:, 0]))
        # -> std
        self.assertTrue(np.allclose(features_nifty[:, 1], features[:, 1]))
        # -> min
        self.assertTrue(np.allclose(features_nifty[:, 2], features[:, 2]))
        # -> max
        self.assertTrue(np.allclose(features_nifty[:, 8], features[:, 8]))
Example #7
def probs_to_costs(probs,
                   beta=.5,
                   weighting_scheme=None,
                   edge_sizes=None,
                   rag=None,
                   segmentation=None,
                   weight=16.):
    """
    :param probs: expected a probability map (0.0 merge or 1.0 split)
    :param beta: bias factor (with 1.0 everything is repulsive, with 0. everything is attractive)
    :param weighting_scheme:
    :param rag:
    :param segmentation:
    :param weight:
    :return:
    """
    p_min = 0.001
    p_max = 1. - p_min
    # rescale probabilities into [p_min, p_max] to avoid log(0) below
    costs = (p_max - p_min) * probs + p_min

    # probabilities to energies, second term is the boundary bias;
    # resulting costs are positive (attractive, merge) for low probabilities
    # and negative (repulsive, split) for high ones
    costs = np.log((1. - costs) / costs) + np.log((1. - beta) / beta)

    if weighting_scheme is not None:
        assert weighting_scheme in ('xyz', 'z', 'all')

        if edge_sizes is None:
            assert rag is not None
            assert segmentation is not None
            shape = segmentation.shape
            fake_data = np.zeros(shape, dtype='float32')
            # FIXME something is wrong here with the nifty function
            edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, fake_data)[:, 1]

        if weighting_scheme == 'all':
            w = weight * edge_sizes / edge_sizes.max()
        elif weighting_scheme == 'z':
            assert segmentation is not None
            z_edges = get_z_edges(rag, segmentation)
            w = np.ones_like(costs)
            z_max = edge_sizes[z_edges].max()
            w[z_edges] = weight * edge_sizes[z_edges] / z_max

        elif weighting_scheme == 'xyz':
            assert segmentation is not None
            z_edges = get_z_edges(rag, segmentation)
            xy_edges = np.logical_not(z_edges)
            w = np.ones_like(costs)
            xy_max = edge_sizes[xy_edges].max()
            z_max = edge_sizes[z_edges].max()
            w[xy_edges] = weight * edge_sizes[xy_edges] / xy_max
            w[z_edges] = weight * edge_sizes[z_edges] / z_max

        costs *= w

    return costs
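The 'z' and 'xyz' schemes call a helper get_z_edges that is not shown. A minimal sketch of what it plausibly computes, assuming flat 2.5D superpixels (every label confined to a single z-slice); this is an assumption, not the original implementation:

import numpy as np

def get_z_edges_sketch(rag, segmentation):
    # hypothetical: record the z-slice each node appears in
    # (assumes flat superpixels)
    node_z = np.zeros(rag.numberOfNodes, dtype='int64')
    for z in reversed(range(segmentation.shape[0])):
        node_z[np.unique(segmentation[z])] = z
    uv = rag.uvIds()
    # an edge is a z-edge if its endpoints lie in different slices
    return node_z[uv[:, 0]] != node_z[uv[:, 1]]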
Example #8
def compute_boundary_mean_and_length(rag, input_, n_threads=None):
    """ Compute mean value and length of boundaries.

    Arguments:
        rag [RegionAdjacencyGraph] - region adjacency graph
        input_ [np.ndarray] - input map.
        n_threads [int] - number of threads used, set to cpu count by default. (default: None)
    """
    n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads
    if tuple(rag.shape) != input_.shape:
        raise ValueError("Incompatible shapes: %s, %s" %
                         (str(rag.shape), str(input_.shape)))
    features = nrag.accumulateEdgeMeanAndLength(rag,
                                                input_,
                                                numberOfThreads=n_threads)
    return features
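A minimal usage sketch with random data (numpy, multiprocessing and nifty.graph.rag as nrag assumed imported):

import numpy as np
import nifty.graph.rag as nrag

labels = np.random.randint(0, 50, size=(16, 64, 64), dtype='uint32')
rag = nrag.gridRag(labels, numberOfLabels=50)
boundaries = np.random.rand(16, 64, 64).astype('float32')
feats = compute_boundary_mean_and_length(rag, boundaries)
mean_values, lengths = feats[:, 0], feats[:, 1]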
Example #9
def edge_features(rag, ws, n_labels, uv_ids, affs, n_threads=1):

    # get the input maps for xy and z features
    bmap_xy = np.mean(affs[1:], axis=0)
    bmap_z = affs[0]

    node_z = node_z_coord(ws, n_labels)
    z_edge_mask = edge_indications(uv_ids, node_z)

    # TODO try to use fastfilters ?
    filters = [
        ff.gaussianSmoothing, ff.laplacianOfGaussian,
        ff.hessianOfGaussianEigenvalues
    ]
    sigmas = [1.6, 4.2, 8.3]

    def feature_channel(filter_, sigma):
        feats = accumulate_filter(rag, bmap_xy, filter_, sigma)
        feats_z = accumulate_filter(rag, bmap_z, filter_, sigma)
        feats[z_edge_mask] = feats_z[z_edge_mask]
        return feats

    if n_threads == 1:
        features = np.concatenate(
            [feature_channel(filter_, sigma)
             for filter_ in filters for sigma in sigmas],
            axis=1)
    else:
        with futures.ThreadPoolExecutor(n_threads) as tp:
            tasks = [
                tp.submit(feature_channel, filter_, sigma)
                for filter_ in filters for sigma in sigmas
            ]
            features = np.concatenate([t.result() for t in tasks], axis=1)

    sizes = nrag.accumulateEdgeMeanAndLength(
        rag, bmap_xy, numberOfThreads=n_threads)[:, 1].astype('uint64')
    features = np.concatenate([features, sizes[:, None].astype('float32')],
                              axis=1)
    return features, sizes, z_edge_mask
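edge_features relies on helpers that are not shown (node_z_coord, edge_indications, accumulate_filter; ff is presumably fastfilters). As a rough sketch under those assumptions, accumulate_filter could compute the per-edge mean of each filter-response channel:

import numpy as np
import nifty.graph.rag as nrag

def accumulate_filter(rag, input_, filter_, sigma):
    # hypothetical sketch, not the original helper
    response = filter_(input_, sigma)
    if response.ndim == input_.ndim:
        response = response[..., None]  # unify single- and multi-channel
    channels = [
        nrag.accumulateEdgeMeanAndLength(
            rag, np.ascontiguousarray(response[..., c]))[:, :1]
        for c in range(response.shape[-1])
    ]
    return np.concatenate(channels, axis=1)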
Example #10
    def _check_subresults(self):
        f = z5py.File(self.input_path)
        f_feat = z5py.File(self.output_path)
        ds_inp = f[self.input_key]
        ds_ws = f[self.ws_key]

        shape = ds_ws.shape
        blocking = nt.blocking([0, 0, 0], list(shape), self.block_shape)

        halo = [1, 1, 1]
        for block_id in range(blocking.numberOfBlocks):

            # get the block with the appropriate halo
            # and the corresponding bounding box
            block = blocking.getBlockWithHalo(block_id, halo)
            outer_block, inner_block = block.outerBlock, block.innerBlock
            bb = tuple(
                slice(beg, end)
                for beg, end in zip(inner_block.begin, outer_block.end))
            # load the segmentation and the input
            # and compute the nifty graph
            seg = ds_ws[bb]
            rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
            inp = ds_inp[bb]

            # compute nifty features
            features_nifty = nrag.accumulateEdgeStandartFeatures(
                rag, inp, 0., 1.)

            # load the features
            feat_key = os.path.join('blocks', 'block_%i' % block_id)
            features_block = f_feat[feat_key][:]

            # compare features
            self.assertEqual(len(features_nifty), len(features_block))
            self.assertEqual(features_nifty.shape[1],
                             features_block.shape[1] - 1)
            self.assertTrue(np.allclose(features_nifty,
                                        features_block[:, :-1]))
            len_nifty = nrag.accumulateEdgeMeanAndLength(rag, inp)[:, 1]
            self.assertTrue(np.allclose(len_nifty, features_block[:, -1]))
Example #11
def compute_edge_weights(rag, inp):
    weights = nrag.accumulateEdgeMeanAndLength(rag, inp)
    return weights[:, 0]
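Usage sketch with random data (names illustrative):

import numpy as np
import nifty.graph.rag as nrag

seg = np.random.randint(0, 20, size=(8, 32, 32), dtype='uint32')
rag = nrag.gridRag(seg, numberOfLabels=20)
weights = compute_edge_weights(rag, np.random.rand(8, 32, 32).astype('float32'))
assert len(weights) == rag.numberOfEdges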
Example #12
    def test_accumulate_mean_and_length_3d(self):
        labels = np.random.randint(0, 100, size=self.shape_3d, dtype='uint32')
        rag = nrag.gridRag(labels, numberOfLabels=100)
        data = np.random.random_sample(self.shape_3d).astype('float32')
        res = nrag.accumulateEdgeMeanAndLength(rag, data)
        self.assertTrue(np.sum(res) != 0)
Example #13
def compute_node_and_edge_sizes(fragments, rag):
    _, node_sizes = np.unique(fragments, return_counts=True)
    edge_sizes = nrag.accumulateEdgeMeanAndLength(
        rag, np.zeros(rag.shape, dtype='float32'))[:, 1]
    return node_sizes, edge_sizes
Example #14
def _agglomerate_block(blocking, block_id, ds_in, ds_out, config):
    fu.log("start processing block %i" % block_id)
    have_ignore_label = config['have_ignore_label']
    use_mala_agglomeration = config.get('use_mala_agglomeration', True)
    threshold = config.get('threshold', 0.9)
    size_regularizer = config.get('size_regularizer', .5)
    invert_inputs = config.get('invert_inputs', False)
    offsets = config.get('offsets', None)

    bb = vu.block_to_bb(blocking.getBlock(block_id))
    # load the segmentation / output
    seg = ds_out[bb]

    # check if this block is empty
    if np.sum(seg) == 0:
        fu.log_block_success(block_id)
        return

    # load the input data
    ndim_in = ds_in.ndim
    if ndim_in == 4:
        assert offsets is not None
        assert len(offsets) <= ds_in.shape[0]
        bb_in = (slice(0, len(offsets)),) + bb
        input_ = vu.normalize(ds_in[bb_in])
    else:
        assert offsets is None
        input_ = vu.normalize(ds_in[bb])

    if invert_inputs:
        input_ = 1. - input_

    id_offset = int(seg[seg != 0].min())

    # relabel the segmentation
    _, max_id, _ = relabelConsecutive(seg, out=seg, keep_zeros=True, start_label=1)
    seg = seg.astype('uint32')

    # construct rag
    rag = nrag.gridRag(seg, numberOfLabels=max_id + 1,
                       numberOfThreads=1)

    # extract edge features
    if offsets is None:
        edge_features = nrag.accumulateEdgeMeanAndLength(rag, input_, numberOfThreads=1)
    else:
        edge_features = nrag.accumulateAffinityStandartFeatures(rag, input_, offsets,
                                                                numberOfThreads=1)
    edge_features, edge_sizes = edge_features[:, 0], edge_features[:, -1]
    uv_ids = rag.uvIds()
    # set edges to ignore label to be maximally repulsive
    if have_ignore_label:
        ignore_mask = (uv_ids == 0).any(axis=1)
        edge_features[ignore_mask] = 1

    # build undirected graph
    n_nodes = rag.numberOfNodes
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(uv_ids)

    if use_mala_agglomeration:
        node_labels = mala_clustering(graph, edge_features,
                                      edge_sizes, threshold)
    else:
        node_ids, node_sizes = np.unique(seg, return_counts=True)
        if node_ids[0] != 0:
            node_sizes = np.concatenate([np.array([0]), node_sizes])
        n_stop = int(threshold * n_nodes)
        node_labels = agglomerative_clustering(graph, edge_features,
                                               node_sizes, edge_sizes,
                                               n_stop, size_regularizer)

    # relabel the node labels consecutively
    node_labels, max_id, _ = relabelConsecutive(node_labels, start_label=1, keep_zeros=True)

    fu.log("reduced number of labels from %i to %i" % (n_nodes, max_id + 1))

    # project node labels back to segmentation
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels, numberOfThreads=1)
    seg = seg.astype('uint64')
    # add offset back to segmentation
    seg[seg != 0] += id_offset

    ds_out[bb] = seg
    # log block success
    fu.log_block_success(block_id)
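For reference, a hypothetical config dict covering exactly the keys this block reads (values illustrative, not recommendations):

config = {
    'have_ignore_label': True,        # treat label 0 as ignore label
    'use_mala_agglomeration': True,   # False -> agglomerative clustering
    'threshold': 0.9,                 # mala threshold / fraction of nodes to keep
    'size_regularizer': 0.5,          # only used by agglomerative clustering
    'invert_inputs': False,           # set True if inputs encode merge affinity
    'offsets': None,                  # list of affinity offsets for 4D inputs
}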