Example #1
def compute_state(affs, seg, offsets, n_attractive):

    # with affogato TODO debug this
    # FIXME the uv ids don't make sense!
    # grid_graph = compute_grid_graph(segmentation.shape)
    # uvs, weights, attractive = grid_graph.compute_state_for_segmentation(affs, segmentation, offsets,
    #                                                                      n_attractive_channels=3,
    #                                                                      ignore_label=False)
    # weights[np.logical_not(attractive)] *= -1
    # state = (uvs, weights)

    # with nifty
    rag = nrag.gridRag(seg,
                       numberOfLabels=int(seg.max() + 1),
                       numberOfThreads=1)
    uv_ids = rag.uvIds()

    affs_attractive = affs[:n_attractive]
    # -2 corresponds to max value
    weights_attractive = nrag.accumulateAffinityStandartFeatures(
        rag, affs_attractive, offsets, numberOfThreads=1)[:, -2]

    affs_repulsive = np.require(affs[n_attractive:], requirements='C')
    weights_repulsive = nrag.accumulateAffinityStandartFeatures(
        rag, affs_repulsive, offsets, numberOfThreads=1)[:, -2]

    weights = weights_attractive
    repulsive = weights_repulsive > weights_attractive
    weights[repulsive] = -1 * weights_repulsive[repulsive]
    return uv_ids, weights
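For orientation, here is a minimal sketch of how compute_state might be called; the arrays are random dummies and the offset pattern is only an assumption. The convention in the function is that positive returned weights mark attractive edges and negative weights repulsive ones.

import numpy as np

# dummy inputs; shapes, label counts and offsets are illustrative only
offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
           [-3, 0, 0], [0, -3, 0], [0, 0, -3]]
affs = np.random.rand(len(offsets), 8, 32, 32).astype('float32')
seg = np.random.randint(1, 20, size=(8, 32, 32)).astype('uint32')

# the first 3 channels are treated as attractive, the rest as repulsive
uv_ids, weights = compute_state(affs, seg, offsets, n_attractive=3)
assert len(uv_ids) == len(weights)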
Example #2
File: features.py Project: jhennies/elf
def compute_affinity_features(rag,
                              affinity_map,
                              offsets,
                              min_value=0,
                              max_value=1,
                              n_threads=None):
    """ Compute edge features from affinity map.

    Arguments:
        rag [RegionAdjacencyGraph] - region adjacency graph
        affinity_map [np.ndarray] - affinity map
        offsets [list[list[int]]] - offsets corresponding to the affinity channels
        min_value [float] - minimum value used in accumulation (default: 0)
        max_value [float] - maximum value used in accumulation (default: 1)
        n_threads [int] - number of threads used, set to cpu count by default. (default: None)
    """
    n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads
    if tuple(rag.shape) != affinity_map.shape[1:]:
        raise ValueError("Incompatible shapes: %s, %s" %
                         (str(rag.shape), str(affinity_map.shape[1:])))
    if len(offsets) != affinity_map.shape[0]:
        raise ValueError(
            "Incompatible number of channels and offsets: %i, %i" %
            (len(offsets), affinity_map.shape[0]))
    features = nrag.accumulateAffinityStandartFeatures(
        rag,
        affinity_map,
        offsets,
        min_value,
        max_value,
        numberOfThreads=n_threads)
    return features
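A minimal usage sketch for compute_affinity_features, assuming nifty is installed and the function above is importable; the toy segmentation, offsets, and random affinities are illustrative only.

import numpy as np
import nifty.graph.rag as nrag

# toy over-segmentation with two labels split along the last axis
seg = np.zeros((8, 64, 64), dtype='uint32')
seg[..., 32:] = 1
offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
affs = np.random.rand(len(offsets), *seg.shape).astype('float32')

rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
feats = compute_affinity_features(rag, affs, offsets)
print(feats.shape)  # (n_edges, n_statistics)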
Example #3
def compute_mcrf_segments(ws, affs, n_labels, glia, offsets, rf, block_id):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    uv_ids = rag.uvIds()

    # we can have a single over-segment id for (small border) blocks
    # resulting in 0 edges
    if uv_ids.shape[0] == 0:
        print("WARNING: block", block_id, "contains only a single id, but is not masked")
        print("This may be caused by an incorrect mask")
        return ws

    # TODO split features over different affinity ranges ?
    feats = np.concatenate([nrag.accumulateAffinityStandartFeatures(rag,
                                                                    affs,
                                                                    offsets,
                                                                    numberOfThreads=1),
                            glia_features(rag, ws, glia)], axis=1)
    probs = rf.predict_proba(feats)[:, 1]

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #4
def _mc_impl(ws,
             affs,
             offsets,
             weight_multicut_edges=False,
             weighting_exponent=1):

    ws = ws.astype('uint32')
    n_labels = int(ws.max()) + 1
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=8)
    uv_ids = rag.uvIds()

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=8)
    probs = features[:, 0]
    sizes = features[:, -1]

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    # compute multicut edge results
    node_labels, _ = multicut.run_mc(
        graph,
        probs,
        uv_ids,
        with_ignore_edges=True,
        edge_sizes=sizes if weight_multicut_edges else None,
        weighting_exponent=weighting_exponent)
    return node_labels
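The feature-column convention used throughout these examples: column 0 holds the mean affinity (used as an edge probability), column -2 the maximum (used as the signed weight in compute_state), and column -1 the edge size. A self-contained sketch with dummy data:

import numpy as np
import nifty.graph.rag as nrag

seg = np.random.randint(1, 10, size=(4, 32, 32)).astype('uint32')
rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
affs = np.random.rand(len(offsets), *seg.shape).astype('float32')

features = nrag.accumulateAffinityStandartFeatures(rag, affs, offsets,
                                                   numberOfThreads=1)
mean_aff = features[:, 0]    # edge probability in the multicut examples
max_aff = features[:, -2]    # attractive / repulsive weight in compute_state
edge_size = features[:, -1]  # used to weight multicut edges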
Example #5
def compute_mc(ws, affs, offsets, n_labels, weight_multicut_edges,
               weighting_exponent):
    # compute the region adjacency graph
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()
    if uv_ids.size == 0:
        return None, None, None

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=1)
    probs = features[:, 0]
    sizes = features[:, -1]

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    # compute multicut edge results
    _, merge_indicator = run_mc(
        graph,
        probs,
        uv_ids,
        with_ignore_edges=True,
        edge_sizes=sizes if weight_multicut_edges else None,
        weighting_exponent=weighting_exponent)

    return uv_ids, merge_indicator, sizes
Example #6
    def _compute_edge_probabilities(self, input_, fragments=None):
        assert input_.ndim == 4
        assert input_.shape[0] == len(self.offsets), \
            "%i, %i" % (input_.shape[0], len(self.offsets))
        features = nrag.accumulateAffinityStandartFeatures(
            self.rag, input_, self.offsets)
        return features[:, self.stat_index]
Example #7
def segment_block(ds_ws, ds_affs, blocking, block_id, offsets):

    # load the segmentation
    block = blocking.getBlock(block_id)
    bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
    ws = ds_ws[bb]

    # if this block only contains a single segment id (usually 0 = ignore label), return early
    ws_ids = np.unique(ws)
    if len(ws_ids) == 1:
        return None
    max_id = ws_ids[-1]

    # TODO should we do this ?
    # map to a consecutive segmentation to speed up graph computations
    # ws, max_id, mapping = vigra.analysis.relabelConsecutive(ws, keep_zeros=True, start_label=1)

    # load the affinities
    n_channels = len(offsets)
    bb_affs = (slice(0, n_channels), ) + bb
    affs = ds_affs[bb_affs]
    # convert affinities to float and invert them
    # to get boundary probabilities
    if affs.dtype == np.dtype('uint8'):
        affs = affs.astype('float32') / 255.
    affs = 1. - affs

    # compute the region adjacency graph
    n_labels = int(max_id) + 1
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=1)
    probs = features[:, 0]
    sizes = features[:, -1].astype('uint64')

    # compute multicut
    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    # transform probabilities to costs
    costs = mc.probabilities_to_costs(probs)
    # set edges connecting to 0 (= ignore label) to repulsive
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100
    # solve the mc problem
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)

    # compute merge indicators for the edges
    # and return uv-ids, edge indicators and edge sizes
    edge_indicator = (
        node_labels[uv_ids[:, 0]] == node_labels[uv_ids[:, 1]]).astype('uint8')

    return uv_ids, edge_indicator, sizes
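A hedged sketch of how segment_block might be driven over a whole volume, assuming z5py datasets and nifty.tools.blocking; the path and dataset keys are placeholders.

import nifty.tools
import z5py

f = z5py.File('/path/to/data.n5')        # placeholder path
ds_ws = f['segmentations/watershed']     # placeholder keys
ds_affs = f['predictions/affinities']

offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
                                roiEnd=list(ds_ws.shape),
                                blockShape=[64, 64, 64])
results = [segment_block(ds_ws, ds_affs, blocking, block_id, offsets)
           for block_id in range(blocking.numberOfBlocks)]
# drop empty blocks (segment_block returns None for single-id blocks)
results = [res for res in results if res is not None]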
Example #8
def separate_channel_features(rag, affs):
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
               [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
               [0, -27, 0], [0, 0, -27]]
    assert len(affs) == len(offsets)
    features = []
    for channel_id in range(len(affs)):
        features.append(
            nrag.accumulateAffinityStandartFeatures(
                rag, affs[channel_id:channel_id + 1], [offsets[channel_id]]))
    features = np.concatenate(features, axis=1)
    return np.nan_to_num(features)
Example #9
def preagglomerate(affs, ws, n_labels, thresh=.95):
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
               [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
               [0, -27, 0], [0, 0, -27]]
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    probs = nrag.accumulateAffinityStandartFeatures(rag,
                                                    affs,
                                                    offsets,
                                                    numberOfThreads=1)[:, 0]
    agglomerator = cseg.MalaClustering(thresh)
    g = nifty.graph.undirectedGraph(rag.numberOfNodes)
    g.insertEdges(rag.uvIds())
    node_labels = agglomerator(g, probs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels), int(
        node_labels.max())
Example #10
def compute_mc_nodes(ws, affs, n_labels, offsets, glia=None):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    probs = nrag.accumulateAffinityStandartFeatures(rag, affs, offsets, numberOfThreads=1)[:, 0]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100

    # run multicut
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    # find the pairs of merged nodes from the multicut
    # node labeling
    return get_merged_nodes(uv_ids, node_labels)
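get_merged_nodes is not shown in these examples; a plausible sketch, inferred from the edge_indicator computation in segment_block above, returns the uv pairs whose endpoints received the same multicut node label.

import numpy as np

def get_merged_nodes(uv_ids, node_labels):
    # edges whose two endpoints end up in the same multicut component
    merged = node_labels[uv_ids[:, 0]] == node_labels[uv_ids[:, 1]]
    return uv_ids[merged]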
Example #11
def segment_mcrf(ws, affs, n_labels, offsets, rf, return_merged_nodes=False):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    feats = nrag.accumulateAffinityStandartFeatures(rag,
                                                    affs,
                                                    offsets,
                                                    numberOfThreads=1)
    probs = rf.predict_proba(feats)[:, 1]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    if return_merged_nodes:
        return get_merged_nodes(uv_ids, node_labels)
    else:
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #12
def compute_rag_feats():
    path = '/home/papec/mnt/papec/Work/neurodata_hdd/cremi_warped/sampleA+.n5'
    print("Loading seg")
    seg = z5py.File(path)['segmentations/watershed'][:]
    print(seg.shape)
    print("Computing rag")
    rag = nrag.gridRag(seg, numberOfLabels=int(seg.max() + 1))
    print("Loading affs")
    affs = z5py.File(path)['predictions/affs_glia'][0:12, :]
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
               [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
               [0, -27, 0], [0, 0, -27]]
    assert len(offsets) == len(affs)
    print("Computing feats")
    feats = nrag.accumulateAffinityStandartFeatures(rag, affs, offsets)

    f_save = z5py.File('./costs_tmp.n5')
    ds = f_save.create_dataset('feats',
                               dtype='float32',
                               shape=feats.shape,
                               chunks=(feats.shape[0], 1),
                               compression='gzip')
    ds[:] = feats
Example #13
def compute_mcrf_nodes(ws, affs, n_labels, offsets, glia, rf):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    # TODO split features over different affinity ranges ?
    feats = np.concatenate([nrag.accumulateAffinityStandartFeatures(rag,
                                                                    affs,
                                                                    offsets,
                                                                    numberOfThreads=1),
                            glia_features(rag, ws, glia)], axis=1)
    probs = rf.predict_proba(feats)[:, 1]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100

    # run multicut
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    # find the pairs of merged nodes from the multicut
    # node labeling
    return get_merged_nodes(uv_ids, node_labels)
Example #14
def affinity_features(rag, affs):
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-2, 0, 0], [0, -3, 0],
               [0, 0, -3], [-3, 0, 0], [0, -9, 0], [0, 0, -9], [-4, 0, 0],
               [0, -27, 0], [0, 0, -27]]
    features = nrag.accumulateAffinityStandartFeatures(rag, affs, offsets)
    return np.nan_to_num(features)
Example #15
def compute_lmc_learned(ws, affs, glia, offsets, n_labels, lifted_rf,
                        lifted_nh, weight_multicut_edges, weighting_exponent):
    # compute the region adjacency graph
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=1)
    local_probs = features[:, 0]
    sizes = features[:, -1].astype('uint64')

    # remove all edges connecting to the ignore label, because
    # they introduce short-cut lifted edges
    valid_edges = (uv_ids != 0).all(axis=1)
    uv_ids = uv_ids[valid_edges]

    # if we only had a single edge to ignore label, we can end up
    # with empty uv-ids at this point.
    # if so, return None
    if uv_ids.size == 0:
        return None, None, None

    local_probs = local_probs[valid_edges]
    sizes = sizes[valid_edges]

    # build the original graph and lifted objective
    # with lifted uv-ids
    lifted_uv_ids = feat.make_filtered_lifted_nh(rag, n_labels, uv_ids,
                                                 lifted_nh)
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)

    # we may not get any lifted edges, in this case, fall back to normal multicut
    if lifted_uv_ids.size == 0:
        _, merge_indicator = run_mc(
            graph,
            local_probs,
            uv_ids,
            weighting_exponent=weighting_exponent,
            edge_sizes=sizes if weight_multicut_edges else None)
        return uv_ids, merge_indicator, sizes

    # get features for the lifted edges
    lifted_feats = np.concatenate(
        [  # feat.ucm_features(n_labels, lifted_objective, local_probs),
            feat.clustering_features(graph, local_probs, lifted_uv_ids),
            feat.ucm_features(n_labels, uv_ids, lifted_uv_ids, local_probs),
            feat.region_features(ws, lifted_uv_ids, glia)
        ],
        axis=1)
    lifted_probs = lifted_rf.predict_proba(lifted_feats)[:, 1]

    _, merge_indicator = run_lmc(
        graph,
        uv_ids,
        lifted_uv_ids,
        local_probs,
        lifted_probs,
        local_sizes=sizes if weight_multicut_edges else None,
        weighting_exponent=weighting_exponent,
        with_ignore_edges=False)

    # we don't weight, because we might just have few lifted edges
    # and this would downvote the local edges significantly
    # weight the costs
    # n_local, n_lifted = len(uv_ids), len(lifted_uv_ids)
    # total = float(n_lifted) + n_local
    # local_costs *= (n_lifted / total)
    # lifted_costs *= (n_local / total)

    return uv_ids, merge_indicator, sizes
Example #16
def extract_feats_and_labels(path,
                             aff_key,
                             ws_key,
                             gt_key,
                             mask_key,
                             lifted_nh,
                             offsets=[[-1, 0, 0], [0, -1, 0], [0, 0, -1]],
                             n_threads=40):
    f = z5py.File(path)

    # load the watershed segmentation and compute rag
    ds_seg = f[ws_key]
    ds_seg.n_threads = n_threads
    seg = ds_seg[:]
    print(seg.shape)
    n_labels = int(seg.max()) + 1
    rag = nrag.gridRag(seg, numberOfLabels=n_labels, numberOfThreads=n_threads)

    # load affinities and glia channel
    ds_affs = f[aff_key]
    ds_affs.n_threads = n_threads
    aff_slice = slice(0, len(offsets))
    affs = ds_affs[aff_slice]
    if affs.dtype == np.dtype('uint8'):
        affs = affs.astype('float32') / 255.
    affs = 1. - affs

    n_chans = ds_affs.shape[0]
    glia_slice = slice(n_chans - 1, n_chans)
    glia = ds_affs[glia_slice]
    if glia.dtype == np.dtype('uint8'):
        glia = glia.astype('float32') / 255.

    # compute local probs from affinities
    print("Computing local probabilities")
    probs = nrag.accumulateAffinityStandartFeatures(
        rag, affs, offsets, numberOfThreads=n_threads)[:, 0]
    probs = np.nan_to_num(probs)

    # remove zero-label (== ignore label) from the graph, because it short-circuits
    # lifted edges
    uv_ids = rag.uvIds()
    valid_edges = (uv_ids != 0).all(axis=1)
    uv_ids = uv_ids[valid_edges]
    probs = probs[valid_edges]

    # compute the lifted graph and lifted features
    print("Computing lifted objective")
    lifted_uv_ids = feat.make_filtered_lifted_nh(rag, n_labels, uv_ids,
                                                 lifted_nh)
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)

    # TODO parallelize some of these
    print("Computing lifted features")
    features = np.concatenate(
        [  # feat.ucm_features(n_labels, lifted_objective, probs),
            feat.clustering_features(graph, probs, lifted_uv_ids),
            feat.ucm_features(n_labels, uv_ids, lifted_uv_ids, probs),
            feat.region_features(seg, lifted_uv_ids, glia)
        ],
        axis=1)

    # load mask and groundtruth
    ds_mask = f[mask_key]
    ds_mask.n_threads = n_threads
    mask = ds_mask[:]

    ds_gt = f[gt_key]
    ds_gt.n_threads = n_threads
    gt = ds_gt[:]
    gt[np.logical_not(mask)] = 0

    # compute the edge labels and valid edges
    node_labels = nrag.gridRagAccumulateLabels(rag, gt)
    labels = (node_labels[lifted_uv_ids[:, 0]] !=
              node_labels[lifted_uv_ids[:, 1]]).astype('uint8')
    valid_edges = (node_labels[lifted_uv_ids] != 0).all(axis=1)
    print(np.sum(valid_edges), "edges of", len(lifted_uv_ids), "are valid")
    assert features.shape[0] == labels.shape[0]

    # just for temporary inspection, deactivate !
    import vigra
    vigra.writeHDF5(features, './feats_tmp.h5', 'data', chunks=True)
    vigra.writeHDF5(labels, './labs_tmp.h5', 'data', chunks=True)

    return features[valid_edges], labels[valid_edges]
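A hedged follow-up showing how the returned features and labels might feed the lifted random forest used in compute_lmc_learned, assuming scikit-learn; the path, keys, and hyper-parameters are placeholders.

from sklearn.ensemble import RandomForestClassifier

features, labels = extract_feats_and_labels('/path/to/data.n5',  # placeholder
                                            aff_key='predictions/affs_glia',
                                            ws_key='segmentations/watershed',
                                            gt_key='segmentations/groundtruth',
                                            mask_key='masks/mask',
                                            lifted_nh=2)
lifted_rf = RandomForestClassifier(n_estimators=200, n_jobs=40)
lifted_rf.fit(features, labels)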
Example #17
def _agglomerate_block(blocking, block_id, ds_in, ds_out, config):
    fu.log("start processing block %i" % block_id)
    have_ignore_label = config['have_ignore_label']
    use_mala_agglomeration = config.get('use_mala_agglomeration', True)
    threshold = config.get('threshold', 0.9)
    size_regularizer = config.get('size_regularizer', .5)
    invert_inputs = config.get('invert_inputs', False)
    offsets = config.get('offsets', None)

    bb = vu.block_to_bb(blocking.getBlock(block_id))
    # load the segmentation / output
    seg = ds_out[bb]

    # check if this block is empty
    if np.sum(seg) == 0:
        fu.log_block_success(block_id)
        return

    # load the input data
    ndim_in = ds_in.ndim
    if ndim_in == 4:
        assert offsets is not None
        assert len(offsets) <= ds_in.shape[0]
        bb_in = (slice(0, len(offsets)),) + bb
        input_ = vu.normalize(ds_in[bb_in])
    else:
        assert offsets is None
        input_ = vu.normalize(ds_in[bb])

    if invert_inputs:
        input_ = 1. - input_

    id_offset = int(seg[seg != 0].min())

    # relabel the segmentation
    _, max_id, _ = relabelConsecutive(seg, out=seg, keep_zeros=True, start_label=1)
    seg = seg.astype('uint32')

    # construct rag
    rag = nrag.gridRag(seg, numberOfLabels=max_id + 1,
                       numberOfThreads=1)

    # extract edge features
    if offsets is None:
        edge_features = nrag.accumulateEdgeMeanAndLength(rag, input_, numberOfThreads=1)
    else:
        edge_features = nrag.accumulateAffinityStandartFeatures(rag, input_, offsets,
                                                                numberOfThreads=1)
    edge_features, edge_sizes = edge_features[:, 0], edge_features[:, -1]
    uv_ids = rag.uvIds()
    # set edges to ignore label to be maximally repulsive
    if have_ignore_label:
        ignore_mask = (uv_ids == 0).any(axis=1)
        edge_features[ignore_mask] = 1

    # build undirected graph
    n_nodes = rag.numberOfNodes
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(uv_ids)

    if use_mala_agglomeration:
        node_labels = mala_clustering(graph, edge_features,
                                      edge_sizes, threshold)
    else:
        node_ids, node_sizes = np.unique(seg, return_counts=True)
        if node_ids[0] != 0:
            node_sizes = np.concatenate([np.array([0]), node_sizes])
        n_stop = int(threshold * n_nodes)
        node_labels = agglomerative_clustering(graph, edge_features,
                                               node_sizes, edge_sizes,
                                               n_stop, size_regularizer)

    # relabel the clustering result consecutively
    node_labels, max_id, _ = relabelConsecutive(node_labels, start_label=1, keep_zeros=True)

    fu.log("reduced number of labels from %i to %i" % (n_nodes, max_id + 1))

    # project node labels back to segmentation
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labels, numberOfThreads=1)
    seg = seg.astype('uint64')
    # add offset back to segmentation
    seg[seg != 0] += id_offset

    ds_out[bb] = seg
    # log block success
    fu.log_block_success(block_id)
Example #18
def nearest_features(rag, affs):
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
    affs_ = affs[:3]
    probs = nrag.accumulateAffinityStandartFeatures(rag, affs_, offsets)[:, 0]
    return probs
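Closing with a toy invocation of nearest_features; the data is random and only illustrates the expected shapes (12 channels as in the long-range offset examples, of which the function uses the first 3).

import numpy as np
import nifty.graph.rag as nrag

seg = np.random.randint(1, 10, size=(4, 32, 32)).astype('uint32')
rag = nrag.gridRag(seg, numberOfLabels=int(seg.max()) + 1)
affs = np.random.rand(12, *seg.shape).astype('float32')
probs = nearest_features(rag, affs)  # one mean affinity per rag edge
assert len(probs) == rag.numberOfEdges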