Example #1
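All of these snippets are excerpts and omit their imports. A minimal preamble they appear to assume (the nrag alias for nifty's RAG submodule is inferred from usage; individual examples additionally use vigra, z5py, os and a few helpers where shown) might look like:

import numpy as np
import nifty
import nifty.graph.rag as nrag
import cremi_tools.segmentation as cseg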
def mc(rag, probs):
    # build the solver, the graph from the RAG edges, and solve
    solver = cseg.Multicut('kernighan-lin', weight_edges=False)
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())
    costs = solver.probabilities_to_costs(probs)
    node_labels = solver(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
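The probabilities_to_costs call maps boundary probabilities to signed edge costs for the multicut objective. Its exact implementation is not shown in these examples; a minimal sketch of the common negative log-odds transform with a boundary bias beta (an assumption, not cseg's verbatim code) is:

import numpy as np

def probabilities_to_costs(probs, beta=0.5, eps=1e-6):
    # clip to avoid log(0); p < beta yields attractive (positive) costs,
    # p > beta yields repulsive (negative) costs
    p = np.clip(probs, eps, 1. - eps)
    return np.log((1. - p) / p) + np.log((1. - beta) / beta)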
Example #2
def compute_mcrf_segments(ws, affs, n_labels, glia, offsets, rf, block_id):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    uv_ids = rag.uvIds()

    # a block (e.g. a small border block) may contain only a single
    # over-segmentation id, resulting in 0 edges
    if uv_ids.shape[0] == 0:
        print("WARNING: block", block_id, "contains only a single id, but is not masked")
        print("This may be caused by an incorrect mask")
        return ws

    # TODO split features over different affinity ranges ?
    feats = np.concatenate([nrag.accumulateAffinityStandartFeatures(rag,
                                                                    affs,
                                                                    offsets,
                                                                    numberOfThreads=1),
                            glia_features(rag, ws, glia)], axis=1)
    probs = rf.predict_proba(feats)[:, 1]

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
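glia_features is used here but not defined in these examples. A hypothetical sketch, assuming it accumulates per-edge statistics of a glia probability channel with nifty's standard edge accumulator (the function choice and value bounds are assumptions):

def glia_features(rag, ws, glia):
    # ws is unused in this sketch; bounds assume glia is a [0, 1] probability map
    return nrag.accumulateEdgeStandartFeatures(rag, glia, 0., 1., numberOfThreads=1)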
Example #3
def segment_lmc(ws, affs, n_labels, offsets, return_merged_nodes=False):
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    lifted_uvs, local_features, lifted_features = nrag.computeFeaturesAndNhFromAffinities(
        rag, affs, offsets, numberOfThreads=1)
    uv_ids = rag.uvIds()

    lmc = cseg.LiftedMulticut('kernighan-lin', weight_edges=False)
    local_costs = lmc.probabilities_to_costs(local_features[:, 0])
    local_ignore = (uv_ids == 0).any(axis=1)
    local_costs[local_ignore] = 5 * local_costs.min()

    # we might not have lifted edges -> just solve multicut
    if len(lifted_uvs) == 1 and (lifted_uvs[0] == -1).any():
        mc = cseg.Multicut('kernighan-lin', weight_edges=False)
        graph = nifty.graph.undirectedGraph(n_labels)
        graph.insertEdges(uv_ids)
        node_labels = mc(graph, local_costs)

    else:
        lifted_costs = lmc.probabilities_to_costs(lifted_features[:, 0])
        lifted_ignore = (lifted_uvs == 0).any(axis=1)
        lifted_costs[lifted_ignore] = 5 * lifted_costs.min()
        node_labels = lmc(uv_ids, lifted_uvs, local_costs, lifted_costs)

    if return_merged_nodes:
        return get_merged_nodes(uv_ids, node_labels)
    else:
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
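get_merged_nodes is also used throughout but never defined. Consistent with the merge indicator computed in Example #5 below, a plausible sketch is:

def get_merged_nodes(uv_ids, node_labels):
    # an edge is merged when the multicut assigns the same label
    # to both of its endpoints; return those uv-pairs
    merged = node_labels[uv_ids[:, 0]] == node_labels[uv_ids[:, 1]]
    return uv_ids[merged]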
Example #4
def segment_block(block_id, weight_edges=False, cached=False):
    import cremi_tools.segmentation as cseg
    raw_path = '/home/papec/Work/neurodata_hdd/fib25/raw/raw_block%i.h5' % block_id
    pmap_path = '/home/papec/Work/neurodata_hdd/fib25/pmaps/probs_squeezed_block%i.h5' % block_id
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_agglomerated_0.075000_block%i.h5' % block_id

    # load pmap and watersheds
    raw = vigra.readHDF5(raw_path, 'data').astype('float32')
    pmap = vigra.readHDF5(pmap_path, 'data')
    ws = vigra.readHDF5(ws_path, 'data')

    if cached:
        edge_probs = vigra.readHDF5('edge_probs_%i.h5' % block_id, 'data')
        rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
        # TODO edge sizes are not cached; edge weighting is disabled in this branch
        edge_sizes = None
    else:
        # feature extractor and multicut
        feature_extractor = cseg.RandomForestFeatures('./rf.pkl', True)
        # make graph and costs
        rag, edge_probs, _, edge_sizes = feature_extractor(pmap, ws, raw=raw)
        vigra.writeHDF5(edge_probs, 'edge_probs_%i.h5' % block_id, 'data')
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())

    # only weight edges when sizes are available (not the case for the cached branch)
    use_weights = weight_edges and edge_sizes is not None
    mc = cseg.Multicut('kernighan-lin', weight_edges=use_weights)
    if use_weights:
        costs = mc.probabilities_to_costs(edge_probs, edge_sizes)
    else:
        costs = mc.probabilities_to_costs(edge_probs)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Example #5

def segment_block(ds_ws, ds_affs, blocking, block_id, offsets):

    # load the segmentation
    block = blocking.getBlock(block_id)
    bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
    ws = ds_ws[bb]

    # if this block contains only a single segment id (usually 0 = ignore label), return early
    ws_ids = np.unique(ws)
    if len(ws_ids) == 1:
        return None
    max_id = ws_ids[-1]

    # TODO should we do this ?
    # map to a consecutive segmentation to speed up graph computations
    # ws, max_id, mapping = vigra.analysis.relabelConsecutive(ws, keep_zeros=True, start_label=1)

    # load the affinities
    n_channels = len(offsets)
    bb_affs = (slice(0, n_channels), ) + bb
    affs = ds_affs[bb_affs]
    # convert affinities to float and invert them
    # to get boundary probabilities
    if affs.dtype == np.dtype('uint8'):
        affs = affs.astype('float32') / 255.
    affs = 1. - affs

    # compute the region adjacency graph
    n_labels = int(max_id) + 1
    rag = nrag.gridRag(ws, numberOfLabels=n_labels, numberOfThreads=1)
    uv_ids = rag.uvIds()

    # compute the features and get edge probabilities (from mean affinities)
    # and edge sizes
    features = nrag.accumulateAffinityStandartFeatures(rag,
                                                       affs,
                                                       offsets,
                                                       numberOfThreads=1)
    probs = features[:, 0]
    sizes = features[:, -1].astype('uint64')

    # compute multicut
    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    # transform probabilities to costs
    costs = mc.probabilities_to_costs(probs)
    # set edges connecting to 0 (= ignore label) to repulsive
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100
    # solve the mc problem
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)

    # compute merge indicators: 1 where the multicut merged both endpoints
    # of an edge; return uv-ids, merge indicators and edge sizes
    edge_indicator = (
        node_labels[uv_ids[:, 0]] == node_labels[uv_ids[:, 1]]).astype('uint8')

    return uv_ids, edge_indicator, sizes
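A hedged usage sketch for this block-wise variant, with placeholder paths and block shape, dataset keys taken from Example #7, and direct-neighbour affinity offsets assumed:

import z5py
import nifty

f = z5py.File('/path/to/data.n5')  # placeholder path
ds_ws = f['segmentations/watershed']
ds_affs = f['predictions/full_affs']
offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]  # assumed offsets
blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
                                roiEnd=list(ds_ws.shape),
                                blockShape=[50, 512, 512])  # placeholder block shape
result = segment_block(ds_ws, ds_affs, blocking, 0, offsets)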
Example #6
def compute_mc_nodes(ws, affs, n_labels, offsets, glia=None):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    probs = nrag.accumulateAffinityStandartFeatures(rag, affs, offsets, numberOfThreads=1)[:, 0]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100

    # run multicut
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    # find the pairs of merged nodes from the multicut
    # node labeling
    return get_merged_nodes(uv_ids, node_labels)
Example #7
def mc_from_costs(sample, out_key=None):
    path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cremi_warped/sampleA+.n5'
    # path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cluster_test_data/testdata1.n5'
    ws_key = 'segmentations/watershed'
    # data_key = 'predictions/full_affs'
    raw_key = 'raw'

    # first we calculate the graph and features
    ws = z5py.File(path)[ws_key][:].astype('uint32')
    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)

    feature_path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cache/cremi_A+/tmp_files/features.n5'
    # feature_path = '/home/papec/Work/neurodata_hdd/ntwrk_papec/cluster_test_data/aff_features.n5'
    probs = 1. - z5py.File(feature_path)['features'][:, 0:1]
    probs = probs.squeeze()
    assert rag.numberOfEdges == len(probs), "%i, %i" % (rag.numberOfEdges,
                                                        len(probs))

    costs = cseg.transform_probabilities_to_costs(probs, edge_sizes=None)
    uv_ids = rag.uvIds()
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    # finally, we run multicut
    cutter = cseg.Multicut("kernighan-lin")

    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(uv_ids)
    node_labels = cutter(graph, costs)
    segmentation = nrag.projectScalarNodeDataToPixels(rag, node_labels)

    if out_key is not None:
        f = z5py.File('./mc_%s.n5' % sample, use_zarr_format=False)
        ds = f.create_dataset(out_key,
                              dtype='uint32',
                              compression='gzip',
                              shape=segmentation.shape,
                              chunks=(64, 64, 64))
        ds[:] = segmentation.astype('uint32')
    else:
        raw = z5py.File(path)[raw_key][:]
        view([raw, ws, segmentation])
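The view call at the end is not imported in this snippet; it presumably comes from cremi_tools' volumina viewer (an assumption based on the other cremi_tools usage here):

from cremi_tools.viewer.volumina import view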
Example #8
def run_mc(graph,
           probs,
           uv_ids,
           edge_sizes=None,
           weighting_exponent=None,
           with_ignore_edges=True):

    # build multicut solver
    mc = cseg.Multicut('kernighan-lin', weight_edges=edge_sizes is not None)

    # set edges connecting to 0 (= ignore label) to repulsive;
    # skipped when coming from the lifted multicut workflow,
    # where these edges have already been filtered out
    if with_ignore_edges:
        ignore_edges = (uv_ids == 0).any(axis=1)
        # if we have edge sizes, set them to 1 for ignore edges,
        # to not skew the max calculation
        if edge_sizes is not None:
            edge_sizes[ignore_edges] = 1

    # transform probabilities to costs
    if edge_sizes is not None:
        costs = mc.probabilities_to_costs(
            probs,
            edge_sizes=edge_sizes,
            weighting_exponent=weighting_exponent)
    else:
        costs = mc.probabilities_to_costs(probs)

    if with_ignore_edges:
        costs[ignore_edges] = -100

    # solve the mc problem
    node_labels = mc(graph, costs)

    # compute merge indicators: 1 where the multicut merged both endpoints of an edge
    merge_indicator = (
        node_labels[uv_ids[:, 0]] == node_labels[uv_ids[:, 1]]).astype('uint8')
    return node_labels, merge_indicator
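When edge_sizes is passed, probabilities_to_costs additionally weights each cost by its edge size. The exact scheme is not shown in these examples; a sketch of the common variant, scaling by relative size with a dampening exponent (an assumption, not cseg's verbatim code):

import numpy as np

def weight_costs(costs, edge_sizes, weighting_exponent=1.):
    # larger boundaries get proportionally larger (more trusted) costs;
    # this is also why run_mc resets ignore-edge sizes to 1 above,
    # so they cannot dominate the normalisation by the maximum size
    weight = (edge_sizes / float(edge_sizes.max())) ** weighting_exponent
    return weight * costs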
Example #9
def segment_mcrf(ws, affs, n_labels, offsets, rf, return_merged_nodes=False):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    feats = nrag.accumulateAffinityStandartFeatures(rag,
                                                    affs,
                                                    offsets,
                                                    numberOfThreads=1)
    probs = rf.predict_proba(feats)[:, 1]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = 5 * costs.min()

    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    if return_merged_nodes:
        return get_merged_nodes(uv_ids, node_labels)
    else:
        return nrag.projectScalarNodeDataToPixels(rag, node_labels)
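segment_mcrf expects a pre-trained random forest exposing scikit-learn's predict_proba. Loading one from the rf.pkl path seen in Example #4 might look like this (the pickle format is an assumption):

import pickle

with open('./rf.pkl', 'rb') as f:
    rf = pickle.load(f)
# ws, affs, n_labels, offsets as in the snippet above
segmentation = segment_mcrf(ws, affs, n_labels, offsets, rf)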
Example #10
def compute_mcrf_nodes(ws, affs, n_labels, offsets, glia, rf):
    rag = nrag.gridRag(ws, numberOfLabels=int(n_labels), numberOfThreads=1)
    # TODO split features over different affinity ranges ?
    feats = np.concatenate([nrag.accumulateAffinityStandartFeatures(rag,
                                                                    affs,
                                                                    offsets,
                                                                    numberOfThreads=1),
                            glia_features(rag, ws, glia)], axis=1)
    probs = rf.predict_proba(feats)[:, 1]
    uv_ids = rag.uvIds()

    mc = cseg.Multicut('kernighan-lin', weight_edges=False)
    costs = mc.probabilities_to_costs(probs)
    ignore_edges = (uv_ids == 0).any(axis=1)
    costs[ignore_edges] = -100

    # run multicut
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uv_ids)
    node_labels = mc(graph, costs)
    # find the pairs of merged nodes from the multicut
    # node labeling
    return get_merged_nodes(uv_ids, node_labels)
Example #11
def multicut(labels_path, labels_key,
             graph_path, graph_key,
             feature_path,
             out_path, out_key,
             initial_block_shape, n_scales,
             weight_edges=True):

    assert os.path.exists(feature_path), feature_path

    # graph = ndist.loadAsUndirectedGraph(os.path.join(graph_path, graph_key))
    # load number of nodes and uv-ids
    f_graph = z5py.File(graph_path)[graph_key]
    shape = f_graph.attrs['shape']
    n_nodes = f_graph.attrs['numberOfNodes']
    uv_ids = f_graph['edges'][:]

    # get the multicut edge probabilities from mean affinities
    feature_ds = z5py.File(feature_path)['features']
    probs = 1. - feature_ds[:, 0:1].squeeze()
    if weight_edges:
        edge_sizes = feature_ds[:, 9:].squeeze()
    else:
        edge_sizes = None

    # find ignore edges
    ignore_edges = (uv_ids == 0).any(axis=1)

    # set edge sizes of ignore edges to 1, so they don't influence the weighting
    if edge_sizes is not None:
        edge_sizes[ignore_edges] = 1
    costs = cseg.transform_probabilities_to_costs(probs, edge_sizes=edge_sizes)

    # set weights of ignore edges to be maximally repulsive
    costs[ignore_edges] = 5 * costs.min()

    # get the number of initial blocks
    blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
                                    roiEnd=list(shape),
                                    blockShape=initial_block_shape)
    n_initial_blocks = blocking.numberOfBlocks

    # get the node to block assignment for scale level 0 and the over-segmentation nodes
    f_nodes = z5py.File('./nodes_to_blocks.n5', use_zarr_format=False)
    if 's0' not in f_nodes:
        f_nodes.create_group('s0')
    print("Here")
    ndist.nodesToBlocks(os.path.join(graph_path, 'sub_graphs/s0/block_'),
                        os.path.join('./nodes_to_blocks.n5/s0', 'node_'),
                        n_initial_blocks, n_nodes, 8)
    print("There")

    initial_node_labeling = None
    agglomerator = cseg.Multicut('kernighan-lin')

    for scale in range(n_scales):
        factor = 2**scale
        block_shape = [bs * factor for bs in initial_block_shape]
        blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
                                        roiEnd=list(shape),
                                        blockShape=block_shape)
        n_blocks = blocking.numberOfBlocks

        if scale == 0:
            block_prefix = os.path.join(graph_path, 'sub_graphs/s%i/block_' % scale)
        else:
            block_prefix = os.path.join(graph_path, 'merged_graphs/s%i/block_' % scale)
        print("Solving sub-problems for scale", scale)
        merge_edge_ids = solve_subproblems_scalelevel(block_prefix,
                                                      os.path.join('./nodes_to_blocks.n5', 's%i' % scale, 'node_'),
                                                      costs,
                                                      n_blocks,
                                                      agglomerator)
        print("Merging sub-solutions for scale", scale)
        next_factor = 2**(scale + 1)
        next_block_shape = [bs * next_factor for bs in initial_block_shape]
        n_nodes, uv_ids, costs, initial_node_labeling = reduce_scalelevel(scale, n_nodes, uv_ids, costs,
                                                                          merge_edge_ids,
                                                                          initial_node_labeling,
                                                                          shape, block_shape, next_block_shape)

    initial_node_labeling = solve_global_problem(n_nodes, uv_ids, costs,
                                                 initial_node_labeling, agglomerator)

    out = z5py.File(out_path, use_zarr_format=False)
    if out_key not in out:
        out.create_dataset(out_key, dtype='uint64', shape=tuple(shape),
                           chunks=tuple(initial_block_shape),
                           compression='gzip')
    else:
        # TODO assertions
        pass

    block_ids = list(range(n_initial_blocks))
    ndist.nodeLabelingToPixels(os.path.join(labels_path, labels_key),
                               os.path.join(out_path, out_key),
                               initial_node_labeling, block_ids,
                               initial_block_shape)
Example #12

#! /usr/bin/python

import time
import os
import argparse
import subprocess
import json
from shutil import rmtree

import z5py
import nifty
import luigi
import cremi_tools.segmentation as cseg

# TODO support more agglomerators
AGGLOMERATORS = {"multicut_kl": cseg.Multicut("kernighan-lin")}
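# a hedged sketch of how further solvers could be registered, assuming
# cseg.Multicut accepts other solver names (not verified here), e.g.:
# AGGLOMERATORS["multicut_gaec"] = cseg.Multicut("greedy-additive")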


class GlobalProblemTask(luigi.Task):
    """
    Solve the global reduced problem
    """

    path = luigi.Parameter()
    out_key = luigi.Parameter()
    max_scale = luigi.IntParameter()
    config_path = luigi.Parameter()
    tmp_folder = luigi.Parameter()
    dependency = luigi.TaskParameter()
    # FIXME default does not work; this still needs to be specified
    time_estimate = luigi.IntParameter(default=10)
Example #13
def multicut(affs, offsets, solver='kernighan-lin'):
    segmenter = cseg.SegmentationPipeline(
        cseg.LRAffinityWatershed(0.1, 0.25, 2.),
        cseg.MeanAffinityFeatures(offsets), cseg.Multicut(solver))
    return segmenter(affs)
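A hedged usage sketch for this end-to-end pipeline; the affinities are random placeholders (real inputs would carry whatever long-range channels the watershed and offsets require):

import numpy as np

offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]  # assumed offsets
affs = np.random.rand(3, 32, 128, 128).astype('float32')  # placeholder
segmentation = multicut(affs, offsets, solver='kernighan-lin')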