Example #1
def cremi_score(gt, seg, return_all_scores=False, border_threshold=None):
    if cremi is None:
        raise ImportError("The cremi package is necessary to run cremi_score()")

    # the zeros must be kept in the gt since they are the ignore label
    gt = vigra.analysis.labelVolumeWithBackground(gt.astype(np.uint32))
    # seg = vigra.analysis.labelVolume(seg.astype(np.uint32))

    seg = np.array(seg)
    seg = np.require(seg, requirements=['C'])
    # Make sure that all labels are strictly positive:
    seg = seg.astype('uint32')
    # FIXME: it seems to have some trouble with label 0 in the segmentation:
    seg += 1

    gt = np.array(gt)
    gt = np.require(gt, requirements=['C'])
    gt = (gt - 1).astype('uint32')
    # assert gt.min() >= -1

    gt_ = Volume(gt)
    seg_ = Volume(seg)

    metrics = NeuronIds(gt_, border_threshold=border_threshold)
    arand = metrics.adapted_rand(seg_)

    vi_s, vi_m = metrics.voi(seg_)
    cs = np.sqrt(arand * (vi_s + vi_m))
    # cs = (vi_s + vi_m + arand) / 3.
    if return_all_scores:
        return {'cremi-score': cs.item(),
                'vi-merge': vi_m.item(),
                'vi-split': vi_s.item(),
                'adapted-rand': arand.item()}
    else:
        return cs
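This snippet is an excerpt and relies on module-level imports of numpy, vigra, and the cremi package (so that the names cremi, Volume, and NeuronIds are in scope). A minimal usage sketch under those assumptions, with small synthetic label volumes made up for illustration:

import numpy as np
import vigra
import cremi
from cremi import Volume
from cremi.evaluation import NeuronIds

# tiny synthetic volumes; 0 in the ground truth is the ignore label
gt = np.zeros((8, 32, 32), dtype=np.uint32)
gt[:, :16, :] = 1
gt[:, 16:, :] = 2

seg = gt.copy()
seg[:, 14:16, :] = 2  # introduce a small merge error

scores = cremi_score(gt, seg, return_all_scores=True)
print(scores)  # dict with 'cremi-score', 'vi-merge', 'vi-split', 'adapted-rand'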
Example #2
def check_ccs():
    binary = z5py.File('./binary_volume.n5')['data'][:]
    ccs_vi = vigra.analysis.labelVolumeWithBackground(binary)
    ccs = z5py.File('./ccs.n5')['data'][:]

    print("Start comparison")
    metric = NeuronIds(Volume(ccs_vi))
    print("Arand", metric.adapted_rand(Volume(ccs)))
Example #3
def eval_block(block_id, res_prefix):
    gt = Volume(vigra.readHDF5('/home/papec/Work/neurodata_hdd/fib25/gt/gt_block%i.h5' % block_id,
                               'data'))
    res = Volume(vigra.readHDF5('%s_%i.h5' % (res_prefix, block_id), 'data'))
    metrics = NeuronIds(gt)
    are = metrics.adapted_rand(res)
    vi_s, vi_m = metrics.voi(res)
    return are, vi_s, vi_m
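A hedged sketch of how the per-block tuples returned by eval_block might be aggregated; the block ids and the result prefix below are placeholders, not values from the original code:

import numpy as np

block_ids = (1, 2, 3)                                  # hypothetical block ids
scores = np.array([eval_block(b, '/path/to/result_prefix') for b in block_ids])

print('mean adapted RAND:', scores[:, 0].mean())
print('mean VOI split   :', scores[:, 1].mean())
print('mean VOI merge   :', scores[:, 2].mean())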
Example #4
def evaluate(gt, segmentation):
    gt, _, _ = vigra.analysis.relabelConsecutive(gt, start_label=1)
    # use a separate name for the evaluator so it does not shadow this function
    metrics = NeuronIds(Volume(gt))

    segmentation = Volume(segmentation)
    vi_split, vi_merge = metrics.voi(segmentation)
    ri = metrics.adapted_rand(segmentation)

    return vi_split, vi_merge, ri
Example #5
def cremi_scores(seg, gt):
    # map the background label 0 away from 0 (wraps to the max value for unsigned dtypes)
    gt[gt == 0] = -1
    seg = Volume(seg)
    metric = NeuronIds(Volume(gt))
    vis, vim = metric.voi(seg)
    are = metric.adapted_rand(seg)
    # note: arithmetic mean of the three scores; the official cremi score
    # uses the geometric mean instead (see the next example)
    cs = (are + vis + vim) / 3
    return {
        'cremi-score': cs,
        'vi-merge': vim,
        'vi-split': vis,
        'adapted-rand': are
    }
Example #6
def cremi_scores(seg, gt):
    gt[gt == 0] = -1
    seg = Volume(seg)
    metric = NeuronIds(Volume(gt))
    vis, vim = metric.voi(seg)
    are = metric.adapted_rand(seg)
    # cremi uses the geometric mean of rand and vi !
    cs = sqrt(are * (vis + vim))
    return {
        'cremi-score': cs,
        'vi-merge': vim,
        'vi-split': vis,
        'adapted-rand': are
    }
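Examples #5 and #6 differ only in how the three quantities are combined: the first takes the arithmetic mean, the second the geometric mean of adapted RAND and total VOI, which matches the official CREMI score used in Examples #1 and #7. A small numeric illustration with made-up score values:

from math import sqrt

vi_split, vi_merge, adapted_rand = 0.6, 0.4, 0.2  # illustrative values only

arithmetic = (adapted_rand + vi_split + vi_merge) / 3
geometric = sqrt(adapted_rand * (vi_split + vi_merge))

print(arithmetic)  # 0.4
print(geometric)   # ~0.447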
Example #7
def main(argv):
    del argv
    npz_file = FLAGS.base_dir + 'seg_000_' + FLAGS.model_name + '.npz'
    hdf_file = FLAGS.base_dir + 'seg_000_' + FLAGS.model_name + '.hdf'
    gt_file = 'FIB-25/test_sample/groundtruth.h5'
    pred = np.load(npz_file)
    img = pred['segmentation']
    print(np.unique(img))
    print(img.shape)

    with h5py.File(hdf_file, 'w') as f:
        neuron_ids = f.create_dataset('volumes/labels/neuron_ids', data=img)
        # attach the resolution attribute right away instead of reopening the file
        neuron_ids.attrs.create('resolution', [8.0, 8.0, 8.0])

    test = CremiFile(hdf_file, 'r')
    truth = CremiFile(gt_file, 'r')

    neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())

    (voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
    adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())

    print("Neuron IDs")
    print("==========")
    print("\tvoi split   : " + str(voi_split))
    print("\tvoi merge   : " + str(voi_merge))
    voi_total = voi_split + voi_merge
    print("\tvoi total   : " + str(voi_total))
    print("\tadapted RAND: " + str(adapted_rand))
    cremi_score = sqrt(voi_total * adapted_rand)
    print("\tcremi score : " + str(cremi_score))
    print("\tmodel name  : " + FLAGS.model_name)

    with open(FLAGS.results_file, 'a+') as f:
        f.write("\nvoi split   : " + str(voi_split)+"\nvoi merge   : " + str(voi_merge)+\
                "\nvoi total   : " + str(voi_total)+"\nadapted RAND: " + str(adapted_rand)+\
                "\ncremi score : " + str(cremi_score)+\
                "\ntime cost   : " + FLAGS.time+'\n\n')
Example #8
def agglomerate_sp_eval(ws_path, gt_path, prob_path):

    probs = vigra.readHDF5(prob_path, 'data')

    ws = vigra.readHDF5(ws_path, 'data')
    n_nodes = int(ws.max()) + 1

    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    # _, node_sizes = np.unique(ws, return_counts=True)
    # edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, np.zeros(rag.shape, dtype='float32'))[:, 1]
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(rag.uvIds())

    gt = Volume(vigra.readHDF5(gt_path, 'data'))

    # node_factor = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][::-1]
    node_factor = [.025, .05, .075, .1, .15, .2, .25, .4, .5]

    for nf in node_factor:
        # FIXME agglomerative clustering segfaults
        # n_target_nodes = int(nf * n_nodes)
        # agglomerator = cseg.AgglomerativeClustering(n_target_nodes)
        # node_labeling = agglomerator(graph, probs, edge_sizes=edge_sizes, node_sizes=node_sizes)

        agglomerator = cseg.MalaClustering(nf)
        node_labeling = agglomerator(graph, probs)
        vigra.analysis.relabelConsecutive(node_labeling, out=node_labeling)

        seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
        seg = Volume(seg)
        metrics = NeuronIds(gt)
        vi_s, vi_m = metrics.voi(seg)
        are = metrics.adapted_rand(seg)
        print("Evaluation for reduction", nf)
        print("Voi - Split ", vi_s)
        print("Voi - Merge ", vi_m)
        print("Adapted Rand", are)
        print("N-Nodes:", int(node_labeling.max() + 1), '/', n_nodes)
Example #9
def gt_projection(block_id):
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_block%i.h5' % block_id
    ws = vigra.readHDF5(ws_path, 'data')
    ws = vigra.analysis.labelVolume(ws.astype('uint32'))
    gt = vigra.readHDF5('/home/papec/Work/neurodata_hdd/fib25/gt/gt_block%i.h5' % block_id,
                        'data')

    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    labeling = nrag.gridRagAccumulateLabels(rag, gt)

    projected = Volume(nrag.projectScalarNodeDataToPixels(rag, labeling))

    metrics = NeuronIds(Volume(gt))
    vi_s, vi_m = metrics.voi(projected)
    are = metrics.adapted_rand(projected)

    print(vi_s)
    print(vi_m)
    print(are)
    print()

    # overwrite the watershed file with the relabeled version
    os.remove(ws_path)
    vigra.writeHDF5(ws, ws_path, 'data', compression='gzip')
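As I understand it, nrag.gridRagAccumulateLabels followed by projectScalarNodeDataToPixels assigns to every superpixel the ground-truth label that covers most of its voxels and paints that label back onto the pixels. A plain numpy sketch of the same idea, written for illustration rather than as a drop-in replacement (and memory-hungry for large label ranges):

import numpy as np

def project_gt_to_superpixels(ws, gt):
    shape = ws.shape
    ws, gt = ws.ravel().astype(np.int64), gt.ravel().astype(np.int64)
    n_ws, n_gt = int(ws.max()) + 1, int(gt.max()) + 1
    # joint histogram of (superpixel, ground-truth) label pairs
    counts = np.bincount(ws * n_gt + gt, minlength=n_ws * n_gt).reshape(n_ws, n_gt)
    node_labels = counts.argmax(axis=1)  # majority ground-truth label per superpixel
    return node_labels[ws].reshape(shape)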
Example #10
def cremi_score(gt,
                seg,
                return_all_scores=True,
                b_thresh=2,
                data_resolution=(1.0, 1.0, 1.0)):
    """compute cremi scores from np.array"""

    if len(gt.shape) == 2:
        gt = gt[None, :, :]
        seg = seg[None, :, :]
    gt_ = Volume(gt, resolution=data_resolution)
    seg_ = Volume(seg, resolution=data_resolution)

    metrics = NeuronIds(gt_, b_thresh)
    arand = metrics.adapted_rand(seg_)

    vi_s, vi_m = metrics.voi(seg_)
    # official cremi score: geometric mean of total VOI and adapted RAND
    cs = np.sqrt((vi_s + vi_m) * arand)

    if return_all_scores:
        return cs, vi_s, vi_m, arand
    else:
        return cs
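Since this variant also accepts 2D inputs (a singleton z-axis is added before wrapping them in Volume), a small usage sketch with made-up label images:

import numpy as np

gt2d = np.zeros((64, 64), dtype=np.uint64)
gt2d[:, 32:] = 1
seg2d = gt2d.copy()
seg2d[30:34, :] = 1  # introduce a small made-up error

cs, vi_s, vi_m, arand = cremi_score(gt2d, seg2d)
print(cs, vi_s, vi_m, arand)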
Example #11
#!/usr/bin/python

from cremi.io import CremiFile
from cremi.evaluation import NeuronIds, Clefts, SynapticPartners

test = CremiFile('test.hdf', 'r')
truth = CremiFile('groundtruth.hdf', 'r')

neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())

(voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())

print "Neuron IDs"
print "=========="
print "\tvoi split   : " + str(voi_split)
print "\tvoi merge   : " + str(voi_merge)
print "\tadapted RAND: " + str(adapted_rand)

clefts_evaluation = Clefts(test.read_clefts(), truth.read_clefts())

false_positive_count = clefts_evaluation.count_false_positives()
false_negative_count = clefts_evaluation.count_false_negatives()

false_positive_stats = clefts_evaluation.acc_false_positives()
false_negative_stats = clefts_evaluation.acc_false_negatives()

print "Clefts"
print "======"

print "\tfalse positives: " + str(false_positive_count)