Example #1
def test_split_vi():
    seg_test1 = imio.read_h5_stack(os.path.join(rundir, "example-data/test-seg1.lzf.h5"))
    seg_test4 = imio.read_h5_stack(os.path.join(rundir, "example-data/test-seg4.lzf.h5"))
    result = np.vstack(
        (ev.split_vi(ws_test, gt_test), ev.split_vi(seg_test1, gt_test), ev.split_vi(seg_test4, gt_test))
    )
    expected = np.load(os.path.join(rundir, "example-data/vi-results.npy"))
    assert_allclose(result, expected, atol=1e-6)
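For reference: gala's ev.split_vi returns a length-2 array of (false merges, false splits), so each row of result above holds both VI components for one segmentation; Examples #5 and #6 below unpack the same pair. A minimal sketch on toy label arrays (the arrays are illustrative, not from the test data):

import numpy as np
from gala import evaluate as ev

seg = np.array([[1, 1, 2, 2]])  # toy segmentation
gt = np.array([[1, 1, 1, 2]])   # toy ground truth
false_merges, false_splits = ev.split_vi(seg, gt)
vi = false_merges + false_splits  # total variation of information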
Example #2
def test_split_vi():
    seg_test1 = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    seg_test4 = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    result = np.vstack((ev.split_vi(ws_test, gt_test),
                        ev.split_vi(seg_test1, gt_test),
                        ev.split_vi(seg_test4, gt_test)))
    expected = np.load('example-data/vi-results.npy')
    assert_allclose(result, expected, atol=1e-6)
Example #3
File: test_gala.py  Project: NealJMD/gala
def test_split_vi():
    seg_test1 = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    seg_test4 = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    result = np.vstack((
        ev.split_vi(ws_test, gt_test),
        ev.split_vi(seg_test1, gt_test),
        ev.split_vi(seg_test4, gt_test)
        ))
    expected = np.load('example-data/vi-results.npy')
    assert_allclose(result, expected, atol=1e-6)
Example #4
def test_split_vi():
    ws_test = imio.read_h5_stack(
        os.path.join(rundir, 'example-data/test-ws.lzf.h5'))
    gt_test = imio.read_h5_stack(
        os.path.join(rundir, 'example-data/test-gt.lzf.h5'))
    seg_test1 = imio.read_h5_stack(
        os.path.join(rundir, 'example-data/test-seg1.lzf.h5'))
    seg_test4 = imio.read_h5_stack(
        os.path.join(rundir, 'example-data/test-seg4.lzf.h5'))
    result = np.vstack((ev.split_vi(ws_test, gt_test),
                        ev.split_vi(seg_test1, gt_test),
                        ev.split_vi(seg_test4, gt_test)))
    expected = np.load(os.path.join(rundir, 'example-data/vi-results.npy'))
    assert_allclose(result, expected, atol=1e-6)
Example #5
import math
from typing import Tuple

import numpy as np
from skimage import metrics
from skimage.measure import label
from gala import evaluate as ev


def get_vi(pred: np.ndarray,
           mask: np.ndarray,
           bg_value: int = 0,
           method: int = 1) -> Tuple:
    """
    Reference:
    Marina Meilă (2007), Comparing clusterings—an information based distance,
    Journal of Multivariate Analysis, Volume 98, Issue 5, Pages 873-895, ISSN 0047-259X, DOI:10.1016/j.jmva.2006.11.013.
    :param method: 0 for the skimage implementation, 1 for the gala implementation (https://github.com/janelia-flyem/gala)
    :return: Tuple = (merger_error, split_error, VI)
    """
    vi, merger_error, split_error = 0.0, 0.0, 0.0

    # label connected components so both inputs are proper label images
    label_pred, num_pred = label(pred,
                                 connectivity=1,
                                 background=bg_value,
                                 return_num=True)
    label_mask, num_mask = label(mask,
                                 connectivity=1,
                                 background=bg_value,
                                 return_num=True)
    if method == 0:
        # scikit-image implementation
        split_error, merger_error = metrics.variation_of_information(
            label_mask, label_pred)
    elif method == 1:
        # gala implementation
        merger_error, split_error = ev.split_vi(label_pred, label_mask)
    vi = merger_error + split_error
    if math.isnan(vi):
        # sentinel scores for degenerate inputs, in the same order as below
        return 5.0, 5.0, 10.0
    return merger_error, split_error, vi
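A hypothetical call to the function above, with random binary masks standing in for a prediction and its ground truth (get_vi labels their connected components internally):

rng = np.random.default_rng(0)
pred = (rng.random((64, 64)) > 0.5).astype(int)  # toy prediction mask
mask = (rng.random((64, 64)) > 0.5).astype(int)  # toy ground-truth mask
merger_error, split_error, vi = get_vi(pred, mask, method=1)
print(merger_error, split_error, vi)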
Example #6
def calculate_vi_ri_ari(result, gt):
    # false merges (missing structures), false splits (scratches)
    merger_error, split_error = ev.split_vi(result, gt)
    vi = merger_error + split_error
    ri = ev.rand_index(result, gt)
    adjust_ri = ev.adj_rand_index(result, gt)
    return {'vi': vi, 'ri': ri, 'adjust_ri': adjust_ri,
            'merger_error': merger_error,
            'split_error': split_error}
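A minimal usage sketch for the helper above, with toy label arrays standing in for the predicted and ground-truth segmentations:

import numpy as np

result = np.array([[1, 1, 2], [1, 2, 2]])  # toy predicted labels
gt = np.array([[1, 1, 1], [2, 2, 2]])      # toy ground-truth labels
scores = calculate_vi_ri_ari(result, gt)
print(scores['vi'], scores['merger_error'], scores['split_error'])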
Example #7
import pickle

import mahotas as mh
import numpy as np
from gala import evaluate as ev


def evaluate_VI(address):

    pred = np.load(address + "/results/output.npy")[:, 0]
    pred_window_size = np.load(address + "/results/pred_window_size.npy")

    # Load in test addresses
    with open(address + "/pre_processed/" + "test_adress.dat", 'rb') as f:
        test_address = pickle.load(f)

    # integer border width stripped from each side of the ground truth
    gap = (pred_window_size[0] - pred_window_size[1]) // 2
    radii = np.linspace(7, 65, 10)

    VI_metric = np.zeros((radii.size, 3))
    n = 0
    for address_img in test_address:
        ground_truth = mh.imread(address_img)
        ground_truth = ground_truth[gap:-gap, gap:-gap]
        ground_truth = ground_truth.astype(np.uint16)

        r = 0
        for radius in radii:
            # column 0: undersegmentation error, column 1: oversegmentation
            # error, column 2: total VI; `watershed` here is the project's
            # own seeded-watershed helper
            VI_split = ev.split_vi(watershed(pred[n], radius), ground_truth)
            VI_metric[r, 0] += VI_split[0]
            VI_metric[r, 1] += VI_split[1]
            VI_metric[r, 2] += VI_split[0] + VI_split[1]
            r += 1

        n += 1

    VI_metric /= n

    VI_min_pos = np.argmin(VI_metric[:, 2])
    VI_min = VI_metric[VI_min_pos]

    print("Variation of Information (VI):", VI_min[2])
    print("VI, undersegmentation error:", VI_min[0])
    print("VI, oversegmentation error:", VI_min[1])

    np.save(address + "/results/VI.npy", VI_metric)

    return VI_min
Example #8
# the same approach works with a multi-channel probability map
p4_train = imio.read_h5_stack('train-p4.lzf.h5')
# note: the feature manager works transparently with multiple channels!
g_train4 = agglo.Rag(ws_train, p4_train, feature_manager=fc)
(X4, y4, w4, merges4) = g_train4.learn_agglomerate(gt_train, fc)[0]
y4 = y4[:, 0]
print((X4.shape, y4.shape))
rf4 = classify.DefaultRandomForest().fit(X4, y4)
learned_policy4 = agglo.classifier_probability(fc, rf4)
p4_test = imio.read_h5_stack('test-p4.lzf.h5')
g_test4 = agglo.Rag(ws_test, p4_test, learned_policy4, feature_manager=fc)
g_test4.agglomerate(0.5)
seg_test4 = g_test4.get_segmentation()

# gala allows implementation of other agglomerative algorithms, including
# the default, mean agglomeration
g_testm = agglo.Rag(ws_test,
                    pr_test,
                    merge_priority_function=agglo.boundary_mean)
g_testm.agglomerate(0.5)
seg_testm = g_testm.get_segmentation()

# examine how well we did with either learning approach, or mean agglomeration
gt_test = imio.read_h5_stack('test-gt.lzf.h5')
import numpy as np
results = np.vstack(
    (ev.split_vi(ws_test, gt_test), ev.split_vi(seg_testm, gt_test),
     ev.split_vi(seg_test1, gt_test), ev.split_vi(seg_test4, gt_test)))

print(results)
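Examples #8 and #9 assume a feature manager fc constructed earlier in the walkthrough; a plausible sketch, mirroring the composite manager assembled in Example #12 (the histogram bin count and percentiles are that example's choices, not requirements):

from gala import features

fc = features.base.Composite(children=[
    features.moments.Manager(),
    features.histogram.Manager(25, 0, 1, [0.1, 0.5, 0.9]),
    features.graph.Manager(),
    features.contact.Manager([0.1, 0.5, 0.9]),
])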
Example #9
learned_policy = agglo.classifier_probability(fc, rf)
g_test = agglo.Rag(ws_test, pr_test, learned_policy, feature_manager=fc)
g_test.agglomerate(0.5)
seg_test1 = g_test.get_segmentation()
imio.write_h5_stack(seg_test1, 'example-data/test-seg1.lzf.h5', compression='lzf')
g_train4 = agglo.Rag(ws_train, p4_train, feature_manager=fc)
np.random.seed(0)  # seed the global RNG so training-data generation is reproducible
(X4, y4, w4, merges4) = map(np.copy, map(np.ascontiguousarray,
                            g_train4.learn_agglomerate(gt_train, fc)[0]))
print(X4.shape)
np.savez('example-data/train-set4.npz', X=X4, y=y4)
y4 = y4[:, 0]
rf4 = classify.DefaultRandomForest()
np.random.seed(0)  # reseed so the forest fit is reproducible
rf4 = rf4.fit(X4, y4)
classify.save_classifier(rf4, 'example-data/rf-4.joblib')
learned_policy4 = agglo.classifier_probability(fc, rf4)
g_test4 = agglo.Rag(ws_test, p4_test, learned_policy4, feature_manager=fc)
g_test4.agglomerate(0.5)
seg_test4 = g_test4.get_segmentation()
imio.write_h5_stack(seg_test4, 'example-data/test-seg4.lzf.h5', compression='lzf')

results = np.vstack((
    ev.split_vi(ws_test, gt_test),
    ev.split_vi(seg_test1, gt_test),
    ev.split_vi(seg_test4, gt_test)
    ))

np.save('example-data/vi-results.npy', results)

Example #10
                    chunk=chunk,
                    offset=offset,
                    size=size,
                    subgroups=subgroups[i] + ["%.8f" % (prm,)],
                    verbose=False,
                )
                segComps = loadh5.data_cube

                # calculate the ISBI2013 rand error (gala, excludes gt background) using the full out components
                are, prec, rec = ev.adapted_rand_error(segComps, gtComps, all_stats=True)
                # are, prec, rec = adapted_rand_error( gtComps, segComps, nogtbg=True)
                are_gala[i, j, k] = are
                are_precrec_gala[i, j, k, :] = np.array([prec, rec])

                # calculate the split variation of information (gala) using full out components
                split_vi_gala[i, j, k, :] = ev.split_vi(segComps, gtComps, ignore_x=[], ignore_y=[0])

                # total number of supervoxels
                # nlabels[i,j,k] = segComps.max()
                nlabels[i, j, k] = np.unique(segComps.ravel()).size

            # normalized segparams, repeat across chunks for convenience
            if segparams[i].size == 1:
                nsegparams[i, j, 0] = 0
            else:
                tmp = segparams[i] - segparams[i].min()
                nsegparams[i, j, : segparams[i].size] = tmp / tmp.max()

            print("\tdone in %.4f s" % (time.time() - t))

    # save all metric data using dill
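The ignore_x and ignore_y keyword arguments of ev.split_vi list label values to exclude from the comparison: ignore_y=[0] above drops the ground-truth background, while ignore_x=[] keeps every segmentation label. A minimal sketch of the same call on toy arrays:

import numpy as np
from gala import evaluate as ev

seg = np.array([[1, 1, 2, 2]])
gt = np.array([[0, 1, 1, 2]])  # 0 marks unlabelled ground-truth background
false_merges, false_splits = ev.split_vi(seg, gt, ignore_x=[], ignore_y=[0])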
Example #11
                                             size=size,
                                             subgroups=subgroups[i] +
                                             ['%.8f' % (prm, )])
                segComps = loadh5.data_cube

                # calculate the ISBI2013 rand error (gala, excludes gt background) using the full out components
                are, prec, rec = ev.adapted_rand_error(segComps,
                                                       gtComps,
                                                       all_stats=True)
                #are, prec, rec = adapted_rand_error( gtComps, segComps, nogtbg=True)
                are_gala[i, j, k] = are
                are_precrec_gala[i, j, k, :] = np.array([prec, rec])

                # calculate the split variation of information (gala) using full out components
                split_vi_gala[i, j, k, :] = ev.split_vi(segComps,
                                                        gtComps,
                                                        ignore_x=[],
                                                        ignore_y=[0])

            print('\tdone in %.4f s' % (time.time() - t))

    # save all metric data using dill
    with open(os.path.join(outpath, save_file), 'wb') as f:
        dill.dump({'metrics': metrics, 'params': params}, f)
else:
    with open(os.path.join(outpath, save_file), 'rb') as f:
        d = dill.load(f)
    #globals().update(d); globals().update(metrics)
    globals().update(d['metrics'])

# calculations based on parameters
vi_gala = split_vi_gala.sum(axis=3)
Example #12
ws_train = ws_train.astype('int64')
print("unique labels in ws:", np.unique(ws_train).size)

ws_train = optimized.despeckle_watershed(ws_train)
print("unique labels after despeckling:", np.unique(ws_train).size)
ws_train, _, _ = evaluate.relabel_from_one(ws_train)

if ws_train.min() < 1:
    ws_train += (1 - ws_train.min())

ws_train = ws_train.astype('int64')
print("Training watershed complete")

print("Watershed train (VI, ARI)")
vi_ws_train = ev.split_vi(ws_train, gt_train)
ari_ws_train = ev.adj_rand_index(ws_train, gt_train)
print(vi_ws_train)
print(ari_ws_train)

scipy.io.savemat('trainWS.mat', mdict={'ws_train': ws_train})
scipy.io.savemat('trainMembrane.mat', mdict={'membraneTrain': membraneTrain})

# create a feature manager
fc = features.base.Composite(children=[
    features.moments.Manager(),
    features.histogram.Manager(25, 0, 1, [0.1, 0.5, 0.9]),
    features.graph.Manager(),
    features.contact.Manager([0.1, 0.5, 0.9])
])
Example #13
    ax4.matshow(rec_boundary[:, :, section_slice], cmap=plt.cm.jet)
    ax4.set_title('reconstructed fscore: %0.2f' % fscore_rec)
    plt.axis('off')
    plt.show()


if AFFINITYGRAPH:

    # CC_VI_pred, T_pred_CC_VI = sr.best_thresh(out, seg[cube_slice], score_func='CC_VI')
    CC_VI_pred = sr.score(out < 4, seg[cube_slice] < 1, score='CC_VI')
    cc_out, count = ndimage.label(np.invert(out < 5))
    vi1 = evaluate.split_vi(cc_out, seg[cube_slice])
    plt.matshow(cc_out[:, :, 0])
    plt.show()

    plt.matshow(seg[cube_slice][:, :, 0])
    plt.show()

    plt.matshow(out[:, :, 0])
    plt.show()
    print(vi1)
    print("\t SMOOTHED DISTANCE MAP")
    print("\t    -Variation of Information on Connected Components:")
    # print("\t        Best threshold: %d" % T_pred_CC_VI)
    print("\t        Error: %.5f\n" % CC_VI_pred)
    print("\t        Error VI Gala impl false merger: %.5f, false splits: %.5f\n" % (vi1[0], vi1[1]))
            # print "Running watershed"

            # Now we want to separate the two objects in image
            # Generate the markers as local maxima of the distance to the background
            distance = ndi.distance_transform_edt(foreground)
            distance_float = rescale_intensity(distance,
                                               in_range='image',
                                               out_range='float')

            # Changes local mas peak paramater
            for peakPrint in peakArr:

                local_maxi = peak_local_max(distance,
                                            indices=False,
                                            footprint=np.ones(
                                                (peakPrint, peakPrint)),
                                            labels=foreground)
                markers = ndi.label(local_maxi)[0]  #[0]

                labels = watershed(foreground, markers, mask=foreground)

                ######## Get the watershed evaluation
                split_eval = ev.split_vi(labels, membrane_ground[index])
                adjusted_rand = ev.adj_rand_index(labels,
                                                  membrane_ground[index])
                fm = ev.fm_index(labels, membrane_ground[index])
                outFile.write(
                    str(index) + "_" + str(foot) + "_" + str(filt) + "_" +
                    str(peakPrint) + " " + str(split_eval) + " " +
                    str(adjusted_rand) + " " + str(fm) + "\n")
Example #15
print(ws_test.dtype)
ws_test = ws_test.astype('int64')
print("unique labels in ws:", np.unique(ws_test).size)

ws_test = optimized.despeckle_watershed(ws_test)
print("unique labels after despeckling:", np.unique(ws_test).size)
ws_test, _, _ = evaluate.relabel_from_one(ws_test)

if ws_test.min() < 1:
    ws_test += (1 - ws_test.min())

print("Testing watershed complete")

print("Watershed test (VI, ARI)")
vi_ws_test = ev.split_vi(ws_test, gt_test)
ari_ws_test = ev.adj_rand_index(ws_test, gt_test)
print(vi_ws_test)
print(ari_ws_test)

scipy.io.savemat('testWS.mat', mdict={'ws_test': ws_test})

print('Time Elapsed So Far: ' + str(time.time() - start))

print('applying classifier...')
g_test = agglo.Rag(ws_test, membraneTest, learned_policy, feature_manager=fc)
print('choosing best operating point...')
g_test.agglomerate(0.5)  # best expected segmentation
segtestGala = g_test.get_segmentation()

print("Completed Gala Run")