Ejemplo n.º 1
0
def test_split_vi():
    """Split-VI of three segmentations vs. ground truth matches stored results."""
    seg_test1 = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    seg_test4 = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    # one split-VI row per segmentation, stacked in a fixed order
    segmentations = (ws_test, seg_test1, seg_test4)
    result = np.vstack([ev.split_vi(seg, gt_test) for seg in segmentations])
    expected = np.load('example-data/vi-results.npy')
    assert_allclose(result, expected, atol=1e-6)
Ejemplo n.º 2
0
def test_split_vi():
    """Compare split-VI scores of several segmentations against stored values."""
    seg_test1 = imio.read_h5_stack(os.path.join(rundir, "example-data/test-seg1.lzf.h5"))
    seg_test4 = imio.read_h5_stack(os.path.join(rundir, "example-data/test-seg4.lzf.h5"))
    # build the result matrix row by row, then stack
    rows = [ev.split_vi(seg, gt_test) for seg in (ws_test, seg_test1, seg_test4)]
    result = np.vstack(rows)
    expected = np.load(os.path.join(rundir, "example-data/vi-results.npy"))
    assert_allclose(result, expected, atol=1e-6)
Ejemplo n.º 3
0
def test_split_vi():
    """Split-VI for watershed and two agglomerated segmentations is as stored."""
    seg_test1 = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    seg_test4 = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    vi_ws = ev.split_vi(ws_test, gt_test)
    vi_1 = ev.split_vi(seg_test1, gt_test)
    vi_4 = ev.split_vi(seg_test4, gt_test)
    result = np.vstack((vi_ws, vi_1, vi_4))
    expected = np.load('example-data/vi-results.npy')
    assert_allclose(result, expected, atol=1e-6)
Ejemplo n.º 4
0
def concatenate(traintest, size, volume_id, cues_id_1, cues_id_2, datatype, group):
    """Join two cue volumes along the channel axis and write the combined stack.

    The destination id is the two source ids joined by ID_DELIMITER.
    """
    cues_id_new = ID_DELIMITER.join([cues_id_1, cues_id_2])
    src_paths_1 = get_paths("XX", traintest, size, volume_id, cues_id_1, "XX", "XX", "XX")
    src_paths_2 = get_paths("XX", traintest, size, volume_id, cues_id_2, "XX", "XX", "XX")
    dest_paths = get_paths("XX", traintest, size, volume_id, cues_id_new, "XX", "XX", "XX")
    sources = []
    for paths in (src_paths_1, src_paths_2):
        arr = imio.read_h5_stack(paths[datatype], group=group)
        if arr.ndim < 4:
            # promote to 4-D so the channel-axis concatenation below works
            arr = arr[..., np.newaxis]
        sources.append(arr)
    dest_arr = np.concatenate(sources, axis=3)
    write_h5(dest_arr, dest_paths[datatype], group)
Ejemplo n.º 5
0
def test_split_vi():
    """Load all volumes from rundir and check split-VI against saved results."""
    def _load(relpath):
        # small local helper: all fixtures live under rundir
        return imio.read_h5_stack(os.path.join(rundir, relpath))

    ws_test = _load('example-data/test-ws.lzf.h5')
    gt_test = _load('example-data/test-gt.lzf.h5')
    seg_test1 = _load('example-data/test-seg1.lzf.h5')
    seg_test4 = _load('example-data/test-seg4.lzf.h5')
    result = np.vstack([ev.split_vi(seg, gt_test)
                        for seg in (ws_test, seg_test1, seg_test4)])
    expected = np.load(os.path.join(rundir, 'example-data/vi-results.npy'))
    assert_allclose(result, expected, atol=1e-6)
Ejemplo n.º 6
0
def test_learned_agglo_4channel():
    """Agglomeration with a learned 4-channel policy reproduces the stored seg."""
    rf4 = classify.load_classifier('example-data/rf4.joblib')
    policy = agglo.classifier_probability(fc, rf4)
    graph = agglo.Rag(ws_test, p4_test, policy, feature_manager=fc)
    graph.agglomerate(0.5)
    expected = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    assert_array_equal(graph.get_segmentation(), expected)
Ejemplo n.º 7
0
def test_learned_agglo_1channel():
    """Agglomeration with a learned 1-channel policy reproduces the stored seg."""
    rf = classify.load_classifier('example-data/rf1.joblib')
    policy = agglo.classifier_probability(fc, rf)
    graph = agglo.Rag(ws_test, pr_test, policy, feature_manager=fc)
    graph.agglomerate(0.5)
    expected = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    assert_array_equal(graph.get_segmentation(), expected)
Ejemplo n.º 8
0
def add_gradient_channel(traintest, size, volume_id, cues_id, datatype, group):
    """Append a 2-D derivative channel to a cue volume and write it back out.

    The new id suffixes DERIVATIVE_ID + the last component of the source id.
    """
    last_component = cues_id.split(ID_DELIMITER)[-1]
    new_cues_id = ID_DELIMITER.join([cues_id, DERIVATIVE_ID + last_component])
    src_paths = get_paths("XX", traintest, size, volume_id, cues_id, "XX", "XX", "XX")
    dest_paths = get_paths("XX", traintest, size, volume_id, new_cues_id, "XX", "XX", "XX")
    source = imio.read_h5_stack(src_paths[datatype], group=group)
    # concat=True: derivative channels are stacked onto the original volume
    augmented = add_2d_derivative_channel(source, concat=True)
    write_h5(augmented, dest_paths[datatype], group)
Ejemplo n.º 9
0
def test_learned_agglo_1channel():
    """Check learned agglomeration (single channel) against the reference result."""
    classifier = classify.load_classifier('example-data/rf1.joblib')
    merge_policy = agglo.classifier_probability(fc, classifier)
    test_graph = agglo.Rag(ws_test, pr_test, merge_policy, feature_manager=fc)
    test_graph.agglomerate(0.5)
    seg_test1 = test_graph.get_segmentation()
    reference = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    assert_array_equal(seg_test1, reference)
Ejemplo n.º 10
0
def test_learned_agglo_4channel():
    """Check learned agglomeration (4 channels) against the reference result."""
    classifier = classify.load_classifier('example-data/rf4.joblib')
    merge_policy = agglo.classifier_probability(fc, classifier)
    test_graph = agglo.Rag(ws_test, p4_test, merge_policy, feature_manager=fc)
    test_graph.agglomerate(0.5)
    seg_test4 = test_graph.get_segmentation()
    reference = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    assert_array_equal(seg_test4, reference)
Ejemplo n.º 11
0
def test_segment_with_classifier_4_channel():
    """Segment with a tar-packed 4-channel classifier; VI vs. expected must be 0."""
    fn = os.path.join(rundir, 'example-data/rf4-py3.joblib')
    # classifier is shipped tarred; extract to a temp file before loading
    with tar_extract(fn) as fn:
        rf = joblib.load(fn)
    policy = agglo.classifier_probability(fc, rf)
    graph = agglo.Rag(ws_test, p4_test, policy, feature_manager=fc)
    graph.agglomerate(0.5)
    seg_expected = imio.read_h5_stack(
        os.path.join(rundir, 'example-data/test-seg-4.lzf.h5'))
    assert_allclose(ev.vi(graph.get_segmentation(), seg_expected), 0.0)
Ejemplo n.º 12
0
def test_segment_with_classifer_1_channel():
    """Segment with a 1-channel classifier and require VI of 0 vs. expected.

    NOTE(review): 'classifer' typo kept — renaming would change the public
    test name used by the test runner.
    """
    if PYTHON_VERSION == 2:
        rf = classify.load_classifier(os.path.join(rundir, "example-data/rf-1.joblib"))
    else:
        # python-3 classifiers are shipped tarred; extract before loading
        fn = os.path.join(rundir, "example-data/rf1-py3.joblib")
        with tar_extract(fn) as fn:
            rf = joblib.load(fn)
    policy = agglo.classifier_probability(fc, rf)
    graph = agglo.Rag(ws_test, pr_test, policy, feature_manager=fc)
    graph.agglomerate(0.5)
    predicted = graph.get_segmentation()
    expected = imio.read_h5_stack(os.path.join(rundir, "example-data/test-seg-1.lzf.h5"))
    assert_allclose(ev.vi(predicted, expected), 0.0)
Ejemplo n.º 13
0
def test_segment_with_classifier_4_channel():
    """Segment with a 4-channel classifier and require VI of 0 vs. expected."""
    if PYTHON_VERSION == 2:
        rf = classify.load_classifier(
            os.path.join(rundir, 'example-data/rf-4.joblib'))
    else:
        # python-3 classifiers are shipped tarred; extract before loading
        fn = os.path.join(rundir, 'example-data/rf4-py3.joblib')
        with tar_extract(fn) as fn:
            rf = joblib.load(fn)
    policy = agglo.classifier_probability(fc, rf)
    graph = agglo.Rag(ws_test, p4_test, policy, feature_manager=fc)
    graph.agglomerate(0.5)
    predicted = graph.get_segmentation()
    expected = imio.read_h5_stack(
            os.path.join(rundir, 'example-data/test-seg-4.lzf.h5'))
    assert_allclose(ev.vi(predicted, expected), 0.0)
Ejemplo n.º 14
0
def tsdata():
    """Return the (watershed, probability, ground-truth) test volumes from dd."""
    filenames = ('test-ws.lzf.h5', 'test-p1.lzf.h5', 'test-gt.lzf.h5')
    wsts, prts, gtts = [imio.read_h5_stack(os.path.join(dd, name))
                        for name in filenames]
    return wsts, prts, gtts
Ejemplo n.º 15
0
# IPython log file


from gala import imio
import numpy as np

# The four xy quadrants of the volume: full z extent, 625-pixel halves in y/x.
slices = [(slice(None), slice(None, 625), slice(None, 625)),
          (slice(None), slice(None, 625), slice(625, None)),
          (slice(None), slice(625, None), slice(None, 625)),
          (slice(None), slice(625, None), slice(625, None))]

# split the ground truth into quadrants, relabel each, and write compressed
gt = imio.read_h5_stack('ground-truth.h5', group='bodies')
gts = [gt[quadrant] for quadrant in slices]
from skimage.measure import label
for i, quad in enumerate(gts):
    out_name = 'ground-truth-%i.lzf.h5' % i
    relabeled = label(quad)  # relabel connected components within the quadrant
    print(np.max(relabeled))  # report label count per quadrant
    imio.write_h5_stack(relabeled.astype(np.uint16), out_name,
                        compression='lzf')

# split the membrane probability maps the same way (no relabelling needed)
pr = imio.read_image_stack('membrane/*.tiff')
prs = [pr[quadrant] for quadrant in slices]
for i, quad in enumerate(prs):
    out_name = 'probabilities-%i.lzf.h5' % i
    imio.write_h5_stack(quad.astype(np.uint8), out_name, compression='lzf')
Ejemplo n.º 16
0
# --enable-gen-agglomeration \
# --enable-raveler-output \
# --enable-h5-output ${datadir}/${datastem} \
# --segmentation-thresholds 0.0

# python3 for gala_20160715; # python2.7 for gala
# imports
from gala import imio, classify, features, agglo, evaluate as ev
import os

datadir='/Users/michielk/oxdata/P01/EM/M3/M3_S1_GNU'
dset_name = 'm000_01000-01500_01000-01500_00030-00460'
os.chdir(datadir)

# read in training data
gt_train = imio.read_h5_stack(dset_name + '_PA.h5')
pr_train = imio.read_h5_stack(dset_name + '_probs.h5', group='/volume/predictions')
# ws_train = imio.read_h5_stack(dset_name + '_slic_s00500_c2.000_o0.050.h5')
# FIX: the call below was missing its closing parenthesis (syntax error)
ws_train = imio.read_h5_stack(os.path.join(dset_name, 'supervoxels.h5'))

# crop all three volumes to the same 100^3 subvolume for a quick run
gt_train = gt_train[100:200,100:200,100:200]
pr_train = pr_train[100:200,100:200,100:200,:]
ws_train = ws_train[100:200,100:200,100:200]

# create a feature manager
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])

# create graph and obtain a training dataset
g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
Ejemplo n.º 17
0
def trexamples():
    """Build a (features, labels) training set by agglomerating against GT."""
    gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
    graph = trgraph()
    (X, y, w, e), _ = graph.learn_agglomerate(gt, em, min_num_epochs=5)
    # keep only the first label column
    return X, y[:, 0]
Ejemplo n.º 18
0
def tsdata():
    """Load the test watershed, membrane probability, and ground-truth stacks."""
    def _stack(name):
        return imio.read_h5_stack(os.path.join(dd, name))

    return (_stack('test-ws.lzf.h5'),
            _stack('test-p1.lzf.h5'),
            _stack('test-gt.lzf.h5'))
Ejemplo n.º 19
0
# End-to-end gala example: train a random forest on (X, y), then agglomerate
# test data with the learned policy for 1- and 4-channel probability maps.
print((X.shape, y.shape)) # standard scikit-learn input format

# train a classifier, scikit-learn syntax
rf = classify.DefaultRandomForest().fit(X, y)
# a policy is the composition of a feature map and a classifier
learned_policy = agglo.classifier_probability(fc, rf)

# get the test data and make a RAG with the trained policy
# (unpacking the map object consumes both loads)
pr_test, ws_test = (map(imio.read_h5_stack,
                        ['test-p1.lzf.h5', 'test-ws.lzf.h5']))
g_test = agglo.Rag(ws_test, pr_test, learned_policy, feature_manager=fc)
g_test.agglomerate(0.5) # best expected segmentation
seg_test1 = g_test.get_segmentation()

# the same approach works with a multi-channel probability map
p4_train = imio.read_h5_stack('train-p4.lzf.h5')
# note: the feature manager works transparently with multiple channels!
g_train4 = agglo.Rag(ws_train, p4_train, feature_manager=fc)
(X4, y4, w4, merges4) = g_train4.learn_agglomerate(gt_train, fc)[0]
# keep only the first label column — presumably the merge decision; verify
y4 = y4[:, 0]
print((X4.shape, y4.shape))
rf4 = classify.DefaultRandomForest().fit(X4, y4)
learned_policy4 = agglo.classifier_probability(fc, rf4)
p4_test = imio.read_h5_stack('test-p4.lzf.h5')
g_test4 = agglo.Rag(ws_test, p4_test, learned_policy4, feature_manager=fc)
g_test4.agglomerate(0.5)
seg_test4 = g_test4.get_segmentation()

# gala allows implementation of other agglomerative algorithms, including
# the default, mean agglomeration
# NOTE(review): the snippet is truncated here — the Rag(...) call is incomplete
g_testm = agglo.Rag(ws_test, pr_test,
Ejemplo n.º 20
0
def trdata():
    """Return the (watershed, probability, ground-truth) training volumes."""
    filenames = ('train-ws.lzf.h5', 'train-p1.lzf.h5', 'train-gt.lzf.h5')
    wstr, prtr, gttr = [imio.read_h5_stack(os.path.join(dd, name))
                        for name in filenames]
    return wstr, prtr, gttr
Ejemplo n.º 21
0
def trexamples():
    """Produce agglomeration training examples from the training ground truth."""
    ground_truth = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
    g = trgraph()
    learned, _ = g.learn_agglomerate(ground_truth, em, min_num_epochs=5)
    X, y, w, e = learned
    y = y[:, 0]  # first label column only
    return X, y
Ejemplo n.º 22
0
def trdata():
    """Load the training watershed, membrane probability, and GT stacks."""
    def _stack(name):
        return imio.read_h5_stack(os.path.join(dd, name))

    return (_stack('train-ws.lzf.h5'),
            _stack('train-p1.lzf.h5'),
            _stack('train-gt.lzf.h5'))
Ejemplo n.º 23
0
# End-to-end gala example: fit a random forest on (X, y), then segment test
# data with the learned policy for both 1- and 4-channel probability maps.
print((X.shape, y.shape))  # standard scikit-learn input format

# train a classifier, scikit-learn syntax
rf = classify.DefaultRandomForest().fit(X, y)
# a policy is the composition of a feature map and a classifier
learned_policy = agglo.classifier_probability(fc, rf)

# get the test data and make a RAG with the trained policy
# (unpacking the map object consumes both loads)
pr_test, ws_test = (map(imio.read_h5_stack,
                        ['test-p1.lzf.h5', 'test-ws.lzf.h5']))
g_test = agglo.Rag(ws_test, pr_test, learned_policy, feature_manager=fc)
g_test.agglomerate(0.5)  # best expected segmentation
seg_test1 = g_test.get_segmentation()

# the same approach works with a multi-channel probability map
p4_train = imio.read_h5_stack('train-p4.lzf.h5')
# note: the feature manager works transparently with multiple channels!
g_train4 = agglo.Rag(ws_train, p4_train, feature_manager=fc)
(X4, y4, w4, merges4) = g_train4.learn_agglomerate(gt_train, fc)[0]
# keep only the first label column — presumably the merge decision; verify
y4 = y4[:, 0]
print((X4.shape, y4.shape))
rf4 = classify.DefaultRandomForest().fit(X4, y4)
learned_policy4 = agglo.classifier_probability(fc, rf4)
p4_test = imio.read_h5_stack('test-p4.lzf.h5')
g_test4 = agglo.Rag(ws_test, p4_test, learned_policy4, feature_manager=fc)
g_test4.agglomerate(0.5)
seg_test4 = g_test4.get_segmentation()

# gala allows implementation of other agglomerative algorithms, including
# the default, mean agglomeration
# NOTE(review): the snippet is truncated here — the Rag(...) call is incomplete
g_testm = agglo.Rag(ws_test,
Ejemplo n.º 24
0
# IPython log file
# Interactive exploration: sparse-matrix -> networkx round trip, then
# back-of-envelope memory estimates and smoothing of a probability volume.
# (Bare expressions below are IPython display statements, not dead code.)

# symmetric 3x3 adjacency-style matrix
x = np.array([[0.0, 1.5, 2.7],
              [1.5, 0.0, 0.0],
              [2.7, 0.0, 0.0]])

y = sparse.csr_matrix(x)
y
import networkx as nx
# build a weighted graph from the sparse matrix and inspect edge (0, 1)
g = nx.from_scipy_sparse_matrix(y)
g[0][1]
get_ipython().run_line_magic('cd', '~/Dropbox/data1/drosophila-embryo/')
get_ipython().run_line_magic('ls', '')
from gala import imio
v = imio.read_h5_stack('embA_0.3um_Probabilities.h5')
# NOTE(review): this first line multiplies the *values* product, not the
# shape product — presumably a typo corrected on the next line
np.prod(v[..., 0]) * 8
np.prod(v[..., 0].shape) * 8
np.prod(v[..., 0].shape) * 8 / 1.9
np.prod(v[..., 0].shape) * 8 / 1e9
np.prod(v[..., 0].shape) * 2 / 1e9
v.shape
# smooth channel 0 and inspect its value distribution
smoothed_vm = filters.gaussian(v[..., 0], sigma=4)
h = plt.hist(smoothed_vm.ravel(), bins='auto');
from fast_histogram import histogram1d as hist
values = hist(smoothed_vm.ravel(), bins=255)
values = hist(smoothed_vm.ravel(), range=[0, 1], bins=255)
plt.plot(values)
np.max(smoothed_vm)
# threshold at 0.5 to get a binary mask
b = smoothed_vm > 0.5
get_ipython().run_line_magic('pwd', '')