# Sample from the training volume to get a training set

from scipy.ndimage import distance_transform_edt
import extract_features
import tiffcvt
import numpy as np
import sys
r = np.random.RandomState()
r.seed(12345)

img = tiffcvt.h5_file["ordinal_train_volume"][:,:,:]
labels = tiffcvt.train_labels[:,:,:]
blur_img = extract_features.blur_image(img)
#
# The idea here is to train on all of the membrane points, because there are
# few of them, and on all points near the labeled membrane to get a sharp
# border. After that, we pick randomly from what remains to get some sampling
# of non-membrane points, but that is less crucial.
#
L_MEMBRANE = 0                            # label value that marks membrane voxels
distance = 3                              # collect non-membrane voxels this close to the membrane
is_membrane = labels == L_MEMBRANE
coords = [np.argwhere(is_membrane)]       # coordinates of every membrane voxel
pos = np.sum(is_membrane)                 # number of positive (membrane) examples
for i in range(labels.shape[2]):
    # Non-membrane pixels within `distance` of the membrane in slice i
    d = ((distance_transform_edt(~is_membrane[:,:,i]) <= distance) &
         ~is_membrane[:,:,i])
    dc = np.argwhere(d)
    # Attach the slice index as the z coordinate so these rows line up with the
    # three-column coordinates produced by np.argwhere(is_membrane) above.
    coords.append(np.column_stack([dc.astype(int), np.ones(dc.shape[0], int) * i]))
coords = np.vstack(coords)
neg = coords.shape[0] - pos               # near-membrane negative examples collected so far
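#
# The comment above also calls for picking randomly from the remaining
# non-membrane voxels; that step is not part of this excerpt (the seeded
# RandomState `r` above is presumably meant for it). A minimal sketch under
# that assumption follows: the sample size of one random negative per
# membrane voxel is a hypothetical choice, and a few picks may duplicate
# near-membrane coordinates already collected.
#
remaining = np.argwhere(~is_membrane)             # every non-membrane voxel
n_random = min(pos, remaining.shape[0])           # assumed sample size
pick = r.choice(remaining.shape[0], n_random, replace=False)
coords = np.vstack([coords, remaining[pick]])
neg += n_random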
Example #2
# Choose the training or test volume. The condition below is an assumption
# mirroring the else branch; the original check is not shown in this excerpt.
if len(sys.argv) < 2 or sys.argv[1] != "test":
    labels_name = "%s_train_labels"
    labels_shape = tiffcvt.train_volume.shape
    img = tiffcvt.h5_file["ordinal_train_volume"][:,:,:]
else:
    labels_name = "%s_test_labels"
    labels_shape = tiffcvt.test_volume.shape
    img = tiffcvt.h5_file["ordinal_test_volume"][:,:,:]

if len(sys.argv) < 3 or sys.argv[2] != "eigentexture":
    extract_fn = extract_features
    labels_name = labels_name % "predicted"
else:
    components = tiffcvt.h5_file["components"][:,:]
    extract_fn = lambda img, bimg, indices:\
       extract_eigenfeatures(img, bimg, components, indices)
    labels_name = labels_name % "eigenpredicted"
    
predicted = tiffcvt.h5_file.require_dataset(labels_name,
                                            labels_shape,
                                            np.float32,
                                            chunks=(64,64,1))
bimg = blur_image(img)
# Classify the volume in 64 x 64 tiles, one z-slice at a time
for i in range(0, img.shape[0], 64):
    for j in range(0, img.shape[1], 64):
        for k in range(img.shape[2]):
            # Coordinates of every pixel in the current 64 x 64 tile
            coords = np.mgrid[i:(i+64), j:(j+64), k:(k+1)].reshape(3, 64*64).transpose()
            features = extract_fn(img, bimg, coords)
            # Probability of the membrane class for each pixel in the tile
            score = clf.predictProbabilities(features)[:, 1]
            score.shape = (64, 64)
            predicted[i:(i+64), j:(j+64), k] = score
            print "Finished block %d, %d, %d" % (i, j, k)
tiffcvt.h5_file.close()
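#
# Downstream use of the predictions is not shown in this excerpt. A minimal
# sketch of reading them back and thresholding into a binary membrane mask;
# the HDF5 filename and the 0.5 cutoff are assumptions for illustration, and
# the dataset name follows the labels_name pattern above ("predicted_..." or
# "eigenpredicted_..." depending on the command-line arguments).
#
import h5py
with h5py.File("volume.h5", "r") as f:             # hypothetical filename
    prob = f["predicted_train_labels"][:, :, :]    # per-voxel membrane probability
membrane_mask = prob >= 0.5                        # assumed probability cutoff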