def predict_multiview(img_vol, weights_file, orientation='axial', tu_thresh=0.1, k_thresh=0.2, tch_only=False):
    """
    Given a volume `vol` and the weights to use, along with the 
    desired orientation, predict an output volume and return it.
    """

    model = unet.get_unet() if not tch_only else unet.get_unet_Tch()
    model.load_weights(weights_file) # load weights

    # Transform the volume to the correct orientation.
    img_vol = reorient_volume(img_vol, orientation)

    # Resize so that each slice is 512x512:
    orig_shape = img_vol.shape
    img_vol    = resize_volume_slices(img_vol, target_shape=512)
    pred_vol   = np.zeros(img_vol.shape, dtype='uint8')
    
    if len(img_vol.shape) < 4:
        img_vol = np.expand_dims(img_vol, axis=3)

    # Now we have the image volume. Predict in batches:
    n_batches = int(np.ceil(img_vol.shape[0] / BATCH_SIZE))
    # Batch up the slices and feed them through.
    min_pred, max_pred = [], []
    for batch_idx in range(n_batches):
        # print("    Processing batch {} of {}.".format(batch_idx+1, n_batches))
        min_idx = batch_idx * BATCH_SIZE
        max_idx = min((batch_idx + 1) * BATCH_SIZE, img_vol.shape[0])
        pred_batch = model.predict(img_vol[min_idx:max_idx,...])
        min_pred.append(pred_batch.min())
        max_pred.append(pred_batch.max())
        if not tch_only:
            pred_batch_i = np.greater(pred_batch[..., 0], k_thresh).astype('uint8')
            pred_batch_i[np.greater(pred_batch[..., 1], tu_thresh)] = 2
        else:
            pred_batch_i = np.greater(pred_batch[..., 0], tu_thresh).astype('uint8') * 2
        pred_vol[min_idx:max_idx,...] = pred_batch_i
    
    # Reshape to match the original:
    pred_vol = restore_volume_to_shape(pred_vol, orig_shape, is_mask=True)
    # And return to the original orientation:
    pred_vol = reorient_to_axial(pred_vol, orientation)
    
    return pred_vol
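
# A minimal usage sketch for predict_multiview (hypothetical paths; not part of the
# original snippet). Per the thresholds above, the returned volume holds 0 for
# background, 1 for the k_thresh class and 2 for the tu_thresh class.
import numpy as np

img_vol = np.load("case_00000_imaging.npy")            # (slices, H, W) volume
pred_vol = predict_multiview(img_vol,
                             weights_file="unet_axial.h5",
                             orientation="axial",
                             tu_thresh=0.1,
                             k_thresh=0.2)
np.save("case_00000_prediction.npy", pred_vol)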
Example #2
    def __init__(self,
                 model_name,
                 input_size=(224, 224),
                 patience=20,
                 input_channels=3):
        self.input_size = input_size
        self.unet = get_unet(input_size=input_size,
                             input_channels=input_channels)
        self.callbacks = [
            LossHistory(),
            ModelCheckpoint(f"../weights/{model_name}.best.weights.h5py",
                            save_best_only=True,
                            verbose=1,
                            monitor="val_dice_coef",
                            mode='max'),
            EarlyStopping(monitor="val_dice_coef",
                          patience=patience,
                          mode='max')
        ]
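
# Hypothetical instantiation of the wrapper whose __init__ is shown above; the enclosing
# class name is not included in the snippet, so UnetTrainer is a placeholder, and
# train_images/train_masks/val_images/val_masks stand in for the real training arrays
# (assumes get_unet returns a compiled Keras model).
trainer = UnetTrainer(model_name="unet_baseline",
                      input_size=(224, 224),
                      patience=20,
                      input_channels=3)
trainer.unet.fit(train_images, train_masks,
                 validation_data=(val_images, val_masks),
                 epochs=100,
                 callbacks=trainer.callbacks)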
Example #3
def test(start, video, tag):
    md = get_unet()
    md.load_weights('ynet_single_video.hdf5')

    h, hy = gd(start=start,
               end=start + 10,
               main_dir='../data/data',
               video=video)

    hp = md.predict(h)

    if not os.path.exists(tag):
        os.makedirs(tag)

    i = start
    for p, g in zip(hp, hy):
        save(p.reshape((224, 224)), i, tag)
        save(g.reshape((224, 224)), i, tag + 'gt')
        i += 1
    print("Done")
Example #4
def test(start):
    md = get_unet()
    md.load_weights('enet_14_3.hdf5')

    h, hy = gd(start=start, end=start + 10, folder='_ (11)')

    print("predicting")

    hp = md.predict(h)

    i = start
    for p, gt in zip(hp, hy):
        plt.imshow(p.reshape((224, 224)), cmap='gray')
        plt.savefig('out\\' + str(i) + '.png')

        plt.imshow(gt.reshape((224, 224)), cmap='gray')
        plt.savefig('out\\gt' + str(i) + '.png')

        print('out\\' + str(i) + '.png')
        i += 1

    print("Done")
Example #5
def train(epochs):
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = unet.get_unet(dropout=False)
    if transfer_learning:
        model.load_weights("transfer_learning/new.hdf5", by_name=True)
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)

    X, y = load_train_data()
    print(X.shape)
    imgs_train, imgs_valid, imgs_mask_train, imgs_mask_valid = train_test_split(
        X, y, test_size=0.2, random_state=1)
    
    print('-'*30)
    print('Evaluating transfer learning.')
    print('-'*30)
    
    valloss = model.evaluate(x=imgs_valid, y=imgs_mask_valid, batch_size=10, verbose=0)
    with open("runs/history.txt", "a") as fout:
        fout.write("valid\t%d\t%.4f\n" % (0, valloss[1]))
    filepath = "runs/weights_%d_%d.hdf5" % (img_rows, img_cols)
    checkpoint = ModelCheckpoint(filepath, save_best_only=True, monitor="val_dice_coef", mode="max")
    
    history = LossHistory()
    
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    if apply_augmentation:
        model.fit_generator(data_generator(imgs_train, imgs_mask_train),
                            validation_data=(imgs_valid, imgs_mask_valid),
                            steps_per_epoch=100, epochs=epochs, verbose=1,
                            callbacks=[history, checkpoint])
    else:
        model.fit(imgs_train, imgs_mask_train, batch_size=batch_size,
                  validation_data=(imgs_valid, imgs_mask_valid),
                  epochs=epochs, verbose=1, callbacks=[history, checkpoint])
    return model
Example #6
BATCH_SIZE = 32
LEARNING_RATE = 1e-4
LOG_NUM = 1
EPOCHS = 10

# set paths
train_data_dir = '../data/train_data'
validation_data_dir = '../data/validation_data'

train_dataset = get_train_dataset(train_data_dir, PATCH_SIZE, STEP, BATCH_SIZE,
                                  ALPHA)
validation_dataset = get_train_dataset(validation_data_dir, PATCH_SIZE,
                                       PATCH_SIZE, BATCH_SIZE, ALPHA)

# get unet model
model = get_unet(PATCH_SIZE, DROPOUT_RATE)
model.compile(optimizer=tf.keras.optimizers.Adam(lr=LEARNING_RATE),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# set checkpoint
ckpt_dir = 'ckpt_{}'.format(LOG_NUM)
os.makedirs(ckpt_dir, exist_ok=True)
ckpt_file = os.path.join(ckpt_dir, 'cp.ckpt')

ckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file,
                                                   save_weights_only=True,
                                                   verbose=1)

# train model
model.fit(train_dataset,
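
# The call above is cut off in the snippet; a hedged completion under the same names
# (the validation/checkpoint wiring is an assumption, not confirmed by the original):
model.fit(train_dataset,
          epochs=EPOCHS,
          validation_data=validation_dataset,
          callbacks=[ckpt_callback])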
Example #7
from keras.callbacks import ModelCheckpoint

from data import load_train_data
from utils import *

create_paths()
log_file = open(global_path + "logs/log_file.txt", 'a')

# CEAL data definition
X_train, y_train = load_train_data()
labeled_index = np.arange(0, nb_labeled)
unlabeled_index = np.arange(nb_labeled, len(X_train))

# (1) Initialize model
from unet import get_unet, unet
model = get_unet(dropout=True)
# model.load_weights(initial_weights_path)

# from deeplabv3 import Deeplabv3
# from keras.optimizers import Adam
# from unet import dice_coef, dice_coef_loss
# model = Deeplabv3(input_shape=(224,224,1), classes=10, weights=None)
# model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])

if initial_train:
    print("INITIAL TRAINING")
    model_checkpoint = ModelCheckpoint(initial_weights_path,
                                       monitor='loss',
                                       save_best_only=True)

    if apply_augmentation:
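        # Hedged continuation of the branch above (the snippet stops here), modeled on the
        # similar training code in Examples #5 and #17; data_generator, nb_initial_epochs
        # and the batch size are assumptions, not confirmed by the original.
        model.fit_generator(data_generator(X_train[labeled_index], y_train[labeled_index]),
                            steps_per_epoch=100,
                            epochs=nb_initial_epochs,
                            verbose=1,
                            callbacks=[model_checkpoint])
    else:
        model.fit(X_train[labeled_index], y_train[labeled_index],
                  batch_size=32,
                  epochs=nb_initial_epochs,
                  verbose=1,
                  callbacks=[model_checkpoint])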
Example #8
def train(epochs):
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = unet.get_unet()
    model.load_weights("deeplearning/new.hdf5", by_name=True)
    lr = 1e-5
    model.compile(optimizer=Adam(lr=lr),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train = np.load("prepdata/npyarrays/imgs_train.npy")
    imgs_mask_train = np.load("prepdata/npyarrays/imgs_mask_train.npy")

    imgs_valid = np.load("prepdata/npyarrays/imgs_valid.npy")
    imgs_mask_valid = np.load("prepdata/npyarrays/imgs_mask_valid.npy")

    imgs_train = imgs_train.astype('float32')
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_valid = imgs_valid.astype('float32')
    imgs_mask_valid = imgs_mask_valid.astype('float32')

    # Standardize the images with what appear to be precomputed training-set mean and std,
    # and rescale the masks to [0, 1].
    imgs_train -= 28.991758347
    imgs_train /= 46.875888824

    imgs_valid -= 28.991758347
    imgs_valid /= 46.875888824

    imgs_mask_train /= 255.
    imgs_mask_valid /= 255.

    print('-' * 30)
    print('Evaluating transfer learning.')
    print('-' * 30)

    valloss = model.evaluate(x=imgs_valid,
                             y=imgs_mask_valid,
                             batch_size=10,
                             verbose=1)
    with open("runs/history.txt", "a") as fout:
        fout.write("valid\t%d\t%.4f\n" % (0, valloss[1]))
    filepath = "runs/weights.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 save_best_only=True,
                                 monitor="val_dice_coef",
                                 mode="max")

    history = LossHistory()

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(x=imgs_train,
              y=imgs_mask_train,
              validation_data=(imgs_valid, imgs_mask_valid),
              batch_size=10,
              epochs=epochs,
              verbose=1,
              callbacks=[history, checkpoint])
Example #9
def compute_train_sets(X_train, y_train, labeled_index, unlabeled_index, weights, iteration):
    """
    Performs the Cost-Effective Active Learning labeling step, giving the available training data for each iteration.
    :param X_train: Overall training data.
    :param y_train: Overall training labels. Including the unlabeled samples to simulate the oracle annotations.
    :param labeled_index: Index of labeled samples.
    :param unlabeled_index: Index of unlabeled samples.
    :param weights: pre-trained unet weights.
    :param iteration: Currently CEAL iteration.

    :return: X_labeled_train: Update of labeled training data, adding the manual and pseudo annotations.
    :return: y_labeled_train: Update of labeled training labels, adding the manual and pseudo annotations.
    :return: labeled_index: Update of labeled index, adding the manual annotations.
    :return: unlabeled_index: Update of labeled index, removing the manual annotations.

    """
    print("\nActive iteration " + str(iteration))
    print("-" * 50 + "\n")

    # load models
    modelUncertain = get_unet(dropout=True)
    modelUncertain.load_weights(weights)
    modelPredictions = get_unet(dropout=False)
    modelPredictions.load_weights(weights)

    # modelUncertain = Deeplabv3(input_shape=(1,224,224), classes=10)
    # modelUncertain.load_weights(weights)
    # modelPredictions = Deeplabv3(input_shape=(1,224,224), classes=10)
    # modelPredictions.load_weights(weights)

    # predictions
    print("Computing log predictions ...\n")
    predictions = predict(X_train[unlabeled_index], modelPredictions)

    uncertain = np.zeros(len(unlabeled_index))
    accuracy = np.zeros(len(unlabeled_index))

    print("Computing train sets ...")
    for index in range(0, len(unlabeled_index)):

        if index % 100 == 0:
            print("completed: " + str(index) + "/" + str(len(unlabeled_index)))

        sample = X_train[unlabeled_index[index]].reshape([1, 3, img_rows, img_cols])

        sample_prediction = cv2.threshold(predictions[index], 0.5, 1, cv2.THRESH_BINARY)[1].astype('uint8')

        accuracy[index] = compute_dice_coef(y_train[unlabeled_index[index]][0], sample_prediction)
        uncertain[index] = compute_uncertain(sample, sample_prediction, modelUncertain)

    np.save(global_path + "logs/uncertain" + str(iteration), uncertain)
    np.save(global_path + "logs/accuracy" + str(iteration), accuracy)

    oracle_index = get_oracle_index(uncertain, nb_no_detections, nb_random, nb_most_uncertain,
                                    most_uncertain_rate)

    oracle_rank = unlabeled_index[oracle_index]

    np.save(global_path + "ranks/oracle" + str(iteration), oracle_rank)
    np.save(global_path + "ranks/oraclelogs" + str(iteration), oracle_index)

    labeled_index = np.concatenate((labeled_index, oracle_rank))

    if (iteration >= pseudo_epoch):

        pseudo_index = get_pseudo_index(uncertain, nb_pseudo_initial + (pseudo_rate * (iteration - pseudo_epoch)))
        pseudo_rank = unlabeled_index[pseudo_index]

        np.save(global_path + "ranks/pseudo" + str(iteration), pseudo_rank)
        np.save(global_path + "ranks/pseudologs" + str(iteration), pseudo_index)

        X_labeled_train = np.concatenate((X_train[labeled_index], X_train[pseudo_index]))
        y_labeled_train = np.concatenate((y_train[labeled_index], predictions[pseudo_index]))

    else:
        X_labeled_train = np.concatenate((X_train[labeled_index])).reshape([len(labeled_index), 3, img_rows, img_cols])
        y_labeled_train = np.concatenate((y_train[labeled_index])).reshape([len(labeled_index), 1, img_rows, img_cols])

    unlabeled_index = np.delete(unlabeled_index, oracle_index, 0)

    return X_labeled_train, y_labeled_train, labeled_index, unlabeled_index
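
# Hedged sketch of the outer CEAL loop that would call compute_train_sets; nb_iterations,
# nb_active_epochs, initial_weights_path and the retraining step are assumptions and are
# not shown in the snippet (global_path and get_unet are used as in the function above).
weights = initial_weights_path
for iteration in range(1, nb_iterations + 1):
    X_labeled_train, y_labeled_train, labeled_index, unlabeled_index = compute_train_sets(
        X_train, y_train, labeled_index, unlabeled_index, weights, iteration)

    model = get_unet(dropout=True)
    model.load_weights(weights)
    model.fit(X_labeled_train, y_labeled_train,
              batch_size=32, epochs=nb_active_epochs, verbose=1)

    weights = global_path + "models/active_weights_" + str(iteration) + ".h5"
    model.save_weights(weights)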
Example #10
def gen_black_image(path):
    img = np.zeros([512, 512, 1], dtype=np.uint8)
    img.fill(0)
    cv2.imwrite(path, img)
    return path


"""
################ The main code flow to call appropriate functions ##########################
"""
if len(sys.argv) < 2:
    sys.exit(0)
cmd = sys.argv[1]
if cmd == "1":
    binary_model = get_unet(n_filters=16, dropout=0.05, batchnorm=True)
    modelname = "unet"
elif cmd == "2":
    binary_model = get_resnet(f=16, bn_axis=3, classes=1)
    modelname = "resnet"
elif cmd == "3":
    binary_model = get_segnet()
    modelname = "segnet"
else:
    binary_model = get_deeplab()
    modelname = "deeplab"
X_train, Y_train = get_class_for_generator("Train")
X_train, X_test, y_train, y_test = train_test_split(X_train,
                                                    Y_train,
                                                    test_size=0.30)
X_train = np.array(X_train)
Example #11
d_height = data.shape[1]
d_width = data.shape[2]

# partition the data into training and testing splits; note that train_size=0.004
# below keeps only a small fraction of the data for training

trainX, testX, trainY, testY = train_test_split(data,
                                                mask4,
                                                train_size=0.004,
                                                random_state=42)

# initialize the model using a sigmoid activation as the final layer

print("[INFO] compiling model...")
input_data = Input((d_height, d_width, 1), name='data')
model = get_unet(input_data, n_filters=16, dropout=0.05, batchnorm=True)

# initialize the optimizer
opt = Adam(lr=config.learn_rate, decay=config.learn_rate / config.epochs)
#decay = config.learn_rate/config.epochs
#opt = SGD(lr=config.learn_rate)
#opt = RMSprop(lr=config.learn_rate)

model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

#print("[INFO] summary of model...")
#print(model.summary())

callbacks = [
    WandbCallback(),
    EarlyStopping(patience=50, verbose=1, monitor='val_loss'),
Example #12
import os
import numpy as np
import tensorflow as tf
from glob import glob
from PIL import Image
from unet import get_unet
from utils import *

PATCH_SIZE = 128
LOG_NUM = 1

# load unet model
ckpt_dir = 'ckpt_{}'.format(LOG_NUM)
latest_ckpt = tf.train.latest_checkpoint(ckpt_dir)

model = get_unet(PATCH_SIZE)
model.load_weights(latest_ckpt)

# set paths for prediction
test_data_dir = '../data/test_data'
prediction_dir = os.path.join(test_data_dir, 'predictions')
os.makedirs(prediction_dir, exist_ok=True)
test_image_files = glob(os.path.join(test_data_dir, 'images/*'))

# save predictions
for image_file in test_image_files:
    file_name = os.path.basename(image_file)
    image = np.array(Image.open(image_file)) / 255  # rescale the image
    old_size = image.shape[0:2]
    image_block_patches = make_block_patches(image, PATCH_SIZE)
    block_coords = list(image_block_patches.keys())
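    # Hedged continuation of the loop above: assumes make_block_patches returns a dict of
    # {(row, col): patch} keyed by top-left pixel coordinates (the real helper lives in
    # utils and is not shown) and that the model predicts one output channel per patch.
    prediction = np.zeros(old_size, dtype=np.float32)
    for (row, col) in block_coords:
        patch = image_block_patches[(row, col)]
        patch_pred = model.predict(patch[np.newaxis, ..., np.newaxis])[0, ..., 0]
        prediction[row:row + PATCH_SIZE, col:col + PATCH_SIZE] = patch_pred

    Image.fromarray((prediction * 255).astype(np.uint8)).save(
        os.path.join(prediction_dir, file_name))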
Example #13
def preprocess(imgs):
    #imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)
    #imgs_p = np.ndarray((imgs.shape[0], size), dtype=np.uint8)
    imgs_p = imgs
    #for i in range(imgs.shape[0]):
    # imgs_p[i] = resize(imgs[i], (img_cols, img_rows), preserve_range=True)
    #    imgs_p[i] = resize(imgs[i], size, preserve_range=True)

    imgs_p = imgs_p[..., np.newaxis]
    return imgs_p


import unet
import random
model = unet.get_unet()
import GS_split

train_x_path = '/mnt/ibrixfs01-MRI/analysis/washen/temp/train/T1T2/'
train_y_path = '/mnt/ibrixfs01-MRI/analysis/washen/temp/train/Segmentation/'


def generate_data(train_line, batch_size):
    """Replaces Keras' native ImageDataGenerator."""
    i = 0
    while True:
        image_batch = []
        label_batch = []

        for b in range(batch_size):
            if i == len(train_line):
Example #14
    # Normalize the test images and labels
    X_test = imgs.astype('float32')/255
    print('X_test original shape: '+str(X_test.shape))
    Y_test = label.astype('float32')/255


    # Crop the test image row-major into patches, giving an array of shape (144, 48, 48)
    patches = []
    for i in range(resize_height//dx):
        for j in range(resize_width//dx):
            patches.append(X_test[i*dx:(i+1)*dx, j*dx:(j+1)*dx])
    X_test = np.array(patches)[:,np.newaxis,...] # add a channel dimension -> (144, 1, 48, 48)
    print('input shape: '+str(X_test.shape))

    # Load the model and weights, then predict
    model = get_unet(1,dx,dx)
    model.load_weights('best_weights.h5')
    Y_pred = model.predict(X_test)
    print('predict shape: '+str(Y_pred.shape)) # the prediction has shape (N_patches, patch_height*patch_width, 2)

    # Stitch the predicted numpy array back together and display it
    Y_pred = Y_pred[..., 0]  # binary segmentation: take the foreground channel; Y_pred is now (144, 2304), with the 144 patches in row-major order

    # Stitch the predictions back together: reassemble (144, 2304) into (576, 576)
    t=0
    image = np.zeros((resize_height,resize_width))
    for i in range(resize_height//dx):
        for j in range(resize_width//dx):
            temp = Y_pred[t].reshape(dx,dx)
            image[i*dx:(i+1)*dx, j*dx:(j+1)*dx] = temp
            t = t+1
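
    # Hedged follow-up (the comment above says the stitched result is displayed): show the
    # reassembled prediction. Assumes matplotlib.pyplot is imported as plt in this script.
    plt.figure()
    plt.imshow(image, cmap='gray')
    plt.title('stitched prediction')
    plt.show()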
Example #15
def main():
    args = get_args()
    model = unet.get_unet()

    if args.kt_t_mask_format:
        print("\nUSING KT/T MASK FORMAT\n")

    if args.existing_weights is not None and os.path.isfile(
            args.existing_weights):
        print("\nResuming training from {} - Epoch {} to {}".format(
            args.existing_weights, args.resume_from_epoch, args.n_epochs))
        model.load_weights(args.existing_weights, by_name=True)

    train_meta = pd.read_csv(args.train_info_file)
    # Pull off 20% for test set
    train_meta = train_meta.sort_values(by="pid")
    pids = np.asarray(train_meta["pid"].unique())
    np.random.shuffle(pids)
    train_set_size = int(round(len(pids) * 0.8))
    train_pids, test_pids = pids[:train_set_size], pids[train_set_size:]
    train_train_meta = train_meta[train_meta["pid"].isin(train_pids)]
    train_test_meta = train_meta[train_meta["pid"].isin(test_pids)]

    checkpoint_name = (
        "unet_KiTS{}__{}".format("" if not args.kt_t_mask_format else "~KT-T",
                                 args.orientation) +
        "_e{epoch:02d}-l{dice_coef_loss:.2f}-vl{val_dice_coef_loss:.2f}.h5")
    callbacks = [
        keras.callbacks.TensorBoard(log_dir="./logs",
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=False)
    ]
    if not args.save_last_only:
        callbacks.append(
            keras.callbacks.ModelCheckpoint(
                os.path.join("./checkpoints", checkpoint_name),
                verbose=0,
                save_weights_only=True,
            ))

    print("Training data head:\n{}".format(train_train_meta.head()))

    print("\nVal data head:\n{}".format(train_test_meta.head()))

    if args.orientation.lower()[0] == "a":
        print(
            "\nAXIAL orientation using slice-oriented generator for speed.\n")
        # Axial model uses the faster slice-oriented generator:
        train_gen = NumpyMaskedAxialSliceGenerator(
            train_train_meta,
            base_dir=args.base_dir,
            img_dir=args.image_dir,
            mask_dir=args.mask_dir,
            batch_size=args.batch_size,
            id_col_name="pid",
            slc_col_name="slice",
            class_col_name="has_either",
            kt_t_mask_format=args.kt_t_mask_format,
        )

        test_gen = NumpyMaskedAxialSliceGenerator(
            train_test_meta,
            base_dir=args.base_dir,
            img_dir=args.image_dir,
            mask_dir=args.mask_dir,
            batch_size=args.batch_size,
            id_col_name="pid",
            slc_col_name="slice",
            class_col_name="has_either",
            kt_t_mask_format=args.kt_t_mask_format,
        )

    else:
        train_gen = NumpyMaskedVolumeGenerator(
            train_train_meta,
            base_dir=args.base_dir,
            img_dir=args.image_dir,
            mask_dir=args.mask_dir,
            batch_size=args.batch_size,
            slices_per_sample=1,
            orientation=args.orientation,
            id_col_name="pid",
            kt_t_mask_format=args.kt_t_mask_format,
        )

        test_gen = NumpyMaskedVolumeGenerator(
            train_test_meta,
            base_dir=args.base_dir,
            img_dir=args.image_dir,
            mask_dir=args.mask_dir,
            batch_size=args.batch_size,
            slices_per_sample=1,
            orientation=args.orientation,
            id_col_name="pid",
            kt_t_mask_format=args.kt_t_mask_format,
        )

    model.fit_generator(
        train_gen,
        steps_per_epoch=len(train_gen) if args.steps is None else args.steps,
        nb_epoch=args.n_epochs,
        validation_data=test_gen,
        validation_steps=100,
        callbacks=callbacks,
        initial_epoch=args.resume_from_epoch,
    )
    if args.save_last_only:
        model.save_weights(os.path.join(".", "weights.h5"))
Example #16
def train(batch):

    #===========================================
    dv.section_print('Calculating Image Lists...')

    imgs_list_trn = [
        np.load(fs.img_list(p, 'ED_ES')) for p in range(cg.num_partitions)
    ]
    segs_list_trn = [
        np.load(fs.seg_list(p, 'ED_ES')) for p in range(cg.num_partitions)
    ]

    if batch is None:
        print('No batch was provided: training on all images.')
        batch = 'all'

        imgs_list_trn = np.concatenate(imgs_list_trn)
        segs_list_trn = np.concatenate(segs_list_trn)

        imgs_list_tst = imgs_list_trn
        segs_list_tst = segs_list_trn

    else:
        imgs_list_tst = imgs_list_trn.pop(batch)
        segs_list_tst = segs_list_trn.pop(batch)

        imgs_list_trn = np.concatenate(imgs_list_trn)
        segs_list_trn = np.concatenate(segs_list_trn)

    #===========================================
    dv.section_print('Creating and compiling model...')

    shape = cg.dim + (1, )
    # cg.batch_size = 1
    model_inputs = [Input(shape)]

    _, _, output = unet.get_unet(
        cg.dim,
        cg.num_classes,
        cg.conv_depth,
        0,  # Stage
        dimension=len(cg.dim),
        unet_depth=cg.unet_depth,
    )(model_inputs[0])

    model_outputs = [output]
    with tf.device("/cpu:0"):
        models = Model(
            inputs=model_inputs,
            outputs=model_outputs,
        )

#    # https://github.com/avolkov1/keras_experiments/blob/master/examples/mnist/mnist_tfrecord_mgpu.py
#    model = make_parallel(model, get_available_gpus())

    print(cg.batch_size)
    # cbk = MyCbk(models,batch)
    # saved_model="/media/McVeighLab/projects/SNitesh/datasetsall-classes-all-phases-1.5/model_batch_all.hdf5"
    # if(os.path.isfile(saved_model)):
    #   models.load_weights(fs.model(batch), by_name=True)

    # model = multi_gpu_model(models, gpus=2)
    model = models
    opt = Adam(lr=1e-3)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    #===========================================
    dv.section_print('Fitting model...')

    # callbacks = [
    #              LearningRateScheduler(ut.step_decay),cbk
    #             ]
    callbacks = [
        ModelCheckpoint(
            fs.model(batch),
            monitor='val_loss',
            save_best_only=True,
        ),
        LearningRateScheduler(ut.step_decay),
    ]
    # Training Generator
    datagen = ImageDataGenerator(
        3,  # Dimension of input image
        translation_range=cg.xy_range,  # randomly shift images vertically (fraction of total height)
        # rotation_range=0.0,  # randomly rotate images in the range (degrees, 0 to 180)
        scale_range=cg.zm_range,
        flip=cg.flip,
    )

    datagen_flow = datagen.flow(
        imgs_list_trn,
        segs_list_trn,
        batch_size=cg.batch_size,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
        augment=True,
    )

    valgen = ImageDataGenerator(
        3,  # Dimension of input image
    )
    print(cg.dim)

    valgen_flow = valgen.flow(
        imgs_list_tst,
        segs_list_tst,
        batch_size=cg.batch_size,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
    )
    # file_write=open("model_description","w")
    # print_summary(model, line_length=None, positions=[.33, .75, .87, 1.], print_fn=None)
    # print(model.layers)
    # print(model.inputs)
    # print(model.outputs)
    # print(model.summary(),file=file_write)
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(
        datagen_flow,
        steps_per_epoch=imgs_list_trn.shape[0] // (cg.batch_size),
        epochs=cg.epochs,
        workers=1,
        validation_data=valgen_flow,
        validation_steps=imgs_list_tst.shape[0] // (cg.batch_size),
        callbacks=callbacks,
        verbose=1,
    )
    print_summary(model, line_length=None, positions=None, print_fn=None)
Example #17
def train(epochs):
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = unet.get_unet(dropout=True)
    if transfer_learning:
        model.load_weights("transfer_learning/new.hdf5", by_name=True)
    if inner:
        model = unet.get_unet_inner(dropout=True)
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)

    #    imgs_train,imgs_mask_train = load_train_data()
    #
    #    imgs_valid,imgs_mask_valid = load_valid_data()
    #
    #    ind = np.where(np.sum(imgs_mask_train,axis=(1,2,3)))
    #
    #    # Random sample negative samples
    #    ran = np.setdiff1d(np.array(range(0,574)),ind[0])
    #    choice = np.random.choice(ran,50)
    #    final_ind = np.array(np.concatenate((ind[0], choice)))
    #
    #    imgs_mask_train=imgs_mask_train[final_ind]
    #    imgs_train=imgs_train[final_ind]
    #
    #    ind = np.where(np.sum(imgs_mask_valid,axis=(1,2,3)))
    #    # Random sample negative samples
    #    ran = np.setdiff1d(np.array(range(0,60)),ind[0])
    #    choice = np.random.choice(ran,10)
    #    final_ind = np.array(np.concatenate((ind[0], choice)))
    #
    #    imgs_mask_valid = imgs_mask_valid[final_ind]
    #    imgs_valid = imgs_valid[final_ind]
    X_train, y_train, X_valid, y_valid, X_test, y_test = load_data(
        downsample=False)
    y_train_has_mask = (y_train.reshape(y_train.shape[0], -1).max(axis=1) >
                        0) * 1
    y_valid_has_mask = (y_valid.reshape(y_valid.shape[0], -1).max(axis=1) >
                        0) * 1
    print('-' * 30)
    print('Evaluating transfer learning.')
    print('-' * 30)
    if not inner:
        valloss = model.evaluate(x=X_valid,
                                 y=y_valid,
                                 batch_size=10,
                                 verbose=0)
        with open("runs/history.txt", "a") as fout:
            fout.write("valid\t%d\t%.4f\n" % (0, valloss[1]))
        filepath = "runs/weights_%d_%d.hdf5" % (img_rows, img_cols)
        checkpoint = ModelCheckpoint(filepath,
                                     save_best_only=True,
                                     monitor="val_dice_coef",
                                     mode="max")
    else:
        filepath = "runs/weights_%d_%d_inner.hdf5" % (img_rows, img_cols)
        checkpoint = ModelCheckpoint(filepath,
                                     save_best_only=True,
                                     monitor="val_main_output_dice_coef",
                                     mode="max")

    history = LossHistory()

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    if apply_augmentation:
        model.fit_generator(data_generator(X_train, y_train),
                            validation_data=(X_valid, y_valid),
                            steps_per_epoch=5 * X_train.shape[0] // batch_size,
                            shuffle=True,
                            epochs=epochs,
                            verbose=1,
                            callbacks=[history, checkpoint])
    elif inner:
        model.fit(X_train, [y_train, y_train_has_mask],
                  validation_data=(X_valid, [y_valid, y_valid_has_mask]),
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=1,
                  shuffle=True,
                  callbacks=[checkpoint])
    else:
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  validation_data=(X_valid, y_valid),
                  shuffle=True,
                  epochs=epochs,
                  verbose=1,
                  callbacks=[history, checkpoint])

    return model, filepath
Example #18
    # Split the original training data to get training and validation set
    # to get X_train, X_val, y_train, y_val
    # YOUR CODE HERE
    X_train, X_val, y_train, y_val = train_test_split(X, Y, test_size=0.20)

    # Convert to numpy arrays
    # YOUR CODE HERE
    X_train = np.array(X_train)
    X_val = np.array(X_val)
    y_train = np.array(y_train)
    y_val = np.array(y_val)

    print(X_train.shape, y_train.shape, X_val.shape, y_val.shape)

    model = get_unet()
    batch_size = 8
    num_epochs = 50
    # Compile the model
    model.compile(loss=dice_coef_loss, optimizer=Adam(lr=0.0055), metrics=[dice_coef])

    # Create generator objects for training and validation
    # YOUR CODE HERE
    # training_batch_generator = Surface_Generator(...)
    # validation_batch_generator = Surface_Generator(...)

    num_training_samples = len(X_train)
    num_validation_samples = len(X_val)
    training_batch_generator = Surface_Generator(X_train, y_train, batch_size)
    validation_batch_generator = Surface_Generator(X_val, y_val, batch_size)
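
    # The fit call itself is not shown in the snippet; a hedged continuation using the
    # generators and sizes defined above (the step counts are assumptions based on batch_size):
    model.fit_generator(training_batch_generator,
                        steps_per_epoch=num_training_samples // batch_size,
                        epochs=num_epochs,
                        validation_data=validation_batch_generator,
                        validation_steps=num_validation_samples // batch_size,
                        verbose=1)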
Example #19
    '''
    input: true label; predicted label; smooth
    output: dice coefficient
    '''
    numerator = 2. * np.sum(np.multiply(y_true, y_pred))
    denominator = np.sum(y_pred + y_true)
    return (numerator + smooth) / (denominator + smooth)


def f(element, thresh):
    return 1 if element >= thresh else 0


f = np.vectorize(f)

model0 = unet.get_unet()
model0.load_weights('weights_0.h5')

model1 = unet.get_unet()
model1.load_weights('weights_1.h5')

model2 = unet.get_unet()
model2.load_weights('weights_2.h5')

model3 = unet.get_unet()
model3.load_weights('weights_3.h5')

model4 = unet.get_unet()
model4.load_weights('weights_4.h5')

NEW = open('eva_cv1.txt', 'w')
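
# Hedged sketch of how the five cross-validation models above might be ensembled;
# X_test, Y_test, the name of the dice helper defined above and the 0.5 threshold
# are assumptions, not shown in the snippet.
models = [model0, model1, model2, model3, model4]
pred = np.mean([m.predict(X_test) for m in models], axis=0)
pred_bin = f(pred, 0.5)

for i in range(len(pred_bin)):
    NEW.write(str(i) + '\t' + str(dice_coef(Y_test[i], pred_bin[i])) + '\n')
NEW.close()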
Example #20
from __future__ import print_function

from train import *

import unet

K.set_image_dim_ordering('th')  # Theano dimension ordering in this code

history, model_weights = train(50)

model = unet.get_unet(dropout=True)

if inner:
    model = unet.get_unet_inner(dropout=True)

model.load_weights(model_weights)

plot_loss_history(history)

test, uncertainty = evaluate_model(model)

Example #21
## col feature
list_feature_common = []
for the_assay in args.target:
    for i in np.arange(1, 52):
        the_cell = 'C' + '%02d' % i
        the_id = the_cell + the_assay
        if (the_cell not in test_all) and os.path.isfile(path1 + the_id +
                                                         '.bigwig'):
            list_feature_common.append(the_id)

# model
num_class = len(args.target)
num_channel = 4 + len(list_feature_train) * 2 + len(list_feature_common) * 2
name_model = 'weights_1.h5'
model = unet.get_unet(the_lr=1e-4,
                      num_class=num_class,
                      num_channel=num_channel,
                      size=size)
#model.load_weights(name_model)
model.summary()

# load pyBigWig
dict_label_train = {}
for the_id in list_label_train:
    dict_label_train[the_id] = pyBigWig.open(path1 + 'gold_anchored_' +
                                             the_id + '.bigwig')
dict_label_vali = {}
for the_id in list_label_vali:
    dict_label_vali[the_id] = pyBigWig.open(path1 + 'gold_anchored_' + the_id +
                                            '.bigwig')
#dict_label_test={}
#for the_id in list_label_test:
Example #22
    parser.add_argument('--s', metavar='S', type=int, 
                        help='Skip first S samples')

    args = parser.parse_args()

    # create the output directories if they don't already exist
    try:
        os.mkdir(os.path.join(args.odir))
        os.mkdir(os.path.join(args.pdir))
    except:
        pass
    
    SIDE = 224

    # create the model and load weights from --mpath
    model = unet.get_unet(SIDE, SIDE)
    model.load_weights(args.mpath)

    # reads available paths from idir
    paths = glob(os.path.join(args.idir, '*'))

    if args.s:
        paths = paths[args.s:]
    if args.n:
        paths = paths[:args.n]

    BATCH_SIZE = 32
    if args.batch_size:
        BATCH_SIZE = args.batch_size

    j = 1
Example #23
    list_feature_test.append(cell_test + the_assay)

## col feature
list_feature_common = []
for the_assay in args.target:
    for i in np.arange(1, 52):
        the_cell = 'C' + '%02d' % i
        the_id = the_cell + the_assay
        if (the_cell not in exclude_all) and os.path.isfile(path1 + the_id +
                                                            '.bigwig'):
            list_feature_common.append(the_id)

# model
num_class = len(args.target)
num_channel = 4 + len(list_feature_test) * 2 + len(list_feature_common) * 2
model1 = unet.get_unet(num_class=num_class, num_channel=num_channel, size=size)
model1.load_weights(args.model)
#model1.summary()

# load pyBigWig
#dict_label_train={}
#for the_id in list_label_train:
#    dict_label_train[the_id]=pyBigWig.open(path1 + 'gold_anchored_' + the_id + '.bigwig')
#dict_label_vali={}
#for the_id in list_label_vali:
#    dict_label_vali[the_id]=pyBigWig.open(path1 + 'gold_anchored_' + the_id + '.bigwig')
#dict_label_test={}
#for the_id in list_label_test:
#    dict_label_test[the_id]=pyBigWig.open(path1 + 'gold_' + the_id + '.bigwig')
dict_dna = {}
for the_id in list_dna:
Example #24
orig_imgs = recompone_overlap(patches_embedding, full_img_height,
                              full_img_width, stride_size[0],
                              stride_size[1]) * 255
gtruth_masks = recompone_overlap(patches_embedding_gt, full_img_height,
                                 full_img_width, stride_size[0],
                                 stride_size[1]) * 255

#================ Run the prediction of the patches ==================================
best_last = config.get('testing settings', 'best_last')

#Load the saved model
if u_net:
    model = get_unet(1,
                     batch_size,
                     patch_size[0],
                     patch_size[1],
                     with_activation=True)  #the U-net model
else:
    model = UResNet(input_shape=(1, patch_size[0], patch_size[1]),
                    with_activation=True)

thresholds = np.linspace(0, 1, 200).tolist()
model.compile(
    optimizer='sgd',
    loss=weighted_cross_entropy(9),
    metrics=[
        BinaryAccuracy(),
        TruePositives(thresholds=thresholds),
        FalsePositives(thresholds=thresholds),
        TrueNegatives(thresholds=thresholds),
Example #25
num_pool = 8
name_model = 'weights_' + sys.argv[1] + '.h5'

num_augtest = 5

################################

scale_pool = 2**num_pool
scale = 10**reso_digits
positives_all = np.zeros(scale + 1, dtype=np.int64)
negatives_all = np.zeros(scale + 1, dtype=np.int64)
auc_auprc = open('auc_auprc_' + sys.argv[1] + '.txt', 'w')
eva = open('eva_global_' + sys.argv[1] + '.txt', 'w')

if __name__ == '__main__':
    model0 = unet.get_unet()
    model0.load_weights(name_model)

    dice_all = np.empty([0])
    auc_all = np.empty([0])
    auprc_all = np.empty([0])

    path1 = '/ssd/hongyang/2018/physionet/data/multitaper96/'
    new_path = '/ssd/hongyang/2018/physionet/data/new_arousal/'
    all_test_files = open('whole_test.dat', 'r')
    for filename in all_test_files:
        filename = filename.strip()
        tmp = filename.split('/')
        the_id = tmp[-1]
        print(the_id)
Example #26
patches_embedding, patches_embedding_gt = session.run(
    [patches_embedding, patches_embedding_gt])

visualize_samples(session, experiment_path, patches_imgs_samples,
                  patches_gts_samples, patch_size)

#============ Load the data and normalize =======================================
test_dataset, train_dataset = load_trainset(train_path, batch_size, N_subimgs)

if finetune:
    test_finetune, train_finetune = load_trainset(finetune_path, batch_size,
                                                  finetune_subimgs)

#=========== Construct and save the model architecture ===========================
if u_net:
    model = get_unet(1, batch_size, patch_size[0],
                     patch_size[1])  #the U-net model
else:
    model = UResNet(input_shape=(1, patch_size[0], patch_size[1]),
                    classes=2,
                    n_upsample_blocks=4)

model.compile(
    optimizer='sgd',
    loss=weighted_cross_entropy(2),
    # loss = 'categorical_crossentropy',
    metrics=[accuracy])

print("Check: final output of the network:")
print(model.output_shape)

plot(model, to_file=experiment_path + '/' + name_experiment +