Example #1

'''plt.subplot(1, 3, 1)
plt.imshow(d[0,0], cmap="gray")
plt.subplot(1, 3, 2)
d1=elastic_transform_2d(d[0,0], 550., 20.)
plt.imshow(d1, cmap="gray")
plt.subplot(1, 3, 3)
plt.imshow(d[0,0]-d1)
plt.show()
plt.close()'''

data_gen_validation = memmapGenerator_allInOne_segmentation_lossSampling(memmap_data, memmap_gt, BATCH_SIZE, validation_patients, mode="test", ignore=[40])
data_gen_validation = center_crop_generator(data_gen_validation, (PATCH_SIZE, PATCH_SIZE))
data_gen_validation = seg_channel_selection_generator(data_gen_validation, [2])
data_gen_validation = multi_threaded_generator(data_gen_validation, num_threads=4, num_cached=10)
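# hedged usage sketch, not part of the original snippet: the wrappers above are
# plain Python generators, so a single next() call (Python 2 style, as used
# elsewhere on this page) would yield one preprocessed validation batch; the
# exact tuple layout depends on memmapGenerator_allInOne_segmentation_lossSampling
# batch = data_gen_validation.next()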

net = build_UNet(20, BATCH_SIZE, num_output_classes=5, base_n_filters=16, input_dim=(PATCH_SIZE, PATCH_SIZE))
output_layer_for_loss = net["output_flattened"]
'''with open(os.path.join(results_dir, "%s_Params_ep30.pkl" % EXPERIMENT_NAME), 'r') as f:
    params = cPickle.load(f)
    lasagne.layers.set_all_param_values(output_layer_for_loss, params)
with open(os.path.join(results_dir, "%s_allLossesNAccur_ep30.pkl" % EXPERIMENT_NAME), 'r') as f:
    # [all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, auc_all] = cPickle.load(f)
    [all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies] = cPickle.load(f)'''
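# hedged counterpart sketch (not in the original): checkpoints like the ones
# read above are typically written with get_all_param_values, e.g.
# with open(os.path.join(results_dir, "%s_Params_ep30.pkl" % EXPERIMENT_NAME), 'w') as f:
#     cPickle.dump(lasagne.layers.get_all_param_values(output_layer_for_loss), f)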

n_batches_per_epoch = 500
# n_batches_per_epoch = np.floor(n_training_samples/float(BATCH_SIZE))
n_test_batches = 50
# n_test_batches = np.floor(n_val_samples/float(BATCH_SIZE))

x_sym = T.tensor4()
seg_sym = T.ivector()
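# hedged continuation sketch (the snippet is cut off here): x_sym and seg_sym
# are normally bound into compiled Theano functions, e.g.
# train_fn = theano.function([x_sym, seg_sym], loss, updates=updates)
# val_fn = theano.function([x_sym, seg_sym], [loss, accuracy])
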
Example #2

'''plt.subplot(1, 3, 1)
plt.imshow(d[0,0], cmap="gray")
plt.subplot(1, 3, 2)
d1=elastic_transform_2d(d[0,0], 550., 20.)
plt.imshow(d1, cmap="gray")
plt.subplot(1, 3, 3)
plt.imshow(d[0,0]-d1)
plt.show()
plt.close()'''

data_gen_validation = SegmentationBatchGeneratorDavid(all_patients, BATCH_SIZE, validation_patients, PATCH_SIZE=INPUT_PATCH_SIZE, mode="test", ignore=[81], losses=None, num_batches=None, seed=None)
data_gen_validation = seg_channel_selection_generator(data_gen_validation, [2])
data_gen_validation = center_crop_seg_generator(data_gen_validation, OUTPUT_PATCH_SIZE)
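# presumably a background prefetcher: 2 workers and a queue of up to 10
# cached batches; _start() below launches the workers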
data_gen_validation = Multithreaded_Generator(data_gen_validation, 2, 10)
data_gen_validation._start()

net = build_UNet(25, BATCH_SIZE, num_output_classes=num_classes, base_n_filters=16, input_dim=INPUT_PATCH_SIZE, pad="valid")
output_layer_for_loss = net["output_flattened"]

n_batches_per_epoch = 300
# n_batches_per_epoch = np.floor(n_training_samples/float(BATCH_SIZE))
n_test_batches = 30
# n_test_batches = np.floor(n_val_samples/float(BATCH_SIZE))

x_sym = T.tensor4()
seg_sym = T.ivector()
w_sym = T.vector()

# add some weight decay
l2_loss = lasagne.regularization.regularize_network_params(output_layer_for_loss, lasagne.regularization.l2) * 1e-4

# the distinction between prediction_train and test is important only if we enable dropout
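# hedged sketch of what typically follows (not part of the original snippet):
# deterministic=False keeps dropout active at training time, while
# deterministic=True disables it for validation/testing
# prediction_train = lasagne.layers.get_output(output_layer_for_loss, x_sym, deterministic=False)
# prediction_test = lasagne.layers.get_output(output_layer_for_loss, x_sym, deterministic=True)
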
Example #3

'''plt.subplot(1, 3, 2)
d1=elastic_transform_2d(d[0,0], 550., 20.)
plt.imshow(d1, cmap="gray")
plt.subplot(1, 3, 3)
plt.imshow(d[0,0]-d1)
plt.show()
plt.close()'''
data_gen_train = multi_threaded_generator(data_gen_train, num_threads=8, num_cached=100)
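# pull one batch up front, presumably to spin up the worker threads and fill the cache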
_ = data_gen_train.next()

data_gen_validation = memmapGenerator_allInOne_segmentation(memmap_data, memmap_gt, BATCH_SIZE, validation_patients, mode="test", shuffle=False, ignore=[40])
data_gen_validation = center_crop_generator(data_gen_validation, (PATCH_SIZE, PATCH_SIZE))
data_gen_validation = seg_channel_selection_generator(data_gen_validation, [2])
data_gen_validation = multi_threaded_generator(data_gen_validation, num_threads=4, num_cached=10)

net = build_UNet(20, BATCH_SIZE, num_output_classes=5, base_n_filters=16, input_dim=(PATCH_SIZE, PATCH_SIZE))
output_layer_for_loss = net["output_flattened"]

with open("../../../results/segment_tumor_v0.1_Unet_Params_ep1.pkl", 'r') as f:
    params = cPickle.load(f)
    lasagne.layers.set_all_param_values(output_layer_for_loss, params)
with open("../../../results/segment_tumor_v0.1_Unet_allLossesNAccur_ep1.pkl", 'r') as f:
    [all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies] = cPickle.load(f)

n_batches_per_epoch = 2000
# n_batches_per_epoch = np.floor(n_training_samples/float(BATCH_SIZE))
n_test_batches = 200
# n_test_batches = np.floor(n_val_samples/float(BATCH_SIZE))

x_sym = T.tensor4()
seg_sym = T.ivector()

Example #4

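    # NOTE: the snippet starts mid-function; train_file and val_file are
    # presumably defined just above, analogous to test_file below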
    test_file = os.path.join(data_dir, params.test_file)

    train_filenames = []
    val_filenames = []
    test_filenames = []
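    # each split file lists one relative filename per line; l[:-1] strips the
    # trailing newline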
    with open(train_file) as f:
        for l in f:
            train_filenames.append(os.path.join(data_dir, data_folder, l[:-1]))
    with open(val_file) as f:
        for l in f:
            val_filenames.append(os.path.join(data_dir, data_folder, l[:-1]))
    with open(test_file) as f:
        for l in f:
            test_filenames.append(os.path.join(data_dir, data_folder, l[:-1]))
    params.train_size = len(train_filenames)
    params.eval_size = len(val_filenames)
    params.test_size = len(test_filenames)

    train_dataset = tfrecord_iterator(True, train_filenames, params)
    eval_dataset = tfrecord_iterator(False, val_filenames, params)
    test_dataset = tfrecord_iterator(False, test_filenames, params)

    logging.info("Creating the model...")
    train_model = build_UNet('train', train_dataset, params)
    eval_model = build_UNet('eval', eval_dataset, params, reuse=True)
    test_model = build_UNet('eval', test_dataset, params, reuse=True)
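    # reuse=True presumably re-uses the variables created for the 'train'
    # model (TF variable-scope sharing), so eval/test share its weights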

    logging.info("Starting training for {} epochs".format(params.num_epochs))
    train_and_evaluate(train_model, eval_model, test_model, args.model_dir,
                       params, args.restore_from, ts_uuid)

Example #5

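# NOTE: the snippet is truncated here; the lines below are the trailing keyword
# arguments of a SegmentationBatchGenerator call (compare the full call in
# Example #6)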
    ignore=[81],
    losses=None,
    num_batches=None,
    seed=None)
data_gen_validation = seg_channel_selection_generator(data_gen_validation, [2])
data_gen_validation = center_crop_seg_generator(data_gen_validation,
                                                (180, 164))
data_gen_validation = Multithreaded_Generator(data_gen_validation, 2, 30)
data_gen_validation._start()

manually_labeled_patients = np.concatenate(
    (range(4), np.arange(222, 232), np.arange(245, 251)))

net = build_UNet(20,
                 BATCH_SIZE,
                 num_output_classes=6,
                 base_n_filters=16,
                 input_dim=PATCH_SIZE,
                 pad="valid")
output_layer_for_loss = net["output_flattened"]

n_batches_per_epoch = 100
# n_batches_per_epoch = np.floor(n_training_samples/float(BATCH_SIZE))
n_test_batches = 10
# n_test_batches = np.floor(n_val_samples/float(BATCH_SIZE))

x_sym = T.tensor4()
seg_sym = T.ivector()
w_sym = T.vector()

# add some weight decay
l2_loss = lasagne.regularization.regularize_network_params(
    output_layer_for_loss, lasagne.regularization.l2) * 1e-4

Example #6

'''d1=elastic_transform_2d(d[0,0], 550., 20.)
plt.imshow(d1, cmap="gray")
plt.subplot(1, 3, 3)
plt.imshow(d[0,0]-d1)
plt.show()
plt.close()'''

data_gen_validation = SegmentationBatchGeneratorBraTS2014(all_patients, BATCH_SIZE, validation_patients, PATCH_SIZE=PATCH_SIZE, mode="test", ignore=[81], losses=None, num_batches=None, seed=None)
data_gen_validation = seg_channel_selection_generator(data_gen_validation, [2])
data_gen_validation = center_crop_seg_generator(data_gen_validation, (180, 164))
data_gen_validation = Multithreaded_Generator(data_gen_validation, 2, 30)
data_gen_validation._start()

manually_labeled_patients = np.concatenate((range(4), np.arange(222, 232), np.arange(245, 251)))

net = build_UNet(20, BATCH_SIZE, num_output_classes=6, base_n_filters=16, input_dim=PATCH_SIZE, pad="valid")
output_layer_for_loss = net["output_flattened"]

n_batches_per_epoch = 100
# n_batches_per_epoch = np.floor(n_training_samples/float(BATCH_SIZE))
n_test_batches = 10
# n_test_batches = np.floor(n_val_samples/float(BATCH_SIZE))

x_sym = T.tensor4()
seg_sym = T.ivector()
w_sym = T.vector()

# add some weight decay
l2_loss = lasagne.regularization.regularize_network_params(output_layer_for_loss, lasagne.regularization.l2) * 1e-4

# the distinction between prediction_train and test is important only if we enable dropout
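
# hedged continuation sketch (not part of the original snippet): the per-pixel
# loss is typically categorical crossentropy over the flattened softmax output,
# weighted by w_sym and combined with the L2 term above
# prediction_train = lasagne.layers.get_output(output_layer_for_loss, x_sym, deterministic=False)
# loss_vec = lasagne.objectives.categorical_crossentropy(prediction_train, seg_sym)
# loss = (loss_vec * w_sym).mean() + l2_loss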