# NOTE(review): truncated scrape fragment — starts mid-function, so `n_images`,
# `seg_pred`, `seg`, `results_dir` and `epoch` are defined elsewhere (not visible
# in this chunk). The final savefig line dedents to 4 spaces while the loop body
# is at 8 and the `for` is at 0, which matches no enclosing level — this fragment
# is not valid Python as it stands; the original `for` was presumably indented.
for i in range(n_images):
        # Write the values 0..5 into the first 6 pixels of each image —
        # presumably to force a common color scale across ground truth and
        # prediction in imshow (TODO confirm; imshow autoscales per image).
        seg_pred[i][0, :6] = np.array([0,1,2,3,4,5])
        seg[i,0,0,:6] = np.array([0,1,2,3,4,5])
        # Two-column figure: left column ground-truth seg, right column prediction.
        plt.subplot(n_images, 2, 2*i+1)
        plt.imshow(seg[i, 0])
        plt.subplot(n_images, 2, 2*i+2)
        plt.imshow(seg_pred[i])
    plt.savefig(os.path.join(results_dir, "some_segmentations_ep_%d.png"%epoch))


# NOTE(review): Python 2 code (print statements). This epoch loop is truncated in
# this chunk — the for-body below is cut off, so the actual training step and the
# increments of `batch_ctr`/`epoch` are not visible here.
while epoch < n_epochs:
    # Rebuild the data-augmentation pipeline from scratch every epoch:
    # base patch generator -> keep only segmentation channel 2 -> random
    # rotation -> elastic deformation (alpha=200., sigma=14.) -> pad data to
    # INPUT_PATCH_SIZE -> center-crop the seg back to OUTPUT_PATCH_SIZE ->
    # multithreaded prefetcher with 8 workers and a queue depth of 16.
    data_gen_train = SegmentationBatchGeneratorDavid(all_patients, BATCH_SIZE, validation_patients, PATCH_SIZE=OUTPUT_PATCH_SIZE, mode="train", ignore=[81], losses=losses, num_batches=None, seed=None)
    data_gen_train = seg_channel_selection_generator(data_gen_train, [2])
    data_gen_train = rotation_generator(data_gen_train)
    data_gen_train = elastric_transform_generator(data_gen_train, 200., 14.)  # sic: "elastric" is the helper's actual name
    data_gen_train = pad_generator(data_gen_train, INPUT_PATCH_SIZE)
    data_gen_train = center_crop_seg_generator(data_gen_train, OUTPUT_PATCH_SIZE)
    data_gen_train = Multithreaded_Generator(data_gen_train, 8, 16)
    data_gen_train._start()  # NOTE(review): calls an underscore-prefixed method of the wrapper — confirm this is the intended public API
    print "epoch: ", epoch
    # Accumulators for the periodic progress report below.
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, idx in data_gen_train:
        # Every floor(n_batches_per_epoch / n_feedbacks_per_epoch) batches,
        # print and record the running averages since the last report.
        if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)
            all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            # NOTE(review): only the loss accumulator is reset here; train_acc_tmp
            # may be reset in the truncated lines below — verify in the full file.
            train_loss_tmp = 0
# Esempio n. 2  (Italian: "Example no. 2" — scrape artifact separating two code examples)
# 0  (scrape artifact, likely a vote/score counter from the source page)
    # NOTE(review): orphaned fragment — the 4-space indent implies an enclosing
    # suite not visible in this chunk; `n_images`, `seg_pred`, `seg`,
    # `results_dir` and `epoch` are defined elsewhere.
    for i in range(n_images):
        # Write the values 0..5 into the first 6 pixels of each image —
        # presumably to force a common imshow color scale across the ground
        # truth and the prediction (TODO confirm).
        seg_pred[i][0, :6] = np.array([0,1,2,3,4,5])
        seg[i,0,0,:6] = np.array([0,1,2,3,4,5])
        # Two-column figure: left ground-truth seg, right predicted seg.
        plt.subplot(n_images, 2, 2*i+1)
        plt.imshow(seg[i, 0])
        plt.subplot(n_images, 2, 2*i+2)
        plt.imshow(seg_pred[i])
    plt.savefig(os.path.join(results_dir, "some_segmentations_ep_%d.png"%epoch))


# NOTE(review): Python 2 code. Content was snipped out of this example between
# the feedback block and the plotting statements below (see the inline note).
while epoch < 50:
    # Rebuild the augmentation pipeline every epoch: BraTS2014 patch generator
    # (1500 batches, patch size (180, 164)) -> keep only seg channel 2 ->
    # random rotation -> elastic deformation (alpha=200., sigma=14.) -> pad
    # data to PATCH_SIZE (module-level constant, distinct from the (180, 164)
    # keyword above — TODO confirm) -> center-crop seg back to (180, 164) ->
    # multithreaded prefetcher with 8 workers and a queue depth of 50.
    data_gen_train = SegmentationBatchGeneratorBraTS2014(all_patients, BATCH_SIZE, validation_patients, PATCH_SIZE=(180, 164), mode="train", ignore=[81], losses=losses, num_batches=1500, seed=None)
    data_gen_train = seg_channel_selection_generator(data_gen_train, [2])
    data_gen_train = rotation_generator(data_gen_train)
    data_gen_train = elastric_transform_generator(data_gen_train, 200., 14.)  # sic: "elastric" is the helper's actual name
    data_gen_train = pad_generator(data_gen_train, PATCH_SIZE)
    data_gen_train = center_crop_seg_generator(data_gen_train, (180, 164))
    data_gen_train = Multithreaded_Generator(data_gen_train, 8, 50)
    data_gen_train._start()  # NOTE(review): underscore-prefixed method — confirm intended API
    print "epoch: ", epoch
    # Accumulators for the periodic progress report below.
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, idx in data_gen_train:
        # Every floor(n_batches_per_epoch / n_feedbacks_per_epoch) batches,
        # print and record the running averages since the last report.
        if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)
            all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            train_loss_tmp = 0
        # NOTE(review): lines appear to be missing here — the actual training
        # step, accuracy accumulation, and the plotting-loop header that would
        # define `i`/`n_images` are gone; the statements below reference `i`
        # outside any visible loop.
        seg_pred[i][0, :6] = np.array([0,1,2,3,4,5])
        seg[i,0,0,:6] = np.array([0,1,2,3,4,5])
        plt.subplot(n_images, 2, 2*i+1)
        plt.imshow(seg[i, 0])
        plt.subplot(n_images, 2, 2*i+2)
        plt.imshow(seg_pred[i])
    plt.savefig(os.path.join(results_dir, "some_segmentations_ep_%d.png"%epoch))


# NOTE(review): Python 2 code. This chunk ends mid-loop — the training step,
# accuracy accumulation, and the `batch_ctr`/`epoch` increments are not visible.
while epoch < 70:
    # hack to get only manually labeled training data
    # (mode="test" with manually_labeled_patients as the hold-out list is used
    # here to restrict sampling, per the original comment above — TODO confirm
    # against the generator's implementation.)
    data_gen_train = SegmentationBatchGeneratorBraTS2014(all_patients, BATCH_SIZE, manually_labeled_patients, PATCH_SIZE=(180, 164), mode="test", ignore=[81], losses=losses, num_batches=None, seed=None)
    data_gen_train = seg_channel_selection_generator(data_gen_train, [2])
    data_gen_train = rotation_generator(data_gen_train)
    data_gen_train = elastric_transform_generator(data_gen_train, 200., 14.)  # sic: "elastric" is the helper's actual name
    data_gen_train = pad_generator(data_gen_train, PATCH_SIZE)
    data_gen_train = center_crop_seg_generator(data_gen_train, (180, 164))
    data_gen_train = Multithreaded_Generator(data_gen_train, 8, 50)
    data_gen_train._start()  # NOTE(review): underscore-prefixed method — confirm intended API
    print "epoch: ", epoch
    # Accumulators for the periodic progress report below.
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, idx in data_gen_train:
        # Every floor(n_batches_per_epoch / n_feedbacks_per_epoch) batches,
        # print and record the running averages since the last report.
        if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)
            all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            # NOTE(review): only the loss accumulator is reset here; train_acc_tmp
            # may be reset in the truncated lines below — verify in the full file.
            train_loss_tmp = 0