Example #1
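All snippets in this collection are excerpts from one project module and rely on its module-level imports and globals (DIM, learning_rate, batch_size, num_epoch, FOLD_NUM, data_path, out_dir, weighted_model, train_gen, utils, among others); ModelCheckpointParallel, get_array_kits, save_array_kits and makedir are project-specific helpers. A minimal sketch of the assumed imports:

import os
from shutil import copyfile

import numpy as np
import SimpleITK as sitk
from keras.callbacks import (Callback, CSVLogger, EarlyStopping,
                             ModelCheckpoint, TensorBoard)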
def predict_test(model_name, onlyEval=False):
    pred_dir = '/home/anneke/projects/uats/code/kits/output/predictions/'

    test_path = '/cache/suhita/data/kidney_anneke/preprocessed_labeled_test'
    cases = sorted(os.listdir(test_path))  # sorted for a stable case order
    val_len = len(cases)

    img_arr = np.zeros((val_len * 2, DIM[0], DIM[1], DIM[2], 1), dtype=float)
    GT_arr = np.zeros((val_len * 2, DIM[0], DIM[1], DIM[2], 1), dtype=float)

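    # data_path is a module-level global in the original project; each case
    # contributes two samples: slot 2*i is the left kidney, 2*i + 1 the right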
    for idx, case in enumerate(cases):
        img_arr[idx * 2, :, :, :,
                0] = np.load(os.path.join(data_path, case, 'img_left.npy'))
        img_arr[idx * 2 + 1, :, :, :,
                0] = np.load(os.path.join(data_path, case, 'img_right.npy'))
        GT_arr[idx * 2, :, :, :,
               0] = np.load(os.path.join(data_path, case, 'segm_left.npy'))
        GT_arr[idx * 2 + 1, :, :, :,
               0] = np.load(os.path.join(data_path, case, 'segm_right.npy'))

    print('load_weights')
    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[0], DIM[1], DIM[2]),
                           learning_rate=learning_rate)
    model.load_weights(model_name)

    if onlyEval:
        out_value = model.evaluate(img_arr, GT_arr, batch_size=1, verbose=0)
        print(out_value)
    else:
        out = model.predict(img_arr, batch_size=1, verbose=1)
        # np.save(os.path.join(out_dir, 'predicted.npy'), out)
        for i in range(out.shape[0]):
            case = cases[i // 2]
            segm = sitk.GetImageFromArray(out[i, :, :, :, 0])
            utils.makeDirectory(os.path.join(pred_dir, case))
            if i % 2 == 0:
                img = sitk.ReadImage(
                    os.path.join(data_path, case, 'img_left.nrrd'))
                segm.CopyInformation(img)
                sitk.WriteImage(img, os.path.join(pred_dir, case,
                                                  'img_left.nrrd'))
                sitk.WriteImage(segm,
                                os.path.join(pred_dir, case, 'segm_left.nrrd'))

            else:
                img = sitk.ReadImage(
                    os.path.join(data_path, case, 'img_right.nrrd'))
                segm.CopyInformation(img)
                sitk.WriteImage(img, os.path.join(pred_dir, case,
                                                  'img_right.nrrd'))
                sitk.WriteImage(segm,
                                os.path.join(pred_dir, case,
                                             'segm_right.nrrd'))

        # single image evaluation
        for i in range(0, val_len * 2):
            out_eval = model.evaluate(img_arr[i:i + 1],
                                      GT_arr[i:i + 1],
                                      batch_size=1,
                                      verbose=0)
            print(i, out_eval)
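A hypothetical call (the weights path is illustrative, not from the project):

predict_test('/path/to/supervised_kits_weights.h5', onlyEval=True)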
Example #2
def predict(model_name):
    data_path = '/data/suhita/temporal/kits/preprocessed_labeled_train'

    val_fold = np.load('/data/suhita/temporal/kits/Folds/val_fold' +
                       str(FOLD_NUM) + '.npy')
    num_val_data = len(val_fold)
    val_supervised_flag = np.ones(
        (num_val_data * 2, DIM[0], DIM[1], DIM[2], 1), dtype='int8')
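    # a flag of ones marks every validation voxel as labeled, so the
    # semi-supervised model treats the whole validation set as supervised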
    img_arr = np.zeros((val_fold.shape[0] * 2, DIM[0], DIM[1], DIM[2], 1),
                       dtype=float)
    GT_arr = np.zeros((val_fold.shape[0] * 2, DIM[0], DIM[1], DIM[2], 1),
                      dtype=float)

    for i in range(val_fold.shape[0]):
        img_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'img_left.npy'))
        img_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'img_right.npy'))
        GT_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'segm_left.npy'))
        GT_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'segm_right.npy'))

    print('load_weights')
    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[0], DIM[1], DIM[2]),
                           num_class=1,
                           use_dice_cl=False,
                           learning_rate=learning_rate,
                           gpu_id=None,
                           nb_gpus=None,
                           trained_model=model_name,
                           temp=1)
    model.load_weights(model_name)

    # single image evaluation
    # for i in range(0, val_fold.shape[0] * 2):
    #     out_eval = model.evaluate(
    #         [img_arr[i:i + 1], GT_arr[i:i + 1], val_supervised_flag[i:i + 1]],
    #         GT_arr[i:i + 1], batch_size=1, verbose=0)
    #     print(val_fold[int(i / 2)], out_eval)

    out_eval = model.evaluate([img_arr, GT_arr, val_supervised_flag],
                              GT_arr,
                              batch_size=2,
                              verbose=0)
    print(out_eval)
Example #3
def eval_for_supervised(model_dir, model_name):
    GT_dir = '/cache/suhita/data/kidney_anneke/preprocessed_labeled_test'
    img_arr, GT_arr = create_test_arrays(GT_dir)
    DIM = img_arr.shape
    from dataset_specific.kits import weighted_model
    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[1], DIM[2], DIM[3]),
                           learning_rate=learning_rate)
    model.load_weights(os.path.join(model_dir, model_name + '.h5'))
    prediction = model.predict(img_arr, batch_size=batch_size)
    csvName = os.path.join(model_dir, 'kits', 'evaluation',
                           model_name + '.csv')

    # weights epochs LR gpu_id dist orient prediction LRDecay earlyStop
    evaluateFiles_arr(prediction=prediction, GT_array=GT_arr, csvName=csvName)
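A minimal usage sketch (directory and model name are illustrative assumptions):

eval_for_supervised('/path/to/model_dir', 'supervised_fold1')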
Example #4
def eval_for_uats_softmax(model_dir, model_name, batch_size=1):
    GT_dir = '/cache/suhita/data/kidney_anneke/preprocessed_labeled_test'
    img_arr, GT_arr = create_test_arrays(GT_dir)
    DIM = img_arr.shape
    from dataset_specific.kits import weighted_model
    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[1], DIM[2], DIM[3]),
                           learning_rate=learning_rate,
                           gpu_id=None,
                           nb_gpus=None,
                           trained_model=os.path.join(model_dir,
                                                      model_name + '.h5'),
                           temp=1)
    # model.load_weights(os.path.join(model_dir, NAME,'.h5'))
    val_supervised_flag = np.ones((DIM[0], DIM[1], DIM[2], DIM[3], 1),
                                  dtype='int8')
    prediction = model.predict([img_arr, GT_arr, val_supervised_flag],
                               batch_size=batch_size)
    csvName = os.path.join(model_dir, 'kits', 'evaluation',
                           model_name + '.csv')

    # weights epochs LR gpu_id dist orient prediction LRDecay earlyStop
    evaluateFiles_arr(prediction=prediction, GT_array=GT_arr, csvName=csvName)
Example #5
def train(gpu_id, nb_gpus):
    num_labeled_train = int(PERCENTAGE_OF_LABELLED * TRAIN_NUM)
    num_train_data = len(os.listdir(DATA_PATH))
    num_un_labeled_train = num_train_data - num_labeled_train
    IMGS_PER_ENS_BATCH = num_un_labeled_train // 3
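    # the ensemble update in TemporalCallback below processes the unlabeled
    # cases in thirds, presumably to bound the memory used per batch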
    # num_val_data = len(os.listdir(VAL_IMGS_PATH))

    # gen_lr_weight = ramp_down_weight(ramp_down_period)

    # prepare dataset
    print('-' * 30)
    print('Loading train data...')
    print('-' * 30)

    # Build Model
    wm = weighted_model()

    model = wm.build_model(img_shape=(DIM[0], DIM[1], DIM[2]),
                           learning_rate=learning_rate,
                           gpu_id=gpu_id,
                           nb_gpus=nb_gpus,
                           trained_model=TRAINED_MODEL_PATH,
                           temp=TEMP)

    print("Images Size:", num_train_data)
    print("Unlabeled Size:", num_un_labeled_train)

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    model.summary()

    class TemporalCallback(Callback):
        def __init__(self, data_path, ensemble_path, train_idx_list):

            self.val_dice_coef = 0.

            self.data_path = data_path
            self.ensemble_path = ensemble_path
            self.train_idx_list = train_idx_list  # list of training example ids
            # self.confident_pixels_no = (PERCENTAGE_OF_PIXELS * DIM[0] * DIM[1] * DIM[2] * IMGS_PER_ENS_BATCH *2 ) // 100
            flag = np.ones((*DIM, 1)).astype('float16')
            if os.path.exists(self.ensemble_path):
                raise Exception('the path exists!', self.ensemble_path)
            else:
                makedir(self.ensemble_path)

            for patient in np.arange(num_train_data):

                makedir(
                    os.path.join(self.ensemble_path, 'case_' + str(patient)))
                copyfile(
                    os.path.join(DATA_PATH, 'case_' + str(patient),
                                 SEGM_LEFT_NPY),
                    os.path.join(self.ensemble_path, 'case_' + str(patient),
                                 SEGM_LEFT_NPY))
                copyfile(
                    os.path.join(DATA_PATH, 'case_' + str(patient),
                                 SEGM_RIGHT_NPY),
                    os.path.join(self.ensemble_path, 'case_' + str(patient),
                                 SEGM_RIGHT_NPY))

                # os.path.join avoids relying on a trailing slash in ensemble_path
                if patient < num_labeled_train:
                    np.save(
                        os.path.join(self.ensemble_path,
                                     'case_' + str(patient), 'flag_left.npy'),
                        flag)
                    np.save(
                        os.path.join(self.ensemble_path,
                                     'case_' + str(patient), 'flag_right.npy'),
                        flag)
                else:
                    np.save(
                        os.path.join(self.ensemble_path,
                                     'case_' + str(patient), 'flag_left.npy'),
                        np.zeros((*DIM, 1)).astype('float32'))
                    np.save(
                        os.path.join(self.ensemble_path,
                                     'case_' + str(patient), 'flag_right.npy'),
                        np.zeros((*DIM, 1)).astype('float32'))

        def on_batch_begin(self, batch, logs=None):
            pass

        def shall_save(self, cur_val, prev_val):
            flag_save = False
            val_save = prev_val

            if cur_val > prev_val:
                flag_save = True
                val_save = cur_val

            return flag_save, val_save

        def on_epoch_begin(self, epoch, logs=None):
            '''
            if epoch > num_epoch - ramp_down_period:
                weight_down = next(gen_lr_weight)
                K.set_value(model.optimizer.lr, weight_down * learning_rate)
                K.set_value(model.optimizer.beta_1, 0.4 * weight_down + 0.5)
                print('LR: alpha-', K.eval(model.optimizer.lr), K.eval(model.optimizer.beta_1))
            # print(K.eval(model.layers[43].trainable_weights[0]))
            '''
            pass

        def on_epoch_end(self, epoch, logs={}):
            # print(time() - self.starttime)
            # model_temp = model

            save, self.val_dice_coef = self.shall_save(logs['val_dice_coef'],
                                                       self.val_dice_coef)

            if epoch > 0:
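                # refresh the temporal-ensemble targets for the unlabeled
                # cases, one batch of patients_per_batch patients at a time
                # (each patient contributes a left and a right kidney sample)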

                patients_per_batch = IMGS_PER_ENS_BATCH
                num_batches = num_un_labeled_train // patients_per_batch
                remainder = num_un_labeled_train % patients_per_batch
                remainder_pixels = remainder * DIM[0] * DIM[1] * DIM[2] * 2
                confident_pixels_no_per_batch = (PERCENTAGE_OF_PIXELS * 2 *
                                                 patients_per_batch * DIM[0] *
                                                 DIM[1] * DIM[2]) // 100
                if remainder_pixels < confident_pixels_no_per_batch:
                    patients_per_last_batch = patients_per_batch + remainder
                else:
                    patients_per_last_batch = remainder
                    num_batches = num_batches + 1

                for b_no in np.arange(num_batches):
                    actual_batch_size = patients_per_batch if (
                        b_no < num_batches - 1) else patients_per_last_batch
                    confident_pixels_no = (PERCENTAGE_OF_PIXELS * DIM[0] *
                                           DIM[1] * DIM[2] *
                                           actual_batch_size * 2) // 100
                    start = (b_no * patients_per_batch) + num_labeled_train
                    end = (start + actual_batch_size)
                    imgs = get_array_kits(self.data_path, start, end, 'img')
                    ensemble_prediction = get_array_kits(self.ensemble_path,
                                                         start,
                                                         end,
                                                         'segm',
                                                         dtype='float32')
                    supervised_flag = get_array_kits(self.ensemble_path,
                                                     start,
                                                     end,
                                                     'flag',
                                                     dtype='float16')

                    inp = [imgs, ensemble_prediction, supervised_flag]
                    del imgs, supervised_flag

                    cur_pred = np.zeros(
                        (actual_batch_size * 2, DIM[0], DIM[1], DIM[2], 1))
                    # cur_sigmoid_pred = np.zeros((actual_batch_size, 32, 168, 168, NUM_CLASS))
                    model_out = model.predict(inp, batch_size=2,
                                              verbose=1)  # 1

                    # model_out = np.add(model_out, train.predict(inp, batch_size=2, verbose=1))  # 2
                    del inp

                    cur_pred = model_out if save else ensemble_prediction

                    del model_out

                    # Z = αZ + (1 - α)z
                    ensemble_prediction = alpha * ensemble_prediction + (
                        1 - alpha) * cur_pred
                    save_array_kits(self.ensemble_path, ensemble_prediction,
                                    'segm', start, end)
                    del ensemble_prediction

                    # flag = np.where(np.reshape(np.max(ensemble_prediction, axis=-1),supervised_flag.shape) >= THRESHOLD, np.ones_like(supervised_flag),np.zeros_like(supervised_flag))
                    # dont consider background
                    # cur_pred[:, :, :, :, 4] = np.zeros((actual_batch_size, 32, 168, 168))
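                    # keep the confident_pixels_no most confident voxels in
                    # the batch; argpartition finds them without a full sort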
                    max_pred_ravel = np.ravel(np.max(cur_pred, axis=-1))
                    indices = np.argpartition(
                        max_pred_ravel,
                        -confident_pixels_no)[-confident_pixels_no:]

                    mask = np.ones(max_pred_ravel.shape, dtype=bool)
                    mask[indices] = False

                    max_pred_ravel[mask] = 0
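                    # confident voxels are marked with flag value 2, all others 0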
                    max_pred_ravel = np.where(max_pred_ravel > 0,
                                              np.ones_like(max_pred_ravel) * 2,
                                              np.zeros_like(max_pred_ravel))
                    flag = np.reshape(
                        max_pred_ravel,
                        (actual_batch_size * 2, DIM[0], DIM[1], DIM[2], 1))
                    del max_pred_ravel, indices

                    save_array_kits(self.ensemble_path, flag, 'flag', start,
                                    end)

                    del flag

                if 'cur_pred' in locals(): del cur_pred

    # callbacks
    print('-' * 30)
    print('Creating callbacks...')
    print('-' * 30)
    csv_logger = CSVLogger(CSV_NAME, append=True, separator=';')
    # model_checkpoint = ModelCheckpoint(MODEL_NAME, monitor='val_loss', save_best_only=True,verbose=1, mode='min')
    if nb_gpus is not None and nb_gpus > 1:
        model_checkpoint = ModelCheckpointParallel(MODEL_NAME,
                                                   monitor='val_dice_coef',
                                                   save_best_only=True,
                                                   verbose=1,
                                                   mode='max')
    else:
        model_checkpoint = ModelCheckpoint(MODEL_NAME,
                                           monitor='val_dice_coef',
                                           save_best_only=True,
                                           verbose=1,
                                           mode='max')

    tensorboard = TensorBoard(log_dir=TB_LOG_DIR,
                              write_graph=False,
                              write_grads=True,
                              histogram_freq=0,
                              batch_size=2,
                              write_images=False)

    train_id_list = []
    for i in np.arange(num_train_data):
        train_id_list.append(str(i) + '#right')
        train_id_list.append(str(i) + '#left')

    print(train_id_list[0:10])

    np.random.shuffle(train_id_list)
    tcb = TemporalCallback(DATA_PATH, ENS_GT_PATH, train_id_list)
    lcb = wm.LossCallback()
    es = EarlyStopping(monitor='val_dice_coef',
                       mode='max',
                       verbose=1,
                       patience=50)
    # del unsupervised_target, unsupervised_weight, supervised_flag, imgs
    # del supervised_flag
    cb = [model_checkpoint, tcb, tensorboard, lcb, csv_logger, es]

    print('BATCH Size = ', batch_size)

    print('Callbacks: ', cb)
    # params = {'dim': (32, 168, 168),'batch_size': batch_size}

    print('-' * 30)
    print('Fitting train...')
    print('-' * 30)
    training_generator = train_gen(DATA_PATH,
                                   ENS_GT_PATH,
                                   train_id_list,
                                   batch_size=batch_size,
                                   augmentation=True)

    # steps = num_train_data / batch_size
    if not augmentation:  # "augmentation" is a module-level global here
        augm_no = 1
    else:
        augm_no = AUGMENTATION_NO
    steps = (num_train_data * augm_no) // batch_size
    # steps = 2

    val_fold = np.load('/data/suhita/temporal/kits/Folds/val_fold' +
                       str(FOLD_NUM) + '.npy')
    num_val_data = len(val_fold)
    val_supervised_flag = np.ones(
        (num_val_data * 2, DIM[0], DIM[1], DIM[2], 1), dtype='int8') * 3
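    # validation voxels carry flag value 3 (labeled training voxels use 1,
    # confident pseudo-labels 2), presumably marking them as always supervised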
    val_img_arr = np.zeros((num_val_data * 2, DIM[0], DIM[1], DIM[2], 1),
                           dtype=float)
    val_GT_arr = np.zeros((num_val_data * 2, DIM[0], DIM[1], DIM[2], 1),
                          dtype=float)
    VAL_DATA = '/data/suhita/temporal/kits/preprocessed_labeled_train'
    for i in range(num_val_data):
        val_img_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(VAL_DATA, val_fold[i], 'img_left.npy'))
        val_img_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(VAL_DATA, val_fold[i], 'img_right.npy'))
        val_GT_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(VAL_DATA, val_fold[i], 'segm_left.npy'))
        val_GT_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(VAL_DATA, val_fold[i], 'segm_right.npy'))

    x_val = [val_img_arr, val_GT_arr, val_supervised_flag]
    y_val = val_GT_arr
    history = model.fit_generator(generator=training_generator,
                                  steps_per_epoch=steps,
                                  validation_data=(x_val, y_val),
                                  epochs=num_epoch,
                                  callbacks=cb)
Example #6
def train(gpu_id, nb_gpus, trained_model=None, perc=1.0, augmentation=False):
    if augmentation:
        augm = '_augm'
    else:
        augm = ''

    NAME = '1_supervised_F_centered_BB_' + str(FOLD_NUM) + '_' + str(
        TRAIN_NUM) + '_' + str(learning_rate) + '_Perc_' + str(perc) + augm
    CSV_NAME = out_dir + NAME + '.csv'

    TB_LOG_DIR = '/data/suhita/temporal/tb/kits/' + NAME + '_' + str(
        learning_rate) + '/'
    MODEL_NAME = out_dir + NAME + '.h5'
    TRAINED_MODEL_PATH = MODEL_NAME

    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[0], DIM[1], DIM[2]),
                           learning_rate=learning_rate)

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    # callbacks
    print('-' * 30)
    print('Creating callbacks...')
    print('-' * 30)
    csv_logger = CSVLogger(CSV_NAME, append=True, separator=';')
    # model_checkpoint = ModelCheckpoint(MODEL_NAME, monitor='val_loss', save_best_only=True,verbose=1, mode='min')
    if nb_gpus is not None and nb_gpus > 1:
        model_checkpoint = ModelCheckpointParallel(MODEL_NAME,
                                                   monitor='val_loss',
                                                   save_best_only=True,
                                                   verbose=1,
                                                   mode='min')
    else:
        model_checkpoint = ModelCheckpoint(MODEL_NAME,
                                           monitor='val_loss',
                                           save_best_only=True,
                                           verbose=1,
                                           mode='min')

    tensorboard = TensorBoard(log_dir=TB_LOG_DIR,
                              write_graph=False,
                              write_grads=False,
                              histogram_freq=0,
                              write_images=False)
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)

    # build the id list for the data generator
    train_fold = np.load('/data/suhita/temporal/kits/Folds/train_fold' +
                         str(FOLD_NUM) + '.npy')
    print(train_fold[0:10])
    nr_samples = train_fold.shape[0]

    # np.random.seed(5)
    np.random.seed(1234)
    np.random.shuffle(train_fold)
    print(train_fold[0:10])

    train_fold = train_fold[:int(nr_samples * perc)]

    train_id_list = []
    for i in range(train_fold.shape[0]):
        train_id_list.append(os.path.join(train_fold[i], 'img_left.npy'))
        train_id_list.append(os.path.join(train_fold[i], 'img_right.npy'))

    # del unsupervised_target, unsupervised_weight, supervised_flag, imgs
    # del supervised_flag

    cb = [model_checkpoint, tensorboard, es, csv_logger]

    print('BATCH Size = ', batch_size)

    print('Callbacks: ', cb)
    params = {'dim': (DIM[0], DIM[1], DIM[2]), 'batch_size': batch_size}

    print('-' * 30)
    print('Fitting train...')
    print('-' * 30)

    training_generator = train_gen(data_path,
                                   train_id_list,
                                   augmentation=augmentation,
                                   **params)

    val_fold = np.load('/data/suhita/temporal/kits/Folds/val_fold' +
                       str(FOLD_NUM) + '.npy')
    # val_id_list = []
    # for i in range(val_fold.shape[0]):
    #     val_id_list.append(os.path.join(val_fold[i], 'img_left.npy'))
    #     val_id_list.append(os.path.join(val_fold[i], 'img_right.npy'))
    #
    # val_generator = train_gen(data_path,
    #                                val_id_list,
    #                                **params)

    val_img_arr = np.zeros((val_fold.shape[0] * 2, DIM[0], DIM[1], DIM[2], 1),
                           dtype=float)
    val_GT_arr = np.zeros((val_fold.shape[0] * 2, DIM[0], DIM[1], DIM[2], 1),
                          dtype=float)

    for i in range(val_fold.shape[0]):
        val_img_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'img_left.npy'))
        val_img_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'img_right.npy'))
        val_GT_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'segm_left.npy'))
        val_GT_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'segm_right.npy'))

    if not augmentation:
        augm_no = 1
    else:
        augm_no = AUGMENTATION_NO
    steps = (TRAIN_NUM * augm_no) // batch_size
    # steps = 2  # debugging override left in the original; keep commented out

    # get validation fold
    # val_fold = np.load('Folds/val_fold'+str(FOLD_NUM)+'.npy')
    # val_x_arr = get_complete_array(data_path, val_fold, GT = False)
    # val_y_arr = get_complete_array(data_path, val_fold, dtype='int8', GT = True)

    history = model.fit_generator(generator=training_generator,
                                  steps_per_epoch=steps,
                                  validation_data=(val_img_arr, val_GT_arr),
                                  epochs=num_epoch,
                                  callbacks=cb)

    del val_GT_arr, val_img_arr
Example #7
def predict(model_name, onlyEval=False):
    pred_dir = '/home/anneke/projects/uats/code/kits/output/predictions/'

    val_fold = np.load('/data/suhita/temporal/kits/Folds/val_fold' +
                       str(FOLD_NUM) + '.npy')

    img_arr = np.zeros((val_fold.shape[0] * 2, DIM[0], DIM[1], DIM[2], 1),
                       dtype=float)
    GT_arr = np.zeros((val_fold.shape[0] * 2, DIM[0], DIM[1], DIM[2], 1),
                      dtype=float)

    for i in range(val_fold.shape[0]):
        img_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'img_left.npy'))
        img_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'img_right.npy'))
        GT_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'segm_left.npy'))
        GT_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(data_path, val_fold[i], 'segm_right.npy'))

    print('load_weights')
    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[0], DIM[1], DIM[2]),
                           learning_rate=learning_rate)
    model.load_weights(model_name)

    if onlyEval:
        out_value = model.evaluate(img_arr, GT_arr, batch_size=1, verbose=0)
        print(out_value)
    else:
        out = model.predict(img_arr, batch_size=2, verbose=1)
        # np.save(os.path.join(out_dir, 'predicted.npy'), out)
        for i in range(out.shape[0]):
            segm = sitk.GetImageFromArray(out[i, :, :, :, 0])
            utils.makeDirectory(os.path.join(pred_dir, val_fold[int(i / 2)]))
            if i % 2 == 0:
                img = sitk.ReadImage(
                    os.path.join(data_path, val_fold[int(i / 2)],
                                 'img_left.nrrd'))
                segm.CopyInformation(img)
                sitk.WriteImage(
                    img,
                    os.path.join(pred_dir, val_fold[int(i / 2)],
                                 'img_left.nrrd'))
                sitk.WriteImage(
                    segm,
                    os.path.join(pred_dir, val_fold[int(i / 2)],
                                 'segm_left.nrrd'))

            else:
                img = sitk.ReadImage(
                    os.path.join(data_path, val_fold[int(i / 2)],
                                 'img_right.nrrd'))
                segm.CopyInformation(img)
                sitk.WriteImage(
                    img,
                    os.path.join(pred_dir, val_fold[int(i / 2)],
                                 'img_right.nrrd'))
                sitk.WriteImage(
                    segm,
                    os.path.join(pred_dir, val_fold[int(i / 2)],
                                 'segm_right.nrrd'))

        # single image evaluation
        for i in range(0, val_fold.shape[0] * 2):
            out_eval = model.evaluate(img_arr[i:i + 1],
                                      GT_arr[i:i + 1],
                                      batch_size=1,
                                      verbose=0)
            print(val_fold[int(i / 2)], out_eval)
Example #8
def generate_prediction_for_ul(model_name,
                               onlyEval=False,
                               img_path=None,
                               ens_path=None):
    pred_dir = out_dir + '/predictions/'  # unused here; outputs go to ens_path

    # val_fold = np.load(out_dir+'/Folds/val_fold' + str(FOLD_NUM) + '.npy')
    val_fold = os.listdir(img_path)

    img_arr = np.zeros((len(val_fold) * 2, DIM[0], DIM[1], DIM[2], 1),
                       dtype=float)
    GT_arr = np.zeros((len(val_fold) * 2, DIM[0], DIM[1], DIM[2], 1),
                      dtype=float)

    for i in range(len(val_fold)):
        img_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(img_path, val_fold[i], 'img_left.npy'))
        img_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(img_path, val_fold[i], 'img_right.npy'))
        GT_arr[i * 2, :, :, :, 0] = np.load(
            os.path.join(img_path, val_fold[i], 'segm_left.npy'))
        GT_arr[i * 2 + 1, :, :, :, 0] = np.load(
            os.path.join(img_path, val_fold[i], 'segm_right.npy'))

    print('load_weights')
    wm = weighted_model()
    model = wm.build_model(img_shape=(DIM[0], DIM[1], DIM[2]),
                           learning_rate=learning_rate)
    model.load_weights(model_name)

    if onlyEval:
        out_value = model.evaluate(img_arr, GT_arr, batch_size=1, verbose=0)
        print(out_value)
    else:
        out = model.predict(img_arr, batch_size=2, verbose=1)
        # np.save(os.path.join(out_dir, 'predicted.npy'), out)
        for i in range(out.shape[0]):
            # segm = sitk.GetImageFromArray(out[i,:,:,:,0])
            # utils.makeDirectory(os.path.join(pred_dir, val_fold[int(i/2)]))
            utils.makeDirectory(os.path.join(ens_path, val_fold[int(i / 2)]))
            if i % 2 == 0:
                # img = sitk.ReadImage(os.path.join(data_path, val_fold[int(i / 2)], 'img_left.nrrd'))
                # segm.CopyInformation(img)
                # sitk.WriteImage(img, os.path.join(pred_dir, val_fold[int(i / 2)], 'img_left.nrrd'))

                np.save(
                    os.path.join(ens_path, val_fold[int(i / 2)],
                                 'img_left.npy'),
                    np.load(
                        os.path.join(img_path, val_fold[int(i / 2)],
                                     'img_left.npy')))
                # sitk.WriteImage(segm, os.path.join(pred_dir, val_fold[int(i/2)], 'segm_left.nrrd'))
                np.save(
                    os.path.join(ens_path, val_fold[int(i / 2)],
                                 'segm_left.npy'), out[i, :, :, :, 0])

            else:
                # img = sitk.ReadImage(os.path.join(data_path, val_fold[int(i / 2)], 'img_right.nrrd'))
                # segm.CopyInformation(img)
                # sitk.WriteImage(img, os.path.join(pred_dir, val_fold[int(i / 2)], 'img_right.nrrd'))
                np.save(
                    os.path.join(ens_path, val_fold[int(i / 2)],
                                 'img_right.npy'),
                    np.load(
                        os.path.join(img_path, val_fold[int(i / 2)],
                                     'img_right.npy')))
                # sitk.WriteImage(segm, os.path.join(pred_dir, val_fold[int(i/2)], 'segm_right.nrrd'))
                np.save(
                    os.path.join(ens_path, val_fold[int(i / 2)],
                                 'segm_right.npy'), out[i, :, :, :, 0])
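A hypothetical invocation (all paths are placeholders):

generate_prediction_for_ul('/path/to/trained_weights.h5',
                           img_path='/path/to/preprocessed_unlabeled',
                           ens_path='/path/to/ensemble_gt')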