def secondary_prediction(mask, vol, config2, model2_path=None,
                         preprocess_method2=None, norm_params2=None,
                         overlap_factor=0.9, augment2=None, num_augment=32, return_all_preds=False):
    model2 = load_old_model(get_last_model_path(model2_path), config=config2)
    pred = mask
    bbox_start, bbox_end = find_bounding_box(pred)
    check_bounding_box(pred, bbox_start, bbox_end)
    padding = [16, 16, 8]
    if padding is not None:
        bbox_start = np.maximum(bbox_start - padding, 0)
        bbox_end = np.minimum(bbox_end + padding, mask.shape)
    data = vol.astype(float)[
           bbox_start[0]:bbox_end[0],
           bbox_start[1]:bbox_end[1],
           bbox_start[2]:bbox_end[2]
           ]

    data = preproc_and_norm(data, preprocess_method2, norm_params2)

    prediction = get_prediction(data, model2, augment=augment2, num_augments=num_augment, return_all_preds=return_all_preds,
                                overlap_factor=overlap_factor, config=config2)

    padding2 = list(zip(bbox_start, np.array(vol.shape) - bbox_end))
    if return_all_preds:
        padding2 = [(0, 0)] + padding2
    print(padding2)
    print(prediction.shape)
    prediction = np.pad(prediction, padding2, mode='constant', constant_values=0)

    return prediction
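
The bounding-box helpers used above are not defined on this page. A minimal sketch of what find_bounding_box and check_bounding_box might look like, assuming a binary 3D mask and end-exclusive box coordinates (which matches the clipping against mask.shape above):

import numpy as np

def find_bounding_box(mask):
    # Coordinates of all nonzero voxels; one (start, end) pair per axis.
    coords = np.argwhere(mask)
    return coords.min(axis=0), coords.max(axis=0) + 1

def check_bounding_box(mask, bbox_start, bbox_end):
    # Sanity checks: the box must be non-empty and lie inside the volume.
    assert np.all(bbox_start < bbox_end), 'empty bounding box'
    assert np.all(bbox_end <= np.array(mask.shape)), 'bounding box exceeds volume'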
Example #2
def norm_net_model(input_shape=(1, 128, 128, 128),
                   n_base_filters=16,
                   depth=5,
                   dropout_rate=0.3,
                   n_segmentation_levels=3,
                   n_labels=1,
                   optimizer=Adam,
                   initial_learning_rate=5e-4,
                   loss_function=dice_coefficient_loss,
                   old_model_path=None):
    inputs = Input(input_shape)
    norm_net = isensee2017_model_3d(input_shape,
                                    n_base_filters,
                                    depth,
                                    dropout_rate,
                                    n_segmentation_levels,
                                    n_labels,
                                    optimizer,
                                    initial_learning_rate,
                                    loss_function,
                                    activation_name=None)
    norm_inputs = norm_net(inputs)

    seg_net = load_old_model(old_model_path)
    seg_net.trainable = False
    segmentation = seg_net(norm_inputs)

    metrics = ['binary_accuracy', vod_coefficient]
    if loss_function != dice_coefficient_loss:
        metrics += [dice_coefficient]
    model = Model(inputs=[inputs], outputs=segmentation, name='NormNetModel')
    model.compile(optimizer=optimizer(lr=initial_learning_rate),
                  loss=loss_function,
                  metrics=metrics)
    return model
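
A usage sketch for the model above: norm_net_model chains a trainable normalization U-Net in front of a frozen, previously trained segmentation network, so the loss gradients flow back through the fixed segmenter and update only the normalization weights. The checkpoint path below is a placeholder.

# Hypothetical usage; 'old_seg_model.h5' is a placeholder checkpoint path.
model = norm_net_model(input_shape=(1, 128, 128, 128),
                       old_model_path='old_seg_model.h5')
model.summary()  # only the norm-net weights are reported as trainable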
Example #3
def secondary_prediction(mask,
                         vol,
                         config2,
                         model2_path=None,
                         preprocess_method2=None,
                         norm_params2=None,
                         overlap_factor=0.9):
    model2 = load_old_model(get_last_model_path(model2_path))
    pred = mask
    bbox_start, bbox_end = find_bounding_box(pred)
    check_bounding_box(pred, bbox_start, bbox_end)
    padding = [16, 16, 8]
    if padding is not None:
        bbox_start = np.maximum(bbox_start - padding, 0)
        bbox_end = np.minimum(bbox_end + padding, mask.shape)
    data = vol.astype(float)[bbox_start[0]:bbox_end[0],
                             bbox_start[1]:bbox_end[1],
                             bbox_start[2]:bbox_end[2]]

    data = preproc_and_norm(data, preprocess_method2, norm_params2)

    prediction = \
        patch_wise_prediction(model=model2,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config2["patch_shape"] + [config2["patch_depth"]])
    prediction = prediction.squeeze()
    padding2 = list(zip(bbox_start, np.array(vol.shape) - bbox_end))
    print(padding2)
    print(prediction.shape)
    prediction = np.pad(prediction,
                        padding2,
                        mode='constant',
                        constant_values=0)
    return prediction
Example #4
def main(input_path, output_path, overlap_factor,
         config, model_path, preprocess_method=None, norm_params=None, augment=None, num_augment=0,
         config2=None, model2_path=None, preprocess_method2=None, norm_params2=None, augment2=None, num_augment2=0,
         z_scale=None, xy_scale=None, return_all_preds=False):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path), config=config)
    print('Loading nifti from {}...'.format(input_path))
    nifti = read_img(input_path)
    print('Predicting mask...')
    data = nifti.get_fdata().astype(float).squeeze()
    print('original_shape: ' + str(data.shape))
    scan_name = Path(input_path).name.split('.')[0]

    if z_scale is None:
        z_scale = 1.0
    if xy_scale is None:
        xy_scale = 1.0
    if z_scale != 1.0 or xy_scale != 1.0:
        data = ndimage.zoom(data, [xy_scale, xy_scale, z_scale])

    data = preproc_and_norm(data, preprocess_method, norm_params,
                            scale=config.get('scale_data', None),
                            preproc=config.get('preproc', None))

    save_nifti(data, os.path.join(output_path, scan_name + '_data.nii.gz'))

    data = np.pad(data, 3, 'constant', constant_values=data.min())

    print('Shape: ' + str(data.shape))
    prediction = get_prediction(data=data, model=model, augment=augment,
                                num_augments=num_augment, return_all_preds=return_all_preds,
                                overlap_factor=overlap_factor, config=config)
    # unpad
    prediction = prediction[3:-3, 3:-3, 3:-3]

    # revert to original size
    if config.get('scale_data', None) is not None:
        prediction = ndimage.zoom(prediction.squeeze(), np.divide([1, 1, 1], config.get('scale_data', None)), order=0)[..., np.newaxis]

    save_nifti(prediction, os.path.join(output_path, scan_name + '_pred.nii.gz'))

    if z_scale != 1.0 or xy_scale != 1.0:
        prediction = ndimage.zoom(prediction.squeeze(), [1.0 / xy_scale, 1.0 / xy_scale, 1.0 / z_scale], order=1)[..., np.newaxis]

    # if prediction.shape[-1] > 1:
    #    prediction = prediction[..., 1]
    if config2 is not None:
        prediction = prediction.squeeze()
        mask = process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)
        nifti = read_img(input_path)
        prediction = secondary_prediction(mask, vol=nifti.get_fdata().astype(float),
                                          config2=config2, model2_path=model2_path,
                                          preprocess_method2=preprocess_method2, norm_params2=norm_params2,
                                          overlap_factor=overlap_factor, augment2=augment2, num_augment=num_augment2,
                                          return_all_preds=return_all_preds)
        save_nifti(prediction, os.path.join(output_path, scan_name + '_pred_roi.nii.gz'))

    print('Saving to {}'.format(output_path))
    print('Finished.')
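
preproc_and_norm is called throughout these examples but never defined on this page. Example #7 below inlines the same windowing-and-normalization logic, so a sketch consistent with both call sites could look like this (the scale and preproc keyword arguments are assumptions taken from the call in Example #4, and the semantics of preproc are unknown, so this sketch ignores it):

from scipy import ndimage

def preproc_and_norm(data, preprocess_method=None, norm_params=None,
                     scale=None, preproc=None):
    # A sketch only; the repository's real implementation may differ.
    if preprocess_method is not None:
        print('Applying preprocess by {}...'.format(preprocess_method))
        if preprocess_method == 'window_1_99':
            data = window_intensities_data(data)
        else:
            raise Exception('Unknown preprocess: {}'.format(preprocess_method))
    if scale is not None:
        data = ndimage.zoom(data, scale)
    if norm_params is not None and any(norm_params.values()):
        data = normalize_data(data, mean=norm_params['mean'],
                              std=norm_params['std'])
    return data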
Example #5
def main(input_mat_path,
         output_mat_path,
         overlap_factor,
         config,
         model_path,
         preprocess_method=None,
         norm_params=None,
         config2=None,
         model2_path=None,
         preprocess_method2=None,
         norm_params2=None):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path))
    print('Loading mat from {}...'.format(input_mat_path))
    mat = loadmat(input_mat_path)
    print('Predicting mask...')
    data = mat['volume'].astype(float)

    data = preproc_and_norm(data, preprocess_method, norm_params)

    prediction = \
        patch_wise_prediction(model=model,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config["patch_shape"] + [config["patch_depth"]])

    print('Post-processing mask...')
    if prediction.shape[-1] > 1:
        prediction = prediction[..., 1]
    prediction = prediction.squeeze()
    print("Storing prediction in [7-9], 7 should be the best...")
    mat['masks'][0, 9] = \
        process_pred(prediction, gaussian_std=0, threshold=0.2)  # .astype(np.uint8)
    mat['masks'][0, 8] = \
        process_pred(prediction, gaussian_std=1, threshold=0.5)  # .astype(np.uint8)
    mat['masks'][0, 7] = \
        process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)

    if config2 is not None:
        print('Making secondary prediction... [6]')
        prediction = secondary_prediction(
            mat['masks'][0, 7],
            vol=mat['volume'].astype(float),
            config2=config2,
            model2_path=model2_path,
            preprocess_method2=preprocess_method2,
            norm_params2=norm_params2,
            overlap_factor=0.9)
        mat['masks'][0, 6] = \
            process_pred(prediction, gaussian_std=0, threshold=0.2)  # .astype(np.uint8)
        mat['masks'][0, 5] = \
            process_pred(prediction, gaussian_std=1, threshold=0.5)  # .astype(np.uint8)
        mat['masks'][0, 4] = \
            process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)

    print('Saving mat to {}'.format(output_mat_path))
    savemat(output_mat_path, mat)
    print('Finished.')
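
process_pred, used above with gaussian_std and threshold arguments, is also not defined on this page. A plausible sketch, assuming it smooths the soft prediction and then binarizes it:

import numpy as np
from scipy.ndimage import gaussian_filter

def process_pred(pred, gaussian_std=0.5, threshold=0.5):
    # Smooth, then threshold; a sketch, not the repository's actual code.
    if gaussian_std > 0:
        pred = gaussian_filter(pred, sigma=gaussian_std)
    return (pred > threshold).astype(np.float32)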
Example #6
def main(input_path,
         output_path,
         overlap_factor,
         config,
         model_path,
         preprocess_method=None,
         norm_params=None,
         config2=None,
         model2_path=None,
         preprocess_method2=None,
         norm_params2=None):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path))
    print('Loading nifti from {}...'.format(input_path))
    nifti = read_img(input_path)
    print('Predicting mask...')
    data = nifti.get_fdata().astype(float)

    data = preproc_and_norm(data, preprocess_method, norm_params)

    prediction = \
        patch_wise_prediction(model=model,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config["patch_shape"] + [config["patch_depth"]])
    # patch_wise_prediction returns a numpy array; wrap it as a NIfTI image
    # (reusing the input scan's affine) so nib.save can write it.
    nib.save(nib.Nifti1Image(prediction.squeeze(), nifti.affine),
             os.path.join(output_path, 'pred.nii'))

    print('Post-processing mask...')
    if prediction.shape[-1] > 1:
        prediction = prediction[..., 1]
    prediction = prediction.squeeze()
    print("Storing prediction in [7-9], 7 should be the best...")
    mask = process_pred(prediction, gaussian_std=0.5,
                        threshold=0.5)  # .astype(np.uint8)

    if config2 is not None:
        print('Making secondary prediction... [6]')
        prediction = secondary_prediction(
            mask,
            vol=nifti.get_fdata().astype(float),
            config2=config2,
            model2_path=model2_path,
            preprocess_method2=preprocess_method2,
            norm_params2=norm_params2,
            overlap_factor=0.9)
        # Wrap the numpy prediction as a NIfTI image before saving.
        nib.save(nib.Nifti1Image(prediction, nifti.affine),
                 os.path.join(output_path, 'pred_roi.nii'))

    print('Saving to {}'.format(output_path))
    print('Finished.')
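
The nibabel wrappers read_img, get_image, and save_nifti that these examples rely on might look like the sketch below; the identity affine default is an assumption, and the real helpers may carry the source scan's affine instead.

import nibabel as nib
import numpy as np

def read_img(path):
    # Load a NIfTI image from disk.
    return nib.load(path)

def get_image(data, affine=None):
    # Wrap a numpy array as a NIfTI image (identity affine by default).
    return nib.Nifti1Image(data, np.eye(4) if affine is None else affine)

def save_nifti(data, path, affine=None):
    nib.save(get_image(data, affine), path)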
Example #7
def main(input_mat_path, output_mat_path, config, overlap_factor, model_path, preprocess_method=None, norm_params=None):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path))
    print('Loading mat from {}...'.format(input_mat_path))
    mat = loadmat(input_mat_path)
    print('Predicting mask...')
    data = mat['volume'].astype(float)

    if preprocess_method is not None:
        print('Applying preprocess by {}...'.format(preprocess_method))
        if preprocess_method == 'window_1_99':
            data = window_intensities_data(data)
        else:
            raise Exception('Unknown preprocess: {}'.format(preprocess_method))

    if norm_params is not None and any(norm_params.values()):
        data = normalize_data(data, mean=norm_params['mean'], std=norm_params['std'])

    prediction = \
        patch_wise_prediction(model=model,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config["patch_shape"] + [config["patch_depth"]])

    print('Post-processing mask...')
    if prediction.shape[-1] > 1:
        prediction = prediction[..., 1]
    prediction = prediction.squeeze()
    mat['masks'][0, 9] = \
        process_pred(prediction, gaussian_std=0, threshold=0.2)  # .astype(np.uint8)
    mat['masks'][0, 8] = \
        process_pred(prediction, gaussian_std=1, threshold=0.5)  # .astype(np.uint8)
    mat['masks'][0, 7] = \
        process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)
    print('Saving mat to {}'.format(output_mat_path))
    savemat(output_mat_path, mat)
    print('Finished.')
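
The 'window_1_99' preprocess name suggests clipping intensities to the 1st and 99th percentiles; a sketch of window_intensities_data under that assumption:

import numpy as np

def window_intensities_data(data, low=1, high=99):
    # Clip to the given percentile window; the 1/99 defaults follow the
    # 'window_1_99' name and are an assumption.
    lo, hi = np.percentile(data, [low, high])
    return np.clip(data, lo, hi)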
Example #8
def get_config_and_model(configs_folder, model_folder):
    with open(os.path.join(configs_folder, model_folder,
                           CONFIG_FILENAME)) as f:
        config = json.load(f)
    model = load_old_model(get_last_model_path(config["model_file"]))
    return config, model
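
get_last_model_path evidently globs for checkpoints saved under a common prefix (Examples #9 and #13 test glob.glob(config["model_file"] + '*.h5')) and returns the latest one. A sketch; picking by modification time is an assumption:

import glob
import os

def get_last_model_path(model_file_prefix):
    # Newest checkpoint matching the prefix; sorting by name would also
    # work if the checkpoints embed an epoch number.
    return max(glob.glob(model_file_prefix + '*.h5'), key=os.path.getmtime)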
Example #9
def main(overwrite=False):
    # convert input images into an hdf5 file
    if overwrite or not os.path.exists(config["data_file"]):
        create_data_file(config)

    data_file_opened = open_data_file(config["data_file"])

    if not overwrite and len(glob.glob(config["model_file"] + '*.h5')) > 0:
        model_path = get_last_model_path(config["model_file"])
        print('Loading model from: {}'.format(model_path))
        model = load_old_model(model_path)
    else:
        # instantiate new model
        loss_func = getattr(fetal_net.metrics, config['loss'])
        model_func = getattr(fetal_net.model, config['model_name'])
        model = model_func(
            input_shape=config["input_shape"],
            initial_learning_rate=config["initial_learning_rate"],
            **{
                'dropout_rate': config['dropout_rate'],
                'loss_function': loss_func,
                # TODO: change to output shape
                'mask_shape': None if config["weight_mask"] is None else config["input_shape"],
                'old_model_path': config['old_model']
            })
    model.summary()

    # get training and testing generators
    train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
        data_file_opened,
        batch_size=config["batch_size"],
        data_split=config["validation_split"],
        overwrite=overwrite,
        validation_keys_file=config["validation_file"],
        training_keys_file=config["training_file"],
        test_keys_file=config["test_file"],
        n_labels=config["n_labels"],
        labels=config["labels"],
        patch_shape=(*config["patch_shape"], config["patch_depth"]),
        validation_batch_size=config["validation_batch_size"],
        augment=config["augment"],
        skip_blank_train=config["skip_blank_train"],
        skip_blank_val=config["skip_blank_val"],
        truth_index=config["truth_index"],
        truth_size=config["truth_size"],
        prev_truth_index=config["prev_truth_index"],
        prev_truth_size=config["prev_truth_size"],
        truth_downsample=config["truth_downsample"],
        truth_crop=config["truth_crop"],
        patches_per_epoch=config["patches_per_epoch"],
        categorical=config["categorical"],
        is3d=config["3D"],
        drop_easy_patches_train=config["drop_easy_patches_train"],
        drop_easy_patches_val=config["drop_easy_patches_val"])

    # run training
    train_model(model=model,
                model_file=config["model_file"],
                training_generator=train_generator,
                validation_generator=validation_generator,
                steps_per_epoch=n_train_steps,
                validation_steps=n_validation_steps,
                initial_learning_rate=config["initial_learning_rate"],
                learning_rate_drop=config["learning_rate_drop"],
                learning_rate_patience=config["patience"],
                early_stopping_patience=config["early_stop"],
                n_epochs=config["n_epochs"],
                output_folder=config["base_dir"])
    data_file_opened.close()
Example #10
def main(pred_dir,
         config,
         split='test',
         overlap_factor=1,
         preprocess_method=None):
    padding = [16, 16, 8]
    prediction2_dir = os.path.abspath(
        os.path.join(config['base_dir'], 'predictions2', split))
    model = load_old_model(get_last_model_path(config["model_file"]))
    # `opts` is assumed to be a module-level argparse namespace defined
    # elsewhere in this script (as is `original_data_folder`, used below).
    with open(os.path.join(opts.config_dir, 'norm_params.json'), 'r') as f:
        norm_params = json.load(f)

    for sample_folder in glob(os.path.join(pred_dir, split, '*')):
        mask_path = os.path.join(sample_folder, 'prediction.nii.gz')
        truth_path = os.path.join(sample_folder, 'truth.nii.gz')

        subject_id = Path(sample_folder).name
        dest_folder = os.path.join(prediction2_dir, subject_id)
        Path(dest_folder).mkdir(parents=True, exist_ok=True)

        truth = nib.load(truth_path)
        nib.save(truth, os.path.join(dest_folder, Path(truth_path).name))

        mask = nib.load(mask_path)
        mask = process_pred(mask.get_fdata(), gaussian_std=0.5, threshold=0.5)
        bbox_start, bbox_end = find_bounding_box(mask)
        check_bounding_box(mask, bbox_start, bbox_end)
        if padding is not None:
            bbox_start = np.maximum(bbox_start - padding, 0)
            bbox_end = np.minimum(bbox_end + padding, mask.shape)
        print("BBox: {}-{}".format(bbox_start, bbox_end))

        volume = nib.load(
            os.path.join(original_data_folder, subject_id, 'volume.nii'))
        orig_volume_shape = np.array(volume.shape)
        volume = cut_bounding_box(volume, bbox_start,
                                  bbox_end).get_fdata().astype(float)

        if preprocess_method is not None:
            print('Applying preprocess by {}...'.format(preprocess_method))
            if preprocess_method == 'window_1_99':
                volume = window_intensities_data(volume)
            else:
                raise Exception(
                    'Unknown preprocess: {}'.format(preprocess_method))

        if norm_params is not None and any(norm_params.values()):
            volume = normalize_data(volume,
                                    mean=norm_params['mean'],
                                    std=norm_params['std'])

        prediction = patch_wise_prediction(model=model,
                                           data=np.expand_dims(volume, 0),
                                           patch_shape=config["patch_shape"] +
                                           [config["patch_depth"]],
                                           overlap_factor=overlap_factor)
        prediction = prediction.squeeze()

        padding2 = list(zip(bbox_start, orig_volume_shape - bbox_end))
        print(padding2)
        prediction = np.pad(prediction,
                            padding2,
                            mode='constant',
                            constant_values=0)
        assert all(
            [s1 == s2 for s1, s2 in zip(prediction.shape, orig_volume_shape)])
        prediction = get_image(prediction)
        nib.save(prediction, os.path.join(dest_folder, Path(mask_path).name))
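
cut_bounding_box takes a nibabel image plus box coordinates and returns a cropped image; nibabel's slicer attribute provides exactly that while keeping the affine consistent, so a sketch could be:

def cut_bounding_box(img, bbox_start, bbox_end):
    # Crop a nibabel spatial image to the bounding box.
    return img.slicer[bbox_start[0]:bbox_end[0],
                      bbox_start[1]:bbox_end[1],
                      bbox_start[2]:bbox_end[2]]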
Example #11
step_1_network_experiments_paths = ['/datadrive/configs/...', '/datadrive/configs/...', '/datadrive/configs/...']
step_2_network_experiments_paths = ['/datadrive/configs/...']
output_prediction_dir = r""
if not os.path.exists(output_prediction_dir):
    os.mkdir(output_prediction_dir)
#subject_ids = ['40']
subject_index = []
overlap_factor = 0.9

hdf5_file = r""
data_file = tables.open_file(hdf5_file, "a")
filters = tables.Filters(complevel=5, complib='blosc')
pred_storage = data_file.create_vlarray(data_file.root, 'pred', tables.ObjectAtom(), filters=filters,
                                        expectedrows=len(subject_index)) # TODO: needs to be same length as other arrays

step_1_networks = [load_old_model(get_last_model_path(os.path.join(exp_folder, "fetal_net_model")))
                   for exp_folder in step_1_network_experiments_paths]

step_1_configs = []
n_step_1_models = len(step_1_networks)
for i in range(n_step_1_models):
    with open(os.path.join(step_1_network_experiments_paths[i], 'config.json')) as f:
        config = json.load(f)
        step_1_configs.append(config)

step_2_network = [load_old_model(get_last_model_path(os.path.join(exp_folder, "fetal_net_model")))
                  for exp_folder in step_2_network_experiments_paths]
with open(os.path.join(step_2_network_experiments_paths[0], 'config.json')) as f:
    step_2_config = json.load(f)
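
The prediction loop that would consume these step-1 networks is not shown on this page. A hedged sketch of how their soft patch-wise predictions could be averaged into a single ensemble prediction:

import numpy as np

def ensemble_predict(volume, networks, configs, overlap_factor=0.9):
    # Average the soft predictions of all step-1 models; a sketch of
    # what the omitted loop might do, not the author's actual code.
    preds = [patch_wise_prediction(
                 model=m,
                 data=np.expand_dims(volume, 0),
                 overlap_factor=overlap_factor,
                 patch_shape=c["patch_shape"] + [c["patch_depth"]]).squeeze()
             for m, c in zip(networks, configs)]
    return np.mean(preds, axis=0)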

########################### Step 1 ###########################
Example #12
from keras import Model
from fetal_net.training import load_old_model

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model_path",
                    help="specifies model path",
                    type=str,
                    required=True)
opts = parser.parse_args()

from_metric = 'acc'
to_metric = 'binary_accuracy'

model: Model = load_old_model(opts.model_path)
model.compile(
    optimizer=model.optimizer,
    loss=model.loss,
    metrics=[to_metric if m == from_metric else m for m in model.metrics])

model.save(opts.model_path)
Example #13
def main_train(config, overwrite=False):
    # convert input images into an hdf5 file
    if overwrite or not os.path.exists(config["data_file"]):
        print("Writing h5 file")
        training_files, subject_ids = fetch_training_data_files(
            return_subject_ids=True)

        _, (mean, std) = write_data_to_file(training_files,
                                            config["data_file"],
                                            subject_ids=subject_ids,
                                            normalize=config['normalization'],
                                            add_pred=config['pred_size'])
        with open(os.path.join(config["base_dir"], 'norm_params.json'),
                  mode='w') as f:
            json.dump({'mean': mean, 'std': std}, f)

    data_file_opened = open_data_file(config["data_file"])

    if not overwrite and len(glob.glob(config["model_file"] + '*.h5')) > 0:
        model_path = get_last_model_path(config["model_file"])
        print('Loading model from: {}'.format(model_path))
        model = load_old_model(model_path)
    else:
        # instantiate new model
        loss_func = getattr(fetal_net.metrics, config['loss'])
        model_func = getattr(fetal_net.model, config['model_name'])
        model = model_func(
            input_shape=config["input_shape"],
            initial_learning_rate=config["initial_learning_rate"],
            **{
                'dropout_rate': config['dropout_rate'],
                'loss_function': loss_func,
                'depth': config['model_params']['depth'],
                'n_base_filters': config['model_params']['n_base_filters'],
                'old_model_path': config['old_model'],
                'truth_index': config['truth_index'],
                'truth_size': config['truth_size']
            })
    model.summary()

    # get training and testing generators
    train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
        data_file_opened,
        batch_size=config["batch_size"],
        data_split=config["validation_split"],
        overwrite=overwrite,
        validation_keys_file=config["validation_file"],
        training_keys_file=config["training_file"],
        test_keys_file=config["test_file"],
        n_labels=config["n_labels"],
        labels=config["labels"],
        patch_shape=(*config["patch_shape"], config["patch_depth"]),
        validation_batch_size=config["validation_batch_size"],
        augment=config["augment"],
        skip_blank_train=config["skip_blank_train"],
        skip_blank_val=config["skip_blank_val"],
        truth_index=config["truth_index"],
        truth_size=config["truth_size"],
        prev_truth_index=config["prev_truth_index"],
        prev_truth_size=config["prev_truth_size"],
        pred_index=config["pred_index"],
        pred_size=config["pred_size"],
        truth_downsample=config["truth_downsample"],
        truth_crop=config["truth_crop"],
        patches_per_epoch=config["patches_per_epoch"],
        categorical=config["categorical"],
        is3d=config["3D"],
        drop_easy_patches_train=config["drop_easy_patches_train"],
        drop_easy_patches_val=config["drop_easy_patches_val"])

    # run training
    train_model(model=model,
                model_file=config["model_file"],
                training_generator=train_generator,
                validation_generator=validation_generator,
                steps_per_epoch=n_train_steps,
                validation_steps=n_validation_steps,
                initial_learning_rate=config["initial_learning_rate"],
                learning_rate_drop=config["learning_rate_drop"],
                learning_rate_patience=config["patience"],
                early_stopping_patience=config["early_stop"],
                n_epochs=config["n_epochs"],
                output_folder=config["base_dir"])
    data_file_opened.close()
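
fetch_training_data_files is assumed to collect one (volume, truth) path tuple per subject. A sketch, assuming one folder per subject holding volume.nii and truth.nii.gz (the layout Example #10 reads) under an assumed config["scans_dir"] key:

import glob
import os

def fetch_training_data_files(return_subject_ids=False):
    # Sketch only: config["scans_dir"] is an assumed key naming the folder
    # that holds one sub-folder per subject.
    training_files, subject_ids = [], []
    for subject_dir in sorted(glob.glob(os.path.join(config["scans_dir"], '*'))):
        training_files.append((os.path.join(subject_dir, 'volume.nii'),
                               os.path.join(subject_dir, 'truth.nii.gz')))
        subject_ids.append(os.path.basename(subject_dir))
    if return_subject_ids:
        return training_files, subject_ids
    return training_files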