Example #1
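# NOTE: These snippets share the imports and helpers of their surrounding
# project, e.g. numpy as np, nibabel as nib, os, json, glob, tables,
# scipy.io's loadmat/savemat, and project functions such as load_old_model,
# get_last_model_path, patch_wise_prediction, find_bounding_box and
# process_pred. Those definitions are assumed here, not shown.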
def secondary_prediction(mask,
                         vol,
                         config2,
                         model2_path=None,
                         preprocess_method2=None,
                         norm_params2=None,
                         overlap_factor=0.9):
    model2 = load_old_model(get_last_model_path(model2_path))
    # Locate the ROI from the first-stage mask.
    bbox_start, bbox_end = find_bounding_box(mask)
    check_bounding_box(mask, bbox_start, bbox_end)
    # Dilate the bounding box by a fixed margin, clipped to the volume bounds.
    padding = [16, 16, 8]
    bbox_start = np.maximum(bbox_start - padding, 0)
    bbox_end = np.minimum(bbox_end + padding, mask.shape)
    # Crop the volume to the padded ROI.
    data = vol.astype(np.float64)[bbox_start[0]:bbox_end[0],
                                  bbox_start[1]:bbox_end[1],
                                  bbox_start[2]:bbox_end[2]]

    data = preproc_and_norm(data, preprocess_method2, norm_params2)

    prediction = \
        patch_wise_prediction(model=model2,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config2["patch_shape"] + [config2["patch_depth"]])
    prediction = prediction.squeeze()
    # Pad the ROI prediction back out to the full input volume shape.
    padding2 = list(zip(bbox_start, np.array(vol.shape) - bbox_end))
    print(padding2)
    print(prediction.shape)
    prediction = np.pad(prediction,
                        padding2,
                        mode='constant',
                        constant_values=0)
    return prediction
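
# Minimal usage sketch for secondary_prediction(); the arrays, config keys
# and model path below are illustrative assumptions, not values from the
# original project.
if __name__ == '__main__':
    import numpy as np
    first_stage_mask = np.zeros((128, 128, 32), dtype=np.uint8)
    first_stage_mask[40:80, 40:80, 10:20] = 1  # pretend first-stage ROI
    volume = np.random.rand(128, 128, 32)
    refined = secondary_prediction(first_stage_mask,
                                   vol=volume,
                                   config2={'patch_shape': [96, 96],
                                            'patch_depth': 5},
                                   model2_path='step2_model',  # hypothetical
                                   overlap_factor=0.9)
    assert refined.shape == volume.shape  # padded back to the input shape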
Example #2
def main(input_mat_path,
         output_mat_path,
         overlap_factor,
         config,
         model_path,
         preprocess_method=None,
         norm_params=None,
         config2=None,
         model2_path=None,
         preprocess_method2=None,
         norm_params2=None):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path))
    print('Loading mat from {}...'.format(input_mat_path))
    mat = loadmat(input_mat_path)
    print('Predicting mask...')
    data = mat['volume'].astype(np.float64)

    data = preproc_and_norm(data, preprocess_method, norm_params)

    prediction = \
        patch_wise_prediction(model=model,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config["patch_shape"] + [config["patch_depth"]])

    print('Post-processing mask...')
    if prediction.shape[-1] > 1:
        prediction = prediction[..., 1]
    prediction = prediction.squeeze()
    print("Storing prediction in [7-9], 7 should be the best...")
    mat['masks'][0, 9] = \
        process_pred(prediction, gaussian_std=0, threshold=0.2)  # .astype(np.uint8)
    mat['masks'][0, 8] = \
        process_pred(prediction, gaussian_std=1, threshold=0.5)  # .astype(np.uint8)
    mat['masks'][0, 7] = \
        process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)

    if config2 is not None:
        print('Making secondary prediction... [6]')
        prediction = secondary_prediction(
            mat['masks'][0, 7],
            vol=mat['volume'].astype(np.float64),
            config2=config2,
            model2_path=model2_path,
            preprocess_method2=preprocess_method2,
            norm_params2=norm_params2,
            overlap_factor=0.9)
        mat['masks'][0, 6] = \
            process_pred(prediction, gaussian_std=0, threshold=0.2)  # .astype(np.uint8)
        mat['masks'][0, 5] = \
            process_pred(prediction, gaussian_std=1, threshold=0.5)  # .astype(np.uint8)
        mat['masks'][0, 4] = \
            process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)

    print('Saving mat to {}'.format(output_mat_path))
    savemat(output_mat_path, mat)
    print('Finished.')
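
# Slot convention used above: the primary prediction fills
# mat['masks'][0, 7..9] and the optional secondary (ROI-refined) prediction
# fills mat['masks'][0, 4..6], each slot with a different
# (gaussian_std, threshold) post-processing pair.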
Example #3
def main(input_path,
         output_path,
         overlap_factor,
         config,
         model_path,
         preprocess_method=None,
         norm_params=None,
         config2=None,
         model2_path=None,
         preprocess_method2=None,
         norm_params2=None):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path))
    print('Loading nifti from {}...'.format(input_path))
    nifti = read_img(input_path)
    print('Predicting mask...')
    data = nifti.get_fdata().astype(np.float64)

    data = preproc_and_norm(data, preprocess_method, norm_params)

    prediction = \
        patch_wise_prediction(model=model,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config["patch_shape"] + [config["patch_depth"]])
    # patch_wise_prediction returns a numpy array; wrap it in a Nifti1Image
    # (reusing the input affine) so nib.save() can write it.
    nib.save(nib.Nifti1Image(prediction.squeeze(), nifti.affine),
             os.path.join(output_path, 'pred.nii'))

    print('Post-processing mask...')
    if prediction.shape[-1] > 1:
        prediction = prediction[..., 1]
    prediction = prediction.squeeze()
    print("Storing prediction in [7-9], 7 should be the best...")
    mask = process_pred(prediction, gaussian_std=0.5,
                        threshold=0.5)  # .astype(np.uint8)

    if config2 is not None:
        print('Making secondary prediction... [6]')
        prediction = secondary_prediction(
            mask,
            vol=nifti.get_fdata().astype(np.float64),
            config2=config2,
            model2_path=model2_path,
            preprocess_method2=preprocess_method2,
            norm_params2=norm_params2,
            overlap_factor=0.9)
        # secondary_prediction() returns a numpy array; wrap it before saving.
        nib.save(nib.Nifti1Image(prediction, nifti.affine),
                 os.path.join(output_path, 'pred_roi.nii'))

    print('Saving to {}'.format(output_path))
    print('Finished.')
Example #4
def run_validation_cases(validation_keys_file,
                         model_file,
                         training_modalities,
                         hdf5_file,
                         patch_shape,
                         output_dir=".",
                         overlap_factor=0,
                         permute=False,
                         is3d=False,
                         prev_truth_index=None,
                         prev_truth_size=None,
                         pred_index=None,
                         pred_size=None,
                         use_augmentations=False,
                         scale_xy=None,
                         resolution_file=''):
    file_names = []
    validation_indices = pickle_load(validation_keys_file)
    # validation_indices = [23, 24, 7]  # 2
    # validation_indices = [5, 11, 15] # 1
    # validation_indices = [23, 5, 6] # 0
    model = load_old_model(get_last_model_path(model_file))
    data_file = tables.open_file(hdf5_file, "r")
    for index in validation_indices:
        if 'subject_ids' in data_file.root:
            case_directory = os.path.join(
                output_dir, data_file.root.subject_ids[index].decode('utf-8'))
        else:
            case_directory = os.path.join(output_dir,
                                          "validation_case_{}".format(index))
        file_names.append(
            run_validation_case(data_index=index,
                                output_dir=case_directory,
                                model=model,
                                data_file=data_file,
                                training_modalities=training_modalities,
                                overlap_factor=overlap_factor,
                                is3d=is3d,
                                permute=permute,
                                patch_shape=patch_shape,
                                prev_truth_index=prev_truth_index,
                                prev_truth_size=prev_truth_size,
                                pred_index=pred_index,
                                pred_size=pred_size,
                                use_augmentations=use_augmentations,
                                scale_xy=scale_xy,
                                resolution_file=resolution_file))
    data_file.close()
    return file_names
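
# Minimal call sketch (every path and value below is a hypothetical
# placeholder for illustration):
# run_validation_cases(validation_keys_file='validation_ids.pkl',
#                      model_file='fetal_net_model',
#                      training_modalities=['volume'],
#                      hdf5_file='fetal_data.h5',
#                      patch_shape=(96, 96, 5),
#                      output_dir='predictions/validation',
#                      overlap_factor=0.8)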
Example #5
def main(input_mat_path,
         output_mat_path,
         config,
         overlap_factor,
         model_path,
         preprocess_method=None,
         norm_params=None):
    print(model_path)
    model = load_old_model(get_last_model_path(model_path))
    print('Loading mat from {}...'.format(input_mat_path))
    mat = loadmat(input_mat_path)
    print('Predicting mask...')
    data = mat['volume'].astype(np.float64)

    if preprocess_method is not None:
        print('Applying preprocess by {}...'.format(preprocess_method))
        if preprocess_method == 'window_1_99':
            data = window_intensities_data(data)
        else:
            raise Exception('Unknown preprocess: {}'.format(preprocess_method))

    if norm_params is not None and any(norm_params.values()):
        data = normalize_data(data, mean=norm_params['mean'], std=norm_params['std'])

    prediction = \
        patch_wise_prediction(model=model,
                              data=np.expand_dims(data, 0),
                              overlap_factor=overlap_factor,
                              patch_shape=config["patch_shape"] + [config["patch_depth"]])

    print('Post-processing mask...')
    if prediction.shape[-1] > 1:
        prediction = prediction[..., 1]
    prediction = prediction.squeeze()
    mat['masks'][0, 9] = \
        process_pred(prediction, gaussian_std=0, threshold=0.2)  # .astype(np.uint8)
    mat['masks'][0, 8] = \
        process_pred(prediction, gaussian_std=1, threshold=0.5)  # .astype(np.uint8)
    mat['masks'][0, 7] = \
        process_pred(prediction, gaussian_std=0.5, threshold=0.5)  # .astype(np.uint8)
    print('Saving mat to {}'.format(output_mat_path))
    savemat(output_mat_path, mat)
    print('Finished.')
Example #6
def main(pred_dir,
         config,
         split='test',
         overlap_factor=1,
         preprocess_method=None):
    padding = [16, 16, 8]
    prediction2_dir = os.path.abspath(
        os.path.join(config['base_dir'], 'predictions2', split))
    model = load_old_model(get_last_model_path(config["model_file"]))
    # NOTE: `opts` is assumed to be a module-level argparse namespace in the
    # original script; it is not defined in this snippet.
    with open(os.path.join(opts.config_dir, 'norm_params.json'), 'r') as f:
        norm_params = json.load(f)

    for sample_folder in glob(os.path.join(pred_dir, split, '*')):
        mask_path = os.path.join(sample_folder, 'prediction.nii.gz')
        truth_path = os.path.join(sample_folder, 'truth.nii.gz')

        subject_id = Path(sample_folder).name
        dest_folder = os.path.join(prediction2_dir, subject_id)
        Path(dest_folder).mkdir(parents=True, exist_ok=True)

        truth = nib.load(truth_path)
        nib.save(truth, os.path.join(dest_folder, Path(truth_path).name))

        mask = nib.load(mask_path)
        mask = process_pred(mask.get_fdata(), gaussian_std=0.5, threshold=0.5)
        bbox_start, bbox_end = find_bounding_box(mask)
        check_bounding_box(mask, bbox_start, bbox_end)
        if padding is not None:
            bbox_start = np.maximum(bbox_start - padding, 0)
            bbox_end = np.minimum(bbox_end + padding, mask.shape)
        print("BBox: {}-{}".format(bbox_start, bbox_end))

        # NOTE: `original_data_folder` is assumed to be a module-level
        # constant in the original script; it is not defined in this snippet.
        volume = nib.load(
            os.path.join(original_data_folder, subject_id, 'volume.nii'))
        orig_volume_shape = np.array(volume.shape)
        volume = cut_bounding_box(volume, bbox_start,
                                  bbox_end).get_fdata().astype(np.float64)

        if preprocess_method is not None:
            print('Applying preprocess by {}...'.format(preprocess_method))
            if preprocess_method == 'window_1_99':
                volume = window_intensities_data(volume)
            else:
                raise Exception(
                    'Unknown preprocess: {}'.format(preprocess_method))

        if norm_params is not None and any(norm_params.values()):
            volume = normalize_data(volume,
                                    mean=norm_params['mean'],
                                    std=norm_params['std'])

        prediction = patch_wise_prediction(model=model,
                                           data=np.expand_dims(volume, 0),
                                           patch_shape=config["patch_shape"] +
                                           [config["patch_depth"]],
                                           overlap_factor=overlap_factor)
        prediction = prediction.squeeze()

        padding2 = list(zip(bbox_start, orig_volume_shape - bbox_end))
        print(padding2)
        prediction = np.pad(prediction,
                            padding2,
                            mode='constant',
                            constant_values=0)
        assert tuple(prediction.shape) == tuple(orig_volume_shape)
        prediction = get_image(prediction)
        nib.save(prediction, os.path.join(dest_folder, Path(mask_path).name))
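
# Expected on-disk layout (inferred from the glob patterns above):
#   <pred_dir>/<split>/<subject_id>/prediction.nii.gz   first-stage mask
#   <pred_dir>/<split>/<subject_id>/truth.nii.gz        ground truth
# Refined predictions are written to
#   <config['base_dir']>/predictions2/<split>/<subject_id>/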
Example #7
step_1_network_experiments_paths = ['/datadrive/configs/...', '/datadrive/configs/...', '/datadrive/configs/...']
step_2_network_experiments_paths = ['/datadrive/configs/...']
output_prediction_dir = r""
if not os.path.exists(output_prediction_dir):
    os.mkdir(output_prediction_dir)
#subject_ids = ['40']
subject_index = []
overlap_factor = 0.9

hdf5_file = r""
data_file = tables.open_file(hdf5_file, "a")
filters = tables.Filters(complevel=5, complib='blosc')
pred_storage = data_file.create_vlarray(data_file.root, 'pred', tables.ObjectAtom(), filters=filters,
                                        expectedrows=len(subject_index)) # TODO: needs to be same length as other arrays

step_1_networks = [load_old_model(get_last_model_path(os.path.join(exp_folder, "fetal_net_model")))
                   for exp_folder in step_1_network_experiments_paths]

step_1_configs = []
for exp_folder in step_1_network_experiments_paths:
    with open(os.path.join(exp_folder, 'config.json')) as f:
        step_1_configs.append(json.load(f))
n_step_1_models = len(step_1_networks)

step_2_network = [load_old_model(get_last_model_path(os.path.join(exp_folder, "fetal_net_model")))
                  for exp_folder in step_2_network_experiments_paths]
with open(os.path.join(step_2_network_experiments_paths[0], 'config.json')) as f:
    step_2_config = json.load(f)

########################### Step 1 ###########################
Example #8
def main_train(config, overwrite=False):
    # convert input images into an hdf5 file
    if overwrite or not os.path.exists(config["data_file"]):
        print("Writing h5 file")
        training_files, subject_ids = fetch_training_data_files(
            return_subject_ids=True)

        _, (mean, std) = write_data_to_file(training_files,
                                            config["data_file"],
                                            subject_ids=subject_ids,
                                            normalize=config['normalization'],
                                            add_pred=config['pred_size'])
        with open(os.path.join(config["base_dir"], 'norm_params.json'),
                  mode='w') as f:
            json.dump({'mean': mean, 'std': std}, f)

    data_file_opened = open_data_file(config["data_file"])

    if not overwrite and len(glob.glob(config["model_file"] + '*.h5')) > 0:
        model_path = get_last_model_path(config["model_file"])
        print('Loading model from: {}'.format(model_path))
        model = load_old_model(model_path)
    else:
        # instantiate new model
        loss_func = getattr(fetal_net.metrics, config['loss'])
        model_func = getattr(fetal_net.model, config['model_name'])
        model = model_func(
            input_shape=config["input_shape"],
            initial_learning_rate=config["initial_learning_rate"],
            **{
                'dropout_rate': config['dropout_rate'],
                'loss_function': loss_func,
                'depth': config['model_params']['depth'],
                'n_base_filters': config['model_params']['n_base_filters'],
                'old_model_path': config['old_model'],
                'truth_index': config['truth_index'],
                'truth_size': config['truth_size']
            })
    model.summary()

    # get training and testing generators
    train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
        data_file_opened,
        batch_size=config["batch_size"],
        data_split=config["validation_split"],
        overwrite=overwrite,
        validation_keys_file=config["validation_file"],
        training_keys_file=config["training_file"],
        test_keys_file=config["test_file"],
        n_labels=config["n_labels"],
        labels=config["labels"],
        patch_shape=(*config["patch_shape"], config["patch_depth"]),
        validation_batch_size=config["validation_batch_size"],
        augment=config["augment"],
        skip_blank_train=config["skip_blank_train"],
        skip_blank_val=config["skip_blank_val"],
        truth_index=config["truth_index"],
        truth_size=config["truth_size"],
        prev_truth_index=config["prev_truth_index"],
        prev_truth_size=config["prev_truth_size"],
        pred_index=config["pred_index"],
        pred_size=config["pred_size"],
        truth_downsample=config["truth_downsample"],
        truth_crop=config["truth_crop"],
        patches_per_epoch=config["patches_per_epoch"],
        categorical=config["categorical"],
        is3d=config["3D"],
        drop_easy_patches_train=config["drop_easy_patches_train"],
        drop_easy_patches_val=config["drop_easy_patches_val"])

    # run training
    train_model(model=model,
                model_file=config["model_file"],
                training_generator=train_generator,
                validation_generator=validation_generator,
                steps_per_epoch=n_train_steps,
                validation_steps=n_validation_steps,
                initial_learning_rate=config["initial_learning_rate"],
                learning_rate_drop=config["learning_rate_drop"],
                learning_rate_patience=config["patience"],
                early_stopping_patience=config["early_stop"],
                n_epochs=config["n_epochs"],
                output_folder=config["base_dir"])
    data_file_opened.close()
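
# Sketch of the config keys main_train() reads above; the values shown are
# hypothetical placeholders, not the project's defaults:
# config = {
#     "data_file": "fetal_data.h5", "model_file": "fetal_net_model",
#     "base_dir": ".", "normalization": "all", "pred_size": 0,
#     "loss": "dice_coefficient_loss", "model_name": "unet_model_3d",
#     "input_shape": (1, 96, 96, 5), "initial_learning_rate": 5e-4,
#     "patch_shape": (96, 96), "patch_depth": 5, "batch_size": 2,
#     "n_epochs": 50, ...  # plus the generator/training keys used above
# }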