Example 1
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '3'

from deepneuro.data.data_collection import DataCollection
from deepneuro.augmentation.augment import ExtractPatches
from deepneuro.postprocessing.label import BinarizeLabel
from deepneuro.preprocessing.signal import ZeroMeanNormalization
from deepneuro.models.weighted_cat_cross_entropy import WeightedCategoricalCrossEntropy
from deepneuro.models.unet import UNet

TrainingDataCollection = DataCollection(data_sources={'csv': 'Metastases_Data_Train.csv'})
TestingDataCollection = DataCollection(data_sources={'csv': 'Metastases_Data_Test.csv'})

Normalization = ZeroMeanNormalization(data_groups=['input_data'])
TrainingDataCollection.append_preprocessor(Normalization)

# Patch-sampling conditions: nonzero input voxels define the brain region,
# and label 1 in the ground truth defines the tumor region.
def BrainRegion(data):
    return data['input_data'] != 0

def TumorRegion(data):
    return data['ground_truth'] == 1

PatchAugmentation = ExtractPatches(patch_shape=(32, 32, 32), 
    patch_region_conditions=[[BrainRegion, 0.70], [TumorRegion, 0.30]])
TrainingDataCollection.append_augmentation(PatchAugmentation, multiplier=20)
TrainingDataCollection.write_data_to_file('training_data.hdf5')

ModelParameters = {'input_shape': (32, 32, 32, 1),
                   'cost_function': 'weighted_categorical_label'}
UNETModel = UNet(**ModelParameters)
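The example stops after constructing the model. A minimal continuation sketch, assuming DeepNeuro's model classes expose a train method; the method name, keyword arguments, and output path below are assumptions for illustration, not part of the original example:

# Hypothetical training call -- the method and its keyword arguments are
# assumptions about the DeepNeuro model interface, not taken from the original.
TrainingParameters = {'input_groups': ['input_data', 'ground_truth'],
                      'output_model_filepath': 'metastases_unet.h5',
                      'training_batch_size': 32}
UNETModel.train(TrainingDataCollection, **TrainingParameters)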
Example 2
import os

# Module paths below are assumed from DeepNeuro's package layout and may need
# adjusting for the installed version.
from deepneuro.data.data_collection import DataCollection
from deepneuro.preprocessing.preprocessor import Preprocessor
from deepneuro.preprocessing.signal import N4BiasCorrection, ZeroMeanNormalization
from deepneuro.preprocessing.transform import Resample, Coregister
from deepneuro.postprocessing.label import BinarizeLabel, LargestComponents, FillHoles
from deepneuro.outputs.inference import ModelPatchesInference
from deepneuro.models.model import load_old_model
from deepneuro.load import load


def skull_strip(output_folder,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                normalized=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                mask_output='skullstrip_mask.nii.gz',
                verbose=True):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    input_data = {'input_modalities': [FLAIR, T1POST]}

    if ground_truth is not None:
        input_data['ground_truth'] = [ground_truth]

    if input_directory is None:

        if any(modality is None for modality in input_data['input_modalities']):
            raise ValueError("Cannot skull-strip. Please specify both the T1POST and FLAIR modalities.")

        data_collection = DataCollection(verbose=verbose)
        data_collection.add_case(input_data, case_name=output_folder)

    else:
        data_collection = DataCollection(input_directory, modality_dict=input_data, verbose=verbose)
        data_collection.fill_data_groups()

    #--------------------------------------------------------------------#
    # Step 2, Preprocess Data
    #--------------------------------------------------------------------#

    if not preprocessed:
        print('Preprocessing data...')

        # The base Preprocessor converts input DICOMs to NIfTI files for further processing.
        preprocessing_steps = [Preprocessor(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not bias_corrected:
            preprocessing_steps += [N4BiasCorrection(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not resampled:
            preprocessing_steps += [Resample(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_modalities'], save_output=(save_preprocess or save_all_steps), verbose=verbose, output_folder=output_folder, reference_channel=0)]

        if not normalized:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 3, Skullstripping
    #--------------------------------------------------------------------#

    skullstrip_prediction_parameters = {'inputs': ['input_modalities'], 
                        'output_filename': os.path.join(output_folder, mask_output),
                        'batch_size': 25,
                        'patch_overlaps': 8,
                        'channels_first': True,
                        'patch_dimensions': [-3, -2, -1],
                        'output_patch_shape': (1, 64, 64, 32),
                        # 'input_channels': [0, 3],
                        }

    skull_stripping_model = load_old_model(load('Skull_Strip_T1Post_FLAIR'))

    skull_stripping_prediction = ModelPatchesInference(**skullstrip_prediction_parameters)

    label_binarization = BinarizeLabel()
    largest_component = LargestComponents()
    hole_filler = FillHoles(postprocessor_string='_label')

    skull_stripping_prediction.append_postprocessor([label_binarization, largest_component, hole_filler])

    skull_stripping_model.append_output([skull_stripping_prediction])

    for case in data_collection.cases:

        print('\nStarting New Case...\n')
        
        skull_stripping_prediction.case = case
        skull_stripping_mask = skull_stripping_model.generate_outputs(data_collection)[0]['filenames'][-1]

    if not save_preprocess:
        for preprocessed_file in data_collection.data_groups['input_modalities'].preprocessed_case:
            os.remove(preprocessed_file)
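A minimal usage sketch for skull_strip, assuming single-case NIfTI inputs; the file paths are placeholders:

# Illustrative invocation; the file paths are placeholders.
skull_strip(output_folder='output/case_01',
            T1POST='data/case_01/T1POST.nii.gz',
            FLAIR='data/case_01/FLAIR.nii.gz')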
Example 3
import os

# Module paths below are assumed from DeepNeuro's package layout and may need
# adjusting for the installed version.
from deepneuro.data.data_collection import DataCollection
from deepneuro.preprocessing.conversion import DICOMConverter
from deepneuro.preprocessing.signal import N4BiasCorrection, ZeroMeanNormalization
from deepneuro.preprocessing.transform import Resample, Coregister
from deepneuro.preprocessing.skullstrip import SkullStrip
from deepneuro.postprocessing.label import BinarizeLabel
from deepneuro.outputs.inference import ModelPatchesInference
from deepneuro.models.model import load_old_model
from deepneuro.load import load


def predict_GBM(output_folder,
                T2=None,
                T1=None,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                skullstripped=False,
                normalized=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                output_wholetumor_filename='wholetumor_segmentation.nii.gz',
                output_enhancing_filename='enhancing_segmentation.nii.gz',
                verbose=True):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    input_data = {'input_modalities': [FLAIR, T2, T1, T1POST]}

    if ground_truth is not None:
        input_data['ground_truth'] = [ground_truth]

    if input_directory is None:

        if any(modality is None for modality in input_data['input_modalities']):
            raise ValueError(
                "Cannot segment GBM. Please specify all four modalities.")

        data_collection = DataCollection(verbose=verbose)
        data_collection.add_case(input_data, case_name=output_folder)

    else:
        data_collection = DataCollection(input_directory,
                                         modality_dict=input_data,
                                         verbose=verbose)
        data_collection.fill_data_groups()

    #--------------------------------------------------------------------#
    # Step 2, Preprocess Data
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Convert input DICOMs to NIfTI files for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_modalities'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_modalities'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not resampled:
            preprocessing_steps += [
                Resample(data_groups=['input_modalities'],
                         save_output=save_all_steps,
                         verbose=verbose,
                         output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_modalities'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=1)
            ]

        if not skullstripped:
            preprocessing_steps += [
                SkullStrip(data_groups=['input_modalities'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=1)
            ]

            # Normalization is nested under the skull-stripping branch because it
            # masks on the output of the preceding SkullStrip step.
            if not normalized:
                preprocessing_steps += [
                    ZeroMeanNormalization(
                        data_groups=['input_modalities'],
                        save_output=save_all_steps,
                        verbose=verbose,
                        mask_preprocessor=preprocessing_steps[-1],
                        preprocessor_string='_preprocessed')
                ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 3, Segmentation
    #--------------------------------------------------------------------#

    wholetumor_prediction_parameters = {
        'inputs': ['input_modalities'],
        'output_filename': os.path.join(output_folder,
                                        output_wholetumor_filename),
        'batch_size': 75,
        'patch_overlaps': 1,
        'channels_first': True,
        'patch_dimensions': [-3, -2, -1],
        'output_patch_shape': (1, 26, 26, 26),
        # 'input_channels': [0, 3],
    }

    enhancing_prediction_parameters = {
        'inputs': ['input_modalities'],
        'output_filename': os.path.join(output_folder,
                                        output_enhancing_filename),
        'batch_size': 75,
        'patch_overlaps': 1,
        'channels_first': True,
        'output_patch_shape': (1, 26, 26, 26),
        'patch_dimensions': [-3, -2, -1]
    }

    wholetumor_model = load_old_model(load('Segment_GBM_wholetumor'))
    enhancing_model = load_old_model(load('Segment_GBM_enhancing'))

    wholetumor_prediction = ModelPatchesInference(
        **wholetumor_prediction_parameters)
    wholetumor_model.append_output([wholetumor_prediction])

    enhancing_prediction = ModelPatchesInference(
        **enhancing_prediction_parameters)
    enhancing_model.append_output([enhancing_prediction])

    label_binarization = BinarizeLabel(postprocessor_string='_label')

    wholetumor_prediction.append_postprocessor([label_binarization])
    enhancing_prediction.append_postprocessor([label_binarization])

    for case in data_collection.cases:

        print('\nStarting New Case...\n')

        wholetumor_file = wholetumor_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        data_collection.add_channel(case, wholetumor_file)

        enhancing_file = enhancing_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        data_collection.clear_outputs()
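A minimal usage sketch for predict_GBM, assuming single-case NIfTI inputs; the file paths are placeholders. All four modalities must be supplied when no input_directory is given:

# Illustrative invocation; the file paths are placeholders.
predict_GBM(output_folder='output/case_01',
            T2='data/case_01/T2.nii.gz',
            T1='data/case_01/T1.nii.gz',
            T1POST='data/case_01/T1POST.nii.gz',
            FLAIR='data/case_01/FLAIR.nii.gz')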