Example #1
def predict_ischemic_stroke(output_folder, B0, DWI, ground_truth=None, input_directory=None, bias_corrected=True, resampled=False, registered=False, normalized=False, preprocessed=False, save_preprocess=False, save_all_steps=False, output_segmentation_filename='segmentation.nii.gz', verbose=True, input_data=None, registration_reference='FLAIR'):

    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[B0, DWI], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    stroke_prediction_parameters = {'inputs': ['input_data'], 
                        'output_filename': os.path.join(output_folder, output_segmentation_filename),
                        'batch_size': 50,
                        'patch_overlaps': 8,
                        'output_patch_shape': (62, 62, 6, 1)}

    stroke_model = load_model_with_output(model_name='ischemic_stroke', outputs=[ModelPatchesInference(**stroke_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='_label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=(save_preprocess or save_all_steps), verbose=verbose, output_folder=output_folder, reference_channel=registration_reference_channel)]

        if not normalized:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, preprocessor_string='_preprocessed')]

        else:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('Starting New Case...')
        
        docker_print('Ischemic Stroke Prediction')
        docker_print('======================')
        stroke_model.generate_outputs(data_collection, case)[0]['filenames'][-1]
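
A minimal usage sketch for the entry point above; the paths and output folder are hypothetical placeholders, and the call assumes the DeepNeuro package that defines predict_ischemic_stroke is importable.

if __name__ == '__main__':

    # Hypothetical single-case inputs: one B0 and one DWI volume.
    predict_ischemic_stroke(output_folder='./stroke_case_output',
                            B0='./case_001/B0.nii.gz',
                            DWI='./case_001/DWI.nii.gz',
                            output_segmentation_filename='segmentation.nii.gz',
                            verbose=True)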
Example #2
def skull_strip(output_folder, T1POST=None, FLAIR=None, ground_truth=None, input_directory=None, bias_corrected=True, resampled=False, registered=False, normalized=False, preprocessed=False, save_preprocess=False, save_all_steps=False, mask_output='skullstrip_mask.nii.gz', verbose=True):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    input_data = {'input_modalities': [FLAIR, T1POST]}

    if ground_truth is not None:
        input_data['ground_truth'] = [ground_truth]

    if input_directory is None:

        if any(modality is None for modality in input_data['input_modalities']):
            raise ValueError("Cannot skull strip. Please specify both the T1POST and FLAIR modalities.")

        data_collection = DataCollection(verbose=verbose)
        data_collection.add_case(input_data, case_name=output_folder)

    else:
        data_collection = DataCollection(input_directory, modality_dict=input_data, verbose=verbose)
        data_collection.fill_data_groups()

    #--------------------------------------------------------------------#
    # Step 2, Preprocess Data
    #--------------------------------------------------------------------#

    if not preprocessed:
        print('About to preprocess...')

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [Preprocessor(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not bias_corrected:
            preprocessing_steps += [N4BiasCorrection(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not resampled:
            preprocessing_steps += [Resample(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_modalities'], save_output=(save_preprocess or save_all_steps), verbose=verbose, output_folder=output_folder, reference_channel=0)]

        if not normalized:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_modalities'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 3, Skullstripping
    #--------------------------------------------------------------------#

    skullstrip_prediction_parameters = {'inputs': ['input_modalities'], 
                        'output_filename': os.path.join(output_folder, mask_output),
                        'batch_size': 25,
                        'patch_overlaps': 8,
                        'channels_first': True,
                        'patch_dimensions': [-3, -2, -1],
                        'output_patch_shape': (1, 64, 64, 32),
                        # 'input_channels': [0, 3],
                        }

    skull_stripping_model = load_old_model(load('Skull_Strip_T1Post_FLAIR'))

    skull_stripping_prediction = ModelPatchesInference(**skullstrip_prediction_parameters)

    label_binarization = BinarizeLabel()
    largest_component = LargestComponents()
    hole_filler = FillHoles(postprocessor_string='_label')

    skull_stripping_prediction.append_postprocessor([label_binarization, largest_component, hole_filler])

    skull_stripping_model.append_output([skull_stripping_prediction])

    for case in data_collection.cases:

        print('\nStarting New Case...\n')
        
        skull_stripping_prediction.case = case
        skull_stripping_mask = skull_stripping_model.generate_outputs(data_collection)[0]['filenames'][-1]

    if not save_preprocess:
        for preprocessed_file in data_collection.data_groups['input_modalities'].preprocessed_case:
            os.remove(preprocessed_file)
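
A minimal usage sketch for skull_strip as defined above; the file paths are hypothetical placeholders and assume the surrounding DeepNeuro modules are importable. Note that unless save_preprocess is True, the function deletes the intermediate preprocessed volumes after the mask is written.

if __name__ == '__main__':

    # Hypothetical single-case inputs: post-contrast T1 and FLAIR volumes.
    skull_strip(output_folder='./skullstrip_output',
                T1POST='./case_001/T1post.nii.gz',
                FLAIR='./case_001/FLAIR.nii.gz',
                mask_output='skullstrip_mask.nii.gz',
                save_preprocess=True,
                verbose=True)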
Example #3
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities':
        ['FLAIR_pp.*', 'T2_pp.*', 'T1_pp.*', 'T1post_pp.*'],
        'ground_truth': ['enhancingmask_pp.nii.gz']
    }

    load_data = False
    train_model = False
    load_test_data = False
    predict = True

    training_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/BRATS_enhancing_prediction_only_data.h5'
    model_file = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/BRATS_enhancing_prediction_only_model.h5'
    testing_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/BRATS_enhancing_prediction_only_data.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] !=
                                                  0)

        def roi_region(data):
            return data['ground_truth'] == 1

        def empty_region(data):
            return data['input_modalities'] == 0

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(32, 32, 32),
            patch_region_conditions=[[empty_region, .05], [brain_region, .25],
                                     [roi_region, .7]],
            data_groups=['input_modalities', 'ground_truth'],
            patch_dimensions={
                'ground_truth': [1, 2, 3],
                'input_modalities': [1, 2, 3]
            })
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=2000)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    if train_model:
        # Or load pre-loaded data.
        training_data_collection = DataCollection(data_storage=training_data,
                                                  verbose=True)
        training_data_collection.fill_data_groups()

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        # flip_augmentation = Flip_Rotate_3D(data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Define model parameters
        model_parameters = {
            'input_shape': (32, 32, 32, 4),
            'downsize_filters_factor': 1,
            'pool_size': (2, 2, 2),
            'filter_shape': (5, 5, 5),
            'dropout': 0,
            'batch_norm': True,
            'initial_learning_rate': 0.000001,
            'output_type': 'regression',
            'num_outputs': 1,
            'activation': 'relu',
            'padding': 'same',
            'implementation': 'keras',
            'depth': 4,
            'max_filter': 512
        }

        # Create U-Net
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model,
                   to_file='model_image_dn.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath': model_file,
            'training_batch_size': 64,
            'num_epochs': 1000,
            'training_steps_per_epoch': 20
        }
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Define input modalities to load.
    testing_modality_dict = {
        'input_modalities':
        ['FLAIR_pp.*', 'T2_pp.*', 'T1_pp.*', 'T1post_pp.*']
    }

    if predict:
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=testing_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()

        if load_test_data:
            # Write data to hdf5
            testing_data_collection.write_data_to_file(testing_data)

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'brats_enhancing_only_prediction.nii.gz',
            'batch_size': 250,
            'patch_overlaps': 1,
            'output_patch_shape': (26, 26, 26, 4),
            'save_all_steps': True
        }

        prediction = ModelPatchesInference(**testing_parameters)

        label_binarization = BinarizeLabel(postprocessor_string='_label')
        largest_island = LargestComponents()

        prediction.append_postprocessor([label_binarization, largest_island])

        unet_model.append_output([prediction])
        unet_model.generate_outputs(testing_data_collection)
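
The patch_region_conditions above pair a boolean condition function with a weight. Below is a toy NumPy sketch of what those condition functions compute on a single case's data dictionary; the array shapes are invented for illustration, and the weights are assumed here to act as relative sampling fractions for ExtractPatches.

import numpy as np

# Invented single-case arrays: a 4-channel input volume and a binary label volume.
data = {'input_modalities': np.random.rand(4, 24, 24, 24),
        'ground_truth': (np.random.rand(1, 24, 24, 24) > 0.9).astype(int)}

def roi_region(data):
    return data['ground_truth'] == 1

def brain_region(data):
    return (data['ground_truth'] != 1) & (data['input_modalities'] != 0)

# Each condition returns a boolean mask of candidate patch locations; pairing
# them as [[brain_region, .25], [roi_region, .7]] would then bias patch
# extraction toward the labeled region.
print('ROI voxels:', int(roi_region(data).sum()))
print('Brain voxels:', int(brain_region(data).sum()))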
Example #4
File: train.py  Project: shlpu/DeepNeuro
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities': [
            '*FLAIR*', ['*T2SPACE*', '*T2_pp*'], ['*T1_pp.*', '*MPRAGE_Pre*'],
            ['*T1post_pp.*', '*MPRAGE_POST*'], ['enhancing*'],
            ['wholetumor*', 'full_edemamask*']
        ],
        'ground_truth': [['enhancing*'], ['wholetumor*', 'full_edemamask*']]
    }

    load_data = False
    train_model = False
    load_test_data = True
    predict = True

    training_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/enhancing_label_upsampling_323232.h5'
    model_file = 'label_upsampling_323232_model_correct.h5'
    testing_data = './FLAIR_upsampling_323232_test.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] !=
                                                  0)

        def roi_region(data):
            return data['ground_truth'] == 1

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(32, 32, 32),
            patch_region_conditions=[[roi_region, 1]],
            data_groups=['input_modalities', 'ground_truth'],
            patch_dimensions={
                'ground_truth': [0, 1, 2],
                'input_modalities': [0, 1, 2]
            })
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=70)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    if train_model:
        # Or load pre-loaded data.
        training_data_collection = DataCollection(data_storage=training_data,
                                                  verbose=True)
        training_data_collection.fill_data_groups()

        # Choose a modality
        choice_augmentation = ChooseData(
            axis={
                'input_modalities': -1,
                'ground_truth': -1
            },
            choices=[-1, -2],
            data_groups=['input_modalities', 'ground_truth'],
            random_sample=False)
        training_data_collection.append_augmentation(choice_augmentation,
                                                     multiplier=2)

        # Add down-sampling
        mask_augmentation = Downsample(channel=4,
                                       axes={'input_modalities': [-4, -3, -2]},
                                       factor=3,
                                       data_groups=['input_modalities'])
        training_data_collection.append_augmentation(mask_augmentation,
                                                     multiplier=4)

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Define model parameters
        model_parameters = {
            'input_shape': (32, 32, 32, 5),
            'downsize_filters_factor': 1,
            'pool_size': (2, 2, 2),
            'filter_shape': (5, 5, 5),
            'dropout': 0,
            'batch_norm': True,
            'initial_learning_rate': 0.000001,
            'output_type': 'binary_label',
            'num_outputs': 1,
            'activation': 'relu',
            'padding': 'same',
            'implementation': 'keras',
            'depth': 4,
            'max_filter': 512
        }

        # Create U-Net
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model,
                   to_file='model_image_dn.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath': model_file,
            'training_batch_size': 64,
            'num_epochs': 1000,
            'training_steps_per_epoch': 20
        }
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Load testing data..
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=training_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data,
                                                 verbose=True)
        testing_data_collection.fill_data_groups()

        # Choose a modality
        choice_augmentation = ChooseData(
            axis={
                'input_modalities': -1,
                'ground_truth': -1
            },
            choices=[-1, -2],
            data_groups=['input_modalities', 'ground_truth'],
            random_sample=False)
        testing_data_collection.append_augmentation(choice_augmentation,
                                                    multiplier=2)

        # Add down-sampling
        mask_augmentation = Downsample(channel=4,
                                       axes={'input_modalities': [-4, -3, -2]},
                                       factor=3,
                                       data_groups=['input_modalities'],
                                       random_sample=False)
        testing_data_collection.append_augmentation(mask_augmentation,
                                                    multiplier=3)

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'deepneuro-label.nii.gz',
            'batch_size': 250,
            'patch_overlaps': 6,
            'output_patch_shape': (26, 26, 26, 4)
        }

        prediction = ModelPatchesInference(testing_data_collection,
                                           **testing_parameters)

        unet_model.append_output([prediction])
        unet_model.generate_outputs()
Example #5
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    if True:
        training_modality_dict = {
            'input_modalities':
            ['*FLAIR_pp.*', '*T2_pp.*', '*T1_pp.*', '*T1post_pp.*'],
            'ground_truth': ['*full_edemamask_pp.*']
        }
    else:
        training_modality_dict = {
            'input_modalities': [['*FLAIR_pp.*', 'FLAIR_norm2*'],
                                 ['*T1post_pp.*', 'T1post_norm2*']],
            'ground_truth': ['*full_edemamask_pp.*', 'FLAIRmask-label.nii.gz']
        }

    load_data = True
    train_model = True
    load_test_data = True
    predict = True

    training_data = './wholetumor_predict_patches_test3.h5'
    model_file = 'wholetumor_segnet-58-0.38.h5'
    testing_data = './brats_test_case.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] !=
                                                  0)

        def roi_region(data):
            return data['ground_truth'] == 1

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(32, 32, 32),
            patch_region_conditions=[[brain_region, 1]],
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=200)

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    # Or load pre-loaded data.
    training_data_collection = DataCollection(data_storage=training_data,
                                              verbose=True)
    training_data_collection.fill_data_groups()

    # Define model parameters
    model_parameters = {
        'input_shape': (32, 32, 32, 4),
        'downsize_filters_factor': 1,
        'pool_size': (2, 2, 2),
        'filter_shape': (3, 3, 3),
        'dropout': 0,
        'batch_norm': True,
        'initial_learning_rate': 0.000001,
        'output_type': 'binary_label',
        'num_outputs': 1,
        'activation': 'relu',
        'padding': 'same',
        'implementation': 'keras',
        'depth': 4,
        'max_filter': 512
    }

    # Create U-Net
    if train_model:
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model,
                   to_file='model_image_dn.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath': 'wholetumor_segnet-{epoch:02d}-{loss:.2f}.h5',
            'training_batch_size': 2,
            'num_epochs': 100,
            'training_steps_per_epoch': 200,
            'save_best_only': False
        }
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Load testing data..
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=training_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data,
                                                 verbose=True)
        testing_data_collection.fill_data_groups()

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'deepneuro.nii.gz',
            'batch_size': 200,
            'patch_overlaps': 1
        }

        prediction = ModelPatchesInference(testing_data_collection,
                                           **testing_parameters)

        unet_model.append_output([prediction])
        unet_model.generate_outputs()
Example #6
def predict_GBM(output_folder,
                T1POST=None,
                FLAIR=None,
                T1PRE=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                skullstripped=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                output_wholetumor_filename='wholetumor_segmentation.nii.gz',
                output_enhancing_filename='enhancing_segmentation.nii.gz',
                verbose=True,
                input_data=None):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    wholetumor_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder,
                                        output_wholetumor_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (56, 56, 6, 1),
        'input_channels': [0, 1]
    }

    enhancing_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder,
                                        output_enhancing_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (56, 56, 6, 1)
    }

    wholetumor_model = load_model_with_output(
        model_name='gbm_wholetumor_mri',
        outputs=[ModelPatchesInference(**wholetumor_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')])
    enhancing_model = load_model_with_output(
        model_name='gbm_enhancingtumor_mri',
        outputs=[ModelPatchesInference(**enhancing_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')])

    if not preprocessed and not skullstripped:

        skullstripping_prediction_parameters = {
            'inputs': ['input_data'],
            'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
            'batch_size': 50,
            'patch_overlaps': 3,
            'output_patch_shape': (56, 56, 6, 1),
            'save_to_file': False
        }

        skullstripping_model = load_model_with_output(
            model_name='skullstrip_mri',
            outputs=[
                ModelPatchesInference(**skullstripping_prediction_parameters)
            ],
            postprocessors=[BinarizeLabel(),
                            FillHoles(),
                            LargestComponents()])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0)
            ]

        if not skullstripped:

            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder)
            ]

            preprocessing_steps += [
                SkullStrip_Model(data_groups=['input_data'],
                                 model=skullstripping_model,
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder,
                                 reference_channel=[0, 1])
            ]

            preprocessing_steps += [
                ZeroMeanNormalization(
                    data_groups=['input_data'],
                    save_output=save_all_steps,
                    verbose=verbose,
                    output_folder=output_folder,
                    mask_preprocessor=preprocessing_steps[-1],
                    preprocessor_string='_preprocessed')
            ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('\nStarting New Case...\n')

        docker_print('Whole Tumor Prediction')
        docker_print('======================')
        wholetumor_file = wholetumor_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        data_collection.add_channel(case, wholetumor_file)

        docker_print('Enhancing Tumor Prediction')
        docker_print('======================')
        enhancing_model.generate_outputs(data_collection, case)

        data_collection.clear_outputs()
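
A minimal usage sketch for the cascaded predict_GBM pipeline above; the paths are hypothetical placeholders and assume the DeepNeuro package providing predict_GBM is importable. With the default flags the inputs are converted, co-registered, skull-stripped, and normalized, the whole-tumor mask is predicted first, and that mask is then appended as an extra channel for the enhancing-tumor model.

if __name__ == '__main__':

    # Hypothetical single-case inputs for the three contrasts used above.
    predict_GBM(output_folder='./gbm_case_output',
                T1POST='./case_001/T1post.nii.gz',
                FLAIR='./case_001/FLAIR.nii.gz',
                T1PRE='./case_001/T1pre.nii.gz',
                verbose=True)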
Example #7
File: tmz_4.py  Project: shlpu/DeepNeuro
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {'input_modalities': 
    ['*FLAIR*nii.gz', ['*T2SPACE*nii.gz'], ['*MPRAGE_POST*nii.gz'], ['*MPRAGE_Pre*nii.gz']],
    'ground_truth': ['*SUV_r_T2_raw.nii.gz*']}

    load_data = False
    train_model = False
    load_test_data = True
    predict = True

    training_data = '/mnt/jk489/QTIM_Databank/DeepNeuro_Datasets/TMZ_4_323232.h5'
    model_file = 'TMZ_4_323232_model.h5'
    testing_data = './TMZ_4_323232_test.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] != 1) & (data['input_modalities'] != 0)
        def roi_region(data):
            return data['ground_truth'] >= 1.5

        # Add patch augmentation
        patch_augmentation = ExtractPatches(patch_shape=(32, 32, 32), patch_region_conditions=[[brain_region, .5], [roi_region, .5]], data_groups=['input_modalities', 'ground_truth'], patch_dimensions={'ground_truth': [0,1,2], 'input_modalities': [0,1,2]})
        training_data_collection.append_augmentation(patch_augmentation, multiplier=2000)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    if train_model:
        # Or load pre-loaded data.
        training_data_collection = DataCollection(data_storage=training_data, verbose=True)
        training_data_collection.fill_data_groups()

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(flip=True, rotate=False, data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation, multiplier=2)

        # Define model parameters
        model_parameters = {'input_shape': (32, 32, 32, 4),
                        'downsize_filters_factor': 1,
                        'pool_size': (2, 2, 2), 
                        'filter_shape': (5, 5, 5), 
                        'dropout': 0, 
                        'batch_norm': True, 
                        'initial_learning_rate': 0.000001, 
                        'output_type': 'regression',
                        'num_outputs': 1, 
                        'activation': 'relu',
                        'padding': 'same', 
                        'implementation': 'keras',
                        'depth': 4,
                        'max_filter': 512}

        # Create U-Net
        unet_model = UNet(**model_parameters)
        plot_model(unet_model.model, to_file='model_image_dn.png', show_shapes=True)
        training_parameters = {'input_groups': ['input_modalities', 'ground_truth'],
                        'output_model_filepath': model_file,
                        'training_batch_size': 64,
                        'num_epochs': 1000,
                        'training_steps_per_epoch': 20}
        unet_model.train(training_data_collection, **training_parameters)
    else:
        unet_model = load_old_model(model_file)

    # Load testing data..
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(val_data_directory, modality_dict=training_modality_dict, verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data, verbose=True)
        testing_data_collection.fill_data_groups()

        flip_augmentation = Copy(data_groups=['input_modalities', 'ground_truth'])
        testing_data_collection.append_augmentation(flip_augmentation, multiplier=1)

        testing_parameters = {'inputs': ['input_modalities'], 
                        'output_filename': 'deepneuro_suv_4.nii.gz',
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (26,26,26,4)}

        prediction = ModelPatchesInference(testing_data_collection, **testing_parameters)

        unet_model.append_output([prediction])
        unet_model.generate_outputs()
Example #8
def skull_strip(output_folder,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                normalized=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                mask_output='skullstrip_mask.nii.gz',
                input_data=None,
                verbose=True):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[FLAIR, T1POST],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    skullstripping_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder, mask_output),
        'batch_size': 50,
        'patch_overlaps': 6,
        'channels_first': False,
        'patch_dimensions': [-4, -3, -2],
        'output_patch_shape': (56, 56, 6, 1)
    }

    skullstripping_model = load_model_with_output(
        model_name='skullstrip_mri',
        outputs=[
            ModelPatchesInference(**skullstripping_prediction_parameters)
        ],
        postprocessors=[
            BinarizeLabel(),
            FillHoles(),
            LargestComponents(postprocessor_string='_label')
        ])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0)
            ]

        preprocessing_steps += [
            ZeroMeanNormalization(data_groups=['input_data'],
                                  save_output=save_all_steps,
                                  verbose=verbose,
                                  output_folder=output_folder)
        ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('\nStarting New Case...\n')

        docker_print('Skullstripping Prediction')
        docker_print('======================')
        skullstripping_model.generate_outputs(data_collection, case)
Example #9
def predict_GBM(output_folder,
                T2=None,
                T1=None,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                skullstripped=False,
                normalized=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                output_wholetumor_filename='wholetumor_segmentation.nii.gz',
                output_enhancing_filename='enhancing_segmentation.nii.gz',
                verbose=True):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    input_data = {'input_modalities': [FLAIR, T2, T1, T1POST]}

    if ground_truth is not None:
        input_data['ground_truth'] = [ground_truth]

    if input_directory is None:

        if any(modality is None for modality in input_data['input_modalities']):
            raise ValueError(
                "Cannot segment GBM. Please specify all four modalities.")

        data_collection = DataCollection(verbose=verbose)
        data_collection.add_case(input_data, case_name=output_folder)

    else:
        data_collection = DataCollection(input_directory,
                                         modality_dict=input_data,
                                         verbose=verbose)
        data_collection.fill_data_groups()

    #--------------------------------------------------------------------#
    # Step 2, Preprocess Data
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_modalities'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_modalities'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not resampled:
            preprocessing_steps += [
                Resample(data_groups=['input_modalities'],
                         save_output=save_all_steps,
                         verbose=verbose,
                         output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_modalities'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=1)
            ]

        if not skullstripped:
            preprocessing_steps += [
                SkullStrip(data_groups=['input_modalities'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=1)
            ]

            if not normalized:
                preprocessing_steps += [
                    ZeroMeanNormalization(
                        data_groups=['input_modalities'],
                        save_output=save_all_steps,
                        verbose=verbose,
                        mask_preprocessor=preprocessing_steps[-1],
                        preprocessor_string='_preprocessed')
                ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 3, Segmentation
    #--------------------------------------------------------------------#

    wholetumor_prediction_parameters = {
        'inputs': ['input_modalities'],
        'output_filename': os.path.join(output_folder,
                                        output_wholetumor_filename),
        'batch_size': 75,
        'patch_overlaps': 1,
        'channels_first': True,
        'patch_dimensions': [-3, -2, -1],
        'output_patch_shape': (1, 26, 26, 26),
        # 'input_channels': [0, 3],
    }

    enhancing_prediction_parameters = {
        'inputs': ['input_modalities'],
        'output_filename': os.path.join(output_folder,
                                        output_enhancing_filename),
        'batch_size': 75,
        'patch_overlaps': 1,
        'channels_first': True,
        'output_patch_shape': (1, 26, 26, 26),
        'patch_dimensions': [-3, -2, -1]
    }

    wholetumor_model = load_old_model(load('Segment_GBM_wholetumor'))
    enhancing_model = load_old_model(load('Segment_GBM_enhancing'))

    wholetumor_prediction = ModelPatchesInference(
        **wholetumor_prediction_parameters)
    wholetumor_model.append_output([wholetumor_prediction])

    enhancing_prediction = ModelPatchesInference(
        **enhancing_prediction_parameters)
    enhancing_model.append_output([enhancing_prediction])

    label_binarization = BinarizeLabel(postprocessor_string='_label')

    wholetumor_prediction.append_postprocessor([label_binarization])
    enhancing_prediction.append_postprocessor([label_binarization])

    for case in data_collection.cases:

        print('\nStarting New Case...\n')

        wholetumor_file = wholetumor_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        data_collection.add_channel(case, wholetumor_file)

        enhancing_file = enhancing_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        data_collection.clear_outputs()
Example #10
def predict_brain_mets(output_folder,
                       T2=None,
                       T1POST=None,
                       T1PRE=None,
                       FLAIR=None,
                       ground_truth=None,
                       input_directory=None,
                       bias_corrected=True,
                       resampled=False,
                       registered=False,
                       skullstripped=False,
                       preprocessed=False,
                       save_preprocess=False,
                       save_all_steps=False,
                       output_segmentation_filename='segmentation.nii.gz',
                       verbose=True,
                       input_data=None,
                       registration_reference='FLAIR'):

    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[T1PRE, T1POST, T2, FLAIR],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    mets_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder, output_segmentation_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (28, 28, 28, 1),
        'output_channels': [1]
    }

    mets_model = load_model_with_output(
        model_name='mets_enhancing',
        outputs=[ModelPatchesInference(**mets_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')],
        wcc_weights={
            0: 0.1,
            1: 3.0
        })

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not skullstripped:
            skullstripping_prediction_parameters = {
                'inputs': ['input_data'],
                'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
                'batch_size': 50,
                'patch_overlaps': 3,
                'output_patch_shape': (56, 56, 6, 1),
                'save_to_file': False,
                'data_collection': data_collection
            }

            skullstripping_model = load_model_with_output(
                model_name='skullstrip_mri',
                outputs=[
                    ModelPatchesInference(
                        **skullstripping_prediction_parameters)
                ],
                postprocessors=[
                    BinarizeLabel(),
                    FillHoles(),
                    LargestComponents()
                ])

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=registration_reference_channel)
            ]

        if not skullstripped:
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder)
            ]

            preprocessing_steps += [
                SkullStrip_Model(data_groups=['input_data'],
                                 model=skullstripping_model,
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder,
                                 reference_channel=[3, 1])
            ]

            preprocessing_steps += [
                ZeroMeanNormalization(
                    data_groups=['input_data'],
                    save_output=save_all_steps,
                    verbose=verbose,
                    output_folder=output_folder,
                    mask_preprocessor=preprocessing_steps[-1],
                    preprocessor_string='_preprocessed')
            ]

        else:
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder,
                                      mask_zeros=True,
                                      preprocessor_string='_preprocessed')
            ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('Starting New Case...')

        docker_print('Enhancing Mets Prediction')
        docker_print('======================')
        mets_model.generate_outputs(data_collection, case)[0]['filenames'][-1]
Example #11
def train_Segment_GBM(data_directory, val_data_directory):

    # Define input modalities to load.
    training_modality_dict = {
        'input_modalities': ['*phantom*'],
        'ground_truth': ['*ktrans*']
    }

    load_data = False
    train_model = False
    load_test_data = False
    predict = True

    training_data = './dce_mri_ktrans_training_884_1.h5'
    model_file = 'ktrans_net_884_1_3layer_conv_separated_sym.h5'
    testing_data = './dce_mri_ktrans_testing_884_1.h5'

    # Write the data to hdf5
    if (not os.path.exists(training_data) and train_model) or load_data:

        # Create a Data Collection
        training_data_collection = DataCollection(
            data_directory, modality_dict=training_modality_dict, verbose=True)
        training_data_collection.fill_data_groups()

        # Define patch sampling regions
        def brain_region(data):
            return (data['ground_truth'] >= .1)

        # Add patch augmentation
        patch_augmentation = ExtractPatches(
            patch_shape=(8, 8, 4),
            patch_region_conditions=[[brain_region, 1]],
            data_groups=['input_modalities', 'ground_truth'],
            patch_dimensions={
                'ground_truth': [0, 1, 2],
                'input_modalities': [1, 2, 3]
            })
        training_data_collection.append_augmentation(patch_augmentation,
                                                     multiplier=5000)

        # Add left-right flips
        flip_augmentation = Flip_Rotate_2D(
            flip=True,
            rotate=False,
            data_groups=['input_modalities', 'ground_truth'])
        training_data_collection.append_augmentation(flip_augmentation,
                                                     multiplier=2)

        # Write data to hdf5
        training_data_collection.write_data_to_file(training_data)

    # Or load pre-loaded data.
    training_data_collection = DataCollection(data_storage=training_data,
                                              verbose=True)
    training_data_collection.fill_data_groups()

    # Define model parameters
    model_parameters = {
        'input_shape': (65, 8, 8, 4, 1),
        'downsize_filters_factor': 4,
        'pool_size': (2, 2, 2),
        'filter_shape': (3, 3, 3),
        'dropout': .1,
        'batch_norm': True,
        'initial_learning_rate': 0.000001,
        'output_type': 'regression',
        'num_outputs': 1,
        'activation': 'relu',
        'padding': 'same',
        'implementation': 'keras',
        'depth': 1,
        'max_filter': 32
    }

    # Create U-Net
    if train_model:
        timenet_model = TimeNet(**model_parameters)
        plot_model(timenet_model.model,
                   to_file='timenet_model.png',
                   show_shapes=True)
        training_parameters = {
            'input_groups': ['input_modalities', 'ground_truth'],
            'output_model_filepath': model_file,
            'training_batch_size': 32,
            'num_epochs': 100,
            'training_steps_per_epoch': 200,
            'save_best_only': True
        }
        timenet_model.train(training_data_collection, **training_parameters)
    else:
        timenet_model = load_old_model(model_file)

    # Load testing data..
    if not os.path.exists(testing_data) or load_test_data:
        # Create a Data Collection
        testing_data_collection = DataCollection(
            val_data_directory,
            modality_dict=training_modality_dict,
            verbose=True)
        testing_data_collection.fill_data_groups()
        # Write data to hdf5
        testing_data_collection.write_data_to_file(testing_data)

    if predict:
        testing_data_collection = DataCollection(data_storage=testing_data,
                                                 verbose=True)
        testing_data_collection.fill_data_groups()

        testing_parameters = {
            'inputs': ['input_modalities'],
            'output_filename': 'deepneuro.nii.gz',
            'batch_size': 200,
            'patch_overlaps': 8,
            'output_patch_shape': (6, 6, 2, 1)
        }

        prediction = ModelPatchesInference(testing_data_collection,
                                           **testing_parameters)

        timenet_model.append_output([prediction])
        timenet_model.generate_outputs()