Example #1
0
def predict_ischemic_stroke(output_folder, B0, DWI, ground_truth=None, input_directory=None, bias_corrected=True, resampled=False, registered=False, normalized=False, preprocessed=False, save_preprocess=False, save_all_steps=False, output_segmentation_filename='segmentation.nii.gz', verbose=True, input_data=None, registration_reference='FLAIR'):

    """ Segment ischemic stroke lesions from paired B0 and DWI volumes.

        Loads the two inputs into a data collection, attaches the
        preprocessing steps requested by the boolean flags, and runs
        patch-based inference with the pretrained 'ischemic_stroke' model,
        writing a binarized label map named `output_segmentation_filename`
        into `output_folder`.

        NOTE(review): `bias_corrected`, `resampled` and
        `registration_reference` are accepted for interface compatibility
        but are not consulted anywhere in this body.
    """

    # DWI is channel index 1 in the [B0, DWI] input ordering below.
    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[B0, DWI], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    stroke_prediction_parameters = {'inputs': ['input_data'], 
                        'output_filename': os.path.join(output_folder, output_segmentation_filename),
                        'batch_size': 50,
                        'patch_overlaps': 8,
                        'output_patch_shape': (62, 62, 6, 1)}

    stroke_model = load_model_with_output(model_name='ischemic_stroke', outputs=[ModelPatchesInference(**stroke_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='_label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=(save_preprocess or save_all_steps), verbose=verbose, output_folder=output_folder, reference_channel=registration_reference_channel)]

        # NOTE(review): normalization runs in BOTH branches — when the data is
        # already normalized the step is still applied, only with mask_zeros
        # enabled. Preserved as-is; confirm this is intentional.
        if not normalized:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, preprocessor_string='_preprocessed')]

        else:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('Starting New Case...')
        
        docker_print('Ischemic Stroke Prediction')
        docker_print('======================')
        # The segmentation file is written by the model's output object; the
        # original code indexed the returned filename and discarded it, which
        # only added a spurious failure path. Just run the inference.
        stroke_model.generate_outputs(data_collection, case)
Example #2
0
def predict_GBM(output_folder,
                T1POST=None,
                FLAIR=None,
                T1PRE=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                skullstripped=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                output_wholetumor_filename='wholetumor_segmentation.nii.gz',
                output_enhancing_filename='enhancing_segmentation.nii.gz',
                verbose=True,
                input_data=None):

    """ Two-stage GBM segmentation from multi-modal MRI.

        Stage 1 predicts a whole-tumor label; its output file is appended to
        the data collection as an extra input channel before stage 2 predicts
        the enhancing-tumor label. Boolean flags skip preprocessing stages the
        caller has already applied; `save_all_steps` / `save_preprocess`
        control which intermediates are written to `output_folder`.

        NOTE(review): `resampled` is accepted but never read in this body.
    """

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    # Channel ordering is [FLAIR, T1POST, T1PRE]; channel indices below
    # refer to this ordering.
    data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    # The whole-tumor model reads only channels 0 and 1 (FLAIR, T1POST).
    wholetumor_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder,
                                        output_wholetumor_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (56, 56, 6, 1),
        'input_channels': [0, 1]
    }

    # The enhancing model reads all channels, including the whole-tumor
    # label added during the inference loop below.
    enhancing_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder,
                                        output_enhancing_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (56, 56, 6, 1)
    }

    wholetumor_model = load_model_with_output(
        model_name='gbm_wholetumor_mri',
        outputs=[ModelPatchesInference(**wholetumor_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')])
    enhancing_model = load_model_with_output(
        model_name='gbm_enhancingtumor_mri',
        outputs=[ModelPatchesInference(**enhancing_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')])

    # The skull-stripping model is only needed (and only constructed) when
    # the inputs are neither preprocessed nor already skull-stripped; the
    # matching guard inside Step 3 keeps its later use safe.
    if not preprocessed and not skullstripped:

        # 'save_to_file': False — the brain mask stays in memory and is
        # consumed by SkullStrip_Model below rather than written out.
        skullstripping_prediction_parameters = {
            'inputs': ['input_data'],
            'output_filename':
            os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
            'batch_size':
            50,
            'patch_overlaps':
            3,
            'output_patch_shape': (56, 56, 6, 1),
            'save_to_file':
            False
        }

        skullstripping_model = load_model_with_output(
            model_name='skullstrip_mri',
            outputs=[
                ModelPatchesInference(**skullstripping_prediction_parameters)
            ],
            postprocessors=[BinarizeLabel(),
                            FillHoles(),
                            LargestComponents()])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            # Coregister to channel 0 (FLAIR).
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0)
            ]

        if not skullstripped:

            # First-pass normalization so the skull-strip model sees
            # zero-mean inputs.
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder)
            ]

            preprocessing_steps += [
                SkullStrip_Model(data_groups=['input_data'],
                                 model=skullstripping_model,
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder,
                                 reference_channel=[0, 1])
            ]

            # Re-normalize within the brain mask produced by the preceding
            # step (mask_preprocessor points at SkullStrip_Model above, so
            # list order here is load-bearing).
            preprocessing_steps += [
                ZeroMeanNormalization(
                    data_groups=['input_data'],
                    save_output=save_all_steps,
                    verbose=verbose,
                    output_folder=output_folder,
                    mask_preprocessor=preprocessing_steps[-1],
                    preprocessor_string='_preprocessed')
            ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('\nStarting New Case...\n')

        docker_print('Whole Tumor Prediction')
        docker_print('======================')
        wholetumor_file = wholetumor_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        # Feed the whole-tumor label to the enhancing model as an extra
        # input channel.
        data_collection.add_channel(case, wholetumor_file)

        docker_print('Enhancing Tumor Prediction')
        docker_print('======================')
        enhancing_model.generate_outputs(data_collection, case)

        # Drop the per-case outputs so the added channel does not leak
        # into the next case.
        data_collection.clear_outputs()
Example #3
0
File: predict.py  Project: ysuter/DeepNeuro
def predict_ischemic_stroke(output_folder,
                            B0,
                            DWI,
                            ground_truth=None,
                            input_directory=None,
                            registered=False,
                            preprocessed=False,
                            save_only_segmentations=False,
                            save_all_steps=False,
                            output_segmentation_filename='segmentation.nii.gz',
                            input_data=None,
                            registration_reference='FLAIR',
                            quiet=False):

    """ Segment ischemic stroke lesions from paired B0 and DWI volumes.

        Boolean flags skip preprocessing stages already applied to the
        inputs; `save_all_steps` / `save_only_segmentations` control which
        intermediates are written, and `quiet` suppresses console output.

        NOTE(review): `registration_reference` is accepted for interface
        compatibility but is not consulted here; coregistration always uses
        channel 1 (DWI) as the fixed reference.
    """

    verbose = not quiet
    save_preprocessed = not save_only_segmentations

    # DWI is channel index 1 in the [B0, DWI] input ordering below.
    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[B0, DWI], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    stroke_prediction_parameters = {'inputs': ['input_data'],
                        'output_directory': output_folder,
                        'output_filename': output_segmentation_filename,
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (62, 62, 6, 1),
                        'case_in_filename': False,
                        'verbose': verbose}

    stroke_model = load_model_with_output(model_name='ischemic_stroke', outputs=[PatchesInference(**stroke_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=registration_reference_channel)]

        # This branch only runs when `preprocessed` is False, so normalization
        # is added unconditionally (the original nested `if not preprocessed`
        # check was redundant).
        preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    if verbose:
        docker_print('Starting New Case...')

        docker_print('Ischemic Stroke Prediction')
        docker_print('======================')

    stroke_model.generate_outputs(data_collection, output_folder)

    # Remove intermediate preprocessing artifacts from the collection.
    data_collection.clear_preprocessor_outputs()
Example #4
0
def predict_GBM(output_folder, 
                T1POST=None, 
                FLAIR=None, 
                T1PRE=None, 
                ground_truth=None, 
                input_directory=None, 
                bias_corrected=True, 
                resampled=False, 
                registered=False, 
                skullstripped=False, 
                preprocessed=False, 
                save_only_segmentations=False, 
                save_all_steps=False, 
                output_wholetumor_filename='wholetumor_segmentation.nii.gz', 
                output_enhancing_filename='enhancing_segmentation.nii.gz', 
                output_probabilities=False, 
                quiet=False, 
                input_data=None, 
                registration_reference='FLAIR'):

    """ Two-stage GBM segmentation from multi-modal MRI.

        Stage 1 predicts a whole-tumor label; its output file is appended
        to the data collection as an extra channel before stage 2 predicts
        the enhancing-tumor label. Boolean flags skip preprocessing stages
        already applied; `quiet` suppresses console output and
        `save_only_segmentations` suppresses preprocessed intermediates.

        NOTE(review): `resampled`, `output_probabilities` and
        `registration_reference` are accepted but never read in this body;
        coregistration always uses channel 0 (FLAIR) as the reference.
    """

    verbose = not quiet
    save_preprocessed = not save_only_segmentations

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    # Channel ordering is [FLAIR, T1POST, T1PRE]; channel indices below
    # refer to this ordering.
    data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models and Postprocessors
    #--------------------------------------------------------------------#

    wholetumor_prediction_parameters = {'output_directory': output_folder,
                        'output_filename': output_wholetumor_filename,
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (56, 56, 6, 1),
                        'case_in_filename': False,
                        'verbose': verbose}

    enhancing_prediction_parameters = {'output_directory': output_folder,
                        'output_filename': output_enhancing_filename,
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (56, 56, 6, 1),
                        'case_in_filename': False,
                        'verbose': verbose}

    wholetumor_model = load_model_with_output(model_name='gbm_wholetumor_mri', 
        outputs=[PatchesInference(**wholetumor_prediction_parameters)], 
        postprocessors=[BinarizeLabel(postprocessor_string='label')])

    enhancing_model = load_model_with_output(model_name='gbm_enhancingtumor_mri', 
        outputs=[PatchesInference(**enhancing_prediction_parameters)], 
        postprocessors=[BinarizeLabel(postprocessor_string='label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not bias_corrected:
            preprocessing_steps += [N4BiasCorrection(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            # Coregister to channel 0 (FLAIR).
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=0)]

        if not skullstripped:

            # 'save_to_file': False — the brain mask stays in memory and is
            # consumed by SkullStrip_Model below rather than written out.
            skullstripping_prediction_parameters = {'inputs': ['input_data'], 
                'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
                'batch_size': 50,
                'patch_overlaps': 3,
                'output_patch_shape': (56, 56, 6, 1),
                'save_to_file': False,
                'data_collection': data_collection}

            skullstripping_model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])

            # First-pass normalization so the skull-strip model sees
            # zero-mean inputs.
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

            preprocessing_steps += [SkullStrip_Model(data_groups=['input_data'], model=skullstripping_model, save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=[0, 1])]

            # Re-normalize within the brain mask from the step just added
            # (mask_preprocessor=preprocessing_steps[-1] — list order is
            # load-bearing here).
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_preprocessor=preprocessing_steps[-1], preprocessor_string='_preprocessed')]

        else:
            # Already skull-stripped: normalize once, masking out the
            # zero-valued background.
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    if verbose:
        docker_print('Starting New Case...')
    
        docker_print('Whole Tumor Prediction')
        docker_print('======================')

    # Stage 1, then feed its output file to stage 2 as an extra channel.
    wholetumor_file = wholetumor_model.generate_outputs(data_collection, output_folder)[0]['filenames'][-1]
    data_collection.add_channel(output_folder, wholetumor_file)

    if verbose:
        docker_print('Enhancing Tumor Prediction')
        docker_print('======================')

    enhancing_model.generate_outputs(data_collection, output_folder)

    # Remove intermediate preprocessing artifacts from the collection.
    data_collection.clear_preprocessor_outputs()
Example #5
0
def skull_strip(output_folder,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                registered=False,
                preprocessed=False,
                output_segmentation_filename='segmentation.nii.gz',
                output_probabilities=False,
                quiet=False,
                input_data=None,
                save_only_segmentations=False,
                save_all_steps=False):

    """ Produce a binary skull-strip mask from FLAIR and T1-post volumes.

        Runs patch-based inference with the pretrained 'skullstrip_mri'
        model and writes `output_segmentation_filename` into
        `output_folder`. Boolean flags skip preprocessing stages already
        applied to the inputs; `quiet` suppresses console output.
    """

    verbose = not quiet
    keep_preprocessed = not save_only_segmentations

    # Step 1: assemble the input data collection ([FLAIR, T1POST] order).
    data_collection = load_data(inputs=[FLAIR, T1POST],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    # Step 2: configure patch-wise inference for the pretrained model.
    inference_config = dict(inputs=['input_data'],
                            output_directory=output_folder,
                            output_filename=output_segmentation_filename,
                            batch_size=50,
                            patch_overlaps=6,
                            output_patch_shape=(56, 56, 6, 1),
                            case_in_filename=False,
                            verbose=verbose)

    skullstripping_model = load_model_with_output(
        model_name='skullstrip_mri',
        outputs=[PatchesInference(**inference_config)],
        postprocessors=[
            BinarizeLabel(),
            FillHoles(),
            LargestComponents(postprocessor_string='label')
        ])

    # Step 3: queue up whatever preprocessing is still required.
    if not preprocessed:

        # DICOM inputs are converted to NIfTI before anything else runs.
        pipeline = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            pipeline.append(
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder))

        if not registered:
            pipeline.append(
                Coregister(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0))

        pipeline.append(
            ZeroMeanNormalization(data_groups=['input_data'],
                                  save_output=keep_preprocessed,
                                  verbose=verbose,
                                  output_folder=output_folder))

        data_collection.append_preprocessor(pipeline)

    # Step 4: run inference, then tidy up intermediates.
    if verbose:
        docker_print('Starting New Case...')

        docker_print('Skullstripping Prediction')
        docker_print('======================')

    skullstripping_model.generate_outputs(data_collection, output_folder)

    data_collection.clear_preprocessor_outputs()
Example #6
0
def predict_brain_mets(output_folder,
                       T2=None,
                       T1POST=None,
                       T1PRE=None,
                       FLAIR=None,
                       ground_truth=None,
                       input_directory=None,
                       bias_corrected=True,
                       registered=False,
                       skullstripped=False,
                       preprocessed=False,
                       output_segmentation_filename='segmentation.nii.gz',
                       output_probabilities=False,
                       quiet=False,
                       input_data=None,
                       save_only_segmentations=False,
                       save_all_steps=False):

    """ Segment enhancing brain metastases from four MRI modalities.

        Runs patch-based inference with the pretrained 'mets_enhancing'
        model and writes a binarized label map named
        `output_segmentation_filename` into `output_folder`. Boolean flags
        skip preprocessing stages already applied to the inputs.

        NOTE(review): `output_probabilities` is accepted but never read in
        this body.
    """

    verbose = not quiet
    save_preprocessed = not save_only_segmentations

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    # Channel ordering is [T1PRE, T1POST, T2, FLAIR]; channel indices
    # below refer to this ordering.
    data_collection = load_data(inputs=[T1PRE, T1POST, T2, FLAIR],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    # 'output_channels': [1] — only the second model output channel is
    # written out.
    mets_prediction_parameters = {
        'inputs': ['input_data'],
        'output_directory': output_folder,
        'output_filename': output_segmentation_filename,
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (28, 28, 28, 1),
        'output_channels': [1],
        'case_in_filename': False,
        'verbose': verbose
    }

    # wcc_weights: class weights passed to the model loader (background
    # 0.1 vs. lesion 3.0).
    mets_model = load_model_with_output(
        model_name='mets_enhancing',
        outputs=[PatchesInference(**mets_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='label')],
        wcc_weights={
            0: 0.1,
            1: 3.0
        })

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        # The skull-strip model is built here, but only wired into the
        # pipeline by the second `if not skullstripped` block below — the
        # identical guard keeps that later use safe.
        if not skullstripped:
            # 'save_to_file': False — the brain mask stays in memory and is
            # consumed by SkullStrip_Model below rather than written out.
            skullstripping_prediction_parameters = {
                'inputs': ['input_data'],
                'output_filename':
                os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
                'batch_size':
                50,
                'patch_overlaps':
                3,
                'output_patch_shape': (56, 56, 6, 1),
                'save_to_file':
                False,
                'data_collection':
                data_collection
            }

            skullstripping_model = load_model_with_output(
                model_name='skullstrip_mri',
                outputs=[
                    PatchesInference(**skullstripping_prediction_parameters)
                ],
                postprocessors=[
                    BinarizeLabel(),
                    FillHoles(),
                    LargestComponents()
                ])

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            # Coregister to channel 1 (T1POST).
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=1)
            ]

        if not skullstripped:
            # First-pass normalization so the skull-strip model sees
            # zero-mean inputs.
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder)
            ]

            # reference_channel=[3, 1] — presumably FLAIR and T1POST in the
            # input ordering above; confirm against SkullStrip_Model.
            preprocessing_steps += [
                SkullStrip_Model(data_groups=['input_data'],
                                 model=skullstripping_model,
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder,
                                 reference_channel=[3, 1])
            ]

            # Re-normalize within the brain mask from the step just added
            # (mask_preprocessor=preprocessing_steps[-1] — list order is
            # load-bearing here).
            preprocessing_steps += [
                ZeroMeanNormalization(
                    data_groups=['input_data'],
                    save_output=save_preprocessed,
                    verbose=verbose,
                    output_folder=output_folder,
                    mask_preprocessor=preprocessing_steps[-1],
                    preprocessor_string='_preprocessed')
            ]

        else:
            # Already skull-stripped: normalize once, masking out the
            # zero-valued background.
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_preprocessed,
                                      verbose=verbose,
                                      output_folder=output_folder,
                                      mask_zeros=True,
                                      preprocessor_string='_preprocessed')
            ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    if verbose:
        docker_print('Starting New Case...')

        docker_print('Enhancing Mets Prediction')
        docker_print('======================')

    mets_model.generate_outputs(data_collection, output_folder)

    # Remove intermediate preprocessing artifacts from the collection.
    data_collection.clear_preprocessor_outputs()
Example #7
0
def skull_strip(output_folder,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                normalized=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                mask_output='skullstrip_mask.nii.gz',
                input_data=None,
                verbose=True):

    """ Compute a skull-strip mask for each case from FLAIR and T1-post
        volumes, writing `mask_output` into `output_folder`.
    """

    # Step 1: load the inputs into a data collection ([FLAIR, T1POST] order).
    data_collection = load_data(inputs=[FLAIR, T1POST],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    # Step 2: set up patch-based inference for the skull-stripping model.
    inference_config = dict(inputs=['input_data'],
                            output_filename=os.path.join(output_folder, mask_output),
                            batch_size=50,
                            patch_overlaps=6,
                            channels_first=False,
                            patch_dimensions=[-4, -3, -2],
                            output_patch_shape=(56, 56, 6, 1))

    skullstripping_model = load_model_with_output(
        model_name='skullstrip_mri',
        outputs=[ModelPatchesInference(**inference_config)],
        postprocessors=[
            BinarizeLabel(),
            FillHoles(),
            LargestComponents(postprocessor_string='_label')
        ])

    # Step 3: attach whichever preprocessing steps are still needed.
    if not preprocessed:

        # Random hack to save DICOMs to niftis for further processing.
        pipeline = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            pipeline.append(
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder))

        if not registered:
            pipeline.append(
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0))

        pipeline.append(
            ZeroMeanNormalization(data_groups=['input_data'],
                                  save_output=save_all_steps,
                                  verbose=verbose,
                                  output_folder=output_folder))

        data_collection.append_preprocessor(pipeline)

    # Step 4: run inference case by case.
    for case in data_collection.cases:

        docker_print('\nStarting New Case...\n')

        docker_print('Skullstripping Prediction')
        docker_print('======================')
        skullstripping_model.generate_outputs(data_collection, case)