Example No. 1
    def execute(self, data_collection):
        """ There is a lot of repeated code in the preprocessors. Think about preprocessor structures and work on this class.
        """

        if self.verbose:
            docker_print('Working on Preprocessor:', self.name)

        for label, data_group in list(self.data_groups.items()):

            self.generate_output_filenames(data_collection, data_group)

            if self.array_input and type(data_group.preprocessed_case) is list:
                data_group.get_data()
            elif not self.array_input and type(
                    data_group.preprocessed_case) is list:
                pass
            elif not self.array_input:
                self.output_data = data_group.preprocessed_case
                self.save_to_file(data_group)
                data_group.preprocessed_case = self.output_filenames

            self.preprocess(data_group)

            if self.save_output:
                self.save_to_file(data_group)

            if self.return_array:
                self.convert_to_array_data(data_group)
Example No. 2
    def execute(self, data_collection, return_array=False):

        """ This function serves operates the main logic of preprocessors.
            Its purpose is to check if data needs to be saved to disk before
            preprocessing (e.g. by an external program), or load data from
            disk if it is to be processed in Python. After processing data
            it can similarly return data in memory or on disk.
        """

        if self.verbose:
            docker_print('Working on Preprocessor:', self.name)

        for label, data_group in self.data_groups_iterator:

            self.generate_output_filenames(data_collection, data_group)

            if self.array_input and type(data_group.preprocessed_case) is list:
                data_group.preprocessed_case, data_group.preprocessed_affine = data_group.get_data(return_affine=True)
            elif not self.array_input and type(data_group.preprocessed_case) is list:
                pass
            elif not self.array_input:
                self.output_data = data_group.preprocessed_case
                self.save_to_file(data_group)
                data_group.preprocessed_case = self.output_filenames

            self.preprocess(data_group)

            if self.save_output:
                self.save_to_file(data_group)

            if return_array:
                self.convert_to_array_data(data_group)
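Both execute() variants above follow the same pattern: resolve output filenames for each data group, move data between disk and memory depending on array_input, run preprocess(), and optionally save or re-array the result. As a rough usage sketch, the snippet below wires one preprocessor into a data collection using the load_data / append_preprocessor / ZeroMeanNormalization calls that appear in the pipeline functions later on this page; the file paths are placeholders, and imports are omitted, as in the examples themselves.

# Hedged usage sketch (not library code): register a preprocessor on a data
# collection; execute() is then invoked per case with that collection.
data_collection = load_data(inputs=['case01/FLAIR.nii.gz', 'case01/T1POST.nii.gz'],
                            output_folder='./preprocessed',
                            verbose=True)

normalization = ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=True,
                                      verbose=True,
                                      output_folder='./preprocessed')

data_collection.append_preprocessor([normalization])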
Example No. 3
    def execute(self, data_collection):
        """ There is a lot of repeated code in the preprocessors. Think about preprocessor structures and work on this class.
        """

        if self.verbose:
            docker_print('Working on Preprocessor:', self.name)

        self.initialize(
            data_collection)  # TODO: make overwrite work with initializations

        for label, data_group in self.data_groups.items():

            self.generate_output_filenames(data_collection, data_group)

            self.preprocess(data_group)

            if self.save_output:
                self.save_to_file(data_group)

            # TODO: Duplicated handoff logic here; this should be consolidated (a possible helper is sketched after this example).
            if self.next_prepreprocessor is not None:
                if self.next_prepreprocessor.array_input:
                    self.convert_to_array_data(data_group)
                else:
                    self.save_to_file(data_group)
                    data_group.preprocessed_case = self.output_filenames

            if self.return_array:
                self.convert_to_array_data(data_group)

            self.store_outputs(data_collection, data_group)
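The duplicated handoff flagged in the comment above (convert to an array for an array-input successor, otherwise write to disk and point preprocessed_case at the output files) could be pulled into a single helper. The sketch below is only an illustration of that refactoring, reusing the attributes visible in these examples (array_input, output_filenames, convert_to_array_data, save_to_file); the _hand_off name is hypothetical and not part of the library.

    def _hand_off(self, data_group, next_step):
        # Hypothetical helper (not library code): centralize the array-vs-file
        # handoff duplicated across the execute() variants above.
        if next_step is None:
            return
        if next_step.array_input:
            self.convert_to_array_data(data_group)
        else:
            self.save_to_file(data_group)
            data_group.preprocessed_case = self.output_filenames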
Example No. 4
    def execute(self, data_collection, return_array=False):

        """ There is a lot of repeated code in the preprocessors. Think about preprocessor structures and work on this class.
        """

        if self.verbose:
            docker_print('Working on Preprocessor:', self.name)

        for label, data_group in list(self.data_groups.items()):

            self.generate_output_filenames(data_collection, data_group)

            if type(data_group.preprocessed_case) is not list:
                self.output_data = data_group.preprocessed_case            
            else:

                for file_idx, output_filename in enumerate(self.output_filenames):
                    if os.path.isdir(data_group.preprocessed_case[file_idx]):
                        if self.overwrite or not os.path.exists(output_filename):
                            array_data, affine = read_image_files(data_group.preprocessed_case[file_idx], return_affine=True)
                            # TODO: Check whether the array subsetting below has edge cases.
                            save_data(array_data[..., 0], output_filename, reference_data=affine)
                    else:
                        self.output_filenames[file_idx] = data_group.preprocessed_case[file_idx]

                data_group.preprocessed_case = self.output_filenames
                self.output_data = data_group.preprocessed_case

            if return_array:
                self.convert_to_array_data(data_group)
Example No. 5
def predict_ischemic_stroke(output_folder, B0, DWI, ground_truth=None, input_directory=None, bias_corrected=True, resampled=False, registered=False, normalized=False, preprocessed=False, save_preprocess=False, save_all_steps=False, output_segmentation_filename='segmentation.nii.gz', verbose=True, input_data=None, registration_reference='FLAIR'):

    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[B0, DWI], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    stroke_prediction_parameters = {'inputs': ['input_data'], 
                        'output_filename': os.path.join(output_folder, output_segmentation_filename),
                        'batch_size': 50,
                        'patch_overlaps': 8,
                        'output_patch_shape': (62, 62, 6, 1)}

    stroke_model = load_model_with_output(model_name='ischemic_stroke', outputs=[ModelPatchesInference(**stroke_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='_label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=(save_preprocess or save_all_steps), verbose=verbose, output_folder=output_folder, reference_channel=registration_reference_channel)]

        if not normalized:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, preprocessor_string='_preprocessed')]

        else:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('Starting New Case...')
        
        docker_print('Ischemic Stroke Prediction')
        docker_print('======================')
        stroke_model.generate_outputs(data_collection, case)
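A minimal invocation of the predict_ischemic_stroke pipeline above might look like the sketch below; the paths are placeholders and the remaining keyword arguments keep their defaults.

# Hypothetical call; B0/DWI paths are placeholders for the diffusion inputs.
predict_ischemic_stroke(output_folder='./stroke_case_01',
                        B0='./stroke_case_01/B0.nii.gz',
                        DWI='./stroke_case_01/DWI.nii.gz',
                        registered=False,
                        verbose=True)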
Example No. 6
    def predict(self, input_data, model=None):

        repetition_offsets = [np.linspace(0, self.input_patch_shape[axis] - 1, self.patch_overlaps, dtype=int) for axis in self.patch_dimensions]

        # Copy the output shape up front so repatched_shape also exists when
        # pad_borders is False, and so the in-place edits below do not mutate self.output_shape.
        repatched_shape = list(self.output_shape)

        if self.pad_borders:
            # TODO -- Clean up this border-padding code and make it more readable.
            input_pad_dimensions = [(0, 0)] * input_data.ndim
            new_input_shape = list(input_data.shape)
            for idx, dim in enumerate(self.patch_dimensions):
                # Might not work for odd-shaped patches; check.
                input_pad_dimensions[dim] = (int(self.input_patch_shape[dim] // 2), int(self.input_patch_shape[dim] // 2))
                new_input_shape[dim] += self.input_patch_shape[dim]
            for idx, dim in enumerate(self.output_patch_dimensions):
                repatched_shape[dim] += self.input_patch_shape[dim]

            padded_input_data = np.zeros(new_input_shape)
            if self.channels_first:
                input_slice = [slice(None)] * 2 + [slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, None) for dim in self.patch_dimensions]
            else:
                input_slice = [slice(None)] + [slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, None) for dim in self.patch_dimensions] + [slice(None)]
            padded_input_data[tuple(input_slice)] = input_data
            input_data = padded_input_data

        repatched_image = np.zeros(repatched_shape)

        corner_data_dims = [input_data.shape[axis] for axis in self.patch_dimensions]
        corner_patch_dims = [self.output_patch_shape[axis] for axis in self.patch_dimensions]

        all_corners = np.indices(corner_data_dims)

        # There must be a better way to round up to an integer..
        possible_corners_slice = [slice(None)] + [slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, None) for dim in self.patch_dimensions]
        all_corners = all_corners[tuple(possible_corners_slice)]

        for rep_idx in range(self.patch_overlaps):

            if self.verbose:
                docker_print('Predicting patch set', str(rep_idx + 1) + '/' + str(self.patch_overlaps) + '...')

            corners_grid_shape = [slice(None)]
            for dim in range(all_corners.ndim - 1):
                corners_grid_shape += [slice(repetition_offsets[dim][rep_idx], corner_data_dims[dim], corner_patch_dims[dim])]

            corners_list = all_corners[tuple(corners_grid_shape)]
            corners_list = np.reshape(corners_list, (corners_list.shape[0], -1)).T

            if self.check_empty_patch:
                corners_list = self.remove_empty_patches(input_data, corners_list)

            for corner_list_idx in range(0, corners_list.shape[0], self.batch_size):

                corner_batch = corners_list[corner_list_idx:corner_list_idx + self.batch_size]
                input_patches = self.grab_patch(input_data, corner_batch)
                
                prediction = self.model.predict(input_patches)
                
                self.insert_patch(repatched_image, prediction, corner_batch)

            if rep_idx == 0:
                output_data = np.copy(repatched_image)
            else:
                output_data = output_data + (1.0 / (rep_idx)) * (repatched_image - output_data)  # Running Average

        if self.pad_borders:

            output_slice = [slice(None)] * output_data.ndim  # Full slices by default; patch dimensions are restricted below.
            for idx, dim in enumerate(self.output_patch_dimensions):
                # Might not work for odd-shaped patches; check.
                output_slice[dim] = slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, 1)
            output_data = output_data[tuple(output_slice)]

        return output_data
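The update at the end of the repetition loop is meant to maintain a running average of the repatched predictions. A standard incremental mean uses a step of 1.0 / (rep_idx + 1), as the self-contained NumPy check below illustrates; note that the code above divides by rep_idx, which, taken on its own, overwrites rather than averages the first repetition's estimate at rep_idx == 1.

import numpy as np

# Illustration only: an incremental mean with step 1 / (rep_idx + 1) reproduces
# the arithmetic mean of all repetitions.
repetitions = [np.random.rand(4, 4) for _ in range(5)]

output_data = np.zeros_like(repetitions[0])
for rep_idx, repatched_image in enumerate(repetitions):
    output_data = output_data + (1.0 / (rep_idx + 1)) * (repatched_image - output_data)

assert np.allclose(output_data, np.mean(repetitions, axis=0))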
Example No. 7
def predict_GBM(output_folder,
                T1POST=None,
                FLAIR=None,
                T1PRE=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                skullstripped=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                output_wholetumor_filename='wholetumor_segmentation.nii.gz',
                output_enhancing_filename='enhancing_segmentation.nii.gz',
                verbose=True,
                input_data=None):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    wholetumor_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder,
                                        output_wholetumor_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (56, 56, 6, 1),
        'input_channels': [0, 1]
    }

    enhancing_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder,
                                        output_enhancing_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (56, 56, 6, 1)
    }

    wholetumor_model = load_model_with_output(
        model_name='gbm_wholetumor_mri',
        outputs=[ModelPatchesInference(**wholetumor_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')])
    enhancing_model = load_model_with_output(
        model_name='gbm_enhancingtumor_mri',
        outputs=[ModelPatchesInference(**enhancing_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')])

    if not preprocessed and not skullstripped:

        skullstripping_prediction_parameters = {
            'inputs': ['input_data'],
            'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
            'batch_size': 50,
            'patch_overlaps': 3,
            'output_patch_shape': (56, 56, 6, 1),
            'save_to_file': False
        }

        skullstripping_model = load_model_with_output(
            model_name='skullstrip_mri',
            outputs=[
                ModelPatchesInference(**skullstripping_prediction_parameters)
            ],
            postprocessors=[BinarizeLabel(),
                            FillHoles(),
                            LargestComponents()])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Convert any DICOM inputs to NIfTI files so subsequent steps can process them.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0)
            ]

        if not skullstripped:

            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder)
            ]

            preprocessing_steps += [
                SkullStrip_Model(data_groups=['input_data'],
                                 model=skullstripping_model,
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder,
                                 reference_channel=[0, 1])
            ]

            preprocessing_steps += [
                ZeroMeanNormalization(
                    data_groups=['input_data'],
                    save_output=save_all_steps,
                    verbose=verbose,
                    output_folder=output_folder,
                    mask_preprocessor=preprocessing_steps[-1],
                    preprocessor_string='_preprocessed')
            ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('\nStarting New Case...\n')

        docker_print('Whole Tumor Prediction')
        docker_print('======================')
        wholetumor_file = wholetumor_model.generate_outputs(
            data_collection, case)[0]['filenames'][-1]

        data_collection.add_channel(case, wholetumor_file)

        docker_print('Enhancing Tumor Prediction')
        docker_print('======================')
        enhancing_model.generate_outputs(data_collection, case)

        data_collection.clear_outputs()
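Assuming DICOM directories or NIfTI files on disk, the GBM pipeline above could be driven roughly as follows; paths are placeholders, and the bias_corrected / registered / skullstripped flags control which preprocessors from Step 3 are appended.

# Hypothetical call to predict_GBM as defined above; paths are placeholders.
predict_GBM(output_folder='./gbm_case_01',
            FLAIR='./gbm_case_01/FLAIR/',
            T1POST='./gbm_case_01/T1POST/',
            T1PRE='./gbm_case_01/T1PRE/',
            bias_corrected=True,
            registered=False,
            skullstripped=False,
            verbose=True)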
Example No. 8
def predict_ischemic_stroke(output_folder, 
                            B0, 
                            DWI, 
                            ground_truth=None, 
                            input_directory=None,
                            registered=False,
                            preprocessed=False, 
                            save_only_segmentations=False, 
                            save_all_steps=False, 
                            output_segmentation_filename='segmentation.nii.gz',
                            input_data=None, 
                            registration_reference='FLAIR',
                            quiet=False):

    verbose = not quiet
    save_preprocessed = not save_only_segmentations
    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[B0, DWI], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    stroke_prediction_parameters = {'inputs': ['input_data'], 
                        'output_directory': output_folder,
                        'output_filename': output_segmentation_filename,
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (62, 62, 6, 1),
                        'case_in_filename': False,
                        'verbose': verbose}

    stroke_model = load_model_with_output(model_name='ischemic_stroke', outputs=[PatchesInference(**stroke_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=registration_reference_channel)]

        # The enclosing `if not preprocessed` guard already applies, so append normalization unconditionally.
        preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    if verbose:
        docker_print('Starting New Case...')
        
        docker_print('Ischemic Stroke Prediction')
        docker_print('======================')
    
    stroke_model.generate_outputs(data_collection, output_folder)

    data_collection.clear_preprocessor_outputs()
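This newer ischemic-stroke entry point replaces verbose and save_preprocess with quiet and save_only_segmentations and runs inference once per output folder rather than looping over cases. A sketch of a call, with placeholder paths:

# Hypothetical call to the newer predict_ischemic_stroke signature above.
predict_ischemic_stroke(output_folder='./stroke_case_02',
                        B0='./stroke_case_02/B0/',
                        DWI='./stroke_case_02/DWI/',
                        save_only_segmentations=True,
                        quiet=False)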
Example No. 9
def predict_GBM(output_folder, 
                T1POST=None, 
                FLAIR=None, 
                T1PRE=None, 
                ground_truth=None, 
                input_directory=None, 
                bias_corrected=True, 
                resampled=False, 
                registered=False, 
                skullstripped=False, 
                preprocessed=False, 
                save_only_segmentations=False, 
                save_all_steps=False, 
                output_wholetumor_filename='wholetumor_segmentation.nii.gz', 
                output_enhancing_filename='enhancing_segmentation.nii.gz', 
                output_probabilities=False, 
                quiet=False, 
                input_data=None, 
                registration_reference='FLAIR'):

    verbose = not quiet
    save_preprocessed = not save_only_segmentations

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models and Postprocessors
    #--------------------------------------------------------------------#

    wholetumor_prediction_parameters = {'output_directory': output_folder,
                        'output_filename': output_wholetumor_filename,
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (56, 56, 6, 1),
                        'case_in_filename': False,
                        'verbose': verbose}

    enhancing_prediction_parameters = {'output_directory': output_folder,
                        'output_filename': output_enhancing_filename,
                        'batch_size': 50,
                        'patch_overlaps': 6,
                        'output_patch_shape': (56, 56, 6, 1),
                        'case_in_filename': False,
                        'verbose': verbose}

    wholetumor_model = load_model_with_output(model_name='gbm_wholetumor_mri', 
        outputs=[PatchesInference(**wholetumor_prediction_parameters)], 
        postprocessors=[BinarizeLabel(postprocessor_string='label')])

    enhancing_model = load_model_with_output(model_name='gbm_enhancingtumor_mri', 
        outputs=[PatchesInference(**enhancing_prediction_parameters)], 
        postprocessors=[BinarizeLabel(postprocessor_string='label')])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not bias_corrected:
            preprocessing_steps += [N4BiasCorrection(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

        if not registered:
            preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=0)]

        if not skullstripped:

            skullstripping_prediction_parameters = {'inputs': ['input_data'], 
                'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
                'batch_size': 50,
                'patch_overlaps': 3,
                'output_patch_shape': (56, 56, 6, 1),
                'save_to_file': False,
                'data_collection': data_collection}

            skullstripping_model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])

            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]

            preprocessing_steps += [SkullStrip_Model(data_groups=['input_data'], model=skullstripping_model, save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=[0, 1])]

            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_preprocessor=preprocessing_steps[-1], preprocessor_string='_preprocessed')]

        else:
            preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    if verbose:
        docker_print('Starting New Case...')
    
        docker_print('Whole Tumor Prediction')
        docker_print('======================')

    wholetumor_file = wholetumor_model.generate_outputs(data_collection, output_folder)[0]['filenames'][-1]
    data_collection.add_channel(output_folder, wholetumor_file)

    if verbose:
        docker_print('Enhancing Tumor Prediction')
        docker_print('======================')

    enhancing_model.generate_outputs(data_collection, output_folder)

    data_collection.clear_preprocessor_outputs()
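If the inputs have already been fully preprocessed, Step 3 can be skipped entirely via the preprocessed flag, leaving only the two cascaded inference models. A sketch with placeholder paths:

# Hypothetical call to the newer predict_GBM signature above with preprocessed inputs,
# so Step 3 is skipped and only the whole-tumor and enhancing models run.
predict_GBM(output_folder='./gbm_case_02',
            FLAIR='./gbm_case_02/FLAIR_preprocessed.nii.gz',
            T1POST='./gbm_case_02/T1POST_preprocessed.nii.gz',
            T1PRE='./gbm_case_02/T1PRE_preprocessed.nii.gz',
            preprocessed=True,
            save_only_segmentations=True)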
Example No. 10
    def generate_patch_data(self, input_data, model=None):

        # Copy the output shape up front so repatched_shape also exists when
        # pad_borders is False, and so the in-place edits below do not mutate self.output_shape.
        repatched_shape = list(self.output_shape)

        if self.pad_borders:
            # TODO -- Clean up this border-padding code and make it more readable.
            input_pad_dimensions = [(0, 0)] * input_data.ndim
            new_input_shape = list(input_data.shape)
            for idx, dim in enumerate(self.patch_dimensions):
                # Might not work for odd-shaped patches; check.
                input_pad_dimensions[dim] = (int(self.input_patch_shape[dim] // 2),
                                             int(self.input_patch_shape[dim] // 2))
                new_input_shape[dim] += self.input_patch_shape[dim]
            for idx, dim in enumerate(self.output_patch_dimensions):
                repatched_shape[dim] += self.input_patch_shape[dim]

            padded_input_data = np.zeros(new_input_shape)
            if self.channels_first:
                input_slice = [slice(None)] * 2 + [
                    slice(self.input_patch_shape[dim] // 2,
                          -self.input_patch_shape[dim] // 2, None)
                    for dim in self.patch_dimensions
                ]
            else:
                input_slice = [slice(None)] + [
                    slice(self.input_patch_shape[dim] // 2,
                          -self.input_patch_shape[dim] // 2, None)
                    for dim in self.patch_dimensions
                ] + [slice(None)]
            padded_input_data[tuple(input_slice)] = input_data
            input_data = padded_input_data

        corner_data_dims = [
            input_data.shape[axis] for axis in self.patch_dimensions
        ]

        if self.patch_method == 'random':

            if self.check_empty_patch:
                all_corners = np.array(np.nonzero(input_data))[1:-1]
            else:
                all_corners = np.indices(corner_data_dims)
                # There must be a better way to round up to an integer..
                possible_corners_slice = [slice(None)] + [
                    slice(self.input_patch_shape[dim] // 2,
                          -self.input_patch_shape[dim] // 2, None)
                    for dim in self.patch_dimensions
                ]
                all_corners = all_corners[tuple(possible_corners_slice)]

            all_corners = np.reshape(all_corners, (all_corners.shape[0], -1)).T
            corner_selection = all_corners[np.random.choice(
                all_corners.shape[0], size=self.patch_num, replace=False), :]

            if self.current_layer is not None:
                output_operation = self.model.get_layer_output_function(
                    self.current_layer)

            for corner_list_idx in range(0, corner_selection.shape[0],
                                         self.batch_size):

                print(corner_list_idx, '/', corner_selection.shape[0])

                corner_batch = corner_selection[
                    corner_list_idx:corner_list_idx + self.batch_size]
                input_patches = self.grab_patch(input_data, corner_batch)

                if self.baseline_mean_intensity:
                    prediction = input_patches
                elif self.current_layer is None:
                    prediction = self.model.predict(input_patches)
                else:
                    prediction = output_operation([input_patches])[0]

                prediction = np.mean(prediction, axis=(1, 2, 3), keepdims=True)

                depadded_corner_batch = np.zeros_like(corner_batch)
                for corner_idx, corner in enumerate(corner_batch):
                    depadded_corner_batch[corner_idx] = [
                        corner[dim] - self.input_patch_shape[dim + 1] // 2
                        for dim in range(corner_batch.shape[1])
                    ]
                # prediction = self.model.predict(input_patches)

                self.write_patches_to_hdf5(prediction, depadded_corner_batch)

            pass

        elif self.patch_method == 'grid':

            repetition_offsets = [
                np.linspace(0,
                            self.input_patch_shape[axis] - 1,
                            self.patch_overlaps,
                            dtype=int) for axis in self.patch_dimensions
            ]
            repatched_image = np.zeros(repatched_shape)

            corner_patch_dims = [
                self.output_patch_shape[axis] for axis in self.patch_dimensions
            ]

            for rep_idx in range(self.patch_overlaps):

                if self.verbose:
                    docker_print(
                        'Predicting patch set',
                        str(rep_idx + 1) + '/' + str(self.patch_overlaps) +
                        '...')

                corners_grid_shape = [slice(None)]
                for dim in range(all_corners.ndim - 1):
                    corners_grid_shape += [
                        slice(repetition_offsets[dim][rep_idx],
                              corner_data_dims[dim], corner_patch_dims[dim])
                    ]

                corners_list = all_corners[tuple(corners_grid_shape)]
                corners_list = np.reshape(corners_list,
                                          (corners_list.shape[0], -1)).T

                if self.check_empty_patch:
                    corners_list = self.remove_empty_patches(
                        input_data, corners_list)

                for corner_list_idx in range(0, corners_list.shape[0],
                                             self.batch_size):

                    corner_batch = corners_list[
                        corner_list_idx:corner_list_idx + self.batch_size]
                    input_patches = self.grab_patch(input_data, corner_batch)

                    prediction = self.model.predict(input_patches)

                    print(prediction.shape)
                    print(corner_batch)

                if rep_idx == 0:
                    output_data = np.copy(repatched_image)
                else:
                    output_data = output_data + (1.0 / (rep_idx)) * (
                        repatched_image - output_data)  # Running Average

        # if self.pad_borders:

        #     output_slice = [slice(None)] * output_data.ndim  # Weird
        #     for idx, dim in enumerate(self.output_patch_dimensions):
        #         # Might not work for odd-shaped patches; check.
        #         output_slice[dim] = slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, 1)
        #     output_data = output_data[tuple(output_slice)]

        return
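The 'grid' branch above chooses patch corners by slicing the output of np.indices with a per-axis stride equal to the patch size and a per-repetition offset. The toy 2-D NumPy sketch below shows that selection in isolation; the sizes and offsets are placeholders.

import numpy as np

# Toy illustration of the corner-grid selection used in the 'grid' branch above.
data_dims = [9, 9]          # spatial dimensions of a toy volume
patch_dims = [3, 3]         # output patch size along each spatial axis
offset = [1, 0]             # one repetition's offset along each axis

all_corners = np.indices(data_dims)                      # shape (2, 9, 9)
grid_slices = [slice(None)] + [slice(offset[d], data_dims[d], patch_dims[d])
                               for d in range(len(data_dims))]

corners = all_corners[tuple(grid_slices)]                # shape (2, 3, 3)
corners_list = np.reshape(corners, (corners.shape[0], -1)).T
print(corners_list)   # each row is one (row, col) patch corner on the grid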
Example No. 11
def skull_strip(output_folder,
                T1POST=None,
                FLAIR=None,
                ground_truth=None,
                input_directory=None,
                bias_corrected=True,
                resampled=False,
                registered=False,
                normalized=False,
                preprocessed=False,
                save_preprocess=False,
                save_all_steps=False,
                mask_output='skullstrip_mask.nii.gz',
                input_data=None,
                verbose=True):

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[FLAIR, T1POST],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    skullstripping_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder, mask_output),
        'batch_size': 50,
        'patch_overlaps': 6,
        'channels_first': False,
        'patch_dimensions': [-4, -3, -2],
        'output_patch_shape': (56, 56, 6, 1)
    }

    skullstripping_model = load_model_with_output(
        model_name='skullstrip_mri',
        outputs=[
            ModelPatchesInference(**skullstripping_prediction_parameters)
        ],
        postprocessors=[
            BinarizeLabel(),
            FillHoles(),
            LargestComponents(postprocessor_string='_label')
        ])

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Convert any DICOM inputs to NIfTI files so subsequent steps can process them.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=0)
            ]

        preprocessing_steps += [
            ZeroMeanNormalization(data_groups=['input_data'],
                                  save_output=save_all_steps,
                                  verbose=verbose,
                                  output_folder=output_folder)
        ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('\nStarting New Case...\n')

        docker_print('Skullstripping Prediction')
        docker_print('======================')
        skullstripping_model.generate_outputs(data_collection, case)
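A sketch of calling the skull_strip pipeline above, with placeholder paths; the resulting mask is written to mask_output inside output_folder.

# Hypothetical call to skull_strip as defined above; paths are placeholders.
skull_strip(output_folder='./skullstrip_case_01',
            FLAIR='./skullstrip_case_01/FLAIR.nii.gz',
            T1POST='./skullstrip_case_01/T1POST.nii.gz',
            mask_output='skullstrip_mask.nii.gz',
            verbose=True)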
Example No. 12
def predict_brain_mets(output_folder,
                       T2=None,
                       T1POST=None,
                       T1PRE=None,
                       FLAIR=None,
                       ground_truth=None,
                       input_directory=None,
                       bias_corrected=True,
                       resampled=False,
                       registered=False,
                       skullstripped=False,
                       preprocessed=False,
                       save_preprocess=False,
                       save_all_steps=False,
                       output_segmentation_filename='segmentation.nii.gz',
                       verbose=True,
                       input_data=None,
                       registration_reference='FLAIR'):

    registration_reference_channel = 1

    #--------------------------------------------------------------------#
    # Step 1, Load Data
    #--------------------------------------------------------------------#

    data_collection = load_data(inputs=[T1PRE, T1POST, T2, FLAIR],
                                output_folder=output_folder,
                                input_directory=input_directory,
                                ground_truth=ground_truth,
                                input_data=input_data,
                                verbose=verbose)

    #--------------------------------------------------------------------#
    # Step 2, Load Models
    #--------------------------------------------------------------------#

    mets_prediction_parameters = {
        'inputs': ['input_data'],
        'output_filename': os.path.join(output_folder, output_segmentation_filename),
        'batch_size': 50,
        'patch_overlaps': 8,
        'output_patch_shape': (28, 28, 28, 1),
        'output_channels': [1]
    }

    mets_model = load_model_with_output(
        model_name='mets_enhancing',
        outputs=[ModelPatchesInference(**mets_prediction_parameters)],
        postprocessors=[BinarizeLabel(postprocessor_string='_label')],
        wcc_weights={
            0: 0.1,
            1: 3.0
        })

    #--------------------------------------------------------------------#
    # Step 3, Add Data Preprocessors
    #--------------------------------------------------------------------#

    if not preprocessed:

        # Convert any DICOM inputs to NIfTI files so subsequent steps can process them.
        preprocessing_steps = [
            DICOMConverter(data_groups=['input_data'],
                           save_output=save_all_steps,
                           verbose=verbose,
                           output_folder=output_folder)
        ]

        if not skullstripped:
            skullstripping_prediction_parameters = {
                'inputs': ['input_data'],
                'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
                'batch_size': 50,
                'patch_overlaps': 3,
                'output_patch_shape': (56, 56, 6, 1),
                'save_to_file': False,
                'data_collection': data_collection
            }

            skullstripping_model = load_model_with_output(
                model_name='skullstrip_mri',
                outputs=[
                    ModelPatchesInference(
                        **skullstripping_prediction_parameters)
                ],
                postprocessors=[
                    BinarizeLabel(),
                    FillHoles(),
                    LargestComponents()
                ])

        if not bias_corrected:
            preprocessing_steps += [
                N4BiasCorrection(data_groups=['input_data'],
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder)
            ]

        if not registered:
            preprocessing_steps += [
                Coregister(data_groups=['input_data'],
                           save_output=(save_preprocess or save_all_steps),
                           verbose=verbose,
                           output_folder=output_folder,
                           reference_channel=registration_reference_channel)
            ]

        if not skullstripped:
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder)
            ]

            preprocessing_steps += [
                SkullStrip_Model(data_groups=['input_data'],
                                 model=skullstripping_model,
                                 save_output=save_all_steps,
                                 verbose=verbose,
                                 output_folder=output_folder,
                                 reference_channel=[3, 1])
            ]

            preprocessing_steps += [
                ZeroMeanNormalization(
                    data_groups=['input_data'],
                    save_output=save_all_steps,
                    verbose=verbose,
                    output_folder=output_folder,
                    mask_preprocessor=preprocessing_steps[-1],
                    preprocessor_string='_preprocessed')
            ]

        else:
            preprocessing_steps += [
                ZeroMeanNormalization(data_groups=['input_data'],
                                      save_output=save_all_steps,
                                      verbose=verbose,
                                      output_folder=output_folder,
                                      mask_zeros=True,
                                      preprocessor_string='_preprocessed')
            ]

        data_collection.append_preprocessor(preprocessing_steps)

    #--------------------------------------------------------------------#
    # Step 4, Run Inference
    #--------------------------------------------------------------------#

    for case in data_collection.cases:

        docker_print('Starting New Case...')

        docker_print('Enhancing Mets Prediction')
        docker_print('======================')
        mets_model.generate_outputs(data_collection, case)
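A sketch of invoking the brain-metastasis pipeline above with all four modalities; paths are placeholders and match the load_data input order (T1PRE, T1POST, T2, FLAIR).

# Hypothetical call to predict_brain_mets as defined above; paths are placeholders.
predict_brain_mets(output_folder='./mets_case_01',
                   T1PRE='./mets_case_01/T1PRE.nii.gz',
                   T1POST='./mets_case_01/T1POST.nii.gz',
                   T2='./mets_case_01/T2.nii.gz',
                   FLAIR='./mets_case_01/FLAIR.nii.gz',
                   skullstripped=False,
                   verbose=True)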