Example #1
    def test_simple(self):
        data_param = {'mr': {'path_to_search': IMAGE_PATH_2D}}
        reader = ImageReader().initialise(data_param)
        # test properties
        self.default_property_asserts(reader)
        idx, data, interp = reader()

        # test output
        self.assertTrue('mr' in data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'mr': (1, )})
        self.assertEqual(data['mr'].shape, (100, 100, 1, 1, 1))
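A minimal standalone sketch of the same call pattern (IMAGE_PATH_2D is assumed to be a directory of 100x100 2-D images, as in the test fixtures; NiftyNet returns every image as a 5-D array: three spatial axes, then time, then modality):

    from niftynet.io.image_reader import ImageReader

    data_param = {'mr': {'path_to_search': IMAGE_PATH_2D}}
    reader = ImageReader().initialise(data_param)
    # without an explicit idx, reader() draws a subject at random
    idx, data, interp = reader()
    print(idx, data['mr'].shape, interp)  # e.g. 0 (100, 100, 1, 1, 1) {'mr': (1,)}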
Example #2
    def test_renaming(self):
        data_param = {'mr': {'path_to_search': IMAGE_PATH_2D}}
        group_param = {'ct': ('mr', )}
        reader = ImageReader().initialise(data_param, group_param)
        self.renamed_property_asserts(reader)
        idx, data, interp = reader()

        # test output
        self.assertTrue('ct' in data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'ct': (1, )})
        self.assertEqual(data['ct'].shape, (100, 100, 1, 1, 1))
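The grouping parameter maps an output name onto a tuple of input section names, so a one-element tuple is effectively a rename, while repeating a name concatenates copies along the modality axis (compare Example #13). A hedged sketch:

    rename_param = {'ct': ('mr',)}             # 'mr' volumes surface under the name 'ct'
    concat_param = {'ct': ('mr', 'mr', 'mr')}  # three copies stacked along axis 4
    reader = ImageReader().initialise(data_param, rename_param)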
Example #3
    def test_2d_as_5D_multimodal_properties(self):
        data_param = {
            'mr': {
                'path_to_search': IMAGE_PATH_2D,
                'filename_contains': '_u',
                'pixdim': (2, 2, 2),
                'axcodes': 'RAS'
            }
        }
        grouping_param = {'ct': ('mr', 'mr', 'mr')}
        reader = ImageReader().initialise(data_param, grouping_param)
        self.assertEqual(reader.spatial_ranks, {'ct': 2})
Example #4
    def test_preprocessing_zero_padding(self):
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        idx, data, interp_order = reader()
        self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                         interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['image'], border=(0, 0, 0))])
        idx, data, interp_order = reader(idx=2)
        self.assertEqual(idx, 2)
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
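A border of (0, 0, 0) is a no-op, which is exactly what the test verifies. For reference, a hedged sketch of a non-zero border (PadLayer pads both sides of each spatial axis):

    from niftynet.layer.pad import PadLayer

    # border=(8, 8, 0) adds 8 voxels to each side of the first two axes:
    # (256, 168, 256, 1, 1) -> (272, 184, 256, 1, 1)
    reader.add_preprocessing_layers(
        [PadLayer(image_name=['image'], border=(8, 8, 0))])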
Example #5
    def initialise_dataset_loader(self, data_param=None, task_param=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.reader = ImageReader(['image'])
        if self._infer_type in ('encode', 'encode-decode'):
            self.reader = ImageReader(['image'])
        elif self._infer_type == 'sample':
            self.reader = ()
        elif self._infer_type == 'linear_interpolation':
            self.reader = ImageReader(['feature'])

        if self.reader:
            self.reader.initialise_reader(data_param, task_param)
Example #6
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(phase=reader_phase,
                                                        action=self.action)
        # read each line of csv files into an instance of Subject
        if self.is_evaluation:
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        if self.is_training:
            self.readers = []
            self.csv_reader = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param, task_param, file_lists[0])
        elif self._infer_type == 'sample':
            self.readers = []
            self.csv_reader = []
        elif self._infer_type == 'linear_interpolation':
            self.csv_reader = []
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param, task_param, file_lists[0])
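A hedged sketch of how file_lists is typically produced upstream of this loader (the exact partitioner arguments vary across NiftyNet versions):

    from niftynet.io.image_sets_partitioner import ImageSetsPartitioner

    partitioner = ImageSetsPartitioner().initialise(data_param)
    # phase=None lets the partitioner pick defaults for the given action
    file_lists = partitioner.get_file_lists_by(phase=None, action='train')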
Example #7
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = []
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.train_files)
                file_lists.append(data_partitioner.validation_files)
            else:
                file_lists.append(data_partitioner.train_files)

            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param, task_param,
                                       data_partitioner.inference_files)
        elif self._infer_type == 'sample':
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param, task_param,
                                       data_partitioner.inference_files)
Example #8
def get_label_reader():
    '''
    Define the label reader.
    :return: label reader
    '''
    reader = ImageReader(['label'])
    reader.initialise(MOD_LABEL_DATA, MOD_LABEl_TASK, mod_label_list)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(SINGLE_25D_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'agg_test.txt'))
    reader.add_preprocessing_layers(label_normaliser)
    pad_layer = PadLayer(image_name=('label', ), border=(5, 6, 7))
    reader.add_preprocessing_layers([pad_layer])
    return reader
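A usage sketch, assuming the fixture constants above: the discrete normaliser maps whatever label values it finds onto consecutive integers, and the pad layer grows each spatial axis by twice its border:

    import numpy as np

    reader = get_label_reader()
    _, data, _ = reader(idx=0)
    print(np.unique(data['label'])[:5])  # e.g. [0. 1. 2. 3. 4.]
    # border=(5, 6, 7): an (X, Y, Z, 1, 1) label becomes (X+10, Y+12, Z+14, 1, 1)
    print(data['label'].shape)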
Example #9
    def test_no_2d_resampling_properties(self):
        data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
                             'csv_file': '2d_test.csv',
                             'pixdim': (2, 2, 2),
                             'axcodes': 'RAS'}}
        reader = ImageReader().initialise(data_param)
        self.assertEqual(reader.output_list[0]['mr'].output_pixdim, (None,))
        self.assertEqual(reader.output_list[0]['mr'].output_axcodes, (None,))
        idx, data, interp = reader()

        # test output
        self.assertTrue('mr' in data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'mr': (1,)})
        self.assertEqual(data['mr'].shape, (100, 100, 1, 1, 1))
Example #10
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        RegressionApplication.initialise_dataset_loader(
            self, data_param, task_param, data_partitioner)
        if self.is_training:
            return
        if not task_param.error_map:
            # use the regression application implementation
            return

        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(phase=reader_phase,
                                                        action=self.action)
        # modifying the original readers in regression application
        # as we need ground truth labels to generate error maps
        self.readers = [
            ImageReader(['image',
                         'output']).initialise(data_param, task_param,
                                               file_list)
            for file_list in file_lists
        ]

        mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        preprocessors = []
        if self.net_param.normalisation:
            preprocessors.append(histogram_normaliser)
        if self.net_param.whitening:
            preprocessors.append(mean_var_normaliser)
        if self.net_param.volume_padding_size:
            preprocessors.append(
                PadLayer(image_name=SUPPORTED_INPUT,
                         border=self.net_param.volume_padding_size))
        self.readers[0].add_preprocessing_layers(preprocessors)
Example #11
def preprocess(
    input_path,
    model_path,
    output_path,
    cutoff,
):
    input_path = Path(input_path)
    output_path = Path(output_path)
    input_dir = input_path.parent

    DATA_PARAM = {
        'Modality0':
        ParserNamespace(
            path_to_search=str(input_dir),
            filename_contains=('nii.gz', ),
            interp_order=0,
            pixdim=None,
            axcodes='RAS',
            loader=None,
        )
    }

    TASK_PARAM = ParserNamespace(image=('Modality0', ))
    data_partitioner = ImageSetsPartitioner()
    file_list = data_partitioner.initialise(DATA_PARAM).get_file_list()
    reader = ImageReader(['image'])
    reader.initialise(DATA_PARAM, TASK_PARAM, file_list)

    binary_masking_func = BinaryMaskingLayer(type_str='mean_plus', )

    hist_norm = HistogramNormalisationLayer(
        image_name='image',
        modalities=['Modality0'],
        model_filename=str(model_path),
        binary_masking_func=binary_masking_func,
        cutoff=cutoff,
        name='hist_norm_layer',
    )

    image = reader.output_list[0]['image']
    data = image.get_data()
    norm_image_dict, mask_dict = hist_norm({'image': data})
    data = norm_image_dict['image']
    nii = nib.Nifti1Image(data.squeeze(), image.original_affine[0])
    dst = output_path
    nii.to_filename(str(dst))
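A hypothetical invocation (all paths are placeholders; cutoff is NiftyNet's usual pair of lower/upper percentiles):

    preprocess(
        input_path='data/subject_t1.nii.gz',
        model_path='data/histogram_landmarks.txt',
        output_path='data/subject_t1_norm.nii.gz',
        cutoff=(0.01, 0.99),
    )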
Example #12
def get_first_array(directory: Union[str, Path]) -> np.ndarray:
    """
    Use NiftyNet's reader to get RAS images
    """
    directory = Path(directory)
    image_dir = list(directory.glob('**/*.nii.gz'))[0].parent
    input_dict = dict(
        path_to_search=str(image_dir),
        filename_contains='nii',
        axcodes=('R', 'A', 'S'),
    )
    data_parameters = {
        'image': input_dict,
    }
    reader = ImageReader().initialise(data_parameters)
    _, image_dict, _ = reader()
    return image_dict['image'].squeeze()
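Usage sketch (the directory is a placeholder); the reader reorients to RAS before the array is squeezed down to its spatial axes:

    array = get_first_array('data/subjects')
    print(array.shape, array.dtype)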
Example #13
    def test_2D_multimodal_properties(self):
        data_param = {'mr': {'path_to_search': IMAGE_PATH_2D_2,
                             'filename_contains': 'x_1_y',
                             'pixdim': (2, 1.5, 2),
                             'axcodes': 'RAS'}}
        grouping_param = {'ct': ('mr', 'mr', 'mr')}
        reader = ImageReader().initialise(data_param, grouping_param)
        self.assertDictEqual(reader.spatial_ranks, {'ct': 2})
        self.assertEqual(reader.output_list[0]['ct'].output_pixdim,
                         ((2.0, 1.5, 2.0),) * 3)
        self.assertEqual(reader.output_list[0]['ct'].output_axcodes,
                         (('R', 'A', 'S'),) * 3)

        # test output
        idx, data, interp = reader()
        self.assertTrue('ct' in data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'ct': (1, 1, 1)})
        self.assertEqual(data['ct'].shape, (100, 100, 1, 1, 3))
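The trailing 3 in the shape and the three interpolation orders both come from the grouping tuple: each entry of ('mr', 'mr', 'mr') contributes one slot along the modality axis. A quick consistency check, assuming the objects above:

    assert data['ct'].shape[4] == len(grouping_param['ct'])
    assert interp['ct'] == (1,) * len(grouping_param['ct'])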
Example #14
    def test_trainable_preprocessing(self):
        label_file = os.path.join('testing_data', 'label_reader.txt')
        if os.path.exists(label_file):
            os.remove(label_file)
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(LABEL_TASK).get('label'),
            model_filename=label_file)
        reader = ImageReader(['label'])
        with self.assertRaisesRegexp(AssertionError, ''):
            reader.add_preprocessing_layers(label_normaliser)
        reader.initialise_reader(LABEL_DATA, LABEL_TASK)
        reader.add_preprocessing_layers(label_normaliser)
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['label'], border=(10, 5, 5))])
        idx, data, interp_order = reader(idx=0)
        unique_data = np.unique(data['label'])
        expected = np.array(range(156), dtype=np.float32)
        self.assertAllClose(unique_data, expected)
        self.assertAllClose(data['label'].shape, (83, 73, 73, 1, 1))
Example #15
    def test_3d_resampling_properties(self):
        data_param = {
            'mr': {'path_to_search': IMAGE_PATH_3D_1,
                   'filename_contains': 'x_y_z_1_1',
                   'pixdim': (4, 3, 4),
                   'axcodes': 'RAS'}}
        reader = ImageReader().initialise(data_param)
        self.assertDictEqual(reader.spatial_ranks, {'mr': 3})
        self.assertEqual(reader.output_list[0]['mr'].output_pixdim,
                         ((4.0, 3.0, 4.0),))
        self.assertEqual(reader.output_list[0]['mr'].output_axcodes,
                         (('R', 'A', 'S'),))
        idx, data, interp = reader()

        # test output
        self.assertTrue('mr' in data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'mr': (1,)})
        # allows rounding error spatially
        self.assertAllClose(data['mr'].shape[:3], (12, 8, 10), atol=1)
        self.assertAllClose(data['mr'].shape[3:], (1, 1))
Example #16
    def test_3d_multiple_properties(self):
        """
        loading two modalities, grouping subject names only
        """
        data_param = {
            'mr': {
                'path_to_search': IMAGE_PATH_3D,
                'filename_contains': 'Lesion',
                'pixdim': (4, 3, 4),
                'axcodes': 'RAS'
            },
            'ct': {
                'path_to_search': IMAGE_PATH_3D,
                'filename_contains': 'Lesion',
                'pixdim': (4, 3, 4),
                'axcodes': 'RAS'
            }
        }
        reader = ImageReader().initialise(data_param)
        self.assertDictEqual(reader.spatial_ranks, {'mr': 3, 'ct': 3})
        self.assertEqual(reader.output_list[0]['mr'].output_pixdim,
                         ((4.0, 3.0, 4.0), ))
        self.assertEqual(reader.output_list[0]['mr'].output_axcodes,
                         (('R', 'A', 'S'), ))
        idx, data, interp = reader()

        # test output
        self.assertTrue('mr' in data)
        self.assertTrue('ct' in data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'mr': (1, ), 'ct': (1, )})
        # allows rounding error spatially
        self.assertAllClose(data['mr'].shape[:3], (62, 83, 62), atol=1)
        self.assertAllClose(data['mr'].shape[3:], (1, 1))
        self.assertAllClose(data['ct'].shape[:3], (62, 83, 62), atol=1)
        self.assertAllClose(data['ct'].shape[3:], (1, 1))
Example #17
def get_2d_reader():
    data_param = {'mr': {'path_to_search': IMAGE_PATH_2D_1}}
    reader = ImageReader().initialise(data_param)
    return reader
Example #18
def get_3d_reader():
    data_param = {'mr': {'path_to_search': IMAGE_PATH_3D,
                         'filename_contains': 'FLAIR',
                         'interp_order': 1}}
    reader = ImageReader().initialise(data_param)
    return reader
Example #19
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        self.data_param = data_param
        self.regression_param = task_param

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader({'image', 'output', 'weight', 'sampler'})
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        elif self.is_inference:
            inference_reader = ImageReader(['image'])
            inference_reader.initialise(data_param, task_param, file_lists[0])
            self.readers = [inference_reader]
        elif self.is_evaluation:
            reader = ImageReader({'image', 'output', 'inferred'})
            reader.initialise(data_param, task_param, file_lists[0])
            self.readers = [reader]
        else:
            raise ValueError(
                'Action `{}` not supported. Expected one of {}'.format(
                    self.action, self.SUPPORTED_ACTIONS))

        mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)

        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(
                    RandomFlipLayer(
                        flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(
                    RandomSpatialScalingLayer(
                        min_percentage=self.action_param.scaling_percentage[0],
                        max_percentage=self.action_param.scaling_percentage[1])
                )
            if self.action_param.rotation_angle:
                augmentation_layers.append(RandomRotationLayer())
                augmentation_layers[-1].init_uniform_angle(
                    self.action_param.rotation_angle)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(
                PadLayer(image_name=SUPPORTED_INPUT,
                         border=self.net_param.volume_padding_size,
                         mode=self.net_param.volume_padding_mode))
        for reader in self.readers:
            reader.add_preprocessing_layers(volume_padding_layer +
                                            normalisation_layers +
                                            augmentation_layers)
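Note that add_preprocessing_layers applies layers in list order, so every reader here runs padding, then normalisation, then (when training) augmentation. Unlike Examples #22 and #25, this application attaches the augmentation layers to validation readers too; a hedged sketch of restricting augmentation to the training reader only:

    self.readers[0].add_preprocessing_layers(
        volume_padding_layer + normalisation_layers + augmentation_layers)
    for reader in self.readers[1:]:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers)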
Example #20
    def initialise_dataset_loader(self, data_param=None, task_param=None,
                                  data_partitioner=None):

        self.data_param = data_param
        self.segmentation_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = []
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.train_files)
                file_lists.append(data_partitioner.validation_files)
            else:
                file_lists.append(data_partitioner.all_files)

            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(SUPPORTED_INPUT)
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)

        else:  # in the inference process use image input only
            inference_reader = ImageReader(['image'])
            file_list = data_partitioner.inference_files
            inference_reader.initialise(data_param, task_param, file_list)
            self.readers = [inference_reader]

        foreground_masking_layer = None
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer)
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=foreground_masking_layer,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        label_normaliser = None
        if self.net_param.histogram_ref_file:
            label_normaliser = DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)
        if task_param.label_normalisation:
            normalisation_layers.append(label_normaliser)

        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=self.action_param.scaling_percentage[0],
                    max_percentage=self.action_param.scaling_percentage[1]))
            if self.action_param.rotation_angle or \
                    self.action_param.rotation_angle_x or \
                    self.action_param.rotation_angle_y or \
                    self.action_param.rotation_angle_z:
                rotation_layer = RandomRotationLayer()
                if self.action_param.rotation_angle:
                    rotation_layer.init_uniform_angle(
                        self.action_param.rotation_angle)
                else:
                    rotation_layer.init_non_uniform_angle(
                        self.action_param.rotation_angle_x,
                        self.action_param.rotation_angle_y,
                        self.action_param.rotation_angle_z)
                augmentation_layers.append(rotation_layer)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))

        for reader in self.readers:
            reader.add_preprocessing_layers(
                volume_padding_layer +
                normalisation_layers +
                augmentation_layers)
Example #21
def get_3d_reader():
    reader = ImageReader(['image'])
    reader.initialise_reader(MULTI_MOD_DATA, MULTI_MOD_TASK)
    return reader
Example #22
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):

        self.data_param = data_param
        self.segmentation_param = task_param

        # initialise input image readers
        if self.is_training:
            reader_names = ('image', 'label', 'weight_map', 'sampler')
        elif self.is_inference:
            # in the inference process use `image` input only
            reader_names = ('image',)
        elif self.is_evaluation:
            reader_names = ('image', 'label', 'inferred')
        else:
            tf.logging.fatal(
                'Action `%s` not supported. Expected one of %s',
                self.action, self.SUPPORTED_PHASES)
            raise ValueError
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(
            phase=reader_phase, action=self.action)
        self.readers = [
            ImageReader(reader_names).initialise(
                data_param, task_param, file_list) for file_list in file_lists]

        # initialise input preprocessing layers
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0) \
            if self.net_param.normalise_foreground_only else None
        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer) \
            if self.net_param.whitening else None
        percentile_normaliser = PercentileNormalisationLayer(
            image_name='image',
            binary_masking_func=foreground_masking_layer,
            cutoff=self.net_param.cutoff) \
            if self.net_param.percentile_normalisation else None
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer') \
            if (self.net_param.histogram_ref_file and
                self.net_param.normalisation) else None
        label_normalisers = None
        if self.net_param.histogram_ref_file and \
                task_param.label_normalisation:
            label_normalisers = [DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)]
            if self.is_evaluation:
                label_normalisers.append(
                    DiscreteLabelNormalisationLayer(
                        image_name='inferred',
                        modalities=vars(task_param).get('inferred'),
                        model_filename=self.net_param.histogram_ref_file))
                label_normalisers[-1].key = label_normalisers[0].key

        normalisation_layers = []
        if histogram_normaliser is not None:
            normalisation_layers.append(histogram_normaliser)
        if mean_var_normaliser is not None:
            normalisation_layers.append(mean_var_normaliser)
        if percentile_normaliser is not None:
            normalisation_layers.append(percentile_normaliser)
        if task_param.label_normalisation and \
                (self.is_training or not task_param.output_prob):
            normalisation_layers.extend(label_normalisers)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size,
                mode=self.net_param.volume_padding_mode))

        # initialise training data augmentation layers
        augmentation_layers = []
        if self.is_training:
            train_param = self.action_param
            if train_param.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=train_param.random_flipping_axes))
            if train_param.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=train_param.scaling_percentage[0],
                    max_percentage=train_param.scaling_percentage[1],
                    antialiasing=train_param.antialiasing))
            if train_param.rotation_angle or \
                    train_param.rotation_angle_x or \
                    train_param.rotation_angle_y or \
                    train_param.rotation_angle_z:
                rotation_layer = RandomRotationLayer()
                if train_param.rotation_angle:
                    rotation_layer.init_uniform_angle(
                        train_param.rotation_angle)
                else:
                    rotation_layer.init_non_uniform_angle(
                        train_param.rotation_angle_x,
                        train_param.rotation_angle_y,
                        train_param.rotation_angle_z)
                augmentation_layers.append(rotation_layer)
            if train_param.do_elastic_deformation:
                spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
                augmentation_layers.append(RandomElasticDeformationLayer(
                    spatial_rank=spatial_rank,
                    num_controlpoints=train_param.num_ctrl_points,
                    std_deformation_sigma=train_param.deformation_sigma,
                    proportion_to_augment=train_param.proportion_to_deform))

        # only add augmentation to first reader (not validation reader)
        self.readers[0].add_preprocessing_layers(
            volume_padding_layer + normalisation_layers + augmentation_layers)

        for reader in self.readers[1:]:
            reader.add_preprocessing_layers(
                volume_padding_layer + normalisation_layers)
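A design note on the conditional-expression style used above: because each normaliser is built as a conditional expression (the layer if its flag is set, else None), the pipeline is gated on the constructed object rather than on a raw config flag, so a missing histogram_ref_file can never push None into a reader:

    # safer gating: test the layer, not the flag
    if histogram_normaliser is not None:
        normalisation_layers.append(histogram_normaliser)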
Example #23
def get_2d_reader():
    reader = ImageReader(['image'])
    reader.initialise(MOD_2D_DATA, MOD_2D_TASK, mod_2d_list)
    return reader
Example #24
def get_25d_reader():
    reader = ImageReader(['image'])
    reader.initialise(SINGLE_25D_DATA, SINGLE_25D_TASK, single_25d_list)
    return reader
Example #25
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):

        self.data_param = data_param
        self.segmentation_param = task_param

        # initialise input image readers
        if self.is_training:
            reader_names = ('image', 'label', 'weight', 'sampler')
        elif self.is_inference:
            # in the inference process use `image` input only
            reader_names = ('image', )
        elif self.is_evaluation:
            reader_names = ('image', 'label', 'inferred')
        else:
            tf.logging.fatal('Action `%s` not supported. Expected one of %s',
                             self.action, self.SUPPORTED_PHASES)
            raise ValueError
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(phase=reader_phase,
                                                        action=self.action)
        self.readers = [
            ImageReader(reader_names).initialise(data_param, task_param,
                                                 file_list)
            for file_list in file_lists
        ]

        foreground_masking_layer = None
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer)
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=foreground_masking_layer,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        label_normaliser = None
        if self.net_param.histogram_ref_file:
            label_normaliser = DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)
        if task_param.label_normalisation and \
                (self.is_training or not task_param.output_prob):
            normalisation_layers.append(label_normaliser)

        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(
                    RandomFlipLayer(
                        flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(
                    RandomSpatialScalingLayer(
                        min_percentage=self.action_param.scaling_percentage[0],
                        max_percentage=self.action_param.scaling_percentage[1])
                )
            if self.action_param.rotation_angle or \
                    self.action_param.rotation_angle_x or \
                    self.action_param.rotation_angle_y or \
                    self.action_param.rotation_angle_z:
                rotation_layer = RandomRotationLayer()
                if self.action_param.rotation_angle:
                    rotation_layer.init_uniform_angle(
                        self.action_param.rotation_angle)
                else:
                    rotation_layer.init_non_uniform_angle(
                        self.action_param.rotation_angle_x,
                        self.action_param.rotation_angle_y,
                        self.action_param.rotation_angle_z)
                augmentation_layers.append(rotation_layer)
            if self.action_param.bias_field_range:
                bias_field_layer = RandomBiasFieldLayer()
                bias_field_layer.init_order(self.action_param.bf_order)
                bias_field_layer.init_uniform_coeff(
                    self.action_param.bias_field_range)
                augmentation_layers.append(bias_field_layer)

        volume_padding_layer = [
            PadLayer(image_name=SUPPORTED_INPUT,
                     border=self.net_param.volume_padding_size,
                     mode=self.net_param.volume_padding_mode,
                     pad_to=self.net_param.volume_padding_to_size)
        ]

        self.readers[0].add_preprocessing_layers(volume_padding_layer +
                                                 normalisation_layers +
                                                 augmentation_layers)

        for reader in self.readers[1:]:
            reader.add_preprocessing_layers(volume_padding_layer +
                                            normalisation_layers)
Example #26
        'filename_contains': '_tissue',
        'pixdim': (1.5, 1.5, 1.5),
        'axcodes': ['L', 'P', 'S'],
        'interp_order': 0
    },
    'AIR': {
        'path_to_search': reference_dir,
        'spatial_window_size': (48, 48, 48),
        'filename_contains': '_air',
        'pixdim': (1.5, 1.5, 1.5),
        'axcodes': ['L', 'P', 'S'],
        'interp_order': 0
    },
}

reader = ImageReader().initialise(data_param)

# reference_files = [f for f in os.listdir(reference_dir) if f.endswith('_ct_tra_reg masked.nii')]
# reference_files.sort()
# mask_files = [f for f in os.listdir(reference_dir) if f.endswith('_headMask.nii')]
# mask_files.sort()
inference_files = [
    f for f in os.listdir(inference_dir) if f.endswith('_niftynet_out.nii.gz')
]
inference_files.sort()

with open(os.path.join(inference_dir, 'results.csv'), 'w') as csvfile:
    filewriter = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
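The snippet ends right after the writer is created; a hypothetical continuation (the column names and compute_score are placeholders) would emit one row per inference volume:

    filewriter.writerow(['filename', 'score'])
    for fname in inference_files:
        filewriter.writerow([fname, compute_score(fname)])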
Example #27
def get_3d_reader():
    reader = ImageReader(['image'])
    reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
    return reader
Example #28
def get_25d_reader():
    reader = ImageReader(['image'])
    reader.initialise_reader(SINGLE_25D_DATA, SINGLE_25D_TASK)
    return reader
Example #29
def get_dynamic_window_reader():
    reader = ImageReader(['image'])
    reader.initialise(DYNAMIC_MOD_DATA, DYNAMIC_MOD_TASK, dynamic_list)
    return reader
Example #30
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):

        self.data_param = data_param
        self.classification_param = task_param

        if self.is_training:
            reader_names = ('image', 'label', 'sampler')
        elif self.is_inference:
            reader_names = ('image', )
        elif self.is_evaluation:
            reader_names = ('image', 'label', 'inferred')
        else:
            tf.logging.fatal('Action `%s` not supported. Expected one of %s',
                             self.action, self.SUPPORTED_PHASES)
            raise ValueError
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(phase=reader_phase,
                                                        action=self.action)
        self.readers = [
            ImageReader(reader_names).initialise(data_param, task_param,
                                                 file_list)
            for file_list in file_lists
        ]

        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0) \
            if self.net_param.normalise_foreground_only else None

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer) \
            if self.net_param.whitening else None
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer') \
            if (self.net_param.histogram_ref_file and
                self.net_param.normalisation) else None

        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file) \
            if (self.net_param.histogram_ref_file and
                task_param.label_normalisation) else None

        normalisation_layers = []
        if histogram_normaliser is not None:
            normalisation_layers.append(histogram_normaliser)
        if mean_var_normaliser is not None:
            normalisation_layers.append(mean_var_normaliser)
        if label_normaliser is not None:
            normalisation_layers.append(label_normaliser)

        augmentation_layers = []
        if self.is_training:
            train_param = self.action_param
            if train_param.random_flipping_axes != -1:
                augmentation_layers.append(
                    RandomFlipLayer(
                        flip_axes=train_param.random_flipping_axes))
            if train_param.scaling_percentage:
                augmentation_layers.append(
                    RandomSpatialScalingLayer(
                        min_percentage=train_param.scaling_percentage[0],
                        max_percentage=train_param.scaling_percentage[1],
                        antialiasing=train_param.antialiasing,
                        isotropic=train_param.isotropic_scaling))
            if train_param.rotation_angle or \
                    train_param.rotation_angle_x or \
                    train_param.rotation_angle_y or \
                    train_param.rotation_angle_z:
                rotation_layer = RandomRotationLayer()
                if train_param.rotation_angle:
                    rotation_layer.init_uniform_angle(
                        train_param.rotation_angle)
                else:
                    rotation_layer.init_non_uniform_angle(
                        train_param.rotation_angle_x,
                        train_param.rotation_angle_y,
                        train_param.rotation_angle_z)
                augmentation_layers.append(rotation_layer)

        # only add augmentation to first reader (not validation reader)
        self.readers[0].add_preprocessing_layers(normalisation_layers +
                                                 augmentation_layers)

        for reader in self.readers[1:]:
            reader.add_preprocessing_layers(normalisation_layers)
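A minimal end-to-end sketch tying the pieces above together (paths are placeholders; module paths as in NiftyNet 0.5; list order determines the order in which the layers are applied):

    from niftynet.io.image_reader import ImageReader
    from niftynet.layer.mean_variance_normalisation import MeanVarNormalisationLayer
    from niftynet.layer.pad import PadLayer

    data_param = {'image': {'path_to_search': 'data/t1',
                            'filename_contains': 'nii'}}
    reader = ImageReader().initialise(data_param)
    reader.add_preprocessing_layers([
        MeanVarNormalisationLayer(image_name='image'),
        PadLayer(image_name=['image'], border=(4, 4, 4)),
    ])
    idx, data, interp = reader(idx=0)
    print(data['image'].shape, interp)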