Exemplo n.º 1
0
 def test_operations(self):
     """Calling an initialised reader yields interp orders and a 5-D volume."""
     image_reader = ImageReader(['image'])
     image_reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     _, output_data, orders = image_reader()
     # interp order reported per output field must match the input spec
     self.assertEqual(
         SINGLE_MOD_DATA['lesion'].interp_order, orders['image'][0])
     self.assertAllClose(output_data['image'].shape, (256, 168, 256, 1, 1))
Exemplo n.º 2
0
    def initialise_dataset_loader(self, data_param=None, task_param=None):
        """Create the image reader and preprocessing pipeline for the GAN app.

        :param data_param: per-modality input specification forwarded to the
            reader (csv/search-path parameters).
        :param task_param: task namespace mapping reader field names to
            modality sections; stored as ``self.gan_param``.
        """
        self.data_param = data_param
        self.gan_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.reader = ImageReader(['image', 'conditioning'])
        else:  # in the inference process use image input only
            self.reader = ImageReader(['conditioning'])
        if self.reader:
            self.reader.initialise_reader(data_param, task_param)

        # optional binary mask so normalisation statistics are computed
        # over foreground voxels only
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)
        else:
            foreground_masking_layer = None

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer)
        # histogram normalisation needs a reference landmark file
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=foreground_masking_layer,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')
        else:
            histogram_normaliser = None

        # NOTE(review): if normalisation is enabled without a
        # histogram_ref_file this appends None to the layer list — confirm
        # downstream handles that
        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)

        # random augmentation is applied at training time only
        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(
                    RandomFlipLayer(
                        flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(
                    RandomSpatialScalingLayer(
                        min_percentage=self.action_param.scaling_percentage[0],
                        max_percentage=self.action_param.scaling_percentage[1])
                )
            if self.action_param.rotation_angle:
                augmentation_layers.append(RandomRotationLayer())
                augmentation_layers[-1].init_uniform_angle(
                    self.action_param.rotation_angle)

        # normalisation first, then augmentation
        if self.reader:
            self.reader.add_preprocessing_layers(normalisation_layers +
                                                 augmentation_layers)
Exemplo n.º 3
0
def get_3d_reader():
    """Build a multi-modality 3-D image/label reader from the module fixtures.

    :return: an initialised ``ImageReader`` over ``MULTI_MOD_DATA``.
    """
    multi_mod_list = data_partitioner.initialise(
        MULTI_MOD_DATA).get_file_list()
    # removed leftover debug print of MULTI_MOD_DATA / MULTI_MOD_TASK
    reader = ImageReader(['image', 'label'])
    reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
    return reader
Exemplo n.º 4
0
 def test_operations(self):
     """Reading one subject returns matching interp orders and a 5-D shape."""
     img_reader = ImageReader(['image'])
     img_reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     _, volumes, orders = img_reader()
     self.assertEqual(
         SINGLE_MOD_DATA['lesion'].interp_order, orders['image'][0])
     self.assertAllClose(volumes['image'].shape, (256, 168, 256, 1, 1))
Exemplo n.º 5
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Create image readers for the autoencoder application.

        Builds training readers, or inference readers matching the
        requested inference type ('encode', 'encode-decode', 'sample',
        'linear_interpolation').

        :param data_param: per-modality input data specification.
        :param task_param: task namespace; provides ``inference_type``.
        :param data_partitioner: supplies per-phase file lists.
        :raises NotImplementedError: when run as an evaluation action.
        """
        self.data_param = data_param
        self.autoencoder_param = task_param

        # resolve the inference mode up front (None while training)
        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_evaluation:
            # bug fix: the exception was previously created but never raised
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       file_lists[0])
        elif self._infer_type == 'sample':
            # sampling generates from the latent space; no input reader
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            # NOTE(review): ``[file_lists]`` wraps the list in another list,
            # unlike ``file_lists[0]`` above — confirm this is intended
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       [file_lists])
Exemplo n.º 6
0
def get_reader(data_param, image_sets_partitioner, phase):
    """Build an initialised ImageReader for the requested phase.

    :param data_param: input specification passed to ``ImageReader.initialise``.
    :param image_sets_partitioner: provides per-phase file lists.
    :param phase: one of 'training', 'validation', 'inference'.
    :return: the initialised reader with preprocessing layers attached.
    :raises ValueError: if ``phase`` is not a recognised value.
    """
    # Using Nifty Reader
    if phase == 'training':
        image_reader = ImageReader().initialise(
            data_param, file_list=image_sets_partitioner.get_file_list(TRAIN))

    elif phase == 'validation':
        image_reader = ImageReader().initialise(
            data_param, file_list=image_sets_partitioner.get_file_list(VALID))

    elif phase == 'inference':
        image_reader = ImageReader().initialise(
            data_param, file_list=image_sets_partitioner.get_file_list(INFER))
    else:
        # bug fix: the message listed 'train' although the accepted value is
        # 'training'; also raise the more specific ValueError (a subclass of
        # Exception, so existing handlers still catch it)
        raise ValueError('Invalid phase choice: {}'.format(
            {'phase': ['training', 'validation', 'inference']}))

    # Adding preprocessing layers
    mean_variance_norm_layer = MeanVarNormalisationLayer(image_name='image')
    pad_layer = PadLayer(image_name=('image', 'label'), border=(8, 8, 8))
    image_reader.add_preprocessing_layers([mean_variance_norm_layer])

    # padding is only needed for whole-volume inference
    if phase == 'inference':
        image_reader.add_preprocessing_layers([pad_layer])

    return image_reader
Exemplo n.º 7
0
def get_reader(data_param, grouping_param, image_sets_partitioner, phase):
    """Build an initialised, grouped ImageReader for the requested phase.

    :param data_param: per-modality input specification.
    :param grouping_param: mapping of output field names to modality groups.
    :param image_sets_partitioner: provides per-phase file lists.
    :param phase: one of 'training', 'validation', 'inference'.
    :return: the initialised reader.
    :raises ValueError: if ``phase`` is not a recognised value.
    """
    # Using Nifty Reader
    if phase == 'training':
        image_reader = ImageReader().initialise(
            data_param,
            grouping_param,
            file_list=image_sets_partitioner.get_file_list(TRAIN))

    elif phase == 'validation':
        image_reader = ImageReader().initialise(
            data_param,
            grouping_param,
            file_list=image_sets_partitioner.get_file_list(VALID))

    elif phase == 'inference':
        # TODO: need to improve that
        # bug fix: use .get() so a missing key no longer raises KeyError;
        # the truthy check (delete only when set) is preserved
        if data_param.get('mask'):
            del data_param['mask']
        if grouping_param.get('sampler'):
            del grouping_param['sampler']
        image_reader = ImageReader().initialise(
            data_param,
            grouping_param,
            file_list=image_sets_partitioner.get_file_list(INFER))

    else:
        # raise the more specific ValueError (still caught as Exception)
        raise ValueError('Invalid phase choice: {}'.format(
            {'phase': ['training', 'validation', 'inference']}))
    return image_reader
def get_2d_reader():
    '''
    Build and initialise the single-modality 2-D reader.
    :return: 2d reader
    '''
    image_reader = ImageReader(['image'])
    image_reader.initialise(MOD_2D_DATA, MOD_2D_TASK, mod_2d_list)
    return image_reader
def get_25d_reader():
    '''
    Build and initialise the 2.5-D (stacked slices) reader.
    :return: initialised 2.5d reader
    '''
    image_reader = ImageReader(['image'])
    image_reader.initialise(SINGLE_25D_DATA, SINGLE_25D_TASK, single_25d_list)
    return image_reader
def get_3d_reader():
    '''
    Build and initialise the multi-modality 3-D reader.
    :return: 3d reader
    '''
    image_reader = ImageReader(['image'])
    image_reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
    return image_reader
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Create image readers and preprocessing for the segmentation app.

        :param data_param: per-modality input specification.
        :param task_param: task namespace; stored as ``self.segmentation_param``.
        :param data_partitioner: supplies train/validation/inference file lists.
        """

        self.data_param = data_param
        self.segmentation_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = []
            # a separate validation reader is only needed when periodic
            # validation is enabled
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.train_files)
                file_lists.append(data_partitioner.validation_files)
            else:
                file_lists.append(data_partitioner.train_files)

            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(SUPPORTED_INPUT)
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)

        else:  # in the inference process use image input only
            inference_reader = ImageReader(['image'])
            file_list = data_partitioner.inference_files
            inference_reader.initialise(data_param, task_param, file_list)
            self.readers = [inference_reader]

        # optional mask so whitening statistics cover foreground voxels only
        foreground_masking_layer = None
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        normalisation_layers = []
        if self.net_param.whitening:
            mean_var_normaliser = MeanVarNormalisationLayer(
                image_name='image',
                binary_masking_func=foreground_masking_layer)
            normalisation_layers.append(mean_var_normaliser)

        # discrete label ids are remapped to consecutive integers; skipped at
        # inference when probabilistic outputs are produced
        if task_param.label_normalisation and \
                (self.is_training or not task_param.output_prob):
            label_normaliser = DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)
            normalisation_layers.append(label_normaliser)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))

        # attach the same pipeline to every reader (train/validation/infer)
        for reader in self.readers:
            reader.add_preprocessing_layers(
                normalisation_layers + volume_padding_layer)
Exemplo n.º 12
0
 def test_properties(self):
     """Metadata of an initialised reader matches the fixture definitions."""
     image_reader = ImageReader(['image'])
     image_reader.initialise(
         SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     self.assertEqual(len(image_reader.output_list), 4)
     self.assertDictEqual(
         image_reader.shapes, {'image': (256, 168, 256, 1, 1)})
     self.assertDictEqual(image_reader.tf_dtypes, {'image': tf.float32})
     self.assertEqual(image_reader.names, ['image'])
     self.assertDictEqual(
         image_reader.input_sources, {'image': ('lesion', )})
     self.assertEqual(image_reader.get_subject_id(1)[:4], 'Fin_')
Exemplo n.º 13
0
 def test_trainable_preprocessing(self):
     """A trainable label normaliser can be attached and applied."""
     label_file = os.path.join('testing_data', 'label_reader.txt')
     if os.path.exists(label_file):
         os.remove(label_file)
     label_normaliser = DiscreteLabelNormalisationLayer(
         image_name='label',
         modalities=vars(LABEL_TASK).get('label'),
         model_filename=os.path.join('testing_data', 'label_reader.txt'))
     reader = ImageReader(['label'])
     # trainable layers must be rejected before the reader is initialised
     with self.assertRaisesRegexp(AssertionError, ''):
         reader.add_preprocessing_layers(label_normaliser)
     reader.initialise(LABEL_DATA, LABEL_TASK, label_list)
     reader.add_preprocessing_layers(label_normaliser)
     reader.add_preprocessing_layers(
         [PadLayer(image_name=['label'], border=(10, 5, 5))])
     _, data, _ = reader(idx=0)
     unique_data = np.unique(data['label'])
     # normalised labels are the consecutive integers 0..157; a second
     # acceptable variant is identical but with the value 19 absent
     expected_v1 = np.arange(158, dtype=np.float32)
     expected_v2 = np.delete(expected_v1, 19)
     compatible_assert = \
         np.all(unique_data == expected_v1) or \
         np.all(unique_data == expected_v2)
     self.assertTrue(compatible_assert)
     self.assertAllClose(data['label'].shape, (103, 74, 93, 1, 1))
Exemplo n.º 14
0
 def test_trainable_preprocessing(self):
     """Trainable label normalisation works once the reader is initialised."""
     label_file = os.path.join('testing_data', 'label_reader.txt')
     if os.path.exists(label_file):
         os.remove(label_file)
     label_normaliser = DiscreteLabelNormalisationLayer(
         image_name='label',
         modalities=vars(LABEL_TASK).get('label'),
         model_filename=os.path.join('testing_data', 'label_reader.txt'))
     reader = ImageReader(['label'])
     # adding a trainable layer before initialisation must raise
     with self.assertRaisesRegexp(AssertionError, ''):
         reader.add_preprocessing_layers(label_normaliser)
     reader.initialise(LABEL_DATA, LABEL_TASK, label_list)
     reader.add_preprocessing_layers(label_normaliser)
     reader.add_preprocessing_layers(
         [PadLayer(image_name=['label'], border=(10, 5, 5))])
     _, data, _ = reader(idx=0)
     unique_data = np.unique(data['label'])
     # expected outputs: consecutive ids 0..157 (variant 2 is missing 19)
     all_ids = np.arange(158, dtype=np.float32)
     expected_v1 = all_ids
     expected_v2 = np.delete(all_ids, 19)
     compatible_assert = \
         np.all(unique_data == expected_v1) or \
         np.all(unique_data == expected_v2)
     self.assertTrue(compatible_assert)
     self.assertAllClose(data['label'].shape, (103, 74, 93, 1, 1))
Exemplo n.º 15
0
 def test_properties(self):
     """Reader properties reflect the single-modality fixture."""
     single_reader = ImageReader(['image'])
     single_reader.initialise(
         SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     self.assertEqual(len(single_reader.output_list), 4)
     self.assertDictEqual(
         single_reader.shapes, {'image': (256, 168, 256, 1, 1)})
     self.assertDictEqual(single_reader.tf_dtypes, {'image': tf.float32})
     self.assertEqual(single_reader.names, ['image'])
     self.assertDictEqual(
         single_reader.input_sources, {'image': ('lesion',)})
     self.assertEqual(single_reader.get_subject_id(1)[:4], 'Fin_')
Exemplo n.º 16
0
    def initialise_dataset_loader(self, data_param=None, task_param=None):
        """Create the image reader and preprocessing for the regression app.

        :param data_param: per-modality input specification.
        :param task_param: task namespace; stored as ``self.regression_param``.
        """
        self.data_param = data_param
        self.regression_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.reader = ImageReader(SUPPORTED_INPUT)
        else:  # in the inference process use image input only
            self.reader = ImageReader(['image'])
        self.reader.initialise_reader(data_param, task_param)

        mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
        # histogram normalisation requires a reference landmark file
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')
        else:
            histogram_normaliser = None

        # NOTE(review): normalisation enabled without a histogram_ref_file
        # appends None here — confirm downstream tolerates that
        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)

        # random augmentation layers are only used while training
        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(
                    RandomFlipLayer(
                        flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(
                    RandomSpatialScalingLayer(
                        min_percentage=self.action_param.scaling_percentage[0],
                        max_percentage=self.action_param.scaling_percentage[1])
                )
            if self.action_param.rotation_angle:
                augmentation_layers.append(RandomRotationLayer())
                augmentation_layers[-1].init_uniform_angle(
                    self.action_param.rotation_angle)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(
                PadLayer(image_name=SUPPORTED_INPUT,
                         border=self.net_param.volume_padding_size))
        # pipeline order: padding, then normalisation, then augmentation
        self.reader.add_preprocessing_layers(volume_padding_layer +
                                             normalisation_layers +
                                             augmentation_layers)
Exemplo n.º 17
0
 def test_preprocessing_zero_padding(self):
     """A zero-border PadLayer must leave the volume shape unchanged."""
     image_reader = ImageReader(['image'])
     image_reader.initialise(
         SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     _, data, orders = image_reader()
     self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                      orders['image'][0])
     self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
     # zero padding is a no-op on the spatial dimensions
     image_reader.add_preprocessing_layers(
         [PadLayer(image_name=['image'], border=(0, 0, 0))])
     idx, data, _ = image_reader(idx=2)
     self.assertEqual(idx, 2)
     self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
Exemplo n.º 18
0
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        """Create paired fixed/moving image readers for registration.

        For every file list, two readers are appended: one over the fixed
        image/label fields and one over the moving image/label fields.

        :param data_param: per-modality input specification.
        :param task_param: task namespace; stored as ``self.registration_param``.
        :param data_partitioner: supplies per-phase file lists.
        :raises NotImplementedError: when run as an evaluation action.
        """
        self.data_param = data_param
        self.registration_param = task_param

        if self.is_evaluation:
            # bug fix: the exception was previously created but never raised
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(phase=reader_phase,
                                                        action=self.action)

        self.readers = []
        for file_list in file_lists:
            fixed_reader = ImageReader({'fixed_image', 'fixed_label'})
            fixed_reader.initialise(data_param, task_param, file_list)
            self.readers.append(fixed_reader)

            moving_reader = ImageReader({'moving_image', 'moving_label'})
            moving_reader.initialise(data_param, task_param, file_list)
            self.readers.append(moving_reader)
Exemplo n.º 19
0
 def test_existing_csv(self):
     """A reader built from a previously generated csv keeps its properties."""
     # first initialisation writes the csv that EXISTING_DATA points at
     csv_writer_reader = ImageReader(['image'])
     csv_writer_reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK,
                                  single_mod_list)
     reader = ImageReader(['image'])
     reader.initialise(EXISTING_DATA, SINGLE_MOD_TASK, existing_list)
     self.assertEqual(len(reader.output_list), 4)
     self.assertDictEqual(reader.spatial_ranks, {'image': 3})
     self.assertDictEqual(reader.shapes, {'image': (256, 168, 256, 1, 1)})
     self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
     self.assertEqual(reader.names, ('image', ))
     self.assertDictEqual(reader.input_sources, {'image': ('lesion', )})
     self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
     self.assertIsInstance(reader.get_subject(1), dict)
Exemplo n.º 20
0
 def test_existing_csv(self):
     """Reader initialised from an already-generated csv keeps its properties."""
     reader_for_csv = ImageReader(['image'])
     reader_for_csv.initialise_reader(SINGLE_MOD_DATA, SINGLE_MOD_TASK)
     reader = ImageReader(['image'])
     reader.initialise_reader(EXISTING_DATA, SINGLE_MOD_TASK)
     # bug fix: assertEquals is a deprecated alias of assertEqual
     self.assertEqual(len(reader.output_list), 4)
     self.assertDictEqual(reader.shapes, {'image': (256, 168, 256, 1, 1)})
     self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
     self.assertEqual(reader.names, ['image'])
     self.assertDictEqual(reader.input_sources, {'image': ('lesion', )})
     self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        """Create one reader per file list for the multi-output application.

        The reader fields depend on the action: training uses image, label,
        weight and sampler; inference uses image only; evaluation adds the
        inferred output.

        :param data_param: per-modality input specification.
        :param task_param: task namespace; stored as ``self.multioutput_param``.
        :param data_partitioner: supplies per-phase file lists.
        :raises ValueError: if the current action is not supported.
        """

        self.data_param = data_param
        self.multioutput_param = task_param

        # initialise input image readers
        if self.is_training:
            reader_names = ('image', 'label', 'weight', 'sampler')
        elif self.is_inference:
            # in the inference process use `image` input only
            reader_names = ('image', )
        elif self.is_evaluation:
            reader_names = ('image', 'label', 'inferred')
        else:
            tf.logging.fatal('Action `%s` not supported. Expected one of %s',
                             self.action, self.SUPPORTED_PHASES)
            raise ValueError
        # dataset_to_infer is optional on the action parameters
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(phase=reader_phase,
                                                        action=self.action)
        self.readers = [
            ImageReader(reader_names).initialise(data_param, task_param,
                                                 file_list)
            for file_list in file_lists
        ]
Exemplo n.º 22
0
    def test_reader_field(self):
        """A grouped field name ('ct') replaces the source name ('mr')."""
        data_param = {'mr': {'path_to_search': IMAGE_PATH_2D}}
        group_param = {'ct': ('mr', )}
        reader = ImageReader(['ct']).initialise(data_param, group_param)
        self.renamed_property_asserts(reader)
        idx, data, interp = reader()

        # test output
        self.assertIn('ct', data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'ct': (1, )})
        self.assertEqual(data['ct'].shape, (100, 100, 1, 1, 1))

        # requesting the pre-grouping name must fail once grouping is set
        with self.assertRaisesRegexp(ValueError, ''):
            reader = ImageReader(['mr']).initialise(data_param, group_param)
Exemplo n.º 23
0
    def test_3d_concat_properties(self):
        """
        loading two modalities, grouping subject names only
        """
        # both modalities use the same resampling/reorientation spec
        modality_spec = {
            'path_to_search': IMAGE_PATH_3D_1,
            'filename_contains': 'x_y_z_1_1',
            'pixdim': (4, 3, 4),
            'axcodes': 'RAS'
        }
        data_param = {'mr': dict(modality_spec), 'ct': dict(modality_spec)}
        grouping_param = {'image': ('mr', 'ct')}
        reader = ImageReader().initialise(data_param, grouping_param)
        self.assertDictEqual(reader.spatial_ranks, {'image': 3})
        self.assertEqual(reader.output_list[0]['image'].output_pixdim,
                         ((4.0, 3.0, 4.0), ) * 2)
        self.assertEqual(reader.output_list[0]['image'].output_axcodes,
                         (('R', 'A', 'S'), ) * 2)
        idx, data, interp = reader()

        # test output
        self.assertIn('image', data)
        self.assertTrue(idx in range(len(reader.output_list)))
        self.assertDictEqual(interp, {'image': (1, 1)})
        # allows rounding error spatially
        self.assertAllClose(data['image'].shape[:3], (12, 8, 10), atol=1)
        self.assertAllClose(data['image'].shape[3:], (1, 2))
Exemplo n.º 24
0
def preprocess(
    input_path,
    model_path,
    output_path,
    cutoff,
):
    """Histogram-normalise the NIfTI volume at *input_path* and save it.

    :param input_path: path to a source image; its parent directory is
        searched for 'nii.gz' files.
    :param model_path: histogram landmark model file.
    :param output_path: destination for the normalised NIfTI image.
    :param cutoff: intensity percentile cutoff for the normalisation layer.
    """
    input_path = Path(input_path)
    output_path = Path(output_path)
    source_dir = input_path.parent

    data_param = {
        'Modality0':
        ParserNamespace(
            path_to_search=str(source_dir),
            filename_contains=('nii.gz', ),
            interp_order=0,
            pixdim=None,
            axcodes='RAS',
            loader=None,
        )
    }
    task_param = ParserNamespace(image=('Modality0', ))

    partitioner = ImageSetsPartitioner()
    file_list = partitioner.initialise(data_param).get_file_list()
    reader = ImageReader(['image'])
    reader.initialise(data_param, task_param, file_list)

    masking_func = BinaryMaskingLayer(type_str='mean_plus', )
    hist_norm = HistogramNormalisationLayer(
        image_name='image',
        modalities=['Modality0'],
        model_filename=str(model_path),
        binary_masking_func=masking_func,
        cutoff=cutoff,
        name='hist_norm_layer',
    )

    # apply the layer to the first (only) subject and write the result
    image = reader.output_list[0]['image']
    volume = image.get_data()
    normalised, _ = hist_norm({'image': volume})
    result = normalised['image']
    nii = nib.Nifti1Image(result.squeeze(), image.original_affine[0])
    nii.to_filename(str(output_path))
Exemplo n.º 25
0
 def test_2d_as_5D_multimodal_properties(self):
     """Grouping one 2-D modality three times still has spatial rank 2."""
     mr_spec = {'path_to_search': IMAGE_PATH_2D,
                'filename_contains': '_u',
                'pixdim': (2, 2, 2),
                'axcodes': 'RAS'}
     data_param = {'mr': mr_spec}
     grouping_param = {'ct': ('mr', 'mr', 'mr')}
     reader = ImageReader().initialise(data_param, grouping_param)
     self.assertEqual(reader.spatial_ranks, {'ct': 2})
Exemplo n.º 26
0
def get_3d_reader():
    """Build a reader over FLAIR volumes found under IMAGE_PATH_3D."""
    mr_spec = {
        'path_to_search': IMAGE_PATH_3D,
        'filename_contains': 'FLAIR',
        'interp_order': 1}
    return ImageReader().initialise({'mr': mr_spec})
Exemplo n.º 27
0
    def test_errors(self):
        """Bad task config raises; out-of-range idx reports (-1, None, ...)."""
        reader = ImageReader(['image'])
        reader.initialise(BAD_DATA, SINGLE_MOD_TASK, bad_data_list)
        with self.assertRaisesRegexp(ValueError, ''):
            reader = ImageReader(['image'])
            reader.initialise(SINGLE_MOD_DATA, BAD_TASK, single_mod_list)

        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        # an index beyond the subject list signals failure instead of raising
        idx, data, _ = reader(idx=100)
        self.assertEqual(idx, -1)
        self.assertEqual(data, None)
        # shuffled access still returns a full-size volume
        _, data, _ = reader(shuffle=True)
        self.assertEqual(data['image'].shape, (256, 168, 256, 1, 1))
def get_label_reader():
    """Return a label reader with discrete-label normalisation and padding."""
    label_reader = ImageReader(['label'])
    label_reader.initialise(MOD_LABEL_DATA, MOD_LABEl_TASK, mod_label_list)
    normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(SINGLE_25D_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'agg_test.txt'))
    label_reader.add_preprocessing_layers(normaliser)
    label_reader.add_preprocessing_layers(
        [PadLayer(image_name=('label',), border=(5, 6, 7))])
    return label_reader
Exemplo n.º 29
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Build image readers and preprocessing layers for this task.

        :param data_param: per-modality input sections from the config file
        :param task_param: task-level parameters (modality groupings etc.)
        :param data_partitioner: provides train/validation/inference file lists
        """
        self.data_param = data_param
        self.segmentation_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = []
            if self.action_param.validation_every_n > 0:
                # separate readers for the train and validation partitions
                file_lists.append(data_partitioner.train_files)
                file_lists.append(data_partitioner.validation_files)
            else:
                file_lists.append(data_partitioner.all_files)
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(SUPPORTED_INPUT)
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        else:  # in the inference process use image input only
            inference_reader = ImageReader(['image'])
            file_list = data_partitioner.inference_files
            inference_reader.initialise(data_param, task_param, file_list)
            self.readers = [inference_reader]

        # optional binary mask restricts normalisation statistics to foreground
        foreground_masking_layer = None
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer)

        # maps discrete label values onto consecutive integers
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)

        normalisation_layers = []
        # NOTE(review): mean/var whitening is appended unconditionally here,
        # unlike sibling loaders that guard it with net_param.whitening --
        # confirm this is intended for this application
        normalisation_layers.append(mean_var_normaliser)
        if task_param.label_normalisation:
            normalisation_layers.append(label_normaliser)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))
        # every reader (train and validation) shares the same preprocessing
        for reader in self.readers:
            reader.add_preprocessing_layers(
                normalisation_layers + volume_padding_layer)
Exemplo n.º 30
0
 def test_trainable_preprocessing(self):
     """Label normaliser must be attached only after reader initialisation.

     Fix: ``assertRaisesRegexp`` is a deprecated alias that was removed in
     Python 3.12; use ``assertRaisesRegex`` instead.
     """
     label_file = os.path.join('testing_data', 'label_reader.txt')
     # start from a clean mapping file so the normaliser is retrained
     if os.path.exists(label_file):
         os.remove(label_file)
     label_normaliser = DiscreteLabelNormalisationLayer(
         image_name='label',
         modalities=vars(LABEL_TASK).get('label'),
         model_filename=os.path.join('testing_data', 'label_reader.txt'))
     reader = ImageReader(['label'])
     # adding preprocessing before initialise() must fail
     with self.assertRaisesRegex(AssertionError, ''):
         reader.add_preprocessing_layers(label_normaliser)
     reader.initialise_reader(LABEL_DATA, LABEL_TASK)
     reader.add_preprocessing_layers(label_normaliser)
     reader.add_preprocessing_layers(
         [PadLayer(image_name=['label'], border=(10, 5, 5))])
     idx, data, interp_order = reader(idx=0)
     # normalised labels are consecutive integers starting at 0
     unique_data = np.unique(data['label'])
     expected = np.array(range(156), dtype=np.float32)
     self.assertAllClose(unique_data, expected)
     # padded shape: original extents plus 2x the (10, 5, 5) border
     self.assertAllClose(data['label'].shape, (83, 73, 73, 1, 1))
Exemplo n.º 31
0
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        """Extend the regression loader to read ground truth for error maps.

        Delegates to RegressionApplication first; when running inference
        with ``error_map`` enabled, the readers are rebuilt so the 'output'
        (ground truth) images are available alongside 'image'.
        """
        RegressionApplication.initialise_dataset_loader(
            self, data_param, task_param, data_partitioner)
        if self.is_training:
            return
        if not task_param.error_map:
            return

        file_lists = self.get_file_lists(data_partitioner)
        # modifying the original readers in regression application
        # as we need ground truth labels to generate error maps
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(['image', 'output'])
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)

        mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        preprocessors = []
        if self.net_param.normalisation:
            # assumes histogram_ref_file is set whenever normalisation is
            # enabled; otherwise this appends None -- TODO confirm upstream
            preprocessors.append(histogram_normaliser)
        if self.net_param.whitening:
            preprocessors.append(mean_var_normaliser)
        if self.net_param.volume_padding_size:
            preprocessors.append(
                PadLayer(image_name=SUPPORTED_INPUT,
                         border=self.net_param.volume_padding_size))
        # only the first reader receives preprocessing here
        self.readers[0].add_preprocessing_layers(preprocessors)
Exemplo n.º 32
0
    def test_simple(self):
        """A default-initialised reader yields a 5D sample under key 'mr'."""
        reader = ImageReader().initialise(
            {'mr': {'path_to_search': IMAGE_PATH_2D}})
        # properties keep their defaults
        self.default_property_asserts(reader)
        sample_idx, sample_data, sample_interp = reader()

        # output checks
        self.assertTrue('mr' in sample_data)
        self.assertTrue(sample_idx in range(len(reader.output_list)))
        self.assertDictEqual(sample_interp, {'mr': (1,)})
        self.assertEqual(sample_data['mr'].shape, (100, 100, 1, 1, 1))
Exemplo n.º 33
0
    def test_renaming(self):
        """Grouping 'mr' under the name 'ct' renames every reader output."""
        mr_section = {'mr': {'path_to_search': IMAGE_PATH_2D}}
        renaming = {'ct': ('mr',)}
        reader = ImageReader().initialise(mr_section, renaming)
        self.renamed_property_asserts(reader)
        sample_idx, sample_data, sample_interp = reader()

        # output checks: data appears under the new name only
        self.assertTrue('ct' in sample_data)
        self.assertTrue(sample_idx in range(len(reader.output_list)))
        self.assertDictEqual(sample_interp, {'ct': (1,)})
        self.assertEqual(sample_data['ct'].shape, (100, 100, 1, 1, 1))
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        """Create readers for training or for the requested inference type.

        :param data_param: per-modality input configuration
        :param task_param: autoencoder task parameters
        :param data_partitioner: provides train/validation/inference files
        :raises NotImplementedError: when run in evaluation mode
        """
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            # validate the requested inference type against supported ones
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_evaluation:
            # bug fix: the exception was constructed but never raised,
            # silently continuing into unsupported evaluation mode
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param, task_param, file_lists[0])
        elif self._infer_type == 'sample':
            # sampling draws from the prior; no input reader is needed
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            self.readers = [ImageReader(['feature'])]
            # bug fix: `[file_lists]` double-wrapped the list of file lists;
            # pass the first file list, consistent with the 'encode' branch
            self.readers[0].initialise(data_param, task_param, file_lists[0])
def get_label_reader():
    """Create a 'label' reader; attach normalisation then (5, 6, 7) padding."""
    reader = ImageReader(['label'])
    reader.initialise(MOD_LABEL_DATA, MOD_LABEl_TASK, mod_label_list)
    mapping_file = os.path.join('testing_data', 'agg_test.txt')
    reader.add_preprocessing_layers(
        DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(SINGLE_25D_TASK).get('label'),
            model_filename=mapping_file))
    padding = PadLayer(image_name=('label',), border=(5, 6, 7))
    reader.add_preprocessing_layers([padding])
    return reader
Exemplo n.º 36
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Rebuild readers with ground truth ('output') for error-map mode.

        Delegates to RegressionApplication; training runs and runs without
        ``error_map`` keep the readers created by the parent class.
        """
        RegressionApplication.initialise_dataset_loader(
            self, data_param, task_param, data_partitioner)
        if self.is_training:
            return
        if not task_param.error_map:
            return

        file_lists = self.get_file_lists(data_partitioner)
        # modifying the original readers in regression application
        # as we need ground truth labels to generate error maps
        self.readers=[]
        for file_list in file_lists:
            reader = ImageReader(['image', 'output'])
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)

        mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        preprocessors = []
        if self.net_param.normalisation:
            # assumes histogram_ref_file is set when normalisation is on;
            # otherwise this appends None -- TODO confirm upstream validation
            preprocessors.append(histogram_normaliser)
        if self.net_param.whitening:
            preprocessors.append(mean_var_normaliser)
        if self.net_param.volume_padding_size:
            preprocessors.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))
        # only the first reader receives preprocessing here
        self.readers[0].add_preprocessing_layers(preprocessors)
Exemplo n.º 37
0
 def test_existing_csv(self):
     """A reader built from a previously written csv keeps all properties."""
     # the first reader's initialise() writes the csv as a side effect
     csv_builder = ImageReader(['image'])
     csv_builder.initialise(
         SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     reader = ImageReader(['image'])
     reader.initialise(EXISTING_DATA, SINGLE_MOD_TASK, existing_list)
     self.assertEqual(len(reader.output_list), 4)
     self.assertDictEqual(reader.spatial_ranks, {'image': 3})
     self.assertDictEqual(
         reader.shapes, {'image': (256, 168, 256, 1, 1)})
     self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
     self.assertEqual(reader.names, ('image',))
     self.assertDictEqual(
         reader.input_sources, {'image': ('lesion',)})
     self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
     self.assertTrue(isinstance(reader.get_subject(1), dict))
Exemplo n.º 38
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Create readers for training or for each autoencoder inference type.

        :param data_param: per-modality input configuration
        :param task_param: autoencoder task parameters
        :param data_partitioner: provides train/validation/inference files
        """
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            # validate the requested inference type against supported ones
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = []
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.train_files)
                file_lists.append(data_partitioner.validation_files)
            else:
                # NOTE(review): sibling loaders use all_files in this branch;
                # this one trains on train_files only -- confirm intended
                file_lists.append(data_partitioner.train_files)

            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            # encoding reads images from the inference partition
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       data_partitioner.inference_files)
        elif self._infer_type == 'sample':
            # sampling draws from the prior; no input reader is needed
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            # interpolation operates on encoded 'feature' inputs
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       data_partitioner.inference_files)
Exemplo n.º 39
0
    def test_no_2d_resampling_properties(self):
        """With a csv_file present, pixdim/axcodes resampling is disabled."""
        mr_spec = {'path_to_search': IMAGE_PATH_2D,
                   'csv_file': '2d_test.csv',
                   'pixdim': (2, 2, 2),
                   'axcodes': 'RAS'}
        reader = ImageReader().initialise({'mr': mr_spec})
        first_image = reader.output_list[0]['mr']
        self.assertEqual(first_image.output_pixdim, (None,))
        self.assertEqual(first_image.output_axcodes, (None,))
        sample_idx, sample_data, sample_interp = reader()

        # output checks
        self.assertTrue('mr' in sample_data)
        self.assertTrue(sample_idx in range(len(reader.output_list)))
        self.assertDictEqual(sample_interp, {'mr': (1,)})
        self.assertEqual(sample_data['mr'].shape, (100, 100, 1, 1, 1))
Exemplo n.º 40
0
def get_first_array(directory: Union[str, Path]) -> np.ndarray:
    """Load the first .nii.gz found under *directory* as an RAS array.

    Uses NiftyNet's ImageReader so the volume is reoriented to RAS
    before the singleton dimensions are squeezed away.
    """
    root = Path(directory)
    first_nifti = list(root.glob('**/*.nii.gz'))[0]
    data_parameters = {
        'image': dict(
            path_to_search=str(first_nifti.parent),
            filename_contains='nii',
            axcodes=('R', 'A', 'S'),
        ),
    }
    reader = ImageReader().initialise(data_parameters)
    _, image_dict, _ = reader()
    return image_dict['image'].squeeze()
Exemplo n.º 41
0
 def test_preprocessing_zero_padding(self):
     """A PadLayer with a zero border must leave the volume shape intact."""
     reader = ImageReader(['image'])
     reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
     sample_idx, sample, orders = reader()
     self.assertEqual(
         SINGLE_MOD_DATA['lesion'].interp_order, orders['image'][0])
     self.assertAllClose(sample['image'].shape, (256, 168, 256, 1, 1))
     # zero-sized padding is a no-op on the shape
     zero_pad = PadLayer(image_name=['image'], border=(0, 0, 0))
     reader.add_preprocessing_layers([zero_pad])
     sample_idx, sample, orders = reader(idx=2)
     self.assertEqual(sample_idx, 2)
     self.assertAllClose(sample['image'].shape, (256, 168, 256, 1, 1))
Exemplo n.º 42
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Create paired fixed/moving readers for each file list.

        :param data_param: per-modality input configuration
        :param task_param: registration task parameters
        :param data_partitioner: supplies the train/validation file lists
        :raises NotImplementedError: when run in evaluation mode
        """
        self.data_param = data_param
        self.registration_param = task_param

        file_lists = self.get_file_lists(data_partitioner)

        if self.is_evaluation:
            # bug fix: the exception object was created but never raised,
            # so evaluation silently fell through to the reader setup below
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')

        # readers are appended in (fixed, moving) pairs per file list
        self.readers = []
        for file_list in file_lists:
            fixed_reader = ImageReader({'fixed_image', 'fixed_label'})
            fixed_reader.initialise(data_param, task_param, file_list)
            self.readers.append(fixed_reader)

            moving_reader = ImageReader({'moving_image', 'moving_label'})
            moving_reader.initialise(data_param, task_param, file_list)
            self.readers.append(moving_reader)
Exemplo n.º 43
0
def get_dynamic_window_reader():
    """Reader over the dynamic-window test data with a 'sampler' source."""
    dyn_reader = ImageReader(['image', 'sampler'])
    dyn_reader.initialise(DYNAMIC_MOD_DATA, DYNAMIC_MOD_TASK, dynamic_list)
    return dyn_reader
def get_25d_reader():
    """Single-modality reader over the 2.5D test configuration."""
    image_reader = ImageReader(['image'])
    image_reader.initialise(SINGLE_25D_DATA, SINGLE_25D_TASK, single_25d_list)
    return image_reader
Exemplo n.º 45
0
def get_3d_reader():
    """Multi-modality 3D reader providing 'image' and 'sampler' outputs."""
    multi_reader = ImageReader(['image', 'sampler'])
    multi_reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
    return multi_reader
Exemplo n.º 46
0
def get_2d_reader():
    """2D reader providing 'image' and 'sampler' outputs."""
    planar_reader = ImageReader(['image', 'sampler'])
    planar_reader.initialise(MOD_2D_DATA, MOD_2D_TASK, mod_2d_list)
    return planar_reader
Exemplo n.º 47
0
def get_dynamic_window_reader():
    """Build an image/label reader from a freshly partitioned file list."""
    file_list = data_partitioner.initialise(DYNAMIC_MOD_DATA).get_file_list()
    window_reader = ImageReader(['image', 'label'])
    window_reader.initialise(DYNAMIC_MOD_DATA, DYNAMIC_MOD_TASK, file_list)
    return window_reader
Exemplo n.º 48
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Set up readers and preprocessing for train/inference/evaluation.

        :param data_param: per-modality input sections from the config
        :param task_param: segmentation task parameters
        :param data_partitioner: supplies per-action file lists
        :raises ValueError: if the current action is not supported
        """
        self.data_param = data_param
        self.segmentation_param = task_param

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader({'image', 'label', 'weight', 'sampler'})
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)

        elif self.is_inference:
            # in the inference process use image input only
            inference_reader = ImageReader({'image'})
            file_list = data_partitioner.inference_files
            inference_reader.initialise(data_param, task_param, file_list)
            self.readers = [inference_reader]
        elif self.is_evaluation:
            # evaluation compares the inferred output against the label
            file_list = data_partitioner.inference_files
            reader = ImageReader({'image', 'label', 'inferred'})
            reader.initialise(data_param, task_param, file_list)
            self.readers = [reader]
        else:
            raise ValueError('Action `{}` not supported. Expected one of {}'
                             .format(self.action, self.SUPPORTED_ACTIONS))

        # optional binary mask limits intensity statistics to the foreground
        foreground_masking_layer = None
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer)
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=foreground_masking_layer,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        label_normalisers = None
        if self.net_param.histogram_ref_file and \
                task_param.label_normalisation:
            label_normalisers = [DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)]
            if self.is_evaluation:
                # the inferred volume reuses the same label mapping key
                # as the 'label' normaliser so both map identically
                label_normalisers.append(
                    DiscreteLabelNormalisationLayer(
                        image_name='inferred',
                        modalities=vars(task_param).get('inferred'),
                        model_filename=self.net_param.histogram_ref_file))
                label_normalisers[-1].key = label_normalisers[0].key

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)
        if task_param.label_normalisation and \
                (self.is_training or not task_param.output_prob):
            normalisation_layers.extend(label_normalisers)

        # random flip/scale/rotation/deformation, training only
        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=self.action_param.scaling_percentage[0],
                    max_percentage=self.action_param.scaling_percentage[1]))
            if self.action_param.rotation_angle or \
                    self.action_param.rotation_angle_x or \
                    self.action_param.rotation_angle_y or \
                    self.action_param.rotation_angle_z:
                rotation_layer = RandomRotationLayer()
                if self.action_param.rotation_angle:
                    # one angle range applied uniformly to all axes
                    rotation_layer.init_uniform_angle(
                        self.action_param.rotation_angle)
                else:
                    rotation_layer.init_non_uniform_angle(
                        self.action_param.rotation_angle_x,
                        self.action_param.rotation_angle_y,
                        self.action_param.rotation_angle_z)
                augmentation_layers.append(rotation_layer)

            # add deformation layer
            if self.action_param.do_elastic_deformation:
                spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
                augmentation_layers.append(RandomElasticDeformationLayer(
                    spatial_rank=spatial_rank,
                    num_controlpoints=self.action_param.num_ctrl_points,
                    std_deformation_sigma=self.action_param.deformation_sigma,
                    proportion_to_augment=self.action_param.proportion_to_deform))

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))

        # only add augmentation to first reader (not validation reader)
        self.readers[0].add_preprocessing_layers(
            volume_padding_layer +
            normalisation_layers +
            augmentation_layers)

        for reader in self.readers[1:]:
            reader.add_preprocessing_layers(
                volume_padding_layer +
                normalisation_layers)
Exemplo n.º 49
0
def get_3d_reader():
    """Image/label reader built from a freshly partitioned 3D file list."""
    file_list = data_partitioner.initialise(MULTI_MOD_DATA).get_file_list()
    print(MULTI_MOD_DATA, MULTI_MOD_TASK)
    mm_reader = ImageReader(['image', 'label'])
    mm_reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, file_list)
    return mm_reader
Exemplo n.º 50
0
def get_2d_reader():
    """Single-modality 2D reader built from a fresh partition file list."""
    file_list = data_partitioner.initialise(MOD_2D_DATA).get_file_list()
    planar_reader = ImageReader(['image'])
    planar_reader.initialise(MOD_2D_DATA, MOD_2D_TASK, file_list)
    return planar_reader
Exemplo n.º 51
0
    def test_images2d(self):
        """Load colour, grayscale and mask 2D images through one reader.

        The same reader instance is re-initialised for each variant; the
        grayscale and mask samples fetched at idx=5 are compared at the end
        to check the mask equals a thresholded grayscale image.
        """
        reader = ImageReader(['image'])

        # COLOR IMAGES
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_COLOR,
                          image2d_data_list)

        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        # Check shape: RGB gives 3 channels in the last (modality) axis
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 3))
        self.assertEqual(interp_order['image'], (1,))

        # GRAY IMAGES
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_GRAY,
                          image2d_data_list)

        idx, data, interp_order = reader()
        image = data['image']

        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        # Check shape: grayscale has a single channel
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 1))
        self.assertEqual(interp_order['image'], (1,))

        # keep the idx=5 grayscale sample for the comparison below
        gray_idx, gray_data, gray_order = reader(idx=5)

        # SEGMENTATION MASKS
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_MASK,
                          image2d_data_list)

        idx, data, interp_order = reader()
        image = data['image']

        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type: binary mask has exactly two distinct values
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        self.assertEqual(np.unique(image).size, 2)
        # Check shape
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 1))
        # masks use nearest-neighbour interpolation (order 0)
        self.assertEqual(interp_order['image'], (0,))

        # Compare segmentation masks to thresholding original image
        mask_idx, mask_data, mask_order = reader(idx=5)

        gray_data = gray_data['image']
        mask_data = mask_data['image']

        self.assertEqual(gray_idx, mask_idx)
        self.assertEqual(gray_order['image'], (1,))
        self.assertEqual(mask_order['image'], (0,))
        self.assertAllEqual((gray_data > SEG_THRESHOLD) * 255, mask_data)
Exemplo n.º 52
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Set up classification readers and preprocessing per action.

        :param data_param: per-modality input sections from the config
        :param task_param: classification task parameters
        :param data_partitioner: supplies per-action file lists
        :raises ValueError: if the current action is not supported
        """
        self.data_param = data_param
        self.classification_param = task_param

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image', 'label', 'sampler'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)

        elif self.is_inference:
            # in the inference process use image input only
            inference_reader = ImageReader(['image'])
            inference_reader.initialise(data_param, task_param, file_lists[0])
            self.readers = [inference_reader]
        elif self.is_evaluation:
            # evaluation compares the inferred output against the label
            reader = ImageReader({'image', 'label', 'inferred'})
            reader.initialise(data_param, task_param, file_lists[0])
            self.readers = [reader]
        else:
            raise ValueError('Action `{}` not supported. Expected one of {}'
                             .format(self.action, self.SUPPORTED_ACTIONS))

        # optional binary mask limits intensity statistics to the foreground
        foreground_masking_layer = None
        if self.net_param.normalise_foreground_only:
            foreground_masking_layer = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=foreground_masking_layer)
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=foreground_masking_layer,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        label_normaliser = None
        if self.net_param.histogram_ref_file:
            label_normaliser = DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)
        if task_param.label_normalisation:
            # assumes histogram_ref_file is set when label_normalisation is
            # on; otherwise this appends None -- TODO confirm upstream checks
            normalisation_layers.append(label_normaliser)

        # random flip/scale/rotation, training only
        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=self.action_param.scaling_percentage[0],
                    max_percentage=self.action_param.scaling_percentage[1]))
            if self.action_param.rotation_angle or \
                    self.action_param.rotation_angle_x or \
                    self.action_param.rotation_angle_y or \
                    self.action_param.rotation_angle_z:
                rotation_layer = RandomRotationLayer()
                if self.action_param.rotation_angle:
                    # one angle range applied uniformly to all axes
                    rotation_layer.init_uniform_angle(
                        self.action_param.rotation_angle)
                else:
                    rotation_layer.init_non_uniform_angle(
                        self.action_param.rotation_angle_x,
                        self.action_param.rotation_angle_y,
                        self.action_param.rotation_angle_z)
                augmentation_layers.append(rotation_layer)

        # every reader shares normalisation and augmentation here
        for reader in self.readers:
            reader.add_preprocessing_layers(
                normalisation_layers +
                augmentation_layers)
Exemplo n.º 53
0
def get_concentric_window_reader():
    """Build an ``ImageReader`` over the multi-window test configuration.

    Returns:
        An initialised ``ImageReader`` providing 'image' and 'label'
        sources from ``MULTI_WINDOW_DATA`` / ``MULTI_WINDOW_TASK``.
    """
    window_reader = ImageReader(['image', 'label'])
    window_reader.initialise(
        MULTI_WINDOW_DATA, MULTI_WINDOW_TASK, multi_mod_list)
    return window_reader
Exemplo n.º 54
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Create image readers and attach preprocessing layers.

        Training builds one reader per file list (train, plus validation
        when ``validation_every_n > 0``); inference builds a single
        image-only reader.  Preprocessing order is padding, then
        normalisation, then augmentation.
        """
        self.data_param = data_param
        self.segmentation_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = [data_partitioner.train_files]
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.validation_files)

            self.readers = []
            for subject_list in file_lists:
                training_reader = ImageReader(SUPPORTED_INPUT)
                training_reader.initialise(
                    data_param, task_param, subject_list)
                self.readers.append(training_reader)
        else:  # in the inference process use image input only
            image_reader = ImageReader(['image'])
            image_reader.initialise(
                data_param, task_param, data_partitioner.inference_files)
            self.readers = [image_reader]

        # optional binary mask restricting normalisation to the foreground
        masking_func = None
        if self.net_param.normalise_foreground_only:
            masking_func = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        whitening = MeanVarNormalisationLayer(
            image_name='image', binary_masking_func=masking_func)
        hist_norm = None
        if self.net_param.histogram_ref_file:
            hist_norm = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=masking_func,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        label_norm = None
        if self.net_param.histogram_ref_file:
            label_norm = DiscreteLabelNormalisationLayer(
                image_name='label',
                modalities=vars(task_param).get('label'),
                model_filename=self.net_param.histogram_ref_file)

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(hist_norm)
        if self.net_param.whitening:
            normalisation_layers.append(whitening)
        if task_param.label_normalisation:
            normalisation_layers.append(label_norm)

        augmentation_layers = []
        if self.is_training:
            params = self.action_param
            if params.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=params.random_flipping_axes))
            if params.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=params.scaling_percentage[0],
                    max_percentage=params.scaling_percentage[1]))
            # uniform rotation takes precedence over per-axis ranges
            wants_rotation = (params.rotation_angle or
                              params.rotation_angle_x or
                              params.rotation_angle_y or
                              params.rotation_angle_z)
            if wants_rotation:
                rotation = RandomRotationLayer()
                if params.rotation_angle:
                    rotation.init_uniform_angle(params.rotation_angle)
                else:
                    rotation.init_non_uniform_angle(
                        params.rotation_angle_x,
                        params.rotation_angle_y,
                        params.rotation_angle_z)
                augmentation_layers.append(rotation)

        padding_layers = []
        if self.net_param.volume_padding_size:
            padding_layers.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))

        for reader in self.readers:
            reader.add_preprocessing_layers(
                padding_layers + normalisation_layers + augmentation_layers)
Exemplo n.º 55
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Set up readers and preprocessing for the regression task.

        Training uses all supported inputs (one reader per file list);
        inference reads images only.  Preprocessing order is padding,
        then normalisation, then augmentation.
        """
        self.data_param = data_param
        self.regression_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = [data_partitioner.train_files]
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.validation_files)

            self.readers = []
            for subject_list in file_lists:
                training_reader = ImageReader(SUPPORTED_INPUT)
                training_reader.initialise(
                    data_param, task_param, subject_list)
                self.readers.append(training_reader)
        else:
            inference_reader = ImageReader(['image'])
            inference_reader.initialise(
                data_param, task_param, data_partitioner.inference_files)
            self.readers = [inference_reader]

        whitening = MeanVarNormalisationLayer(image_name='image')
        hist_norm = None
        if self.net_param.histogram_ref_file:
            hist_norm = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(hist_norm)
        if self.net_param.whitening:
            normalisation_layers.append(whitening)

        augmentation_layers = []
        if self.is_training:
            params = self.action_param
            if params.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=params.random_flipping_axes))
            if params.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=params.scaling_percentage[0],
                    max_percentage=params.scaling_percentage[1]))
            if params.rotation_angle:
                rotation = RandomRotationLayer()
                rotation.init_uniform_angle(params.rotation_angle)
                augmentation_layers.append(rotation)

        padding_layers = []
        if self.net_param.volume_padding_size:
            padding_layers.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))

        for reader in self.readers:
            reader.add_preprocessing_layers(
                padding_layers + normalisation_layers + augmentation_layers)
Exemplo n.º 56
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Set up readers and preprocessing for the GAN task.

        Training reads 'image' and 'conditioning' sources; inference
        reads 'conditioning' only.  No volume padding is applied here.
        """
        self.data_param = data_param
        self.gan_param = task_param

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = [data_partitioner.train_files]
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.validation_files)
            self.readers = []
            for subject_list in file_lists:
                training_reader = ImageReader(['image', 'conditioning'])
                training_reader.initialise(
                    data_param, task_param, subject_list)
                self.readers.append(training_reader)
        else:
            inference_reader = ImageReader(['conditioning'])
            inference_reader.initialise(
                data_param, task_param, data_partitioner.inference_files)
            self.readers = [inference_reader]

        # optional binary mask restricting normalisation to the foreground
        masking_func = None
        if self.net_param.normalise_foreground_only:
            masking_func = BinaryMaskingLayer(
                type_str=self.net_param.foreground_type,
                multimod_fusion=self.net_param.multimod_foreground_type,
                threshold=0.0)

        whitening = MeanVarNormalisationLayer(
            image_name='image',
            binary_masking_func=masking_func)
        hist_norm = None
        if self.net_param.histogram_ref_file:
            hist_norm = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                binary_masking_func=masking_func,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(hist_norm)
        if self.net_param.whitening:
            normalisation_layers.append(whitening)

        augmentation_layers = []
        if self.is_training:
            params = self.action_param
            if params.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=params.random_flipping_axes))
            if params.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=params.scaling_percentage[0],
                    max_percentage=params.scaling_percentage[1]))
            if params.rotation_angle:
                rotation = RandomRotationLayer()
                rotation.init_uniform_angle(params.rotation_angle)
                augmentation_layers.append(rotation)

        for reader in self.readers:
            reader.add_preprocessing_layers(
                normalisation_layers + augmentation_layers)
    def test_volume_loader(self):
        """End-to-end check of histogram and mean/std normalisation.

        Trains a histogram-normalisation model on the four-subject test
        set, checks the learned landmark mappings against reference
        values, then verifies that whitening yields zero-mean/unit-std
        output while preserving the ordering of voxel intensities.
        """
        # reference landmark values (0-100 range) expected after training
        expected_T1 = np.array(
            [0.0, 8.24277910972, 21.4917343731,
             27.0551695202, 32.6186046672, 43.5081573038,
             53.3535675285, 61.9058849776, 70.0929786194,
             73.9944243858, 77.7437509974, 88.5331971492,
             100.0])
        expected_FLAIR = np.array(
            [0.0, 5.36540863446, 15.5386130103,
             20.7431912042, 26.1536608309, 36.669150376,
             44.7821246138, 50.7930589961, 56.1703089214,
             59.2393548654, 63.1565641037, 78.7271261392,
             100.0])

        reader = ImageReader(['image'])
        reader.initialise(DATA_PARAM, TASK_PARAM, file_list)
        self.assertAllClose(len(reader._file_list), 4)

        foreground_masking_layer = BinaryMaskingLayer(
            type_str='otsu_plus',
            multimod_fusion='or')
        hist_norm = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(TASK_PARAM).get('image'),
            model_filename=MODEL_FILE,
            binary_masking_func=foreground_masking_layer,
            cutoff=(0.05, 0.95),
            name='hist_norm_layer')
        # remove any stale model file so training starts from scratch
        if os.path.exists(MODEL_FILE):
            os.remove(MODEL_FILE)
        hist_norm.train(reader.output_list)
        out_map = hist_norm.mapping

        self.assertAllClose(out_map['T1'], expected_T1)
        self.assertAllClose(out_map['FLAIR'], expected_FLAIR)

        # normalise a uniformly sampled random image
        test_shape = (20, 20, 20, 3, 2)
        rand_image = np.random.uniform(low=-10.0, high=10.0, size=test_shape)
        norm_image = np.copy(rand_image)
        # dict call computes the mask itself; the array call reuses it --
        # presumably both entry points agree (asserted just below)
        norm_image_dict, mask_dict = hist_norm({'image': norm_image})
        norm_image, mask = hist_norm(norm_image, mask_dict)
        self.assertAllClose(norm_image_dict['image'], norm_image)
        self.assertAllClose(mask_dict['image'], mask)

        # apply mean std normalisation
        mv_norm = MeanVarNormalisationLayer(
            image_name='image',
            binary_masking_func=foreground_masking_layer)
        norm_image, _ = mv_norm(norm_image, mask)
        self.assertAllClose(norm_image.shape, mask.shape)

        # second pass without a mask: whiten over the whole volume
        mv_norm = MeanVarNormalisationLayer(
            image_name='image',
            binary_masking_func=None)
        norm_image, _ = mv_norm(norm_image)

        # mapping should keep at least the order of the images
        rand_image = rand_image[:, :, :, 1, 1].flatten()
        norm_image = norm_image[:, :, :, 1, 1].flatten()

        order_before = rand_image[1:] > rand_image[:-1]
        order_after = norm_image[1:] > norm_image[:-1]
        self.assertAllClose(np.mean(norm_image), 0.0)
        self.assertAllClose(np.std(norm_image), 1.0)
        self.assertAllClose(order_before, order_after)
        # clean up the trained model file
        if os.path.exists(MODEL_FILE):
            os.remove(MODEL_FILE)
Exemplo n.º 58
0
    def test_initialisation(self):
        """Constructor/initialise error handling and output list sizes."""
        # unknown source name -> ValueError at initialise time
        with self.assertRaisesRegexp(ValueError, ''):
            bad_reader = ImageReader(['test'])
            bad_reader.initialise(
                MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        # constructing without source names fails immediately
        with self.assertRaisesRegexp(AssertionError, ''):
            ImageReader(None)

        multi_reader = ImageReader(['image'])
        multi_reader.initialise(
            MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        self.assertEqual(len(multi_reader.output_list), 4)

        single_reader = ImageReader(['image'])
        single_reader.initialise(
            SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        self.assertEqual(len(single_reader.output_list), 4)

        # an empty file list is rejected
        empty_reader = ImageReader(['image'])
        with self.assertRaisesRegexp(ValueError, ''):
            empty_reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, [])
Exemplo n.º 59
0
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Create image readers for the current action and attach
        preprocessing (padding, normalisation, augmentation) layers.

        Readers are always initialised from the file lists returned by
        ``self.get_file_lists(data_partitioner)``.

        Raises:
            ValueError: if the current action is not one of
                ``self.SUPPORTED_ACTIONS``.
        """
        self.data_param = data_param
        self.regression_param = task_param

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader({'image', 'output', 'weight', 'sampler'})
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        elif self.is_inference:
            # fix: dropped the unused `file_list = data_partitioner
            # .inference_files` assignment -- the reader was (and is)
            # initialised from file_lists[0]
            inference_reader = ImageReader(['image'])
            inference_reader.initialise(data_param, task_param, file_lists[0])
            self.readers = [inference_reader]
        elif self.is_evaluation:
            # fix: same dead assignment removed here
            reader = ImageReader({'image', 'output', 'inferred'})
            reader.initialise(data_param, task_param, file_lists[0])
            self.readers = [reader]
        else:
            raise ValueError('Action `{}` not supported. Expected one of {}'
                             .format(self.action, self.SUPPORTED_ACTIONS))

        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image')
        # histogram normalisation requires a reference model file
        histogram_normaliser = None
        if self.net_param.histogram_ref_file:
            histogram_normaliser = HistogramNormalisationLayer(
                image_name='image',
                modalities=vars(task_param).get('image'),
                model_filename=self.net_param.histogram_ref_file,
                norm_type=self.net_param.norm_type,
                cutoff=self.net_param.cutoff,
                name='hist_norm_layer')

        normalisation_layers = []
        if self.net_param.normalisation:
            normalisation_layers.append(histogram_normaliser)
        if self.net_param.whitening:
            normalisation_layers.append(mean_var_normaliser)

        # random flips / scaling / rotation are training-only
        augmentation_layers = []
        if self.is_training:
            if self.action_param.random_flipping_axes != -1:
                augmentation_layers.append(RandomFlipLayer(
                    flip_axes=self.action_param.random_flipping_axes))
            if self.action_param.scaling_percentage:
                augmentation_layers.append(RandomSpatialScalingLayer(
                    min_percentage=self.action_param.scaling_percentage[0],
                    max_percentage=self.action_param.scaling_percentage[1]))
            if self.action_param.rotation_angle:
                augmentation_layers.append(RandomRotationLayer())
                augmentation_layers[-1].init_uniform_angle(
                    self.action_param.rotation_angle)

        volume_padding_layer = []
        if self.net_param.volume_padding_size:
            volume_padding_layer.append(PadLayer(
                image_name=SUPPORTED_INPUT,
                border=self.net_param.volume_padding_size))
        # padding first, then normalisation, then augmentation
        for reader in self.readers:
            reader.add_preprocessing_layers(volume_padding_layer +
                                            normalisation_layers +
                                            augmentation_layers)