def get_reader(data_param, image_sets_partitioner, phase):
    # Using Nifty Reader
    if phase == 'training':
        image_reader = ImageReader().initialise(
            data_param,
            file_list=image_sets_partitioner.get_file_list(TRAIN))
    elif phase == 'validation':
        image_reader = ImageReader().initialise(
            data_param,
            file_list=image_sets_partitioner.get_file_list(VALID))
    elif phase == 'inference':
        image_reader = ImageReader().initialise(
            data_param,
            file_list=image_sets_partitioner.get_file_list(INFER))
    else:
        raise ValueError(
            'Invalid phase choice: {}, expected one of {}'.format(
                phase, ['training', 'validation', 'inference']))

    # Adding preprocessing layers
    mean_variance_norm_layer = MeanVarNormalisationLayer(image_name='image')
    pad_layer = PadLayer(image_name=('image', 'label'), border=(8, 8, 8))
    image_reader.add_preprocessing_layers([mean_variance_norm_layer])
    if phase == 'inference':
        image_reader.add_preprocessing_layers([pad_layer])
    return image_reader

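# A minimal usage sketch for get_reader above (hypothetical paths and
# section names; assumes NiftyNet's ImageSetsPartitioner API, where
# `ratios` gives the validation/inference fractions).
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner

data_param = {
    'image': {'path_to_search': './data', 'filename_contains': 'img'},
    'label': {'path_to_search': './data', 'filename_contains': 'seg'}}
partitioner = ImageSetsPartitioner().initialise(data_param, ratios=(0.1, 0.1))
train_reader = get_reader(data_param, partitioner, 'training')
# readers are callable: they return (subject index, data dict, interp orders)
_, data, _ = train_reader(idx=0)
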
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param

    # read each line of csv files into an instance of Subject
    if self.is_training:
        file_lists = []
        if self.action_param.validation_every_n > 0:
            file_lists.append(data_partitioner.train_files)
            file_lists.append(data_partitioner.validation_files)
        else:
            file_lists.append(data_partitioner.train_files)
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(SUPPORTED_INPUT)
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    else:  # in the inference process use image input only
        inference_reader = ImageReader(['image'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)

    normalisation_layers = []
    if self.net_param.whitening:
        mean_var_normaliser = MeanVarNormalisationLayer(
            image_name='image',
            binary_masking_func=foreground_masking_layer)
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation and \
            (self.is_training or not task_param.output_prob):
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)
        normalisation_layers.append(label_normaliser)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))

    for reader in self.readers:
        reader.add_preprocessing_layers(
            normalisation_layers + volume_padding_layer)

def test_pad_to_without_data_dict(self):
    rand_image = np.random.random([10, 10, 2, 1])
    data_dict = {'image': rand_image}
    # test without dictionary
    tst = PadLayer(('image',), (0,), pad_to=(5, 5, 10))
    padded = tst.layer_op(data_dict['image'])[0]
    self.assertTrue(padded.shape == (10, 10, 10, 1))
    depadded = tst.inverse_op(padded)[0]
    self.assertTrue(np.all(depadded == rand_image))

def test_pad_to_odd_numbers(self):
    rand_image = np.random.random([10, 10, 2, 1])
    data_dict = {'image': rand_image}
    tst = PadLayer(('image',), (0,), pad_to=(15, 17, 10))
    # test straightforward pad_to
    padded = tst.layer_op(data_dict)
    self.assertTrue(padded[0]['image'].shape == (15, 17, 10, 1))
    depadded = tst.inverse_op(padded[0])
    self.assertTrue(np.all(depadded[0]['image'] == rand_image))

def test_pad_to_simple(self):
    rand_image = np.random.random([10, 10, 2, 1])
    data_dict = {'image': rand_image}
    tst = PadLayer(('image',), (0,), pad_to=(52, 52, 2))
    padded = tst.layer_op(data_dict)
    self.assertTrue(padded[0]['image'].shape == (52, 52, 2, 1))
    depadded = tst.inverse_op(padded[0])
    self.assertTrue(np.all(depadded[0]['image'] == rand_image))

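# A minimal sketch of the two padding modes exercised by the three tests
# above (assumes niftynet.layer.pad.PadLayer): `border` adds a symmetric
# margin per spatial axis, while `pad_to` only grows axes that are
# smaller than the target size.
import numpy as np
from niftynet.layer.pad import PadLayer

rand_image = np.random.random([10, 10, 2, 1])

# border mode: 2 * border voxels added on each spatial axis
border_layer = PadLayer(image_name=('image',), border=(3, 3, 3))
padded, _ = border_layer.layer_op({'image': rand_image.copy()})
print(padded['image'].shape)  # expected (16, 16, 8, 1)

# pad_to mode: axis 1 (size 10) already exceeds the target 5, so only
# axes 0 and 2 are enlarged
pad_to_layer = PadLayer(image_name=('image',), border=(0,), pad_to=(15, 5, 4))
padded, _ = pad_to_layer.layer_op({'image': rand_image.copy()})
print(padded['image'].shape)  # expected (15, 10, 4, 1)
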
def initialise_dataset_loader(self, data_param=None, task_param=None):
    self.data_param = data_param
    self.regression_param = task_param

    # read each line of csv files into an instance of Subject
    if self.is_training:
        self.reader = ImageReader(SUPPORTED_INPUT)
    else:  # in the inference process use image input only
        self.reader = ImageReader(['image'])
    self.reader.initialise_reader(data_param, task_param)

    mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')
    else:
        histogram_normaliser = None

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle:
            augmentation_layers.append(RandomRotationLayer())
            augmentation_layers[-1].init_uniform_angle(
                self.action_param.rotation_angle)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))

    self.reader.add_preprocessing_layers(
        volume_padding_layer + normalisation_layers + augmentation_layers)

def get_label_reader():
    reader = ImageReader(['label'])
    reader.initialise(MOD_LABEL_DATA, MOD_LABEl_TASK, mod_label_list)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(SINGLE_25D_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'agg_test.txt'))
    reader.add_preprocessing_layers(label_normaliser)
    pad_layer = PadLayer(image_name=('label',), border=(5, 6, 7))
    reader.add_preprocessing_layers([pad_layer])
    return reader

def test_trainable_preprocessing(self):
    label_file = os.path.join('testing_data', 'label_reader.txt')
    if os.path.exists(label_file):
        os.remove(label_file)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(LABEL_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'label_reader.txt'))
    reader = ImageReader(['label'])
    with self.assertRaisesRegexp(AssertionError, ''):
        reader.add_preprocessing_layers(label_normaliser)
    reader.initialise(LABEL_DATA, LABEL_TASK, label_list)
    reader.add_preprocessing_layers(label_normaliser)
    reader.add_preprocessing_layers(
        [PadLayer(image_name=['label'], border=(10, 5, 5))])
    idx, data, interp_order = reader(idx=0)
    unique_data = np.unique(data['label'])
    # labels normalised to consecutive integers 0..157
    expected_v1 = np.arange(158, dtype=np.float32)
    # same mapping with label 19 absent
    expected_v2 = np.array(
        [x for x in range(158) if x != 19], dtype=np.float32)
    compatible_assert = \
        np.all(unique_data == expected_v1) or \
        np.all(unique_data == expected_v2)
    self.assertTrue(compatible_assert)
    self.assertAllClose(data['label'].shape, (103, 74, 93, 1, 1))

def test_preprocessing_zero_padding(self):
    reader = ImageReader(['image'])
    reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
    idx, data, interp_order = reader()
    self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                     interp_order['image'][0])
    self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
    reader.add_preprocessing_layers(
        [PadLayer(image_name=['image'], border=(0, 0, 0))])
    idx, data, interp_order = reader(idx=2)
    self.assertEqual(idx, 2)
    self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))

def run_inverse_test(self, is_array, layer_param, expected_shape):
    if is_array:
        x = self.get_3d_input()
    else:
        x = self.get_3d_input_dict()
    padding_layer = PadLayer(**layer_param)
    out_acti, _ = padding_layer.inverse_op(x)
    print(padding_layer)
    if is_array:
        self.assertAllClose(out_acti.shape, expected_shape)
    else:
        self.assertAllClose(out_acti['image'].shape, expected_shape)

def initialise_dataset_loader(self, data_param=None, task_param=None,
                              data_partitioner=None):
    RegressionApplication.initialise_dataset_loader(
        self, data_param, task_param, data_partitioner)
    if self.is_training:
        return
    if not task_param.error_map:
        # use the regression application implementation
        return

    try:
        reader_phase = self.action_param.dataset_to_infer
    except AttributeError:
        reader_phase = None
    file_lists = data_partitioner.get_file_lists_by(
        phase=reader_phase, action=self.action)

    # modifying the original readers in regression application
    # as we need ground truth labels to generate error maps
    self.readers = [
        ImageReader(['image', 'output']).initialise(
            data_param, task_param, file_list)
        for file_list in file_lists]

    mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')

    preprocessors = []
    if self.net_param.normalisation:
        preprocessors.append(histogram_normaliser)
    if self.net_param.whitening:
        preprocessors.append(mean_var_normaliser)
    if self.net_param.volume_padding_size:
        preprocessors.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))
    self.readers[0].add_preprocessing_layers(preprocessors)

def test_trainable_preprocessing(self):
    label_file = os.path.join('testing_data', 'label_reader.txt')
    if os.path.exists(label_file):
        os.remove(label_file)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(LABEL_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'label_reader.txt'))
    reader = ImageReader(['label'])
    with self.assertRaisesRegexp(AssertionError, ''):
        reader.add_preprocessing_layers(label_normaliser)
    reader.initialise_reader(LABEL_DATA, LABEL_TASK)
    reader.add_preprocessing_layers(label_normaliser)
    reader.add_preprocessing_layers(
        [PadLayer(image_name=['label'], border=(10, 5, 5))])
    idx, data, interp_order = reader(idx=0)
    unique_data = np.unique(data['label'])
    expected = np.arange(156, dtype=np.float32)
    self.assertAllClose(unique_data, expected)
    self.assertAllClose(data['label'].shape, (83, 73, 73, 1, 1))

def add_preprocessing(image_reader, phase, window_border=None,
                      data_augmentation=False):
    rotation_layer = Rotate()
    rotation_layer.init_uniform_angle([-25.0, 25.0])
    if phase == 'training':
        if data_augmentation:
            image_reader.add_preprocessing_layers([rotation_layer])
    elif phase == 'inference':
        if window_border:
            pad_layer = PadLayer(image_name=('image', 'label'),
                                 border=window_border,
                                 mode='constant')
            image_reader.add_preprocessing_layers([pad_layer])
        else:
            raise ValueError(
                'Invalid window border: {}'.format(window_border))
    else:
        raise ValueError(
            'Invalid phase choice: {}, expected one of {}'.format(
                phase, ['training', 'inference']))
    return image_reader

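# Hedged usage sketch for add_preprocessing (names reuse the earlier
# get_reader example and are for illustration only): at inference the
# border typically matches the sampler's window border, so voxels near
# the volume edge are still covered by predictions.
infer_reader = get_reader(data_param, partitioner, 'inference')
infer_reader = add_preprocessing(infer_reader, 'inference',
                                 window_border=(8, 8, 8))
# at training time only the +/-25 degree random rotation is added,
# and only when data_augmentation is requested
train_reader = get_reader(data_param, partitioner, 'training')
train_reader = add_preprocessing(train_reader, 'training',
                                 data_augmentation=True)
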
def initialise_dataset_loader(self, data_param=None, task_param=None,
                              data_partitioner=None):
    self.data_param = data_param
    self.regression_param = task_param
    file_lists = self.get_file_lists(data_partitioner)

    # read each line of csv files into an instance of Subject
    if self.is_training:
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader({'image', 'output', 'weight', 'sampler'})
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    elif self.is_inference:
        # in the inference process use image input only
        inference_reader = ImageReader(['image'])
        inference_reader.initialise(data_param, task_param, file_lists[0])
        self.readers = [inference_reader]
    elif self.is_evaluation:
        reader = ImageReader({'image', 'output', 'inferred'})
        reader.initialise(data_param, task_param, file_lists[0])
        self.readers = [reader]
    else:
        raise ValueError(
            'Action `{}` not supported. Expected one of {}'.format(
                self.action, self.SUPPORTED_ACTIONS))

    mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle:
            augmentation_layers.append(RandomRotationLayer())
            augmentation_layers[-1].init_uniform_angle(
                self.action_param.rotation_angle)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size,
            mode=self.net_param.volume_padding_mode))

    for reader in self.readers:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers + augmentation_layers)

def initialise_dataset_loader(self, data_param=None, task_param=None,
                              data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param

    # read each line of csv files into an instance of Subject
    if self.is_training:
        file_lists = []
        if self.action_param.validation_every_n > 0:
            file_lists.append(data_partitioner.train_files)
            file_lists.append(data_partitioner.validation_files)
        else:
            file_lists.append(data_partitioner.all_files)
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(SUPPORTED_INPUT)
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    else:  # in the inference process use image input only
        inference_reader = ImageReader(['image'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)

    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image',
        binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')
    label_normaliser = None
    if self.net_param.histogram_ref_file:
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation:
        normalisation_layers.append(label_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle or \
                self.action_param.rotation_angle_x or \
                self.action_param.rotation_angle_y or \
                self.action_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if self.action_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    self.action_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    self.action_param.rotation_angle_x,
                    self.action_param.rotation_angle_y,
                    self.action_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))

    for reader in self.readers:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers + augmentation_layers)

def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param

    # initialise input image readers
    if self.is_training:
        reader_names = ('image', 'label', 'weight_map', 'sampler')
    elif self.is_inference:
        # in the inference process use `image` input only
        reader_names = ('image',)
    elif self.is_evaluation:
        reader_names = ('image', 'label', 'inferred')
    else:
        tf.logging.fatal(
            'Action `%s` not supported. Expected one of %s',
            self.action, self.SUPPORTED_PHASES)
        raise ValueError
    try:
        reader_phase = self.action_param.dataset_to_infer
    except AttributeError:
        reader_phase = None
    file_lists = data_partitioner.get_file_lists_by(
        phase=reader_phase, action=self.action)
    self.readers = [
        ImageReader(reader_names).initialise(
            data_param, task_param, file_list)
        for file_list in file_lists]

    # initialise input preprocessing layers
    foreground_masking_layer = BinaryMaskingLayer(
        type_str=self.net_param.foreground_type,
        multimod_fusion=self.net_param.multimod_foreground_type,
        threshold=0.0) \
        if self.net_param.normalise_foreground_only else None

    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image',
        binary_masking_func=foreground_masking_layer) \
        if self.net_param.whitening else None

    percentile_normaliser = PercentileNormalisationLayer(
        image_name='image',
        binary_masking_func=foreground_masking_layer,
        cutoff=self.net_param.cutoff) \
        if self.net_param.percentile_normalisation else None

    histogram_normaliser = HistogramNormalisationLayer(
        image_name='image',
        modalities=vars(task_param).get('image'),
        model_filename=self.net_param.histogram_ref_file,
        binary_masking_func=foreground_masking_layer,
        norm_type=self.net_param.norm_type,
        cutoff=self.net_param.cutoff,
        name='hist_norm_layer') \
        if (self.net_param.histogram_ref_file and
            self.net_param.normalisation) else None

    label_normalisers = None
    if self.net_param.histogram_ref_file and \
            task_param.label_normalisation:
        label_normalisers = [DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)]
        if self.is_evaluation:
            label_normalisers.append(DiscreteLabelNormalisationLayer(
                image_name='inferred',
                modalities=vars(task_param).get('inferred'),
                model_filename=self.net_param.histogram_ref_file))
            label_normalisers[-1].key = label_normalisers[0].key

    normalisation_layers = []
    if histogram_normaliser is not None:
        normalisation_layers.append(histogram_normaliser)
    if mean_var_normaliser is not None:
        normalisation_layers.append(mean_var_normaliser)
    if percentile_normaliser is not None:
        normalisation_layers.append(percentile_normaliser)
    if task_param.label_normalisation and \
            (self.is_training or not task_param.output_prob):
        normalisation_layers.extend(label_normalisers)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size,
            mode=self.net_param.volume_padding_mode))

    # initialise training data augmentation layers
    augmentation_layers = []
    if self.is_training:
        train_param = self.action_param
        if train_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=train_param.random_flipping_axes))
        if train_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=train_param.scaling_percentage[0],
                max_percentage=train_param.scaling_percentage[1],
                antialiasing=train_param.antialiasing))
        if train_param.rotation_angle or \
                train_param.rotation_angle_x or \
                train_param.rotation_angle_y or \
                train_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if train_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    train_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    train_param.rotation_angle_x,
                    train_param.rotation_angle_y,
                    train_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)
        if train_param.do_elastic_deformation:
            spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
            augmentation_layers.append(RandomElasticDeformationLayer(
                spatial_rank=spatial_rank,
                num_controlpoints=train_param.num_ctrl_points,
                std_deformation_sigma=train_param.deformation_sigma,
                proportion_to_augment=train_param.proportion_to_deform))

    # only add augmentation to first reader (not validation reader)
    self.readers[0].add_preprocessing_layers(
        volume_padding_layer + normalisation_layers + augmentation_layers)
    for reader in self.readers[1:]:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers)

def initialise_dataset_loader(self, data_param=None, task_param=None,
                              data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param

    # initialise input image readers
    if self.is_training:
        reader_names = ('image', 'label', 'weight', 'sampler')
    elif self.is_inference:
        # in the inference process use `image` input only
        reader_names = ('image',)
    elif self.is_evaluation:
        reader_names = ('image', 'label', 'inferred')
    else:
        tf.logging.fatal(
            'Action `%s` not supported. Expected one of %s',
            self.action, self.SUPPORTED_PHASES)
        raise ValueError
    try:
        reader_phase = self.action_param.dataset_to_infer
    except AttributeError:
        reader_phase = None
    file_lists = data_partitioner.get_file_lists_by(
        phase=reader_phase, action=self.action)
    self.readers = [
        ImageReader(reader_names).initialise(
            data_param, task_param, file_list)
        for file_list in file_lists]

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)

    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image',
        binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')
    label_normaliser = None
    if self.net_param.histogram_ref_file:
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation and \
            (self.is_training or not task_param.output_prob):
        normalisation_layers.append(label_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle or \
                self.action_param.rotation_angle_x or \
                self.action_param.rotation_angle_y or \
                self.action_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if self.action_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    self.action_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    self.action_param.rotation_angle_x,
                    self.action_param.rotation_angle_y,
                    self.action_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)
        if self.action_param.bias_field_range:
            bias_field_layer = RandomBiasFieldLayer()
            bias_field_layer.init_order(self.action_param.bf_order)
            bias_field_layer.init_uniform_coeff(
                self.action_param.bias_field_range)
            augmentation_layers.append(bias_field_layer)

    volume_padding_layer = [PadLayer(
        image_name=SUPPORTED_INPUT,
        border=self.net_param.volume_padding_size,
        mode=self.net_param.volume_padding_mode,
        pad_to=self.net_param.volume_padding_to_size)]

    # only add augmentation to first reader (not validation reader)
    self.readers[0].add_preprocessing_layers(
        volume_padding_layer + normalisation_layers + augmentation_layers)
    for reader in self.readers[1:]:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers)

        'interp_order': 0
    },
}

# This option is to read 2D slices
# data_param = {
#     'MR': {'path_to_search': './sample_images',
#            'spatial_window_size': (48, 48, 41),
#            'filename_contains': ['middleSlice', 'mhd'],
#            'pixdim': (1, 1), 'axcodes': ['L', 'P', 'S'],
#            'interp_order': 3},
#     'sampler': {'path_to_search': './sample_images',
#                 'spatial_window_size': (48, 48, 1),
#                 'filename_contains': ['middleLabels', 'mhd'],
#                 'pixdim': (1, 1), 'axcodes': ['L', 'P', 'S'],
#                 'interp_order': 0},
# }

# Create image reader and add padding layer with 'constant' value 0.
# This will ensure that samples are taken even at the first and last slices.
reader = ImageReader().initialise(data_param)
reader.add_preprocessing_layers(
    PadLayer(image_name=['MR', 'sampler'],
             border=(20, 20, 20),
             mode='constant'))
_, img, _ = reader(idx=0)

# Create samplers with window_size
weighted_sampler = WeightedSampler(reader, window_sizes=(48, 48, 48))
balanced_sampler = BalancedSampler(reader, window_sizes=(48, 48, 48))
uniform_sampler = UniformSampler(reader, window_sizes=(48, 48, 48))

# Generate N samples for each type
N = 30

import tensorflow as tf

# adding the tensorflow tensors
next_window = weighted_sampler.pop_batch_op()

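# Hedged continuation (not part of the original snippet): draw the N
# windows by evaluating the sampler op in a TF1 session. Depending on
# the NiftyNet version, the sampler's tf.data iterator may need
# initialising first; `weighted_sampler.iterator.initializer` and the
# 'MR_location' output key are assumptions here.
with tf.Session() as sess:
    sess.run(weighted_sampler.iterator.initializer)
    for _ in range(N):
        window = sess.run(next_window)
        # 'MR' holds the sampled image window; 'MR_location' the subject
        # index plus the window's corner coordinates
        print(window['MR'].shape, window['MR_location'])
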
def initialise_dataset_loader(self, data_param=None, task_param=None,
                              data_partitioner=None):
    self.data_param = data_param
    self.regression_param = task_param

    # initialise input image readers
    if self.is_training:
        reader_names = ('image', 'output', 'weight', 'sampler')
    elif self.is_inference:
        # in the inference process use `image` input only
        reader_names = ('image',)
    elif self.is_evaluation:
        reader_names = ('image', 'output', 'inferred')
    else:
        tf.logging.fatal(
            'Action `%s` not supported. Expected one of %s',
            self.action, self.SUPPORTED_PHASES)
        raise ValueError
    try:
        reader_phase = self.action_param.dataset_to_infer
    except AttributeError:
        reader_phase = None
    file_lists = data_partitioner.get_file_lists_by(
        phase=reader_phase, action=self.action)
    self.readers = [
        ImageReader(reader_names).initialise(
            data_param, task_param, file_list)
        for file_list in file_lists]

    # initialise input preprocessing layers
    mean_var_normaliser = MeanVarNormalisationLayer(image_name='image') \
        if self.net_param.whitening else None
    histogram_normaliser = HistogramNormalisationLayer(
        image_name='image',
        modalities=vars(task_param).get('image'),
        model_filename=self.net_param.histogram_ref_file,
        norm_type=self.net_param.norm_type,
        cutoff=self.net_param.cutoff,
        name='hist_norm_layer') \
        if (self.net_param.histogram_ref_file and
            self.net_param.normalisation) else None

    normalisation_layers = []
    if histogram_normaliser is not None:
        normalisation_layers.append(histogram_normaliser)
    if mean_var_normaliser is not None:
        normalisation_layers.append(mean_var_normaliser)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size,
            mode=self.net_param.volume_padding_mode))

    # initialise training data augmentation layers
    augmentation_layers = []
    if self.is_training:
        train_param = self.action_param
        if train_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=train_param.random_flipping_axes))
        if train_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=train_param.scaling_percentage[0],
                max_percentage=train_param.scaling_percentage[1]))
        if train_param.rotation_angle:
            rotation_layer = RandomRotationLayer()
            rotation_layer.init_uniform_angle(train_param.rotation_angle)
            augmentation_layers.append(rotation_layer)
        if train_param.do_elastic_deformation:
            spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
            augmentation_layers.append(RandomElasticDeformationLayer(
                spatial_rank=spatial_rank,
                num_controlpoints=train_param.num_ctrl_points,
                std_deformation_sigma=train_param.deformation_sigma,
                proportion_to_augment=train_param.proportion_to_deform))

    # only add augmentation to first reader (not validation reader)
    self.readers[0].add_preprocessing_layers(
        volume_padding_layer + normalisation_layers + augmentation_layers)
    for reader in self.readers[1:]:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers)

# config parameters
###
spatial_window_size = (100, 100)
border = (12, 12)
volume_padding_size = (50, 50)
data_param = \
    {'MR': {
        'path_to_search': '~/Desktop/useful_scripts/visualise_windows',
        'filename_contains': 'example.png'}}
###
# create an image reader
###
reader = ImageReader().initialise(data_param)
reader.add_preprocessing_layers(
    # add volume padding layer
    [PadLayer(image_name=['MR'],
              border=volume_padding_size,
              mode='constant')])
###
# show 'volume' -- without window sampling
###
image_2d = ImageWindowDataset(reader)()['MR'][0, :, :, 0, 0, 0]
vis_coordinates(image_2d, saving_name='output/image.png')
###
# create & show uniform random samples
###
uniform_sampler = UniformSampler(reader,
                                 spatial_window_size,
                                 windows_per_image=100)
next_window = uniform_sampler.pop_batch_op()
coords = []

def initialise_dataset_loader(self, data_param=None, task_param=None,
                              data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param
    file_lists = self.get_file_lists(data_partitioner)

    # read each line of csv files into an instance of Subject
    if self.is_training:
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader({'image', 'label', 'weight', 'sampler'})
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    elif self.is_inference:
        # in the inference process use image input only
        inference_reader = ImageReader({'image'})
        file_list = pd.concat([
            data_partitioner.inference_files,
            data_partitioner.validation_files], axis=0)
        file_list.index = range(file_list.shape[0])
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]
    elif self.is_evaluation:
        file_list = data_partitioner.inference_files
        reader = ImageReader({'image', 'label', 'inferred'})
        reader.initialise(data_param, task_param, file_list)
        self.readers = [reader]
    else:
        raise ValueError(
            'Action `{}` not supported. Expected one of {}'.format(
                self.action, self.SUPPORTED_ACTIONS))

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)

    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image',
        binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')

    label_normalisers = None
    if self.net_param.histogram_ref_file and \
            task_param.label_normalisation:
        label_normalisers = [DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)]
        if self.is_evaluation:
            label_normalisers.append(DiscreteLabelNormalisationLayer(
                image_name='inferred',
                modalities=vars(task_param).get('inferred'),
                model_filename=self.net_param.histogram_ref_file))
            label_normalisers[-1].key = label_normalisers[0].key

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation and \
            (self.is_training or not task_param.output_prob):
        normalisation_layers.extend(label_normalisers)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle or \
                self.action_param.rotation_angle_x or \
                self.action_param.rotation_angle_y or \
                self.action_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if self.action_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    self.action_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    self.action_param.rotation_angle_x,
                    self.action_param.rotation_angle_y,
                    self.action_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)
        # add deformation layer
        if self.action_param.do_elastic_deformation:
            spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
            augmentation_layers.append(RandomElasticDeformationLayer(
                spatial_rank=spatial_rank,
                num_controlpoints=self.action_param.num_ctrl_points,
                std_deformation_sigma=self.action_param.deformation_sigma,
                proportion_to_augment=self.action_param.proportion_to_deform))

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))

    # only add augmentation to first reader (not validation reader)
    self.readers[0].add_preprocessing_layers(
        volume_padding_layer + normalisation_layers + augmentation_layers)
    for reader in self.readers[1:]:
        reader.add_preprocessing_layers(
            volume_padding_layer + normalisation_layers)

reader.add_preprocessing_layers(
    MeanVarNormalisationLayer(image_name='MR',
                              binary_masking_func=binary_masking_func))
_, vol, _ = reader(idx=idx)
plot_slices(vol['MR'], vol['CT'], vol['LABELS'], idx,
            title='Normalized MR image')
# sitk.WriteImage(sitk.GetImageFromArray(vol['MR'][:, :, :, 0, 0]),
#                 './whitenned.mhd')

# Use border as half of spatial_window_size, so the NN can learn more
# from the borders.
# Use constant mode padding; I have modified niftynet.layers.pad to use
# constant_values=np.amin(image)
reader.add_preprocessing_layers(
    PadLayer(image_name=['MR', 'CT', 'LABELS'],
             border=(24, 24, 48),
             mode='constant'))
_, vol, _ = reader(idx=idx)
plot_slices(vol['MR'], vol['CT'], vol['LABELS'], idx,
            title='Padded images')
# sitk.WriteImage(sitk.GetImageFromArray(vol['CT'][:, :, :, 0, 0]),
#                 './padded.mhd')
print(vol['MR'].shape)

# Add rotation layer
rotation_layer = Rotate()
rotation_layer.init_uniform_angle(
    [-10.0, 10.0])  # randomly rotate between -10 and 10 degrees
reader.add_preprocessing_layers([rotation_layer])
_, vol, _ = reader(idx=idx)
plot_slices(vol['MR'], vol['CT'], vol['LABELS'], idx,
            title='Rotated images')
# sitk.WriteImage(sitk.GetImageFromArray(vol['CT'][:, :, :, 0, 0]),
#                 './rotated.mhd')