def get_train_transform(patch_size):
    """Build the (mild) training augmentation pipeline.

    :param patch_size: spatial shape of the network input patch
    :return: Compose of spatial, mirror, brightness, gamma and noise transforms
    """
    def _sym_rad(deg):
        # symmetric +-deg degree rotation interval, expressed in radians
        return (-deg / 360. * 2 * np.pi, deg / 360. * 2 * np.pi)

    spatial = SpatialTransform_2(
        patch_size,
        [i // 2 for i in patch_size],  # crop center may wander up to half the patch
        do_elastic_deform=True,
        deformation_scale=(0, 0.05),
        do_rotation=True,
        angle_x=_sym_rad(5),
        angle_y=_sym_rad(5),
        angle_z=_sym_rad(5),
        do_scale=True,
        scale=(0.75, 1.25),
        border_mode_data='constant',
        border_cval_data=-2.34,  # pad value for the (normalized) image data
        border_mode_seg='constant',
        border_cval_seg=0)

    pipeline = [
        spatial,
        MirrorTransform(axes=(0, 1, 2)),
        BrightnessMultiplicativeTransform((0.7, 1.5), per_channel=True,
                                          p_per_sample=0.15),
        GammaTransform(gamma_range=(0.5, 2), invert_image=True,
                       per_channel=True, p_per_sample=0.15),
        GaussianNoiseTransform(noise_variance=(0, 0.15), p_per_sample=0.15),
    ]
    return Compose(pipeline)
def get_train_transform(patch_size):
    """
    data augmentation for training data, inspired by:
    https://github.com/MIC-DKFZ/batchgenerators/blob/master/batchgenerators/examples/brats2017/brats2017_dataloader_3D.py
    :param patch_size: shape of network's input
    :return list of transformations
    """
    def _deg_interval(deg):
        # symmetric +-deg degree interval converted to radians
        return (-deg / 360 * 2 * np.pi, deg / 360 * 2 * np.pi)

    spatial = SpatialTransform_2(
        patch_size,
        (10, 10, 10),
        do_elastic_deform=True,
        deformation_scale=(0, 0.25),
        do_rotation=True,
        angle_z=_deg_interval(15),  # in-plane rotation only
        angle_x=(0, 0),
        angle_y=(0, 0),
        do_scale=True,
        scale=(0.75, 1.25),
        border_mode_data='constant',
        border_cval_data=0,
        border_mode_seg='constant',
        border_cval_seg=0,
        order_seg=1,
        random_crop=False,
        p_el_per_sample=0.2,
        p_rot_per_sample=0.2,
        p_scale_per_sample=0.2,
    )

    pipeline = [
        spatial,
        MirrorTransform(axes=(0, 1)),
        BrightnessMultiplicativeTransform((0.7, 1.5), per_channel=True,
                                          p_per_sample=0.2),
        GammaTransform(gamma_range=(0.2, 1.0), invert_image=False,
                       per_channel=False, p_per_sample=0.2),
        GaussianNoiseTransform(noise_variance=(0, 0.05), p_per_sample=0.2),
        # NOTE(review): p_per_channel=0.0 means the blur is never applied to any
        # channel, so this transform is effectively a no-op — confirm intent.
        GaussianBlurTransform(blur_sigma=(0.2, 1.0),
                              different_sigma_per_channel=False,
                              p_per_channel=0.0,
                              p_per_sample=0.2),
    ]
    return Compose(pipeline)
def get_train_transform(patch_size):
    """Assemble the example BraTS-style training augmentation pipeline.

    :param patch_size: spatial shape the SpatialTransform_2 crops/deforms to
    :return: Compose of spatial, mirror, gamma and noise transforms
    """
    # we now create a list of transforms. These are not necessarily the best transforms to use for BraTS, this is just
    # to showcase some things
    tr_transforms = []

    # the first thing we want to run is the SpatialTransform. It reduces the size of our data to patch_size and thus
    # also reduces the computational cost of all subsequent operations. All subsequent operations do not modify the
    # shape and do not transform spatially, so no border artifacts will be introduced
    # Here we use the new SpatialTransform_2 which uses a new way of parameterizing elastic_deform
    # We use all spatial transformations with a probability of 0.1 per sample. This means that
    # 1 - (1 - 0.1) ** 3 = 27% of samples will be augmented, the rest will just be cropped
    tr_transforms.append(
        SpatialTransform_2(
            patch_size,
            [i // 2 for i in patch_size],  # crop center may deviate up to half the patch size
            do_elastic_deform=True, deformation_scale=(0, 0.25),
            do_rotation=True,
            angle_x=(-15 / 360. * 2 * np.pi, 15 / 360. * 2 * np.pi),  # +-15 degrees, in radians
            angle_y=(-15 / 360. * 2 * np.pi, 15 / 360. * 2 * np.pi),
            angle_z=(-15 / 360. * 2 * np.pi, 15 / 360. * 2 * np.pi),
            do_scale=True, scale=(0.75, 1.25),
            border_mode_data='constant', border_cval_data=0,
            border_mode_seg='constant', border_cval_seg=0,
            order_seg=1, order_data=3,
            random_crop=True,
            p_el_per_sample=0.1, p_rot_per_sample=0.1, p_scale_per_sample=0.1))

    # now we mirror along all axes
    tr_transforms.append(MirrorTransform(axes=(0, 1, 2)))

    # gamma transform. This is a nonlinear transformation of intensity values
    # (https://en.wikipedia.org/wiki/Gamma_correction)
    tr_transforms.append(
        GammaTransform(gamma_range=(0.5, 2), invert_image=False,
                       per_channel=True, p_per_sample=0.15))
    # we can also invert the image, apply the transform and then invert back
    tr_transforms.append(
        GammaTransform(gamma_range=(0.5, 2), invert_image=True,
                       per_channel=True, p_per_sample=0.15))

    # Gaussian Noise
    tr_transforms.append(
        GaussianNoiseTransform(noise_variance=(0, 0.05), p_per_sample=0.15))

    # now we compose these transforms together
    tr_transforms = Compose(tr_transforms)
    return tr_transforms
def _augment_data(self, batch_generator, type=None):
    """Wrap `batch_generator` in a MultiThreadedAugmenter with the configured transforms.

    :param batch_generator: upstream generator yielding dicts with "data" and "seg"
    :param type: "train" enables the full augmentation stack; anything else only gets
                 normalization + tensor conversion
    :return: MultiThreadedAugmenter producing augmented, torch-ready batches
    """
    if self.Config.DATA_AUGMENTATION:
        num_processes = 16  # 2D: 8 is a bit faster than 16
        # num_processes = 8
    else:
        num_processes = 6

    tfs = []  # transforms
    if self.Config.NORMALIZE_DATA:
        tfs.append(ZeroMeanUnitVarianceTransform(per_channel=self.Config.NORMALIZE_PER_CHANNEL))

    if self.Config.DATA_AUGMENTATION:
        if type == "train":
            # scale: inverted: 0.5 -> bigger; 2 -> smaller
            # patch_center_dist_from_border: if 144/2=72 -> always exactly centered;
            # otherwise a bit off center (brain can get off image and will be cut then)
            if self.Config.DAUG_SCALE:
                center_dist_from_border = int(self.Config.INPUT_DIM[0] / 2.) - 10  # (144,144) -> 62
                tfs.append(SpatialTransform(self.Config.INPUT_DIM,
                                            patch_center_dist_from_border=center_dist_from_border,
                                            do_elastic_deform=self.Config.DAUG_ELASTIC_DEFORM,
                                            alpha=(90., 120.), sigma=(9., 11.),
                                            do_rotation=self.Config.DAUG_ROTATE,
                                            # rotation ranges are in radians (~+-46 degrees)
                                            angle_x=(-0.8, 0.8), angle_y=(-0.8, 0.8), angle_z=(-0.8, 0.8),
                                            do_scale=True, scale=(0.9, 1.5),
                                            border_mode_data='constant', border_cval_data=0, order_data=3,
                                            border_mode_seg='constant', border_cval_seg=0, order_seg=0,
                                            random_crop=True,
                                            p_el_per_sample=0.2, p_rot_per_sample=0.2,
                                            p_scale_per_sample=0.2))
            if self.Config.DAUG_RESAMPLE:
                tfs.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), p_per_sample=0.2))
            if self.Config.DAUG_NOISE:
                tfs.append(GaussianNoiseTransform(noise_variance=(0, 0.05), p_per_sample=0.2))
            if self.Config.DAUG_MIRROR:
                tfs.append(MirrorTransform())
            if self.Config.DAUG_FLIP_PEAKS:
                tfs.append(FlipVectorAxisTransform())

    # always convert to torch tensors at the end of the pipeline
    tfs.append(NumpyToTensor(keys=["data", "seg"], cast_to="float"))

    # num_cached_per_queue 1 or 2 does not really make a difference
    batch_gen = MultiThreadedAugmenter(batch_generator, Compose(tfs),
                                       num_processes=num_processes,
                                       num_cached_per_queue=1, seeds=None,
                                       pin_memory=True)
    return batch_gen  # data: (batch_size, channels, x, y), seg: (batch_size, channels, x, y)
def _augment_data(self, batch_generator, type=None):
    """Wrap `batch_generator` in a MultiThreadedAugmenter, choosing the spatial
    transform class and augmentation parameters from self.Config.

    :param batch_generator: upstream generator yielding dicts with "data" and "seg"
    :param type: "train" enables the full augmentation stack; anything else only gets
                 normalization + tensor conversion
    :return: MultiThreadedAugmenter producing augmented, torch-ready batches
    """
    if self.Config.DATA_AUGMENTATION:
        num_processes = 15  # 15 is a bit faster than 8 on cluster
        # num_processes = multiprocessing.cpu_count()  # on cluster: gives all cores, not only assigned cores
    else:
        num_processes = 6

    tfs = []
    if self.Config.NORMALIZE_DATA:
        tfs.append(ZeroMeanUnitVarianceTransform(per_channel=self.Config.NORMALIZE_PER_CHANNEL))

    # select the spatial transform implementation by config name
    if self.Config.SPATIAL_TRANSFORM == "SpatialTransformPeaks":
        SpatialTransformUsed = SpatialTransformPeaks
    elif self.Config.SPATIAL_TRANSFORM == "SpatialTransformCustom":
        SpatialTransformUsed = SpatialTransformCustom
    else:
        SpatialTransformUsed = SpatialTransform

    if self.Config.DATA_AUGMENTATION:
        if type == "train":
            # patch_center_dist_from_border:
            # if 144/2=72 -> always exactly centered; otherwise a bit off center
            # (brain can get off image and will be cut then)
            if self.Config.DAUG_SCALE:
                if self.Config.INPUT_RESCALING:
                    # fixed scale factor derived from target resolution string, e.g. "1.25mm"
                    source_mm = 2  # for bb
                    target_mm = float(self.Config.RESOLUTION[:-2])
                    scale_factor = target_mm / source_mm
                    scale = (scale_factor, scale_factor)
                else:
                    scale = (0.9, 1.5)

                if self.Config.PAD_TO_SQUARE:
                    patch_size = self.Config.INPUT_DIM
                else:
                    patch_size = None  # keeps dimensions of the data
                    # spatial transform automatically crops/pads to correct size

                center_dist_from_border = int(self.Config.INPUT_DIM[0] / 2.) - 10  # (144,144) -> 62
                tfs.append(SpatialTransformUsed(patch_size,
                                                patch_center_dist_from_border=center_dist_from_border,
                                                do_elastic_deform=self.Config.DAUG_ELASTIC_DEFORM,
                                                alpha=self.Config.DAUG_ALPHA,
                                                sigma=self.Config.DAUG_SIGMA,
                                                do_rotation=self.Config.DAUG_ROTATE,
                                                angle_x=self.Config.DAUG_ROTATE_ANGLE,
                                                angle_y=self.Config.DAUG_ROTATE_ANGLE,
                                                angle_z=self.Config.DAUG_ROTATE_ANGLE,
                                                do_scale=True, scale=scale,
                                                border_mode_data='constant',
                                                border_cval_data=0,
                                                order_data=3,
                                                border_mode_seg='constant',
                                                border_cval_seg=0,
                                                order_seg=0,
                                                random_crop=True,
                                                p_el_per_sample=self.Config.P_SAMP,
                                                p_rot_per_sample=self.Config.P_SAMP,
                                                p_scale_per_sample=self.Config.P_SAMP))
            if self.Config.DAUG_RESAMPLE:
                tfs.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), p_per_sample=0.2,
                                                          per_channel=False))
            if self.Config.DAUG_RESAMPLE_LEGACY:
                tfs.append(ResampleTransformLegacy(zoom_range=(0.5, 1)))
            if self.Config.DAUG_GAUSSIAN_BLUR:
                tfs.append(GaussianBlurTransform(blur_sigma=self.Config.DAUG_BLUR_SIGMA,
                                                 different_sigma_per_channel=False,
                                                 p_per_sample=self.Config.P_SAMP))
            if self.Config.DAUG_NOISE:
                tfs.append(GaussianNoiseTransform(noise_variance=self.Config.DAUG_NOISE_VARIANCE,
                                                  p_per_sample=self.Config.P_SAMP))
            if self.Config.DAUG_MIRROR:
                tfs.append(MirrorTransform())
            if self.Config.DAUG_FLIP_PEAKS:
                tfs.append(FlipVectorAxisTransform())

    # always convert to torch tensors at the end of the pipeline
    tfs.append(NumpyToTensor(keys=["data", "seg"], cast_to="float"))

    # num_cached_per_queue 1 or 2 does not really make a difference
    batch_gen = MultiThreadedAugmenter(batch_generator, Compose(tfs),
                                       num_processes=num_processes,
                                       num_cached_per_queue=1, seeds=None,
                                       pin_memory=True)
    return batch_gen  # data: (batch_size, channels, x, y), seg: (batch_size, channels, x, y)
def _make_training_transforms(self):
    """Build the list of training-time augmentation transforms.

    All parameters are read from self.training_augmentation_args with sensible
    defaults; returns [] when self.no_data_augmentation is set.

    :return: list of batchgenerators transforms (not yet composed)
    """
    if self.no_data_augmentation:
        print("No data augmentation will be performed during training!")
        return []

    patch_size = self.patch_size[::-1]  # (x, y, z) order
    # NOTE(review): `patch_size // 2` below requires self.patch_size to support
    # elementwise integer division (e.g. a numpy array, not a plain list) — confirm.
    rot_angle_x = self.training_augmentation_args.get('angle_x', 15)
    rot_angle_y = self.training_augmentation_args.get('angle_y', 15)
    rot_angle_z = self.training_augmentation_args.get('angle_z', 15)
    p_per_sample = self.training_augmentation_args.get(
        'p_per_sample', 0.15)

    train_transforms = [
        SpatialTransform_2(
            patch_size, patch_size // 2,
            do_elastic_deform=self.training_augmentation_args.get(
                'do_elastic_deform', True),
            deformation_scale=self.training_augmentation_args.get(
                'deformation_scale', (0, 0.25)),
            do_rotation=self.training_augmentation_args.get(
                'do_rotation', True),
            # angles are configured in degrees and converted to radians here
            angle_x=(-rot_angle_x / 360. * 2 * np.pi,
                     rot_angle_x / 360. * 2 * np.pi),
            angle_y=(-rot_angle_y / 360. * 2 * np.pi,
                     rot_angle_y / 360. * 2 * np.pi),
            angle_z=(-rot_angle_z / 360. * 2 * np.pi,
                     rot_angle_z / 360. * 2 * np.pi),
            do_scale=self.training_augmentation_args.get('do_scale', True),
            scale=self.training_augmentation_args.get(
                'scale', (0.75, 1.25)),
            border_mode_data='nearest', border_cval_data=0, order_data=3,
            # border_mode_seg='nearest', border_cval_seg=0,
            # order_seg=0,
            random_crop=False,
            p_el_per_sample=self.training_augmentation_args.get(
                'p_el_per_sample', 0.5),
            p_rot_per_sample=self.training_augmentation_args.get(
                'p_rot_per_sample', 0.5),
            p_scale_per_sample=self.training_augmentation_args.get(
                'p_scale_per_sample', 0.5))
    ]

    if self.training_augmentation_args.get("do_mirror", False):
        train_transforms.append(MirrorTransform(axes=(0, 1, 2)))

    train_transforms.append(
        BrightnessMultiplicativeTransform(
            self.training_augmentation_args.get('brightness_range',
                                                (0.7, 1.5)),
            per_channel=True,
            p_per_sample=p_per_sample))
    train_transforms.append(
        GaussianNoiseTransform(
            noise_variance=self.training_augmentation_args.get(
                'gaussian_noise_variance', (0, 0.05)),
            p_per_sample=p_per_sample))
    train_transforms.append(
        GammaTransform(gamma_range=self.training_augmentation_args.get(
            'gamma_range', (0.5, 2)),
                       invert_image=False,
                       per_channel=True,
                       p_per_sample=p_per_sample))

    print("train_transforms\n", train_transforms)
    return train_transforms
def get_insaneDA_augmentation(dataloader_train,
                              dataloader_val,
                              patch_size,
                              params=default_3D_augmentation_params,
                              border_val_seg=-1,
                              seeds_train=None,
                              seeds_val=None,
                              order_seg=1,
                              order_data=3,
                              deep_supervision_scales=None,
                              soft_ds=False,
                              classes=None,
                              pin_memory=True):
    """Wrap train/val dataloaders with aggressive ("insane") augmentation pipelines.

    :param dataloader_train: batch generator yielding {'data', 'seg'} dicts for training
    :param dataloader_val: batch generator for validation (lightweight transforms only)
    :param patch_size: target spatial size for the SpatialTransform
    :param params: augmentation hyperparameter dict (see default_3D_augmentation_params)
    :param border_val_seg: fill value for seg outside the original image
    :param seeds_train: per-worker seeds for the training augmenter (or None)
    :param seeds_val: per-worker seeds for the validation augmenter (or None)
    :param order_seg: interpolation order for seg resampling
    :param order_data: interpolation order for data resampling
    :param deep_supervision_scales: if not None, seg is downsampled for deep supervision
    :param soft_ds: use soft (class-aware) downsampling; requires `classes`
    :param classes: class labels for soft deep supervision
    :param pin_memory: pin output tensors for faster host->GPU transfer
    :return: (train_augmenter, val_augmenter) MultiThreadedAugmenter pair
    """
    assert params.get(
        'mirror') is None, "old version of params, use new keyword do_mirror"

    tr_transforms = []

    if params.get("selected_data_channels") is not None:
        tr_transforms.append(
            DataChannelSelectionTransform(
                params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(
            SegChannelSelectionTransform(params.get("selected_seg_channels")))

    # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        ignore_axes = (0, )
        tr_transforms.append(Convert3DTo2DTransform())
    else:
        ignore_axes = None

    tr_transforms.append(
        SpatialTransform(patch_size,
                         patch_center_dist_from_border=None,
                         do_elastic_deform=params.get("do_elastic"),
                         alpha=params.get("elastic_deform_alpha"),
                         sigma=params.get("elastic_deform_sigma"),
                         do_rotation=params.get("do_rotation"),
                         angle_x=params.get("rotation_x"),
                         angle_y=params.get("rotation_y"),
                         angle_z=params.get("rotation_z"),
                         do_scale=params.get("do_scaling"),
                         scale=params.get("scale_range"),
                         border_mode_data=params.get("border_mode_data"),
                         border_cval_data=0,
                         order_data=order_data,
                         border_mode_seg="constant",
                         border_cval_seg=border_val_seg,
                         order_seg=order_seg,
                         random_crop=params.get("random_crop"),
                         p_el_per_sample=params.get("p_eldef"),
                         p_scale_per_sample=params.get("p_scale"),
                         p_rot_per_sample=params.get("p_rot"),
                         independent_scale_for_each_axis=params.get(
                             "independent_scale_factor_for_each_axis")))
    if params.get("dummy_2D"):
        tr_transforms.append(Convert2DTo3DTransform())

    # we need to put the color augmentations after the dummy 2d part (if applicable).
    # Otherwise the overloaded color channel gets in the way
    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15))
    tr_transforms.append(
        GaussianBlurTransform((0.5, 1.5),
                              different_sigma_per_channel=True,
                              p_per_sample=0.2,
                              p_per_channel=0.5))
    tr_transforms.append(
        BrightnessMultiplicativeTransform(multiplier_range=(0.70, 1.3),
                                          p_per_sample=0.15))
    tr_transforms.append(
        ContrastAugmentationTransform(contrast_range=(0.65, 1.5),
                                      p_per_sample=0.15))
    tr_transforms.append(
        SimulateLowResolutionTransform(zoom_range=(0.5, 1),
                                       per_channel=True,
                                       p_per_channel=0.5,
                                       order_downsample=0,
                                       order_upsample=3,
                                       p_per_sample=0.25,
                                       ignore_axes=ignore_axes))
    # inverted gamma (always applied with p=0.15)
    tr_transforms.append(
        GammaTransform(params.get("gamma_range"),
                       True,
                       True,
                       retain_stats=params.get("gamma_retain_stats"),
                       p_per_sample=0.15))
    # regular gamma, only when enabled
    if params.get("do_gamma"):
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"),
                           False,
                           True,
                           retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=params["p_gamma"]))

    if params.get("do_mirror") or params.get("mirror"):
        tr_transforms.append(MirrorTransform(params.get("mirror_axes")))

    if params.get("mask_was_used_for_normalization") is not None:
        mask_was_used_for_normalization = params.get(
            "mask_was_used_for_normalization")
        tr_transforms.append(
            MaskTransform(mask_was_used_for_normalization,
                          mask_idx_in_seg=0,
                          set_outside_to=0))

    tr_transforms.append(RemoveLabelTransform(-1, 0))

    if params.get("move_last_seg_chanel_to_data") is not None and params.get(
            "move_last_seg_chanel_to_data"):
        tr_transforms.append(
            MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"),
                                  'seg', 'data'))
        # BUGFIX: guard previously read `... and not None and ...`; `not None` is
        # always True, so it degenerated to a plain truthiness test. Use `is not None`.
        if params.get("cascade_do_cascade_augmentations"
                      ) is not None and params.get(
                          "cascade_do_cascade_augmentations"):
            if params.get("cascade_random_binary_transform_p") > 0:
                tr_transforms.append(
                    ApplyRandomBinaryOperatorTransform(
                        channel_idx=list(
                            range(-len(params.get("all_segmentation_labels")),
                                  0)),
                        p_per_sample=params.get(
                            "cascade_random_binary_transform_p"),
                        key="data",
                        strel_size=params.get(
                            "cascade_random_binary_transform_size")))
            if params.get("cascade_remove_conn_comp_p") > 0:
                tr_transforms.append(
                    RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                        channel_idx=list(
                            range(-len(params.get("all_segmentation_labels")),
                                  0)),
                        key="data",
                        p_per_sample=params.get("cascade_remove_conn_comp_p"),
                        # BUGFIX: these two kwargs previously received each
                        # other's params (names were crossed).
                        fill_with_other_class_p=params.get(
                            "cascade_remove_conn_comp_fill_with_other_class_p"
                        ),
                        dont_do_if_covers_more_than_X_percent=params.get(
                            "cascade_remove_conn_comp_max_size_percent_threshold"
                        )))

    tr_transforms.append(RenameTransform('seg', 'target', True))

    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            tr_transforms.append(
                DownsampleSegForDSTransform3(deep_supervision_scales, 'target',
                                             'target', classes))
        else:
            tr_transforms.append(
                DownsampleSegForDSTransform2(deep_supervision_scales,
                                             0,
                                             0,
                                             input_key='target',
                                             output_key='target'))

    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)

    batchgenerator_train = MultiThreadedAugmenter(
        dataloader_train,
        tr_transforms,
        params.get('num_threads'),
        params.get("num_cached_per_thread"),
        seeds=seeds_train,
        pin_memory=pin_memory)

    # validation: only label cleanup / key handling, no intensity or spatial augmentation
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(
            DataChannelSelectionTransform(
                params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(
            SegChannelSelectionTransform(params.get("selected_seg_channels")))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get(
            "move_last_seg_chanel_to_data"):
        val_transforms.append(
            MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"),
                                  'seg', 'data'))

    val_transforms.append(RenameTransform('seg', 'target', True))

    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            val_transforms.append(
                DownsampleSegForDSTransform3(deep_supervision_scales, 'target',
                                             'target', classes))
        else:
            val_transforms.append(
                DownsampleSegForDSTransform2(deep_supervision_scales,
                                             0,
                                             0,
                                             input_key='target',
                                             output_key='target'))

    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)

    batchgenerator_val = MultiThreadedAugmenter(
        dataloader_val,
        val_transforms,
        max(params.get('num_threads') // 2, 1),
        params.get("num_cached_per_thread"),
        seeds=seeds_val,
        pin_memory=pin_memory)

    return batchgenerator_train, batchgenerator_val
def get_batches(self, batch_size=128, type=None, subjects=None, num_batches=None):
    """Create a multithreaded, optionally augmented batch generator over `subjects`.

    :param batch_size: samples per batch
    :param type: "train" enables the augmentation stack
    :param subjects: list of subject identifiers/paths to draw slices from
    :param num_batches: total batch budget; if None, one epoch's worth is computed
    :return: MultiThreadedAugmenter wrapping the slice batch generator
    """
    data = subjects
    seg = []

    # 6 -> >30GB RAM
    if self.HP.DATA_AUGMENTATION:
        num_processes = 8  # 6 is a bit faster than 16
    else:
        num_processes = 6

    nr_of_samples = len(subjects) * self.HP.INPUT_DIM[0]
    # NOTE(review): num_batches_multithr is computed but never used below — confirm
    # whether it should be passed to the augmenter/generator.
    if num_batches is None:
        num_batches_multithr = int(
            nr_of_samples / batch_size /
            num_processes)  # number of batches for exactly one epoch
    else:
        num_batches_multithr = int(num_batches / num_processes)

    if self.HP.TYPE == "combined":
        # Simple with .npy -> just a little bit faster than Nifti (<10%) and f1 not better => use Nifti
        # batch_gen = SlicesBatchGeneratorRandomNpyImg_fusion((data, seg), batch_size=batch_size)
        batch_gen = SlicesBatchGeneratorRandomNpyImg_fusion(
            (data, seg), batch_size=batch_size)
    else:
        batch_gen = SlicesBatchGeneratorRandomNiftiImg(
            (data, seg), batch_size=batch_size)
        # batch_gen = SlicesBatchGeneratorRandomNiftiImg_5slices((data, seg), batch_size=batch_size)

    batch_gen.HP = self.HP

    tfs = []  # transforms
    if self.HP.NORMALIZE_DATA:
        tfs.append(
            ZeroMeanUnitVarianceTransform(
                per_channel=self.HP.NORMALIZE_PER_CHANNEL))
    if self.HP.DATASET == "Schizo" and self.HP.RESOLUTION == "2mm":
        tfs.append(PadToMultipleTransform(16))

    if self.HP.DATA_AUGMENTATION:
        if type == "train":
            # scale: inverted: 0.5 -> bigger; 2 -> smaller
            # patch_center_dist_from_border: if 144/2=72 -> always exactly centered;
            # otherwise a bit off center (brain can get off image and will be cut then)
            if self.HP.DAUG_SCALE:
                center_dist_from_border = int(
                    self.HP.INPUT_DIM[0] / 2.) - 10  # (144,144) -> 62
                tfs.append(
                    SpatialTransform(
                        self.HP.INPUT_DIM,
                        patch_center_dist_from_border=center_dist_from_border,
                        do_elastic_deform=self.HP.DAUG_ELASTIC_DEFORM,
                        alpha=(90., 120.),
                        sigma=(9., 11.),
                        do_rotation=self.HP.DAUG_ROTATE,
                        # rotation ranges are in radians (~+-46 degrees)
                        angle_x=(-0.8, 0.8),
                        angle_y=(-0.8, 0.8),
                        angle_z=(-0.8, 0.8),
                        do_scale=True,
                        scale=(0.9, 1.5),
                        border_mode_data='constant',
                        border_cval_data=0,
                        order_data=3,
                        border_mode_seg='constant',
                        border_cval_seg=0,
                        order_seg=0,
                        random_crop=True))
            if self.HP.DAUG_RESAMPLE:
                tfs.append(ResampleTransform(zoom_range=(0.5, 1)))
            if self.HP.DAUG_NOISE:
                tfs.append(GaussianNoiseTransform(noise_variance=(0, 0.05)))
            if self.HP.DAUG_MIRROR:
                tfs.append(MirrorTransform())
            if self.HP.DAUG_FLIP_PEAKS:
                tfs.append(FlipVectorAxisTransform())

    # num_cached_per_queue 1 or 2 does not really make a difference
    batch_gen = MultiThreadedAugmenter(batch_gen,
                                       Compose(tfs),
                                       num_processes=num_processes,
                                       num_cached_per_queue=1,
                                       seeds=None)
    return batch_gen  # data: (batch_size, channels, x, y), seg: (batch_size, channels, x, y)
def get_train_transforms(self) -> List[AbstractTransform]:
    """Build the full training augmentation pipeline (spatial, rot90/transpose for
    near-isotropic patches, intensity, cascade and deep-supervision transforms).

    :return: ordered list of transforms, ending in NumpyToTensor
    """
    # used for transpose and rot90: count how many patch axes share each axis' length
    matching_axes = np.array(
        [sum([i == j for j in self.patch_size]) for i in self.patch_size])
    valid_axes = list(np.where(matching_axes == np.max(matching_axes))[0])

    tr_transforms = []

    if self.data_aug_params['selected_seg_channels'] is not None:
        tr_transforms.append(
            SegChannelSelectionTransform(
                self.data_aug_params['selected_seg_channels']))

    # dummy 2D: spatially augment anisotropic 3D data slice-wise
    if self.do_dummy_2D_aug:
        ignore_axes = (0, )
        tr_transforms.append(Convert3DTo2DTransform())
        patch_size_spatial = self.patch_size[1:]
    else:
        patch_size_spatial = self.patch_size
        ignore_axes = None

    tr_transforms.append(
        SpatialTransform(
            patch_size_spatial,
            patch_center_dist_from_border=None,
            do_elastic_deform=False,
            do_rotation=True,
            angle_x=self.data_aug_params["rotation_x"],
            angle_y=self.data_aug_params["rotation_y"],
            angle_z=self.data_aug_params["rotation_z"],
            p_rot_per_axis=0.5,
            do_scale=True,
            scale=self.data_aug_params['scale_range'],
            border_mode_data="constant",
            border_cval_data=0,
            order_data=3,
            border_mode_seg="constant",
            border_cval_seg=-1,
            order_seg=1,
            random_crop=False,
            p_el_per_sample=0.2,
            p_scale_per_sample=0.2,
            p_rot_per_sample=0.4,
            independent_scale_for_each_axis=True,
        ))

    if self.do_dummy_2D_aug:
        tr_transforms.append(Convert2DTo3DTransform())

    # rot90/transpose only make sense when at least two axes have equal length
    if np.any(matching_axes > 1):
        tr_transforms.append(
            Rot90Transform((0, 1, 2, 3),
                           axes=valid_axes,
                           data_key='data',
                           label_key='seg',
                           p_per_sample=0.5), )

    if np.any(matching_axes > 1):
        tr_transforms.append(
            TransposeAxesTransform(valid_axes,
                                   data_key='data',
                                   label_key='seg',
                                   p_per_sample=0.5))

    # blur: either median filter or gaussian blur, never both
    tr_transforms.append(
        OneOfTransform([
            MedianFilterTransform((2, 8),
                                  same_for_each_channel=False,
                                  p_per_sample=0.2,
                                  p_per_channel=0.5),
            GaussianBlurTransform((0.3, 1.5),
                                  different_sigma_per_channel=True,
                                  p_per_sample=0.2,
                                  p_per_channel=0.5)
        ]))

    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))

    tr_transforms.append(
        BrightnessTransform(0,
                            0.5,
                            per_channel=True,
                            p_per_sample=0.1,
                            p_per_channel=0.5))

    # contrast: either range-preserving or not, never both
    tr_transforms.append(
        OneOfTransform([
            ContrastAugmentationTransform(contrast_range=(0.5, 2),
                                          preserve_range=True,
                                          per_channel=True,
                                          data_key='data',
                                          p_per_sample=0.2,
                                          p_per_channel=0.5),
            ContrastAugmentationTransform(contrast_range=(0.5, 2),
                                          preserve_range=False,
                                          per_channel=True,
                                          data_key='data',
                                          p_per_sample=0.2,
                                          p_per_channel=0.5),
        ]))

    tr_transforms.append(
        SimulateLowResolutionTransform(zoom_range=(0.25, 1),
                                       per_channel=True,
                                       p_per_channel=0.5,
                                       order_downsample=0,
                                       order_upsample=3,
                                       p_per_sample=0.15,
                                       ignore_axes=ignore_axes))

    tr_transforms.append(
        GammaTransform((0.7, 1.5),
                       invert_image=True,
                       per_channel=True,
                       retain_stats=True,
                       p_per_sample=0.1))
    # NOTE(review): this second GammaTransform is identical to the one above
    # (invert_image=True again); similar pipelines pair an inverted with a
    # non-inverted gamma — confirm whether invert_image=False was intended here.
    tr_transforms.append(
        GammaTransform((0.7, 1.5),
                       invert_image=True,
                       per_channel=True,
                       retain_stats=True,
                       p_per_sample=0.1))

    if self.do_mirroring:
        tr_transforms.append(MirrorTransform(self.mirror_axes))

    # blank out random rectangles (rectangle edge between ~patch/10 and patch/3)
    tr_transforms.append(
        BlankRectangleTransform([[max(1, p // 10), p // 3]
                                 for p in self.patch_size],
                                rectangle_value=np.mean,
                                num_rectangles=(1, 5),
                                force_square=False,
                                p_per_sample=0.4,
                                p_per_channel=0.5))

    tr_transforms.append(
        BrightnessGradientAdditiveTransform(
            lambda x, y: np.exp(
                np.random.uniform(np.log(x[y] // 6), np.log(x[y]))),
            (-0.5, 1.5),
            max_strength=lambda x, y: np.random.uniform(-5, -1)
            if np.random.uniform() < 0.5 else np.random.uniform(1, 5),
            mean_centered=False,
            same_for_all_channels=False,
            p_per_sample=0.3,
            p_per_channel=0.5))

    tr_transforms.append(
        LocalGammaTransform(
            lambda x, y: np.exp(
                np.random.uniform(np.log(x[y] // 6), np.log(x[y]))),
            (-0.5, 1.5),
            lambda: np.random.uniform(0.01, 0.8)
            if np.random.uniform() < 0.5 else np.random.uniform(1.5, 4),
            same_for_all_channels=False,
            p_per_sample=0.3,
            p_per_channel=0.5))

    tr_transforms.append(
        SharpeningTransform(strength=(0.1, 1),
                            same_for_each_channel=False,
                            p_per_sample=0.2,
                            p_per_channel=0.5))

    if any(self.use_mask_for_norm.values()):
        tr_transforms.append(
            MaskTransform(self.use_mask_for_norm,
                          mask_idx_in_seg=0,
                          set_outside_to=0))

    tr_transforms.append(RemoveLabelTransform(-1, 0))

    # cascade: feed previous-stage segmentation as extra (one-hot) data channels
    if self.data_aug_params["move_last_seg_chanel_to_data"]:
        all_class_labels = np.arange(1, self.num_classes)
        tr_transforms.append(
            MoveSegAsOneHotToData(1, all_class_labels, 'seg', 'data'))
        if self.data_aug_params["cascade_do_cascade_augmentations"]:
            tr_transforms.append(
                ApplyRandomBinaryOperatorTransform(channel_idx=list(
                    range(-len(all_class_labels), 0)),
                                                   p_per_sample=0.4,
                                                   key="data",
                                                   strel_size=(1, 8),
                                                   p_per_label=1))
            tr_transforms.append(
                RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                    channel_idx=list(range(-len(all_class_labels), 0)),
                    key="data",
                    p_per_sample=0.2,
                    fill_with_other_class_p=0.15,
                    dont_do_if_covers_more_than_X_percent=0))

    tr_transforms.append(RenameTransform('seg', 'target', True))

    if self.regions is not None:
        tr_transforms.append(
            ConvertSegmentationToRegionsTransform(self.regions, 'target',
                                                  'target'))

    if self.deep_supervision_scales is not None:
        tr_transforms.append(
            DownsampleSegForDSTransform2(self.deep_supervision_scales,
                                         0,
                                         input_key='target',
                                         output_key='target'))

    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    return tr_transforms
batchgen = DataLoader(data.camera(), 1, None, False) #batch = next(batchgen) #print(batch['data'].shape) def plot_batch(batch): batch_size = batch['data'].shape[0] for i in range(batch_size): plt.subplot(1, batch_size, i+1) plt.imshow(batch['data'][i, 0], cmap="gray") plt.show() #plot_batch(batch) my_transforms = [] brightness_transform = ContrastAugmentationTransform((0.3, 3.), preserve_range=True) my_transforms.append(brightness_transform) noise_transform = GaussianNoiseTransform(noise_variance=(0, 20)) ## my_transforms.append(noise_transform) spatial_transform = SpatialTransform_2(data.camera().shape, np.array(data.camera().shape)//2, do_elastic_deform=True, deformation_scale=(0,0.05), do_rotation=True, angle_z=(0, 2*np.pi), do_scale=True, scale=(0.8, 1.2), border_mode_data='constant', border_cval_data=0, order_data=1, random_crop=False) my_transforms.append(spatial_transform) all_transforms = Compose(my_transforms) multithreaded_generator = MultiThreadedAugmenter(batchgen, all_transforms, 4, 2, seeds=None) plot_batch(next(multithreaded_generator))
scale=(0.98, 1.02), translate=(0.03, 0.03)),
    mt_transforms.RandomTensorChannelShift((-0.10, 0.10)),
    mt_transforms.ToTensor()
    # mt_transforms.NormalizeInstance(),
])

# batchgenerators transforms operating on dict keys "img" (data) and "seg" (labels)
gamma_t = GammaTransform(data_key="img", gamma_range=(0.1, 10))
mirror_t = MirrorTransform(data_key="img", label_key="seg")
spatial_t = SpatialTransform(patch_size=(8, 8, 8), data_key="img", label_key="seg")
gauss_noise_t = GaussianNoiseTransform(data_key="img", noise_variance=(0, 1))
zoom_t = ZoomTransform(zoom_factors=2, data_key="img")


def show_basic(x, gt, info=None):
    """Print summary statistics (shape, max, min, mean) for an image/ground-truth pair.

    :param x: image array
    :param gt: ground-truth array
    :param info: optional label printed before the statistics
    """
    if info is not None:
        print("Test for " + info)
    print("img size: {}, max: {}, min: {}, avg: {}.".format(
        x.shape, np.max(x), np.min(x), np.average(x)))
    print("gt size: {}, max: {}, min: {}, avg: {}.\n".format(
        gt.shape, np.max(gt), np.min(gt), np.average(gt)))


if __name__ == "__main__":
def get_insaneDA_augmentation(dataloader_train,
                              dataloader_val,
                              patch_size,
                              params=default_3D_augmentation_params,
                              border_val_seg=-1,
                              seeds_train=None,
                              seeds_val=None,
                              order_seg=1,
                              order_data=3,
                              deep_supervision_scales=None,
                              soft_ds=False,
                              classes=None,
                              pin_memory=True,
                              regions=None):
    """Wrap train/val dataloaders with aggressive ("insane") augmentation pipelines.

    :param dataloader_train: batch generator yielding {'data', 'seg'} dicts for training
    :param dataloader_val: batch generator for validation (lightweight transforms only)
    :param patch_size: target spatial size for the SpatialTransform
    :param params: augmentation hyperparameter dict (see default_3D_augmentation_params)
    :param border_val_seg: fill value for seg outside the original image
    :param seeds_train: per-worker seeds for the training augmenter (or None)
    :param seeds_val: per-worker seeds for the validation augmenter (or None)
    :param order_seg: interpolation order for seg resampling
    :param order_data: interpolation order for data resampling
    :param deep_supervision_scales: if not None, seg is downsampled for deep supervision
    :param soft_ds: use soft (class-aware) downsampling; requires `classes`
    :param classes: class labels for soft deep supervision
    :param pin_memory: pin output tensors for faster host->GPU transfer
    :param regions: if not None, seg labels are converted to region maps
    :return: (train_augmenter, val_augmenter) MultiThreadedAugmenter pair
    """
    # Typical call (2D cascade-style config): patch_size=[288, 320],
    # deep_supervision_scales=[[1,1,1], [1,.5,.5], [1,.25,.25], ...]; params:
    # do_elastic with alpha=(0,300), sigma=(9,15), p_eldef=0.1; scaling (0.65,1.6)
    # with independent per-axis factors, p_scale=0.3; rotation_x=+-pi,
    # rotation_y/z=+-pi/6, p_rot=0.7; gamma_range=(0.5,1.6), p_gamma=0.3; mirroring
    # on axes (0,1,2); dummy_2D=True; additive brightness mu=0, sigma=0.2;
    # num_threads=12, num_cached_per_thread=1.
    assert params.get(
        'mirror') is None, "old version of params, use new keyword do_mirror"

    tr_transforms = []

    if params.get("selected_data_channels") is not None:
        tr_transforms.append(
            DataChannelSelectionTransform(
                params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(
            SegChannelSelectionTransform(params.get("selected_seg_channels")))

    # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        ignore_axes = (0, )
        tr_transforms.append(Convert3DTo2DTransform())
    else:
        ignore_axes = None

    tr_transforms.append(
        SpatialTransform(patch_size,
                         patch_center_dist_from_border=None,
                         do_elastic_deform=params.get("do_elastic"),
                         alpha=params.get("elastic_deform_alpha"),
                         sigma=params.get("elastic_deform_sigma"),
                         do_rotation=params.get("do_rotation"),
                         angle_x=params.get("rotation_x"),
                         angle_y=params.get("rotation_y"),
                         angle_z=params.get("rotation_z"),
                         do_scale=params.get("do_scaling"),
                         scale=params.get("scale_range"),
                         border_mode_data=params.get("border_mode_data"),
                         border_cval_data=0,
                         order_data=order_data,
                         border_mode_seg="constant",
                         border_cval_seg=border_val_seg,
                         order_seg=order_seg,
                         random_crop=params.get("random_crop"),
                         p_el_per_sample=params.get("p_eldef"),
                         p_scale_per_sample=params.get("p_scale"),
                         p_rot_per_sample=params.get("p_rot"),
                         independent_scale_for_each_axis=params.get(
                             "independent_scale_factor_for_each_axis"),
                         p_independent_scale_per_axis=params.get(
                             "p_independent_scale_per_axis")))
    if params.get("dummy_2D"):
        tr_transforms.append(Convert2DTo3DTransform())

    # we need to put the color augmentations after the dummy 2d part (if applicable).
    # Otherwise the overloaded color channel gets in the way
    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15))
    tr_transforms.append(
        GaussianBlurTransform((0.5, 1.5),
                              different_sigma_per_channel=True,
                              p_per_sample=0.2,
                              p_per_channel=0.5))
    tr_transforms.append(
        BrightnessMultiplicativeTransform(multiplier_range=(0.70, 1.3),
                                          p_per_sample=0.15))
    tr_transforms.append(
        ContrastAugmentationTransform(contrast_range=(0.65, 1.5),
                                      p_per_sample=0.15))
    tr_transforms.append(
        SimulateLowResolutionTransform(zoom_range=(0.5, 1),
                                       per_channel=True,
                                       p_per_channel=0.5,
                                       order_downsample=0,
                                       order_upsample=3,
                                       p_per_sample=0.25,
                                       ignore_axes=ignore_axes))
    # inverted gamma (always applied with p=0.15)
    tr_transforms.append(
        GammaTransform(params.get("gamma_range"),
                       True,
                       True,
                       retain_stats=params.get("gamma_retain_stats"),
                       p_per_sample=0.15))

    if params.get("do_additive_brightness"):
        tr_transforms.append(
            BrightnessTransform(
                params.get("additive_brightness_mu"),
                params.get("additive_brightness_sigma"),
                True,
                p_per_sample=params.get("additive_brightness_p_per_sample"),
                p_per_channel=params.get("additive_brightness_p_per_channel")))

    # regular gamma, only when enabled
    if params.get("do_gamma"):
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"),
                           False,
                           True,
                           retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=params["p_gamma"]))

    if params.get("do_mirror") or params.get("mirror"):
        tr_transforms.append(MirrorTransform(params.get("mirror_axes")))

    if params.get("mask_was_used_for_normalization") is not None:
        mask_was_used_for_normalization = params.get(
            "mask_was_used_for_normalization")
        tr_transforms.append(
            MaskTransform(mask_was_used_for_normalization,
                          mask_idx_in_seg=0,
                          set_outside_to=0))

    tr_transforms.append(RemoveLabelTransform(-1, 0))

    if params.get("move_last_seg_chanel_to_data") is not None and params.get(
            "move_last_seg_chanel_to_data"):
        tr_transforms.append(
            MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"),
                                  'seg', 'data'))
        # BUGFIX: guard previously read `... and not None and ...`; `not None` is
        # always True, so it degenerated to a plain truthiness test. Use `is not None`.
        if params.get("cascade_do_cascade_augmentations"
                      ) is not None and params.get(
                          "cascade_do_cascade_augmentations"):
            if params.get("cascade_random_binary_transform_p") > 0:
                tr_transforms.append(
                    ApplyRandomBinaryOperatorTransform(
                        channel_idx=list(
                            range(-len(params.get("all_segmentation_labels")),
                                  0)),
                        p_per_sample=params.get(
                            "cascade_random_binary_transform_p"),
                        key="data",
                        strel_size=params.get(
                            "cascade_random_binary_transform_size")))
            if params.get("cascade_remove_conn_comp_p") > 0:
                tr_transforms.append(
                    RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                        channel_idx=list(
                            range(-len(params.get("all_segmentation_labels")),
                                  0)),
                        key="data",
                        p_per_sample=params.get("cascade_remove_conn_comp_p"),
                        # BUGFIX: these two kwargs previously received each
                        # other's params (names were crossed).
                        fill_with_other_class_p=params.get(
                            "cascade_remove_conn_comp_fill_with_other_class_p"
                        ),
                        dont_do_if_covers_more_than_X_percent=params.get(
                            "cascade_remove_conn_comp_max_size_percent_threshold"
                        )))

    tr_transforms.append(RenameTransform('seg', 'target', True))

    if regions is not None:
        tr_transforms.append(
            ConvertSegmentationToRegionsTransform(regions, 'target',
                                                  'target'))

    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            tr_transforms.append(
                DownsampleSegForDSTransform3(deep_supervision_scales, 'target',
                                             'target', classes))
        else:
            tr_transforms.append(
                DownsampleSegForDSTransform2(deep_supervision_scales,
                                             0,
                                             0,
                                             input_key='target',
                                             output_key='target'))

    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)

    batchgenerator_train = MultiThreadedAugmenter(
        dataloader_train,
        tr_transforms,
        params.get('num_threads'),
        params.get("num_cached_per_thread"),
        seeds=seeds_train,
        pin_memory=pin_memory)

    # ========================================================
    # validation: only label cleanup / key handling, no intensity or spatial augmentation
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(
            DataChannelSelectionTransform(
                params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(
            SegChannelSelectionTransform(params.get("selected_seg_channels")))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get(
            "move_last_seg_chanel_to_data"):
        val_transforms.append(
            MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"),
                                  'seg', 'data'))

    val_transforms.append(RenameTransform('seg', 'target', True))

    if regions is not None:
        val_transforms.append(
            ConvertSegmentationToRegionsTransform(regions, 'target',
                                                  'target'))

    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            val_transforms.append(
                DownsampleSegForDSTransform3(deep_supervision_scales, 'target',
                                             'target', classes))
        else:
            val_transforms.append(
                DownsampleSegForDSTransform2(deep_supervision_scales,
                                             0,
                                             0,
                                             input_key='target',
                                             output_key='target'))

    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)

    batchgenerator_val = MultiThreadedAugmenter(
        dataloader_val,
        val_transforms,
        max(params.get('num_threads') // 2, 1),
        params.get("num_cached_per_thread"),
        seeds=seeds_val,
        pin_memory=pin_memory)

    return batchgenerator_train, batchgenerator_val
def get_moreDA_augmentation(
    dataloader_train,
    dataloader_val,
    patch_size,
    params=default_3D_augmentation_params,
    border_val_seg=-1,
    seeds_train=None,
    seeds_val=None,
    order_seg=1,
    order_data=3,
    deep_supervision_scales=None,
    soft_ds=False,
    classes=None,
    pin_memory=True,
    anisotropy=False,
    extra_label_keys=None,
    val_mode=False,
    use_conf=False,
):
    '''
    Wrap the raw dataloaders with batchgenerators augmentation pipelines.

    :param dataloader_train: batch-yielding loader for training data (may be
        None when val_mode is True).
    :param dataloader_val: batch-yielding loader for validation data.
    :param patch_size: spatial shape of the network input; reduced to its 2D
        part when the dummy-2D path is taken.
    :param params: dict-like augmentation configuration (rotation ranges,
        scaling, gamma, mirror axes, thread counts, ...). NOTE: mutated in
        place on the dummy-2D/anisotropy path.
    :param border_val_seg: fill value for segmentation borders in SpatialTransform.
    :param seeds_train/seeds_val: optional RNG seed(s), replicated per worker.
    :param order_seg/order_data: interpolation orders for seg / image data.
    :param deep_supervision_scales: if not None, targets are downsampled for
        deep supervision (soft one-hot when soft_ds, else label maps).
    :param soft_ds: use DownsampleSegForDSTransform3 (requires `classes`).
    :param classes: class list, required only when soft_ds is True.
    :param pin_memory: forwarded to MultiThreadedAugmenter.
    :param anisotropy: force the dummy-2D augmentation path.
    :param extra_label_keys: additional batch keys that must follow the same
        spatial transforms / tensor conversion as 'gt'.
    :param val_mode: when True, build only lightweight (non-augmenting)
        pipelines for both loaders.
    :param use_conf: when True, force single worker / single cached batch.
    :return: (batchgenerator_train, batchgenerator_val); each iterator yields
        a dict whose keys have been renamed to 'image' and 'gt'
        (plus extra_label_keys, if any), converted to float tensors.
    '''
    if not val_mode:
        assert params.get(
            'mirror'
        ) is None, "old version of params, use new keyword do_mirror"
        tr_transforms = []
        # Optional channel selection for data / seg before anything else.
        if params.get("selected_data_channels") is not None:
            tr_transforms.append(
                DataChannelSelectionTransform(
                    params.get("selected_data_channels")))
        if params.get("selected_seg_channels") is not None:
            tr_transforms.append(
                SegChannelSelectionTransform(
                    params.get("selected_seg_channels")))
        # anistropic setting: apply spatial transforms slice-wise (dummy 2D).
        if anisotropy or params.get("dummy_2D"):
            ignore_axes = (0, )
            tr_transforms.append(
                Convert3DTo2DTransform(extra_label_keys=extra_label_keys))
            patch_size = patch_size[1:]  # 2D patch size
            print('Using dummy2d data augmentation')
            # NOTE(review): params is mutated in place here — in-plane
            # rotation only, elastic ranges overridden for the 2D case.
            params["elastic_deform_alpha"] = (0., 200.)
            params["elastic_deform_sigma"] = (9., 13.)
            params["rotation_x"] = (-180. / 360 * 2. * np.pi,
                                    180. / 360 * 2. * np.pi)
            params["rotation_y"] = (-0. / 360 * 2. * np.pi,
                                    0. / 360 * 2. * np.pi)
            params["rotation_z"] = (-0. / 360 * 2. * np.pi,
                                    0. / 360 * 2. * np.pi)
        else:
            ignore_axes = None
        # 1. Spatial Transform: rotation, scaling
        tr_transforms.append(
            SpatialTransform(patch_size,
                             patch_center_dist_from_border=None,
                             do_elastic_deform=params.get("do_elastic"),
                             alpha=params.get("elastic_deform_alpha"),
                             sigma=params.get("elastic_deform_sigma"),
                             do_rotation=params.get("do_rotation"),
                             angle_x=params.get("rotation_x"),
                             angle_y=params.get("rotation_y"),
                             angle_z=params.get("rotation_z"),
                             p_rot_per_axis=params.get("rotation_p_per_axis"),
                             do_scale=params.get("do_scaling"),
                             scale=params.get("scale_range"),
                             border_mode_data=params.get("border_mode_data"),
                             border_cval_data=0,
                             order_data=order_data,
                             border_mode_seg="constant",
                             border_cval_seg=border_val_seg,
                             order_seg=order_seg,
                             random_crop=params.get("random_crop"),
                             p_el_per_sample=params.get("p_eldef"),
                             p_scale_per_sample=params.get("p_scale"),
                             p_rot_per_sample=params.get("p_rot"),
                             independent_scale_for_each_axis=params.get(
                                 "independent_scale_factor_for_each_axis"),
                             extra_label_keys=extra_label_keys))
        # Undo the dummy-2D reshape after the spatial transform.
        if anisotropy or params.get("dummy_2D"):
            tr_transforms.append(
                Convert2DTo3DTransform(extra_label_keys=extra_label_keys))
        # 2. Noise Augmentation: gaussian noise, gaussian blur
        tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
        tr_transforms.append(
            GaussianBlurTransform((0.5, 1.),
                                  different_sigma_per_channel=True,
                                  p_per_sample=0.2,
                                  p_per_channel=0.5))
        # 3. Color Augmentation: brightness, constrast, low resolution,
        #    gamma_transform
        tr_transforms.append(
            BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25),
                                              p_per_sample=0.15))
        if params.get("do_additive_brightness"):
            tr_transforms.append(
                BrightnessTransform(params.get("additive_brightness_mu"),
                                    params.get("additive_brightness_sigma"),
                                    True,
                                    p_per_sample=params.get(
                                        "additive_brightness_p_per_sample"),
                                    p_per_channel=params.get(
                                        "additive_brightness_p_per_channel")))
        tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
        tr_transforms.append(
            SimulateLowResolutionTransform(zoom_range=(0.5, 1),
                                           per_channel=True,
                                           p_per_channel=0.5,
                                           order_downsample=0,
                                           order_upsample=3,
                                           p_per_sample=0.25,
                                           ignore_axes=ignore_axes))
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"),
                           True,
                           True,
                           retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=0.1))  # inverted gamma
        if params.get("do_gamma"):
            tr_transforms.append(
                GammaTransform(params.get("gamma_range"),
                               False,
                               True,
                               retain_stats=params.get("gamma_retain_stats"),
                               p_per_sample=params["p_gamma"]))
        # 4. Mirror Transform
        if params.get("do_mirror") or params.get("mirror"):
            tr_transforms.append(
                MirrorTransform(params.get("mirror_axes"),
                                extra_label_keys=extra_label_keys))
        # if params.get("mask_was_used_for_normalization") is not None:
        #     mask_was_used_for_normalization = params.get("mask_was_used_for_normalization")
        #     tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
        tr_transforms.append(
            RemoveLabelTransform(-1, 0, extra_label_keys=extra_label_keys))
        # Rename batch keys: downstream code expects 'image' / 'gt'.
        tr_transforms.append(RenameTransform('data', 'image', True))
        tr_transforms.append(RenameTransform('seg', 'gt', True))
        if deep_supervision_scales is not None:
            if soft_ds:
                assert classes is not None
                tr_transforms.append(
                    DownsampleSegForDSTransform3(deep_supervision_scales,
                                                 'gt', 'gt', classes))
            else:
                tr_transforms.append(
                    DownsampleSegForDSTransform2(
                        deep_supervision_scales,
                        0,
                        0,
                        input_key='gt',
                        output_key='gt',
                        extra_label_keys=extra_label_keys))
        # Parenthesization note: this is (['image','gt'] + extra_label_keys)
        # when extra_label_keys is not None, else ['image','gt'].
        toTensorKeys = [
            'image', 'gt'
        ] + extra_label_keys if extra_label_keys is not None else [
            'image', 'gt'
        ]
        tr_transforms.append(NumpyToTensor(toTensorKeys, 'float'))
        tr_transforms = Compose(tr_transforms)
        # One seed per worker thread.
        if seeds_train is not None:
            seeds_train = [seeds_train] * params.get('num_threads')
        if use_conf:
            num_threads = 1
            num_cached_per_thread = 1
        else:
            num_threads, num_cached_per_thread = params.get(
                'num_threads'), params.get("num_cached_per_thread")
        batchgenerator_train = MultiThreadedAugmenter(dataloader_train,
                                                      tr_transforms,
                                                      num_threads,
                                                      num_cached_per_thread,
                                                      seeds=seeds_train,
                                                      pin_memory=pin_memory)
        # Validation pipeline: same key handling, no stochastic augmentation.
        val_transforms = []
        val_transforms.append(
            RemoveLabelTransform(-1, 0, extra_label_keys=extra_label_keys))
        if params.get("selected_data_channels") is not None:
            val_transforms.append(
                DataChannelSelectionTransform(
                    params.get("selected_data_channels")))
        if params.get("selected_seg_channels") is not None:
            val_transforms.append(
                SegChannelSelectionTransform(
                    params.get("selected_seg_channels")))
        val_transforms.append(RenameTransform('data', 'image', True))
        val_transforms.append(RenameTransform('seg', 'gt', True))
        if deep_supervision_scales is not None:
            if soft_ds:
                assert classes is not None
                val_transforms.append(
                    DownsampleSegForDSTransform3(deep_supervision_scales,
                                                 'gt', 'gt', classes))
            else:
                val_transforms.append(
                    DownsampleSegForDSTransform2(
                        deep_supervision_scales,
                        0,
                        0,
                        input_key='gt',
                        output_key='gt',
                        extra_label_keys=extra_label_keys))
        val_transforms.append(NumpyToTensor(toTensorKeys, 'float'))
        val_transforms = Compose(val_transforms)
        # batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
        #                                             params.get("num_cached_per_thread"),
        #                                             seeds=seeds_val, pin_memory=pin_memory)
        if seeds_val is not None:
            seeds_val = [seeds_val] * 1
        # batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, 1,
        #                                             params.get("num_cached_per_thread"),
        #                                             seeds=seeds_val, pin_memory=False)
        batchgenerator_val = SingleThreadedAugmenter(dataloader_val,
                                                     val_transforms)
    else:
        # val_mode: no augmentation at all, only key renaming / tensor
        # conversion; both returned generators use the same transform stack.
        val_transforms = []
        val_transforms.append(
            RemoveLabelTransform(-1, 0, extra_label_keys=extra_label_keys))
        if params.get("selected_data_channels") is not None:
            val_transforms.append(
                DataChannelSelectionTransform(
                    params.get("selected_data_channels")))
        if params.get("selected_seg_channels") is not None:
            val_transforms.append(
                SegChannelSelectionTransform(
                    params.get("selected_seg_channels")))
        val_transforms.append(RenameTransform('data', 'image', True))
        val_transforms.append(RenameTransform('seg', 'gt', True))
        if deep_supervision_scales is not None:
            if soft_ds:
                assert classes is not None
                val_transforms.append(
                    DownsampleSegForDSTransform3(deep_supervision_scales,
                                                 'gt', 'gt', classes))
            else:
                val_transforms.append(
                    DownsampleSegForDSTransform2(
                        deep_supervision_scales,
                        0,
                        0,
                        input_key='gt',
                        output_key='gt',
                        extra_label_keys=extra_label_keys))
        toTensorKeys = [
            'image', 'gt'
        ] + extra_label_keys if extra_label_keys is not None else [
            'image', 'gt'
        ]
        val_transforms.append(NumpyToTensor(toTensorKeys, 'float'))
        val_transforms = Compose(val_transforms)
        batchgenerator_val = SingleThreadedAugmenter(dataloader_val,
                                                     val_transforms)
        if dataloader_train is not None:
            batchgenerator_train = SingleThreadedAugmenter(
                dataloader_train, val_transforms)
        else:
            batchgenerator_train = None
    return batchgenerator_train, batchgenerator_val
def run(self, img_data, seg_data):
    """Run the configured augmentations over the input and return new data.

    Builds a batchgenerators pipeline from the instance's configuration
    flags (mirror, contrast, brightness, gamma, gaussian noise, spatial),
    draws ``self.cycles`` augmented batches, and returns them stacked
    along the batch axis.

    :param img_data: channel-last image batch(es); converted to
        channel-first by the upstream DataParser and back here.
    :param seg_data: matching segmentation maps (when
        ``self.seg_augmentation``) or classification targets.
    :return: tuple ``(aug_img_data, aug_seg_data)``, both channel-last
        numpy arrays with ``self.cycles`` times the input batch size.
    """
    # Define label for segmentation for segmentation augmentation:
    # "seg" entries are spatially transformed alongside the images,
    # "class" entries pass through untouched.
    if self.seg_augmentation:
        seg_label = "seg"
    else:
        seg_label = "class"
    # Create a parser for the batchgenerators module
    data_generator = DataParser(img_data, seg_data, seg_label)
    # Initialize empty transform list
    transforms = []
    # Add mirror augmentation
    if self.mirror:
        transforms.append(MirrorTransform(axes=self.config_mirror_axes))
    # Add contrast augmentation
    if self.contrast:
        transforms.append(
            ContrastAugmentationTransform(
                self.config_contrast_range,
                preserve_range=self.config_contrast_preserverange,
                per_channel=self.coloraug_per_channel,
                p_per_sample=self.config_p_per_sample))
    # Add brightness augmentation
    if self.brightness:
        transforms.append(
            BrightnessMultiplicativeTransform(
                self.config_brightness_range,
                per_channel=self.coloraug_per_channel,
                p_per_sample=self.config_p_per_sample))
    # Add gamma augmentation
    if self.gamma:
        transforms.append(
            GammaTransform(self.config_gamma_range,
                           invert_image=False,
                           per_channel=self.coloraug_per_channel,
                           retain_stats=True,
                           p_per_sample=self.config_p_per_sample))
    # Add gaussian noise augmentation
    if self.gaussian_noise:
        transforms.append(
            GaussianNoiseTransform(self.config_gaussian_noise_range,
                                   p_per_sample=self.config_p_per_sample))
    # Add spatial transformations as augmentation
    # (rotation, scaling, elastic deformation)
    if self.rotations or self.scaling or self.elastic_deform or \
            self.cropping:
        # Identify patch shape (full image or cropping); input is
        # channel-last, so the spatial shape excludes the final axis.
        if self.cropping:
            patch_shape = self.cropping_patch_shape
        else:
            patch_shape = img_data[0].shape[0:-1]
        # Assembling the spatial transformation
        transforms.append(
            SpatialTransform(
                patch_shape, [i // 2 for i in patch_shape],
                do_elastic_deform=self.elastic_deform,
                alpha=self.config_elastic_deform_alpha,
                sigma=self.config_elastic_deform_sigma,
                do_rotation=self.rotations,
                angle_x=self.config_rotations_angleX,
                angle_y=self.config_rotations_angleY,
                angle_z=self.config_rotations_angleZ,
                do_scale=self.scaling,
                scale=self.config_scaling_range,
                border_mode_data='constant',
                border_cval_data=0,
                border_mode_seg='constant',
                border_cval_seg=0,
                order_data=3,
                order_seg=0,
                p_el_per_sample=self.config_p_per_sample,
                p_rot_per_sample=self.config_p_per_sample,
                p_scale_per_sample=self.config_p_per_sample,
                random_crop=self.cropping))
    # Compose the batchgenerators transforms and wrap them into a generator
    augmentation_generator = SingleThreadedAugmenter(data_generator,
                                                     Compose(transforms))
    # Draw the augmented batches once each and concatenate a single time at
    # the end: the previous per-cycle np.concatenate copied the accumulated
    # arrays every iteration, i.e. O(cycles^2) data movement.
    img_batches = []
    seg_batches = []
    for _ in range(self.cycles):
        augmentation = next(augmentation_generator)
        img_batches.append(augmentation["data"])
        seg_batches.append(augmentation[seg_label])
    # The original loop prepended each new batch; reverse to keep the exact
    # same (newest-first) output ordering.
    aug_img_data = np.concatenate(img_batches[::-1], axis=0)
    aug_seg_data = np.concatenate(seg_batches[::-1], axis=0)
    # Transform data from channel-first back to channel-last structure
    # Data structure channel-first 3D: (batch, channel, x, y, z)
    # Data structure channel-last 3D: (batch, x, y, z, channel)
    aug_img_data = np.moveaxis(aug_img_data, 1, -1)
    aug_seg_data = np.moveaxis(aug_seg_data, 1, -1)
    # Return augmentated image and segmentation data
    return aug_img_data, aug_seg_data
# Build an augmentation pipeline for `img` (defined earlier in the script):
# spatial transform, noise/blur, intensity changes, and mirroring.
my_transforms = []
spatial_transform = SpatialTransform(
    img.shape,
    np.array(img.shape) // 2,
    do_elastic_deform=False,
    do_rotation=True,
    angle_z=(0, 2 * np.pi),  # rotation
    do_scale=True,
    scale=(0.3, 3.),  # scaling
    border_mode_data='constant',
    border_cval_data=0,
    order_data=1,
    random_crop=False)
my_transforms.append(spatial_transform)
GaussianNoise = GaussianNoiseTransform()  # Gaussian noise
my_transforms.append(GaussianNoise)
GaussianBlur = GaussianBlurTransform()  # Gaussian blur
my_transforms.append(GaussianBlur)
Brightness = BrightnessTransform(0, 0.2)  # brightness
my_transforms.append(Brightness)
# NOTE(review): despite its name, this variable holds a *contrast*
# transform, not a brightness one.
brightness_transform = ContrastAugmentationTransform(
    (0.3, 3.), preserve_range=True)  # contrast
my_transforms.append(brightness_transform)
SimulateLowResolution = SimulateLowResolutionTransform()  # low resolution
my_transforms.append(SimulateLowResolution)
Gamma = GammaTransform()  # gamma augmentation
my_transforms.append(Gamma)
mirror_transform = MirrorTransform(axes=(0, 1))  # mirroring
my_transforms.append(mirror_transform)
all_transforms = Compose(my_transforms)