def test_transforms(self):
    landmarks_dict = dict(
        t1=np.linspace(0, 100, 13),
        t2=np.linspace(0, 100, 13),
    )
    random_transforms = (
        RandomFlip(axes=(0, 1, 2), flip_probability=1),
        RandomNoise(),
        RandomBiasField(),
        RandomElasticDeformation(proportion_to_augment=1),
        RandomAffine(),
        RandomMotion(proportion_to_augment=1),
    )
    intensity_transforms = (
        Rescale(),
        ZNormalization(),
        HistogramStandardization(landmarks_dict=landmarks_dict),
    )
    for transform in random_transforms:
        sample = self.get_sample()
        transformed = transform(sample)
    for transform in intensity_transforms:
        sample = self.get_sample()
        transformed = transform(sample)
def test_transforms(self):
    landmarks_dict = dict(
        t1=np.linspace(0, 100, 13),
        t2=np.linspace(0, 100, 13),
    )
    transforms = (
        CenterCropOrPad((9, 21, 30)),
        ToCanonical(),
        Resample((1, 1.1, 1.25)),
        RandomFlip(axes=(0, 1, 2), flip_probability=1),
        RandomMotion(proportion_to_augment=1),
        RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
        RandomSpike(),
        RandomNoise(),
        RandomBlur(),
        RandomSwap(patch_size=2, num_iterations=5),
        Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
        RandomBiasField(),
        Rescale((0, 1)),
        ZNormalization(masking_method='label'),
        HistogramStandardization(landmarks_dict=landmarks_dict),
        RandomElasticDeformation(proportion_to_augment=1),
        RandomAffine(),
        Pad((1, 2, 3, 0, 5, 6)),
        Crop((3, 2, 8, 0, 1, 4)),
    )
    transformed = self.get_sample()
    for transform in transforms:
        transformed = transform(transformed)
def initialize_transforms_simple(p=0.8):
    transforms = [
        RandomFlip(axes=(0, 1, 2), flip_probability=1, p=p),
        # RandomAffine(scales=(0.9, 1.1), degrees=10, isotropic=False,
        #              default_pad_value='otsu',
        #              image_interpolation=Interpolation.LINEAR,
        #              p=p, seed=None),  # *** SLOWS DOWN DATALOADER ***
        # RandomElasticDeformation(num_control_points=7, max_displacement=7.5,
        #                          locked_borders=2,
        #                          image_interpolation=Interpolation.LINEAR,
        #                          p=0.5, seed=None),
        RandomMotion(degrees=10, translation=10, num_transforms=2,
                     image_interpolation='linear', p=p),
        RandomAnisotropy(axes=(0, 1, 2), downsampling=2),
        RandomBiasField(coefficients=0.5, order=3, p=p),
        RandomBlur(std=(0, 2), p=p),
        RandomNoise(mean=0, std=(0, 5), p=p),
        RescaleIntensity((0, 255)),
    ]
    transform = tio.Compose(transforms)
    return transform
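# Hedged usage sketch for initialize_transforms_simple: the import alias and
# the synthetic subject below are assumptions, not part of the original code.
import torch
import torchio as tio

subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))
augmented = initialize_transforms_simple(p=0.8)(subject)  # new, transformed Subject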
def mri_artifact(p=1):
    return OneOf(
        {
            RandomMotion(): 0.34,
            RandomGhosting(): 0.33,
            RandomSpike(): 0.33,
        },
        p=p,
    )
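# Minimal sketch of using mri_artifact: OneOf samples exactly one of the three
# artifact transforms per call, weighted as given, and with p=1 an artifact is
# always applied. The subject here is a hypothetical stand-in.
import torch
import torchio as tio

subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 32, 32, 32)))
corrupted = mri_artifact(p=1)(subject)  # motion, ghosting, or spike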
def motion(parameters):
    return RandomMotion(
        degrees=parameters["degrees"],
        translation=parameters["translation"],
        num_transforms=parameters["num_transforms"],
        image_interpolation=parameters["interpolation"],
        p=parameters["probability"],
    )
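# Illustrative parameters dict for motion(); the values are assumptions chosen
# to mirror common defaults, not the original configuration.
parameters = {
    "degrees": 10,
    "translation": 10,
    "num_transforms": 2,
    "interpolation": "linear",
    "probability": 0.5,
}
motion_transform = motion(parameters)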
def _get_default_transforms(self):
    io_transforms = Compose([
        RandomMotion(),
        RandomFlip(axes=(1,)),
        RandomAffine(
            scales=(0.9, 1.2),
            degrees=10,
            isotropic=False,
            default_pad_value='otsu',
            image_interpolation='bspline',
        ),
        RescaleIntensity((0, 1)),
    ])
    return io_transforms
def transform(self):
    if hp.mode == '3d':
        training_transform = Compose([
            # ToCanonical(),
            CropOrPad(
                (hp.crop_or_pad_size, hp.crop_or_pad_size, hp.crop_or_pad_size),
                padding_mode='reflect',
            ),
            RandomMotion(),
            RandomBiasField(),
            ZNormalization(),
            RandomNoise(),
            RandomFlip(axes=(0,)),
            OneOf({
                RandomAffine(): 0.8,
                RandomElasticDeformation(): 0.2,
            }),
        ])
    elif hp.mode == '2d':
        training_transform = Compose([
            CropOrPad(
                (hp.crop_or_pad_size, hp.crop_or_pad_size, 1),
                padding_mode='reflect',
            ),
            RandomMotion(),
            RandomBiasField(),
            ZNormalization(),
            RandomNoise(),
            RandomFlip(axes=(0,)),
            OneOf({
                RandomAffine(): 0.8,
                RandomElasticDeformation(): 0.2,
            }),
        ])
    else:
        raise ValueError(f"Unknown mode: {hp.mode!r}; expected '2d' or '3d'")
    return training_transform
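# The method above reads a global config object `hp`; a minimal stand-in
# (attribute names taken from the code above, values illustrative) might be:
class HParams:
    mode = '3d'            # or '2d'
    crop_or_pad_size = 64  # edge length of the crop/pad target

hp = HParams()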
def training_network(landmarks, dataset, subjects):
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0,)),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])
    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    training_split_ratio = 0.9
    num_subjects = len(dataset)
    num_training_subjects = int(training_split_ratio * num_subjects)
    training_subjects = subjects[:num_training_subjects]
    validation_subjects = subjects[num_training_subjects:]

    training_set = tio.SubjectsDataset(training_subjects, transform=training_transform)
    validation_set = tio.SubjectsDataset(validation_subjects, transform=validation_transform)
    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
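# Hedged sketch of feeding the returned sets to a patch-based loop with
# tio.Queue; `landmarks`, `dataset`, and `subjects` are assumed defined as in
# the signature above, and the patch/queue/batch sizes are illustrative.
training_set, validation_set = training_network(landmarks, dataset, subjects)
patch_sampler = tio.UniformSampler(patch_size=16)
patches_queue = tio.Queue(
    training_set,
    max_length=40,
    samples_per_volume=4,
    sampler=patch_sampler,
    num_workers=2,
)
# The Queue does the parallel sampling itself, so the loader keeps num_workers=0.
patches_loader = torch.utils.data.DataLoader(patches_queue, batch_size=4)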
test = {'T1': {'csv_file': '/data/romain/HCPdata/Motion_brain_ms_train_hcp400.csv'}}
conditions = [("corr", "<", 0.98), ("|", "noise", "==", 1)]
subjects_dict, info = get_subject_list_and_csv_info_from_data_prameters(
    test, fpath_idx='filename', conditions=conditions, shuffle_order=True)

data_parameters = {
    'image': {'csv_file': '/data/romain/data_exemple/file_ms.csv', 'type': torchio.INTENSITY},
    'label1': {'csv_file': '/data/romain/data_exemple/file_p1.csv', 'type': torchio.LABEL},
    'label2': {'csv_file': '/data/romain/data_exemple/file_p2.csv', 'type': torchio.LABEL},
    'label3': {'csv_file': '/data/romain/data_exemple/file_p3.csv', 'type': torchio.LABEL},
    'sampler': {'csv_file': '/data/romain/data_exemple/file_mask.csv', 'type': torchio.SAMPLING_MAP},
}
paths_dict, info = get_subject_list_and_csv_info_from_data_prameters(
    data_parameters)  # , shuffle_order=False)

landmarks_file = '/data/romain/data_exemple/landmarks_hcp100.npy'
# Each assignment below overwrites the previous one; only the last pipeline
# (RandomBiasField) is actually composed and applied.
transforms = (HistogramStandardization(landmarks_file, mask_field_name='sampler'),)
transforms = (RandomElasticDeformation(num_control_points=8, proportion_to_augment=1,
                                       deformation_std=25,
                                       image_interpolation=Interpolation.BSPLINE),)
transforms = (RandomMotion(seed=42, degrees=0, translation=15, num_transforms=2,
                           verbose=True, proportion_to_augment=1),)
transforms = (RandomBiasField(coefficients_range=(-0.5, 0.5), order=3),)
transform = Compose(transforms)  # should be done in ImagesDataset

dataset = ImagesDataset(paths_dict, transform=transform)
dataset_not = ImagesDataset(paths_dict, transform=None)
dataload = torch.utils.data.DataLoader(dataset, num_workers=0, batch_size=1)
dataloadnot = torch.utils.data.DataLoader(dataset_not, num_workers=0, batch_size=1)

ddd = dataset[0]  # next(iter(dataset))
ii = np.squeeze(ddd['image']['data'][0], axis=1)
ddno = dataset_not[0]
# dd = next(iter(dataload))
ddno = next(iter(dataloadnot))
def get_data_loader(cfg: DictConfig, _) -> dict:
    log = logging.getLogger(__name__)
    transform = Compose([
        RandomMotion(),
        RandomBiasField(),
        RandomNoise(),
        RandomFlip(axes=(0,)),
    ])
    log.info(f"Data loader selected: {cfg['dataset']}")
    try:
        log.info("Attempting to use defined data loader")
        dataset = getattr(datasets, cfg["dataset"])(cfg, transform)
    except ImportError:
        log.info("Not a defined data loader... Attempting to use torchio loader")
        dataset = getattr(torchio.datasets, cfg["dataset"])(
            root=cfg["base_path"], transform=transform, download=True)

    for subject in random.sample(dataset._subjects, cfg["plot_number"]):
        plot_subject(
            subject,
            os.path.join(os.environ["OUTPUT_PATH"], cfg["save_plot_dir"],
                         subject["subject_id"]),
        )

    sampler = GridSampler(patch_size=cfg["patch_size"])
    samples_per_volume = len(sampler._compute_locations(dataset[0]))  # type: ignore
    with open_dict(cfg):
        cfg["size"] = dataset[0].spatial_shape

    val_size = max(1, int(0.2 * len(dataset)))
    test_set, train_set, val_set = split_dataset(
        dataset, [21, len(dataset) - val_size - 21, val_size])

    train_loader = __create_data_loader(
        train_set,
        queue_max_length=samples_per_volume * cfg["queue_length"],
        queue_samples_per_volume=samples_per_volume,
        sampler=sampler,
        verbose=log.level > 0,
        batch_size=cfg["batch"],
    )
    val_loader = __create_data_loader(
        val_set,
        queue_max_length=samples_per_volume * cfg["queue_length"],
        queue_samples_per_volume=samples_per_volume,
        sampler=sampler,
        verbose=log.level > 0,
        batch_size=cfg["batch"],
    )
    test_loader = __create_data_loader(
        test_set,
        queue_max_length=samples_per_volume * cfg["queue_length"],
        queue_samples_per_volume=samples_per_volume,
        sampler=sampler,
        verbose=log.level > 0,
        batch_size=cfg["batch"],
    )
    return {
        "data_loader_train": train_loader,
        "data_loader_val": val_loader,
        "data_loader_test": test_loader,
    }
def compose_transforms() -> Compose:
    print(f"{ctime()}: Setting up transformations...")
    """
    Our preprocessing options available in TorchIO are:

    * Intensity
        - NormalizationTransform
        - RescaleIntensity
        - ZNormalization
        - HistogramStandardization
    * Spatial
        - CropOrPad
        - Crop
        - Pad
        - Resample
        - ToCanonical

    We should read and experiment with these, but for now will just use a
    bunch with the default values.
    """
    preprocessors = [
        ToCanonical(p=1),
        ZNormalization(masking_method=None, p=1),  # alternately, use RescaleIntensity
    ]
    """
    Our augmentation options available in TorchIO are:

    * Spatial
        - RandomFlip
        - RandomAffine
        - RandomElasticDeformation
    * Intensity
        - RandomMotion
        - RandomGhosting
        - RandomSpike
        - RandomBiasField
        - RandomBlur
        - RandomNoise
        - RandomSwap

    We should read and experiment with these, but for now will just use a
    bunch with the default values.
    """
    augments = [
        RandomFlip(axes=(0, 1, 2), flip_probability=0.5),
        RandomAffine(image_interpolation="linear", p=0.8),  # default; compromise on speed + quality
        # RandomElasticDeformation(p=1),  # most processing-intensive; leave out for now, see results
        RandomMotion(),
        RandomSpike(),
        RandomBiasField(),
        RandomBlur(),
        RandomNoise(),
    ]
    transform = Compose(preprocessors + augments)
    print(f"{ctime()}: Transformations registered.")
    return transform
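# Hedged usage: the composed pipeline can be handed to a SubjectsDataset so
# the transforms run on the fly per sample access; `subjects` is a
# hypothetical list of tio.Subject instances and the alias import is an
# assumption.
import torchio as tio

dataset = tio.SubjectsDataset(subjects, transform=compose_transforms())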
def define_transform(transform, p, blur_std=4, motion_trans=10, motion_deg=10,
                     motion_num=2, biascoeff=0.5, noise_std=0.25,
                     affine_trans=10, affine_deg=10, elastic_disp=7.5,
                     resample_size=1, target_shape=0):
    # (1) try with different blur
    if transform == 'blur':
        transforms = [RandomBlur(std=(blur_std, blur_std), p=p, seed=None)]
        transforms = Compose(transforms)
    # (2) try with different motion artifacts
    if transform == 'motion':
        transforms = [
            RandomMotion(degrees=motion_deg, translation=motion_trans,
                         num_transforms=motion_num,
                         image_interpolation=Interpolation.LINEAR,
                         p=p, seed=None),
        ]
        transforms = Compose(transforms)
    # (3) with random bias fields
    if transform == 'biasfield':
        transforms = [RandomBiasField(coefficients=biascoeff, order=3, p=p, seed=None)]
        transforms = Compose(transforms)
    # (4) try with different noise artifacts
    if transform == 'noise':
        transforms = [RandomNoise(mean=0, std=(noise_std, noise_std), p=p, seed=None)]
        transforms = Compose(transforms)
    # (5) try with different warps (affine transformations)
    if transform == 'affine':
        transforms = [
            RandomAffine(scales=(1, 1), degrees=affine_deg, isotropic=False,
                         default_pad_value='otsu',
                         image_interpolation=Interpolation.LINEAR,
                         p=p, seed=None)
        ]
        transforms = Compose(transforms)
    # (6) try with different warps (elastic transformations)
    if transform == 'elastic':
        transforms = [
            # num_control_points must be an integer, but the original code
            # passed elastic_disp (default 7.5) there; elastic_disp is a
            # displacement in mm, so it belongs in max_displacement.
            RandomElasticDeformation(num_control_points=7,
                                     max_displacement=elastic_disp,
                                     locked_borders=2,
                                     image_interpolation=Interpolation.LINEAR,
                                     p=p, seed=None),
        ]
        transforms = Compose(transforms)
    if transform == 'resample':
        transforms = [
            Resample(target=resample_size,
                     image_interpolation=Interpolation.LINEAR, p=p),
            CropOrPad(target_shape=target_shape, p=1),
        ]
        transforms = Compose(transforms)
    return transforms
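# Hedged smoke test of the pipelines above on a synthetic subject; it assumes
# the same (legacy) TorchIO version as define_transform and uses illustrative
# shapes. 'resample' is skipped because it needs a nonzero target_shape.
import torch
import torchio as tio

subject = tio.Subject(img=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))
for name in ('blur', 'motion', 'biasfield', 'noise'):
    out = define_transform(name, p=1)(subject)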
sample = dataset[0]
transform = tio.Compose([histogram_transform, znorm_transform])
znormed = transform(sample)

fig, ax = plt.subplots(dpi=100)
plot_histogram(ax, znormed.mri.data, label='Z-normed', alpha=1)
ax.set_title('Intensity values of one sample after z-normalization')
ax.set_xlabel('Intensity')
ax.grid()

training_transform = Compose([
    ToCanonical(),
    # Resample(4),
    CropOrPad((112, 112, 48), padding_mode=0),  # reflect; original 112, 112, 48
    RandomMotion(num_transforms=6, image_interpolation='nearest', p=0.2),
    HistogramStandardization({'mri': landmarks}),
    RandomBiasField(p=0.2),
    RandomBlur(p=0.2),
    ZNormalization(masking_method=ZNormalization.mean),
    RandomFlip(axes=['inferior-superior'], flip_probability=0.2),
    # RandomNoise(std=0.5, p=0.2),
    RandomGhosting(intensity=1.8, p=0.2),
    # RandomNoise(),
    # RandomFlip(axes=(0,)),
    # OneOf({
    #     RandomAffine(): 0.8,
    #     RandomElasticDeformation(): 0.2,
    # }),
])
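# Hedged follow-up: apply the training pipeline to the same sample and plot
# the resulting histogram with the plot_histogram helper used above.
augmented = training_transform(sample)
fig, ax = plt.subplots(dpi=100)
plot_histogram(ax, augmented.mri.data, label='Augmented', alpha=1)
ax.set_title('Intensity values after the training transform')
ax.set_xlabel('Intensity')
ax.grid()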
def main():
    opt = parsing_data()

    print("[INFO] Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        raise Exception(
            "[INFO] No GPU found or wrong GPU id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    sys.stdout = f

    # SPLITS
    split_path = dict()
    split_path['control'] = opt.split_control
    split_path['augm_control'] = opt.split_control
    split_path['lesion'] = opt.split_lesion
    for dataset in DATASETS:
        assert os.path.isfile(split_path[dataset]), f'{dataset}: split not found'

    path_file = dict()
    path_file['control'] = opt.path_control
    path_file['augm_control'] = opt.path_control
    path_file['lesion'] = opt.path_lesion

    list_split = ['training', 'validation']
    paths_dict = dict()
    for dataset in DATASETS:
        df_split = pd.read_csv(split_path[dataset], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()

        paths_dict_dataset = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[dataset]:
                    subject_data.append(
                        Image(modality,
                              path_file[dataset] + subject + modality + '.nii.gz',
                              torchio.INTENSITY))
                if split in ['training', 'validation']:
                    subject_data.append(
                        Image('label',
                              path_file[dataset] + subject + 'Label.nii.gz',
                              torchio.LABEL))
                paths_dict_dataset[split].append(Subject(*subject_data))
            print(dataset, split, len(paths_dict_dataset[split]))
        paths_dict[dataset] = paths_dict_dataset

    # PREPROCESSING
    transform_training = dict()
    transform_validation = dict()
    for dataset in DATASETS:
        if dataset == 'augm_control':
            transform_training[dataset] = (
                Rescale((0, 1)),
                ToCanonical(),
                RandomMotion(),
                RandomGhosting(),
                RandomBiasField(),
                RandomBlur((0, 2)),
                ZNormalization(),
                CenterCropOrPad((144, 192, 144)),
                RandomAffine(scales=(0.9, 1.1), degrees=10),
                RandomNoise(std_range=(0, 0.10)),
                RandomFlip(axes=(0,)),
            )
            transform_training[dataset] = Compose(transform_training[dataset])
        else:
            transform_training[dataset] = (
                ToCanonical(),
                ZNormalization(),
                CenterCropOrPad((144, 192, 144)),
                RandomAffine(scales=(0.9, 1.1), degrees=10),
                RandomNoise(std_range=(0, 0.10)),
                RandomFlip(axes=(0,)),
            )
            transform_training[dataset] = Compose(transform_training[dataset])

        transform_validation[dataset] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 144)),
        )
        transform_validation[dataset] = Compose(transform_validation[dataset])

    transform = {
        'training': transform_training,
        'validation': transform_validation,
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = Generic_UNet(
        input_modalities=['T1', 'all'],
        base_num_features=32,
        num_classes=nb_classes,
        num_pool=4,
        num_conv_per_stage=2,
        feat_map_mul_on_downscale=2,
        conv_op=torch.nn.Conv3d,
        norm_op=torch.nn.InstanceNorm3d,
        norm_op_kwargs=norm_op_kwargs,
        nonlin=net_nonlin,
        nonlin_kwargs=net_nonlin_kwargs,
        convolutional_pooling=False,
        convolutional_upsampling=False,
        final_nonlin=torch.nn.Softmax(1),
        input_features={'T1': 1, 'all': 4},
    )

    print("[INFO] Training")
    train(paths_dict, model, transform, device, save_path, opt)

    sys.stdout = orig_stdout
    f.close()