def test_no_changes(self):
    """ToCanonical applied to an already-canonical subject must be a no-op."""
    canonical = ToCanonical()(self.sample_subject)
    self.assertTensorEqual(
        canonical.t1.data,
        self.sample_subject.t1.data,
    )
    self.assertTensorEqual(
        canonical.t1.affine,
        self.sample_subject.t1.affine,
    )
def test_transforms(self):
    """Smoke test: run every available transform once, in sequence.

    Order is significant (e.g. cropping first keeps the sample small);
    the test only checks that no transform raises.
    """
    landmarks_dict = {
        't1': np.linspace(0, 100, 13),
        't2': np.linspace(0, 100, 13),
    }
    pipeline = [
        CenterCropOrPad((9, 21, 30)),
        ToCanonical(),
        Resample((1, 1.1, 1.25)),
        RandomFlip(axes=(0, 1, 2), flip_probability=1),
        RandomMotion(proportion_to_augment=1),
        RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
        RandomSpike(),
        RandomNoise(),
        RandomBlur(),
        RandomSwap(patch_size=2, num_iterations=5),
        Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
        RandomBiasField(),
        Rescale((0, 1)),
        ZNormalization(masking_method='label'),
        HistogramStandardization(landmarks_dict=landmarks_dict),
        RandomElasticDeformation(proportion_to_augment=1),
        RandomAffine(),
        Pad((1, 2, 3, 0, 5, 6)),
        Crop((3, 2, 8, 0, 1, 4)),
    ]
    sample = self.get_sample()
    for step in pipeline:
        sample = step(sample)
def test_LAS_to_RAS(self):
    """Negating the first affine direction ('LAS') must be undone by ToCanonical."""
    self.sample.t1.affine[0, 0] = -1  # Change orientation to 'LAS'
    canonical = ToCanonical()(self.sample)
    self.assertEqual(canonical.t1.orientation, ('R', 'A', 'S'))
    # Reorienting LAS -> RAS flips the first spatial axis (data axis 1,
    # axis 0 being channels).
    flipped = self.sample.t1.data.numpy()[:, ::-1, :, :]
    self.assertTensorEqual(canonical.t1.data, flipped)
    # The canonical affine is identity except for the translation that
    # compensates the flip along the first spatial axis.
    expected_affine = np.eye(4)
    expected_affine[0, -1] = 1 - self.sample.t1.spatial_shape[0]
    self.assertTensorEqual(canonical.t1.affine, expected_affine)
def training_network(landmarks, dataset, subjects):
    """Split *subjects* 90/10 and wrap each split in a SubjectsDataset.

    Both splits share the same preprocessing chain; the training split
    additionally applies random augmentations.
    """
    spatial_augment = OneOf({
        RandomAffine(): 0.8,
        RandomElasticDeformation(): 0.2,
    })
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0,)),
        spatial_augment,
    ])
    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    # 90% of the subjects go to training, the remainder to validation.
    num_training_subjects = int(0.9 * len(dataset))
    training_set = tio.SubjectsDataset(
        subjects[:num_training_subjects],
        transform=training_transform,
    )
    validation_set = tio.SubjectsDataset(
        subjects[num_training_subjects:],
        transform=validation_transform,
    )
    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
def get_brats(
        data_root='/scratch/weina/dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/',
        fold=1,
        seed=None,
        **kwargs):
    """Build the train/val BratsIter data iterators for one CV fold.

    Args:
        data_root: Root directory of the BraTS 2019 training data.
        fold: Cross-validation fold index; selects the CSV split files.
        seed: RNG seed. When None (the default) it resolves, at call
            time, to the distributed rank if torch.distributed is
            initialized, else 0.
        **kwargs: Accepted for signature compatibility; currently unused.

    Returns:
        (train, val) pair of BratsIter instances.
    """
    if seed is None:
        # BUG FIX: the original default argument evaluated
        # torch.distributed.get_rank() at import time, before the process
        # group could be initialized, so every rank silently fell back to
        # seed 0. Resolving the default lazily picks up the real rank.
        seed = (torch.distributed.get_rank()
                if torch.distributed.is_initialized() else 0)
    logging.debug("BratsIter:: fold = {}, seed = {}".format(fold, seed))

    # Target geometry: native BraTS volumes are 155 x 240 x 240 voxels;
    # the spacing maps each axis onto the requested `input_size`.
    d_size, h_size, w_size = 155, 240, 240
    input_size = [7, 223, 223]
    spacing = (d_size / input_size[0],
               h_size / input_size[1],
               w_size / input_size[2])

    Mean, Std, Max = read_brats_mean(fold, data_root)
    normalize = transforms.Normalize(mean=Mean, std=Std)

    training_transform = Compose([
        # RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # RandomMotion(),
        # HistogramStandardization({MRI: landmarks}),
        RandomBiasField(),
        # ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        ToCanonical(),
        Resample(spacing),
        # CropOrPad((48, 60, 48)),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
        normalize,
    ])
    val_transform = Compose([Resample(spacing), normalize])

    train = BratsIter(
        csv_file=os.path.join(data_root, 'IDH_label',
                              'train_fold_{}.csv'.format(fold)),
        brats_path=os.path.join(data_root, 'all'),
        brats_transform=training_transform,
        shuffle=True)
    val = BratsIter(
        csv_file=os.path.join(data_root, 'IDH_label',
                              'val_fold_{}.csv'.format(fold)),
        brats_path=os.path.join(data_root, 'all'),
        brats_transform=val_transform,
        shuffle=False)
    return train, val
def to_canonical_transform(parameters=None):
    """Factory for a ToCanonical transform.

    *parameters* is accepted only so the signature matches the other
    transform factories; it is not used.
    """
    return ToCanonical()
OneOf,
Compose,
)

# Target geometry: native BraTS volumes are 155 x 240 x 240 voxels;
# `spacing` resamples each axis so the volume maps onto `input_size`.
d_size, h_size, w_size = 155, 240, 240
input_size = [7, 223, 223]
spacing = (
    d_size / input_size[0],
    h_size / input_size[1],
    w_size / input_size[2],
)

# Augmentation pipeline: intensity artefacts, canonical reorientation,
# resampling, then random spatial deformations.
training_transform = Compose([
    # RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
    # RandomMotion(),
    # HistogramStandardization({MRI: landmarks}),
    RandomBiasField(),
    # ZNormalization(masking_method=ZNormalization.mean),
    RandomNoise(),
    ToCanonical(),
    Resample(spacing),
    # CropOrPad((48, 60, 48)),
    RandomFlip(axes=(0, )),
    OneOf({
        RandomAffine(): 0.8,
        RandomElasticDeformation(): 0.2,
    }),
])

fold = 1
data_root = '../../dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/'

# Fix RNG state (CPU and CUDA) for reproducibility.
torch.manual_seed(0)
torch.cuda.manual_seed(0)
def main():
    """Run segmentation inference on the 'inference' + 'validation' splits.

    Reads the split CSV, builds torchio Subjects from the image files on
    disk, loads a Generic_UNet checkpoint, and delegates the actual
    sliding/padded inference to `inference_padding`.
    """
    opt = parsing_data()

    print("[INFO] Reading data.")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        # No CPU fallback: this script requires a GPU.
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    # Checkpoint template, filled with the requested epoch number below.
    checkpoint_path = os.path.join(fold_dir, 'models', './CP_{}.pth')
    checkpoint_path = checkpoint_path.format(opt.epoch_infe)
    assert os.path.isfile(checkpoint_path), 'no checkpoint found'

    output_path = opt.output_dir
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # Output template; presumably formatted per subject downstream.
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # SPLITS
    split_path = opt.dataset_split
    assert os.path.isfile(split_path), 'split file not found'
    print('Split file found: {}'.format(split_path))

    # Reading csv file: column 0 = subject id, column 1 = split name.
    df_split = pd.read_csv(split_path, header=None)
    list_file = dict()
    list_split = ['inference', 'validation']
    for split in list_split:
        list_file[split] = df_split[df_split[1].isin([split.lower()
                                                      ])][0].tolist()

    # filing paths: one Subject per id, one Image per modality found on disk.
    add_name = '_sym' if opt.add_sym else ''
    paths_dict = {split: [] for split in list_split}
    for split in list_split:
        for subject in list_file[split]:
            subject_data = []
            for modality in MODALITIES:
                subject_modality = (opt.path_file + subject + modality
                                    + add_name + '.nii.gz')
                # Missing modality files are silently skipped.
                if os.path.isfile(subject_modality):
                    subject_data.append(
                        Image(modality, subject_modality, torchio.INTENSITY))
            if len(subject_data) > 0:
                paths_dict[split].append(Subject(*subject_data))

    # Deterministic preprocessing only — no augmentation at inference time.
    transform_inference = (
        ToCanonical(),
        ZNormalization(),
    )
    transform_inference = Compose(transform_inference)

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model.")
    model = Generic_UNet(input_modalities=['T1', 'all'],
                         base_num_features=32,
                         num_classes=opt.nb_classes,
                         num_pool=4,
                         num_conv_per_stage=2,
                         feat_map_mul_on_downscale=2,
                         conv_op=torch.nn.Conv3d,
                         norm_op=torch.nn.InstanceNorm3d,
                         norm_op_kwargs=norm_op_kwargs,
                         nonlin=net_nonlin,
                         nonlin_kwargs=net_nonlin_kwargs,
                         convolutional_pooling=False,
                         convolutional_upsampling=False,
                         # Raw logits out: identity final nonlinearity.
                         final_nonlin=lambda x: x,
                         input_features={
                             'T1': 1,
                             'all': 4
                         })

    paths_inf = paths_dict['inference'] + paths_dict['validation']

    inference_padding(paths_inf, model, transform_inference, device,
                      output_path, checkpoint_path, opt)
subjects.append(subject)
# NOTE(review): the line above looks like the dangling tail of a
# `do_subject` body whose header is outside this chunk — confirm
# against the full file.

images_dir = Path(predict_dir)
labels_dir = Path(labels_dir)
image_paths = sorted(images_dir.glob('*.mhd'))
label_paths = sorted(labels_dir.glob('*/*.mhd'))

subjects = []
# Populates `subjects` in place from the matched image/label paths.
do_subject(image_paths, label_paths)
training_set = tio.SubjectsDataset(subjects)

toc = ToCanonical()
for i, subj in enumerate(training_set.subjects):
    # Ground truth is read before reorientation, prediction after —
    # NOTE(review): verify this asymmetry is intentional.
    gt = subj['gt'][tio.DATA]
    subj = toc(subj)
    pred = subj['pred'][tio.DATA]  # .permute(0,1,3,2)
    # preds.append(pred)
    # gts.append(gt)

# Only the last subject's tensors are converted here.
preds = pred.numpy()
gts = gt.numpy()
def compose_transforms() -> Compose:
    """Build the full torchio pipeline: preprocessing then augmentation.

    Preprocessing (intensity options also include NormalizationTransform,
    RescaleIntensity, HistogramStandardization; spatial options include
    CropOrPad, Crop, Pad, Resample): here just ToCanonical + ZNormalization.

    Augmentation (spatial options: RandomFlip, RandomAffine,
    RandomElasticDeformation; intensity options: RandomMotion,
    RandomGhosting, RandomSpike, RandomBiasField, RandomBlur, RandomNoise,
    RandomSwap): a bunch with mostly default values, to be tuned later.
    """
    print(f"{ctime()}: Setting up transformations...")
    preprocessing = [
        ToCanonical(p=1),
        # alternately, use RescaleIntensity
        ZNormalization(masking_method=None, p=1),
    ]
    augmentation = [
        RandomFlip(axes=(0, 1, 2), flip_probability=0.5),
        # default, compromise on speed + quality
        RandomAffine(image_interpolation="linear", p=0.8),
        # this will be most processing intensive, leave out for now, see results
        # RandomElasticDeformation(p=1),
        RandomMotion(),
        RandomSpike(),
        RandomBiasField(),
        RandomBlur(),
        RandomNoise(),
    ]
    pipeline = Compose(preprocessing + augmentation)
    print(f"{ctime()}: Transformations registered.")
    return pipeline
def test_no_changes(self):
    """An already-canonical sample must pass through ToCanonical unchanged."""
    result = ToCanonical()(self.sample)
    assert_array_equal(result.t1.data, self.sample.t1.data)
    assert_array_equal(result.t1.affine, self.sample.t1.affine)
def main():
    """Train a domain-adaptation Generic_UNet on source/target MRI data.

    Builds per-domain torchio Subjects from split CSVs, sets up
    training/validation transforms, redirects stdout to a log file in the
    model directory, and hands off to `train`.
    """
    opt = parsing_data()

    print("[INFO]Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        # No CPU fallback: this script requires a GPU.
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    # Checkpoint template, formatted with the epoch number at save time.
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING: redirect stdout to out.txt, or out_<n>.txt if it exists,
    # restoring the original stream at the end of this function.
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(
                os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    sys.stdout = f

    # SPLITS: one CSV per domain; column 0 = subject id, column 1 = split.
    split_path_source = opt.dataset_split_source
    assert os.path.isfile(split_path_source), 'source file not found'
    split_path_target = opt.dataset_split_target
    assert os.path.isfile(split_path_target), 'target file not found'

    split_path = dict()
    split_path['source'] = split_path_source
    split_path['target'] = split_path_target

    path_file = dict()
    path_file['source'] = opt.path_source
    path_file['target'] = opt.path_target

    list_split = [
        'training',
        'validation',
    ]
    paths_dict = dict()

    for domain in ['source', 'target']:
        df_split = pd.read_csv(split_path[domain], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()

        # One Subject per id: one intensity Image per modality plus a
        # 'label' Image for training/validation subjects.
        paths_dict_domain = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[domain]:
                    subject_data.append(
                        Image(
                            modality,
                            path_file[domain] + subject + modality + '.nii.gz',
                            torchio.INTENSITY))
                if split in ['training', 'validation']:
                    subject_data.append(
                        Image('label',
                              path_file[domain] + subject + 'Label.nii.gz',
                              torchio.LABEL))
                #subject_data[] =
                paths_dict_domain[split].append(Subject(*subject_data))
            print(domain, split, len(paths_dict_domain[split]))
        paths_dict[domain] = paths_dict_domain

    # PREPROCESSING: identical deterministic steps for both phases;
    # training adds random affine/noise/flip augmentation.
    transform_training = dict()
    transform_validation = dict()
    for domain in ['source', 'target']:
        transform_training[domain] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 48)),
            RandomAffine(scales=(0.9, 1.1), degrees=10),
            RandomNoise(std_range=(0, 0.10)),
            RandomFlip(axes=(0, )),
        )
        transform_training[domain] = Compose(transform_training[domain])

        transform_validation[domain] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 48)),
        )
        transform_validation[domain] = Compose(transform_validation[domain])

    transform = {
        'training': transform_training,
        'validation': transform_validation
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = Generic_UNet(input_modalities=MODALITIES_TARGET,
                         base_num_features=32,
                         num_classes=nb_classes,
                         num_pool=4,
                         num_conv_per_stage=2,
                         feat_map_mul_on_downscale=2,
                         conv_op=torch.nn.Conv3d,
                         norm_op=torch.nn.InstanceNorm3d,
                         norm_op_kwargs=norm_op_kwargs,
                         nonlin=net_nonlin,
                         nonlin_kwargs=net_nonlin_kwargs,
                         convolutional_pooling=False,
                         convolutional_upsampling=False,
                         # Softmax over the channel dimension as final layer.
                         final_nonlin=torch.nn.Softmax(1))

    print("[INFO] Training")
    train(paths_dict, model, transform, device, save_path, opt)

    # Restore stdout and close the log file.
    sys.stdout = orig_stdout
    f.close()
def main():
    """Train a UNet2D5 for scribble-supervised domain adaptation.

    Source subjects carry full 't1_seg' labels; target subjects carry
    't2scribble_cor' scribbles. Builds per-domain Subjects from split
    CSVs, sets up transforms, and hands off to `train`.
    """
    opt = parsing_data()

    print("[INFO] Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        # No CPU fallback: this script requires a GPU.
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    # Checkpoint template, formatted with the epoch number at save time.
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING: pick a fresh out[_n].txt name (stdout redirection itself
    # is currently disabled below).
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(
                os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    #sys.stdout = f

    print("[INFO] Hyperparameters")
    print('Alpha: {}'.format(opt.alpha))
    print('Beta: {}'.format(opt.beta))
    print('Beta_DA: {}'.format(opt.beta_da))
    print('Weight Reg: {}'.format(opt.weight_crf))

    # SPLITS: one CSV per domain; column 0 = subject id, column 1 = split.
    split_path_source = opt.dataset_split_source
    assert os.path.isfile(split_path_source), 'source file not found'
    split_path_target = opt.dataset_split_target
    assert os.path.isfile(split_path_target), 'target file not found'

    split_path = dict()
    split_path['source'] = split_path_source
    split_path['target'] = split_path_target

    path_file = dict()
    path_file['source'] = opt.path_source
    path_file['target'] = opt.path_target

    list_split = ['training', 'validation', 'inference']
    paths_dict = dict()

    for domain in ['source', 'target']:
        df_split = pd.read_csv(split_path[domain], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()
        # Validation subjects are also run at inference time.
        list_file['inference'] += list_file['validation']

        # One Subject per id: intensity Images for each modality, plus a
        # full label (source) or a scribble label (target) for
        # training/validation subjects.
        paths_dict_domain = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[domain]:
                    subject_data.append(
                        Image(
                            modality,
                            path_file[domain] + subject + modality + '.nii.gz',
                            torchio.INTENSITY))
                if split in ['training', 'validation']:
                    if domain == 'source':
                        subject_data.append(
                            Image(
                                'label',
                                path_file[domain] + subject + 't1_seg.nii.gz',
                                torchio.LABEL))
                    else:
                        subject_data.append(
                            Image(
                                'scribble',
                                path_file[domain] + subject +
                                't2scribble_cor.nii.gz',
                                torchio.LABEL))
                #subject_data[] =
                paths_dict_domain[split].append(Subject(*subject_data))
            print(domain, split, len(paths_dict_domain[split]))
        paths_dict[domain] = paths_dict_domain

    # PREPROCESSING: identical deterministic steps for both phases;
    # training adds random affine/noise/flip augmentation.
    transform_training = dict()
    transform_validation = dict()
    for domain in ['source', 'target']:
        transformations = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((288, 128, 48)),
            RandomAffine(scales=(0.9, 1.1), degrees=10),
            RandomNoise(std_range=(0, 0.10)),
            RandomFlip(axes=(0, )),
        )
        transform_training[domain] = Compose(transformations)

    for domain in ['source', 'target']:
        transformations = (ToCanonical(), ZNormalization(),
                           CenterCropOrPad((288, 128, 48)))
        transform_validation[domain] = Compose(transformations)

    transform = {
        'training': transform_training,
        'validation': transform_validation
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = UNet2D5(input_channels=1,
                    base_num_features=16,
                    num_classes=NB_CLASSES,
                    num_pool=4,
                    conv_op=nn.Conv3d,
                    norm_op=nn.InstanceNorm3d,
                    norm_op_kwargs=norm_op_kwargs,
                    nonlin=net_nonlin,
                    nonlin_kwargs=net_nonlin_kwargs)

    print("[INFO] Training")
    #criterion = DC_and_CE_loss({}, {})
    criterion = DC_CE(NB_CLASSES)
    train(paths_dict, model, transform, criterion, device, save_path, opt)