def test_save_sample(self):
    """Saving a sample to disk drops exactly one dimension (the channel axis).

    Writes the 't1' image of the first sample to a NIfTI file, reloads it
    with nibabel, and checks that the in-memory tensor has one more
    dimension than the image written to disk.
    """
    dataset = ImagesDataset(self.subjects_list, transform=lambda x: x)
    _ = len(dataset)  # exercises __len__ for coverage
    sample = dataset[0]
    out_path = self.dir / 'test.nii.gz'
    dataset.save_sample(sample, {'t1': out_path})
    written = nib.load(str(out_path))
    rank_on_disk = len(written.shape)
    rank_in_memory = len(sample['t1'][DATA].shape)
    assert rank_in_memory == rank_on_disk + 1
# Build a synthetic motion time course: 6 motion parameters over nT samples,
# with a step of -15 on row 1 from sample 55 onward.
# NOTE(review): nT, time_points, sample and dataset come from outside this
# excerpt -- verify against the full script.
fitpars = np.zeros((6, nT))
fitpars[1, 55:] = -15
# fitpars[dim_modif, :45] = -7.5
# fitpars[dim_modif, 45:] = 7.5
# ov(sample["T1"]["data"][0], sample["T1"]["affine"])
# Apply the same fitpars with combine_axis=0, then combine_axis=1, saving each
# result so the two outputs can be compared.
transform = RandomMotionTimeCourseAffines(fitpars=fitpars, time_points=time_points, pct_oversampling=0.30, verbose=True, combine_axis=0)
transformed = transform(sample)
dataset.save_sample(transformed, dict(T1='/home/romain.valabregue/tmp/mot/t1_motion_axis0.nii.gz'))
sample = dataset[0]
transform = RandomMotionTimeCourseAffines(fitpars=fitpars, time_points=time_points, pct_oversampling=0.30, verbose=True, combine_axis=1)
transformed = transform(sample)
dataset.save_sample(transformed, dict(T1='/home/romain.valabregue/tmp/mot/t1_motion_axis1.nii.gz'))
sample = dataset[0]
# NOTE(review): the call below is cut off at the end of this excerpt; its
# remaining arguments continue beyond this view.
transform = RandomMotionTimeCourseAffines(fitpars=fitpars,
ddno = dataset_not[0] # dd= next(iter(dataload)) ddno = next(iter(dataloadnot)) ii = np.squeeze( dd['image']['data'][0,0,:],axis=1) iio = np.squeeze( ddno['image']['data'][0,0,:],axis=1) ov(ii) ov(iio) #save sample = dataset[0] output = dict(image=Path('/tmp/test_im.nii.gz'),label1=Path('/tmp/test_p1.nii.gz'),label2=Path('/tmp/test_p2.nii.gz')) dataset.save_sample(sample, output) #explore out_dir = '/data/romain/data_exemple/augment/' suj = [[ Image('T1','/data/romain/data_exemple/nifti_proc/PRISMA_MBB_DB/2017_03_07_DEV_236_MBB_DB_Pilote02/anat_S02_t1mpr_SAG_NSel_S176/cat12/s_S02_t1mpr_SAG_NSel_S176.nii.gz',INTENSITY), Image('mask','/data/romain/data_exemple/nifti_proc/PRISMA_MBB_DB/2017_03_07_DEV_236_MBB_DB_Pilote02/anat_S02_t1mpr_SAG_NSel_S176/cat12/mask_brain_erode_dilate.nii.gz',LABEL) ]] transforms = Compose((RandomBiasField(coefficients_range=(-0.5, 0.5),order=3, verbose=True), )) landmarks_file = '/data/romain/data_exemple/landmarks_hcp300_res100.txt' transforms = Compose((HistogramStandardization(landmarks_file, verbose=True, masking_method='mask'), Rescale(masking_method='mask',verbose=True))) transforms = HistogramEqualize(verbose=True,masking_method='mask') transforms = Rescale(verbose=True,masking_method='mask') torch.manual_seed(12)
$ torchio-transform ~/Dropbox/MRI/t1.nii.gz RandomMotion /tmp/t1_motion.nii.gz --seed 42 --kwargs "degrees=10 translation=10 num_transforms=3 proportion_to_augment=1"
"""
# NOTE(review): the triple quote above closes a module docstring whose opening
# is outside this excerpt; the CLI usage line is part of that docstring.
from pprint import pprint
from torchio import Image, ImagesDataset, transforms, INTENSITY, LABEL, Subject

# One subject holding a T1 intensity image and its brain segmentation label.
subject = Subject(
    Image('label', '~/Dropbox/MRI/t1_brain_seg.nii.gz', LABEL),
    Image('t1', '~/Dropbox/MRI/t1.nii.gz', INTENSITY),
)
subjects_list = [subject]
dataset = ImagesDataset(subjects_list)
sample = dataset[0]
# Seeded so the simulated motion is reproducible across runs.
transform = transforms.RandomMotion(
    seed=42,
    degrees=10,
    translation=10,
    num_transforms=3,
)
transformed = transform(sample)
# The transformed sample records the random parameters that were drawn.
pprint(transformed['t1']['random_motion_times'])
pprint(transformed['t1']['random_motion_degrees'])
pprint(transformed['t1']['random_motion_translation'])
dataset.save_sample(transformed, dict(t1='/tmp/t1_motion.nii.gz'))
dataset.save_sample(transformed, dict(label='/tmp/t1_brain_seg_motion.nii.gz'))
# NOTE(review): these Image(...) entries continue an argument list whose
# opening call is outside this excerpt.
    Image('resection_gray_matter_left', images_dir / gp('gray_matter_left_seg'), None),
    Image('resection_resectable_left', images_dir / gp('resectable_left_seg'), None),
    Image('resection_gray_matter_right', images_dir / gp('gray_matter_right_seg'), None),
    Image('resection_resectable_right', images_dir / gp('resectable_right_seg'), None),
)
# Sample resection volumes from the values stored in the CSV.
df_volumes = pd.read_csv(Path('~/episurg/volumes.csv').expanduser())
volumes = df_volumes.Volume.values
transform = RandomResection(
    volumes=volumes,
    # sigmas_range=(0.75, 0.75),
    keep_original=True,
    verbose=True,
    # seed=42,
)
dataset = ImagesDataset([subject])
transformed = dataset[0]
# Generate ten random resections of the same subject, saving each image/label pair.
for i in range(10):
    transformed = transform(dataset[0])
    dataset.save_sample(
        transformed,
        dict(
            image=f'/tmp/resected_{i}.nii.gz',
            # image_original='/tmp/resected_original.nii.gz',
            label=f'/tmp/resected_label_{i}.nii.gz',
        ),
    )
# Single-subject list in the nested-list format ImagesDataset expects.
suj = [[
    Image(
        'T1',
        '/home/romain/QCcnn/motion_cati_brain_ms/brain_s_S02_Sag_MPRAGE.nii.gz',
        'intensity'),
]]
# NOTE(review): `transforms`, `ov`, `gdir`, `gfile`, `get_parent_path` come
# from outside this excerpt.
dataset = ImagesDataset(suj, transform=transforms)
s = dataset[0]
ov(s['T1']['data'][0])
tt = dataset.get_transform()
plt.figure()
plt.plot(tt.fitpars.T)  # plot the fitted motion parameters over time
dataset.save_sample(s, dict(T1='/home/romain/QCcnn/motion_cati_brain_ms/toto10.nii'))
# look at distribution of metrics on simulated motion
dir_cache = '/network/lustre/dtlake01/opendata/data/ds000030/rrr/CNN_cache'
dd = gdir(dir_cache, 'mask_mv')
fr = gfile(dd, 'resul')
name_res = get_parent_path(dd)[1]
res = [pd.read_csv(ff) for ff in fr]
# Each rebinding narrows the set of metric columns of interest.
sell_col = res[0].keys()
sell_col = [
    'L1', 'MSE', 'corr', 'mean_DispP', 'rmse_Disp', 'rmse_DispTF', 'ssim',
    'ssim_all', 'ssim_brain', 'ssim_p1', 'ssim_p2'
]
# NOTE(review): this last rebinding is cut off mid-list at the end of the excerpt.
sell_col = [
    'L1', 'MSE', 'corr', 'ssim', 'ssim_all', 'ssim_brain', 'ssim_p1', 'ssim_p2'
from pprint import pprint
from torchio import ImagesDataset, transforms, INTENSITY

# Two intensity images for one subject, given in the dict-based path format.
paths = [{
    't1': {'path': '~/Dropbox/MRI/t1.nii.gz', 'type': INTENSITY},
    'colin': {'path': '/tmp/colin27_t1_tal_lin.nii.gz', 'type': INTENSITY},
}]
dataset = ImagesDataset(paths)
sample = dataset[0]

# Seeded motion simulation so the run is reproducible.
transform = transforms.RandomMotion(
    seed=42,
    degrees=20,
    translation=15,
    num_transforms=3,
    verbose=True,
)
transformed = transform(sample)

# The transformed sample records the sampled motion time points.
pprint(transformed['t1']['random_motion_times'])

# Save each motion-corrupted image under its own key.
dataset.save_sample(transformed, {'t1': '/tmp/t1_motion.nii.gz'})
dataset.save_sample(transformed, {'colin': '/tmp/colin_motion.nii.gz'})
from torchio.transforms import Lambda
from torchio import Image, ImagesDataset, INTENSITY, LABEL, Subject

# One subject holding a T1 image and its brain segmentation.
subject = Subject(
    Image('label', '~/Dropbox/MRI/t1_brain_seg.nii.gz', LABEL),
    Image('t1', '~/Dropbox/MRI/t1.nii.gz', INTENSITY),
)
subjects_list = [subject]
dataset = ImagesDataset(subjects_list)
sample = dataset[0]

# Scale intensities by -1.5; types_to_apply restricts this to INTENSITY images.
transform = Lambda(lambda x: -1.5 * x, types_to_apply=INTENSITY)
transformed = transform(sample)
dataset.save_sample(transformed, dict(t1='/tmp/t1_lambda.nii'))
# Step-corruption motion experiment over several slab thicknesses.
# NOTE(review): statement nesting below was reconstructed from a flattened
# excerpt (loop/if bodies inferred from use of `zs`) -- verify against the
# original file. `dico_params`, `z_slice`, `suj`, `dirpath`, `do_plot` come
# from outside this excerpt.
fp = corrupt_data(80, sigma=1, method='step', amplitude=20)
# fp is immediately overwritten, so the corrupt_data result above is unused here.
fp = np.zeros([6, 200])
fp[3, :] = 20
dico_params['fitpars'] = fp
for zs in z_slice:
    t = RandomMotionFromTimeCourse(**dico_params)
    dataset = ImagesDataset(suj, transform=Compose(
        (CenterCropOrPad(target_shape=(182, 218, zs)), t)))
    sample = dataset[0]
    fout = dirpath[0] + '/mot_step_rot_xN{}'.format(zs)
    dataset.save_sample(sample, dict(image=fout + '.nii'))
    if do_plot:
        # check the fitpars before and after interpolation
        fit_pars = t.fitpars
        fig = plt.figure('fitpars_{}'.format(zs))
        plt.plot(fit_pars.T)
        fit_pars = t.fitpars_interp
        fig = plt.figure('fit_interp_{}'.format(zs))
        plt.plot(fit_pars.reshape(6, -1).T)
sys.exit(1)
# 3D case
# def rotate_coordinates():
import math
# NOTE(review): this entry closes a nested subject list (`suj = [[ ...`)
# opened before this excerpt.
    Image('mask', '/data/romain/HCPdata/suj_100307/brain_mT1w_1mm.nii', LABEL)
]]
# Deterministic single-motion simulation along frequency-encoding dim 0.
t = MotionSimTransform(std_rotation_angle=3, std_translation=2, nufft=True,
                       proc_scale=0, verbose=True, freq_encoding_dim=(0, ),
                       mvt_param=[0, 0, 0, 1, 10, 0])
transforms = Compose([t])
dataset = ImagesDataset(suj, transform=transforms)
sample = dataset[0]
dataset.save_sample(sample, dict(T1='/tmp/toto_no_center.nii'))
sample_orig = dataset[0]
# Re-apply the transform to a fresh copy and save each result.
# NOTE(review): loop nesting reconstructed from a flattened excerpt; `out_dir`
# and `deepcopy` come from outside this view.
for i in range(0, 1):
    sample = deepcopy(sample_orig)
    transformed = transforms(sample)
    name = 'mot'
    path = out_dir + f'{i}_{name}.nii.gz'
    dataset.save_sample(transformed, dict(T1=path))

########################################################### T E S T S with txt
import pandas as pd
import nibabel as nb