def test_train_histogram(self):
    paths = [sample['image']['path'] for sample in self.dataset]
    # Use a function to mask
    HistogramStandardization.train(
        paths,
        masking_function=HistogramStandardization.mean,
        output_path=(self.dir / 'landmarks.txt'),
    )
    # Use a file to mask
    HistogramStandardization.train(
        paths,
        mask_path=self.dataset[0]['label']['path'],
        output_path=(self.dir / 'landmarks.npy'),
    )
Example 2
def create_normalization_file(use_controls, use_nofcd, mods):
    """
        Creates t1_landmark.npy file using torchio library for brain normalizations.
    """

    for j in range(1, mods + 1):
        fcd_paths = sorted(glob.glob(FCD_FOLDER + f'fcd_*.{j}.nii.gz'))
        nofcd_paths = sorted(glob.glob(FCD_FOLDER + f'nofcd_*.{j}.nii.gz'))
        control_paths = sorted(
            glob.glob(CONTROL_FOLDER + f'control_*.{j}.nii.gz'))

        mri_paths = fcd_paths
        if use_nofcd:
            mri_paths += nofcd_paths
        if use_controls:
            mri_paths += control_paths

        t1_landmarks_path = Path(f'./data/t1_landmarks_{j}.npy')

        # Skip modalities whose landmarks file already exists
        if t1_landmarks_path.is_file():
            continue

        t1_landmarks = HistogramStandardization.train(mri_paths)
        np.save(t1_landmarks_path, t1_landmarks, allow_pickle=True)
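The saved landmarks file can then be consumed by passing its path in a landmarks dictionary, keyed by the image name used in the subject. A minimal sketch under the older torchio API used throughout these examples; the subject key 'mri' and the file names are illustrative:

import torchio
from pathlib import Path
from torchio.transforms import HistogramStandardization, ZNormalization

# Map each image key to its trained landmarks file
landmarks_dict = {'mri': Path('./data/t1_landmarks_1.npy')}
histogram_transform = HistogramStandardization(landmarks_dict)
znorm_transform = ZNormalization(masking_method=ZNormalization.mean)
transform = torchio.transforms.Compose([histogram_transform, znorm_transform])

# Hypothetical subject; the scan file name is illustrative
subject = torchio.Subject({'mri': torchio.Image('./data/fcd_brains/fcd_1.1.nii.gz', torchio.INTENSITY)})
standardized = transform(subject)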
Example 3
    def test_transforms(self):
        landmarks_dict = dict(
            t1=np.linspace(0, 100, 13),
            t2=np.linspace(0, 100, 13),
        )
        random_transforms = (
            RandomFlip(axes=(0, 1, 2), flip_probability=1),
            RandomNoise(),
            RandomBiasField(),
            RandomElasticDeformation(proportion_to_augment=1),
            RandomAffine(),
            RandomMotion(proportion_to_augment=1),
        )
        intensity_transforms = (
            Rescale(),
            ZNormalization(),
            HistogramStandardization(landmarks_dict=landmarks_dict),
        )
        for transform in random_transforms:
            sample = self.get_sample()
            transformed = transform(sample)

        for transform in intensity_transforms:
            sample = self.get_sample()
            transformed = transform(sample)
Example 4
def create_normalization_file(use_controls, use_nofcd):
    """
    Creates a t1_landmarks.npy file using the torchio library, for brain
    intensity normalization.
    """
    FCD_FOLDER = './data/fcd_brains/'
    fcd_paths = sorted(FCD_FOLDER + name for name in os.listdir(FCD_FOLDER)
                       if 'nofcd' not in name)
    nofcd_paths = sorted(FCD_FOLDER + name for name in os.listdir(FCD_FOLDER)
                         if 'nofcd' in name)

    CONTROL_FOLDER = './data/control_brains/'
    control_paths = sorted(CONTROL_FOLDER + name
                           for name in os.listdir(CONTROL_FOLDER))

    mri_paths = fcd_paths
    if use_nofcd:
        mri_paths += nofcd_paths
    if use_controls:
        mri_paths += control_paths

    t1_landmarks_path = Path('./data/t1_landmarks.npy')

    # Remove any stale landmarks file and retrain from scratch
    if t1_landmarks_path.is_file():
        os.remove(t1_landmarks_path)

    t1_landmarks = HistogramStandardization.train(mri_paths)

    np.save(t1_landmarks_path, t1_landmarks, allow_pickle=True)
Example 5
def test_with_saved_dict(self):
    landmarks = np.linspace(0, 100, 13)
    landmarks_dict = {'image': landmarks}
    torch.save(landmarks_dict, self.dir / 'landmarks_dict.pth')
    landmarks_dict = torch.load(self.dir / 'landmarks_dict.pth')
    transform = HistogramStandardization(landmarks_dict)
    transform(self.dataset[0])
Example 6
    def __init__(self,
                 h,
                 w,
                 nb_of_dims,
                 latent_dim,
                 use_coronal,
                 use_sagital,
                 p,
                 experiment_name,
                 parallel,
                 model_weights='best_model.pth',
                 thr=.5):
        self.model = PatchModel(h, w, nb_of_dims, latent_dim, p).cuda()
        if parallel:
            self.model = nn.DataParallel(self.model)
        self.model.load_state_dict(torch.load(model_weights))
        self.model.eval()
        self.h = h
        self.w = w
        self.best_t = thr
        self.nb_of_dims = nb_of_dims
        self.use_coronal = use_coronal
        self.use_sagital = use_sagital
        self.experiment_name = experiment_name
        gray_matter_template = nib.load(
            './data/MNI152_T1_1mm_brain_gray.nii.gz')
        self.gmpm = gray_matter_template.get_fdata() > 0
        t1_landmarks = Path('./data/t1_landmarks.npy')
        landmarks_dict = {'mri': t1_landmarks}
        histogram_transform = HistogramStandardization(landmarks_dict)
        znorm_transform = ZNormalization(masking_method=ZNormalization.mean)

        self.transform = torchio.transforms.Compose(
            [histogram_transform, znorm_transform])
Example 7
def test_transforms(self):
    landmarks_dict = dict(
        t1=np.linspace(0, 100, 13),
        t2=np.linspace(0, 100, 13),
    )
    transforms = (
        CenterCropOrPad((9, 21, 30)),
        ToCanonical(),
        Resample((1, 1.1, 1.25)),
        RandomFlip(axes=(0, 1, 2), flip_probability=1),
        RandomMotion(proportion_to_augment=1),
        RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
        RandomSpike(),
        RandomNoise(),
        RandomBlur(),
        RandomSwap(patch_size=2, num_iterations=5),
        Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
        RandomBiasField(),
        Rescale((0, 1)),
        ZNormalization(masking_method='label'),
        HistogramStandardization(landmarks_dict=landmarks_dict),
        RandomElasticDeformation(proportion_to_augment=1),
        RandomAffine(),
        Pad((1, 2, 3, 0, 5, 6)),
        Crop((3, 2, 8, 0, 1, 4)),
    )
    transformed = self.get_sample()
    for transform in transforms:
        transformed = transform(transformed)
Example 8
def get_hist_landmarks(img_path, lndmrk_path, names):
    image_paths = [
        os.path.join(img_path, str(name), 'T1w', 'T1w_acpc_dc_restore_brain.nii.gz')
        for name in names
    ]
    landmarks = HistogramStandardization.train(image_paths,
                                               masking_function=cut_img_mask,
                                               output_path=lndmrk_path)
    return landmarks
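A sketch of how this helper might be invoked, assuming an HCP-style directory layout; the root path and subject IDs are hypothetical, and cut_img_mask is the masking function defined elsewhere in this code base:

subject_ids = ['100307', '100408']  # hypothetical HCP subject IDs
landmarks = get_hist_landmarks('/data/HCP', './data/landmarks.npy', subject_ids)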
Example 9
def training_network(landmarks, dataset, subjects):
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    training_split_ratio = 0.9
    num_subjects = len(dataset)
    num_training_subjects = int(training_split_ratio * num_subjects)

    training_subjects = subjects[:num_training_subjects]
    validation_subjects = subjects[num_training_subjects:]

    training_set = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)

    validation_set = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)

    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
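Since tio.SubjectsDataset is a regular PyTorch dataset, the returned sets can be wrapped directly in DataLoaders. A minimal sketch; the batch size and worker count are chosen arbitrarily:

import torch
import torchio as tio

training_set, validation_set = training_network(landmarks, dataset, subjects)
training_loader = torch.utils.data.DataLoader(
    training_set, batch_size=4, num_workers=2, shuffle=True)
validation_loader = torch.utils.data.DataLoader(
    validation_set, batch_size=4, num_workers=2)

batch = next(iter(training_loader))
images = batch['mri'][tio.DATA]  # tensor of shape (batch, channels, x, y, z)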
Example 10
def get_image_patches(input_img_name,
                      mod_nb,
                      gmpm=None,
                      use_coronal=False,
                      use_sagital=False,
                      input_mask_name=None,
                      augment=True,
                      h=16,
                      w=32,
                      coef=.2,
                      record_results=False,
                      pred_labels=None):
    subject_dict = {
        'mri': torchio.Image(input_img_name, torchio.INTENSITY),
    }

    # torchio normalization
    t1_landmarks = Path(f'./data/t1_landmarks_{mod_nb}.npy')
    landmarks_dict = {'mri': t1_landmarks}
    histogram_transform = HistogramStandardization(landmarks_dict)
    znorm_transform = ZNormalization(masking_method=ZNormalization.mean)
    transform = torchio.transforms.Compose(
        [histogram_transform, znorm_transform])
    subject = torchio.Subject(subject_dict)
    zimage = transform(subject)
    target_np = zimage['mri'].data[0].numpy()

    if input_mask_name is not None:
        mask = nib.load(input_mask_name)
        mask_np = (mask.get_fdata() > 0).astype('float')
    else:
        mask_np = np.zeros_like(target_np)

    all_patches, all_labels, side_mask_np, mid_mask_np = get_patches_and_labels(
        target_np,
        gmpm,
        mask_np,
        use_coronal=use_coronal,
        use_sagital=use_sagital,
        h=h,
        w=w,
        coef=coef,
        augment=augment,
        record_results=record_results,
        pred_labels=pred_labels)
    if not record_results:
        return all_patches, all_labels
    else:
        return side_mask_np, mid_mask_np
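A sketch of a call for training-patch extraction; the scan and mask file names are illustrative, and gmpm is the binarized gray-matter template also loaded in Example 6:

import nibabel as nib

gmpm = nib.load('./data/MNI152_T1_1mm_brain_gray.nii.gz').get_fdata() > 0
all_patches, all_labels = get_image_patches(
    './data/fcd_brains/fcd_1.1.nii.gz',  # illustrative input scan
    mod_nb=1,
    gmpm=gmpm,
    input_mask_name='./data/masks/mask_fcd_1.nii.gz',  # hypothetical mask path
    augment=True,
)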
Example 11
def test_train_histogram(self):
    paths = [subject.image.path for subject in self.dataset]
    # Use a function to mask
    HistogramStandardization.train(
        paths,
        masking_function=HistogramStandardization.mean,
        output_path=(self.dir / 'landmarks.txt'),
    )
    # Use a file to mask
    HistogramStandardization.train(
        paths,
        mask_path=self.dataset[0].label.path,
        output_path=(self.dir / 'landmarks.npy'),
    )
    # Use files to mask
    masks = [subject.label.path for subject in self.dataset]
    HistogramStandardization.train(
        paths,
        mask_path=masks,
        output_path=(self.dir / 'landmarks_masks.npy'),
    )
Example 12
def test_with_saved_array(self):
    landmarks = np.linspace(0, 100, 13)
    np.save(self.dir / 'landmarks.npy', landmarks)
    landmarks_dict = {'image': self.dir / 'landmarks.npy'}
    transform = HistogramStandardization(landmarks_dict)
    transform(self.dataset[0])
Example 13
sample = dataset[0]
transform = tio.Compose([histogram_transform, znorm_transform])
znormed = transform(sample)

fig, ax = plt.subplots(dpi=100)
plot_histogram(ax, znormed.mri.data, label='Z-normed', alpha=1)
ax.set_title('Intensity values of one sample after z-normalization')
ax.set_xlabel('Intensity')
ax.grid()

training_transform = Compose([
    ToCanonical(),
    #  Resample(4),
    CropOrPad((112, 112, 48), padding_mode=0),  # original settings: padding_mode='reflect', size (112, 112, 48)
    RandomMotion(num_transforms=6, image_interpolation='nearest', p=0.2),
    HistogramStandardization({'mri': landmarks}),
    RandomBiasField(p=0.2),
    RandomBlur(p=0.2),
    ZNormalization(masking_method=ZNormalization.mean),
    RandomFlip(axes=['inferior-superior'], flip_probability=0.2),
    #  RandomNoise(std=0.5, p=0.2),
    RandomGhosting(intensity=1.8, p=0.2),
    #  RandomNoise(),
    #  RandomFlip(axes=(0,)),
    #  OneOf({
    #      RandomAffine(): 0.8,
    #      RandomElasticDeformation(): 0.2,
    #  }),
])

validation_transform = Compose([
Example 14
def test_wrong_image_key(self):
    landmarks = np.linspace(0, 100, 13)
    landmarks_dict = {'wrong_key': landmarks}
    transform = HistogramStandardization(landmarks_dict)
    with self.assertRaises(KeyError):
        transform(self.dataset[0])
Example 15
def test_normalize(self):
    landmarks = np.linspace(0, 100, 13)
    landmarks_dict = {'image': landmarks}
    transform = HistogramStandardization(landmarks_dict)
    transform(self.dataset[0])
Example 16
import pandas as pd
import matplotlib.pyplot as plt

test = {'T1': {'csv_file':'/data/romain/HCPdata/Motion_brain_ms_train_hcp400.csv'} }
conditions = [("corr", "<", 0.98), ("|", "noise", "==", 1)]
subjects_dict, info = get_subject_list_and_csv_info_from_data_prameters(
    test, fpath_idx='filename', conditions=conditions, shuffle_order=True)

data_parameters = {'image': {'csv_file': '/data/romain/data_exemple/file_ms.csv', 'type': torchio.INTENSITY},
                   'label1': {'csv_file': '/data/romain/data_exemple/file_p1.csv', 'type': torchio.LABEL},
                   'label2': {'csv_file': '/data/romain/data_exemple/file_p2.csv', 'type': torchio.LABEL},
                   'label3': {'csv_file': '/data/romain/data_exemple/file_p3.csv', 'type': torchio.LABEL},
                   'sampler': {'csv_file': '/data/romain/data_exemple/file_mask.csv', 'type': torchio.SAMPLING_MAP}}
paths_dict, info = get_subject_list_and_csv_info_from_data_prameters(data_parameters) #,shuffle_order=False)

landmarks_file = '/data/romain/data_exemple/landmarks_hcp100.npy'
transforms = (HistogramStandardization(landmarks_file, mask_field_name='sampler'),)

# Each assignment below overrides the previous one; only the last tuple of
# transforms is actually composed.
transforms = (RandomElasticDeformation(num_control_points=8, proportion_to_augment=1,
                                       deformation_std=25, image_interpolation=Interpolation.BSPLINE),)
transforms = (RandomMotion(seed=42, degrees=0, translation=15, num_transforms=2,
                           verbose=True, proportion_to_augment=1),)
transforms = (RandomBiasField(coefficients_range=(-0.5, 0.5), order=3),)

transform = Compose(transforms)  # should be done in ImagesDataset
dataset = ImagesDataset(paths_dict, transform=transform)
dataset_not = ImagesDataset(paths_dict, transform=None)
dataload = torch.utils.data.DataLoader(dataset, num_workers=0, batch_size=1)
dataloadnot = torch.utils.data.DataLoader(dataset_not, num_workers=0, batch_size=1)

ddd = dataset[0]  # equivalent to next(iter(dataset))
ii = np.squeeze(ddd['image']['data'][0], axis=1)

ddno = dataset_not[0]
Example 17
def test_bad_paths_lengths(self):
    with self.assertRaises(ValueError):
        HistogramStandardization.train(
            [1, 2],
            mask_path=[1, 2, 3],
        )