Example 1
    def __getitem__(self, index: int) -> Subject:
        if not isinstance(index, int):
            raise ValueError(f'Index "{index}" must be int, not {type(index)}')

        if self.load_from_dir:
            subject = torch.load(self._subjects[index])
            if self.add_to_load is not None:
                images = subject.get_images()
                image_path = images[0]['path']
                if 'original' in self.add_to_load:
                    # Add the untransformed image back into the subject
                    ss = Subject(image=Image(image_path, INTENSITY))
                    subject['original'] = ss['image']

                    if self.add_to_load == 'original':
                        add_to_load = None
                    else:
                        # 'original<suffix>' also requests an extra volume (e.g. a mask)
                        add_to_load = self.add_to_load[8:]
                else:
                    add_to_load = self.add_to_load

                if add_to_load is not None:
                    image_add = gfile(get_parent_path(image_path), self.add_to_load_regexp)[0]
                    ss = Subject(image=Image(image_add, LABEL))
                    # Re-apply any elastic deformation recorded in the subject's
                    # history so the extra volume stays aligned with it
                    for hist_entry in subject.history:
                        if 'RandomElasticDeformation' in hist_entry[0]:
                            from torchio.transforms import RandomElasticDeformation
                            num_cp = hist_entry[1]['coarse_grid'].shape[1]
                            rr = RandomElasticDeformation(num_control_points=num_cp)
                            ss = rr.apply_given_transform(ss, hist_entry[1]['coarse_grid'])

                    subject[add_to_load] = ss['image']
        else:
            subject = self._subjects[index]
            subject = copy.deepcopy(subject)  # cheap since images not loaded yet
            if self.load_getitem:
                subject.load()

        # Apply transform (this is usually the bottleneck)
        if self._transform is not None:
            subject = self._transform(subject)

        if self.save_to_dir is not None:
            res_dir = self.save_to_dir
            fname = res_dir + '/subject{:05d}'.format(index)
            if 'image_orig' in subject: subject.pop('image_orig')
            torch.save(subject, fname + '_subject.pt')

        return subject
    def transform(self):

        if hp.mode == '3d':
            if hp.aug:
                training_transform = Compose([
                    # ToCanonical(),
                    CropOrPad(hp.crop_or_pad_size, padding_mode='reflect'),
                    # RandomMotion(),
                    RandomBiasField(),
                    ZNormalization(),
                    RandomNoise(),
                    RandomFlip(axes=(0, )),
                    OneOf({
                        RandomAffine(): 0.8,
                        RandomElasticDeformation(): 0.2,
                    }),
                ])
            else:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                               hp.crop_or_pad_size),
                              padding_mode='reflect'),
                    ZNormalization(),
                ])
        elif hp.mode == '2d':
            if hp.aug:
                training_transform = Compose([
                    CropOrPad(hp.crop_or_pad_size, padding_mode='reflect'),
                    # RandomMotion(),
                    RandomBiasField(),
                    ZNormalization(),
                    RandomNoise(),
                    RandomFlip(axes=(0, )),
                    OneOf({
                        RandomAffine(): 0.8,
                        RandomElasticDeformation(): 0.2,
                    }),
                ])
            else:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                               hp.crop_or_pad_size),
                              padding_mode='reflect'),
                    ZNormalization(),
                ])

        else:
            raise ValueError(f'Unknown mode: {hp.mode!r}')

        return training_transform
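
For context, here is a minimal, self-contained sketch of applying a pipeline like the one built above to a synthetic subject; the 64-voxel volume, the `image` key, and the 48-voxel crop are illustrative assumptions rather than values taken from the snippet.

import torch
import torchio as tio

# Synthetic single-channel 3D volume wrapped in a torchio Subject
subject = tio.Subject(image=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))

pipeline = tio.Compose([
    tio.CropOrPad((48, 48, 48), padding_mode='reflect'),
    tio.ZNormalization(),
    tio.RandomNoise(),
    tio.RandomFlip(axes=(0,)),
    tio.OneOf({
        tio.RandomAffine(): 0.8,
        # small displacement to stay below half the control-point spacing of a 48-voxel crop
        tio.RandomElasticDeformation(max_displacement=4): 0.2,
    }),
])

augmented = pipeline(subject)         # returns a new, transformed Subject
print(augmented.image.data.shape)     # torch.Size([1, 48, 48, 48])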
Example 3
    def test_transforms(self):
        landmarks_dict = dict(
            t1=np.linspace(0, 100, 13),
            t2=np.linspace(0, 100, 13),
        )
        random_transforms = (
            RandomFlip(axes=(0, 1, 2), flip_probability=1),
            RandomNoise(),
            RandomBiasField(),
            RandomElasticDeformation(proportion_to_augment=1),
            RandomAffine(),
            RandomMotion(proportion_to_augment=1),
        )
        intensity_transforms = (
            Rescale(),
            ZNormalization(),
            HistogramStandardization(landmarks_dict=landmarks_dict),
        )
        for transform in random_transforms:
            sample = self.get_sample()
            transformed = transform(sample)

        for transform in intensity_transforms:
            sample = self.get_sample()
            transformed = transform(sample)
Example 4
    def __init__(self, transform1, m1, p1, transform2, m2, p2):
        ranges = {
            'flip': np.zeros(10),
            'affine': np.linspace(0, 180, 10),
            'noise': np.linspace(0, 0.5, 10),
            'blur': np.arange(10),
            'elasticD': np.zeros(10)
        }

        transforms = {
            'flip': lambda magnitude, p: RandomFlip(p=p),
            'affine': lambda magnitude, p: RandomAffine(degrees=magnitude, p=p),
            'noise': lambda magnitude, p: RandomNoise(std=magnitude, p=p),
            'blur': lambda magnitude, p: RandomBlur(std=magnitude, p=p),
            'elasticD': lambda magnitude, p: RandomElasticDeformation(p=p)
        }

        self.transform1 = transforms[transform1]
        self.t1_input = transform1
        self.m1 = ranges[transform1][m1]
        self.m1_input = m1
        self.p1 = p1

        self.transform2 = transforms[transform2]
        self.t2_input = transform2
        self.m2 = ranges[transform2][m2]
        self.m2_input = m2
        self.p2 = p2

        self.kappa = 0.0
Example 5
def get_motion_transform(type='motion1'):
    if 'motion1' in type:
        dico_params_mot = {
            "maxDisp": (1, 4),
            "maxRot": (1, 4),
            "noiseBasePars": (5, 20, 0.8),
            "swallowFrequency": (2, 6, 0.5),
            "swallowMagnitude": (3, 4),
            "suddenFrequency": (2, 6, 0.5),
            "suddenMagnitude": (3, 4),
            "verbose": False,
            "keep_original": True,
            "proba_to_augment": 1,
            "preserve_center_pct": 0.1,
            "compare_to_original": True,
            "oversampling_pct": 0,
            "correct_motion": False
        }

    if 'elastic1' in type:
        dico_elast = {
            'num_control_points': 6,
            'max_displacement': (30, 30, 30),
            'proportion_to_augment': 1,
            'image_interpolation': Interpolation.LINEAR
        }

    if type == 'motion1':
        transforms = Compose([
            RandomMotionFromTimeCourse(**dico_params_mot),
        ])

    elif type == 'elastic1_and_motion1':
        transforms = Compose([
            RandomElasticDeformation(**dico_elast),
            RandomMotionFromTimeCourse(**dico_params_mot)
        ])
    elif type == 'random_noise_1':
        transforms = Compose([RandomNoise(std=(0.020, 0.2))])
    else:
        raise ValueError(f'unknown transform type: {type!r}')

    return transforms
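
For completeness, an illustrative use of the factory above; `subject` is assumed to be a Subject built elsewhere, and `RandomMotionFromTimeCourse` comes from whichever torchio fork this module imports.

transform = get_motion_transform('elastic1_and_motion1')   # elastic deformation followed by simulated motion
transformed = transform(subject)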
Example 6
 def test_transforms(self):
     landmarks_dict = dict(
         t1=np.linspace(0, 100, 13),
         t2=np.linspace(0, 100, 13),
     )
     transforms = (
         CenterCropOrPad((9, 21, 30)),
         ToCanonical(),
         Resample((1, 1.1, 1.25)),
         RandomFlip(axes=(0, 1, 2), flip_probability=1),
         RandomMotion(proportion_to_augment=1),
         RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
         RandomSpike(),
         RandomNoise(),
         RandomBlur(),
         RandomSwap(patch_size=2, num_iterations=5),
         Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
         RandomBiasField(),
         Rescale((0, 1)),
         ZNormalization(masking_method='label'),
         HistogramStandardization(landmarks_dict=landmarks_dict),
         RandomElasticDeformation(proportion_to_augment=1),
         RandomAffine(),
         Pad((1, 2, 3, 0, 5, 6)),
         Crop((3, 2, 8, 0, 1, 4)),
     )
     transformed = self.get_sample()
     for transform in transforms:
         transformed = transform(transformed)
 def test_random_elastic_deformation(self):
     transform = RandomElasticDeformation(
         proportion_to_augment=1,
         seed=42,
     )
     keys = ('t1', 't2', 'label')
     fixtures = 2463.8931905687296, 2465.493324966148, 2532
     transformed = transform(self.sample)
     for key, fixture in zip(keys, fixtures):
         self.assertAlmostEqual(transformed[key][DATA].sum(), fixture)
Example 8
 def test_folding(self):
     # Assume shape is (10, 20, 30) and spacing is (1, 1, 1)
     # Then grid spacing is (10/(12-2), 20/(5-2), 30/(5-2))
     # or (1, 6.7, 10), and half is (0.5, 3.3, 5)
     transform = RandomElasticDeformation(
         num_control_points=(12, 5, 5),
         max_displacement=6,
     )
     with self.assertWarns(RuntimeWarning):
         transformed = transform(self.sample)
 def test_random_elastic_deformation(self):
     transform = RandomElasticDeformation(
         proportion_to_augment=1,
         seed=42,
     )
     keys = ('t1', 't2', 'label')
     fixtures = 764.2669872255983, 734.7786045279245, 752
     transformed = transform(self.sample)
     for key, fixture in zip(keys, fixtures):
         assert transformed[key][DATA].sum() == fixture
Example 10
def elastic(patch_size=None, p=1):
    if patch_size is not None:
        num_controls = patch_size
        max_displacement = np.divide(patch_size, 10)
        if patch_size[-1] == 1:
            max_displacement[-1] = 0.1  # ensure maximum displacement is never greater than patch size
    else:
        # use defaults defined in torchio
        num_controls = 7
        max_displacement = 7.5
    # note: num_controls is computed but not forwarded, so torchio's default
    # number of control points (7) is always used
    return RandomElasticDeformation(max_displacement=max_displacement, p=p)
Example 11
 def test_no_displacement(self):
     transform = RandomElasticDeformation(max_displacement=0)
     transformed = transform(self.sample_subject)
     self.assertTensorEqual(
         self.sample_subject.t1.data,
         transformed.t1.data,
     )
     self.assertTensorEqual(
         self.sample_subject.label.data,
         transformed.label.data,
     )
 def get_torchio_transformer(mask=False):
     if mask:
         interpolation = 'linear'
     else:
         interpolation = image_interpolation
     return RandomElasticDeformation(
         num_control_points=num_control_points,
         max_displacement=max_displacement,
         locked_borders=locked_borders,
         image_interpolation=interpolation,
         p=p,
         seed=seed)
 def test_random_elastic_deformation(self):
     transform = RandomElasticDeformation(
         proportion_to_augment=1,
         seed=42,
     )
     keys = ('t1', 't2', 'label')
     fixtures = 2328.8125, 2317.3125, 2308
     transformed = transform(self.sample)
     for key, fixture in zip(keys, fixtures):
         data = transformed[key][DATA]
         total = data.sum().item()
         self.assertAlmostEqual(total, fixture)
Example 14
    def test_collate(self):
        # Keys missing in one of the samples will not be present in the batch
        # This is relevant for the case in which a transform is applied to some
        # samples only, according to its probability (p argument)
        transform_no = RandomElasticDeformation(p=0, max_displacement=1)
        transform_yes = RandomElasticDeformation(p=1, max_displacement=1)
        sample_no = transform_no(self.sample)
        sample_yes = transform_yes(self.sample)
        data = sample_no, sample_yes

        class Dataset:
            def __init__(self, data):
                self.data = data

            def __len__(self):
                return len(self.data)

            def __getitem__(self, index):
                return self.data[index]

        loader = DataLoader(Dataset(data), batch_size=2)
        next(iter(loader))
 def test_random_elastic_deformation(self):
     transform = RandomElasticDeformation(
         num_control_points=5,
         max_displacement=(2, 3, 5),  # half grid spacing is (3.3, 3.3, 5)
     )
     keys = ('t1', 't2', 'label')
     fixtures = 2916.7192, 2955.1265, 2950
     transformed = transform(self.sample_subject, seed=42)
     for key, fixture in zip(keys, fixtures):
         sample_data = self.sample_subject[key].numpy()
         transformed_data = transformed[key].numpy()
         transformed_total = transformed_data.sum()
         # Make sure that intensities have changed
         self.assertTensorNotEqual(sample_data, transformed_data)
         self.assertAlmostEqual(transformed_total, fixture, places=4)
Example 16
 def test_random_elastic_deformation(self):
     transform = RandomElasticDeformation(
         num_control_points=5,
         max_displacement=(2, 3, 5),  # half grid spacing is (3.3, 3.3, 5)
         seed=42,
     )
     keys = ('t1', 't2', 'label')
     fixtures = 2953.9197, 2989.769, 2975
     transformed = transform(self.sample)
     for key, fixture in zip(keys, fixtures):
         sample_data = self.sample[key][torchio.DATA].numpy()
         transformed_data = transformed[key][torchio.DATA].numpy()
         transformed_total = transformed_data.sum()
         # Make sure that intensities have changed
         assert not np.array_equal(sample_data, transformed_data)
         self.assertAlmostEqual(transformed_total, fixture, places=4)
Example 17
def get_brats(
        data_root='/scratch/weina/dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/',
        fold=1,
        seed=torch.distributed.get_rank() if torch.distributed.is_initialized() else 0,
        **kwargs):
    """ data iter for brats
    """
    logging.debug("BratsIter:: fold = {}, seed = {}".format(fold, seed))
    # args for transforms
    d_size, h_size, w_size = 155, 240, 240
    input_size = [7, 223, 223]
    spacing = (d_size / input_size[0], h_size / input_size[1],
               w_size / input_size[2])
    Mean, Std, Max = read_brats_mean(fold, data_root)
    normalize = transforms.Normalize(mean=Mean, std=Std)
    training_transform = Compose([
        # RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # RandomMotion(),
        # HistogramStandardization({MRI: landmarks}),
        RandomBiasField(),
        # ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        ToCanonical(),
        Resample(spacing),
        # CropOrPad((48, 60, 48)),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
        normalize
    ])
    val_transform = Compose([Resample(spacing), normalize])

    train = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                            'train_fold_{}.csv'.format(fold)),
                      brats_path=os.path.join(data_root, 'all'),
                      brats_transform=training_transform,
                      shuffle=True)

    val = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                          'val_fold_{}.csv'.format(fold)),
                    brats_path=os.path.join(data_root, 'all'),
                    brats_transform=val_transform,
                    shuffle=False)
    return train, val
Example 18
def random_augment(x):
    '''Randomly augment input data.

    Returns: Randomly augmented input
    '''

    # Data augmentations to be used
    transforms_dict = {
        RandomFlip(): 1,
        RandomElasticDeformation(): 1,
        RandomAffine(): 1,
        RandomNoise(): 1,
        RandomBlur(): 1
    }

    # Pick one of the transforms at random and apply it with probability p=0.95
    transform = OneOf(transforms_dict, p=0.95)
    return augment(x, transform)
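
The `augment` helper used above is not shown; a plausible minimal version, assuming the input is a single-channel volume passed as a channels-first 4D tensor, might look like the following (names and shapes are illustrative).

import torch
import torchio as tio

def augment(x, transform):
    # Wrap the raw tensor in a Subject, apply the transform, and unwrap the result
    subject = tio.Subject(image=tio.ScalarImage(tensor=torch.as_tensor(x, dtype=torch.float32)))
    return transform(subject).image.data

volume = torch.rand(1, 80, 80, 80)    # (channels, x, y, z)
augmented = augment(volume, tio.RandomElasticDeformation())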
Example 19
def predict_majority(model, x, y):
    '''Augment every sample of the original data and take the majority vote over the model's predictions.

    Usage: predict_majority(model, x_original, y_original)
    '''

    # Reshape arrays
    x = np.reshape(x, (len(x), 40, 40, 4, 1))
    y = [label - 1 for label in y]  # shift labels 1-5 to 0-4
    y = to_categorical(y, 5)

    # Predict majority
    x_flip = augment(x.copy(), RandomFlip())
    x_ed = augment(x.copy(), RandomElasticDeformation())
    x_affine = augment(x.copy(), RandomAffine())
    x_noise = augment(x.copy(), RandomNoise())
    x_blur = augment(x.copy(), RandomBlur())

    y_true = pred_list(y)
    y_pred = pred_list(model.predict(x.copy()))
    y_flip = pred_list(model.predict(x_flip.copy()))
    y_ed = pred_list(model.predict(x_ed.copy()))
    y_affine = pred_list(model.predict(x_affine.copy()))
    y_noise = pred_list(model.predict(x_noise.copy()))
    y_blur = pred_list(model.predict(x_blur.copy()))

    y_most = []
    correct = 0
    print(
        '\nEntry Number | Prediction (None, Flip, Elastic Deformation, Affine, Noise, Blur) | Actual'
    )
    for i in range(len(y_true)):
        preds = [
            y_pred[i], y_flip[i], y_ed[i], y_affine[i], y_noise[i], y_blur[i]
        ]
        most = max(set(preds), key=preds.count)
        y_most.append(most)
        print('Entry', i, '| Predictions:', preds, '| Most Occurring:', most,
              '| Correct:', y_true[i])
        if most == y_true[i]:
            correct += 1
    print('\nTest Accuracy: ', correct / len(y_true))
    print('Quadratic Weighted Kappa: ',
          cohen_kappa_score(y_true, y_most, weights='quadratic'))
Example 20
def training_network(landmarks, dataset, subjects):
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    training_split_ratio = 0.9
    num_subjects = len(dataset)
    num_training_subjects = int(training_split_ratio * num_subjects)

    training_subjects = subjects[:num_training_subjects]
    validation_subjects = subjects[num_training_subjects:]

    training_set = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)

    validation_set = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)

    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
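
A short follow-up sketch of how the returned sets might be consumed with a standard PyTorch DataLoader; `landmarks`, `dataset`, and `subjects` are assumed to be defined as in the function signature, the image key 'mri' matches the HistogramStandardization call above, and the batch size and worker count are arbitrary choices.

import torchio as tio
from torch.utils.data import DataLoader

training_set, validation_set = training_network(landmarks, dataset, subjects)
training_loader = DataLoader(training_set, batch_size=4, num_workers=2)

for batch in training_loader:
    images = batch['mri'][tio.DATA]   # tensor of shape (batch, channels, x, y, z)
    # ... forward pass, loss computation, optimizer step ...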
Example 21
def elastic(parameters):

    # define defaults
    num_controls = 7
    max_displacement = 7.5
    if parameters["patch_size"] is not None:
        # derive the number of control points from the patch size
        num_controls = []
        for n in parameters["patch_size"]:
            num_controls.append(max(n, 5))  # always have at least 5
        max_displacement = np.divide(num_controls, 10)
        if num_controls[-1] == 1:
            # ensure maximum displacement is never greater than patch size
            max_displacement[-1] = 0.1
        max_displacement = max_displacement.tolist()

    return RandomElasticDeformation(
        num_control_points=num_controls,
        max_displacement=max_displacement,
        p=parameters["probability"],
    )
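
An illustrative call, assuming a parameters dict that carries just the two keys the function reads; the patch size, probability, and subject below are made up for the example.

import torch
import torchio as tio

subject = tio.Subject(image=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))
params = {'patch_size': (16, 16, 16), 'probability': 0.5}

transform = elastic(params)      # 16 control points per axis, max displacement 1.6 voxels, p=0.5
augmented = transform(subject)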
Example 22
 def test_coarse_grid_removed(self):
     with self.assertRaises(ValueError):
         RandomElasticDeformation(
             num_control_points=(4, 5, 6),
             locked_borders=2,
         )
Example 23
 def test_wrong_locked_borders(self):
     with self.assertRaises(ValueError):
         RandomElasticDeformation(locked_borders=-1)
Example 24
 def test_max_displacement_negative(self):
     with self.assertRaises(ValueError):
         RandomElasticDeformation(max_displacement=-1)
Example 25
 def test_max_displacement_no_num(self):
     with self.assertRaises(ValueError):
         RandomElasticDeformation(max_displacement=None)
Example 26
 def test_num_control_points_small(self):
     with self.assertRaises(ValueError):
         RandomElasticDeformation(num_control_points=3)
Example 27
 def test_inputs_interpolation_string(self):
     with self.assertRaises(TypeError):
         RandomElasticDeformation(image_interpolation='linear')
Example 28
 def test_num_control_points(self):
     RandomElasticDeformation(num_control_points=5)
     RandomElasticDeformation(num_control_points=(5, 6, 7))
Example 29
 def test_max_displacement(self):
     RandomElasticDeformation(max_displacement=5)
     RandomElasticDeformation(max_displacement=(5, 6, 7))
Example 30
 def test_inputs_pta_lt_zero(self):
     with self.assertRaises(ValueError):
         RandomElasticDeformation(proportion_to_augment=-1)
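
To close, a minimal end-to-end sketch that pulls together the valid argument ranges exercised by the tests above; it uses the current torchio argument names (`proportion_to_augment` and the `seed` constructor argument exist only in the older API some snippets rely on), and the 32-voxel volumes are arbitrary.

import torch
import torchio as tio

subject = tio.Subject(
    t1=tio.ScalarImage(tensor=torch.rand(1, 32, 32, 32)),
    label=tio.LabelMap(tensor=(torch.rand(1, 32, 32, 32) > 0.5).long()),
)

transform = tio.RandomElasticDeformation(
    num_control_points=(5, 6, 7),   # at least 4 per axis; at least 5 when locked_borders=2
    max_displacement=(2, 3, 3),     # non-negative, and below half the control-point spacing to avoid folding
    locked_borders=2,               # keep the control points at the borders fixed
    p=1.0,
)
transformed = transform(subject)    # label maps are resampled with nearest-neighbour interpolation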