def test_rotation_origin(self):
     # Rotation around a far-away point; the image should be empty
     transform = RandomAffine(
         degrees=(90, 90),
         default_pad_value=0,
         center='origin',
     )
     transformed = transform(self.sample_subject)
     total = transformed.t1.data.sum()
     self.assertEqual(total, 0)
Example #2
 def test_rotation_image(self):
     # Rotation around image center
     transform = RandomAffine(
         degrees=(90, 90),
         default_pad_value=0,
         center='image',
     )
     transformed = transform(self.sample_subject)
     total = transformed.t1.data.sum()
     self.assertNotEqual(total, 0)
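
The two tests above rely on a `self.sample_subject` fixture provided by the TorchIO test class, which this page does not show. A minimal, self-contained sketch of an equivalent setup (the subject below is an illustrative stand-in, not the actual fixture) might look like this:

import torch
import torchio as tio

# Stand-in for the test fixture: a Subject holding one random
# single-channel 3D volume under the key 't1'.
sample_subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 10, 10, 10)))

transform = tio.RandomAffine(degrees=(90, 90), default_pad_value=0, center='image')
transformed = transform(sample_subject)
print(transformed.t1.data.sum())  # non-zero: rotating about the image center keeps content in view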
Example #3
    def test_no_rotation(self):
        transform = RandomAffine(
            scales=(1, 1),
            degrees=(0, 0),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample_subject)
        self.assertTensorAlmostEqual(self.sample_subject.t1.data, transformed.t1.data)

        transform = RandomAffine(
            scales=(1, 1),
            degrees=(180, 180),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample_subject)
        transformed = transform(transformed)
        self.assertTensorAlmostEqual(self.sample_subject.t1.data, transformed.t1.data)
Example #4
def get_tranformation_list(choice=1):
    if isinstance(choice,int):
        choice = [choice]
    transfo_list, transfo_name = [], []

    if 1 in choice:
        transfo_list +=  [
            RandomAffineFFT(scales=(1, 1), degrees=(10, 10)),
            RandomAffineFFT(scales=(1.2, 1.2), degrees=(0, 0)),
            RandomAffineFFT(scales=(0.8, 0.8), degrees=(0, 0)),
        ]
        transfo_name += ['tAfft_R10', 'tAfft_S1.2', 'tAfft_S08']
    if 2 in choice:
        transfo_list += [
            RandomAffine(scales=(1, 1), degrees=(10, 10), image_interpolation='nearest'),
            RandomAffine(scales=(1.2, 1.2), degrees=(0, 0), image_interpolation='nearest'),
            RandomAffine(scales=(0.8, 0.8), degrees=(0, 0), image_interpolation='nearest'),
        ]
        transfo_name += ['tAffN_R10', 'tAffN_S1.2', 'tAffN_S08']

    return transfo_list, transfo_name
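
A possible way to consume the two returned lists, assuming `subject` is a `torchio.Subject` with a `t1` image and that `RandomAffineFFT` (an external extension) follows the same call convention as the standard transforms; the loop is illustrative and not part of the original code:

transfo_list, transfo_name = get_tranformation_list(choice=[1, 2])
for transform, name in zip(transfo_list, transfo_name):
    augmented = transform(subject)  # apply each augmentation in turn
    print(name, augmented.t1.data.shape)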
Example #5
 def _get_default_transforms(self):
     io_transforms = Compose([
         RandomMotion(),
         RandomFlip(axes=(1, )),
         RandomAffine(scales=(0.9, 1.2),
                      degrees=(10),
                      isotropic=False,
                      default_pad_value='otsu',
                      image_interpolation='bspline'),
         RescaleIntensity((0, 1))
     ])
     return io_transforms
Example #6
    def test_translation(self):
        transform = RandomAffine(scales=(1, 1), degrees=0, translation=(5, 5))
        transformed = transform(self.sample)

        # I think the right test should be the following one:
        # self.assertTensorAlmostEqual(
        #     self.sample.t1.data[:, :-5, :-5, :-5],
        #     transformed.t1.data[:, 5:, 5:, 5:]
        # )

        # However the passing test is this one:
        self.assertTensorAlmostEqual(self.sample.t1.data[:, :-5, :-5, 5:],
                                     transformed.t1.data[:, 5:, 5:, :-5])
Example #7
 def get_torchio_transformer(mask=False):
     if mask:
         interpolation = 'linear'
     else:
         interpolation = image_interpolation
     return RandomAffine(scales=scales,
                         degrees=degrees,
                         translation=translation,
                         isotropic=isotropic,
                         center=center,
                         default_pad_value=default_pad_value,
                         image_interpolation=interpolation,
                         p=p,
                         seed=seed)
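
This function closes over names (scales, degrees, translation, and so on) defined in an enclosing scope that the snippet does not show. A sketch of a plausible enclosing factory, with parameter values chosen here purely for illustration and the `seed` keyword matching the older TorchIO API used by the original snippet:

def make_affine_factory(scales=(0.9, 1.1), degrees=10, translation=0,
                        isotropic=False, center='image',
                        default_pad_value='otsu', image_interpolation='linear',
                        p=1, seed=None):
    # Reproduces the closure above: masks are always resampled linearly,
    # images use the configured interpolation.
    def get_torchio_transformer(mask=False):
        interpolation = 'linear' if mask else image_interpolation
        return RandomAffine(scales=scales, degrees=degrees, translation=translation,
                            isotropic=isotropic, center=center,
                            default_pad_value=default_pad_value,
                            image_interpolation=interpolation, p=p, seed=seed)
    return get_torchio_transformer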
Example #8
    def transform(self):

        if hp.mode == '3d':
            training_transform = Compose([
                # ToCanonical(),
                CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                           hp.crop_or_pad_size),
                          padding_mode='reflect'),
                RandomMotion(),
                RandomBiasField(),
                ZNormalization(),
                RandomNoise(),
                RandomFlip(axes=(0, )),
                OneOf({
                    RandomAffine(): 0.8,
                    RandomElasticDeformation(): 0.2,
                }),
            ])
        elif hp.mode == '2d':
            training_transform = Compose([
                CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size, 1),
                          padding_mode='reflect'),
                RandomMotion(),
                RandomBiasField(),
                ZNormalization(),
                RandomNoise(),
                RandomFlip(axes=(0, )),
                OneOf({
                    RandomAffine(): 0.8,
                    RandomElasticDeformation(): 0.2,
                }),
            ])
        else:
            raise ValueError(f'Unknown mode: {hp.mode}')

        return training_transform
Example #9
def get_brats(
        data_root='/scratch/weina/dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/',
        fold=1,
        seed=torch.distributed.get_rank() if torch.distributed.is_initialized() else 0,
        **kwargs):
    """ data iter for brats
    """
    logging.debug("BratsIter:: fold = {}, seed = {}".format(fold, seed))
    # args for transforms
    d_size, h_size, w_size = 155, 240, 240
    input_size = [7, 223, 223]
    spacing = (d_size / input_size[0], h_size / input_size[1],
               w_size / input_size[2])
    Mean, Std, Max = read_brats_mean(fold, data_root)
    normalize = transforms.Normalize(mean=Mean, std=Std)
    training_transform = Compose([
        # RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # RandomMotion(),
        # HistogramStandardization({MRI: landmarks}),
        RandomBiasField(),
        # ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        ToCanonical(),
        Resample(spacing),
        # CropOrPad((48, 60, 48)),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
        normalize
    ])
    val_transform = Compose([Resample(spacing), normalize])

    train = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                            'train_fold_{}.csv'.format(fold)),
                      brats_path=os.path.join(data_root, 'all'),
                      brats_transform=training_transform,
                      shuffle=True)

    val = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                          'val_fold_{}.csv'.format(fold)),
                    brats_path=os.path.join(data_root, 'all'),
                    brats_transform=val_transform,
                    shuffle=False)
    return train, val
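
A possible way to consume the returned splits, assuming `BratsIter` behaves like a standard `torch.utils.data.Dataset` (the batch size here is arbitrary):

train, val = get_brats(fold=1)
train_loader = torch.utils.data.DataLoader(train, batch_size=8)
val_loader = torch.utils.data.DataLoader(val, batch_size=8)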
Example #10
def random_augment(x):
    '''Randomly augment input data.

    Returns: Randomly augmented input
    '''

    # Data augmentations to be used
    transforms_dict = {
        RandomFlip(): 1,
        RandomElasticDeformation(): 1,
        RandomAffine(): 1,
        RandomNoise(): 1,
        RandomBlur(): 1
    }

    # Create random transform, with a p chance to apply augmentation
    transform = OneOf(transforms_dict, p=0.95)
    return augment(x, transform)
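
The `augment` helper called here and in the next example is not shown on this page. A hypothetical implementation, assuming the input is a NumPy batch shaped (N, H, W, D, C) as in the reshape of the next example, and that the TorchIO transform accepts a channels-first 4D tensor:

import numpy as np
import torch

def augment(x, transform):
    # Apply the given TorchIO transform independently to every sample
    # of a batch shaped (N, H, W, D, C); purely illustrative.
    out = np.empty_like(x, dtype=np.float32)
    for i, sample in enumerate(x):
        tensor = torch.as_tensor(sample, dtype=torch.float32).permute(3, 0, 1, 2)
        out[i] = transform(tensor).permute(1, 2, 3, 0).numpy()
    return out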
Example #11
def predict_majority(model, x, y):
    '''Augments all samples of the original data and chooses the majority prediction across the model's outputs.

    Usage: predict_majority(model, x_original, y_original)
    '''

    # Reshape arrays
    x = np.reshape(x, (len(x), 40, 40, 4, 1))
    y = [x - 1 for x in y]
    y = to_categorical(y, 5)

    # Predict majority
    x_flip = augment(x.copy(), RandomFlip())
    x_ed = augment(x.copy(), RandomElasticDeformation())
    x_affine = augment(x.copy(), RandomAffine())
    x_noise = augment(x.copy(), RandomNoise())
    x_blur = augment(x.copy(), RandomBlur())

    y_true = pred_list(y)
    y_pred = pred_list(model.predict(x.copy()))
    y_flip = pred_list(model.predict(x_flip.copy()))
    y_ed = pred_list(model.predict(x_ed.copy()))
    y_affine = pred_list(model.predict(x_affine.copy()))
    y_noise = pred_list(model.predict(x_noise.copy()))
    y_blur = pred_list(model.predict(x_blur.copy()))

    y_most = []
    correct = 0
    print(
        '\nEntry Number | Prediction (None, Flip, Elastic Deformation, Affine, Noise, Blur) | Actual'
    )
    for i in range(len(y_true)):
        preds = [
            y_pred[i], y_flip[i], y_ed[i], y_affine[i], y_noise[i], y_blur[i]
        ]
        most = max(set(preds), key=preds.count)
        y_most.append(most)
        print('Entry', i, '| Predictions:', preds, '| Most Occurring:', most,
              '| Correct:', y_true[i])
        if most == y_true[i]:
            correct += 1
    print('\nTest Accuracy: ', correct / len(y_true))
    print('Quadratic Weighted Kappa: ',
          cohen_kappa_score(y_true, y_most, weights='quadratic'))
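
`pred_list` is also undefined on this page; given how it is used (turning one-hot labels and softmax outputs into integer class indices suitable for counting), a plausible stand-in is:

import numpy as np

def pred_list(y):
    # Hypothetical helper: collapse one-hot / probability rows to class indices.
    return [int(np.argmax(row)) for row in y]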
Example #12
def training_network(landmarks, dataset, subjects):
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    training_split_ratio = 0.9
    num_subjects = len(dataset)
    num_training_subjects = int(training_split_ratio * num_subjects)

    training_subjects = subjects[:num_training_subjects]
    validation_subjects = subjects[num_training_subjects:]

    training_set = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)

    validation_set = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)

    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
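
A `SubjectsDataset` can be consumed directly by a standard PyTorch `DataLoader`; a minimal continuation (batch size chosen arbitrarily here) might be:

import torch

training_loader = torch.utils.data.DataLoader(training_set, batch_size=4, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_set, batch_size=4)
batch = next(iter(training_loader))
print(batch['mri']['data'].shape)  # e.g. torch.Size([4, 1, 48, 60, 48])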
Example #13
 def test_scale_too_large(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=1.5)
Example #14
 def test_negative_scales(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(-1, 1))
Example #15
 def test_incompatible_args_isotropic(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(0.8, 0.5, 0.1), isotropic=True)
Example #16
 def test_wrong_image_interpolation_value(self):
     with self.assertRaises(ValueError):
         RandomAffine(image_interpolation='wrong')
Example #17
 def test_wrong_image_interpolation_type(self):
     with self.assertRaises(TypeError):
         RandomAffine(image_interpolation=0)
Example #18
 def test_wrong_default_pad_value(self):
     with self.assertRaises(ValueError):
         RandomAffine(default_pad_value='wrong')
Example #19
 def test_wrong_center(self):
     with self.assertRaises(ValueError):
         RandomAffine(center=0)
Example #20
 def get_torchio_transformer(mask=False):
     if mask:
         interpolation = Interpolation.LINEAR
     else:
         interpolation = image_interpolation
     return RandomAffine(scales, degrees, isotropic, default_pad_value, interpolation, p, seed, is_tensor)
Example #21

# Mock PyTorch model
model = lambda x: x

# Define training and patches sampling parameters
num_epochs = 4
patch_size = 128
queue_length = 100
samples_per_volume = 1
batch_size = 2

# Define transforms for data normalization and augmentation
transforms = (
    ZNormalization(),
    RandomAffine(scales=(0.9, 1.1), degrees=10),
    RandomNoise(std_range=(0, 0.25)),
    RandomFlip(axes=(0, )),
)
transform = Compose(transforms)

# Populate a list with dictionaries of paths
one_subject_dict = {
    'T1':
    dict(path='../BRATS2018_crop_renamed/LGG75_T1.nii.gz',
         type=torchio.INTENSITY),
    'T2':
    dict(path='../BRATS2018_crop_renamed/LGG75_T2.nii.gz',
         type=torchio.INTENSITY),
    'label':
    dict(path='../BRATS2018_crop_renamed/LGG75_Label.nii.gz',
Example #22
def affine(p=1):
    return RandomAffine(p=p)
Example #23
 def test_too_many_translation_values(self):
     with self.assertRaises(ValueError):
         RandomAffine(translation=(-10, 4, 42))
Example #24
 def test_scales_range_with_negative_min(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(-1, 4))
Example #25
 def test_zero_probabilities(self):
     with self.assertRaises(ValueError):
         OneOf({RandomAffine(): 0, RandomElasticDeformation(): 0})
Example #26
 def test_negative_probabilities(self):
     with self.assertRaises(ValueError):
         OneOf({RandomAffine(): -1, RandomElasticDeformation(): 1})
Example #27
 def test_wrong_degrees_type(self):
     with self.assertRaises(ValueError):
         RandomAffine(degrees='wrong')
Example #28
 def test_too_many_scales_values(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(1., 4., 12.))
Example #29
 def test_wrong_translation_type(self):
     with self.assertRaises(ValueError):
         RandomAffine(translation='wrong')
Example #30
 def test_too_many_degrees_values(self):
     with self.assertRaises(ValueError):
         RandomAffine(degrees=(-10., 4., 42.))