Example #1
    def test_no_rotation(self):
        transform = RandomAffine(
            scales=(1, 1),
            degrees=(0, 0),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample_subject)
        self.assertTensorAlmostEqual(
            self.sample_subject.t1.data,
            transformed.t1.data,
        )

        transform = RandomAffine(
            scales=(1, 1),
            degrees=(180, 180),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample_subject)
        transformed = transform(transformed)
        self.assertTensorAlmostEqual(
            self.sample_subject.t1.data,
            transformed.t1.data,
        )
Example #2
 def test_parse_scales(self):
     def do_assert(transform):
         self.assertEqual(transform.scales, 3 * (0.9, 1.1))
     do_assert(RandomAffine(scales=0.1))
     do_assert(RandomAffine(scales=(0.9, 1.1)))
     do_assert(RandomAffine(scales=3 * (0.1,)))
     do_assert(RandomAffine(scales=3 * [0.9, 1.1]))
Example #3
 def test_parse_translation(self):
     def do_assert(transform):
         self.assertEqual(transform.translation, 3 * (-10, 10))
     do_assert(RandomAffine(translation=10))
     do_assert(RandomAffine(translation=(-10, 10)))
     do_assert(RandomAffine(translation=3 * (10,)))
     do_assert(RandomAffine(translation=3 * [-10, 10]))
Example #4
 def test_parse_degrees(self):
     def do_assert(transform):
         self.assertEqual(transform.degrees, 3 * (-10, 10))
     do_assert(RandomAffine(degrees=10))
     do_assert(RandomAffine(degrees=(-10, 10)))
     do_assert(RandomAffine(degrees=3 * (10,)))
     do_assert(RandomAffine(degrees=3 * [-10, 10]))
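
Taken together, Examples #2, #3, and #4 show how RandomAffine normalizes its arguments into per-axis ranges: a scalar x for degrees or translation becomes (-x, x) for each of the three axes, a scalar s for scales becomes (1 - s, 1 + s), and a single (min, max) pair is repeated for all three axes. The sketch below illustrates that convention only; parse_per_axis_range is a hypothetical helper, not torchio's actual implementation.

def parse_per_axis_range(value, around_one=False):
    # Normalize a scalar, pair, 3-tuple, or 6-tuple into six numbers:
    # one (min, max) pair per spatial axis, as the parse tests above assert.
    if isinstance(value, (int, float)):
        pair = (1 - value, 1 + value) if around_one else (-value, value)
        return 3 * pair
    value = tuple(value)
    if len(value) == 2:   # one (min, max) pair, repeated for each axis
        return 3 * value
    if len(value) == 3:   # one scalar per axis
        result = ()
        for v in value:
            result += (1 - v, 1 + v) if around_one else (-v, v)
        return result
    if len(value) == 6:   # already one (min, max) pair per axis
        return value
    raise ValueError(f'Cannot parse per-axis range from {value!r}')

# Mirrors the assertions in the tests above:
assert parse_per_axis_range(0.1, around_one=True) == 3 * (0.9, 1.1)
assert parse_per_axis_range(10) == 3 * (-10, 10)
assert parse_per_axis_range(3 * (10,)) == 3 * (-10, 10)
assert parse_per_axis_range(3 * [-10, 10]) == 3 * (-10, 10)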
Example #5
    def transform(self):

        if hp.mode == '3d':
            if hp.aug:
                training_transform = Compose([
                    # ToCanonical(),
                    CropOrPad(hp.crop_or_pad_size, padding_mode='reflect'),
                    # RandomMotion(),
                    RandomBiasField(),
                    ZNormalization(),
                    RandomNoise(),
                    RandomFlip(axes=(0, )),
                    OneOf({
                        RandomAffine(): 0.8,
                        RandomElasticDeformation(): 0.2,
                    }),
                ])
            else:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                               hp.crop_or_pad_size),
                              padding_mode='reflect'),
                    ZNormalization(),
                ])
        elif hp.mode == '2d':
            if hp.aug:
                training_transform = Compose([
                    CropOrPad(hp.crop_or_pad_size, padding_mode='reflect'),
                    # RandomMotion(),
                    RandomBiasField(),
                    ZNormalization(),
                    RandomNoise(),
                    RandomFlip(axes=(0, )),
                    OneOf({
                        RandomAffine(): 0.8,
                        RandomElasticDeformation(): 0.2,
                    }),
                ])
            else:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                               hp.crop_or_pad_size),
                              padding_mode='reflect'),
                    ZNormalization(),
                ])

        else:
            raise ValueError('Unknown mode: {}'.format(hp.mode))

        return training_transform
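
The transform method above only reads hp.mode, hp.aug, and hp.crop_or_pad_size from a hyperparameter object that is not shown in this snippet. A purely hypothetical stand-in that would satisfy those accesses:

from types import SimpleNamespace

# Hypothetical config; the attribute names are the ones the method reads.
hp = SimpleNamespace(
    mode='3d',             # '3d' or '2d'
    aug=True,              # whether to apply random augmentation
    crop_or_pad_size=128,  # target size passed to CropOrPad
)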
Example #6
    def test_transforms(self):
        landmarks_dict = dict(
            t1=np.linspace(0, 100, 13),
            t2=np.linspace(0, 100, 13),
        )
        random_transforms = (
            RandomFlip(axes=(0, 1, 2), flip_probability=1),
            RandomNoise(),
            RandomBiasField(),
            RandomElasticDeformation(proportion_to_augment=1),
            RandomAffine(),
            RandomMotion(proportion_to_augment=1),
        )
        intensity_transforms = (
            Rescale(),
            ZNormalization(),
            HistogramStandardization(landmarks_dict=landmarks_dict),
        )
        for transform in random_transforms:
            sample = self.get_sample()
            transformed = transform(sample)

        for transform in intensity_transforms:
            sample = self.get_sample()
            transformed = transform(sample)
Example #7
 def test_transforms(self):
     landmarks_dict = dict(
         t1=np.linspace(0, 100, 13),
         t2=np.linspace(0, 100, 13),
     )
     transforms = (
         CenterCropOrPad((9, 21, 30)),
         ToCanonical(),
         Resample((1, 1.1, 1.25)),
         RandomFlip(axes=(0, 1, 2), flip_probability=1),
         RandomMotion(proportion_to_augment=1),
         RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
         RandomSpike(),
         RandomNoise(),
         RandomBlur(),
         RandomSwap(patch_size=2, num_iterations=5),
         Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
         RandomBiasField(),
         Rescale((0, 1)),
         ZNormalization(masking_method='label'),
         HistogramStandardization(landmarks_dict=landmarks_dict),
         RandomElasticDeformation(proportion_to_augment=1),
         RandomAffine(),
         Pad((1, 2, 3, 0, 5, 6)),
         Crop((3, 2, 8, 0, 1, 4)),
     )
     transformed = self.get_sample()
     for transform in transforms:
         transformed = transform(transformed)
Example #8
def affine(parameters):
    return RandomAffine(
        scales=parameters["scales"],
        degrees=parameters["degrees"],
        translation=parameters["translation"],
        p=parameters["probability"],
    )
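
The affine factory in Example #8 expects a mapping with the four keys it reads. A hypothetical call with illustrative values:

# Illustrative values only; the keys are exactly the ones affine() reads.
parameters = {
    "scales": (0.9, 1.1),
    "degrees": 10,
    "translation": 5,
    "probability": 0.75,
}
transform = affine(parameters)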
Example #9
    def __init__(self, transform1, m1, p1, transform2, m2, p2):
        ranges = {
            'flip': np.zeros(10),
            'affine': np.linspace(0, 180, 10),
            'noise': np.linspace(0, 0.5, 10),
            'blur': np.arange(10),
            'elasticD': np.zeros(10)
        }

        transforms = {
            'flip': lambda magnitude, p: RandomFlip(p=p),
            'affine': lambda magnitude, p: RandomAffine(degrees=magnitude, p=p),
            'noise': lambda magnitude, p: RandomNoise(std=magnitude, p=p),
            'blur': lambda magnitude, p: RandomBlur(std=magnitude, p=p),
            'elasticD': lambda magnitude, p: RandomElasticDeformation(p=p)
        }

        self.transform1 = transforms[transform1]
        self.t1_input = transform1
        self.m1 = ranges[transform1][m1]
        self.m1_input = m1
        self.p1 = p1

        self.transform2 = transforms[transform2]
        self.t2_input = transform2
        self.m2 = ranges[transform2][m2]
        self.m2_input = m2
        self.p2 = p2

        self.kappa = 0.0
Example #10
    def build(self):
        SEED = 42
        data = pd.read_csv(self.data)
        ab = data.label

        ############################################
        transforms = [
            RescaleIntensity((0, 1)),
            RandomAffine(),
            transformss.ToTensor(),
        ]
        transform = Compose(transforms)
        #############################################

        dataset_dir = self.dataset_dir
        dataset_dir = Path(dataset_dir)

        images_dir = dataset_dir
        labels_dir = dataset_dir
        image_paths = sorted(images_dir.glob('**/*.nii'))
        label_paths = sorted(labels_dir.glob('**/*.nii'))
        assert len(image_paths) == len(label_paths)

        # These two names are arbitrary
        MRI = 'features'
        BRAIN = 'targets'

        # Split the dataset into training and validation sets
        from catalyst.utils import split_dataframe_train_test

        train_image_paths, valid_image_paths = split_dataframe_train_test(
            image_paths, test_size=0.2, random_state=SEED)

        # Training data
        subjects = []
        i = 0
        for (image_path, label_path) in zip(train_image_paths, label_paths):
            subject_dict = {
                MRI: torchio.Image(image_path, torchio.INTENSITY),
                BRAIN: ab[i],
            }
            i = i + 1
            subject = torchio.Subject(subject_dict)
            subjects.append(subject)
        train_data = torchio.ImagesDataset(subjects)

        # Validation data
        subjects = []
        for (image_path, label_path) in zip(valid_image_paths, label_paths):
            subject_dict = {
                MRI: torchio.Image(image_path, torchio.INTENSITY),
                BRAIN: ab[i],
            }
            i = i + 1
            subject = torchio.Subject(subject_dict)
            subjects.append(subject)
        test_data = torchio.ImagesDataset(subjects)
        return train_data, test_data
Example #11
 def test_rotation_image(self):
     # Rotation around image center
     transform = RandomAffine(
         degrees=(90, 90),
         default_pad_value=0,
         center='image',
     )
     transformed = transform(self.sample_subject)
     total = transformed.t1.data.sum()
     self.assertNotEqual(total, 0)
Example #12
 def test_rotation_origin(self):
     # Rotation around far away point, image should be empty
     transform = RandomAffine(
         degrees=(90, 90),
         default_pad_value=0,
         center='origin',
     )
     transformed = transform(self.sample_subject)
     total = transformed.t1.data.sum()
     self.assertEqual(total, 0)
Example #13
    def test_no_rotation(self):
        transform = RandomAffine(
            scales=(1, 1),
            degrees=(0, 0),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample)
        assert_array_equal(self.sample.t1.data, transformed.t1.data)

        transform = RandomAffine(
            scales=(1, 1),
            degrees=(180, 180),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample)
        transformed = transform(transformed)
        assert_array_equal(self.sample.t1.data, transformed.t1.data)
Example #14
def get_tranformation_list(choice=1):
    if isinstance(choice, int):
        choice = [choice]
    transfo_list, transfo_name = [], []

    if 1 in choice:
        transfo_list += [
            RandomAffineFFT(scales=(1, 1), degrees=(10, 10)),
            RandomAffineFFT(scales=(1.2, 1.2), degrees=(0, 0)),
            RandomAffineFFT(scales=(0.8, 0.8), degrees=(0, 0)),
        ]
        transfo_name += ['tAfft_R10', 'tAfft_S1.2', 'tAfft_S08']
    if 2 in choice:
        transfo_list += [
            RandomAffine(scales=(1, 1), degrees=(10, 10), image_interpolation='nearest'),
            RandomAffine(scales=(1.2, 1.2), degrees=(0, 0), image_interpolation='nearest'),
            RandomAffine(scales=(0.8, 0.8), degrees=(0, 0), image_interpolation='nearest'),
        ]
        transfo_name += ['tAffN_R10', 'tAffN_S1.2', 'tAffN_S08']

    return transfo_list, transfo_name
Example #15
 def _get_default_transforms(self):
     io_transforms = Compose([
         RandomMotion(),
         RandomFlip(axes=(1, )),
         RandomAffine(scales=(0.9, 1.2),
                      degrees=10,
                      isotropic=False,
                      default_pad_value='otsu',
                      image_interpolation='bspline'),
         RescaleIntensity((0, 1))
     ])
     return io_transforms
Example #16
    def test_translation(self):
        transform = RandomAffine(scales=(1, 1), degrees=0, translation=(5, 5))
        transformed = transform(self.sample)

        # I think the right test should be the following one:
        # self.assertTensorAlmostEqual(
        #     self.sample.t1.data[:, :-5, :-5, :-5],
        #     transformed.t1.data[:, 5:, 5:, 5:]
        # )

        # However the passing test is this one:
        self.assertTensorAlmostEqual(self.sample.t1.data[:, :-5, :-5, 5:],
                                     transformed.t1.data[:, 5:, 5:, :-5])
Example #17
 def get_torchio_transformer(mask=False):
     # Note: scales, degrees, translation, isotropic, center, default_pad_value,
     # image_interpolation, p, and seed are presumably defined in the enclosing
     # scope (this looks like a closure returned by a factory function).
     if mask:
         interpolation = 'linear'
     else:
         interpolation = image_interpolation
     return RandomAffine(scales=scales,
                         degrees=degrees,
                         translation=translation,
                         isotropic=isotropic,
                         center=center,
                         default_pad_value=default_pad_value,
                         image_interpolation=interpolation,
                         p=p,
                         seed=seed)
Example #18
def get_brats(
        data_root='/scratch/weina/dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/',
        fold=1,
        seed=torch.distributed.get_rank() if torch.distributed.is_initialized() else 0,
        **kwargs):
    """Data iterator for BraTS."""
    logging.debug("BratsIter:: fold = {}, seed = {}".format(fold, seed))
    # args for transforms
    d_size, h_size, w_size = 155, 240, 240
    input_size = [7, 223, 223]
    spacing = (d_size / input_size[0], h_size / input_size[1],
               w_size / input_size[2])
    Mean, Std, Max = read_brats_mean(fold, data_root)
    normalize = transforms.Normalize(mean=Mean, std=Std)
    training_transform = Compose([
        # RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # RandomMotion(),
        # HistogramStandardization({MRI: landmarks}),
        RandomBiasField(),
        # ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        ToCanonical(),
        Resample(spacing),
        # CropOrPad((48, 60, 48)),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
        normalize
    ])
    val_transform = Compose([Resample(spacing), normalize])

    train = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                            'train_fold_{}.csv'.format(fold)),
                      brats_path=os.path.join(data_root, 'all'),
                      brats_transform=training_transform,
                      shuffle=True)

    val = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                          'val_fold_{}.csv'.format(fold)),
                    brats_path=os.path.join(data_root, 'all'),
                    brats_transform=val_transform,
                    shuffle=False)
    return train, val
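
For the sizes hard-coded in Example #18, the target spacing passed to Resample works out to heavy downsampling along the depth axis while leaving in-plane resolution nearly unchanged; a quick check of the arithmetic:

d_size, h_size, w_size = 155, 240, 240
input_size = [7, 223, 223]
spacing = (d_size / input_size[0], h_size / input_size[1], w_size / input_size[2])
print(spacing)  # roughly (22.14, 1.08, 1.08)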
Example #19
def random_augment(x):
    '''Randomly augment input data.

    Returns: Randomly augmented input
    '''

    # Data augmentations to be used
    transforms_dict = {
        RandomFlip(): 1,
        RandomElasticDeformation(): 1,
        RandomAffine(): 1,
        RandomNoise(): 1,
        RandomBlur(): 1
    }

    # Create random transform, with a p chance to apply augmentation
    transform = OneOf(transforms_dict, p=0.95)
    return augment(x, transform)
Example #20
def predict_majority(model, x, y):
    '''Augment every sample of the original data and take the majority vote over the model's predictions.

    Usage: predict_majority(model, x_original, y_original)
    '''

    # Reshape arrays
    x = np.reshape(x, (len(x), 40, 40, 4, 1))
    y = [x - 1 for x in y]
    y = to_categorical(y, 5)

    # Predict majority
    x_flip = augment(x.copy(), RandomFlip())
    x_ed = augment(x.copy(), RandomElasticDeformation())
    x_affine = augment(x.copy(), RandomAffine())
    x_noise = augment(x.copy(), RandomNoise())
    x_blur = augment(x.copy(), RandomBlur())

    y_true = pred_list(y)
    y_pred = pred_list(model.predict(x.copy()))
    y_flip = pred_list(model.predict(x_flip.copy()))
    y_ed = pred_list(model.predict(x_ed.copy()))
    y_affine = pred_list(model.predict(x_affine.copy()))
    y_noise = pred_list(model.predict(x_noise.copy()))
    y_blur = pred_list(model.predict(x_blur.copy()))

    y_most = []
    correct = 0
    print(
        '\nEntry Number | Prediction (None, Flip, Elastic Deformation, Affine, Noise, Blur) | Actual'
    )
    for i in range(len(y_true)):
        preds = [
            y_pred[i], y_flip[i], y_ed[i], y_affine[i], y_noise[i], y_blur[i]
        ]
        most = max(set(preds), key=preds.count)
        y_most.append(most)
        print('Entry', i, '| Predictions:', preds, '| Most Occurring:', most,
              '| Correct:', y_true[i])
        if most == y_true[i]:
            correct += 1
    print('\nTest Accuracy: ', correct / len(y_true))
    print('Quadratic Weighted Kappa: ',
          cohen_kappa_score(y_true, y_most, weights='quadratic'))
Example #21
File: data.py  Project: JIiminIT/Torch
def training_network(landmarks, dataset, subjects):
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    training_split_ratio = 0.9
    num_subjects = len(dataset)
    num_training_subjects = int(training_split_ratio * num_subjects)

    training_subjects = subjects[:num_training_subjects]
    validation_subjects = subjects[num_training_subjects:]

    training_set = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)

    validation_set = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)

    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
Example #22
 def test_scale_too_large(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=1.5)
Example #23
 def test_negative_scales(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(-1, 1))
Example #24
 def test_too_many_translation_values(self):
     with self.assertRaises(ValueError):
         RandomAffine(translation=(-10, 4, 42))
Example #25
 def test_wrong_image_interpolation_type(self):
     with self.assertRaises(TypeError):
         RandomAffine(image_interpolation=0)
Example #26
 def test_scales_range_with_negative_min(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(-1, 4))
Example #27
 def test_wrong_degrees_type(self):
     with self.assertRaises(ValueError):
         RandomAffine(degrees='wrong')
Example #28
 def test_wrong_image_interpolation_value(self):
     with self.assertRaises(ValueError):
         RandomAffine(image_interpolation='wrong')
Example #29
 def test_wrong_translation_type(self):
     with self.assertRaises(ValueError):
         RandomAffine(translation='wrong')
Example #30
 def test_incompatible_args_isotropic(self):
     with self.assertRaises(ValueError):
         RandomAffine(scales=(0.8, 0.5, 0.1), isotropic=True)