Example 1
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    augs = {
        'strong':
        albu.Compose([
            albu.HorizontalFlip(),
            albu.ShiftScaleRotate(shift_limit=0.0,
                                  scale_limit=0.2,
                                  rotate_limit=20,
                                  p=.4),
            albu.ElasticTransform(),
            albu.OpticalDistortion(),
            albu.OneOf([
                albu.CLAHE(clip_limit=2),
                albu.IAASharpen(),
                albu.IAAEmboss(),
                albu.RandomBrightnessContrast(),
                albu.RandomGamma()
            ],
                       p=0.5),
            albu.OneOf([
                albu.RGBShift(),
                albu.HueSaturationValue(),
            ], p=0.5),
        ]),
        'weak':
        albu.Compose([
            albu.HorizontalFlip(),
        ]),
        'geometric':
        albu.OneOf([
            albu.HorizontalFlip(always_apply=True),
            albu.ShiftScaleRotate(always_apply=True),
            albu.Transpose(always_apply=True),
            albu.OpticalDistortion(always_apply=True),
            albu.ElasticTransform(always_apply=True),
        ])
    }

    crop_fn = {
        'random': albu.RandomCrop(size, size, always_apply=True),
        'center': albu.CenterCrop(size, size, always_apply=True)
    }[crop]
    pad = albu.PadIfNeeded(size, size)
    if scope == "nothing":
        pipeline = albu.Compose([crop_fn, pad],
                                additional_targets={'target': 'image'})
    else:
        aug_fn = augs[scope]
        pipeline = albu.Compose([aug_fn, crop_fn, pad],
                                additional_targets={'target': 'image'})

    def process(a, b):
        r = pipeline(image=a, target=b)
        return r['image'], r['target']

    return process
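A hedged usage sketch for the factory above (assuming albumentations is imported as albu in the same module and the inputs are same-sized HxWxC uint8 NumPy arrays, e.g. a degraded image and its clean reference):

import numpy as np

# Hypothetical image/target pair.
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
target = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)

process = get_transforms(size=256, scope='geometric', crop='random')
image_aug, target_aug = process(image, target)   # identical geometry applied to both
print(image_aug.shape, target_aug.shape)         # (256, 256, 3) (256, 256, 3)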
Example 2
def get_augmentations():
    return A.OneOf([
                      A.RandomContrast(limit=(0.8,1.2),p=0.2),
                      A.MotionBlur(blur_limit=15,p=0.2),
                      A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0,p=0.8),
                      A.Cutout(num_holes=16, max_h_size=4, max_w_size=4, fill_value=255,p=0.8),
                      A.Cutout(num_holes=3, max_h_size=20, max_w_size=20, fill_value=0,p=0.8),
                      A.Cutout(num_holes=10, max_h_size=20, max_w_size=20, fill_value=255,p=0.8),
                      A.ShiftScaleRotate(shift_limit=0.06,scale_limit=0.1,rotate_limit=15,border_mode=cv2.BORDER_CONSTANT,value=255,p=0.8),
                      A.ElasticTransform(alpha=30,sigma=5,alpha_affine=10,border_mode=cv2.BORDER_CONSTANT,value=255,p=1.0),
                      A.ElasticTransform(alpha=60,sigma=15,alpha_affine=20,border_mode=cv2.BORDER_CONSTANT,value=255,p=1.0),
                      ],p=AUGM_PROB)
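The function returns a bare A.OneOf, so per call it applies exactly one of the listed transforms (with overall probability AUGM_PROB, a module-level constant not shown in the snippet); inside OneOf the child p values act as relative selection weights rather than independent probabilities. A minimal sketch under those assumptions:

import numpy as np

AUGM_PROB = 0.9  # assumed value; the constant is not defined in the snippet above

img = np.full((128, 128, 3), 200, dtype=np.uint8)   # hypothetical input
augmented = get_augmentations()(image=img)['image']  # OneOf is callable like any transform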
Example 3
def get_transform(name='default', resize=512):
    if name == 'default':
        transform = A.Compose([
            A.Resize(resize, resize),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.OneOf([
                A.RandomContrast(),
                A.RandomGamma(),
                A.RandomBrightness(),
                A.ColorJitter(brightness=0.07,
                              contrast=0.07,
                              saturation=0.1,
                              hue=0.1,
                              always_apply=False,
                              p=0.3),
            ],
                    p=0.3),
            A.OneOf([
                A.ElasticTransform(
                    alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                A.GridDistortion(),
                A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
                    p=0.0),
            A.ShiftScaleRotate(),
        ])
    elif name == 'train1':
        transform = A.Compose([
            A.RandomCrop(resize, resize, True),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ColorJitter(brightness=0.07,
                          contrast=0.07,
                          saturation=0.1,
                          hue=0.1,
                          always_apply=False,
                          p=0.3),
            A.ElasticTransform(alpha=120,
                               sigma=120 * 0.05,
                               alpha_affine=120 * 0.03),
            A.ChannelShuffle(p=0.6)
        ])

    elif name == 'val' or name == 'test':
        transform = A.Compose([A.Resize(resize, resize)])
    else:
        return None
    return transform
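A brief sketch of applying the returned pipeline (assuming A is albumentations and the input is an HxWxC uint8 array):

import numpy as np

transform = get_transform(name='default', resize=512)
img = np.random.randint(0, 256, (600, 800, 3), dtype=np.uint8)
out = transform(image=img)['image']   # resized and augmented to (512, 512, 3)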
Example 4
def hard_spatial_augmentations(image_size: Tuple[int, int], rot_angle=45):
    return A.Compose([
        A.OneOf([
            A.NoOp(),
            A.RandomGridShuffle(grid=(4, 4)),
            A.RandomGridShuffle(grid=(3, 3)),
            A.RandomGridShuffle(grid=(2, 2)),
        ]),
        A.MaskDropout(max_objects=10),
        A.OneOf([
            A.ShiftScaleRotate(scale_limit=0.1,
                               rotate_limit=rot_angle,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0,
                               mask_value=0),
            A.NoOp(),
        ]),
        A.OneOf([
            A.ElasticTransform(border_mode=cv2.BORDER_CONSTANT,
                               value=0,
                               mask_value=0),
            A.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                             value=0,
                             mask_value=0),
            A.NoOp(),
        ]),
        # D4
        A.Compose([A.Transpose(), A.RandomRotate90()]),
    ])
Example 5
def test_elastic_transform_interpolation(monkeypatch, interpolation):
    image = np.random.randint(low=0,
                              high=256,
                              size=(100, 100, 3),
                              dtype=np.uint8)
    mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)
    monkeypatch.setattr(
        'albumentations.augmentations.transforms.ElasticTransform.get_params',
        lambda *_: {'random_state': 1111})
    aug = A.ElasticTransform(alpha=1,
                             sigma=50,
                             alpha_affine=50,
                             interpolation=interpolation,
                             p=1)
    data = aug(image=image, mask=mask)
    expected_image = F.elastic_transform(
        image,
        alpha=1,
        sigma=50,
        alpha_affine=50,
        interpolation=interpolation,
        border_mode=cv2.BORDER_REFLECT_101,
        random_state=np.random.RandomState(1111))
    expected_mask = F.elastic_transform(
        mask,
        alpha=1,
        sigma=50,
        alpha_affine=50,
        interpolation=cv2.INTER_NEAREST,
        border_mode=cv2.BORDER_REFLECT_101,
        random_state=np.random.RandomState(1111))
    assert np.array_equal(data['image'], expected_image)
    assert np.array_equal(data['mask'], expected_mask)
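The test receives interpolation as an argument; in a pytest suite it would normally be driven by a parametrize decorator along these lines (the exact value list is an assumption):

import cv2
import pytest

@pytest.mark.parametrize('interpolation',
                         [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC])
def test_elastic_transform_interpolation(monkeypatch, interpolation):
    ...  # body as in the example above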
Example 6
def get_train_transform():
    crop_height = 256
    crop_width = 256

    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=0.5),
            albu.GaussNoise(p=0.5),
        ], p=0.2),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        albu.OneOf([
            albu.CLAHE(clip_limit=2, p=0.5),
            albu.IAASharpen(p=0.5),
            albu.IAAEmboss(p=0.5),
            albu.RandomBrightnessContrast(p=0.5),
        ], p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1)
    ], p=1)
Example 7
def get_transforms(image_size):

    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=5),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.GaussianBlur(blur_limit=5),
            albumentations.GaussNoise(var_limit=(5.0, 30.0)),
        ], p=0.7),

        albumentations.OneOf([
            albumentations.OpticalDistortion(distort_limit=1.0),
            albumentations.GridDistortion(num_steps=5, distort_limit=1.),
            albumentations.ElasticTransform(alpha=3),
        ], p=0.7),

        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
        albumentations.Normalize()
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize()
    ])

    return transforms_train, transforms_val
Example 8
def da_policy_combination_old(img_size):
    print("Using Data Augmentation Combinations Old")
    train_aug = [
        albumentations.ElasticTransform(p=0.72,
                                        alpha=177,
                                        sigma=177 * 0.05,
                                        alpha_affine=176 * 0.03),
        albumentations.GridDistortion(p=0.675, distort_limit=0.3),
        albumentations.OpticalDistortion(p=0.2,
                                         distort_limit=0.2,
                                         shift_limit=0.2),
        albumentations.ShiftScaleRotate(p=0.56,
                                        shift_limit=0.2,
                                        scale_limit=0.0,
                                        rotate_limit=0),  # shift
        albumentations.ShiftScaleRotate(p=0.25,
                                        shift_limit=0.0,
                                        scale_limit=0.2,
                                        rotate_limit=0),  # scale
        albumentations.VerticalFlip(p=0.325),
        albumentations.HorizontalFlip(p=0.3),
        albumentations.Rotate(p=0.625,
                              limit=45,
                              interpolation=1,
                              border_mode=0),
    ]

    train_aug = common_test_augmentation(img_size) + train_aug

    train_aug_img = []

    val_aug = common_test_augmentation(img_size)

    return train_aug, train_aug_img, val_aug
Example 9
def get_transforms(*, data_type):
    if data_type == "light_train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(scale_limit=(0, 0), p=0.5),
            ToTensorV2(),
        ])

    if data_type == "train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            albumentations.OneOf([
                albumentations.ElasticTransform(
                    alpha=1, sigma=20, alpha_affine=10),
                albumentations.GridDistortion(num_steps=6, distort_limit=0.1),
                albumentations.OpticalDistortion(distort_limit=0.05,
                                                 shift_limit=0.05),
            ],
                                 p=0.2),
            albumentations.core.composition.PerChannel(albumentations.OneOf([
                albumentations.MotionBlur(p=.05),
                albumentations.MedianBlur(blur_limit=3, p=.05),
                albumentations.Blur(blur_limit=3, p=.05),
            ]),
                                                       p=1.0),
            albumentations.OneOf([
                albumentations.CoarseDropout(max_holes=16,
                                             max_height=CFG.size // 16,
                                             max_width=CFG.size // 16,
                                             fill_value=0,
                                             p=0.5),
                albumentations.GridDropout(ratio=0.09, p=0.5),
                albumentations.Cutout(num_holes=8,
                                      max_h_size=CFG.size // 16,
                                      max_w_size=CFG.size // 16,
                                      p=0.2),
            ],
                                 p=0.5),
            albumentations.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5),
            ToTensorV2(),
        ],
                       additional_targets={
                           'r': 'image',
                           'g': 'image',
                           'b': 'image',
                           'y': 'image',
                       })

    elif data_type == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])
Example 10
    def get_train_transform(self):

        # fill values for cutout or cropping portion
        fill_value = [255. * mean for mean in self.means]
        rc_padding = 32
        rc_pval = 0.2
        randomCrop = [albumentations.PadIfNeeded(min_height=self.size+rc_padding, min_width=self.size+rc_padding, 
                                                  border_mode=cv2.BORDER_REPLICATE, value=fill_value, p=1.0),
                        
                      albumentations.OneOf([
                                albumentations.RandomCrop(height=self.size, width=self.size, p=rc_pval),
                                albumentations.CenterCrop(height=self.size, width=self.size, p=1-rc_pval),
                              ], p=1.0)
          ]

        train_tf = albumentations.Compose([
                    albumentations.Resize(self.size,self.size),
                    albumentations.RandomBrightness(limit=0.2, p=0.5),
                    albumentations.RandomContrast(limit=0.2, p=0.5),
                    albumentations.Rotate(limit=(-10,10), p=0.70),
                    randomCrop[0], randomCrop[1],
                    albumentations.HorizontalFlip(p=0.7),
                    albumentations.ElasticTransform(sigma=50, alpha=1, alpha_affine=10,p=0.10),
                    albumentations.CoarseDropout(max_holes=1, max_height=64, max_width=64, min_height=16, min_width=16, fill_value=fill_value, p=0.70),
                    albumentations.Normalize(mean=self.means, std=self.stds),
                    ToTensor()
        ])

        train_tf = AlbumCompose(train_tf)
        return train_tf
Example 11
    def get_aug(mode="train"):
        if mode == "Nor":
            aug = A.Compose([
                ToTensor(),
            ])
        elif mode == "train":
            aug = A.Compose([
                A.Flip(),
                A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),
                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ],
                        p=0.5),
                # Affine
                A.OneOf(
                    [A.ElasticTransform(p=1.0),
                     A.IAAPiecewiseAffine(p=1.0)],
                    p=0.5),
                A.Normalize(p=1.0),
                ToTensor(),
            ])
        else:
            aug = A.Compose([
                A.Normalize(p=1.0),
                ToTensor(),
            ])

        return aug
Example 12
 def album(self):  # image transformation
     transform = A.Compose([
         #A.RandomRotate90(),
         A.Flip(p=0.2),
         #A.Transpose(),
         A.ChannelShuffle(p=0.3),
         A.ElasticTransform(p=0.3,border_mode=cv2.BORDER_REFLECT_101,alpha_affine=40),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.2),
             A.MedianBlur(blur_limit=3, p=0.1),
             A.Blur(blur_limit=3, p=0.1),
         ], p=0.2),
         A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.1),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
             A.RandomBrightnessContrast(),
         ], p=0.3),
         A.HueSaturationValue(p=0.3),
     ])
     image = cv2.cvtColor(self.srcResize, cv2.COLOR_BGR2RGB)
     transformed = transform(image=image)['image']
     self.update(transformed)
Example 13
    def __init__(self, data_dir, train=True, **kwargs):

        if train:
            transform_shape = A.Compose([
                A.ElasticTransform(alpha=2, sigma=5, alpha_affine=5),
                A.RandomScale((-.15, .1)),
                A.PadIfNeeded(160, 160, value=0, border_mode=1),
                A.CenterCrop(160, 160),
                A.HorizontalFlip(),
                A.Rotate(limit=5),
            ])

            transform_color = A.Compose([
                A.RandomBrightnessContrast(brightness_limit=.15,
                                           contrast_limit=.15),
                A.GaussianBlur(blur_limit=7),
                A.GaussNoise(var_limit=.001, )
            ])
        else:
            transform_shape = transform_color = None

        super(brain_dataset, self).__init__(data_dir,
                                            sample=True,
                                            transform_shape=transform_shape,
                                            transform_color=transform_color,
                                            **kwargs)
Example 14
        def __init__(self, dataset, file, factor=1):
            super(TrainDataset, self).__init__()
            import h5py
            DATA = h5py.File(file, 'r')

            self.data = DATA.get(f'{dataset}/data')
            self.av = DATA.get(f'{dataset}/av')
            self.field = DATA.get(f'{dataset}/radial-field')
            self.mask = DATA.get(f'{dataset}/mask')

            self.geo_aug = A.Compose([
                #A.PadIfNeeded(800, 800, value=0, border_mode=cv2.BORDER_CONSTANT),
                #A.RandomCrop(da_config['crop-size'], da_config['crop-size']),
                A.HorizontalFlip(p=0.5),
                A.Rotate(limit=(-180, 180)),
                A.ElasticTransform(
                    alpha=da_config['elastic-transform']['alpha'],
                    sigma=da_config['elastic-transform']['sigma'],
                    alpha_affine=da_config['elastic-transform']
                    ['alpha-affine'],
                    border_mode=cv2.BORDER_CONSTANT,
                    p=.9),
                A.VerticalFlip(p=0.5),
                ToTensorV2()
            ])
            self.factor = factor
            self._data_length = len(self.data)
Example 15
    def add_transforms(self):
        if self.train:
            self.transforms += [
                A.Resize(int(self.img_size[0] * 1.1), int(self.img_size[1] * 1.1)),
                A.RandomCrop(self.img_size[0], self.img_size[1]),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.Rotate(p=0.5, border_mode=BORDER_REFLECT, value=0),

                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),

                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),
            ]
        else:
            self.transforms += [
                A.Resize(self.img_size[0], self.img_size[1]),
            ]
Example 16
 def __init__(self, image_size):
     self.data_transform = {
         'train_transform':A.Compose([
           A.Transpose(p=0.5),
           A.VerticalFlip(p=0.5),
           A.HorizontalFlip(p=0.5),
           A.RandomBrightness(limit=0.2, p=0.75),
           A.RandomContrast(limit=0.2, p=0.75),
           A.OneOf([
               A.MotionBlur(blur_limit=5),
               A.MedianBlur(blur_limit=5),
               A.GaussianBlur(blur_limit=5),
               A.GaussNoise(var_limit=(5.0, 30.0)),], p=0.7),
           A.OneOf([
               A.OpticalDistortion(distort_limit=1.0),
               A.GridDistortion(num_steps=5, distort_limit=1.),
               A.ElasticTransform(alpha=3),], p=0.7),
           A.CLAHE(clip_limit=4.0, p=0.7),
           A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
           A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
           A.Resize(image_size, image_size),
           A.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),    
           A.Normalize()
           ]),
         'test_transform': A.Compose([
           A.Resize(image_size, image_size),
           A.Normalize()
           ])}
Example 17
def _strong_aug(p=0.5):
    import albumentations
    return albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                        scale_limit=0.2,
                                        rotate_limit=0,
                                        p=0.5,
                                        border_mode=cv2.BORDER_CONSTANT),
        albumentations.OneOf([
            albumentations.OpticalDistortion(p=0.5,
                                             border_mode=cv2.BORDER_CONSTANT),
            albumentations.GridDistortion(p=0.5,
                                          border_mode=cv2.BORDER_CONSTANT),
            albumentations.IAAPiecewiseAffine(p=0.5),
            albumentations.ElasticTransform(p=0.5,
                                            border_mode=cv2.BORDER_CONSTANT),
        ],
                             p=0.5),
        albumentations.OneOf([
            albumentations.CLAHE(clip_limit=2),
            albumentations.IAASharpen(),
            albumentations.IAAEmboss(),
        ],
                             p=0.5),
        albumentations.OneOf([
            albumentations.RandomBrightnessContrast(p=0.5),
        ],
                             p=0.4),
        albumentations.HueSaturationValue(p=0.5),
    ],
                                  p=p)
Example 18
    def get_aug(mode="train"):
        if mode=="Nor":
            aug=A.Compose([
                ToTensor(),
            ])
        elif mode =="train":
            print("train aug")
            mean = (0.485,0.456,0.406)
            std = (0.229,0.224,0.225)
            aug=A.Compose([
                A.Flip(),
                A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),
                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),
                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),

                A.Normalize(mean=mean,std=std,max_pixel_value=255.0,always_apply=True),
            ])
        else:
            print("valid/test aug")
            mean = (0.485,0.456,0.406)
            std = (0.229,0.224,0.225)
            aug=A.Compose([
                A.Normalize(mean=mean,std=std,max_pixel_value=255.0,always_apply=True),
            ])

        return aug 
Example 19
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    augs = {
        'weak':
        albu.Compose([
            albu.HorizontalFlip(),
        ]),
        'geometric':
        albu.OneOf([
            albu.HorizontalFlip(always_apply=True),
            albu.ShiftScaleRotate(always_apply=True),
            albu.Transpose(always_apply=True),
            albu.OpticalDistortion(always_apply=True),
            albu.ElasticTransform(always_apply=True),
        ])
    }

    aug_fn = augs[scope]
    crop_fn = {
        'random': albu.RandomCrop(size, size, always_apply=True),
        'center': albu.CenterCrop(size, size, always_apply=True)
    }[crop]
    pad = albu.PadIfNeeded(size, size)

    pipeline = albu.Compose([aug_fn, pad, crop_fn],
                            additional_targets={'target': 'image'})

    def process(a, b):
        r = pipeline(image=a, target=b)
        return r['image'], r['target']

    return process
Example 20
def get_transforms(type="albumentations"):
    if type == "albumentations":
        train_transforms = albumentations.Compose([
            albumentations.Transpose(p=0.5),
            albumentations.OneOf([
                albumentations.VerticalFlip(p=0.5),
                albumentations.HorizontalFlip(p=0.5),
            ]),
            albumentations.OneOf([
                albumentations.RandomBrightness(limit=0.2, p=0.75),
                albumentations.RandomContrast(limit=0.2, p=0.75),
            ]),
            albumentations.OneOf([
                albumentations.MotionBlur(blur_limit=5),
                albumentations.MedianBlur(blur_limit=5),
                albumentations.GaussianBlur(blur_limit=5),
                albumentations.GaussNoise(var_limit=(5.0, 30.0)),
            ],
                                 p=0.7),
            albumentations.OneOf([
                albumentations.OpticalDistortion(distort_limit=1.0),
                albumentations.GridDistortion(num_steps=5, distort_limit=1.),
                albumentations.ElasticTransform(alpha=3),
            ],
                                 p=0.7),

            # albumentations.OneOf([
            #     albumentations.CLAHE(clip_limit=4.0, p=0.7),
            #     albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
            #     albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0,
            #                                     p=0.85),
            # ]),
            albumentations.Resize(256, 256),
            # albumentations.Cutout(max_h_size=int(256 * 0.375), max_w_size=int(256 * 0.375), num_holes=1, p=0.7),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        test_transforms = albumentations.Compose([
            albumentations.Resize(256, 256),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    else:
        train_transforms = transforms.Compose([
            # AdvancedHairAugmentation(hairs_folder='/kaggle/input/melanoma-hairs'),
            transforms.RandomResizedCrop(size=256, scale=(0.9, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            Microscope(p=0.5),
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        test_transforms = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return train_transforms, test_transforms
Example 21
def elastic_tranform_r_brightness(p=1.0):
    return albumentations.Compose([
        albumentations.ElasticTransform(p=p),
        albumentations.RandomBrightnessContrast(p=p),
        albumentations.RandomRain(p=p)
    ],
                                  p=p)
Example 22
def get_transforms(phase, size, mean, std):
    list_transforms = []
    if phase == "train":
        list_transforms.extend(
            [
                albu.HorizontalFlip(),
                albu.OneOf([
                    albu.RandomContrast(),
                    albu.RandomGamma(),
                    albu.RandomBrightness(),
                    ], p=0.3),
                albu.OneOf([
                    albu.ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                    albu.GridDistortion(),
                    albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
                    ], p=0.3), 
                albu.ShiftScaleRotate(),
            ]
        )
    list_transforms.extend(
        [
            albu.Normalize(mean=mean, std=std, p=1),
            albu.Resize(size, size),
            ToTensorV2(),
        ]
    )

    list_trfms = albu.Compose(list_transforms)
    return list_trfms
Example 23
def get_transforms(phase, width=1600, height=256):
    list_transforms = []
    if phase == "train":
        list_transforms.extend([
            albu.HorizontalFlip(),
            albu.OneOf([
                albu.RandomContrast(),
                albu.RandomGamma(),
                albu.RandomBrightness(),
            ],
                       p=0.3),
            albu.OneOf([
                albu.ElasticTransform(
                    alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                albu.GridDistortion(),
                albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
                       p=0.3),
            albu.ShiftScaleRotate(),
        ])
    list_transforms.extend([
        albu.Resize(height, width, always_apply=True),  # Resize expects (height, width); the original passed them swapped
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        ToTensor(),
    ])
    list_trfms = albu.Compose(list_transforms)
    return list_trfms
Example 24
def cifar_alb_trainData():
    '''Build the Albumentations training pipeline and return a callable that augments a single image.'''

    # `mean` and `std` are undefined in this snippet; the CIFAR-10 channel
    # statistics from the variant below are assumed here.
    mean = (0.491, 0.482, 0.446)
    std = (0.247, 0.243, 0.261)
    train_transform = [
        A.HorizontalFlip(p=0.15),
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.05,
                           rotate_limit=15,
                           p=0.25),
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15,
                   p=0.5),
        A.RandomBrightnessContrast(p=0.25),
        A.RandomGamma(p=0.25),
        A.CLAHE(p=0.25),
        A.ChannelShuffle(p=0.1),
        A.ElasticTransform(p=0.1),
        A.MotionBlur(blur_limit=17, p=0.1),
        A.Cutout(num_holes=1,
                 max_h_size=16,
                 max_w_size=16,
                 fill_value=mean,
                 always_apply=False,
                 p=0.5),
        A.Normalize(mean=mean, std=std),
        ToTensor()
    ]

    transforms_result = A.Compose(train_transform)
    return lambda img: transforms_result(image=np.array(img))["image"]
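Since the factory returns a plain callable that accepts a PIL image and yields a tensor (the pipeline ends with ToTensor()), it can be plugged straight into a torchvision dataset; a hedged sketch, assuming torchvision is available:

import torchvision

train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                         transform=cifar_alb_trainData())
img_tensor, label = train_set[0]   # augmented CHW tensor and its class index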
Example 25
def cifar_alb_trainData():
    '''Build the Albumentations training pipeline and return a callable that augments a single image.'''
    mean = (0.491, 0.482, 0.446)
    std = (0.247, 0.243, 0.261)
    train_transform = [
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.05,
                           rotate_limit=15,
                           p=0.5),
        A.RandomCrop(height=32, width=32),
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15,
                   p=0.5),
        A.RandomBrightnessContrast(p=0.5),
        A.GaussNoise(),
        A.ElasticTransform(),
        # A.MaskDropout((10,15), p=1),
        A.Cutout(num_holes=1,
                 max_h_size=16,
                 max_w_size=16,
                 fill_value=mean,
                 always_apply=False,
                 p=0.5),
        A.Normalize(mean=mean, std=std),
        ToTensorV2()
    ]

    transforms_result = A.Compose(train_transform)
    return lambda img: transforms_result(image=np.array(img))["image"]
Example 26
    def __init__(self, imgs_dir, masks_dir, labels_dir=None):
        self.imgs_dir = imgs_dir
        self.masks_dir = masks_dir
        self.images_filesnames = sorted(os.listdir(imgs_dir))
        self.labels_dir = labels_dir
        self.labels, self.negative_samples_idx = None, []

        if labels_dir is not None:
            self.labels = pd.read_csv(labels_dir)['labels'].to_numpy()
            # For now: use 1 for benign/malignant, 0 for normal
            self.labels[self.labels != 2] = 1
            self.labels[self.labels == 2] = 0
            self.negative_samples_idx = (self.labels == 0)

        # Specify data augmentations here
        self.transformations = A.Compose([
            A.Resize(256, 256),
            A.OneOf([
                A.RandomRotate90(p=0.5),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.ShiftScaleRotate(scale_limit=0.5,
                                   rotate_limit=0,
                                   shift_limit=0.1,
                                   p=0.5,
                                   border_mode=0),
                A.RandomBrightnessContrast(p=0.2),
                A.GridDistortion(p=0.2),
                A.ElasticTransform(p=0.2)
            ]),
        ])
Example 27
def get_individual_transforms():
    transforms = A.Compose([
        A.OneOf(
            [
                A.Transpose(p=1.0),
                A.VerticalFlip(p=1.0),
                A.HorizontalFlip(p=1.0),
                A.RandomRotate90(p=1.0),
                A.NoOp(),
            ],
            p=1.0,
        ),
        A.OneOf(
            [
                A.ElasticTransform(p=1.0),
                A.GridDistortion(p=1.0),
                A.OpticalDistortion(p=1.0),
                A.NoOp(),
            ],
            p=1.0,
        ),
        A.OneOf(
            [
                A.GaussNoise(p=1.0),
                A.GaussianBlur(p=1.0),
                A.ISONoise(p=1.0),
                A.CoarseDropout(
                    p=1.0, max_holes=16, max_height=16, max_width=16),
                A.NoOp(),
            ],
            p=1.0,
        ),
    ])

    return transforms
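Each OneOf above fires with p=1.0 but includes A.NoOp, so every call selects exactly one option per group (possibly the no-op); within a group the child p values act as selection weights. A minimal application sketch (uint8 HxWxC input assumed):

import numpy as np

aug = get_individual_transforms()
img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
out = aug(image=img)['image']   # same shape, one transform chosen per group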
Example 28
def aug_medium(prob=1):
    return aug.Compose([
        aug.Flip(),
        aug.OneOf([
            aug.CLAHE(clip_limit=2, p=.5),
            aug.IAASharpen(p=.25),
        ],
                  p=0.35),
        aug.OneOf([
            aug.RandomContrast(),
            aug.RandomGamma(),
            aug.RandomBrightness(),
        ],
                  p=0.3),
        aug.OneOf([
            aug.ElasticTransform(
                alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            aug.GridDistortion(),
            aug.OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ],
                  p=0.3),
        aug.ShiftScaleRotate(rotate_limit=12),
        aug.OneOf([
            aug.GaussNoise(p=.35),
            SaltPepperNoise(level_limit=0.0002, p=.7),
            aug.ISONoise(p=.7),
        ],
                  p=.5),
        aug.Cutout(num_holes=3, p=.25),
    ],
                       p=prob)
Example 29
    def get_augmentation(
        aug_prob: float = 1.0,
        rotate_limit: int = 45,
        scale_limit: float = 0.1,
        shift_limit: float = 0.3,
    ):
        """Returns image augmentor."""
        aug = alb.Compose(
            [
                # Flips, shifts, scales, rotations.
                alb.ShiftScaleRotate(
                    shift_limit=shift_limit,
                    scale_limit=scale_limit,
                    rotate_limit=rotate_limit,
                    p=aug_prob,
                ),
                # Transforms.
                alb.ElasticTransform(),
            ],
            p=aug_prob,
        )

        def augment(image):
            augmented = aug(image=image.numpy())
            return augmented["image"]

        def _mapper(image):
            image_aug = tf.py_function(augment, [image], image.dtype)
            image_aug.set_shape(image.shape)
            return image_aug

        return _mapper
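A hedged sketch of wiring the returned _mapper into a tf.data pipeline (assuming get_augmentation is reachable as a plain function even though it appears to be defined inside a class, and that images are float32 HxWxC tensors in [0, 1]):

import numpy as np
import tensorflow as tf

images = np.random.rand(8, 64, 64, 3).astype('float32')   # hypothetical batch
ds = tf.data.Dataset.from_tensor_slices(images)
ds = ds.map(get_augmentation(aug_prob=0.5), num_parallel_calls=tf.data.AUTOTUNE)

for augmented in ds.take(1):
    print(augmented.shape)   # (64, 64, 3)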
Example 30
    def composeAugmentation(self):
        if self.source == 'train':
            self.total = 10
            self.augment = albumentations.Compose([
                albumentations.Rotate(3, always_apply=True),
                albumentations.RandomSizedCrop((self.sizeY // 2, 700),
                                               self.sizeY,
                                               self.sizeX,
                                               1,
                                               always_apply=True),
                albumentations.HorizontalFlip(),
                #albumentations.GridDistortion(always_apply=False),
                #albumentations.IAAAffine(rotate=2, shear=5, always_apply=False),

                #albumentations.OpticalDistortion(),
                albumentations.ElasticTransform(alpha=64,
                                                sigma=24,
                                                always_apply=False,
                                                alpha_affine=0),
                albumentations.RandomBrightnessContrast(0.1,
                                                        0.1,
                                                        always_apply=False),
                #albumentations.Blur(always_apply=False)
            ])
        else:
            self.total = 4
            self.augment = albumentations.Compose([
                albumentations.RandomSizedCrop((self.sizeY, self.sizeY),
                                               self.sizeY,
                                               self.sizeX,
                                               1,
                                               always_apply=True),
            ])