# Example 1
 def __init__(self):
     # Heavy albumentations policy. Each A.OneOf picks exactly one of its
     # children per application; the group-level ``p`` gates how often the
     # group fires at all.
     # NOTE(review): IAA* transforms need imgaug support, which was removed
     # in albumentations >= 1.0 — confirm the pinned library version.
     self.policy = A.Compose([
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         # occlusion-style dropout; no group-level p, so the default applies
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         # additive noise variants
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         # blur variants
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         # color-space perturbations
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         # geometric distortions
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         # contrast / sharpening
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
# Example 2
def aug(p=0.5):
    """Return a Compose of shift/scale/rotate, flip, gamma, sharpening,
    optical distortion and brightness/contrast jitter.

    Args:
        p: probability that the whole composed pipeline is applied.
    """
    # NOTE(review): bare names (Compose, ShiftScaleRotate, ...) and the
    # ``albumentations.`` prefix are mixed — both presumably resolve to the
    # same library via file-level imports; verify.
    return Compose(
        [
            ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT,
                             rotate_limit=20,
                             shift_limit=0.2,
                             scale_limit=0.2,
                             p=1),  ## interpolation=cv2.INTER_CUBIC
            HorizontalFlip(p=0.5),
            # VerticalFlip(p=0.5),
            # RandomBrightnessContrast(brightness_limit=0.25, contrast_limit=0.25, p=1),
            RandomGamma(p=0.5),  # (gamma_limit=(50, 150), p=1)
            albumentations.IAASharpen(p=0.5),
            albumentations.OpticalDistortion(p=0.5),
            albumentations.RandomBrightnessContrast(p=0.2)
        ],
        p=p)
# Example 3
def get_training_augmentation1(image_size: tuple = (320, 640)):
    """Build the training augmentation pipeline.

    Args:
        image_size: (height, width) unpacked into the final ``albu.Resize``.

    Returns:
        ``albu.Compose`` applying flip, shift/scale/rotate, grid and optical
        distortion, then resizing to ``image_size``.
    """
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.3, rotate_limit=15, shift_limit=0.1, p=0.5, border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=0.1, shift_limit=0.2),
        albu.Resize(*image_size),
    ]
    return albu.Compose(train_transform)
# Example 4
def get_augmentations(pretrained, img_size, segmentation_problem, randaug_n, randaug_m, cutout_size):
    """Build train/val pipelines for classification or segmentation.

    Args:
        pretrained: if True, append ImageNet normalization (classification only).
        img_size: target image size.
        segmentation_problem: if True, return albumentations transform lists
            instead of torchvision pipelines.
        randaug_n, randaug_m: RandAugment op count / magnitude; both must be
            > 0 to enable it.
        cutout_size: hole size for CutoutDefault; > 0 to enable.

    Returns:
        (train_aug, val_aug, train_albumentation, val_albumentation) — the
        torchvision pair is None for segmentation problems, the
        albumentations pair is None for classification problems.
    """
    train_aug, val_aug, train_albumentation, val_albumentation = None, None, None, None
    if not segmentation_problem:
        train_aug = transforms.Compose([
            transforms.ToPILImage(),  # because the input dtype is numpy.ndarray
            transforms.Resize(img_size, interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])

        val_aug = transforms.Compose([
            transforms.ToPILImage(),  # because the input dtype is numpy.ndarray
            transforms.Resize(img_size, interpolation=Image.BICUBIC),
            transforms.ToTensor(),
        ])

        if pretrained:
            #train_aug.transforms.append(Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']), )
            train_aug.transforms.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
            val_aug.transforms.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))

        # insert right after ToPILImage (index 0) so RandAugment sees PIL images
        if randaug_n > 0 and randaug_m > 0:
            train_aug.transforms.insert(1, RandAugment(randaug_n, randaug_m))

        if cutout_size > 0:
            train_aug.transforms.append(CutoutDefault(cutout_size))

    else:  # Segmentation problem
        train_albumentation = [
            albumentations.Resize(img_size, img_size),
            albumentations.ElasticTransform(p=0.72, alpha=177, sigma=177 * 0.05, alpha_affine=176 * 0.03),
            albumentations.GridDistortion(p=0.675, distort_limit=0.3),
            albumentations.OpticalDistortion(p=0.2, distort_limit=0.2, shift_limit=0.2),

            albumentations.ShiftScaleRotate(p=0.56, shift_limit=0.2, scale_limit=0.0, rotate_limit=0),  # shift
            albumentations.ShiftScaleRotate(p=0.25, shift_limit=0.0, scale_limit=0.2, rotate_limit=0),  # scale

            albumentations.VerticalFlip(p=0.325),
            albumentations.HorizontalFlip(p=0.3),
        ]

        val_albumentation = [
            albumentations.Resize(img_size, img_size),
        ]

    return train_aug, val_aug, train_albumentation, val_albumentation
# Example 5
def aug_wo_crop():
    """Aggressive augmentation pipeline without cropping.

    Every step has p=1.0, so each transform fires on every call; the OneOf
    group applies exactly one of its distortion members.
    """
    distortion_group = A.OneOf(
        [
            A.ElasticTransform(p=1.0, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            A.GridDistortion(p=1.0),
            A.OpticalDistortion(p=1.0, distort_limit=1, shift_limit=0.1),
        ],
        p=1.0,
    )
    steps = [
        A.HorizontalFlip(p=1.0),
        A.VerticalFlip(p=1.0),
        A.RandomRotate90(p=1.0),
        A.Transpose(p=1.0),
        A.ShiftScaleRotate(shift_limit=0.01, scale_limit=0.04, rotate_limit=0, p=1.0),
        A.Blur(p=1.0, blur_limit=3),
        distortion_group,
    ]
    return A.Compose(steps, p=1)
def get_training_augmentation_large():
    """Training augmentation for large inputs: spatial jitter, optical
    distortion, then a fixed resize to 640x960."""
    pipeline = [
        albu.ShiftScaleRotate(
            scale_limit=0.5,
            rotate_limit=0,
            shift_limit=0.1,
            p=0.5,
            border_mode=0,
        ),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        albu.Resize(640, 960),
    ]
    return albu.Compose(pipeline)
# Example 7
def aug_flip_and_rotate(load_path=None):
    """Return an augmentation pipeline.

    Args:
        load_path: optional path to a serialized pipeline; if given, it is
            loaded via ``A.load`` and returned as-is.

    Returns:
        The loaded pipeline, or a default rotate/flip/optical-distortion
        Compose (each step with p=0.5).
    """
    # guard clause: a saved pipeline takes precedence over the default one
    if load_path:
        return A.load(load_path)

    return A.Compose([
        A.Rotate(limit=(-90, 90), p=0.5),
        A.Flip(p=0.5),
        A.OpticalDistortion(
            distort_limit=0.05,
            shift_limit=0.05,
            interpolation=cv2.INTER_LINEAR,
            border_mode=cv2.BORDER_REFLECT_101,
            value=None,
            mask_value=None,
            always_apply=False,
            p=0.5,
        ),
    ])
# Example 8
def create_train_transform(flip, noise, cutout, resize, size=112, bright=True):
    """Assemble the training augmentation pipeline.

    Args:
        flip: include a horizontal flip (p=0.5).
        noise: include one of blur / optical distortion / Gaussian noise (p=0.5).
        cutout: include a single Cutout hole sized 20% of ``size`` (p=0.3).
        resize: upscale (128 or 256) then random-crop back to ``size``.
        size: final crop size; upscale targets are special-cased for 112 and 224.
        bright: include RandomBrightness with limit 0.2.

    Returns:
        ``albumentations.Compose`` ending with a dataset-specific Normalize.
    """
    translist = []
    if resize:
        # upscale slightly so the RandomCrop below adds translation jitter
        if size == 112:
            translist += [albumentations.Resize(128, 128)]
        elif size == 224:
            translist += [albumentations.Resize(256, 256)]
        translist += [albumentations.RandomCrop(size, size, always_apply=True)]

    if flip:
        # FIX: OneOf with a single child applies that child with the OneOf's
        # probability — the wrapper was a no-op; apply the flip directly.
        translist += [albumentations.HorizontalFlip(p=0.5)]

    if noise:
        translist += [
            albumentations.OneOf([
                albumentations.MotionBlur(blur_limit=5),
                albumentations.MedianBlur(blur_limit=5),
                albumentations.OpticalDistortion(),
                albumentations.GaussNoise(var_limit=(5.0, 30.0))
            ],
                                 p=0.5)
        ]

    if bright:
        translist += [
            albumentations.RandomBrightness(limit=0.2, always_apply=False)
        ]

    if cutout:
        translist += [
            albumentations.Cutout(max_h_size=int(size * 0.2),
                                  max_w_size=int(size * 0.2),
                                  num_holes=1,
                                  p=0.3)
        ]

    #translist+=[albumentations.Normalize(mean=(0.2481, 0.2292, 0.2131), std = (0.2167,0.2071,0.2014))]
    #trainlist+=[albumentations.Normalize(mean=(0.2539, 0.2348, 0.2189), std = (0.2195,0.2110,0.2061))]
    translist += [
        albumentations.Normalize(mean=(0.2580, 0.2360, 0.2215),
                                 std=(0.2235, 0.2132, 0.2100))
    ]
    transform = albumentations.Compose(translist)
    return transform
# Example 9
    def __init__(self, input_shape, train):
        """Set up an optional albumentations transform.

        Args:
            input_shape: sequence of sizes; the train pipeline resizes to a
                square of input_shape[0], the val pipeline to
                input_shape[0] x input_shape[1].
            train: if True, build the augmentation pipeline; otherwise a
                plain resize.

        If albumentations is not installed, ``self.transform`` stays None
        (augmentation silently disabled); any other setup error is logged.
        """
        self.transform = None
        try:
            import albumentations as A
            check_version(A.__version__, '1.0.3',
                          hard=True)  # version requirement

            if train:
                # lane mark used
                # self.transform = A.Compose([
                #     A.Resize(height=input_shape[0],width=input_shape[0],interpolation=cv2.INTER_AREA),
                #     # A.RandomRotate90(p=0.5),
                #     # A.VerticalFlip(p=0.5),
                #     # A.HorizontalFlip(p=0.5),
                #     # A.ChannelShuffle(p=0.5),
                #     A.OpticalDistortion(p=0.5),
                #     A.GridDistortion(p=0.5),
                #     A.HueSaturationValue(p=0.5),
                #     A.RandomBrightnessContrast(0.5),
                #     # A.Transpose(p=0.5)
                #     ])

                # normal
                self.transform = A.Compose([
                    A.Resize(height=input_shape[0],
                             width=input_shape[0],
                             interpolation=cv2.INTER_AREA),
                    A.RandomRotate90(p=0.5),
                    A.VerticalFlip(p=0.5),
                    A.HorizontalFlip(p=0.5),
                    A.ChannelShuffle(p=0.5),
                    A.OpticalDistortion(p=0.5),
                    A.GridDistortion(p=0.5),
                    A.HueSaturationValue(p=0.5),
                    A.RandomBrightnessContrast(0.5),
                    A.Transpose(p=0.5)
                ])
            else:
                self.transform = A.Compose(
                    [A.Resize(input_shape[0], input_shape[1])], )
            # LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            LOGGER.info(colorstr('albumentations: ') + f'{e}')
# Example 10
def get_transforms(image_size):
    """Return (train, val) albumentations pipelines for square inputs.

    Args:
        image_size: output side length; Cutout holes are 37.5% of it.

    Returns:
        (transforms_train, transforms_val); both end with Normalize, and the
        val pipeline only resizes.
    """

    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        # one blur/noise variant
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=5),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.GaussianBlur(blur_limit=5),
            albumentations.GaussNoise(var_limit=(5.0, 30.0)),
        ],
                             p=0.7),
        # one geometric distortion variant
        albumentations.OneOf([
            albumentations.OpticalDistortion(distort_limit=1.0),
            albumentations.GridDistortion(num_steps=5, distort_limit=1.),
            albumentations.ElasticTransform(alpha=3),
        ],
                             p=0.7),
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10,
                                          sat_shift_limit=20,
                                          val_shift_limit=10,
                                          p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1,
                                        scale_limit=0.1,
                                        rotate_limit=15,
                                        border_mode=0,
                                        p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=int(image_size * 0.375),
                              max_w_size=int(image_size * 0.375),
                              num_holes=1,
                              p=0.7),
        albumentations.Normalize()
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize()
    ])

    return transforms_train, transforms_val
# Example 11
def get_training_augmentation(width=525, height=350):
    """Training augmentation pipeline.

    NOTE(review): ``width`` and ``height`` are currently unused — the resize
    is hard-coded to 1024x704 and the parameterized resize below is
    commented out. Confirm which output size is intended before relying on
    the parameters.
    """
    train_transform = [
        albu.Resize(height=1024, width=704),
        # albu.PadIfNeeded(min_height=640, min_width=480),
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=0.5,
                              border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        # albu.Resize(width=width, height=height)
        # albu.Resize(1050, 700),
        # albu.RandomCrop(672,672),
        #
    ]
    return albu.Compose(train_transform)
# Example 12
def get_training_augmentation(p=0.5, size=(320, 640)):
    """Training augmentations: resize, then probabilistic flips/jitter/blur.

    Args:
        p: per-transform application probability (shared by all steps).
        size: (height, width) unpacked into the initial ``albu.Resize``.
    """
    train_transform = [
        albu.Resize(*size),
        albu.HorizontalFlip(p=p),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=15,
                              shift_limit=0.1,
                              border_mode=0,
                              p=p),
        albu.GridDistortion(p=p),
        albu.OpticalDistortion(distort_limit=0.1,
                               shift_limit=0.5,
                               p=p),
        # albu.Cutout(p=p),
        albu.OneOf([albu.Blur(p=p),
                    albu.GaussianBlur(p=p)], p=p),
    ]
    return albu.Compose(train_transform)
# Example 13
def albumentation(output_folder_name, main_path, original_height, original_width, input_path):

  '''
    Augment every image/label pair under ``input_path`` once and write the
    results to a freshly created output folder.

    - output_folder_name : you should give just the name of the output folder, it will be created by function
    - main_path : the folder that output folder will be created and results will be saved
    - input_path : the folder that includes images and labels in seperate folders
      (image and label files must share the same name)
  '''

  out_root = os.path.join(main_path, output_folder_name)
  out_images = os.path.join(out_root, 'images')
  out_labels = os.path.join(out_root, 'labels')
  os.mkdir(out_root)
  os.mkdir(out_images)
  os.mkdir(out_labels)

  ##############################################################
  # FIX: the pipeline is loop-invariant — build it once, not per image.
  aug = A.Compose([
    A.OneOf([
        A.RandomSizedCrop(min_max_height=(50, 101), height=original_height, width=original_width, p=0.5),
        A.PadIfNeeded(min_height=original_height, min_width=original_width, p=0.5)
    ], p=1),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(p=0.5),
    A.OneOf([
        A.ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03, p=0.5),
        A.GridDistortion(p=0.5),
        A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
        ], p=0.8),
    A.CLAHE(p=0.8),
    A.RandomBrightnessContrast(p=0.8),
    A.RandomGamma(p=0.8)])
  ##############################################################

  for img in sorted(os.listdir(os.path.join(input_path, 'images'))):

    image = cv2.imread(os.path.join(input_path, 'images', img), 0)
    mask  = cv2.imread(os.path.join(input_path, 'labels', img), 0)
    # FIX: cv2.imread returns None on failure (no exception); fail loudly
    # here instead of crashing later inside the augmentation call.
    if image is None or mask is None:
      raise FileNotFoundError(f"Could not read image/label pair for '{img}'")

    augmented = aug(image=image, mask=mask)

    cv2.imwrite(os.path.join(out_images, img), augmented['image'])
    cv2.imwrite(os.path.join(out_labels, img), augmented['mask'])

  print("Results are saved in output directory.")
# Example 14
def load_agumentation_pipelines():
    """Return (train, test) pipelines for 512x512 inputs, both ending in
    ImageNet normalization and tensor conversion.

    (The function name keeps its historical spelling for existing callers.)
    """
    # Define the augmentation pipeline
    augmentation_pipeline_train = A.Compose(
        [
            A.Resize(width=512, height=512),
            A.HorizontalFlip(p=0.5),  # apply horizontal flip to 50% of images
            A.Rotate(limit=90,
                     p=0.5),  # apply random with limit of 90° to 50% of images
            A.OneOf(
                [
                    # apply one of transforms to 30% of images
                    A.RandomBrightnessContrast(
                    ),  # apply random contrast & brightness
                    A.RandomGamma(),  # apply random gamma
                ],
                p=0.3,
            ),
            A.OneOf(
                [
                    # apply one of transforms to 30% images
                    A.ElasticTransform(
                        alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                    A.GridDistortion(),
                    A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
                ],
                p=0.3,
            ),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ToTensorV2(),  # convert the image to PyTorch tensor
        ],
        p=1,
    )

    # Define the transformation pipeline for test
    tranformation_pipeline_test = A.Compose(
        [
            A.Resize(width=512, height=512),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ToTensorV2(),  # convert the image to PyTorch tensor
        ],
        p=1,
    )

    return augmentation_pipeline_train, tranformation_pipeline_test
# Example 15
def get_training_augmentation(size=480):
    """Photometric jitter + geometric distortion, then a square resize.

    Args:
        size: output side length for the final ``albu.Resize``.
    """
    train_transform = [
        #             albu.HorizontalFlip(p=0.5),
        #             albu.VerticalFlip(p=0.5),
        albu.RandomBrightnessContrast(p=0.3),
        albu.CLAHE(p=0.3),
        albu.ShiftScaleRotate(scale_limit=0.75,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=0.5,
                              border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        #             albu.RandomResizedCrop( 320, 640, scale=(0.5, 1.0), p=0.75),
        #             albu.RandomResizedCrop( 350, 525, scale=(0.5, 1.0), p=0.75),
        #             albu.Resize(350, 525)
        albu.Resize(size, size)
    ]
    return albu.Compose(train_transform)
# Example 16
def augmentation(img_size):
    """Spatial + pixel-level augmentations; spatial ops use nearest-neighbour
    interpolation (mask-friendly).

    Args:
        img_size: (height, width) used by ``RandomResizedCrop``.
    """
    return albu.Compose([
        #spatial wise
        #albu.RandomCrop(img_size[0],img_size[1]),
        albu.RandomResizedCrop(img_size[0],img_size[1],
                               interpolation=cv2.INTER_NEAREST),
        albu.Rotate(interpolation=cv2.INTER_NEAREST),
        albu.HorizontalFlip(),
        albu.ElasticTransform(alpha_affine=10,interpolation=cv2.INTER_NEAREST),
        albu.OpticalDistortion(interpolation=cv2.INTER_NEAREST),

        #pixel transform
        albu.RandomGamma((100,180),p=0.8),
        albu.Blur(5),
        albu.RandomBrightness(limit=(0.05,0.20)),
        albu.RandomContrast(limit=(0,0.20),p=0.5),
        albu.MotionBlur( blur_limit =7),

        ])
# Example 17
def get_training_augmentation():
    """Training pipeline: fixed 320x640 resize, flips, spatial jitter with
    reflected borders, optical distortion, then brightness/contrast and
    Gaussian noise."""
    train_transform = [
        albu.Resize(320, 640),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=15, shift_limit=0.15, p=0.9, border_mode=cv2.BORDER_REFLECT),
        #albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        #albu.OneOf([
        #    albu.CLAHE(clip_limit=2),
        #    albu.IAASharpen(),
        #    albu.IAAEmboss(),
        #    albu.RandomContrast(),
        #    albu.RandomBrightness(),
        #], p=0.3),
        albu.RandomBrightnessContrast(p=0.5),
        albu.GaussNoise(p=0.5)
    ]
    return albu.Compose(train_transform)
# Example 18
def get_training_augmentation():
    """Training augmentation producing fixed 256x320 crops.

    NOTE(review): ``CropNonEmptyMaskIfExists`` is referenced unqualified —
    presumably imported from albumentations at file level; verify.
    """
    train_transform = [

        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        
        A.Rotate(limit=3, p=0.5),
        A.OpticalDistortion(p=0.2),

        # crop preferring non-empty mask regions, else a plain random crop
        A.OneOf([
            CropNonEmptyMaskIfExists(256,320, p=0.3),
            A.RandomCrop(256, 320, p=0.7),
        ], p=1.0),
        A.PadIfNeeded(min_height=256, min_width=550, 
                      border_mode=cv2.BORDER_CONSTANT, 
                      value=0, p=0.3),
        # final crop guarantees the 256x320 output shape
        A.RandomCrop(256, 320, p=1.0)
    ]
    return A.Compose(train_transform)
# Example 19
 def build_Transform(self):
     """Build tensor conversion/normalization, resize and (if the caller
     did not inject one) the default training augmentation pipeline."""
     self.as_tensor = T.Compose([
         T.ToTensor(),
         T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
         # T.Normalize([0.66406784, 0.50002077, 0.7019763],
         #             [0.15964855, 0.24647547, 0.13597253]),
     ])
     # identity affine transform (rasterio)
     self.identity = rasterio.Affine(1, 0, 0, 0, 1, 0)
     self.resizefunc = A.Compose(
         [A.Resize(self.imgsize[0], self.imgsize[1])])
     # only install the defaults when no transform was provided
     if self.transform is None:
         self.transform = A.Compose([
             A.HorizontalFlip(p=0.5),
             A.VerticalFlip(p=0.5),
             A.ChannelShuffle(p=0.25),
             # one color jitter variant
             A.OneOf([
                 A.HueSaturationValue(12, 12, 12, p=0.8),
                 A.CLAHE(clip_limit=2),
                 A.RandomBrightnessContrast(),
             ],
                     p=0.5),
             # one brightness/contrast/gamma variant
             A.OneOf([
                 A.RandomContrast(),
                 A.RandomGamma(),
                 A.RandomBrightness(),
                 A.ColorJitter(brightness=0.07,
                               contrast=0.07,
                               saturation=0.1,
                               hue=0.1,
                               always_apply=False,
                               p=0.6),
             ],
                     p=0.5),
             # one geometric distortion variant
             A.OneOf([
                 A.ElasticTransform(
                     alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                 A.GridDistortion(),
                 A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
             ],
                     p=0.3),
             A.ShiftScaleRotate(p=0.5),
         ])
# Example 20
def classification_augmentation():
    """Rationalized version of the one above.

    Returns an ``alb.Compose`` (overall p=0.9) of flip, noise, blur,
    shift/scale/rotate, distortion, contrast/sharpen and HSV jitter; the
    inline comments record measured per-step timings that motivated which
    variants were kept or commented out.
    """

    return alb.Compose(
        [
            alb.HorizontalFlip(p=.5),  # 128 mus
            alb.OneOf(
                [
                    # These two do the same thing I think. Keeping only
                    # the faster one.
                    alb.IAAAdditiveGaussianNoise(p=1.),  # 484 mus
                    # alb.GaussNoise(p=1.),  # 1.11 ms
                ],
                p=0.2),  # 1.03 ms with both
            alb.OneOf([
                alb.MotionBlur(p=1.),
                alb.MedianBlur(blur_limit=3, p=1.),
                alb.Blur(blur_limit=3, p=1.),
            ],
                      p=0.2),  # 40 mus
            alb.ShiftScaleRotate(
                shift_limit=0.1, scale_limit=0.1, rotate_limit=30, p=0.8),
            # (above) 348 mus
            alb.OneOf(
                [
                    alb.OpticalDistortion(p=1.),  # 95 mus
                    alb.GridDistortion(p=1.),  # 101 mus
                    # alb.IAAPiecewiseAffine(p=1.),  # 5.61 ms
                ],
                p=0.2),  # 2.48 ms -> 113 mus with the 2 first ones
            alb.OneOf([
                alb.CLAHE(clip_limit=2, p=1.),
                alb.IAASharpen(p=1.),
                alb.IAAEmboss(p=1.),
                alb.RandomContrast(p=1.),
                alb.RandomBrightness(p=1.),
            ],
                      p=0.3),  # 257 mus
            alb.HueSaturationValue(p=0.3),  # 395 mus
        ],
        p=0.9)  # 3.84 ms -> 1.52 ms
def get_training_augmentation(x: int = 320, y: int = 640):
    """Training augmentation resizing to (x, y); ``additional_targets`` maps
    image2/3/4 through the same image pipeline (multi-image samples).

    Args:
        x: output height for the final resize.
        y: output width for the final resize.
    """
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=0.5,
                              border_mode=0),
        # albu.RandomGamma(p=0.75),
        albu.GridDistortion(p=0.25),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        albu.Resize(x, y),
        # albu.Normalize(always_apply=True),
    ]
    return albu.Compose(train_transform,
                        additional_targets={
                            "image2": "image",
                            "image3": "image",
                            "image4": "image"
                        })
def get_transforms(phase, width=1600, height=256):
    """Return the augmentation pipeline for *phase*.

    Args:
        phase: "train" enables flip/shift-scale-rotate/distortion jitter;
            any other value yields resize only.
        width: target output width in pixels.
        height: target output height in pixels.

    Returns:
        ``albu.Compose`` ending with a resize to (height, width).
    """
    list_transforms = []
    if phase == "train":
        list_transforms.extend([
            albu.HorizontalFlip(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.5,
                                  rotate_limit=0,
                                  shift_limit=0.1,
                                  p=0.5,
                                  border_mode=0),
            albu.GridDistortion(p=0.5),
            albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        ])
    list_transforms.extend([
        # FIX: albu.Resize takes (height, width); the original passed
        # (width, height) and silently transposed the output dimensions.
        albu.Resize(height, width),
        # albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        # ToTensor(),
    ])
    list_trfms = albu.Compose(list_transforms)
    return list_trfms
# Example 23
def get_train_transforms_v2():
    """Training pipeline at config.SIZE with light noise, contrast, optical
    distortion, blur and grid shuffle, ending with tensor conversion."""
    return A.Compose(
        [
            A.Resize(config.SIZE, config.SIZE),
            A.OneOf(
                [
                    A.GaussNoise(var_limit=1.15),
                    A.MultiplicativeNoise(multiplier=1.1),
                ],
                p=0.2,
            ),
            A.RandomBrightnessContrast(
                contrast_limit=0.12, brightness_limit=0.12, p=0.2
            ),
            A.OpticalDistortion(distort_limit=0.07, shift_limit=0.07, p=0.25),
            A.GaussianBlur(p=0.15),
            A.RandomGridShuffle(grid=(4, 4), p=0.2),
            ToTensorV2(),
        ]
    )
def generate_ds(size):
    """Build a HubDataset with a size x size training augmentation pipeline.

    Args:
        size: square side length for the initial resize.

    NOTE(review): relies on module-level DATA_PATH, WINDOW and MIN_OVERLAP.
    """
    trfm = A.Compose([
        A.Resize(size, size, p=1.0),
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.RandomRotate90(),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=20, p=0.9, 
                         border_mode=cv2.BORDER_REFLECT),
        # one geometric distortion variant
        A.OneOf([
            A.OpticalDistortion(p=0.4),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.4),
        ], p=0.3),
        # one color jitter variant
        A.OneOf([
            A.HueSaturationValue(10,15,10),
            A.CLAHE(clip_limit=3),
            A.RandomBrightnessContrast(),            
        ], p=0.5)
    ], p=1.0)

    return HubDataset(DATA_PATH, window=WINDOW, overlap=MIN_OVERLAP, transform=trfm)
# Example 25
def data_augmentation(image_path, aug_num):
    """Read *image_path* and return *aug_num* independently augmented copies.

    Args:
        image_path: path to an image readable by OpenCV.
        aug_num: number of augmented patches to generate.

    Returns:
        List of RGB numpy arrays, one per independent augmentation draw.

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    # FIX: cv2.imread returns None (no exception) on a bad path; fail with a
    # clear error instead of an opaque cvtColor crash.
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    augmentation = A.Compose(
        [
            A.RandomRotate90(),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
                    p=0.2),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
                    p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
                A.RandomBrightnessContrast(),
            ],
                    p=0.3),
            # A.HueSaturationValue(p=0.3),
        ],
        p=0.5)
    # one independent random draw per requested patch
    return [augmentation(image=image)['image'] for _ in range(aug_num)]
def get_medium_augmentations(image_size):
    """Medium-strength pipeline mixing project transforms (ZeroTopAndBottom,
    FancyPCA, IndependentRandomBrightnessContrast, ChannelIndependentCLAHE —
    presumably defined elsewhere in this project; verify imports) with
    standard albumentations ops. ``A.NoOp`` members let each OneOf group
    sometimes do nothing.

    Args:
        image_size: (height, width); index 0 bounds the RandomSizedCrop.
    """
    return A.Compose([
        A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.05,
                               scale_limit=0.1,
                               rotate_limit=15,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.OpticalDistortion(distort_limit=0.11,
                                shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.NoOp()
        ]),
        ZeroTopAndBottom(p=0.3),
        A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75),
                                          image_size[0]),
                          height=image_size[0],
                          width=image_size[1],
                          p=0.3),
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),
        A.OneOf([
            FancyPCA(alpha_std=4),
            A.RGBShift(r_shift_limit=20, b_shift_limit=15, g_shift_limit=15),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
            A.NoOp()
        ]),
        A.OneOf([ChannelIndependentCLAHE(p=0.5),
                 A.CLAHE(),
                 A.NoOp()]),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5)
    ])
# Example 27
def albumentations_list(MAGN: int = 4):
    """
    Returns standard list of albumentations transforms, each of magnitude `MAGN`.

    Every transform uses ``always_apply=True`` so selection probability is
    left to the caller (e.g. a RandAugment-style sampler).

    Args:
        MAGN (int): Magnitude of each transform in the returned list.
    """
    M = MAGN
    transform_list = [
        # PIXEL-LEVEL
        A.RandomContrast(limit=M * .1, always_apply=True),
        A.RandomBrightness(limit=M * .1, always_apply=True),
        A.Equalize(always_apply=True),
        A.OpticalDistortion(distort_limit=M * .2,
                            shift_limit=M * .1,
                            always_apply=True),
        A.RGBShift(r_shift_limit=M * 10,
                   g_shift_limit=M * 10,
                   b_shift_limit=M * 10,
                   always_apply=True),
        A.ISONoise(color_shift=(M * .01, M * .1),
                   intensity=(M * .02, M * .2),
                   always_apply=True),
        A.RandomFog(fog_coef_lower=M * .01,
                    fog_coef_upper=M * .1,
                    always_apply=True),
        A.CoarseDropout(max_holes=M * 10, always_apply=True),
        A.GaussNoise(var_limit=(M, M * 50), always_apply=True),

        # SPATIAL
        A.Rotate(always_apply=True),
        A.Transpose(always_apply=True),
        A.NoOp(always_apply=True),
        A.ElasticTransform(alpha=M * .25,
                           sigma=M * 3,
                           alpha_affine=M * 3,
                           always_apply=True),
        A.GridDistortion(distort_limit=M * .075, always_apply=True)
    ]
    return transform_list
# Example 28
    def _setup_transform(self, cfg):
        """Build joint image+mask spatial transforms, image-only pixel
        transforms, and torchvision resize/tensor/normalize steps from cfg."""
        # Albumentation example: https://albumentations.readthedocs.io/en/latest/examples.html
        # spatial transforms applied to image and mask together
        self.img_mask_transform = A.Compose([
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=175, p=0.8, border_mode=cv2.BORDER_CONSTANT),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.ElasticTransform(),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.IAAPiecewiseAffine(),
            ]),
            A.OneOf([
                    A.RandomCrop(height=self.size_crop,width=self.size_crop,p=0.5),  
                    A.CenterCrop(height=self.size_crop,width=self.size_crop,p=0.5)
            ]),            
            A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0,p=0.5),
            ],p=0.8)

        # pixel-level transforms applied to the image only
        self.img_pixel_transform = A.Compose([
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.OneOf([
                A.IAASharpen(),
                A.IAAEmboss(),
                # A.RandomBrightnessContrast(),            
            ], p=0.3),
            A.HueSaturationValue(hue_shift_limit=3,sat_shift_limit=20,val_shift_limit=3 ,p=0.2),
        ],p=0.5)
        # Torch transform
        self.resize_transform = transforms.Resize(cfg.MODEL.IMAGE_SIZE, Image.NEAREST)
        self.to_tensor_transform = transforms.ToTensor()
        self.normalize_transform = transforms.Normalize(mean=cfg.TRAIN.NORMALIZE_MEAN, std=cfg.TRAIN.NORMALIZE_STD)
# Example 29
def aug_heavy(prob=0.9):
    """Return a heavy augmentation pipeline.

    Stages: flips, sharpening/contrast enhancement, noise, blur, tone
    jitter, affine shift/scale/rotate, non-rigid distortions, and HSV shift.

    Args:
        prob: probability that the composed pipeline is applied at all.
    """
    enhance = aug.OneOf([
        aug.CLAHE(clip_limit=2, p=.5),
        aug.IAASharpen(p=.25),
        aug.IAAEmboss(p=.25),
    ], p=.35)
    noise = aug.OneOf([
        aug.IAAAdditiveGaussianNoise(p=.3),
        aug.GaussNoise(p=.7),
        SaltPepperNoise(level_limit=0.0002, p=.7),
        aug.ISONoise(p=.3),
    ], p=.5)
    blur = aug.OneOf([
        aug.MotionBlur(p=.2),
        aug.MedianBlur(blur_limit=3, p=.3),
        aug.Blur(blur_limit=3, p=.5),
    ], p=.4)
    tone = aug.OneOf([
        aug.RandomContrast(p=.5),
        aug.RandomBrightness(p=.5),
        aug.RandomGamma(p=.5),
    ], p=.4)
    warp = aug.OneOf([
        aug.GridDistortion(p=.2),
        aug.ElasticTransform(alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03, p=.2),
        aug.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=.2),
    ], p=.6)

    return aug.Compose([
        aug.Flip(),
        enhance,
        noise,
        blur,
        tone,
        aug.ShiftScaleRotate(shift_limit=.0625, scale_limit=0.1,
                             rotate_limit=12, p=.7),
        warp,
        aug.HueSaturationValue(p=.5),
    ], p=prob)
Exemplo n.º 30
0
    def __init__(self, args):
        """Build the training transform pipeline.

        Normalization statistics are looked up via get_stat(args.mix_window).
        # NOTE(review): presumably args.image_size is the square output size — confirm.
        """
        MEAN, STD = get_stat(args.mix_window)

        # One of several geometric perturbations (NoOp keeps identity possible);
        # all pad with constant zeros.
        geometry = A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.0625,
                               scale_limit=0.1,
                               rotate_limit=30,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.GridDistortion(distort_limit=0.2,
                             border_mode=cv2.BORDER_CONSTANT,
                             value=0),
            A.OpticalDistortion(distort_limit=0.2,
                                shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.NoOp()
        ])

        # Occasional noise or mild blur.
        noise_or_blur = A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
            A.MedianBlur(blur_limit=3),
            A.Blur(blur_limit=3),
        ], p=0.15)

        self.transform = A.Compose([
            A.RandomResizedCrop(args.image_size,
                                args.image_size,
                                interpolation=cv2.INTER_LINEAR,
                                scale=(0.8, 1)),
            # A.Resize(args.image_size, args.image_size),
            A.HorizontalFlip(),
            A.VerticalFlip(),  # add this -> freebie in tta stage
            geometry,
            noise_or_blur,
            A.Normalize(MEAN, STD),
            ToTensor()
        ])