Example #1
def get_train_transforms(
        height: int = 14 * 32,  # 14*32 then 28*32
        width: int = 18 * 32):  # 18*32 then 37*32
    return A.Compose([
        A.HorizontalFlip(p=0.5),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.4),
        A.OneOf([
            A.CLAHE(p=1.0),
            A.RandomBrightness(p=1.0),
            A.RandomGamma(p=1.0),
        ],
                p=0.5),
        A.OneOf([
            A.IAASharpen(p=1.0),
            A.Blur(blur_limit=3, p=1.0),
            A.MotionBlur(blur_limit=3, p=1.0),
        ],
                p=0.5),
        A.OneOf([
            A.RandomContrast(p=1.0),
            A.HueSaturationValue(p=1.0),
        ],
                p=0.5),
        A.Resize(height=height, width=width, p=1.0),
    ],
                     p=1.0)
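A minimal usage sketch for the pipeline above (the alias A, the NumPy array, and an older albumentations release that still ships the imgaug-based IAA* transforms are assumptions, not part of the original example):

import numpy as np
import albumentations as A  # assumed alias; requires a version with the IAA* transforms

train_transforms = get_train_transforms()                          # defaults: height=448, width=576
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)   # hypothetical HWC uint8 image
augmented = train_transforms(image=image)['image']                 # Compose is called with named targets
print(augmented.shape)                                             # (448, 576, 3) after the final Resize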
Example #2
def get_training_augmentation():
    
    return albu.Compose([

        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.Transpose(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=0.3, border_mode=0),
        #albu.RandomCrop(height=320, width=320, always_apply=True),
        
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.RandomContrast(p=0.5),
        albu.HueSaturationValue(p=0.5),
        
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.3,
        ),

        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.3,
        ),

    ])
Example #3
def gentle_transform(p):
    return albu.Compose(
        [
            # p=0.5
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.OneOf(
                [
                    albu.IAASharpen(p=1),
                    albu.Blur(blur_limit=3, p=1),
                    albu.MotionBlur(blur_limit=3, p=1),
                ],
                p=0.5,
            ),
            albu.OneOf(
                [
                    albu.RandomBrightness(p=1),
                    albu.RandomGamma(p=1),
                ],
                p=0.5,
            ),
            # p=0.2
            albu.ShiftScaleRotate(rotate_limit=30,
                                  scale_limit=0.15,
                                  border_mode=cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0],
                                  p=0.2),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.2),
        ],
        p=p,
        additional_targets={
            'image{}'.format(_): 'image'
            for _ in range(1, 65)
        })
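The additional_targets mapping above registers the keys 'image1'..'image64' as extra image targets, so every array passed under those keys receives the same random parameters as 'image' on each call. A minimal usage sketch (the frame arrays are hypothetical; only three of the 64 possible keys are used):

import numpy as np

aug = gentle_transform(p=0.9)
frame0 = np.zeros((128, 128, 3), dtype=np.uint8)
extra = {'image{}'.format(i): np.zeros((128, 128, 3), dtype=np.uint8) for i in range(1, 4)}
out = aug(image=frame0, **extra)
# out['image'], out['image1'], out['image2'], out['image3'] were all transformed identically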
Example #4
    def __init__(self, width, height):
        super().__init__()
        max_size = int(max(width, height) * 1.05)

        self.transforms = AL.Compose([
            AL.LongestMaxSize(max_size),
            AL.IAAPerspective(scale=(0.02, 0.04), keep_size=True, p=0.3),
            AL.OneOf([
                AL.HorizontalFlip(p=1),
                AL.VerticalFlip(p=1),
                AL.Transpose(p=1)
            ],
                     p=0.7),
            AL.RandomRotate90(p=0.5),
            AL.OneOf([
                AL.RandomBrightnessContrast(p=0.8),
                AL.RandomGamma(p=0.8),
                AL.HueSaturationValue(p=0.3)
            ]),
            Filling(max_size),
            AL.CoarseDropout(max_holes=12,
                             max_height=24,
                             max_width=24,
                             min_holes=4,
                             min_height=8,
                             min_width=8,
                             p=0.3),
            AL.OneOf([AL.Resize(height, width),
                      AL.RandomCrop(height, width)],
                     p=1),
        ])
Example #5
 def __init__(self,
              images,
              labels,
              batch_size=16,
              image_shape=(256, 512, 1),
              do_shuffle_at_epoch_end=True,
              length=None,
              do_augment=True):
     super().__init__(images, labels, batch_size, image_shape,
                      do_shuffle_at_epoch_end, length, do_augment)
     self.augmenting_pipeline = A.Compose([
         A.HorizontalFlip(),
         A.IAAAffine(translate_percent={"x": (-1, 1)}, mode="reflect", p=1),
         A.PadIfNeeded(min_width=int(self.input_shape[1] * 2),
                       min_height=self.input_shape[0]),
         A.GridDistortion(p=0.8, distort_limit=0.5),
         A.ElasticTransform(p=0.5,
                            alpha=10,
                            sigma=100 * 0.03,
                            alpha_affine=0),
         A.CenterCrop(width=self.input_shape[1],
                      height=self.input_shape[0]),
         A.IAAPerspective(scale=(0, 0.10), p=1),
         A.ShiftScaleRotate(shift_limit=0,
                            scale_limit=(.0, 0.4),
                            rotate_limit=0,
                            p=0.5),
         A.CLAHE(clip_limit=2.0, p=0.5),
         A.Lambda(
             image=self.convert_image,
             mask=self.convert_segmentations,
         ),
     ])
Example #6
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),

        #         albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        albu.PadIfNeeded(min_height=320, min_width=320, always_apply=True),
        #         albu.RandomCrop(height=1000, width=1000, always_apply=True),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #7
def apply_training_augmentation():
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        A.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0),
        A.RandomCrop(height=320, width=320, always_apply=True),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
Example #8
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.PadIfNeeded(min_height=1216,
                         min_width=512,
                         always_apply=True,
                         border_mode=0),
        # With probability 0.9, apply one transform drawn from the OneOf below; each option is equally likely at 1/3 (since 1/(1+1+1))
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #9
def get_training_augmentation():
    """Builds random transformations we want to apply to our dataset.

    Arguments:
        
    Returns:
        A albumentation functions to pass our images to.
    Raises:

    """
    train_transform = [
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf([A.CLAHE(p=1), A.RandomBrightness(p=1), A.RandomGamma(p=1),], p=0.9,),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf([A.RandomContrast(p=1), A.HueSaturationValue(p=1),], p=0.9,),
        A.Lambda(mask=round_clip_0_1),
    ]
    return A.Compose(train_transform)
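The round_clip_0_1 helper passed to A.Lambda above is not shown in this snippet; a minimal sketch of what such a mask post-processing function typically looks like (an assumption based on the name, not the original author's code):

def round_clip_0_1(x, **kwargs):
    # A.Lambda forwards extra keyword arguments (rows, cols, ...), hence **kwargs.
    # Round soft mask values (NumPy array) and clip them into the [0, 1] range.
    return x.round().clip(0, 1)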
Example #10
def get_training_augmentation():
    train_transform = albu.Compose([
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ],
                                   additional_targets={'depth': 'mask'})

    return train_transform
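A usage sketch for the additional_targets={'depth': 'mask'} mapping above (the arrays are hypothetical): the depth map is handled with mask semantics, so it follows the spatial transforms but is skipped by the image-only ones (CLAHE, brightness, etc.).

import numpy as np

aug = get_training_augmentation()
image = np.zeros((256, 256, 3), dtype=np.uint8)
mask = np.zeros((256, 256), dtype=np.uint8)
depth = np.zeros((256, 256), dtype=np.float32)
out = aug(image=image, mask=mask, depth=depth)
image_aug, mask_aug, depth_aug = out['image'], out['mask'], out['depth']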
Example #11
def augmentation(image_size, train=True):
    max_crop = image_size // 5
    if train:
        data_transform = A.Compose([
            A.Resize(image_size, image_size),
            A.Compose([
                A.OneOf([
                    A.RandomRain(p=0.1),
                    A.GaussNoise(mean=15),
                    A.GaussianBlur(blur_limit=10, p=0.4),
                    A.MotionBlur(p=0.2)
                ]),
                A.OneOf([
                    A.RGBShift(p=1.0,
                               r_shift_limit=(-10, 10),
                               g_shift_limit=(-10, 10),
                               b_shift_limit=(-10, 10)),
                    A.RandomBrightnessContrast(
                        brightness_limit=0.3, contrast_limit=0.1, p=1),
                    A.HueSaturationValue(hue_shift_limit=20, p=1),
                ],
                        p=0.6),
                A.OneOf([
                    A.CLAHE(clip_limit=2),
                    A.IAASharpen(),
                    A.IAAEmboss(),
                ]),
                A.OneOf([A.IAAPerspective(p=0.3),
                         A.ElasticTransform(p=0.1)]),
                A.OneOf([
                    A.Rotate(limit=25, p=0.6),
                    A.IAAAffine(
                        scale=0.9,
                        translate_px=15,
                        rotate=25,
                        shear=0.2,
                    )
                ],
                        p=1),
                A.Cutout(num_holes=1,
                         max_h_size=max_crop,
                         max_w_size=max_crop,
                         p=0.2)
            ],
                      p=1),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            AT.ToTensor()
        ])
    else:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return data_transform
Example #12
 def __init__(self):
     self.aug = A.Compose([
         A.IAAPerspective(scale=(0.05, 0.2), p=0.5),
         A.ShiftScaleRotate(scale_limit=0.5, p=0.5),
         A.OpticalDistortion(distort_limit=0.5, p=0.5),
         A.GridDistortion(distort_limit=0.5, p=0.5),
         A.ElasticTransform(p=0.5)
     ],
                          p=1)
     pass
Example #13
def spatial_and_noise(p):
    return albu.Compose([
        albu.ShiftScaleRotate(rotate_limit=30,
                              scale_limit=15,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=-1024,
                              p=0.5),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.2),
        albu.IAAPiecewiseAffine(p=0.2)
    ], p=p, additional_targets={'image{}'.format(_) : 'image' for _ in range(1, 101)})
Example #14
def DistortionBase():
    return A.Compose([
        A.OneOf(
            [
                A.IAAPerspective(),
                A.IAAPiecewiseAffine(),
                A.GridDistortion(),
                A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
            p=0.50,
        )
    ])
Example #15
def aug_img(img, blur_limit=7):
    annotations = {'image': img}
    aug = albumentations.Compose([
        albumentations.GaussNoise(p=1),
        albumentations.MotionBlur(p=1, blur_limit=blur_limit),
        albumentations.Rotate(5),
        albumentations.OpticalDistortion(p=1),
        albumentations.IAAPerspective(scale=(0.005, 0.01), p=1),
        albumentations.CLAHE(p=1),
        albumentations.RandomBrightnessContrast(p=1)
    ])
    augmented = aug(**annotations)
    return augmented['image']
Example #16
 def train_transforms(self):
     return A.Compose([
         A.HorizontalFlip(p=0.5),
         A.ShiftScaleRotate(scale_limit=0.5,
                            rotate_limit=0,
                            shift_limit=0.1,
                            p=1,
                            border_mode=0),
         A.PadIfNeeded(
             min_height=self.args.arch.image_height,
             min_width=self.args.arch.image_width,
             always_apply=True,
             border_mode=0,
         ),
         A.RandomCrop(
             height=self.args.arch.image_height,
             width=self.args.arch.image_width,
             always_apply=True,
         ),
         A.IAAAdditiveGaussianNoise(p=0.2),
         A.IAAPerspective(p=0.5),
         A.OneOf(
             [
                 A.CLAHE(p=1),
                 A.RandomBrightnessContrast(contrast_limit=0.0, p=1),
                 A.RandomGamma(p=1),
             ],
             p=0.9,
         ),
         A.OneOf(
             [
                 A.IAASharpen(p=1),
                 A.Blur(blur_limit=3, p=1),
                 A.MotionBlur(blur_limit=3, p=1),
             ],
             p=0.9,
         ),
         A.OneOf(
             [
                 A.RandomBrightnessContrast(brightness_limit=0.0, p=1),
                 A.HueSaturationValue(p=1),
             ],
             p=0.9,
         ),
         A.Normalize(
             mean=[0.485, 0.456, 0.406],
             std=[0.229, 0.224, 0.225],
             max_pixel_value=255.0,
         ),
         ToTensorV2(),
     ])
Example #17
    def __init__(self, cfg, phase='train'):
        self.phase = phase
        self.size = cfg['image_size']
        self.root_dir = cfg['root_dir']

        cls_names = cfg['class_names']
        self.category_id_to_name = {k: v for k, v in enumerate(cls_names)}
        self.category_name_to_id = {
            v: k
            for k, v in self.category_id_to_name.items()
        }

        if self.phase == 'train':
            self.data_list = self.load_annos(cfg['train_data_file'],
                                             self.category_name_to_id)
        else:
            self.data_list = self.load_annos(cfg['val_data_file'],
                                             self.category_name_to_id)

        self.aug = A.Compose(
            [
                #   A.RandomScale(scale_limit=0.1, p=0.5),
                A.ShiftScaleRotate(
                    shift_limit=0.2, scale_limit=0.4, rotate_limit=45),
                A.PadIfNeeded(self.size[0], self.size[1]),
                #   A.RandomSizedCrop(min_max_height=(int(self.size[0]*0.8), self.size[0]*1.2),
                #                     height=self.size[0],
                #                     width=self.size[1],
                #                     w2h_ratio=self.size[1]/self.size[0]),
                A.RandomResizedCrop(
                    self.size[0], self.size[1], scale=(0.8, 1.0)),
                A.IAAPerspective(scale=(0.05, 0.1)),
                A.Rotate(),
                A.Flip(),
                # A.RandomSizedBBoxSafeCrop(height=self.size[1], width=self.size[0]),
                #   A.RandomBrightnessContrast(p=0.5),
                #   A.HueSaturationValue(p=0.5),
                A.ColorJitter()
            ],
            bbox_params=A.BboxParams(format='pascal_voc',
                                     label_fields=['cls_ids'],
                                     min_area=0.3,
                                     min_visibility=0.3))

        self.to_tensor = T.Compose([
            T.ToTensor(),
            T.Normalize(cfg['imagenet_default_mean'],
                        cfg['imagenet_default_std'])
        ])
Example #18
def light_augmentations():
    return A.Compose([
        A.HorizontalFlip(),
        A.RandomBrightnessContrast(),
        A.OneOf([
            A.ShiftScaleRotate(scale_limit=0.05,
                               rotate_limit=15,
                               border_mode=cv2.BORDER_CONSTANT),
            A.IAAAffine(),
            A.IAAPerspective(),
            A.NoOp()
        ]),
        A.HueSaturationValue(),
        A.Normalize()
    ])
Example #19
def generate_transforms3(img_size):
    train_transform = Compose([
        A.RandomResizedCrop(img_size, img_size, scale=(0.9, 1), p=1),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(p=0.5),
        A.HueSaturationValue(hue_shift_limit=10,
                             sat_shift_limit=10,
                             val_shift_limit=10,
                             p=0.7),
        A.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                   contrast_limit=(-0.2, 0.2),
                                   p=0.7),
        A.CLAHE(clip_limit=(1, 4), p=0.5),
        A.OneOf([
            A.OpticalDistortion(distort_limit=1.0),
            A.GridDistortion(num_steps=5, distort_limit=1.0),
            A.ElasticTransform(alpha=3),
        ],
                p=0.2),
        A.Resize(img_size, img_size),
        A.OneOf([
            A.JpegCompression(),
            A.Downscale(scale_min=0.1, scale_max=0.15),
        ],
                p=0.2),
        A.IAAPerspective(p=0.2),
        A.IAASharpen(p=0.2),
        A.Cutout(max_h_size=int(img_size * 0.1),
                 max_w_size=int(img_size * 0.1),
                 num_holes=5,
                 p=0.5),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225),
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2(),
    ])
    val_transform = Compose([
        Resize(height=img_size, width=img_size),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225),
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2(),
    ])

    return {"train": train_transform, "val": val_transform}
Example #20
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.2),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=50,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        #320 384  448 512  640
        #        albu.GridDistortion(num_steps=2, distort_limit=0.2, interpolation=1, border_mode=0, value=None, always_apply=False, p=0.5),
        albu.PadIfNeeded(min_height=padheight,
                         min_width=padwidth,
                         always_apply=True,
                         border_mode=0),
        albu.Resize(height=padheight, width=padwidth),
        albu.RandomCrop(height=inputheight,
                        width=inputwidth,
                        always_apply=True),  #the last size
        albu.ChannelShuffle(p=0.1),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
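The padheight, padwidth, inputheight and inputwidth names used above are module-level settings that are not part of this snippet; a hypothetical assignment consistent with the size hint in the "#320 384 448 512 640" comment (the concrete values are placeholders, not from the original):

padheight, padwidth = 640, 640        # pad/resize target (placeholder values)
inputheight, inputwidth = 512, 512    # final RandomCrop size (placeholder values)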
Example #21
def build_transforms(cfg, is_train, debug=False):
    to_compose = [albu.Resize(*cfg.inputs.size)]
    if is_train and cfg.augmentation.enable:
        to_compose.extend([
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.RandomRotate90(p=0.5),
            albu.Transpose(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.2,
                                  rotate_limit=0,
                                  shift_limit=0.2,
                                  p=0.2,
                                  border_mode=0),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.5),
            albu.OneOf(
                [
                    albu.CLAHE(p=1),
                    albu.RandomBrightness(p=1),
                    albu.RandomGamma(p=1),
                ],
                p=0.9,
            ),
            albu.OneOf(
                [
                    albu.IAASharpen(p=1),
                    albu.Blur(blur_limit=3, p=1),
                    albu.MotionBlur(blur_limit=3, p=1),
                ],
                p=0.9,
            ),
            albu.OneOf(
                [
                    albu.RandomContrast(p=1),
                    albu.HueSaturationValue(p=1),
                ],
                p=0.9,
            ),
            albu.Compose(
                [albu.VerticalFlip(p=0.5),
                 albu.RandomRotate90(p=0.5)])
        ])
    if debug:
        return albu.Compose(to_compose)
    to_compose.append(albu.Normalize(**cfg.inputs.normalize))
    to_compose.append(ToTensorV2(transpose_mask=True))
    return albu.Compose(to_compose)
Example #22
def create_augmentations(img_height=224, img_width=224, p=0.1):
    AUGMENTATIONS = albumentations.Compose([
        albumentations.Resize(img_height, img_width, p=1.),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.IAAPerspective(p=p, scale=(0.01, 0.05)),
        albumentations.GridDistortion(p=p, distort_limit=0.2),
        albumentations.CoarseDropout(p=p, max_holes=10, max_height=25, max_width=25),
        albumentations.GaussNoise(p=p, var_limit=(40.0, 70.0)),
        albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=10,
                                        interpolation=1, border_mode=4, always_apply=False, p=2 * p),
        albumentations.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2, p=2 * p),
        albumentations.Blur(p=p, blur_limit=10),
        albumentations.ToGray(p=p),
        albumentations.ChannelShuffle(p=0.05),
        albumentations.RandomGamma(p=p, gamma_limit=(20, 200)),
        AddShadow(p=p),
    ])
    return AUGMENTATIONS
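AddShadow in the pipeline above is a custom transform that is not defined in this snippet; a minimal sketch of how such an image-only transform can be written against the albumentations interface (the darkening logic is purely illustrative, not the original implementation):

from albumentations.core.transforms_interface import ImageOnlyTransform

class AddShadow(ImageOnlyTransform):
    def __init__(self, always_apply=False, p=0.5):
        super().__init__(always_apply, p)

    def apply(self, img, **params):
        # Illustrative only: darken the lower half of the image to mimic a shadow.
        out = img.copy()
        h = out.shape[0]
        out[h // 2:] = (out[h // 2:] * 0.6).astype(out.dtype)
        return out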
Example #23
def default_adjusted(image, mask):
    alpha = randrange(30, 45)
    sigma = randrange(5, 7)
    scale = uniform(0.015, 0.075)
    train_transform = [
        albu.ShiftScaleRotate(scale_limit=0.2,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        albu.PadIfNeeded(min_height=512,
                         min_width=512,
                         always_apply=True,
                         border_mode=0),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightnessContrast(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomBrightnessContrast(brightness_limit=0, p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.ElasticTransform(alpha=alpha,
                                      sigma=sigma,
                                      p=1,
                                      border_mode=cv2.BORDER_CONSTANT),
                albu.IAAAffine(scale=scale, p=1, mode="constant"),
            ],
            p=0.7,
        ),
    ]
    aug_func = albu.Compose(train_transform)
    augmented = aug_func(image=image, mask=mask)
    return augmented['image'], augmented['mask']
Example #24
def augmentation(image, bboxes, classes_ids):
    transform = A.Compose(
        [
            A.HorizontalFlip(p=0.5),
            A.ShiftScaleRotate(p=0.5),
            A.Blur(blur_limit=4, p=0.5),
            A.RandomCrop(200, 200, p=0.5),
            A.IAAPerspective(p=0.5),
            A.Resize(420, 420, p=1.0)
        ],
        bbox_params=A.BboxParams(format='pascal_voc',
                                 label_fields=['classes_ids']),
    )
    transformed = transform(image=image,
                            bboxes=bboxes,
                            classes_ids=classes_ids)
    return transformed['image'], transformed['bboxes'], transformed[
        'classes_ids']
Example #25
def get_preprocessing(preprocessing_fn, apply_augmentation=False):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function (can be specific for each pretrained neural network)
        apply_augmentation (boolean): apply data augmentation or not
    Return:
        transform: albumentations.Compose
    """

    _transform = [A.Resize(384, 480)]

    if apply_augmentation:
        _transform += [
            A.HorizontalFlip(p=0.5),
            A.ShiftScaleRotate(scale_limit=0.5,
                               rotate_limit=0,
                               shift_limit=0.1,
                               p=1,
                               border_mode=0),
            A.IAAAdditiveGaussianNoise(p=0.2),
            A.IAAPerspective(p=0.5),
            A.OneOf(
                [A.CLAHE(p=1),
                 A.RandomBrightness(p=1),
                 A.RandomGamma(p=1)],
                p=0.9),
            A.OneOf([
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1)
            ],
                    p=0.9),
            A.OneOf([A.RandomContrast(p=1),
                     A.HueSaturationValue(p=1)], p=0.9)
        ]

    _transform += [
        A.Lambda(image=preprocessing_fn),
        A.Lambda(image=to_CHW, mask=to_CHW)
    ]

    return A.Compose(_transform)
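The to_CHW helper used in the final Lambda above is not defined in this snippet; a minimal sketch of the usual channels-first conversion (an assumption based on the name; both image and mask are assumed to be HWC arrays):

import numpy as np

def to_CHW(x, **kwargs):
    # A.Lambda forwards extra keyword arguments, hence **kwargs.
    # Convert HWC -> CHW, the layout expected by PyTorch-style models.
    return np.transpose(x, (2, 0, 1)).astype('float32')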
Example #26
def hard_transforms(border_reflect=2):
    result = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(shift_limit=0.1,
                              scale_limit=0.1,
                              rotate_limit=15,
                              border_mode=border_reflect,
                              p=0.5),
        albu.IAAPerspective(scale=(0.02, 0.05), p=0.3),
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2,
                                      p=0.3),
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.ImageCompression(quality_lower=80, p=0.5),
    ]

    return result
Example #27
    def __init__(self):
        super().__init__()

        self.transforms = AL.Compose([
            AL.IAAPerspective(scale=(0.02, 0.04), keep_size=True, p=0.3),
            AL.RandomBrightnessContrast(p=0.3),
            AL.RandomGamma(p=0.3),
            AL.GaussianBlur(blur_limit=9, p=0.4),
            AL.GaussNoise(p=0.3),
            AL.RandomRotate90(p=0.6),
            AL.Rotate(limit=15, border_mode=cv2.BORDER_CONSTANT, p=0.2),
            AL.CoarseDropout(max_holes=24,
                             max_height=24,
                             max_width=24,
                             min_holes=8,
                             min_height=8,
                             min_width=8,
                             p=0.4),
        ])
Example #28
def get_training_augmentation(height, width, downsample_factor=1):
    downsample_fn = partial(downsample, factor=downsample_factor)
    train_transform = [
        albu.Lambda(image=downsample_fn, mask=downsample_fn),
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        albu.PadIfNeeded(min_height=height,
                         min_width=width,
                         always_apply=True,
                         border_mode=0),
        albu.RandomCrop(height=height, width=width, always_apply=True),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
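The downsample helper wrapped with functools.partial above is not shown in this snippet; a plausible minimal sketch (an assumption, not the original code) that shrinks both spatial dimensions by the given factor:

import cv2

def downsample(x, factor=1, **kwargs):
    # albu.Lambda forwards extra keyword arguments, hence **kwargs.
    if factor == 1:
        return x
    h, w = x.shape[:2]
    # cv2.resize takes (width, height); nearest-neighbor keeps mask labels intact.
    return cv2.resize(x, (w // factor, h // factor), interpolation=cv2.INTER_NEAREST)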
Example #29
def get_training_augmentation():
    SIZE = 320  # default here is 320, 640 is too large
    train_transform = [

        albu.HorizontalFlip(p=0.5),  # p = probability of applying this transform

        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),

        # Can we play with different sizes?
        albu.PadIfNeeded(min_height=SIZE, min_width=SIZE, always_apply=True, border_mode=0),
        albu.RandomCrop(height=SIZE, width=SIZE, always_apply=True),

        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),

        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightnessContrast(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),

        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),

        albu.OneOf(
            [
                albu.RandomBrightnessContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #30
    def __get_transform(self):
        transform = [
            albu.HorizontalFlip(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.2,
                                  rotate_limit=0,
                                  shift_limit=0.1,
                                  p=1,
                                  border_mode=0),
            albu.PadIfNeeded(min_height=self.size,
                             min_width=self.size,
                             always_apply=True,
                             border_mode=0),
            albu.RandomCrop(height=self.size,
                            width=self.size,
                            always_apply=True),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.5)
        ]

        return albu.Compose(transform)