Example #1
def augmentations(prob=0.95):
    
    transformer = A.Compose([

            A.OneOf([A.HorizontalFlip(p=prob),
                     A.VerticalFlip(p=prob)], p=prob),

            A.ShiftScaleRotate(p=prob, shift_limit=0.2, scale_limit=.2, rotate_limit=45),
            A.RandomRotate90(p=prob),
            A.Transpose(p=prob),
            A.OneOf([A.RandomContrast(limit=0.2, p=prob),
                     A.RandomGamma(gamma_limit=(70, 130), p=prob),
                     A.RandomBrightness(limit=0.2, p=prob)], p=prob),
            A.HueSaturationValue(p=prob),
            A.OneOf([
                    A.MotionBlur(p=prob),
                    A.MedianBlur(blur_limit=3, p=prob),
                    A.Blur(blur_limit=3, p=prob)
            ], p=prob),
            A.OpticalDistortion(p=prob),
            A.GridDistortion(p=prob),
            A.OneOf([
                    A.IAAAdditiveGaussianNoise(p=prob),
                    A.GaussNoise(p=prob),
                  ], p=prob),
    ], p=prob)
    return transformer
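A minimal usage sketch for the pipeline above (my own illustration, not part of the original example; it assumes the older albumentations release, imported as A, that these snippets target, plus NumPy):

import numpy as np
import albumentations as A

transformer = augmentations(prob=0.95)
img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # dummy RGB image
out = transformer(image=img)["image"]  # the Compose is called like a function and returns a dict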
Example #2
def hard_transforms():
    result = [
        # miscellaneous
        A.IAAAdditiveGaussianNoise(p=0.2),
        #A.CoarseDropout(max_holes=10, max_height=50, max_width=50, min_height=15, min_width=15, p=0.25),

        # brightness
        #A.OneOf(
        #    [
        #        A.RandomBrightnessContrast(p=1),
        #        A.RandomGamma(p=1),
        #    ],
        #    p=0.9,
        #),

        # sharpening / blurring
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),

        #
    ]

    return result
Example #3
def get_next_augmentation():
    train_transform = [
        albu.ChannelShuffle(p=0.1),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #4
def v_flip_g_blur(p=1.0):
    return albumentations.Compose([
        albumentations.VerticalFlip(p=p),
        albumentations.GaussianBlur(p=p),
        albumentations.IAAAdditiveGaussianNoise(p=p)
    ],
                                  p=p)
Example #5
    def augment(self, img0, img1):
        transform = A.Compose([
            A.IAAAdditiveGaussianNoise(p=0.05),
            A.OneOf([
                A.IAASharpen(p=1.0),
                A.Blur(blur_limit=3, p=1.0),
            ],
                    p=0.5),
            A.OneOf([
                A.RandomBrightnessContrast(p=1.0),
                A.HueSaturationValue(p=1.0),
                A.RandomGamma(p=1.0),
            ],
                    p=0.5),
            A.OneOf([
                A.RandomFog(p=1.0),
                A.RandomRain(p=1.0),
                A.RandomShadow(p=1.0),
                A.RandomSnow(p=1.0),
                A.RandomSunFlare(p=1.0)
            ],
                    p=0.05),
        ],
                              additional_targets={'img1': 'image'})
        transformed = transform(image=img0, img1=img1)
        img0 = transformed["image"]
        img1 = transformed["img1"]

        return img0, img1
Example #6
    def __init__(self,
                 scale_range: Tuple[float, float] = (0.35, 0.65),
                 input_size: Tuple[int, int] = (416, 416),
                 augmentation: bool = False) -> None:

        if augmentation:
            self.crop_func = RandomCropAndResize(scale_range, input_size)
            self.aug_func = alb.Compose([
                alb.OneOf([
                    alb.RGBShift(),
                    alb.ToGray(),
                    alb.NoOp(),
                ]),
                alb.RandomBrightnessContrast(),
                alb.OneOf([
                    alb.GaussNoise(),
                    alb.IAAAdditiveGaussianNoise(),
                    alb.CoarseDropout(fill_value=100),
                ])
            ])
        else:
            scale = (scale_range[0] + scale_range[1]) / 2.
            self.crop_func = CenterCropAndResize(scale, input_size)
            self.aug_func = None

        self.heatmap_stride = 4
        self.heatmap_size = (input_size[0] // self.heatmap_stride,
                             input_size[1] // self.heatmap_stride)
Example #7
def get_train_transforms(
        height: int = 14 * 32,  # 14*32 then 28*32
        width: int = 18 * 32):  #18*32 then 37*32
    return A.Compose([
        A.HorizontalFlip(p=0.5),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.4),
        A.OneOf([
            A.CLAHE(p=1.0),
            A.RandomBrightness(p=1.0),
            A.RandomGamma(p=1.0),
        ],
                p=0.5),
        A.OneOf([
            A.IAASharpen(p=1.0),
            A.Blur(blur_limit=3, p=1.0),
            A.MotionBlur(blur_limit=3, p=1.0),
        ],
                p=0.5),
        A.OneOf([
            A.RandomContrast(p=1.0),
            A.HueSaturationValue(p=1.0),
        ],
                p=0.5),
        A.Resize(height=height, width=width, p=1.0),
    ],
                     p=1.0)
Example #8
def get_augmentations_transform(crop_size=128, p=0.5, phase="train"):
    imagenet_stats = {'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225]}
    if phase in ("train", "test"):
        aug_factor_list = [
            A.RandomResizedCrop(height=crop_size, width=crop_size, scale=(0.8, 1.0)),
            A.Cutout(num_holes=8, p=p),
            A.RandomRotate90(p=p),
            A.HorizontalFlip(p=p),
            A.VerticalFlip(p=p),
            A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=p),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=p),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=p),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ], p=p),
            ToTensor(normalize=imagenet_stats)
        ]
        transformed_image = A.Compose(aug_factor_list)
        return transformed_image
    elif phase == "valid":
        transformed_image = A.Compose([ToTensor(normalize=imagenet_stats)])
        return transformed_image
    else:
        raise TypeError("Invalid phase type.")
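A quick sanity check of the corrected phase handling (hypothetical call sites; the variable names are illustrative):

train_tfms = get_augmentations_transform(crop_size=128, p=0.5, phase="train")
valid_tfms = get_augmentations_transform(phase="valid")
# any other phase value now raises TypeError instead of silently returning None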
Example #9
    def album(self):  # image transformation
        transform = A.Compose([
            #A.RandomRotate90(),
            A.Flip(p=0.2),
            #A.Transpose(),
            A.ChannelShuffle(p=0.3),
            A.ElasticTransform(p=0.3, border_mode=cv2.BORDER_REFLECT_101, alpha_affine=40),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ], p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
                A.RandomBrightnessContrast(),
            ], p=0.3),
            A.HueSaturationValue(p=0.3),
        ])
        image = cv2.cvtColor(self.srcResize, cv2.COLOR_BGR2RGB)
        transformed = transform(image=image)['image']
        self.update(transformed)
Example #10
def augment(image):
    transform = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=15,
                           p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ],
                p=0.5),
        A.HueSaturationValue(p=0.3),
    ])
    augmented_image = transform(image=image)['image']
    return augmented_image
Example #11
def gentle_transform(p):
    return albu.Compose(
        [
            # p=0.5
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.OneOf(
                [
                    albu.IAASharpen(p=1),
                    albu.Blur(blur_limit=3, p=1),
                    albu.MotionBlur(blur_limit=3, p=1),
                ],
                p=0.5,
            ),
            albu.OneOf(
                [
                    albu.RandomBrightness(p=1),
                    albu.RandomGamma(p=1),
                ],
                p=0.5,
            ),
            # p=0.2
            albu.ShiftScaleRotate(rotate_limit=30,
                                  scale_limit=0.15,
                                  border_mode=cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0],
                                  p=0.2),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.2),
        ],
        p=p,
        additional_targets={
            'image{}'.format(_): 'image'
            for _ in range(1, 65)
        })
Example #12
def augmentations(image_size: int):
    channel_augs = [
        A.HueSaturationValue(p=0.5),
        A.ChannelShuffle(p=0.5),
    ]

    result = [
        # *pre_transform(image_size),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.5),
        A.OneOf([
            A.MotionBlur(blur_limit=3, p=0.7),
            A.MedianBlur(blur_limit=3, p=1.0),
            A.Blur(blur_limit=3, p=0.7),
        ],
                p=0.5),
        A.OneOf(channel_augs),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
        ],
                p=0.5),
        A.RandomBrightnessContrast(brightness_limit=0.5,
                                   contrast_limit=0.5,
                                   p=0.5),
        A.RandomGamma(p=0.5),
        A.OneOf([A.MedianBlur(p=0.5), A.MotionBlur(p=0.5)]),
        A.RandomGamma(gamma_limit=(85, 115), p=0.5),
    ]
    return A.Compose(result, bbox_params=BBOX_PARAMS)
Example #13
    def weak_aug(self, p=0.5):
        '''Create a weak augmentation pipeline.'''
        return A.Compose([
            A.HorizontalFlip(),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=10, p=0.2),
            A.OpticalDistortion(p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
            ], p=0.3),
        ], p=p)
Example #14
def augment_image(image):
    # Works with single image
    transform = A.Compose([
        A.HorizontalFlip(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(),
            A.MedianBlur(blur_limit=3),
            A.Blur(blur_limit=3),
        ],
                p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=45,
                           p=0.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
        ], p=0.2),
        A.OneOf([
            A.IAASharpen(p=1.),
            A.IAAEmboss(p=1.),
            A.RandomBrightnessContrast(p=1.),
        ],
                p=0.3),
        A.HueSaturationValue(hue_shift_limit=5,
                             sat_shift_limit=5,
                             val_shift_limit=5,
                             p=0.3),
    ])

    return transform(image=image)['image']
Example #15
    def augment_image(self, image):
        transform = A.Compose([
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.3),
            A.OneOf([
                A.MotionBlur(p=0.4),
                A.MedianBlur(blur_limit=3, p=0.3),
                A.Blur(blur_limit=3, p=0.3),
            ], p=0.4),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
                A.RandomBrightnessContrast(),
            ], p=0.3),
            A.HueSaturationValue(p=0.3),
        ])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        augmented_image = transform(image=image)['image']
        augmented_image = cv2.cvtColor(augmented_image, cv2.COLOR_RGB2BGR)
        return augmented_image
Example #16
def get_training_augmentation():
    train_transform = [
        A.RandomSizedCrop(min_max_height=(300, 360), height=320, width=320,
                          always_apply=True),
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.CLAHE(),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
            A.HueSaturationValue(),
            A.NoOp()
        ]),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(p=0.2),
            A.IAASharpen(),
            A.Blur(blur_limit=3),
            A.MotionBlur(blur_limit=3),
            A.NoOp()
        ]),
        A.OneOf([
            A.RandomFog(),
            A.RandomSunFlare(),
            A.RandomRain(),
            A.RandomSnow(),
            A.NoOp()
        ]),
        A.Normalize(),
    ]
    return A.Compose(train_transform)
Example #17
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.PadIfNeeded(min_height=1216,
                         min_width=512,
                         always_apply=True,
                         border_mode=0),
        # With probability 0.9, apply exactly one transform from the OneOf below; the children have
        # equal p values, so each is picked with probability 1/3 (because 1/(1+1+1)).
        # See the short sketch after this example.
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
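A minimal sketch of the OneOf behaviour described in the comment above (my own illustration, assuming the same older albumentations release imported as albu, plus NumPy for a dummy image):

import numpy as np
import albumentations as albu

# OneOf fires with its own p (0.9 here); when it fires, one child is picked with
# probability proportional to its p, so three children with p=1 each get ~1/3.
one_of = albu.OneOf(
    [albu.CLAHE(p=1), albu.RandomBrightness(p=1), albu.RandomGamma(p=1)],
    p=0.9,
)
dummy = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
augmented = albu.Compose([one_of])(image=dummy)["image"]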
Example #18
def get_training_augmentation():
    train_transform = albu.Compose([
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ],
                                   additional_targets={'depth': 'mask'})

    return train_transform
Example #19
def get_training_augmentation():
    train_transform = [
        albu.ShiftScaleRotate(scale_limit=0.1,
                              rotate_limit=0.,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.6,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.6,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.6,
        ),
    ]
    return albu.Compose(train_transform)
Example #20
def strong_aug(p=.5):
    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=.1),
            A.Blur(blur_limit=3, p=.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ],
                p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomContrast(),
            A.RandomBrightness(),
        ],
                p=0.3),
        A.HueSaturationValue(p=0.3),
    ],
                     p=p)
Example #21
def get_training_augmentation():
    """Builds random transformations we want to apply to our dataset.

    Arguments:
        
    Returns:
        A albumentation functions to pass our images to.
    Raises:

    """
    train_transform = [
        HorizontalFlipWithHomo(p=0.5),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.augmentations.transforms.RandomShadow(
            shadow_roi=(0, 0.5, 1, 1),
            num_shadows_lower=1,
            num_shadows_upper=1,
            shadow_dimension=3,
            always_apply=False,
            p=0.5,
        ),
        A.OneOf([A.RandomBrightness(p=1),], p=0.3,),
        A.OneOf([A.Blur(blur_limit=3, p=1), A.MotionBlur(blur_limit=3, p=1),], p=0.3,),
        A.OneOf([A.RandomContrast(p=1), A.HueSaturationValue(p=1),], p=0.3,),
        RandomCropWithHomo(height=256, width=256, always_apply=True),
    ]
    return A.Compose(train_transform)
Example #22
def get_augmentations(p=0.5, image_size=224):
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15,
                           scale_limit=0.4,
                           rotate_limit=45,
                           p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])

    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])

    return train_tfms, valid_tfms
Example #23
def get_train_transform():
    crop_height = 256
    crop_width = 256

    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=0.5),
            albu.GaussNoise(p=0.5),
        ], p=0.2),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        albu.OneOf([
            albu.CLAHE(clip_limit=2, p=0.5),
            albu.IAASharpen(p=0.5),
            albu.IAAEmboss(p=0.5),
            albu.RandomBrightnessContrast(p=0.5),
        ], p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1)
    ], p=1)
Example #24
        def __init__(self, data_dir, mode):
            assert mode in ['train', 'val', 'test']
            self.mode = mode
            self.fn = list(Path(data_dir).rglob('*.jpg'))

            a_transforms_list = [A.Resize(350, 350), A.RandomCrop(350, 350)]
            if mode == 'train':
                a_transforms_list.extend([
                    A.HorizontalFlip(),
                    A.VerticalFlip(),
                    A.HueSaturationValue(),
                    A.ShiftScaleRotate(),
                    A.OneOf([
                        A.IAAAdditiveGaussianNoise(),
                        A.GaussNoise(),
                    ],
                            p=0.2),
                    A.OneOf([
                        A.MotionBlur(p=.2),
                        A.MedianBlur(blur_limit=3, p=0.1),
                        A.Blur(blur_limit=3, p=0.1),
                    ],
                            p=0.2)
                ])
            a_transforms_list.extend([ToTensor()])
            self.transforms = A.Compose(a_transforms_list)
Example #25
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),

        #         albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        albu.PadIfNeeded(min_height=320, min_width=320, always_apply=True),
        #         albu.RandomCrop(height=1000, width=1000, always_apply=True),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #26
def det_train_augs(height: int, width: int) -> albu.Compose:
    return albu.Compose([
        albu.Resize(height=height, width=width),
        albu.ShiftScaleRotate(shift_limit=0.025,
                              scale_limit=0.1,
                              rotate_limit=10),
        albu.Flip(),
        albu.RandomRotate90(),
        albu.OneOf(
            [
                albu.HueSaturationValue(p=1.0),
                albu.IAAAdditiveGaussianNoise(p=1.0),
                albu.IAASharpen(p=1.0),
                albu.RandomBrightnessContrast(
                    brightness_limit=0.1, contrast_limit=0.1, p=1.0),
                albu.RandomGamma(p=1.0),
            ],
            p=1.0,
        ),
        albu.OneOf(
            [
                albu.Blur(blur_limit=3, p=1.0),
                albu.MotionBlur(blur_limit=3, p=1.0)
            ],
            p=1.0,
        ),
        albu.Normalize(),
    ])
Example #27
def car_6dof_pixel_tfms(opt):
    tfms = []
    if opt.aug_brightness_contrast > 0:
        tfm = A.RandomBrightnessContrast(brightness_limit=opt.brightness_limit,
                                         contrast_limit=opt.contrast_limit,
                                         p=opt.aug_brightness_contrast)
        tfms.append(tfm)
    if opt.aug_hue > 0:
        tfm = A.HueSaturationValue(hue_shift_limit=opt.hue_shift_limit,
                                   sat_shift_limit=0,
                                   val_shift_limit=0,
                                   p=opt.aug_hue)
        tfms.append(tfm)
    if opt.aug_blur > 0:
        tfm = A.GaussianBlur(blur_limit=opt.blur_limit, p=opt.aug_blur)
        tfms.append(tfm)
    if opt.aug_noise > 0:
        tfm = A.IAAAdditiveGaussianNoise(scale=opt.noise_scale,
                                         p=opt.aug_noise)
        tfms.append(tfm)
    tfms = A.Compose(tfms)

    def _wrapper(image):
        return tfms(image=image)['image']

    return _wrapper
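A hypothetical call site for the closure returned above (the opt fields and values here are illustrative assumptions matching what the function reads, not values from the source):

from types import SimpleNamespace
import numpy as np

opt = SimpleNamespace(aug_brightness_contrast=0.5, brightness_limit=0.2, contrast_limit=0.2,
                      aug_hue=0.5, hue_shift_limit=20,
                      aug_blur=0.3, blur_limit=3,
                      aug_noise=0.3, noise_scale=(0.01 * 255, 0.05 * 255))
aug = car_6dof_pixel_tfms(opt)  # build the pixel-level pipeline once
image = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
augmented = aug(image)  # plain image in, image out; the closure hides the (image=...)['image'] unpacking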
Example #28
def apply_training_augmentation():
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        A.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0),
        A.RandomCrop(height=320, width=320, always_apply=True),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
Example #29
def albumentation():
    transform = albumentations.Compose([          
                    albumentations.OneOf([
                        albumentations.GaussNoise(),
                        albumentations.IAAAdditiveGaussianNoise()
                    ]),
                    albumentations.OneOf([
                        albumentations.MotionBlur(blur_limit=3, p=0.2),
                        albumentations.MedianBlur(blur_limit=3, p=0.1),
                        albumentations.Blur(blur_limit=2, p=0.1)
                    ]),
                    albumentations.OneOf([
                        albumentations.RandomBrightness(limit=(0.1, 0.4)),
                        albumentations.HueSaturationValue(hue_shift_limit=(0, 128), sat_shift_limit=(0, 60), val_shift_limit=(0, 20)),
                        albumentations.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30)
                    ]),
                    albumentations.OneOf([
                        albumentations.CLAHE(),
                        albumentations.ChannelShuffle(),
                        albumentations.IAASharpen(),
                        albumentations.IAAEmboss(),
                        albumentations.RandomBrightnessContrast(),
                    ]),                
                    albumentations.OneOf([
                        albumentations.RandomGamma(gamma_limit=(35,255)),
                        albumentations.OpticalDistortion(),
                        albumentations.GridDistortion(),
                        albumentations.IAAPiecewiseAffine()
                    ]),                
                    A_torch.ToTensor(normalize={
                        "mean": [0.485, 0.456, 0.406],
                        "std" : [0.229, 0.224, 0.225]})
                    ])
    return transform
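The A_torch alias is not defined in this excerpt; an import that would fit the older albumentations API it leans on (an assumption, not confirmed by the source):

import albumentations.pytorch as A_torch  # older releases expose A_torch.ToTensor(normalize=...)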
Example #30
def predefined_transform() -> A.Compose:
    """
    Example from the albumentations docs
    https://github.com/albumentations-team/albumentations_examples/blob/master/notebooks/example.ipynb
    :return: the composed augmentation pipeline
    """

    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),            
        ], p=0.3),
        A.HueSaturationValue(p=0.3),
    ])