Beispiel #1
0
def get_augmentations(p=0.5, image_size=224):
    """Return (train_tfms, valid_tfms) albumentations pipelines.

    Args:
        p: probability applied to most stochastic transforms below.
        image_size: square side length used for cropping/resizing.

    Returns:
        Tuple of two A.Compose pipelines; both end with ToTensor using
        ImageNet normalization statistics.

    NOTE(review): A.Cutout and the IAA* transforms were removed in
    albumentations >= 1.0 — confirm the pinned library version.
    """
    # ImageNet channel statistics consumed by ToTensor's normalize step.
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15,
                           scale_limit=0.4,
                           rotate_limit=45,
                           p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        # Pick exactly one photometric jitter when this OneOf fires.
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])

    # Validation: deterministic center crop + normalization only.
    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])

    return train_tfms, valid_tfms
Beispiel #2
0
 def __init__(self, image_size=(112, 112), augmentation=False):
     """Set up crop and optional augmentation callables.

     Args:
         image_size: (width, height) target size.
         augmentation: when True, use a random crop plus photometric and
             geometric augmentations; otherwise a deterministic center
             crop with no extra augmentation.
     """
     self.image_size = image_size
     if augmentation:
         self.crop_func = RandomCropAndResize(size=image_size)
         w, h = image_size
         self.aug_func = alb.Compose([
             alb.RGBShift(),
             alb.RandomBrightnessContrast(),
             alb.OneOf([
                 alb.Rotate(limit=5),
                 alb.GridDistortion(distort_limit=0.2),
                 alb.ElasticTransform(alpha=50, sigma=10, alpha_affine=2),
             ],
                       p=0.7),
             alb.OneOf([alb.GaussNoise(),
                        alb.IAAAdditiveGaussianNoise()]),
             # One grey hole between a quarter and half of each dimension.
             alb.CoarseDropout(max_holes=1,
                               max_height=h // 2,
                               max_width=w // 2,
                               min_height=h // 4,
                               min_width=w // 4,
                               fill_value=128)
         ])
     else:
         self.crop_func = CenterCropAndResize(size=image_size)
         self.aug_func = None
Beispiel #3
0
    def __init__(self, Config, anno, swap_size=[7,7], unswap=None, swap=None, totensor=None, train=False, train_val=False, test=False):
        """Dataset init: read image paths/labels from a DataFrame or dict.

        NOTE(review): swap_size uses a mutable default list, which is
        shared across calls — harmless only if it is never mutated.
        """
        self.root_path = Config.rawdata_root
        self.numcls = Config.numcls
        self.dataset = Config.dataset
        # Annotation may arrive as a pandas DataFrame or a plain dict;
        # anything else leaves self.paths/self.labels unset.
        if isinstance(anno, pandas.core.frame.DataFrame):
            self.paths = anno['ImageName'].tolist()
            self.labels = anno['label'].tolist()
        elif isinstance(anno, dict):
            self.paths = anno['img_name']
            self.labels = anno['label']

        self.cls_dict = get_sample_dict(self.paths, self.labels)
        if train:
            self.update_sample()

        if train_val:
            self.paths, self.labels = random_sample(self.paths, self.labels)
        self.unswap = unswap
        self.swap = swap
        self.totensor = totensor
        self.cfg = Config
        self.train = train
        self.swap_size = swap_size
        self.test = test
        #self.cutout = albu.Compose([albu.CoarseDropout(max_holes=7, max_height=20, max_width=20, p=1)])
        resize_reso = 256
        crop_reso = 224
        # Despite the name, this pipeline also resizes/rotates/crops/flips
        # and normalizes, not just cutout.
        self.cutout = albu.Compose([albu.Resize(resize_reso, resize_reso), 
                               albu.Rotate(limit=15),
                               albu.RandomCrop(crop_reso, crop_reso),
                               albu.HorizontalFlip(p=0.5),
                               albu.CoarseDropout(max_holes=7, max_height=20, max_width=20, p=1), 
                               albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
                               ])
Beispiel #4
0
def get_train_transforms():
    """Training pipeline for 1024px detection images, resized to 512.

    Bounding boxes (pascal_voc format, with a 'labels' field) are carried
    through every transform via bbox_params.
    """
    return A.Compose(
        [
            A.RandomSizedCrop(
                min_max_height=(650, 1024), height=1024, width=1024, p=0.5),
            A.OneOf([
                # NOTE(review): hue/sat shift limits of 0.68 look like
                # fractions where integer pixel shifts are typical — confirm.
                A.HueSaturationValue(hue_shift_limit=0.68,
                                     sat_shift_limit=0.68,
                                     val_shift_limit=0.1,
                                     p=0.9),
                A.RandomGamma(p=0.9),
                A.RandomBrightnessContrast(
                    brightness_limit=0.1, contrast_limit=0.1, p=0.9),
            ],
                    p=0.9),
            # A.CLAHE(p=1.0),
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Resize(height=512, width=512, p=1),
            A.CoarseDropout(max_holes=20,
                            max_height=32,
                            max_width=32,
                            fill_value=0,
                            p=0.25),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']))
 def __init__(self,
              img_size=224,
              mean=(0.485, 0.456, 0.406),
              std=(0.229, 0.224, 0.225)):
     """Build self.transform, a dict with 'train' and 'val' pipelines.

     Defaults use ImageNet normalization statistics.
     """
     super(ImageTransform_7, self).__init__()
     self.transform = {
         'train':
         albu.Compose([
             albu.RandomResizedCrop(img_size, img_size),
             albu.ColorJitter(p=0.5),
             albu.HorizontalFlip(p=0.5),
             albu.VerticalFlip(p=0.5),
             albu.Transpose(p=0.5),
             albu.CLAHE(p=0.5),
             albu.MotionBlur(p=0.5),
             albu.Normalize(mean, std),
             # Dropout runs after Normalize, so holes are filled in
             # normalized space (fill value 0 == the normalized mean).
             albu.CoarseDropout(
                 max_height=15, max_width=15, max_holes=8, p=0.5),
             ToTensorV2(),
         ],
                      p=1.0),
         'val':
         albu.Compose([
             albu.Resize(img_size, img_size),
             albu.Normalize(mean, std),
             ToTensorV2(),
         ],
                      p=1.0)
     }
Beispiel #6
0
    def __init__(self, width, height):
        """Augmentation pipeline producing width x height outputs.

        Inputs are first bounded to ~1.05x the longer target side,
        augmented, filled to a square, then resized or randomly cropped
        to the final (height, width).
        """
        super().__init__()
        # 5% head-room so the final crop/resize has margin to work with.
        max_size = int(max(width, height) * 1.05)

        self.transforms = AL.Compose([
            AL.LongestMaxSize(max_size),
            AL.IAAPerspective(scale=(0.02, 0.04), keep_size=True, p=0.3),
            AL.OneOf([
                AL.HorizontalFlip(p=1),
                AL.VerticalFlip(p=1),
                AL.Transpose(p=1)
            ],
                     p=0.7),
            AL.RandomRotate90(p=0.5),
            AL.OneOf([
                AL.RandomBrightnessContrast(p=0.8),
                AL.RandomGamma(p=0.8),
                AL.HueSaturationValue(p=0.3)
            ]),
            Filling(max_size),
            AL.CoarseDropout(max_holes=12,
                             max_height=24,
                             max_width=24,
                             min_holes=4,
                             min_height=8,
                             min_width=8,
                             p=0.3),
            # Albumentations size arguments are (height, width).
            AL.OneOf([AL.Resize(height, width),
                      AL.RandomCrop(height, width)],
                     p=1),
        ])
Beispiel #7
0
def medium_3(image_size, p=1.0):
    """Medium-strength augmentation pipeline.

    Cutout holes may cover up to 25% of the image side; p gates the
    whole Compose, individual transforms carry their own probabilities.
    """
    cutout_crop = int(0.25 * image_size)
    return A.Compose(
        [
            # RandomCrop(input_size) / RandomResizedCrop (0.08, 1)
            A.HorizontalFlip(p=0.5),  # hflip
            A.VerticalFlip(p=0.5),  # vflip
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.3),
            A.OneOf([
                A.RandomFog(
                    fog_coef_lower=0.3, fog_coef_upper=1.0, alpha_coef=.1),
                A.ImageCompression(quality_lower=20, quality_upper=99),
            ],
                    p=0.3),
            A.RandomBrightnessContrast(brightness_limit=0.125,
                                       contrast_limit=0.2,
                                       p=0.5),  # contrast_limit=0.5
            A.HueSaturationValue(hue_shift_limit=5,
                                 sat_shift_limit=30,
                                 val_shift_limit=20,
                                 p=0.2),
            A.GaussNoise(var_limit=(1, 50), p=0.4),
            A.CoarseDropout(min_holes=1,
                            max_holes=2,
                            max_height=cutout_crop,
                            max_width=cutout_crop,
                            p=0.5),
        ],
        p=p)
Beispiel #8
0
def get_transforms(*, data_type):
    """Return a Compose for 'light_train', 'train', or 'valid'.

    The 'train' pipeline registers r/g/b/y as additional image targets so
    one spatial transform is applied consistently to all four channels.
    Returns None implicitly for any other data_type value.
    """
    if data_type == "light_train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(scale_limit=(0, 0), p=0.5),
            ToTensorV2(),
        ])

    if data_type == "train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            albumentations.OneOf([
                albumentations.ElasticTransform(
                    alpha=1, sigma=20, alpha_affine=10),
                albumentations.GridDistortion(num_steps=6, distort_limit=0.1),
                albumentations.OpticalDistortion(distort_limit=0.05,
                                                 shift_limit=0.05),
            ],
                                 p=0.2),
            # Blur is applied independently per channel (PerChannel wrapper).
            albumentations.core.composition.PerChannel(albumentations.OneOf([
                albumentations.MotionBlur(p=.05),
                albumentations.MedianBlur(blur_limit=3, p=.05),
                albumentations.Blur(blur_limit=3, p=.05),
            ]),
                                                       p=1.0),
            albumentations.OneOf([
                albumentations.CoarseDropout(max_holes=16,
                                             max_height=CFG.size // 16,
                                             max_width=CFG.size // 16,
                                             fill_value=0,
                                             p=0.5),
                albumentations.GridDropout(ratio=0.09, p=0.5),
                albumentations.Cutout(num_holes=8,
                                      max_h_size=CFG.size // 16,
                                      max_w_size=CFG.size // 16,
                                      p=0.2),
            ],
                                 p=0.5),
            albumentations.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5),
            ToTensorV2(),
        ],
                       additional_targets={
                           'r': 'image',
                           'g': 'image',
                           'b': 'image',
                           'y': 'image',
                       })

    elif data_type == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])
Beispiel #9
0
    def __init__(self,
                 scale_range: tuple = (0.35, 0.65),
                 input_size: tuple = (416, 416),
                 augmentation: bool = False) -> None:
        """Set crop/augmentation callables and derived heatmap geometry.

        Args:
            scale_range: (min, max) crop scale fraction.
            input_size: (height, width) network input size.
            augmentation: enable random crop + photometric augmentation.
        """
        # (Annotations corrected: defaults are tuples, not float/int.)

        if augmentation:
            self.crop_func = RandomCropAndResize(scale_range, input_size)
            self.aug_func = alb.Compose([
                alb.OneOf([
                    alb.RGBShift(),
                    alb.ToGray(),
                    alb.NoOp(),
                ]),
                alb.RandomBrightnessContrast(),
                alb.OneOf([
                    alb.GaussNoise(),
                    alb.IAAAdditiveGaussianNoise(),
                    alb.CoarseDropout(fill_value=100),
                ])
            ])
        else:
            # Deterministic path: crop at the midpoint of the scale range.
            scale = (scale_range[0] + scale_range[1]) / 2.
            self.crop_func = CenterCropAndResize(scale, input_size)
            self.aug_func = None

        # Heatmap is predicted at 1/4 of the input resolution.
        self.heatmap_stride = 4
        self.heatmap_size = (input_size[0] // self.heatmap_stride,
                             input_size[1] // self.heatmap_stride)
Beispiel #10
0
    def __init__(
        self, horizontal_flip_prob=0.0, vertical_flip_prob=0.0, gaussian_blur_prob=0.0,
        rotate_degree=0.0, cutout=0.0, cutout_height=0, cutout_width=0,
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), train=True
    ):
        """Build an albumentations pipeline from per-augmentation settings.

        Train-only augmentations are appended when their probability (or
        rotation degree) is positive; normalization and tensor conversion
        always run last. The composed pipeline is stored on
        ``self.transform``.
        """
        transforms_list = []

        if train:
            if horizontal_flip_prob > 0:  # Horizontal Flip
                transforms_list += [A.HorizontalFlip(p=horizontal_flip_prob)]
            if vertical_flip_prob > 0:  # Vertical Flip
                transforms_list += [A.VerticalFlip(p=vertical_flip_prob)]
            if gaussian_blur_prob > 0:  # Patch Gaussian Augmentation
                transforms_list += [A.GaussianBlur(p=gaussian_blur_prob)]
            if rotate_degree > 0:  # Rotate image
                transforms_list += [A.Rotate(limit=rotate_degree)]
            if cutout > 0:  # CutOut filled with the (0-255) dataset mean
                transforms_list += [A.CoarseDropout(
                    p=cutout, max_holes=1, fill_value=tuple([x * 255.0 for x in mean]),
                    max_height=cutout_height, max_width=cutout_width, min_height=1, min_width=1
                )]

        # BUG FIX: the original list was missing the comma between
        # ToTensor() and Cutout(), which is a SyntaxError.
        transforms_list += [
            A.Normalize(mean=mean, std=std, always_apply=True),
            ToTensor(),
            Cutout()
        ]

        self.transform = A.Compose(transforms_list)
Beispiel #11
0
def apply_transforms(mean, std):
    """Build train/test transform callables for PIL-style images.

    Args:
        mean: per-channel means (also used, scaled to 0-255, as the
            cutout fill color).
        std: per-channel standard deviations.

    Returns:
        Tuple of two callables, each mapping an image (anything
        np.array() accepts) to a transformed tensor.
    """
    train_transforms = A.Compose([
        A.HorizontalFlip(p=0.2),
        A.ShiftScaleRotate(shift_limit=0.1,
                           scale_limit=0.1,
                           rotate_limit=10,
                           p=0.2),
        # Exactly one 16x16 hole (min == max) filled with the mean color.
        A.CoarseDropout(
            max_holes=1,
            max_height=16,
            max_width=16,
            min_holes=1,
            min_height=16,
            min_width=16,
            fill_value=tuple((x * 255.0 for x in mean)),
            p=0.2,
        ),
        A.ToGray(p=0.15),
        A.Normalize(mean=mean, std=std, always_apply=True),
        ToTensorV2(),
    ])

    test_transforms = A.Compose([
        A.Normalize(mean=mean, std=std, always_apply=True),
        ToTensorV2(),
    ])

    return lambda img: train_transforms(image=np.array(img))[
        "image"], lambda img: test_transforms(image=np.array(img))["image"]


# if __name__ == "__main__":
#     pass
def albumentations_transforms(p=1.0, is_train=False):
    '''Applies image augmentations to image dataset
    RandomCrop 32, 32 (after padding of 4) >> FlipLR >> followed by a
    16x16 CoarseDropout cutout

    Returns:
        callable mapping an image to an augmented tensor

    NOTE(review): p and is_train are currently unused.'''
    mean = (0.491, 0.482, 0.446)
    std = (0.2302, 0.2265, 0.2262)
    # NOTE(review): collapses channel means to one scalar for Normalize —
    # confirm a grayscale-style normalization is intended here.
    mean = np.mean(mean)
    train_transforms = [
        A.Normalize(mean=mean, std=std),
        # border_mode=4 is cv2.BORDER_REFLECT_101 (reflective padding).
        A.PadIfNeeded(min_height=40,
                      min_width=40,
                      border_mode=4,
                      always_apply=True,
                      p=1.0),
        A.RandomCrop(32, 32, always_apply=True, p=1.0),
        A.HorizontalFlip(p=0.5),
        # Cutout runs after Normalize, so fill values are in 0-1 space.
        A.CoarseDropout(max_holes=1,
                        max_height=16,
                        max_width=16,
                        min_holes=1,
                        min_height=16,
                        min_width=16,
                        fill_value=(0.4914, 0.4822, 0.4465),
                        mask_fill_value=None),
        ToTensorV2()
    ]
    transforms_result = A.Compose(train_transforms)
    return lambda img: transforms_result(image=np.array(img))["image"]
def build_transforms(args, n_classes):
    """Build (train, test) segmentation transforms.

    Both pipelines are wrapped so masks get one-hot treatment for
    n_classes classes, with label 255 ignored.
    """
    train_transforms = A.Compose([
        A.HorizontalFlip(),
        A.RandomResizedCrop(width=args.image_size,
                            height=args.image_size,
                            scale=(0.7, 1.2)),
        # Hole size capped at one fifth of the image side.
        A.CoarseDropout(max_height=int(args.image_size / 5),
                        max_width=int(args.image_size / 5)),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
            A.Blur(blur_limit=5),
        ]),
        ToTensorV2(),
    ])
    train_transforms = AlbumentationsSegmentationWrapperTransform(
        train_transforms, class_num=n_classes, ignore_indices=[
            255,
        ])
    test_transforms = A.Compose([
        A.Resize(width=args.image_size, height=args.image_size),
        ToTensorV2(),
    ])
    test_transforms = AlbumentationsSegmentationWrapperTransform(
        test_transforms, class_num=n_classes, ignore_indices=[
            255,
        ])
    return train_transforms, test_transforms
Beispiel #14
0
def get_train_transform(mu, sigma):

    """
    Build the training augmentation pipeline.

    Args:
        mu: per-channel mean, used both for normalization and as the
            cutout fill value.
        sigma: per-channel standard deviation for normalization.
    Returns:
        A.Compose: flip, shift-scale-rotate, normalize, 16x16 cutout,
        grayscale conversion and tensor conversion.
    """
    train_transform = A.Compose([
                             A.HorizontalFlip(p=0.4),
                             A.ShiftScaleRotate(),
                             A.Normalize(mean=(mu), 
                                         std=(sigma)),
                             # Exactly one 16x16 hole (min == max sizes).
                             A.CoarseDropout(max_holes=1, 
                                             max_height=16, 
                                             max_width=16, 
                                             min_holes=1, 
                                             min_height=16,
                                             min_width=16,
                                             fill_value=(mu)),
                             A.ToGray(),
                             ToTensorV2(),
])

    return(train_transform)
def medium_augmentations():
    """Medium-strength segmentation-friendly augmentation pipeline.

    Spatial transforms are limited to flip and small shift/scale/rotate;
    the rest are photometric or dropout-style and preserve geometry.
    """
    return A.Compose([
        A.HorizontalFlip(),
        A.ShiftScaleRotate(scale_limit=0.1,
                           rotate_limit=15,
                           border_mode=cv2.BORDER_CONSTANT),
        # Add occasion blur/sharpening
        A.OneOf([A.GaussianBlur(), A.IAASharpen(),
                 A.NoOp()]),
        # Spatial-preserving augmentations:
        A.OneOf([A.CoarseDropout(),
                 A.MaskDropout(max_objects=5),
                 A.NoOp()]),
        A.GaussNoise(),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.CLAHE(),
            A.HueSaturationValue(),
            A.RGBShift(),
            A.RandomGamma()
        ]),
        # Weather effects
        A.RandomFog(fog_coef_lower=0.01, fog_coef_upper=0.3, p=0.1),
        A.Normalize(),
    ])
    def get_train_transform(self):
        """Build the training transform, wrapped for this project's API.

        Uses self.size, self.means and self.stds; cutout/crop fill colors
        are the dataset means scaled to 0-255.
        """

        # fill values for cutout or cropping portion
        fill_value = [255. * mean for mean in self.means]
        rc_padding = 32
        rc_pval = 0.2
        # Pad, then pick random crop (weight rc_pval) vs center crop
        # (weight 1-rc_pval); OneOf normalizes these weights.
        randomCrop = [albumentations.PadIfNeeded(min_height=self.size+rc_padding, min_width=self.size+rc_padding, 
                                                  border_mode=cv2.BORDER_REPLICATE, value=fill_value, p=1.0),
                        
                      albumentations.OneOf([
                                albumentations.RandomCrop(height=self.size, width=self.size, p=rc_pval),
                                albumentations.CenterCrop(height=self.size, width=self.size, p=1-rc_pval),
                              ], p=1.0)
          ]

        train_tf = albumentations.Compose([
                    albumentations.Resize(self.size,self.size),
                    albumentations.RandomBrightness(limit=0.2, p=0.5),
                    albumentations.RandomContrast(limit=0.2, p=0.5),
                    albumentations.Rotate(limit=(-10,10), p=0.70),
                    randomCrop[0], randomCrop[1],
                    albumentations.HorizontalFlip(p=0.7),
                    albumentations.ElasticTransform(sigma=50, alpha=1, alpha_affine=10,p=0.10),
                    albumentations.CoarseDropout(max_holes=1, max_height=64, max_width=64, min_height=16, min_width=16, fill_value=fill_value, p=0.70),
                    albumentations.Normalize(mean=self.means, std=self.stds),
                    ToTensor()
        ])

        train_tf = AlbumCompose(train_tf)
        return train_tf
Beispiel #17
0
def da_policy_DA7b(crop_size):
    """Data-augmentation policy DA7b.

    Returns (train, val) albumentations pipelines; validation only
    resizes to crop_size. (The commented block is the earlier fastai
    formulation of this policy.)
    """
    # additional_aug = [*
    #     zoom_crop(scale=(0.85, 1.15), do_rand=True),
    #     cutout(n_holes=(1, 2), length=(32, 84), p=.5),
    #     brightness(change=(0.33, 0.68), p=.5),
    #     contrast(scale=(0.7, 1.4), p=.5),
    # ]
    # return get_transforms(do_flip=False, max_warp=0.25, max_zoom=1.25, max_rotate=17, xtra_tfms=additional_aug)

    train_da = albumentations.Compose([
        albumentations.ShiftScaleRotate(scale_limit=0.15, rotate_limit=0),
        albumentations.Resize(crop_size, crop_size),
        albumentations.CoarseDropout(p=0.5,
                                     min_holes=1,
                                     max_holes=2,
                                     min_width=16,
                                     min_height=16,
                                     max_width=64,
                                     max_height=64),
        albumentations.RandomBrightnessContrast(p=0.5,
                                                brightness_limit=0.2,
                                                contrast_limit=0.25),
        albumentations.Rotate(p=0.5, limit=17)
    ])

    val_da = albumentations.Compose(
        [albumentations.Resize(crop_size, crop_size)])

    return train_da, val_da
Beispiel #18
0
def train_image_augmentation(image, img_size):
    """Apply the training augmentation pipeline to a single image.

    Args:
        image: input image (anything np.array() accepts, e.g. PIL).
        img_size: square side for resize and center crop.

    Returns:
        The augmented, ImageNet-normalized image array.

    NOTE(review): the pipeline is rebuilt on every call — hoist it if
    this becomes a hot path.
    """
    image = np.array(image)

    augmentation = A.Compose(
        [
            A.Resize(img_size, img_size),
            A.CenterCrop(img_size, img_size, p=1.0),
            A.Transpose(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ShiftScaleRotate(p=0.5),
            A.Blur(blur_limit=3),
            A.OpticalDistortion(p=0.5),
            A.GridDistortion(p=0.5),
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.5),
            A.CoarseDropout(p=0.5),
            A.Cutout(p=0.5),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
                p=1.0,
            )
        ],
        p=1.0,
    )

    augmented_image = augmentation(image=image)

    return augmented_image['image']
Beispiel #19
0
def get_train_transforms():
    """Detection training pipeline resizing to Config.Train.img_size.

    Pascal VOC boxes with a 'labels' field are carried through all
    transforms; min_visibility=0 keeps boxes however small they become.
    """
    # noinspection PyTypeChecker
    return A.Compose([
        A.RandomSizedCrop(
            min_max_height=(850, 850), height=1024, width=1024, p=0.3),
        A.OneOf([
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.8),
            A.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.9)
        ],
                p=0.5),
        # Weather / noise group.
        A.OneOf([
            A.RandomRain(rain_type='drizzle', p=0.2),
            A.GaussianBlur(blur_limit=7, p=0.5),
            A.GaussNoise((0.2, 0.25), p=0.3),
            A.RandomShadow(p=0.2)
        ],
                p=0.4),
        A.ToGray(p=0.01),
        A.Flip(p=0.5),
        A.CoarseDropout(max_height=64,
                        max_width=64,
                        min_holes=3,
                        min_height=32,
                        min_width=32,
                        p=0.5),
        A.Resize(Config.Train.img_size, Config.Train.img_size, p=1.0),
        ToTensorV2(p=1.0),
    ],
                     bbox_params=BboxParams('pascal_voc',
                                            label_fields=['labels'],
                                            min_visibility=0.0))
Beispiel #20
0
 def __init__(self, batch_size, num_classes, data_dir, img_size=(256, 256)):
     """Data module init: stores config and builds the train transform.

     NOTE(review): num_classes is accepted but not stored here, and
     img_size (a tuple) is passed positionally to RandomResizedCrop
     where the classic API expects height and width ints — confirm the
     albumentations version in use accepts this.
     """
     super().__init__()
     self.data_dir = data_dir
     self.batch_size = batch_size
     self.train_transform = A.Compose(
         [
             A.RandomResizedCrop(img_size, img_size, p=1.0),
             A.Transpose(p=0.5),
             A.HorizontalFlip(p=0.5),
             A.VerticalFlip(p=0.5),
             A.ShiftScaleRotate(p=0.5),
             A.HueSaturationValue(hue_shift_limit=0.2,
                                  sat_shift_limit=0.2,
                                  val_shift_limit=0.2,
                                  p=0.5),
             A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                        contrast_limit=(-0.1, 0.1),
                                        p=0.5),
             A.Normalize(
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225],
                 max_pixel_value=255.0,
                 p=1.0,
             ),
             # Dropout/cutout applied after Normalize (normalized space).
             A.CoarseDropout(p=0.5),
             A.Cutout(p=0.5),
             ToTensorV2(p=1.0),
         ],
         p=1.0,
     )
Beispiel #21
0
    def __init__(self):
        """Blur + dropout augmentation.

        The second OneOf picks one of four identical CoarseDropout
        configurations that differ only in fill value (255/170/85/0),
        i.e. a randomly chosen grey level for the holes.
        """
        self.aug = A.Compose([
            A.OneOf([
                A.Blur(blur_limit=10, p=0.5),
                A.MedianBlur(blur_limit=5, p=0.5),
            ]),
            A.OneOf([
                A.CoarseDropout(max_holes=7,
                                min_holes=3,
                                min_height=3,
                                min_width=1,
                                max_height=16,
                                max_width=4,
                                fill_value=255,
                                p=0.5),
                A.CoarseDropout(max_holes=7,
                                min_holes=3,
                                min_height=3,
                                min_width=1,
                                max_height=16,
                                max_width=4,
                                fill_value=170,
                                p=0.5),
                A.CoarseDropout(max_holes=7,
                                min_holes=3,
                                min_height=3,
                                min_width=1,
                                max_height=16,
                                max_width=4,
                                fill_value=85,
                                p=0.5),
                A.CoarseDropout(max_holes=7,
                                min_holes=3,
                                min_height=3,
                                min_width=1,
                                max_height=16,
                                max_width=4,
                                fill_value=0,
                                p=0.5),
            ]),

            # A.RandomContrast(limit=0.05, p=0.75),
            # A.RandomBrightness(limit=0.05, p=0.75),
            # A.RandomBrightnessContrast(contrast_limit=0.05, brightness_limit=0.05, p=0.75),
        ])
Beispiel #22
0
 def _as_cutout_transform(self, value, p, image_shape):
     """Translate a cutout spec into an A.CoarseDropout with square holes.

     The hole side length comes from self._calculate_hole_size(value,
     image_shape); the hole count is fixed (min == max == self.num_holes).
     """
     hole_size = self._calculate_hole_size(value, image_shape)
     return A.CoarseDropout(
         min_holes=self.num_holes,
         max_holes=self.num_holes,
         max_height=hole_size,
         max_width=hole_size,
         p=p,
     )
Beispiel #23
0
def get_train_augmentations(image_size: int = 224):
    """Training pipeline: dropout, rotation, optional random crop, then
    longest-side resize, zero padding, normalization and tensor output."""
    steps = [
        A.CoarseDropout(20),
        A.Rotate(30),
        A.RandomCrop(image_size, image_size, p=0.5),
        A.LongestMaxSize(image_size),
        A.PadIfNeeded(image_size, image_size, 0),
        A.Normalize(),
        ToTensor(),
    ]
    return A.Compose(steps)
Beispiel #24
0
 def build_train(self):
     """Build the training pipeline using self.height/self.width and the
     class-level MEANS/STDS normalization constants."""
     return A.Compose([
         A.Flip(p=0.55),
         A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, border_mode=0),
         A.Resize(self.height, self.width),
         A.RandomBrightness(),
         A.RandomContrast(),
         # Either two large (128px) holes or four smaller (64px) ones.
         A.OneOf(
             [
                 A.CoarseDropout(max_holes=2, max_height=128,
                                 max_width=128),
                 A.CoarseDropout(max_holes=4, max_height=64, max_width=64),
             ],
             p=0.5,
         ),
         DistortionBase(),
         A.Normalize(self.MEANS, self.STDS),
         ToTensorV2(),
     ])
Beispiel #25
0
def hard_transforms():
    """Return the hard augmentation set: 90-degree rotation, coarse
    dropout, brightness/contrast jitter and grid distortion."""
    rotation = albu.RandomRotate90()
    dropout = albu.CoarseDropout(p=0.3)
    jitter = albu.RandomBrightnessContrast(brightness_limit=0.1,
                                           contrast_limit=0.1,
                                           p=0.3)
    distortion = albu.GridDistortion(p=0.3)
    return [rotation, dropout, jitter, distortion]
def get_transform(mode):
    """Compose transforms for the given mode.

    'train' prepends an occasional blur (plain or median) and a coarse
    dropout; every mode ends with tensor conversion.
    """
    steps = []
    if mode == 'train':
        blur_choice = A.OneOf([
            A.Blur(blur_limit=3, p=1.0),
            A.MedianBlur(blur_limit=3, p=1.0)
        ], p=0.25)
        steps.append(blur_choice)
        steps.append(A.CoarseDropout(min_holes=3, min_height=4, min_width=4, max_height=12, max_width=12, p=0.25))

    steps.append(ToTensorV2(p=1.0))
    return A.Compose(transforms=steps)
Beispiel #27
0
    def __init__(self):
        """CIFAR-style augmentation: flip, mean-filled cutout, normalize.

        Cutout fill is the CIFAR-10 mean scaled to 0-255; Normalize uses
        the standard CIFAR-10 statistics.
        """
        self.trans=A.Compose(
                [
                A.HorizontalFlip(p=0.5),
                #A.Cutout(num_holes=2, max_h_size=4, max_w_size=4,fill_value=0.5*255, p=0.5),
                A.CoarseDropout(max_holes=1, max_height=16, max_width=16, min_height=4,
						min_width=4, fill_value=(np.array([0.4914, 0.4822, 0.4465]))*255.0, p=0.75),
                A.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                ToTensor()
                
                ])
Beispiel #28
0
def hard_transforms():
    """Return the hard augmentation set: coarse dropout,
    brightness/contrast jitter and a white-bordered grid distortion.
    (90-degree rotation is intentionally left disabled.)"""
    # albu.RandomRotate90(),
    dropout = albu.CoarseDropout()
    jitter = albu.RandomBrightnessContrast(
        brightness_limit=0.2, contrast_limit=0.2, p=0.3
    )
    distortion = albu.GridDistortion(p=0.3, border_mode=0, value=255, mask_value=[255, 255, 255])
    return [dropout, jitter, distortion]
Beispiel #29
0
def get_training_augmentation(grayscale=False,
                              height=320,
                              width=640,
                              crop_mode=0):
    """Training augmentation with two crop strategies.

    crop_mode 0: oversized pad+crop, heavy distortions, then final
    pad+crop to (height, width). Any other value: flips/distortions
    followed by a plain resize. Uses module-level mean/std globals;
    grayscale=True averages them to a single channel.
    """

    mea = mean
    st = std
    if grayscale:
        # Collapse RGB statistics to one channel for grayscale inputs.
        mea = (mean[0] + mean[1] + mean[2]) / 3
        st = (std[0] + std[1] + std[2]) / 3

    if crop_mode == 0:
        train_transform = [
            # Work at 1.5x target size so the final crop has context.
            albu.PadIfNeeded(height * 3 // 2, width * 3 // 2),
            albu.RandomCrop(height * 3 // 2, width * 3 // 2),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.CoarseDropout(p=0.1),
            albu.ShiftScaleRotate(scale_limit=0.4,
                                  rotate_limit=45,
                                  shift_limit=0.1,
                                  p=0.5,
                                  border_mode=0),
            albu.GridDistortion(p=0.3),
            albu.OpticalDistortion(p=0.3, distort_limit=2, shift_limit=0.5),
            albu.RGBShift(p=0.3),
            albu.Blur(p=0.3),
            albu.MotionBlur(p=0.3),
            albu.PadIfNeeded(height, width),
            albu.RandomCrop(height, width)
        ]
    else:
        train_transform = [
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.4,
                                  rotate_limit=45,
                                  shift_limit=0.1,
                                  p=0.5,
                                  border_mode=0),
            albu.GridDistortion(p=0.5),
            albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
            albu.RGBShift(p=0.5),
            albu.ToGray(p=0.5),
            albu.Resize(height, width)
        ]

    train_transform.extend([
        #Equalize(p=1.0, by_channels=False),
        albu.Normalize(mean=mea, std=st, p=1),
        ToTensor(),
    ])
    return albu.Compose(train_transform)
Beispiel #30
0
def create_train_transform(flip,
        noise,
        cutout,
        resize,
        size = 112,
        bright = True):
    """Assemble a training transform from boolean feature switches.

    Args:
        flip: add rotation/affine/flip group.
        noise: add blur/distortion/CLAHE/noise group.
        cutout: add coarse-dropout/cutout group.
        resize: resize to size+10 then random-crop back to size.
        size: target square side.
        bright: add brightness/color-jitter group.

    Returns:
        albumentations.Compose ending with a fixed Normalize.
    """
    
    translist = []
    
    if flip:
        translist+=[albumentations.OneOf([
                albumentations.Rotate(limit=30),
                albumentations.IAAPiecewiseAffine(),
                albumentations.ShiftScaleRotate(
                shift_limit=0.02,
                scale_limit=0.3,
                rotate_limit=10,
                ),
                albumentations.HorizontalFlip()],p=0.7)]

    if noise:
        translist+=[albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=6),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.OpticalDistortion(),
            albumentations.CLAHE(),
            albumentations.GaussNoise(var_limit=(5.0,20.0))], p=0.75)]

    if bright:
        translist+=[albumentations.OneOf([
          albumentations.RandomBrightness(limit=0.6),
          #albumentations.Sharpen(),
          albumentations.ColorJitter(),
          albumentations.RandomBrightnessContrast(brightness_limit=0.6, contrast_limit=0.6)],p=0.7)]

    if cutout:
        # NOTE: np.random is evaluated here, once, at build time — the
        # Cutout hole sizes/count are fixed for the pipeline's lifetime,
        # not resampled per image.
        translist+=[albumentations.OneOf([
            albumentations.CoarseDropout(),
            albumentations.Cutout(max_h_size = int(size * np.random.rand(1)*0.5), max_w_size = int(size * np.random.rand(1)*0.5), num_holes=np.random.randint(1,3))
            ],p=0.75)]
            
    if resize:
        translist+=[albumentations.Resize(size+10, size+10, interpolation=2)]
        translist+=[albumentations.RandomCrop(size,size,always_apply=True)]

    #translist+=[albumentations.Normalize(mean=(0.2481, 0.2292, 0.2131), std = (0.2167,0.2071,0.2014))]
    #translist+=[albumentations.Normalize(mean=(0.2248, 0.2080, 0.1929), std = (0.2231, 0.2140, 0.2083))]
    #trainlist+=[albumentations.Normalize(mean=(0.2539, 0.2348, 0.2189), std = (0.2195,0.2110,0.2061))]
    #translist+=[albumentations.Normalize(mean=(0.2580, 0.2360, 0.2215), std = (0.2235, 0.2132, 0.2100))]
    translist+=[albumentations.Normalize(mean=(0.1977, 0.2115, 0.2275), std = (0.2177, 0.2227, 0.2317))]
    #translist+=[albumentations.Normalize(mean=(0.2527, 0.2343, 0.2177), std = (0.2171, 0.2082, 0.2026))]
    transform = albumentations.Compose(translist)
    return transform