Esempio n. 1
0
    def __init__(self, csv_path, data_path):
        """Load the ads CSV, index images per document id, and set up augmentations.

        Args:
            csv_path: CSV with at least 'sdocid' and 'cars_images' columns.
            data_path: root directory holding the image files.
        """
        self.data = pd.read_csv(csv_path)
        # Map each sdocid to the list of row indices that belong to it.
        grouped = self.data.groupby('sdocid').groups.items()
        self.groups = {doc_id: rows.tolist() for doc_id, rows in grouped}
        self.ids = list(self.groups.keys())
        # Row index -> parsed image-name list ('cars_images' stores a Python
        # list literal as a string, hence ast.literal_eval).
        parsed = self.data.cars_images.map(lambda s: ast.literal_eval(s))
        self.ad_imgs_list = dict(zip(self.data.index.tolist(), parsed.tolist()))

        self.path = data_path

        self.transform = transforms.ToTensor()
        # sdocid -> contiguous class index, in order of first appearance.
        unique_ids = self.data['sdocid'].unique().tolist()
        self.class_dict = dict(
            zip(unique_ids, range(0, self.data['sdocid'].nunique())))

        # Train-time augmentations, applied with overall probability 1.
        self.aug = albu.Compose(
            [
                albu.HorizontalFlip(),
                albu.RandomSizedCrop(
                    min_max_height=(200, 215), height=224, width=224, p=0.2),
                albu.Cutout(num_holes=4, max_h_size=20, max_w_size=20, p=0.3),
            ],
            p=1.,
        )
Esempio n. 2
0
    def cutout_shift_transform(size: tuple,
                               num_holes: int = 4,
                               cut_ratio: float = 0.2,
                               shift: float = 0.25,
                               **kwargs):
        """Build a resize + cutout + random-shift transform list.

        Args:
            size: (height, width) target image size.
            num_holes: number of cutout holes.
            cut_ratio: hole size as a fraction of each image dimension.
            shift: maximum shift as a fraction of the image size.
            **kwargs: optional 'cutout_p' overriding the cutout probability.

        Returns:
            A list of albumentations transforms ending with ToWBM().
        """
        hole_h = int(size[0] * cut_ratio)
        hole_w = int(size[1] * cut_ratio)
        return [
            A.Resize(*size, interpolation=cv2.INTER_NEAREST),
            A.Cutout(
                num_holes=num_holes,
                max_h_size=hole_h,
                max_w_size=hole_w,
                fill_value=0,
                p=kwargs.get('cutout_p', 0.5),
            ),
            A.ShiftScaleRotate(
                shift_limit=shift,
                scale_limit=0,
                rotate_limit=0,
                interpolation=cv2.INTER_NEAREST,
                border_mode=cv2.BORDER_CONSTANT,
                value=0,
                p=1.0,
            ),
            ToWBM(),
        ]
Esempio n. 3
0
def cifar_alb11():
    '''Applies image augmentations to image dataset 
    RandomCrop 32, 32 (after padding of 4) >> FlipLR >> Followed by CutOut(8, 8)
    
    Returns:
        list of transforms'''
    mean = (0.491, 0.482, 0.446)
    # NOTE(review): the per-channel mean tuple is immediately collapsed to a
    # single scalar and reused both for normalization and as the cutout fill —
    # confirm this is intentional rather than a lost per-channel mean.
    mean = np.mean(mean)
    train_transforms = [
        # NOTE(review): `std` is not defined in this function — presumably a
        # module-level constant; verify it exists at import time.
        A.Normalize(mean=mean, std=std),
        # Pad 32x32 input to 40x40 so the 32x32 random crop shifts the image.
        A.PadIfNeeded(min_height=40,
                      min_width=40,
                      border_mode=4,
                      always_apply=True,
                      p=1.0),
        A.RandomCrop(32, 32, always_apply=True, p=1.0),
        A.HorizontalFlip(p=0.5),
        # Single 8x8 cutout hole filled with the scalar mean computed above.
        A.Cutout(num_holes=1,
                 max_h_size=8,
                 max_w_size=8,
                 fill_value=mean,
                 always_apply=False,
                 p=1),
        ToTensor()
    ]
    transforms_result = A.Compose(train_transforms)
    # The returned callable adapts a PIL image to albumentations' dict API.
    return lambda img: transforms_result(image=np.array(img))["image"]
Esempio n. 4
0
    def on_epoch_start(self, trainer, pl_module):
        """Progressive-resizing callback: every 5 epochs, bump the image size
        and shrink the batch size, rebuilding datasets and dataloaders.

        Only active when `pl_module.hparams.progressive` is set. Mutates
        `trainer` dataloader/batch-count attributes directly — assumes a
        PyTorch Lightning version where these are plain writable attributes
        (TODO confirm against the pinned Lightning version).
        """
        if pl_module.hparams.progressive:
            # Stage index 0..3: advances every 5 epochs, capped at 3.
            ind = int(trainer.current_epoch / 5) if trainer.current_epoch < 5*3 else 3
            prog = [512, 1024, 1536, 2048]
            #batch = [32, 16, 8, 4]
            batch = [64, 32, 16, 8]
            # For Progressive Resizing
            train_transform = A.Compose([
                        A.RandomResizedCrop(height=prog[ind], width=prog[ind], scale=(0.8, 1.0), ratio=(1, 1), interpolation=1, always_apply=False, p=1.0),
                        A.Flip(always_apply=False, p=0.5),
                        A.RandomGridShuffle(grid=(4, 4), always_apply=False, p=1.0),
                        A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, always_apply=False, p=0.5),
                        A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, brightness_by_max=True, always_apply=False, p=0.5),
                        A.GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5),
                        #A.Rotate(limit=90, interpolation=1, border_mode=4, value=None, mask_value=None, always_apply=False, p=0.5),
                        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.0, rotate_limit=45, interpolation=1, border_mode=4, value=255, mask_value=None, always_apply=False, p=0.5),
                        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0),
                        A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0, always_apply=False, p=0.5),
                        ])

            # Validation only resizes and normalizes at the current stage size.
            valid_transform = A.Compose([A.Resize(height=prog[ind], width=prog[ind], interpolation=1, always_apply=False, p=1.0),
                        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0)
                        ])

            # Rebuild datasets with the stage-specific transforms.
            pl_module.train_dataset = PANDADataset(pl_module.train_df, pl_module.hparams.data_dir, pl_module.hparams.image_format, transform=train_transform, tile=pl_module.hparams.tile, layer=pl_module.hparams.image_layer)
            pl_module.val_dataset = PANDADataset(pl_module.val_df, pl_module.hparams.data_dir, pl_module.hparams.image_format, transform=valid_transform, tile=pl_module.hparams.tile, layer=pl_module.hparams.image_layer)
            # NOTE(review): the validation loader uses shuffle=True and
            # drop_last=True, which is unusual for validation — confirm.
            trainer.train_dataloader = DataLoader(pl_module.train_dataset, batch_size=batch[ind],
                                                shuffle=True, num_workers=4, drop_last=True)
            trainer.val_dataloaders = [DataLoader(pl_module.val_dataset, batch_size=batch[ind],
                                                shuffle=True, num_workers=4, drop_last=True)]

            # Keep the trainer's cached batch counts in sync with the new loaders.
            trainer.num_training_batches = len(trainer.train_dataloader)#float('inf')
            trainer.num_val_batches = len(trainer.val_dataloaders[0])#float('inf')
            trainer.val_check_batch = trainer.num_training_batches
Esempio n. 5
0
    def __init__(self, Normalize_mean_std=None, H_F=None, Padding=None, R_Crop=None, Rotate=None, cutout=None):
        """Assemble an albumentations pipeline from the enabled flags.

        Any argument that is not None switches on the corresponding transform;
        Normalize_mean_std is a (mean, std) pair and also appends ToTensor().
        """
        print("in the init of Albumentation")

        pipeline = []

        if H_F is not None:
            pipeline.append(A.HorizontalFlip(p=0.7))
        if Padding is not None:
            pipeline.append(
                A.PadIfNeeded(min_height=70, min_width=70, border_mode=4,
                              value=None, mask_value=None,
                              always_apply=False, p=1.0))
        if R_Crop is not None:
            pipeline.append(A.RandomCrop(64, 64, always_apply=False, p=1.0))
        if Rotate is not None:
            pipeline.append(
                A.Rotate(limit=30, interpolation=1, border_mode=4, value=None,
                         mask_value=None, always_apply=False, p=0.5))
        if cutout is not None:
            pipeline.append(
                A.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=0.7))
        if Normalize_mean_std is not None:
            pipeline.append(
                A.Normalize(Normalize_mean_std[0], Normalize_mean_std[1],
                            always_apply=True))
            pipeline.append(ToTensor())

        self.transforms = pipeline
        print("The transformations done are:{}".format(self.transforms))
        self.Transforms = A.Compose(self.transforms)
def get_transforms(image_size):
    """Return (train, val) albumentations pipelines for square `image_size`."""
    blur_or_noise = albumentations.OneOf([
        albumentations.MotionBlur(blur_limit=5),
        albumentations.MedianBlur(blur_limit=5),
        albumentations.GaussianBlur(blur_limit=5),
        albumentations.GaussNoise(var_limit=(5.0, 30.0)),
    ], p=0.7)
    warp = albumentations.OneOf([
        albumentations.OpticalDistortion(distort_limit=1.0),
        albumentations.GridDistortion(num_steps=5, distort_limit=1.),
        albumentations.ElasticTransform(alpha=3),
    ], p=0.7)
    hole = int(image_size * 0.375)

    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        blur_or_noise,
        warp,
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(
            hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(
            shift_limit=0.1, scale_limit=0.1, rotate_limit=15,
            border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(
            max_h_size=hole, max_w_size=hole, num_holes=1, p=0.7),
        albumentations.Normalize(),
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize(),
    ])

    return transforms_train, transforms_val
Esempio n. 7
0
def get_augmentations(p=0.5, image_size=224):
    """Build train/valid augmentation pipelines normalized with ImageNet stats.

    Args:
        p: probability applied to most stochastic transforms.
        image_size: target square crop size.

    Returns:
        (train_tfms, valid_tfms) tuple of A.Compose pipelines.
    """
    stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225],
    }

    color_jitter = A.OneOf(
        [
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=20,
                                 sat_shift_limit=50,
                                 val_shift_limit=50),
        ],
        p=p,
    )
    noise = A.OneOf([A.IAAAdditiveGaussianNoise(), A.GaussNoise()], p=p)
    blur = A.OneOf(
        [
            A.MotionBlur(p=0.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ],
        p=p,
    )
    distortion = A.OneOf(
        [
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=0.1),
            A.IAAPiecewiseAffine(p=0.3),
        ],
        p=p,
    )

    train_tfms = A.Compose([
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.4,
                           rotate_limit=45, p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        color_jitter,
        noise,
        A.CoarseDropout(max_holes=10, p=p),
        blur,
        distortion,
        ToTensor(normalize=stats),
    ])

    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=stats),
    ])

    return train_tfms, valid_tfms
Esempio n. 8
0
def get_augmentation(config):
    """Return {'train': ..., 'valid': ...} albumentations pipelines.

    Crop size is taken from config.TRAIN.HEIGHT / config.TRAIN.WIDTH.
    """
    normalize = A.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    to_tensor = ToTensorV2()
    color_jitter = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2,
                                       p=0.9),
        ],
        p=0.5,
    )

    train_transform = A.Compose([
        A.CenterCrop(p=1, height=config.TRAIN.HEIGHT,
                     width=config.TRAIN.WIDTH),
        A.Cutout(num_holes=4, max_h_size=10, max_w_size=10, p=0.3),
        color_jitter,
        A.HorizontalFlip(p=0.5),
        A.ISONoise(p=0.3),
        normalize,
        to_tensor,
    ])
    valid_transform = A.Compose([
        A.CenterCrop(p=1, height=config.TRAIN.HEIGHT,
                     width=config.TRAIN.WIDTH),
        normalize,
        to_tensor,
    ])

    return {'train': train_transform, 'valid': valid_transform}
Esempio n. 9
0
 def __init__(self, batch_size, num_classes, data_dir, img_size=(256, 256)):
     """Store dataset settings and build the training augmentation pipeline.

     Args:
         batch_size: training batch size.
         num_classes: number of target classes (kept for the caller's use).
         data_dir: dataset root directory.
         img_size: target (height, width) of the random resized crop; a bare
             int is also accepted and treated as a square size.
     """
     super().__init__()
     self.data_dir = data_dir
     self.batch_size = batch_size
     # BUG FIX: the original passed the (h, w) tuple itself as both the
     # `height` and `width` arguments of RandomResizedCrop; unpack it so the
     # transform receives integers.
     if isinstance(img_size, (tuple, list)):
         height, width = img_size
     else:
         height = width = img_size
     self.train_transform = A.Compose(
         [
             A.RandomResizedCrop(height, width, p=1.0),
             A.Transpose(p=0.5),
             A.HorizontalFlip(p=0.5),
             A.VerticalFlip(p=0.5),
             A.ShiftScaleRotate(p=0.5),
             A.HueSaturationValue(hue_shift_limit=0.2,
                                  sat_shift_limit=0.2,
                                  val_shift_limit=0.2,
                                  p=0.5),
             A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                        contrast_limit=(-0.1, 0.1),
                                        p=0.5),
             A.Normalize(
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225],
                 max_pixel_value=255.0,
                 p=1.0,
             ),
             A.CoarseDropout(p=0.5),
             A.Cutout(p=0.5),
             ToTensorV2(p=1.0),
         ],
         p=1.0,
     )
Esempio n. 10
0
def get_transforms(phase, crop_type=0, size=512):
    """Build the segmentation transform pipeline for a phase.

    Args:
        phase: "train" enables augmentation + cropping; any other value gets
            only normalization and tensor conversion.
        crop_type: 0 -> crop around a non-empty mask, 1 -> plain random crop.
        size: square crop size.

    Returns:
        A composed transform pipeline.
    """
    steps = []
    if phase == "train":
        steps += [
            aug.Flip(),
            aug.Cutout(num_holes=4, p=0.5),
            aug.OneOf(
                [aug.RandomContrast(), aug.RandomGamma(), aug.RandomBrightness()],
                p=1),
            aug.ShiftScaleRotate(rotate_limit=90),
            aug.OneOf([aug.GaussNoise(p=.35)], p=.5),
        ]
        if crop_type == 0:
            steps.append(CropNonEmptyMaskIfExists(size, size))
        elif crop_type == 1:
            steps.append(RandomCrop(size, size, p=1.0))

    steps += [
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], p=1),
        ToTensor(),
    ]
    return Compose(steps)
Esempio n. 11
0
def get_transforms(image_size):
    """Return (train, val) pipelines; train adds flip, compression, affine,
    and a large central-area cutout."""
    hole = int(image_size * 0.4)

    transforms_train = albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.ImageCompression(quality_lower=99, quality_upper=100),
        albumentations.ShiftScaleRotate(shift_limit=0.2,
                                        scale_limit=0.2,
                                        rotate_limit=10,
                                        border_mode=0,
                                        p=0.7),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=hole, max_w_size=hole,
                              num_holes=1, p=0.5),
        albumentations.Normalize(),
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize(),
    ])

    return transforms_train, transforms_val
Esempio n. 12
0
def get_transforms(img_size=256, trans_type='train'):
    """Return a train or eval transform pipeline.

    Args:
        img_size: square output size.
        trans_type: 'train' for augmented pipeline; anything else yields a
            deterministic resize + normalize + tensor pipeline.
    """
    if trans_type == 'train':
        hole = int(img_size * 0.4)
        return A.Compose([
            A.HorizontalFlip(p=0.5),
            A.ShiftScaleRotate(shift_limit=0.2,
                               scale_limit=0.2,
                               rotate_limit=10,
                               border_mode=0,
                               p=0.7),
            A.Resize(img_size, img_size),
            A.Cutout(max_h_size=hole, max_w_size=hole, num_holes=1, p=0.5),
            A.Normalize(),
            ToTensorV2(p=1.0),
        ])

    # Eval/test path: no randomness.
    return A.Compose([
        A.Resize(img_size, img_size),
        A.Normalize(),
        ToTensorV2(p=1.0),
    ])
Esempio n. 13
0
def get_album_transforms(norm_mean, norm_std):
    """Return (train, test) albumentations pipelines for 32x32 input.

    Train: reflect-pad to 36x36, random 32x32 crop, horizontal flip, and a
    single 8x8 cutout; both pipelines normalize and convert to tensor.
    """
    train = A.Compose([
        A.PadIfNeeded(min_height=36,
                      min_width=36,
                      border_mode=cv2.BORDER_REFLECT,
                      always_apply=True),
        A.RandomCrop(height=32, width=32, always_apply=True),
        A.HorizontalFlip(p=0.5),
        # Positional args: 1 hole, 8x8, fill value 0.5.
        A.Cutout(1, 8, 8, .5, p=0.4),
        A.Normalize(mean=norm_mean, std=norm_std),
        AP.transforms.ToTensor(),
    ])
    test = A.Compose([
        A.Normalize(mean=norm_mean, std=norm_std),
        AP.transforms.ToTensor(),
    ])
    return (train, test)
def get_train_transforms():
    """Training pipeline for 1024x1024 detection data with pascal_voc boxes."""
    return A.Compose(
        [
            A.RandomSizedCrop(min_max_height=(650, 1024),
                              height=1024, width=1024, p=0.5),
            # Hand-tuned color jitter limits.
            A.HueSaturationValue(hue_shift_limit=0.014,
                                 sat_shift_limit=0.68,
                                 val_shift_limit=0.36,
                                 p=0.75),
            A.RandomBrightnessContrast(brightness_limit=0.1,
                                       contrast_limit=0.1, p=0.3),
            # Scale-only jitter (no shift/rotate), constant border.
            A.ShiftScaleRotate(scale_limit=(-0.5, 0.5),
                               rotate_limit=0,
                               shift_limit=0.,
                               p=0.5,
                               border_mode=0),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=1024, width=1024, p=1),
            A.Cutout(num_holes=8, max_h_size=64, max_w_size=64,
                     fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']),
    )
Esempio n. 15
0
def get_train_transforms():
    """Training pipeline (color jitter, blur, rotate, cutout) with bbox params."""
    gamma_or_contrast = A.OneOf(
        [
            A.RandomGamma(p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0.4,
                                       contrast_limit=0.85, p=0.5),
        ],
        p=0.5,
    )
    return A.Compose(
        [
            A.HueSaturationValue(hue_shift_limit=0.4,
                                 sat_shift_limit=0.4,
                                 val_shift_limit=0.4,
                                 p=0.5),
            gamma_or_contrast,
            A.Blur(p=0.5),
            A.ToGray(p=0.1),
            A.RandomRotate90(p=0.5),
            A.Resize(height=1024, width=1024, p=1),
            A.Cutout(num_holes=10, max_h_size=64, max_w_size=64,
                     fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']),
    )
Esempio n. 16
0
def get_augmentations_transform(crop_size=128, p=0.5, phase="train"):
    """Return an augmentation pipeline for the given dataset phase.

    Args:
        crop_size: side length of the random resized crop.
        p: probability applied to most stochastic transforms.
        phase: "train" or "test" for the augmented pipeline, "valid" for
            tensor conversion only.

    Returns:
        An A.Compose pipeline.

    Raises:
        TypeError: if `phase` is not one of the recognized values.
    """
    imagenet_stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    # BUG FIX: the original condition was `phase == "train" or "test"`, which
    # is always truthy ("test" is a non-empty string), so the "valid" and
    # error branches were unreachable.
    if phase in ("train", "test"):
        aug_factor_list = [
            A.RandomResizedCrop(height=crop_size, width=crop_size, scale=(0.8, 1.0)),
            A.Cutout(num_holes=8, p=p),
            A.RandomRotate90(p=p),
            A.HorizontalFlip(p=p),
            A.VerticalFlip(p=p),
            A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=p),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=p),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=p),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ], p=p),
            ToTensor(normalize=imagenet_stats)
        ]
        return A.Compose(aug_factor_list)
    if phase == "valid":
        return A.Compose([ToTensor(normalize=imagenet_stats)])
    # BUG FIX: the original constructed a TypeError but never raised it,
    # silently returning None for unknown phases.
    raise TypeError("Invalid phase type.")
Esempio n. 17
0
def get_train_transforms():
    """Training pipeline; `resolution` and `input_res` are module-level globals."""
    distortion = A.OneOf([
        A.OpticalDistortion(),
        A.GridDistortion(),
        A.IAAPiecewiseAffine(),
    ])
    color = A.OneOf([
        A.RandomBrightnessContrast(),
        A.HueSaturationValue(),
    ])
    return A.Compose(
        [
            A.JpegCompression(p=0.5),
            A.Rotate(limit=80, p=1.0),
            distortion,
            A.RandomSizedCrop(
                min_max_height=(int(resolution * 0.7), input_res),
                height=resolution,
                width=resolution,
                p=1.0),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.GaussianBlur(p=0.3),
            color,
            A.Cutout(num_holes=8,
                     max_h_size=resolution // 8,
                     max_w_size=resolution // 8,
                     fill_value=0,
                     p=0.3),
            A.Normalize(),
            ToTensorV2(),
        ],
        p=1.0,
    )
Esempio n. 18
0
def get_data_transform(path):
    """Build (train, test) transforms from dataset statistics found at `path`.

    Train uses albumentations (wrapped in AlbumCompose); test uses plain
    torchvision transforms.
    """
    mean, stdev = find_stats(path)
    input_size = 32
    # Cutout fill expressed in 0-255 pixel space.
    fill = [c * 255 for c in mean]

    train_albumentation_transform = A.Compose([
        A.Cutout(num_holes=2,
                 max_h_size=8,
                 max_w_size=8,
                 fill_value=fill,
                 always_apply=True,
                 p=0.5),
        A.HorizontalFlip(p=0.7, always_apply=True),
        A.Normalize(mean=tuple(mean),
                    std=tuple(stdev),
                    max_pixel_value=255,
                    always_apply=True,
                    p=1.0),
        A.Resize(input_size, input_size),
        A.RGBShift(r_shift_limit=50, g_shift_limit=50, b_shift_limit=50,
                   p=0.5),
        ToTensor(),
    ])

    # Test phase: tensor conversion + normalization only.
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(tuple(mean), tuple(stdev)),
    ])
    train_transforms = AlbumCompose(train_albumentation_transform)
    return train_transforms, test_transforms
Esempio n. 19
0
def cifar10_albumentations(mean, std):
    """Return wrapped (train, test) transforms for CIFAR-10.

    Note: Normalize must come before ToTensor in both pipelines.
    """
    train_transforms = A.Compose([
        A.GridDistortion(distort_limit=(-0.3, 0.3), p=0.5),
        A.Rotate(limit=(-10, 10), p=0.5),
        A.HorizontalFlip(p=0.25),
        A.Cutout(num_holes=1, max_h_size=12, max_w_size=12),
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ])

    test_transforms = A.Compose([
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ])

    return (Albumentation_Transforms(train_transforms),
            Albumentation_Transforms(test_transforms))
Esempio n. 20
0
def get_training_augmentation():
    """Training augmentation: sized crop to 320x320, flip, one-of color /
    sharpen-blur / weather groups, cutout, and normalization."""
    color = A.OneOf([
        A.CLAHE(),
        A.RandomBrightnessContrast(),
        A.RandomGamma(),
        A.HueSaturationValue(),
        A.NoOp(),
    ])
    sharpen_or_blur = A.OneOf([
        A.IAASharpen(),
        A.Blur(blur_limit=3),
        A.MotionBlur(blur_limit=3),
        A.NoOp(),
    ])
    weather = A.OneOf([
        A.RandomFog(),
        A.RandomSunFlare(src_radius=100),
        A.RandomRain(),
        A.RandomSnow(),
        A.NoOp(),
    ])
    return A.Compose([
        A.RandomSizedCrop(min_max_height=(300, 360),
                          height=320,
                          width=320,
                          always_apply=True),
        A.HorizontalFlip(p=0.5),
        color,
        sharpen_or_blur,
        weather,
        A.Cutout(),
        A.Normalize(),
    ])
Esempio n. 21
0
    def __init__(self, n, m):
        """RandAugment-style policy: pick `n` transforms, each at magnitude `m`.

        Args:
            n: number of transforms to sample per image; must not exceed the
                size of the candidate pool.
            m: magnitude on a 0-30 scale, mapped to each transform's range via
                m_ratio = m / 30.
        """
        self.n = n
        self.m = m

        # Fractional magnitude used to scale every transform's parameter range.
        m_ratio = self.m / 30.0
        self.augment_list = (
            A.CLAHE(always_apply=True),
            A.Equalize(always_apply=True),
            A.InvertImg(always_apply=True),
            A.Rotate(limit=30 * m_ratio, always_apply=True),
            A.Posterize(num_bits=int(4 * m_ratio), always_apply=True),
            # NOTE(review): threshold is in [0, 1] here while albumentations'
            # Solarize default operates on 0-255 pixel values — at small
            # m_ratio nearly every pixel would be inverted; confirm intent.
            A.Solarize(threshold=m_ratio, always_apply=True),
            A.RGBShift(r_shift_limit=110 * m_ratio,
                       g_shift_limit=110 * m_ratio,
                       b_shift_limit=110 * m_ratio,
                       always_apply=True),
            A.HueSaturationValue(hue_shift_limit=20 * m_ratio,
                                 sat_shift_limit=30 * m_ratio,
                                 val_shift_limit=20 * m_ratio,
                                 always_apply=True),
            A.RandomContrast(limit=m_ratio, always_apply=True),
            A.RandomBrightness(limit=m_ratio, always_apply=True),
            #  A.Sharpen(always_apply=True), 0.1, 1.9),
            # Horizontal-only shift (y component zeroed).
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_y=0,
                               rotate_limit=0,
                               always_apply=True),
            # Vertical-only shift (x component zeroed).
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_x=0,
                               rotate_limit=0,
                               always_apply=True),
            A.Cutout(num_holes=int(8 * m_ratio), always_apply=True),
            A.IAAAffine(shear=0.3 * m_ratio, always_apply=True))

        # Cannot sample more transforms than exist in the pool.
        assert self.n <= len(self.augment_list)
Esempio n. 22
0
def create_train_transforms(args, size=224):
    """Build train transforms from CLI flags; ViT-small gets only a resize.

    Flags consulted on `args`: model, gaussnoise (+ noise_p), clahe, cutout.
    """
    if args.model == 'vit_small_patch16_224':
        return Compose([albumentations.Resize(224, 224)])

    steps = []
    if args.gaussnoise:
        steps.append(
            albumentations.OneOf(
                [
                    albumentations.MotionBlur(blur_limit=5),
                    albumentations.MedianBlur(blur_limit=5),
                    albumentations.GaussNoise(var_limit=(5.0, 30.0)),
                ],
                p=args.noise_p))
    if args.clahe:
        steps.append(albumentations.CLAHE(clip_limit=4.0, p=0.1))
    if args.cutout:
        # Hole sized as 37.5% of a 270x480 frame.
        steps.append(
            albumentations.Cutout(max_h_size=int(270 * 0.375),
                                  max_w_size=int(480 * 0.375),
                                  num_holes=1,
                                  p=0.8))

    return Compose(steps)
Esempio n. 23
0
 def __init__(self):
     """Set up extra train-time augmentations: flip, 90-degree rotate, cutout."""
     self.extra_transform = A.Compose(
         [
             A.HorizontalFlip(p=0.5),
             A.RandomRotate90(p=1),
             A.Cutout(num_holes=8, max_h_size=20, max_w_size=20),
         ],
         p=1,
     )
Esempio n. 24
0
def get_pseudo_transforms():
    """Detection pipeline for pseudo-labelled data (`SIZE` is a module global)."""
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2, p=0.9),
        ],
        p=0.9)
    blur = A.OneOf(
        [A.Blur(blur_limit=3, p=1.0), A.MedianBlur(blur_limit=3, p=1.0)],
        p=0.1)
    return A.Compose(
        [
            A.RandomSizedCrop(min_max_height=(800, 1024),
                              height=1024, width=1024, p=0.5),
            color,
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Transpose(p=0.5),
            A.JpegCompression(quality_lower=85, quality_upper=95, p=0.2),
            blur,
            A.Resize(height=SIZE, width=SIZE, p=1),
            A.Cutout(num_holes=8, max_h_size=64, max_w_size=64,
                     fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']))
Esempio n. 25
0
def albumentations_transforms_old(p=1.0, is_train=False):
    """Return a callable mapping a PIL image to an augmented tensor.

    Args:
        p: probability passed to the composed pipeline.
        is_train: when True, apply the full training augmentation; otherwise
            apply only normalization and tensor conversion.
    """
    # Mean and standard deviation of train dataset
    mean = np.array([0.4914, 0.4822, 0.4465])
    std = np.array([0.2023, 0.1994, 0.2010])
    if is_train:
        transforms_list = [
            A.Normalize(mean=mean, std=std),
            A.PadIfNeeded(min_height=40,
                          min_width=40,
                          border_mode=4,
                          always_apply=True,
                          p=1.0),
            A.RandomCrop(32, 32, always_apply=True, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.Cutout(num_holes=1,
                     max_h_size=8,
                     max_w_size=8,
                     always_apply=False,
                     p=1),
            ToTensorV2()
        ]
    else:
        # BUG FIX: the original referenced the train-only list unconditionally,
        # raising UnboundLocalError whenever is_train was False. Eval data gets
        # normalization + tensor conversion only.
        transforms_list = [A.Normalize(mean=mean, std=std), ToTensorV2()]
    data_transforms = Compose(transforms_list, p=p)
    return lambda img: data_transforms(image=np.array(img))["image"]
Esempio n. 26
0
 def __init__(self, image_list, is_test=False):
     """Store the image list and build train/test augmentation pipelines."""
     self.image_list = image_list
     self.is_test = is_test
     mean = np.array([0.4914, 0.4822, 0.4465])
     SD = np.array([0.2023, 0.1994, 0.2010])
     # Pad with the dataset mean (0-255 space), crop back to 32x32, flip,
     # cutout filled with the mean, then normalize and convert to tensor.
     self.aug_train = A.Compose([
         A.PadIfNeeded(min_height=40,
                       min_width=40,
                       border_mode=BORDER_CONSTANT,
                       value=mean * 255.0,
                       p=1.0),
         A.RandomCrop(height=32, width=32, p=1.0),
         A.HorizontalFlip(p=0.5),
         A.Cutout(num_holes=1,
                  max_h_size=8,
                  max_w_size=8,
                  fill_value=mean * 255,
                  p=0.5),
         A.Normalize(mean=mean, std=SD, max_pixel_value=255, p=1.0),
         ToTensor(),
     ])
     self.aug_test = A.Compose([ToTensor()])
Esempio n. 27
0
def cifar_alb_trainData():
    '''Apply Albumentations data transforms to the dataset and returns iterable'''
    # NOTE: `mean` and `std` are module-level constants.
    pipeline = [
        A.HorizontalFlip(p=0.15),
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.05,
                           rotate_limit=15,
                           p=0.25),
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15,
                   p=0.5),
        A.RandomBrightnessContrast(p=0.25),
        A.RandomGamma(p=0.25),
        A.CLAHE(p=0.25),
        A.ChannelShuffle(p=0.1),
        A.ElasticTransform(p=0.1),
        A.MotionBlur(blur_limit=17, p=0.1),
        A.Cutout(num_holes=1,
                 max_h_size=16,
                 max_w_size=16,
                 fill_value=mean,
                 always_apply=False,
                 p=0.5),
        A.Normalize(mean=mean, std=std),
        ToTensor(),
    ]
    composed = A.Compose(pipeline)
    # Adapt a PIL image to albumentations' dict-based API.
    return lambda img: composed(image=np.array(img))["image"]
Esempio n. 28
0
def get_train_transform():
    """Detection train pipeline (`our_image_size` is a module global)."""
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2, p=0.9),
        ],
        p=0.9)
    return A.Compose(
        [
            A.RandomSizedCrop(min_max_height=(800, 800),
                              height=1024, width=1024, p=0.5),
            color,
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=our_image_size, width=our_image_size, p=1),
            A.Cutout(num_holes=8,
                     max_h_size=our_image_size // 8,
                     max_w_size=our_image_size // 8,
                     fill_value=0,
                     p=0.5),
        ],
        bbox_params={
            'format': 'pascal_voc',
            'label_fields': ['labels']
        })
Esempio n. 29
0
def get_train_transforms():
    """Detection train pipeline: crop, color jitter, flips, 512 resize, cutout."""
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2, p=0.9),
        ],
        p=0.9)
    return A.Compose(
        [
            A.RandomSizedCrop(min_max_height=(800, 800),
                              height=1024, width=1024, p=0.5),
            color,
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=512, width=512, p=1),
            A.Cutout(num_holes=8, max_h_size=64, max_w_size=64,
                     fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']))
Esempio n. 30
0
def get_album_transforms(norm_mean, norm_std):
    '''
    get the train and test transform by albumentations
    '''
    train_transform = A.Compose([
        A.PadIfNeeded(min_height=36, min_width=36, border_mode=4,
                      value=[0, 0, 0], always_apply=True),
        A.RandomResizedCrop(height=32, width=32, always_apply=True),
        A.Flip(0.5),
        A.Cutout(num_holes=1, max_h_size=8, max_w_size=8,
                 fill_value=0, always_apply=False, p=0.5),
        A.Normalize(mean=norm_mean, std=norm_std),
        ToTensorV2(),
    ])
    test_transform = A.Compose([
        A.Normalize(mean=norm_mean, std=norm_std),
        ToTensorV2(),
    ])
    return train_transform, test_transform