Ejemplo n.º 1
0
def strong_aug(p=.5):
    """Build a heavy augmentation pipeline, applied as a whole with probability *p*."""
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.4)
    blur = OneOf(
        [MotionBlur(p=.2), MedianBlur(blur_limit=3, p=.1), Blur(blur_limit=3, p=.1)],
        p=0.3,
    )
    warp = OneOf(
        [OpticalDistortion(p=0.3), GridDistortion(p=.1), IAAPiecewiseAffine(p=0.3)],
        p=0.2,
    )
    tone = OneOf(
        [CLAHE(clip_limit=2), IAASharpen(), RandomContrast(), RandomBrightness()],
        p=0.3,
    )
    steps = [
        HorizontalFlip(),
        noise,
        blur,
        warp,
        tone,
        HueSaturationValue(p=0.3),
        ChannelShuffle(),
        Cutout(num_holes=20, max_h_size=16, max_w_size=16),
    ]
    return Compose(steps, p=p)
Ejemplo n.º 2
0
def strong_aug(p=0.5):
    """Augmentation pipeline mixing dropout, noise, blur, distortion and tone."""
    dropout = OneOf([CoarseDropout(p=0.5), Cutout(p=0.5)], p=0.3)
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    warp = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    tone = OneOf([IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.2)
    return Compose([
        dropout,
        noise,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        blur,
        warp,
        tone,
    ], p=p)
    def prepare_data(self):
        """Build the augmentation pipeline and dataset splits (idempotent)."""
        if self.data_prepared:
            return True

        aug = Compose([
            ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1,
                             rotate_limit=0.5),
            GridDistortion(),
            Cutout(),
        ])

        # Dataset parameters forwarded from the hyper-parameters object.
        params = dict(
            mode=self.hparams.output_mode,
            transform=aug,
            metadata=self.hparams.metadata,
            one_hot_time=self.hparams.one_hot_time,
            consensus_threshold=self.hparams.consensus_threshold,
            cleaning_strat=self.hparams.cleaning_strat,
            relabeled_name=self.hparams.relabeled_name,
        )

        # Build the dataset and its train/val/test split.
        self.dataset = SONYCUST_TALNet(self.hparams.path_to_SONYCUST, **params)
        splits = self.dataset.train_validation_test_split()
        self.train_dataset, self.val_dataset, self.test_dataset = splits
        self.data_prepared = True
Ejemplo n.º 4
0
 def __init__(self):
     """Build the albumentations training pipeline.

     Bug fix: the transforms were wrapped in a set literal ``{...}``, which
     has no defined iteration order, so e.g. Normalize could run before the
     flips and cutout. Compose expects an ordered sequence — use a list.
     """
     self.albumentations_transform = Compose([
         Cutout(max_h_size=12, max_w_size=12, num_holes=1, p=0.5),
         HorizontalFlip(p=0.5),
         VerticalFlip(p=0.5),
         Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
     ])
Ejemplo n.º 5
0
 def __init__(self, df, image_idx, mode='train'):
     """Keep image paths/labels from *df* and build train/valid transforms."""
     self.imglist = df['image_path'].values
     self.labellist = df['label'].values
     self.index = image_idx
     self.mode = mode

     train_steps = [
         # RandomRotate90() was tried here and left disabled.
         GridDistortion(p=0.6),
         HorizontalFlip(p=0.6),
         ElasticTransform(alpha=1, sigma=25, alpha_affine=50, p=0.75),
         OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.5),
         Cutout(num_holes=30, max_h_size=9, max_w_size=11,
                fill_value=128, p=0.75),
         ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.15,
                          rotate_limit=15, p=0.75),
         # Normalize()/ToTensor() intentionally left disabled here.
     ]
     self.train_transformation = Compose(train_steps)
     # Validation applies no augmentation (Normalize/ToTensor also disabled).
     self.valid_transformation = Compose([])
Ejemplo n.º 6
0
 def strong_aug(self):
     """Return a randomized color/occlusion augmentation pipeline.

     Bug fix: ``random.randint`` is inclusive on both endpoints, so the
     previous upper bound of 256 could yield the out-of-range channel value
     256; valid 8-bit channels are 0-255.
     """
     color_r = random.randint(0, 255)
     color_g = random.randint(0, 255)
     color_b = random.randint(0, 255)
     # Either two smaller holes or one large hole.
     num_holes = random.randint(1, 2)
     if num_holes == 2:
         max_h_size = random.randint(15, 30)
         max_w_size = random.randint(15, 30)
     else:
         max_h_size = random.randint(30, 60)
         max_w_size = random.randint(30, 60)
     return Compose([
         OneOf([
             OneOf([
                 MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True,
                                     per_channel=True, p=0.2),
                 IAAAdditiveGaussianNoise(),
                 GaussNoise(),
             ]),
             OneOf([InvertImg(), ToSepia()]),
             OneOf([
                 ChannelDropout(channel_drop_range=(1, 1), fill_value=0),
                 ChannelShuffle(),
             ]),
             HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30,
                                val_shift_limit=20, p=0.1),
         ], p=0.25),
         # Cutout holes are filled with the random color sampled above.
         Cutout(num_holes=num_holes, max_h_size=max_h_size,
                max_w_size=max_w_size,
                fill_value=[color_r, color_g, color_b], p=0.9),
     ])
Ejemplo n.º 7
0
def get_transforms(phase):
    """Return the transform pipeline for *phase*.

    Train adds flips, cutout and shift/scale/rotate; every phase ends with
    tensor conversion.
    """
    transforms = []
    if phase == "train":
        transforms += [
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Cutout(num_holes=4, p=0.5),
            ShiftScaleRotate(p=1, border_mode=cv2.BORDER_CONSTANT),
            # Elastic/grid/optical distortion, blur and gamma augmentations
            # were tried here and intentionally left disabled.
        ]
    # RandomResizedCrop/Normalize were tried here and left disabled.
    transforms.append(ToTensor())
    return Compose(transforms)
Ejemplo n.º 8
0
def get_transforms1(*, data, CFG):
    """Return the train or valid pipeline driven by ``CFG.augmentation``.

    Returns None for any other *data* value (unchanged behavior).
    """
    imagenet_norm = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
    if data == 'train':
        aug = CFG.augmentation
        return Compose([
            # RandomCrop / GaussNoise / RandomGamma / GaussianBlur variants
            # were tried here and left disabled.
            HorizontalFlip(p=aug.augmix_p),
            VerticalFlip(p=aug.augmix_p),
            RandomContrast(p=aug.contrast_p),
            RandomRotate90(p=aug.rotate_90_p),
            RandomBrightnessContrast(p=aug.bright_contrast_p),
            RandomAugMix(severity=aug.augmix_s, width=3, alpha=1.,
                         p=aug.augmix_p),
            GridMask(num_grid=aug.grdimask_n, p=aug.grdimask_p),
            Cutout(p=aug.cutout_p, max_h_size=aug.cutout_h,
                   max_w_size=aug.cutout_w),
            imagenet_norm,
        ])
    if data == 'valid':
        return Compose([imagenet_norm])
Ejemplo n.º 9
0
def strong_aug(p=1.0):
    """CIFAR-style training augmentation ending in normalized tensors."""
    cifar_mean = (0.4914, 0.4822, 0.4465)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    return Compose([
        # Single cutout hole filled with the dataset mean in 0-255 space.
        Cutout(num_holes=1, max_h_size=16, max_w_size=16,
               fill_value=[c * 255 for c in cifar_mean], p=1.),
        HorizontalFlip(p=0.5),
        # RandomGamma / ElasticTransform were tried and left disabled.
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=20, p=1.),
        enhance,
        # HueSaturationValue(p=0.3) was tried and left disabled.
        ToTensor(normalize={'mean': cifar_mean,
                            'std': (0.2023, 0.1994, 0.2010)}),
    ], p=p)
Ejemplo n.º 10
0
    def policy_space(self, trial):
        """Sample an augmentation policy from an Optuna *trial*.

        Fix: the f-string prefixes on some parameter names were pointless
        (no placeholders); plain string literals are used instead. The
        sampled strings are byte-identical, so stored studies still match.
        """
        augments = list()

        # ShiftScaleRotate search space.
        shift_limit = trial.suggest_uniform("shift_limit", 0, 0.25)
        scale_limit = trial.suggest_uniform("scale_limit", 0, 0.25)
        rotate_limit = trial.suggest_int("rotate_limit", 0, 45)
        p = trial.suggest_uniform("shiftscalerotate_prob", 0, 1)
        augments.append(
            ShiftScaleRotate(shift_limit=shift_limit,
                             scale_limit=scale_limit,
                             rotate_limit=rotate_limit,
                             p=p))

        # GridDistortion search space.
        num_steps = trial.suggest_int("num_steps", 1, 7)
        distort_limit = trial.suggest_uniform("distort_limit", 0, 0.5)
        p = trial.suggest_uniform("griddistortion_prob", 0, 1)
        augments.append(
            GridDistortion(num_steps=num_steps,
                           distort_limit=distort_limit,
                           p=p))

        # Cutout search space.
        n_hole = trial.suggest_int("num_holes", 1, 16)
        h_size = trial.suggest_int("max_h_size", 1, 20)
        w_size = trial.suggest_int("max_w_size", 1, 20)
        p = trial.suggest_uniform("cutout_prob", 0, 1)
        augments.append(
            Cutout(num_holes=n_hole, max_h_size=h_size, max_w_size=w_size,
                   p=p))

        return Compose(augments)
Ejemplo n.º 11
0
    def __init__(self):
        """Define dataset statistics and the train/test/main transform lists."""
        self.mean = np.array([0.4914, 0.4822, 0.4465])
        self.std = np.array([0.2023, 0.1994, 0.2010])
        fill = self.mean * 255.0  # pad/cutout fill value in 0-255 space

        # Extra train-only augmentations.
        self.transforms_elist = [
            PadIfNeeded(min_height=36, min_width=36, value=fill),
            RandomCrop(height=32, width=32, p=1.0),
            HorizontalFlip(p=0.5),
            # RandomBrightnessContrast / Rotate were tried and left disabled.
            Cutout(num_holes=1, max_h_size=8, max_w_size=8,
                   fill_value=fill, p=0.5),
        ]

        # Test-only resizing.
        self.transforms_test = [Resize(32, 32)]

        # Shared tail: normalize then convert to tensor.
        self.transforms_main = [
            Normalize(mean=self.mean, std=self.std,
                      max_pixel_value=255.0, p=1.0),
            ToTensor(),
        ]
def train_transform(upside_down=False):
    """Resize/pad plus flip/cutout/noise/warp pipeline for training.

    *upside_down* makes VerticalFlip fire always (p=1) instead of never (p=0).
    """
    return Compose([
        Resize(202, 202, interpolation=cv2.INTER_NEAREST),
        PadIfNeeded(min_height=SIZE, min_width=SIZE,
                    border_mode=cv2.BORDER_REPLICATE),
        VerticalFlip(p=int(upside_down)),
        HorizontalFlip(p=0.5),
        Cutout(p=0.5, num_holes=5, max_h_size=5, max_w_size=5),
        OneOf([Blur(), IAAAdditiveGaussianNoise()], p=0.1),
        # TODO: tune elastic parameters.
        ElasticTransform(p=0.25, alpha=1, sigma=30, alpha_affine=30),
        # NOTE(review): limits of .15 look suspicious — rotate_limit is in
        # degrees, so 0.15 barely rotates; confirm intent (marked TODO by
        # the original author). BORDER_REFLECT_101 was tried for the border.
        ShiftScaleRotate(p=0.25, rotate_limit=.15, shift_limit=.15,
                         scale_limit=.15, interpolation=cv2.INTER_CUBIC,
                         border_mode=cv2.BORDER_REPLICATE),
        Normalize(),
    ], p=1)
def get_transforms(*, data):
    """Train/valid pipelines at ``CFG.size`` ending in normalized tensors."""
    finish = [
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ]
    if data == 'train':
        return Compose([
            # Plain Resize(CFG.size, CFG.size) was tried and left disabled.
            RandomResizedCrop(CFG.size, CFG.size, scale=(0.85, 1.0)),
            HorizontalFlip(p=0.5),
            RandomBrightnessContrast(p=0.2, brightness_limit=(-0.2, 0.2),
                                     contrast_limit=(-0.2, 0.2)),
            HueSaturationValue(p=0.2, hue_shift_limit=0.2,
                               sat_shift_limit=0.2, val_shift_limit=0.2),
            ShiftScaleRotate(p=0.2, shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=20),
            CoarseDropout(p=0.2),
            Cutout(p=0.2, max_h_size=16, max_w_size=16,
                   fill_value=(0., 0., 0.), num_holes=16),
        ] + finish)
    if data == 'valid':
        return Compose([Resize(CFG.size, CFG.size)] + finish)
def hard_transform(image_size=224, p=0.5):
    """Aggressive augmentation set; each step fires with probability *p*."""
    hole = image_size // 4  # each cutout hole may span a quarter of a side
    color_jitter = OneOf([
        HueSaturationValue(p=p),
        ToGray(p=p),
        RGBShift(p=p),
        ChannelShuffle(p=p),
    ])
    return Compose([
        Cutout(num_holes=4, max_w_size=hole, max_h_size=hole, p=p),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15,
                         border_mode=cv2.BORDER_REFLECT, p=p),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        color_jitter,
        RandomBrightnessContrast(brightness_limit=0.5,
                                 contrast_limit=0.5, p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
Ejemplo n.º 15
0
def get_transforms(*, data):
    """Return heavy train augmentations or plain valid preprocessing."""
    tail = [
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ]
    if data == 'train':
        head = [
            RandomResizedCrop(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(p=0.5),
            HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                               val_shift_limit=0.2, p=0.5),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1), p=0.5),
            CoarseDropout(p=0.5),
            Cutout(p=0.5),
        ]
        return Compose(head + tail)
    if data == 'valid':
        return Compose([Resize(CFG.size, CFG.size)] + tail)
Ejemplo n.º 16
0
    def gettraintransforms(self, mean, std, p=1):
        """Training-phase pipeline, applied as a whole with probability *p*."""
        blur = OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2)
        warp = OneOf([
            OpticalDistortion(p=0.4),
            GridDistortion(p=0.2),
        ], p=0.3)
        steps = [
            RandomRotate90(),
            Flip(),
            GaussNoise(p=0.6, mean=mean),
            blur,
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=45, p=0.3),
            warp,
            HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1,
                               val_shift_limit=0.1, p=0.3),
            # Cutout always fires; holes are filled with 60% grey.
            Cutout(always_apply=True, num_holes=2, max_h_size=10,
                   max_w_size=10, fill_value=(255 * .6)),
            Normalize(mean=mean, std=std, always_apply=True),
            pytorch.ToTensorV2(always_apply=True),
        ]
        return Compose(steps, p=p)
Ejemplo n.º 17
0
def transform_v2(config):
    """Build (train, test) pipelines sized by ``config.image_size``."""
    size = config.image_size
    hole = int(size * 0.6)  # single large cutout, up to 60% of each side
    train = Compose([
        HorizontalFlip(p=0.5),
        ImageCompression(quality_lower=99, quality_upper=100),
        ShiftScaleRotate(shift_limit=0.25, scale_limit=0.25,
                         rotate_limit=10, border_mode=0, p=0.7),
        Resize(size, size),
        Cutout(max_h_size=hole, max_w_size=hole, num_holes=1, p=0.5),
        Normalize(),
        ToTensor(),
    ])
    test = Compose([
        Resize(size, size),
        Normalize(),
        ToTensor(),
    ])
    return train, test
Ejemplo n.º 18
0
 def __init__(self, flag):
     """Store *flag* and build train (flip+cutout) and test pipelines."""
     self.traintransform = Compose([
         HorizontalFlip(),
         Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         # NOTE: cutout runs after Normalize here, so holes are punched in
         # normalized space.
         Cutout(num_holes=1, max_h_size=4, max_w_size=4, p=0.3),
         ToTensor(),
     ])
     self.testtransform = Compose([
         Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ToTensor(),
     ])
     self.flag = flag
Ejemplo n.º 19
0
 def __init__(self):
     """Train transforms: small rotation, flips, cutout, 0.5-normalization."""
     self.albTrainTransforms = Compose([
         # Resize(256, 256) was tried and left disabled.
         Rotate((-10.0, 10.0)),  # up to +/-10 degrees
         HorizontalFlip(p=0.5),
         VerticalFlip(p=0.5),
         Cutout(num_holes=8, max_h_size=8, max_w_size=8),
         Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
         ToTensor(),
     ])
Ejemplo n.º 20
0
 def __init__(self):
     """CIFAR-style augmentations; cutout holes filled with dataset mean."""
     mean_fill = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255]
     self.alb_transform = Compose([
         Rotate((-30.0, 30.0)),
         HorizontalFlip(),
         RGBShift(r_shift_limit=50, g_shift_limit=50, b_shift_limit=50,
                  p=0.5),
         Cutout(num_holes=8, max_h_size=8, max_w_size=8,
                fill_value=mean_fill, always_apply=False, p=0.7),
         Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
         ToTensor(),
     ])
Ejemplo n.º 21
0
 def __init__(self):
     """Pad-then-crop augmentation with cutout for 32x32 inputs."""
     self.albumentation_transforms = Compose([
         PadIfNeeded(min_height=36, min_width=36),
         Cutout(num_holes=4),
         # RandomSizedCrop variant was tried and left disabled.
         RandomCrop(32, 32),
         HorizontalFlip(),
         Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
         ToTensor(),
     ])
Ejemplo n.º 22
0
 def __init__(self):
     """Build the training augmentation pipeline.

     Bug fix: the transforms were passed to Compose in a set literal
     ``{...}``; sets have no defined order, so the intended sequence
     (pad -> crop -> flips -> cutout -> normalize) was not guaranteed.
     Use an ordered list instead.
     """
     self.albumentations_transform = Compose([
         PadIfNeeded(min_height=40, min_width=40),
         RandomCrop(32, 32),
         Flip(),
         VerticalFlip(p=0.5),
         Cutout(max_h_size=8, max_w_size=8, num_holes=1),
         Normalize((0.49139968, 0.48215841, 0.44653091),
                   (0.24703223, 0.24348513, 0.26158784))
     ])
 def strong_aug(p=1.0):
     """Pad/crop/flip/cutout pipeline applied with probability *p*."""
     steps = [
         PadIfNeeded(min_height=36, min_width=36, p=1),
         RandomCrop(height=32, width=32, p=1),
         HorizontalFlip(p=0.5),
         Cutout(num_holes=1, max_h_size=8, max_w_size=8,
                fill_value=127, p=0.4),
     ]
     return Compose(steps, p=p)
Ejemplo n.º 24
0
 def __init__(self, p=0.5):
     """Photometric augmentations plus either gaussian noise or cutout."""
     noise_or_cutout = OneOf([
         GaussNoise(),
         Cutout(num_holes=10, max_h_size=5, max_w_size=5),
     ])
     # NOTE(review): the *p* parameter is accepted but not used anywhere in
     # this pipeline — confirm whether it was meant to be passed to Compose.
     self.aug = Compose([
         RandomBrightnessContrast(),
         RandomGamma(),
         CLAHE(),
         noise_or_cutout,
     ])
Ejemplo n.º 25
0
 def strong_aug(self, p=.5):
     """Geometric + color augmentations, applied with probability *p*."""
     return Compose([
         Transpose(),
         ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                          rotate_limit=45, p=0.2),
         HueSaturationValue(p=0.3),
         HorizontalFlip(always_apply=False, p=0.5),
         # Cutout holes are filled with mid-grey (0.5 * 255).
         Cutout(num_holes=1, max_h_size=8, max_w_size=8,
                fill_value=0.5 * 255),
         # Normalize/ToTensor intentionally left disabled here.
     ], p=p)
 def __init__(self, image_size=(126, 32)):
     """Build the spectrogram augmentation pipeline.

     Several prepared steps (delta features, background blend/suppression,
     blur, noise, brightness/contrast) are constructed below but currently
     commented out of the Compose list.
     """
     self.image_size = image_size
     # NOTE(review): hard-coded absolute path to a background spectrogram —
     # only valid on the original author's machine; consider parameterizing.
     self.background_blend = BackgroundBlend(
         "/home/agarcia/repos/mafat-radar-challenge/mafat_radar_challenge/data/mafat_background_v1_spectrogram.npy",
         alpha=0.8,
         p=0.2,
     )
     self.gaussian_filter = GaussianFilter(kernel_size=(20, 1))
     self.rolling_x = RollingX(shift=(-20, 20))
     self.rolling_y = RollingY(shift=(-35, 35))
     self.delta = Delta()
     self.deltadelta = DeltaDelta()
     self.background_supp = BackgroundSuppression()
     self.freq_mask = FreqMask(F=(5, 25), num_masks=(1, 3))
     self.time_mask = TimeMask(T=(1, 5), num_masks=(1, 10))
     # Active pipeline: roll, min-max normalize, flips/rotation, cutout,
     # then time/frequency masking.
     self.aug = Compose([
         Lambda(self.rolling_x.transform),
         Lambda(self.rolling_y.transform),
         # Lambda(self.background_supp.transform),  # Background suppresion
         Lambda(minmax_norm),  # This is needed for Noise and Blur addition
         HorizontalFlip(p=0.5),
         VerticalFlip(p=0.5),
         Rotate(limit=(180, 180), p=0.5),  # exact 180-degree rotation
         # ShiftScaleRotate(
         #     shift_limit=0.1,
         #     scale_limit=0,
         #     rotate_limit=0,
         #     p=0.5,
         #     border_mode=cv2.BORDER_CONSTANT,
         # ),
         # OneOf(
         #     [
         #         MultiplicativeNoise(
         #             multiplier=[0.8, 1.3], elementwise=True, p=0.25
         #         ),
         #         GaussianBlur(p=0.25, blur_limit=(1, 3)),
         #     ]
         # ),
         # RandomBrightnessContrast(
         #     brightness_limit=0.1, contrast_limit=0.1, p=0.1
         # ),
         # Single hole sized to 20% of each image dimension.
         Cutout(
             num_holes=1,
             max_h_size=int(0.2 * self.image_size[0]),
             max_w_size=int(0.2 * self.image_size[1]),
             p=0.5,
         ),
         # Lambda(self.delta.transform),
         # Lambda(self.deltadelta.transform),
         # Lambda(self.background_blend.transform),
         # Lambda(self.gaussian_filter.transform),  # Gaussian
         Lambda(self.time_mask.transform),
         Lambda(self.freq_mask.transform),
         # iaa.CenterCropToFixedSize(height=90, width=None),
     ])
Ejemplo n.º 27
0
 def __init__(self):
     """Small-rotation augmentation with cutout and coarse dropout."""
     self.albumentation_transforms = Compose([
         Rotate((-7.0, 7.0)),
         Cutout(),
         CoarseDropout(),
         # RandomSizedCrop / RandomCrop variants tried and left disabled.
         HorizontalFlip(),
         Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
         ToTensor(),
     ])
Ejemplo n.º 28
0
 def __init__(self):
     """Rotate/flip/shuffle, pad+crop to 32x32, cutout, then normalize."""
     self.albTrainTransforms = Compose([
         # Resize(256, 256) was tried and left disabled.
         Rotate((-10.0, 10.0)),
         HorizontalFlip(p=0.5),
         ChannelShuffle(p=0.5),
         PadIfNeeded(min_height=36, min_width=36),
         RandomCrop(height=32, width=32, p=1.0),
         Cutout(num_holes=1, max_h_size=8, max_w_size=8),
         Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
         ToTensor(),
     ])
Ejemplo n.º 29
0
 def build_train(self):
     """Return the training pipeline.

     NOTE(review): Normalize runs *before* the contrast/brightness/cutout
     steps, so those operate in normalized space — confirm this ordering
     is intentional.
     """
     return Compose([
         HorizontalFlip(p=0.5),
         VerticalFlip(p=0.5),
         Normalize(mean=self.MEAN, std=self.STD),
         RandomContrast(p=0.2),
         RandomBrightness(p=0.2),
         RandomSizedCrop((240, 256), self.H, self.W,
                         w2h_ratio=1600 / 256),
         Cutout(max_h_size=32, max_w_size=32),
         ToTensor(),
     ])
Ejemplo n.º 30
0
 def __init__(self):
     """Train transforms: flip + cutout, then 0.5-normalize and tensorize."""
     self.albumentations_transform_train = Compose([
         HorizontalFlip(),
         Cutout(),
         # CLAHE() was tried and left disabled.
         Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
         ToTensor(),
     ])