Example no. 1
# assumed context: np is numpy; ta/tc are albumentations aliases; tp is albumentations.pytorch
def AlbumentationTrainTransform(self):
    # NOTE: min_height/min_width of 4 add no padding to a 32x32 input, so the 32x32
    # RandomCrop is a no-op; a pad-to-36-then-crop-to-32 scheme was likely intended.
    tf = tc.Compose([ta.PadIfNeeded(min_height=4, min_width=4, always_apply=True),
                     ta.RandomCrop(height=32, width=32, always_apply=True),
                     ta.Cutout(num_holes=1, max_h_size=8, max_w_size=8, always_apply=True),
                     ta.HorizontalFlip(),
                     # the legacy ToTensor takes the mean/std pair via its `normalize` keyword
                     tp.ToTensor(normalize=dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
                     ])
    return lambda img: tf(image=np.array(img))["image"]
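A minimal usage sketch, assuming albumentations and torchvision are installed: the lambda returned above is a plain callable, so a pipeline like this can be passed as a torchvision dataset's transform. The simplified pipeline and wiring below are illustrative rather than the exact code above.

import numpy as np
from albumentations import Compose, PadIfNeeded, RandomCrop, HorizontalFlip, Normalize
from albumentations.pytorch import ToTensorV2
from torch.utils.data import DataLoader
from torchvision import datasets

_tf = Compose([
    PadIfNeeded(min_height=36, min_width=36),        # 32x32 -> 36x36
    RandomCrop(height=32, width=32),                 # back to 32x32 with a random offset
    HorizontalFlip(p=0.5),
    Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ToTensorV2(),
])

def train_transform(img):
    # torchvision hands over a PIL image; albumentations expects an HWC numpy array
    return _tf(image=np.array(img))["image"]

train_set = datasets.CIFAR10(root="./data", train=True, download=True, transform=train_transform)
loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=2)
images, labels = next(iter(loader))   # images: float tensor of shape [128, 3, 32, 32]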
Example no. 2
# assumed context: A/C are albumentations aliases, P is albumentations.pytorch; cv2 and numpy (np) are imported
def model12_train_transforms():
    transform = C.Compose([
        A.PadIfNeeded(min_height=70, min_width=70, border_mode=cv2.BORDER_CONSTANT,
                      value=0.5),
        A.RandomCrop(height=64, width=64),
        A.HorizontalFlip(p=0.5),
        A.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=1),
        # the legacy ToTensor takes the mean/std pair via its `normalize` keyword
        P.ToTensor(normalize=dict(mean=(0.4802, 0.4481, 0.3975), std=(0.2302, 0.2265, 0.2262)))
    ])
    return lambda img: transform(image=np.array(img))["image"]
Example no. 3
# assumed context: same aliases as above (A/C -> albumentations, P -> albumentations.pytorch)
def model11_davidnet_train_transforms():
    transform = C.Compose([
        A.PadIfNeeded(min_height=36, min_width=36, border_mode=cv2.BORDER_CONSTANT,
                      value=0.5),
        A.RandomCrop(height=32, width=32, p=1),
        A.HorizontalFlip(p=0.5),
        A.Cutout(num_holes=1, max_h_size=8, max_w_size=8, p=1),
        # the legacy ToTensor takes the mean/std pair via its `normalize` keyword
        P.ToTensor(normalize=dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
    ])
    return lambda img: transform(image=np.array(img))["image"]
Example no. 4
# assumed context: A is albumentations; transforms/geometric/crops/rotate are albumentations
# submodule aliases; ToTensorV2 comes from albumentations.pytorch; cv2 is imported;
# TARGET_SIZE and config are defined elsewhere
def get_presize_combine_transforms_V4():
    # first stage: pad, warp, flip and crop down to TARGET_SIZE x TARGET_SIZE
    transforms_presize = A.Compose([
        transforms.PadIfNeeded(600, 800),
        geometric.Perspective(
            scale=[0, .1],
            pad_mode=cv2.BORDER_REFLECT,
            interpolation=cv2.INTER_AREA, p=.3),
        transforms.Flip(),
        geometric.ShiftScaleRotate(interpolation=cv2.INTER_LANCZOS4, p=0.95, scale_limit=0.0),
        crops.RandomResizedCrop(
            TARGET_SIZE, TARGET_SIZE,
            scale=(config['rrc_scale_min'], config['rrc_scale_max']),
            ratio=(.70, 1.4),
            interpolation=cv2.INTER_CUBIC,
            p=1.0),
        transforms.Transpose()
        #rotate.Rotate(interpolation=cv2.INTER_LANCZOS4, p = 0.99),
    ])
    
    # second stage: photometric, noise and distortion augmentations, then Normalize + ToTensorV2
    transforms_postsize = A.Compose([
        #imgaug.IAAPiecewiseAffine(),

        transforms.CoarseDropout(),
        transforms.CLAHE(p=.1),
        transforms.RandomToneCurve(scale=.1, p=0.2),
        transforms.RandomBrightnessContrast(
            brightness_limit=.1, 
            contrast_limit=0.4,
            p=.8),
        transforms.HueSaturationValue(
            hue_shift_limit=20, 
            sat_shift_limit=50,
            val_shift_limit=0, 
            p=0.5),
        transforms.Equalize(p=0.05),
        transforms.FancyPCA(p=0.05),
        transforms.RandomGridShuffle(p=0.1),
        A.OneOf([
                transforms.MotionBlur(blur_limit=(3, 9)),
                transforms.GaussianBlur(),
                transforms.MedianBlur()
            ], p=0.1),
        transforms.ISONoise(p=.2),
        transforms.GaussNoise(var_limit=127., p=.3),
        A.OneOf([
            transforms.GridDistortion(interpolation=cv2.INTER_AREA, distort_limit=[0.7, 0.7], p=0.5),
            transforms.OpticalDistortion(interpolation=cv2.INTER_AREA, p=.3),
        ], p=.3),
        geometric.ElasticTransform(alpha=4, sigma=4, alpha_affine=4, interpolation=cv2.INTER_AREA, p=0.3),
        transforms.CoarseDropout(),
        transforms.Normalize(),
        ToTensorV2()
    ])
    return transforms_presize, transforms_postsize
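The split into a "presize" (geometric) stage and a "postsize" (mostly photometric) stage lets the heavy geometric work run once per sample while the cheaper pixel-level work runs on the fixed-size result. A minimal standalone sketch of the same two-stage pattern with a reduced pipeline; the target size and parameter values here are illustrative only.

import cv2
import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensorV2

TARGET = 256  # illustrative target size

presize = A.Compose([
    A.PadIfNeeded(600, 800),
    A.ShiftScaleRotate(scale_limit=0.0, p=0.95),
    A.Resize(TARGET, TARGET, interpolation=cv2.INTER_CUBIC),
])
postsize = A.Compose([
    A.CoarseDropout(),
    A.RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.4, p=0.8),
    A.Normalize(),
    ToTensorV2(),
])

img = np.random.randint(0, 256, (600, 800, 3), dtype=np.uint8)   # dummy RGB image
out = postsize(image=presize(image=img)["image"])["image"]       # torch tensor, shape [3, 256, 256]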
Example no. 5
# assumed context: same albumentations submodule aliases as above; TARGET_SIZE_VALID is defined elsewhere
def get_valid_transforms():
    # note: this "validation" pipeline still applies random rotation, resized crop and flips (TTA-style)
    return A.Compose([
        transforms.Transpose(),
        transforms.PadIfNeeded(600, 800),
        rotate.Rotate(interpolation=cv2.INTER_LANCZOS4, p=0.90),
        crops.RandomResizedCrop(
            TARGET_SIZE_VALID, TARGET_SIZE_VALID,
            scale=(.75, 1),
            interpolation=cv2.INTER_CUBIC,
            p=1.0),
        transforms.Flip(),
        #transforms.RandomToneCurve(scale=.1),
        #transforms.RandomBrightnessContrast(brightness_limit=0.0, contrast_limit=0.3, p=.7),
        #transforms.HueSaturationValue(hue_shift_limit=10, 
        #                           sat_shift_limit=10,
        #                           val_shift_limit=5, p=0.6),
        transforms.Normalize(),
        ToTensorV2()
    ])
Example no. 6
# assumed context: same albumentations submodule aliases as above; TARGET_SIZE and config are defined elsewhere
def get_train_transforms():
    return A.Compose([
        transforms.PadIfNeeded(600, 800),
        geometric.ShiftScaleRotate(interpolation=cv2.INTER_LANCZOS4, p=0.99, scale_limit=0.8),
        geometric.Perspective(pad_mode=cv2.BORDER_REFLECT, interpolation=cv2.INTER_AREA),
        crops.RandomResizedCrop(
            TARGET_SIZE, TARGET_SIZE,
            scale=(config['rrc_scale_min'], config['rrc_scale_max']),
            interpolation=cv2.INTER_CUBIC,
            p=1.0),
        transforms.Transpose(),
        transforms.Flip(),
        transforms.CoarseDropout(),
        transforms.CLAHE(p=.1),
        transforms.RandomToneCurve(scale=.1),
        transforms.RandomBrightnessContrast(
            brightness_limit=.1, 
            contrast_limit=0.3,
            p=.7),
        transforms.HueSaturationValue(
            hue_shift_limit=20, 
            sat_shift_limit=60,
            val_shift_limit=0, 
            p=0.6),
        transforms.RandomGridShuffle(p=0.1),
        A.OneOf([
                transforms.MotionBlur(blur_limit=(3, 9)),
                transforms.GaussianBlur(),
                transforms.MedianBlur()
            ], p=0.2),
        transforms.ISONoise(p=.3),
        transforms.GaussNoise(var_limit=255., p=.3),
        A.OneOf([
            transforms.GridDistortion(interpolation=cv2.INTER_AREA, distort_limit=[0.7, 0.7], p=0.5),
            transforms.OpticalDistortion(interpolation=cv2.INTER_AREA, p=.3),
        ], p=.3),
        geometric.ElasticTransform(alpha=4, sigma=100, alpha_affine=100, interpolation=cv2.INTER_AREA, p=0.3),
        transforms.CoarseDropout(),
        transforms.Normalize(),
        ToTensorV2()
    ])
Example no. 7
stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # CIFAR-10 per-channel mean and std for normalization
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# torchvision pipeline ("transforms" here is torchvision.transforms)
pmda_train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
    transforms.RandomHorizontalFlip(),
    transforms.CenterCrop(32),  # no-op: the image is already 32x32 after the RandomCrop above
    transforms.ToTensor(),
    transforms.Normalize(*stats)
])
# albumentations pipeline ("albtransforms" is assumed to be an alias for albumentations.augmentations.transforms)
train_transform = A.Compose([
    albtransforms.PadIfNeeded(min_height=40,
                              min_width=40,
                              border_mode=4,  # 4 == cv2.BORDER_REFLECT_101
                              value=[0, 0, 0],
                              always_apply=False,
                              p=1.),
    #albtransforms.RandomCrop(32,32,always_apply=False, p=1.0),
    albtransforms.RandomCrop(32, 32, always_apply=False, p=1.),
    #albtransforms.HorizontalFlip(1.0),
    albtransforms.HorizontalFlip(0.5),
    albtransforms.Cutout(num_holes=8,
                         max_h_size=8,
                         max_w_size=8,
                         always_apply=False,
                         p=0.1),
    A.Normalize(*stats),
    ToTensor()  # presumably the legacy albumentations.pytorch ToTensor (or ToTensorV2)
])
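The two pipelines above are invoked differently: the torchvision Compose is called directly on a PIL image, while the albumentations Compose takes keyword arguments and returns a dict. A minimal sketch, assuming the definitions above are in scope; the wrapper class name below is illustrative.

import numpy as np
from PIL import Image
from torch.utils.data import Dataset

class AlbumentationsWrapper(Dataset):
    # wraps a torchvision-style dataset so the albumentations pipeline can feed a DataLoader
    def __init__(self, base_dataset, alb_transform):
        self.base = base_dataset
        self.tf = alb_transform

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        img, label = self.base[idx]                          # PIL image, int label
        return self.tf(image=np.array(img))["image"], label  # albumentations takes/returns a dict

dummy = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
out_tv  = pmda_train_transform(dummy)                        # torchvision: called on the image directly
out_alb = train_transform(image=np.array(dummy))["image"]    # albumentations: keyword in, dict out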