예제 #1
0
def case3_cls_train_augs(name, **kwargs):
    """Build the two case-3 classification training pipelines.

    Args:
        name: unused; kept for interface compatibility with sibling builders.
        **kwargs: unused; kept for interface compatibility.

    Returns:
        list of two ``A.Compose`` pipelines that share the same noise /
        flip / rotate / normalize core and differ only in the distortion
        transform (grid vs optical).
    """
    def _pipeline(distortion):
        # One full pipeline with the given distortion in the second slot.
        return A.Compose(
            [
                A.ISONoise(),
                distortion,
                A.HorizontalFlip(),
                A.VerticalFlip(),
                A.RandomRotate90(),
                A.ShiftScaleRotate(
                    rotate_limit=30, border_mode=cv2.BORDER_CONSTANT, value=0),
                A.Normalize(),
            ],
            p=1.0,
        )

    return [
        _pipeline(A.GridDistortion(distort_limit=0.1)),
        _pipeline(A.OpticalDistortion()),
    ]
예제 #2
0
 def __init__(self, noisy=False, p=1):
     """Assemble the photometric augmentor list and hand it to the base class.

     Args:
         noisy: when True, additionally include noise / blur / compression
             style augmentors.
         p: overall application probability forwarded to the base class.
     """
     # Base color-jitter augmentors, each with its own probability.
     argumentors = [
         RandomBrightness(p=0.25),
         RandomContrast(p=0.25),
         RandomHue(p=0.25),
         RandomSaturation(p=0.25),
         RandomEqualize(p=0.0625),
         RandomAutoContrast(p=0.0625),
         RandomAlpha(p=0.25),
     ]
     if noisy:
         # Degradation-style augmentors, enabled only in "noisy" mode.
         argumentors.extend(
             [
                 RandomPosterize(p=0.0625),
                 A.Solarize(threshold=(50, 255 - 50), p=0.0625),
                 RandomBlur(p=0.125),
                 RandomUnsharpMask(p=0.125),
                 A.IAASharpen(alpha=(0, 0.5), p=0.125),
                 GaussNoise(p=0.125),
                 SpeckleNoise(p=0.125),
                 A.ISONoise(color_shift=(0, 0.05), intensity=(0, 0.5), p=0.125),
                 A.JpegCompression(quality_lower=50, quality_upper=100, p=0.125),
             ]
         )
     super().__init__(argumentors, p=p)
예제 #3
0
def get_augmentation(config):
    """Build the augmentation pipelines for both splits.

    Args:
        config: object exposing ``TRAIN.HEIGHT`` and ``TRAIN.WIDTH``.

    Returns:
        dict with 'train' and 'valid' ``A.Compose`` pipelines; both end in
        ImageNet normalization and tensor conversion.
    """
    normalize = A.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    to_tensor = ToTensorV2()

    def _center_crop():
        # Both splits are cropped to the configured training resolution.
        return A.CenterCrop(p=1, height=config.TRAIN.HEIGHT,
                            width=config.TRAIN.WIDTH)

    color_jitter = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                                 val_shift_limit=0.2, p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2, p=0.9),
        ],
        p=0.5,
    )
    return {
        'train': A.Compose([
            _center_crop(),
            A.Cutout(num_holes=4, max_h_size=10, max_w_size=10, p=0.3),
            color_jitter,
            A.HorizontalFlip(p=0.5),
            A.ISONoise(p=0.3),
            normalize,
            to_tensor,
        ]),
        'valid': A.Compose([_center_crop(), normalize, to_tensor]),
    }
예제 #4
0
def aug_medium(prob=1):
    """Medium-strength augmentation pipeline, applied with probability ``prob``."""
    steps = [
        aug.Flip(),
        # Sharpening / local contrast, sometimes.
        aug.OneOf(
            [aug.CLAHE(clip_limit=2, p=.5), aug.IAASharpen(p=.25)],
            p=0.35),
        # Global tone changes.
        aug.OneOf(
            [aug.RandomContrast(), aug.RandomGamma(), aug.RandomBrightness()],
            p=0.3),
        # Geometric warps.
        aug.OneOf(
            [
                aug.ElasticTransform(alpha=120, sigma=120 * 0.05,
                                     alpha_affine=120 * 0.03),
                aug.GridDistortion(),
                aug.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
            p=0.3),
        aug.ShiftScaleRotate(rotate_limit=12),
        # Noise models.
        aug.OneOf(
            [
                aug.GaussNoise(p=.35),
                SaltPepperNoise(level_limit=0.0002, p=.7),
                aug.ISONoise(p=.7),
            ],
            p=.5),
        aug.Cutout(num_holes=3, p=.25),
    ]
    return aug.Compose(steps, p=prob)
def get_individual_transforms():
    """Compose of three independent OneOf stages — orientation, warp, and
    noise/dropout — each of which may pick ``NoOp`` and leave the image alone."""
    orientation = A.OneOf(
        [
            A.Transpose(p=1.0),
            A.VerticalFlip(p=1.0),
            A.HorizontalFlip(p=1.0),
            A.RandomRotate90(p=1.0),
            A.NoOp(),
        ],
        p=1.0,
    )
    warp = A.OneOf(
        [
            A.ElasticTransform(p=1.0),
            A.GridDistortion(p=1.0),
            A.OpticalDistortion(p=1.0),
            A.NoOp(),
        ],
        p=1.0,
    )
    noise = A.OneOf(
        [
            A.GaussNoise(p=1.0),
            A.GaussianBlur(p=1.0),
            A.ISONoise(p=1.0),
            A.CoarseDropout(p=1.0, max_holes=16, max_height=16, max_width=16),
            A.NoOp(),
        ],
        p=1.0,
    )
    return A.Compose([orientation, warp, noise])
예제 #6
0
    def __init__(self, root_dir, is_train):
        """Face-landmark dataset: load annotations and build transforms.

        Args:
            root_dir: directory containing ``annot.pkl`` (pickled (X, Y) pair).
            is_train: True selects the first 99% of samples plus training
                augmentations; False selects the remaining 1% with only
                resize / normalize / tensor conversion.
        """
        super(FaceDataset, self).__init__()

        #self.local_rank = local_rank
        self.is_train = is_train
        # Fixed square input size; 68 facial keypoints per sample.
        self.input_size = 256
        self.num_kps = 68
        transform_list = []
        if is_train:
            # Photometric + geometric training-time augmentations.
            transform_list += \
                [
                    A.ColorJitter(brightness=0.8, contrast=0.5, p=0.5),
                    A.ToGray(p=0.1),
                    A.ISONoise(p=0.1),
                    A.MedianBlur(blur_limit=(1,7), p=0.1),
                    A.GaussianBlur(blur_limit=(1,7), p=0.1),
                    A.MotionBlur(blur_limit=(5,12), p=0.1),
                    A.ImageCompression(quality_lower=50, quality_upper=90, p=0.05),
                    A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=40, interpolation=cv2.INTER_LINEAR,
                        border_mode=cv2.BORDER_CONSTANT, value=0, mask_value=0, p=0.8),
                    A.HorizontalFlip(p=0.5),
                    RectangleBorderAugmentation(limit=0.33, fill_value=0, p=0.2),
                ]
        # Deterministic tail applied to both splits.
        transform_list += \
            [
                A.geometric.resize.Resize(self.input_size, self.input_size, interpolation=cv2.INTER_LINEAR, always_apply=True),
                A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ToTensorV2(),
            ]
        # ReplayCompose so a sampled transform can be re-applied; keypoints
        # are (x, y) and kept even when transformed out of the frame.
        self.transform = A.ReplayCompose(transform_list,
                                         keypoint_params=A.KeypointParams(
                                             format='xy',
                                             remove_invisible=False))
        self.root_dir = root_dir
        with open(osp.join(root_dir, 'annot.pkl'), 'rb') as f:
            annot = pickle.load(f)
            self.X, self.Y = annot
        # 99/1 train/val split over the same annotation file.
        train_size = int(len(self.X) * 0.99)

        if is_train:
            self.X = self.X[:train_size]
            self.Y = self.Y[:train_size]
        else:
            self.X = self.X[train_size:]
            self.Y = self.Y[train_size:]
        #if local_rank==0:
        #    logging.info('data_transform_list:%s'%transform_list)
        # 1-based index pairs of keypoints that swap under a horizontal flip
        # (presumably the 68-point landmark convention — confirm with caller).
        flip_parts = ([1, 17], [2, 16], [3, 15], [4, 14], [5, 13], [6, 12],
                      [7, 11], [8, 10], [18, 27], [19, 26], [20, 25], [21, 24],
                      [22, 23], [32, 36], [33, 35], [37, 46], [38, 45],
                      [39, 44], [40, 43], [41, 48], [42, 47], [49,
                                                               55], [50, 54],
                      [51, 53], [62, 64], [61, 65], [68, 66], [59,
                                                               57], [60, 56])
        # flip_order[i] = index of the keypoint that maps to position i
        # after a horizontal flip (0-based).
        self.flip_order = np.arange(self.num_kps)
        for pair in flip_parts:
            self.flip_order[pair[1] - 1] = pair[0] - 1
            self.flip_order[pair[0] - 1] = pair[1] - 1
        logging.info('len:%d' % len(self.X))
        print('!!!len:%d' % len(self.X))
예제 #7
0
def aug_train():
    """Augmentation pipeline for training a face-recognition (FR) system."""
    steps = [
        alt.Resize(height=112, width=112),
        alt.HorizontalFlip(p=0.5),
        alt.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20,
                               val_shift_limit=10, p=0.2),
        alt.RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20,
                     p=0.2),
        alt.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2,
                                     p=0.3),
        alt.ToGray(p=0.01),
        alt.MotionBlur(blur_limit=7, p=0.2),  # default=11
        alt.GaussianBlur(blur_limit=7, p=0.2),  # default=11
        # default var_limit=(10.0, 30.0)
        alt.GaussNoise(var_limit=(5.0, 20.0), mean=0, p=0.1),
        alt.ISONoise(p=0.2),
        alt.Normalize(),
        ToTensor(),
    ]
    return alt.Compose(steps)
예제 #8
0
def get_training_augmentation(min_area=0., min_visibility=0.):
    """Bbox-aware training augmentation (COCO boxes, 'category_id' labels).

    Args:
        min_area: minimum bbox area kept after the transforms.
        min_visibility: minimum visible bbox fraction kept after the transforms.
    """
    noise = albu.OneOf([
        albu.ISONoise(p=.5),
        albu.GaussNoise(p=0.4),
        albu.Blur(blur_limit=3, p=0.1),
    ])
    contrast = albu.OneOf([
        albu.CLAHE(clip_limit=2),
    ], p=0.2)
    weather = albu.OneOf([
        albu.RandomSnow(snow_point_lower=0.,
                        snow_point_upper=0.2,
                        brightness_coeff=2.,
                        p=0.5),
        albu.RandomSunFlare(p=0.5),
    ])
    color = albu.OneOf([
        albu.RGBShift(p=0.1),
        albu.ChannelShuffle(p=0.2),
    ])
    bbox_params = {
        'format': 'coco',
        'min_area': min_area,
        'min_visibility': min_visibility,
        'label_fields': ['category_id'],
    }
    return albu.Compose([noise, contrast, weather, color],
                        bbox_params=bbox_params)
예제 #9
0
    def __iso_noise(self, img):
        """Apply fixed-strength ISO (camera-sensor) noise to ``img``.

        Args:
            img: image array accepted by albumentations.

        Returns:
            The noised image.
        """
        pipeline = A.Compose([
            A.ISONoise(color_shift=(0.01, 0.1), intensity=(1.5, 2.3))
        ])
        return pipeline(image=img)["image"]
예제 #10
0
def get_transformer(face_policy: str, patch_size: int, net_normalizer: transforms.Normalize, train: bool):
    """Build the face-patch transformation pipeline.

    Args:
        face_policy: 'scale' (pad then resize to a square) or 'tight'
            (longest-side resize then pad); any other value raises.
        patch_size: side of the square output patch, in pixels.
        net_normalizer: torchvision Normalize whose mean/std are reused here.
        train: when True, add random downscaling plus photometric
            augmentations; when False, only loading + normalization.

    Returns:
        An ``A.Compose`` chaining loading, optional train-time augmentation,
        and normalize + tensor conversion.

    Raises:
        ValueError: if ``face_policy`` is not 'scale' or 'tight'.
    """
    # Transformers and traindb
    if face_policy == 'scale':
        # The loader crops the face isotropically then scales to a square of size patch_size_load
        loading_transformations = [
            A.PadIfNeeded(min_height=patch_size, min_width=patch_size,
                          border_mode=cv2.BORDER_CONSTANT, value=0,always_apply=True),
            A.Resize(height=patch_size,width=patch_size,always_apply=True),
        ]
        if train:
            downsample_train_transformations = [
                A.Downscale(scale_max=0.5, scale_min=0.5, p=0.5),  # replaces scaled dataset
            ]
        else:
            downsample_train_transformations = []
    elif face_policy == 'tight':
        # The loader crops the face tightly without any scaling
        loading_transformations = [
            A.LongestMaxSize(max_size=patch_size, always_apply=True),
            A.PadIfNeeded(min_height=patch_size, min_width=patch_size,
                          border_mode=cv2.BORDER_CONSTANT, value=0,always_apply=True),
        ]
        if train:
            downsample_train_transformations = [
                A.Downscale(scale_max=0.5, scale_min=0.5, p=0.5),  # replaces scaled dataset
            ]
        else:
            downsample_train_transformations = []
    else:
        raise ValueError('Unknown value for face_policy: {}'.format(face_policy))

    if train:
        # Photometric train-time augmentation: flip, tone, noise,
        # downscale and JPEG compression artifacts.
        aug_transformations = [
            A.Compose([
                A.HorizontalFlip(),
                A.OneOf([
                    A.RandomBrightnessContrast(),
                    A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=30, val_shift_limit=20),
                ]),
                A.OneOf([
                    A.ISONoise(),
                    A.IAAAdditiveGaussianNoise(scale=(0.01 * 255, 0.03 * 255)),
                ]),
                A.Downscale(scale_min=0.7, scale_max=0.9, interpolation=cv2.INTER_LINEAR),
                A.ImageCompression(quality_lower=50, quality_upper=99),
            ], )
        ]
    else:
        aug_transformations = []

    # Common final transformations
    final_transformations = [
        A.Normalize(mean=net_normalizer.mean, std=net_normalizer.std, ),
        ToTensorV2(),
    ]
    transf = A.Compose(
        loading_transformations + downsample_train_transformations + aug_transformations + final_transformations)
    return transf
def get_hard_augmentations(image_size):
    """Aggressive ("hard") augmentation pipeline.

    Args:
        image_size: (height, width) pair; used by the random sized crop so
            the output keeps the original resolution.
    """
    return A.Compose([
        # Geometric warps — pick one (possibly a no-op).
        A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1,
                               rotate_limit=45,
                               border_mode=cv2.BORDER_CONSTANT, value=0),
            A.ElasticTransform(alpha_affine=0,
                               alpha=35,
                               sigma=5,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.OpticalDistortion(distort_limit=0.11, shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                             value=0),
            A.NoOp()
        ]),

        # Random crop (>= 75% of the height) resized back to image_size.
        A.OneOf([

            A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75), image_size[0]),
                              height=image_size[0],
                              width=image_size[1], p=0.3),
            A.NoOp()
        ]),

        A.ISONoise(p=0.5),

        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),

        # Color perturbations.
        A.OneOf([
            A.RGBShift(r_shift_limit=40, b_shift_limit=30, g_shift_limit=30),
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=10),
            A.ToGray(p=0.2),
            A.NoOp()
        ]),

        A.ChannelDropout(),
        A.RandomGridShuffle(p=0.3),

        # D4
        A.Compose([
            A.RandomRotate90(),
            A.Transpose()
        ])
    ])
예제 #12
0
    def __init__(self):
        """Build a randomly ordered color-distortion pipeline ending in ISO noise."""
        color_ops = [
            A.RGBShift(p=0.1),
            A.RandomBrightness(limit=0.5, p=0.1),
            A.RandomContrast(limit=0.5, p=0.1),
        ]
        # Randomize the order of the color ops once at construction time;
        # ISO noise is always the final transform.
        random.shuffle(color_ops)
        color_ops.append(A.ISONoise())

        self.distort_transform = A.Compose(color_ops)
예제 #13
0
 def __init__(self):
     """Build the photometric / weather augmentation pipeline (always applied)."""
     self.augmentor = A.Compose(
         [
             A.MotionBlur(p=0.25),
             A.ColorJitter(p=0.5),
             A.RandomRain(p=0.1),  # random occlusion
             A.RandomSunFlare(p=0.1),
             A.JpegCompression(p=0.25),
             A.ISONoise(p=0.25)
         ],
         p=1.0)
예제 #14
0
def get_gray_aug_trans(use_color_aug,
                       use_shape_aug,
                       mean=(0.5, 0.5, 0.5),
                       std=(0.5, 0.5, 0.5)):
    """Return (tensor_transform, color_aug_or_None, shape_aug_or_None).

    Args:
        use_color_aug: when truthy, build the albumentations color/noise
            pipeline; otherwise the second return value is None.
        use_shape_aug: when truthy, use ``EzImageBaseAug`` as the shape
            pipeline; otherwise the third return value is None.
        mean: normalization mean for the torchvision tensor transform.
        std: normalization std for the torchvision tensor transform.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        # range [0.0, 1.0] -> [-1.0,1.0]
        transforms.Normalize(mean=mean, std=std)
    ])

    if use_color_aug:
        # Brightness/contrast, dropout, one-of blurs, one-of noises, JPEG.
        c_aug = A.Compose([
            A.RandomBrightnessContrast(p=0.7,
                                       brightness_limit=0.5,
                                       contrast_limit=0.5),
            A.CoarseDropout(p=0.5,
                            max_holes=8,
                            max_height=16,
                            max_width=16,
                            min_height=8,
                            min_width=8,
                            fill_value=0),
            A.OneOf([
                A.Blur(p=1, blur_limit=7),
                A.MotionBlur(p=1, blur_limit=7),
                A.MedianBlur(p=1, blur_limit=7),
                A.GaussianBlur(p=1, blur_limit=7)
            ],
                    p=0.5),
            A.OneOf([
                A.RandomGamma(p=1, gamma_limit=(80, 120)),
                A.GaussNoise(p=1, var_limit=(10.0, 50.0)),
                A.ISONoise(p=1, color_shift=(0.01, 0.05),
                           intensity=(0.1, 0.5)),
            ],
                    p=0.3),
            A.JpegCompression(quality_lower=10, quality_upper=30, p=0.3),
            # A.JpegCompression(quality_lower=50, quality_upper=100, p=1),
        ])
    else:
        c_aug = None

    # crop_size = (args.train_input_h, args.train_input_w)

    if use_shape_aug:
        shape_aug = EzImageBaseAug()
    else:
        shape_aug = None

    return transform, c_aug, shape_aug
예제 #15
0
def faceaug():
    """Augmentation pipeline chosen for face recognition (no resize/normalize)."""
    steps = [
        alt.HorizontalFlip(p=0.5),
        alt.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20,
                               val_shift_limit=10, p=0.2),
        alt.RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20,
                     p=0.2),
        alt.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2,
                                     p=0.3),
        alt.ToGray(p=0.01),
        alt.MotionBlur(blur_limit=7, p=0.2),    # default=11
        alt.GaussianBlur(blur_limit=7, p=0.2),  # default=11
        # default var_limit=(10.0, 30.0)
        alt.GaussNoise(var_limit=(5.0, 20.0), mean=0, p=0.1),
        alt.ISONoise(p=0.2),
        # alt.Normalize(),
        ToTensor(),
    ]
    return alt.Compose(steps)
def hard_color_augmentations():
    """Aggressive color / noise / fog augmentation pipeline."""
    noise_choice = A.OneOf([
        A.NoOp(),
        A.MultiplicativeNoise(),
        A.GaussNoise(),
        A.ISONoise(),
    ])
    shift_choice = A.OneOf([
        A.RGBShift(),
        A.HueSaturationValue(),
        A.NoOp(),
    ])
    return A.Compose([
        A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3,
                                   brightness_by_max=True),
        A.RandomGamma(gamma_limit=(90, 110)),
        noise_choice,
        shift_choice,
        A.RandomFog(fog_coef_lower=0.05, fog_coef_upper=0.3),
    ])
예제 #17
0
def main():
  """Resize dataset images/masks and serialize them into TFRecord shards."""
  size = (432,432)
  color_dic = {1:[255,255,255]}

  img_paths = [p.replace('\\', '/') for p in glob('dataset/train/img_aug/**', recursive=True) if os.path.isfile(p)]
  mask_paths = list(map(lambda x: x.replace('/img_aug/', '/mask_aug/'), img_paths))

  batch_size = 16
  splits = math.ceil(len(img_paths)/batch_size)

  empty = []

  # albumentation
  # https://qiita.com/kurilab/items/b69e1be8d0224ae139ad
  # NOTE(review): `transforms` is built but never applied — the augmentation
  # call below is commented out; confirm whether that is intentional.
  transforms = albu.OneOf([
                  albu.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=90),
                  albu.GaussNoise(),
                  albu.ISONoise(intensity=(0.7,0.9)),
                  albu.Downscale(),
                  albu.ElasticTransform(),
                  albu.GaussianBlur(),
                  albu.MultiplicativeNoise(multiplier=(2.0,3.0)),
                  ])

  for i in range(splits):
    tfrecord_fname = '_record_' + str(i) + '.tfrecord'
    # NOTE(review): `save_path` is unused — the writer below targets
    # `tfrecord_fname` in the working directory; verify the intended location.
    save_path = os.path.join('dataset', tfrecord_fname)

    # create one tfrecord file per (number of images / batch size)
    with tf.io.TFRecordWriter(tfrecord_fname) as writer:
      for img_d, mask_d in zip(img_paths[i::splits], mask_paths[i::splits]):
        # image transforms
        img = cv2.imread(img_d)
        mask = cv2.imread(mask_d)
        #augmented = transforms(image=img, mask=mask)
        #img, mask = augmented['image'], augmented['mask']
        img = cv2.resize(img, (size[0], size[1]), cv2.INTER_NEAREST)
        mask = cv2.resize(mask, (size[0], size[1]), cv2.INTER_NEAREST)
        # convert to byte strings
        img = np2byte(img)
        mask = np2byte(mask)
        #img = np2byte(np.float32(img/127.5 - 1))
        #mask = np2byte(convert_mask(mask, color_dic))
        # serialize and write out
        proto = serialize_example(img, mask)
        writer.write(proto.SerializeToString())
    if i>2 : break
예제 #18
0
def get_augmentation(save_path=None, load_path=None):
        """Build (or load) the serialized augmentation pipeline.

        Args:
            save_path: when given, serialize the built pipeline to this path.
            load_path: when given, load and return a previously saved
                pipeline instead of building a new one.

        Returns:
            An ``A.Compose`` pipeline (color-shift, noise, and tone/blur/fog
            stages; the rotate/flip stage is currently disabled).
        """
        if load_path:
            return A.load(load_path)
        else:
            # Stage 1 — rotations / flips / optical warp (unused below).
            aug_seq1 = A.OneOf([
                A.Rotate(limit=(-90, 90), p=1.0),
                A.Flip(p=1.0),
                A.OpticalDistortion(always_apply=False, p=1.0, distort_limit=(-0.3, 0.3), 
                                    shift_limit=(-0.05, 0.05), interpolation=3, 
                                    border_mode=3, value=(0, 0, 0), mask_value=None),
            ], p=1.0)
            # Stage 2 — color shift / brightness-contrast.
            aug_seq2 = A.OneOf([
                # A.ChannelDropout(always_apply=False, p=1.0, channel_drop_range=(1, 1), fill_value=0),
                A.RGBShift(r_shift_limit=15, g_shift_limit=15,
                           b_shift_limit=15, p=1.0),
                A.RandomBrightnessContrast(always_apply=False, p=1.0, brightness_limit=(
                    -0.2, 0.2), contrast_limit=(-0.2, 0.2), brightness_by_max=True)
            ], p=1.0)
            # Stage 3 — noise models.
            aug_seq3 = A.OneOf([
                A.GaussNoise(always_apply=False, p=1.0, var_limit=(10, 50)),
                A.ISONoise(always_apply=False, p=1.0, intensity=(
                    0.1, 1.0), color_shift=(0.01, 0.3)),
                A.MultiplicativeNoise(always_apply=False, p=1.0, multiplier=(
                    0.8, 1.6), per_channel=True, elementwise=True),
            ], p=1.0)
            # Stage 4 — tone inversion / blur / fog.
            aug_seq4 = A.OneOf([
                A.Equalize(always_apply=False, p=1.0,
                           mode='pil', by_channels=True),
                A.InvertImg(always_apply=False, p=1.0),
                A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
                A.RandomFog(always_apply=False, p=1.0, 
                            fog_coef_lower=0.01, fog_coef_upper=0.2, alpha_coef=0.2)
            ], p=1.0)
            aug_seq = A.Compose([
                # A.Resize(self.img_size, self.img_size),
                # aug_seq1,
                aug_seq2,
                aug_seq3,
                aug_seq4,
                # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                # A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ])
            # aug_path = '/home/jitesh/prj/classification/test/bolt/aug/aug_seq.json'
            if save_path:
                A.save(aug_seq, save_path)
            # loaded_transform = A.load(aug_path)
            return aug_seq
예제 #19
0
    def setup_augmentors(self, augmentations):
        """Build ``self.augmentors`` from a name -> config mapping.

        Args:
            augmentations: dict mapping augmentation names
                ('image_compression', 'posterize', 'blur', 'median_blur',
                'iso_noise') to config dicts holding optional parameter
                overrides plus a required 'probability' entry. Unknown names
                are skipped.
        """
        self.augmentors = []
        for aug_name, aug_config in augmentations.items():
            aug = None

            def get_albu(aug):
                # Wrap the transform list in a Compose pipeline.
                return albu.Compose(aug)

            if aug_name == 'image_compression':
                aug = get_albu([
                    albu.ImageCompression(
                        quality_lower=aug_config.get('quality_lower', 90),
                        quality_upper=aug_config.get('quality_upper'),
                        # BUG FIX: key was misspelled 'probabilty' (here and
                        # below), so the configured probability was silently
                        # ignored and the 0.5 default always used.
                        p=aug_config.get('probability', 0.5))
                ])
            elif aug_name == 'posterize':
                aug = get_albu([
                    albu.Posterize(num_bits=aug_config.get('num_bits', 4),
                                   p=aug_config.get('probability', 0.5))
                ])
            elif aug_name == 'blur':
                aug = get_albu([
                    albu.Blur(blur_limit=aug_config.get('blur_limit', 7),
                              p=aug_config.get('probability', 0.5))
                ])
            elif aug_name == 'median_blur':
                aug = get_albu([
                    albu.MedianBlur(blur_limit=aug_config.get('blur_limit', 7),
                                    p=aug_config.get('probability', 0.5))
                ])
            elif aug_name == 'iso_noise':
                aug = get_albu([
                    albu.ISONoise(
                        color_shift=(aug_config.get('min_color_shift', 0.01),
                                     aug_config.get('max_color_shift', 0.05)),
                        # BUG FIX: the upper bound previously read
                        # 'min_intensity' twice; it now reads 'max_intensity'.
                        intensity=(aug_config.get('min_intensity', 0.1),
                                   aug_config.get('max_intensity', 0.5)),
                        p=aug_config.get('probability', 0.5))
                ])
            if not aug:
                continue
            # 'probability' is required here, which confirms configs use the
            # correctly spelled key.
            aug.name, aug.p, aug.base = aug_name, aug_config[
                'probability'], self

            self.augmentors.append(aug)

        return
예제 #20
0
 def __init__(self):
     """Build the composite per-sample augmentation policy."""
     self.policy = A.Compose([
         # Orientation.
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         # Occlusion-style dropout.
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         # Noise models.
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         # Blurs.
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         # Color-space perturbations.
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         # Geometric distortions.
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         # Sharpening / local contrast.
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
예제 #21
0
def aug_medium(prob=1):
    """Medium photometric augmentation pipeline, applied with probability ``prob``."""
    sharpen = aug.OneOf(
        [aug.CLAHE(clip_limit=2, p=.5), aug.IAASharpen(p=.25)],
        p=0.35)
    noise = aug.OneOf(
        [
            aug.GaussNoise(p=.35),
            aug.ISONoise(p=.7),
            aug.ImageCompression(quality_lower=70, quality_upper=100, p=.7),
        ],
        p=.6)
    return aug.Compose(
        [
            aug.HorizontalFlip(p=.5),
            sharpen,
            aug.RandomBrightnessContrast(p=.7),
            noise,
            aug.RGBShift(p=.5),
            aug.HueSaturationValue(hue_shift_limit=8, sat_shift_limit=12,
                                   val_shift_limit=8, p=.5),
            aug.ToGray(p=.3),
        ],
        p=prob)
예제 #22
0
def create_datasets(train_file_list, val_file_list):
    """Construct the (train, val) ``GroundDataset`` pair with their transforms.

    Args:
        train_file_list: image file paths for the training split.
        val_file_list: image file paths for the validation split.
    """
    photometric = [
        A.RandomBrightnessContrast(p=0.5),
        A.GaussNoise(p=.25),
        A.ISONoise(p=.15),
        A.RandomShadow(p=.2),
        A.MotionBlur(p=.1),
    ]
    # Normalization intentionally disabled (kept for reference):
    # A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    train_dataset = GroundDataset(
        images_filepaths=train_file_list,
        transform=A.Compose(photometric + [ToTensorV2()]))

    val_dataset = GroundDataset(
        images_filepaths=val_file_list,
        transform=A.Compose([ToTensorV2()]))

    return train_dataset, val_dataset
예제 #23
0
def aug_heavy(prob=0.9):
    """Heavy augmentation pipeline, applied with probability ``prob``."""
    sharpen = aug.OneOf(
        [
            aug.CLAHE(clip_limit=2, p=.5),
            aug.IAASharpen(p=.25),
            aug.IAAEmboss(p=.25),
        ],
        p=.35)
    noise = aug.OneOf(
        [
            aug.IAAAdditiveGaussianNoise(p=.3),
            aug.GaussNoise(p=.7),
            SaltPepperNoise(level_limit=0.0002, p=.7),
            aug.ISONoise(p=.3),
        ],
        p=.5)
    blur = aug.OneOf(
        [
            aug.MotionBlur(p=.2),
            aug.MedianBlur(blur_limit=3, p=.3),
            aug.Blur(blur_limit=3, p=.5),
        ],
        p=.4)
    tone = aug.OneOf(
        [
            aug.RandomContrast(p=.5),
            aug.RandomBrightness(p=.5),
            aug.RandomGamma(p=.5),
        ],
        p=.4)
    warp = aug.OneOf(
        [
            aug.GridDistortion(p=.2),
            aug.ElasticTransform(
                alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03, p=.2),
            aug.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=.2),
        ],
        p=.6)
    return aug.Compose(
        [
            aug.Flip(),
            sharpen,
            noise,
            blur,
            tone,
            aug.ShiftScaleRotate(
                shift_limit=.0625, scale_limit=0.1, rotate_limit=12, p=.7),
            warp,
            aug.HueSaturationValue(p=.5),
        ],
        p=prob)
예제 #24
0
def albumentations_list(MAGN: int = 4):
    """Return the standard list of albumentations transforms of magnitude ``MAGN``.

    Args:
        MAGN (int): magnitude applied to every transform's strength.

    Returns:
        list: pixel-level transforms followed by spatial transforms, all
        constructed with ``always_apply=True``.
    """
    pixel_level = [
        A.RandomContrast(limit=MAGN * .1, always_apply=True),
        A.RandomBrightness(limit=MAGN * .1, always_apply=True),
        A.Equalize(always_apply=True),
        A.OpticalDistortion(distort_limit=MAGN * .2, shift_limit=MAGN * .1,
                            always_apply=True),
        A.RGBShift(r_shift_limit=MAGN * 10, g_shift_limit=MAGN * 10,
                   b_shift_limit=MAGN * 10, always_apply=True),
        A.ISONoise(color_shift=(MAGN * .01, MAGN * .1),
                   intensity=(MAGN * .02, MAGN * .2), always_apply=True),
        A.RandomFog(fog_coef_lower=MAGN * .01, fog_coef_upper=MAGN * .1,
                    always_apply=True),
        A.CoarseDropout(max_holes=MAGN * 10, always_apply=True),
        A.GaussNoise(var_limit=(MAGN, MAGN * 50), always_apply=True),
    ]
    spatial = [
        A.Rotate(always_apply=True),
        A.Transpose(always_apply=True),
        A.NoOp(always_apply=True),
        A.ElasticTransform(alpha=MAGN * .25, sigma=MAGN * 3,
                           alpha_affine=MAGN * 3, always_apply=True),
        A.GridDistortion(distort_limit=MAGN * .075, always_apply=True),
    ]
    return pixel_level + spatial
예제 #25
0
def get_unlabel_aug(use_color_aug, use_shape_aug):
    """Return (color_aug_or_None, shape_aug_or_None) for unlabeled data.

    Args:
        use_color_aug: when truthy, build the albumentations color/noise
            pipeline; otherwise the first return value is None.
        use_shape_aug: when truthy, use ``EzImageBaseAug`` for shape
            augmentation; otherwise the second return value is None.
    """
    c_aug = None
    if use_color_aug:
        blur_choice = A.OneOf(
            [
                A.Blur(p=1, blur_limit=7),
                A.MotionBlur(p=1, blur_limit=7),
                A.MedianBlur(p=1, blur_limit=7),
                A.GaussianBlur(p=1, blur_limit=7),
            ],
            p=0.8)
        noise_choice = A.OneOf(
            [
                A.RandomGamma(p=1, gamma_limit=(80, 120)),
                A.GaussNoise(p=1, var_limit=(10.0, 50.0)),
                A.ISONoise(p=1, color_shift=(0.1, 0.5), intensity=(0.3, 1.0)),
            ],
            p=0.8)
        c_aug = A.Compose([
            A.RandomBrightnessContrast(p=0.8, brightness_limit=0.5,
                                       contrast_limit=0.5),
            A.CoarseDropout(p=0.8, max_holes=16, max_height=20, max_width=20,
                            min_height=8, min_width=8, fill_value=0),
            blur_choice,
            noise_choice,
            # A.CLAHE(p=0.7, clip_limit=4.0, tile_grid_size=(8, 8)),
            A.JpegCompression(quality_lower=10, quality_upper=30, p=0.8),
        ])

    # crop_size = (args.train_input_h, args.train_input_w)

    shape_aug = EzImageBaseAug() if use_shape_aug else None

    return c_aug, shape_aug
예제 #26
0
def get_image_augmentation():
    """Augmentations just for input and output images (not for masks)."""
    # Group 1: noise / compression artifacts — exactly one is drawn per call.
    noise_group = albu.OneOf(
        [
            albu.Blur(p=0.2, blur_limit=(3, 5)),
            albu.GaussNoise(p=0.2, var_limit=(10.0, 50.0)),
            albu.ISONoise(p=0.2, intensity=(0.1, 0.5), color_shift=(0.01, 0.05)),
            albu.ImageCompression(p=0.2, quality_lower=90, quality_upper=100, compression_type=0),
            albu.MultiplicativeNoise(p=0.2, multiplier=(0.9, 1.1), per_channel=True, elementwise=True),
        ],
        p=1,
    )
    # Group 2: color / tone adjustments — exactly one is drawn per call.
    color_group = albu.OneOf(
        [
            albu.HueSaturationValue(p=0.2, hue_shift_limit=(-10, 10), sat_shift_limit=(-10, 10), val_shift_limit=(-10, 10)),
            albu.RandomBrightness(p=0.3, limit=(-0.1, 0.1)),
            albu.RandomGamma(p=0.3, gamma_limit=(80, 100), eps=1e-07),
            albu.ToGray(p=0.1),
            albu.ToSepia(p=0.1),
        ],
        p=1,
    )
    # 'image1' / 'image2' receive the exact same transform as 'image'.
    extra_targets = {'image1': 'image', 'image2': 'image'}
    return albu.Compose([noise_group, color_group], additional_targets=extra_targets)
예제 #27
0
 def __init__(self, noisy: bool = False, grayscale: bool = False, p=1):
     """Assemble the augmentor chain.

     Args:
         noisy: add the extra equalize/posterize/solarize/blur/noise ops.
         grayscale: when True, skip ISO noise (a color-sensor noise model).
         p: probability passed through to the parent augmentor.
     """
     augmentor_list = [
         RandomBrightness(p=0.25),
         RandomContrast(p=0.25),
         RandomHue(p=0.25),
         RandomSaturation(p=0.25),
         RandomAlpha(p=0.25),
     ]
     if noisy:
         augmentor_list += [
             RandomEqualize(p=0.0625),
             RandomAutoContrast(p=0.0625),
             RandomPosterize(p=0.0625),
             A.Solarize(threshold=(50, 255 - 50), p=0.0625),
             RandomBlur(p=0.125),
             RandomUnsharpMask(p=0.125),
             GaussNoise(p=0.125),
         ]
         # ISONoise simulates color-sensor noise, so it only makes sense
         # for non-grayscale input.
         if not grayscale:
             augmentor_list.append(
                 A.ISONoise(color_shift=(0, 0.05), intensity=(0, 0.5), p=0.125))
     super().__init__(augmentor_list, p=p)
예제 #28
0
    def __init__(self, args: argparse.Namespace) -> None:
        """Configure dataset paths, dimensions and image transforms from *args*.

        Any option missing from *args* falls back to the module-level default
        (DATA_FOLDER, IMG_HEIGHT, IMG_WIDTH, PATCH_SIZE, N_CLASSES).
        """
        super().__init__(args)
        self.data_folder = Path(self.args.get('data_folder', DATA_FOLDER))
        self.height = self.args.get('height', IMG_HEIGHT)
        self.width = self.args.get('width', IMG_WIDTH)
        self.patch_size = self.args.get('patch_size', PATCH_SIZE)
        self.n_classes = self.args.get('n_classes', N_CLASSES)
        self.dims = (3, self.height, self.width)
        self.output_dims = (self.n_classes, self.height, self.width)
        self.metadata = parse_metadata(self.data_folder / 'metainfo.json')

        # Training/validation: light augmentation, then normalize to [-1, 1]
        # and convert to a CHW tensor.
        train_steps = [
            A.Resize(self.height, self.width),
            A.HorizontalFlip(p=0.5),
            A.ISONoise(),
            A.ColorJitter(),
            A.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            ToTensorV2(),
        ]
        self.train_val_transform = A.Compose(train_steps)

        # Test: no augmentation, same normalization/tensor conversion.
        test_steps = [
            A.Resize(self.height, self.width),
            A.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            ToTensorV2(),
        ]
        self.test_transform = A.Compose(test_steps)
import cv2
import os
import random
import numpy as np
from random import randint
import albumentations as A
import numpy as np
import cv2
import sys
import os
from tqdm import tqdm
from utils import GetOverlappingBlocks, getListOfFiles, ImageResize

transform = A.Compose([
    A.OneOf([
        A.ISONoise(p=0.4),
        A.JpegCompression(
            quality_lower=50, quality_upper=70, always_apply=False, p=0.8),
    ],
            p=0.6),
    A.OneOf([
        A.MotionBlur(blur_limit=10, p=.8),
        A.MedianBlur(blur_limit=3, p=0.75),
        A.GaussianBlur(blur_limit=7, p=0.75),
    ],
            p=0.8),
    A.OneOf([
        A.RandomBrightnessContrast(
            brightness_limit=0.3, contrast_limit=0.3, p=0.75),
        A.RandomShadow(num_shadows_lower=1,
                       num_shadows_upper=18,
예제 #30
0
    def __init__(self,
                 mean=(0.5, 0.5, 0.5),
                 std=(0.5, 0.5, 0.5),
                 pad_dim=(0, 0),
                 random_crop_dim=(0, 0),
                 resize=(0, 0),
                 horizontal_flip=0,
                 vertical_flip=0,
                 rotate_degree=0,
                 rotation=0,
                 cutout=0,
                 cutout_dim=(1, 1),
                 hsv=0,
                 iso_noise=0,
                 bright_contrast=0,
                 gaussian_blur=0,
                 train=False,
                 modest_input=True):
        """Build the albumentations pipeline applied to the data.

        Augmentations (pad, crop, flips, blur, rotate, cutout, HSV, ISO
        noise, brightness/contrast) are only added when ``train`` is True;
        normalization, resize and tensor conversion apply in both modes.

        Arguments:
            mean : Tuple of mean values for each channel
                (default: (0.5,0.5,0.5))
            std : Tuple of standard deviation values for each channel
                (default: (0.5,0.5,0.5))
            pad_dim (tuple, optional): Pad sides of the image
                pad_dim[0]: minimal result image height (int)
                pad_dim[1]: minimal result image width (int)
                (default: (0,0), i.e. disabled)
            random_crop_dim (tuple, optional): Crop a random part of the input
                random_crop_dim[0]: height of the crop (int)
                random_crop_dim[1]: width of the crop (int)
                (default: (0,0), i.e. disabled)
            resize (tuple, optional): Resize input
                resize[0]: new height of the input (int)
                resize[1]: new width of the input (int)
                (default: (0,0), i.e. disabled)
            horizontal_flip (float, optional): Probability of the image being
                flipped horizontally (default: 0)
            vertical_flip (float, optional): Probability of the image being
                flipped vertically (default: 0)
            rotate_degree (int, optional): Rotation angle limit in degrees,
                used only when ``rotation`` is nonzero (default: 0)
            rotation (float, optional): Probability of the image being rotated
                (default: 0)
            cutout (float, optional): Probability of a cutout being applied
                (default: 0)
            cutout_dim (tuple, optional): Maximum size of the cutout region
                cutout_dim[0]: height of the cutout (int)
                cutout_dim[1]: width of the cutout (int)
                (default: (1,1))
            hsv (float, optional): Probability of a random hue/saturation/value
                shift (default: 0)
            iso_noise (float, optional): Probability of camera-sensor (ISO)
                noise being applied (default: 0)
            bright_contrast (float, optional): Probability of a random
                brightness/contrast change (default: 0)
            gaussian_blur (float, optional): Probability of Gaussian blur
                (default: 0)
            train : If True, augmentations for training data are included;
                otherwise only normalization/resize/tensor conversion
                (default: False)
            modest_input : If True, normalize the image with ``mean``/``std``
                (default: True)
        Returns:
            Transformations that are to be applied on the data
        """

        transformations = []
        if train:
            # Probability-gated augmentations; each flag doubles as its
            # application probability (0 disables the transform entirely).
            if sum(pad_dim) > 0:
                transformations.append(
                    A.PadIfNeeded(min_height=pad_dim[0],
                                  min_width=pad_dim[1],
                                  p=1.0))

            if sum(random_crop_dim) > 0:
                transformations.append(
                    A.RandomCrop(height=random_crop_dim[0],
                                 width=random_crop_dim[1],
                                 p=1.0))

            if horizontal_flip:
                transformations.append(A.HorizontalFlip(p=horizontal_flip))

            if vertical_flip:
                transformations.append(A.VerticalFlip(p=vertical_flip))

            if gaussian_blur:
                transformations.append(A.GaussianBlur(p=gaussian_blur))

            if rotation:
                transformations.append(
                    A.Rotate(limit=rotate_degree, p=rotation))

            if cutout:
                # Fill the hole with the dataset mean (scaled back to 0-255).
                transformations.append(
                    A.CoarseDropout(max_holes=1,
                                    fill_value=tuple(x * 255 for x in mean),
                                    max_height=cutout_dim[0],
                                    max_width=cutout_dim[1],
                                    min_height=1,
                                    min_width=1,
                                    p=cutout))

            if hsv:
                transformations.append(
                    A.HueSaturationValue(hue_shift_limit=20,
                                         sat_shift_limit=30,
                                         val_shift_limit=20,
                                         always_apply=False,
                                         p=hsv))

            if iso_noise:
                transformations.append(
                    A.ISONoise(color_shift=(0.01, 0.05),
                               intensity=(0.1, 0.5),
                               always_apply=False,
                               p=iso_noise))

            if bright_contrast:
                transformations.append(
                    A.RandomBrightnessContrast(brightness_limit=0.2,
                                               contrast_limit=0.2,
                                               brightness_by_max=True,
                                               always_apply=False,
                                               p=bright_contrast))

        # Normalization and resize run in both train and eval pipelines.
        if modest_input:
            transformations.append(
                A.Normalize(mean=mean, std=std, always_apply=True))

        if sum(resize) > 0:
            transformations.append(
                A.Resize(height=resize[0],
                         width=resize[1],
                         interpolation=1,
                         always_apply=False,
                         p=1))

        # NOTE(review): ToTensor presumably comes from albumentations.pytorch
        # (import not visible in this chunk) — confirm against the file header.
        transformations.append(ToTensor())

        self.transform = A.Compose(transformations)