def test_multiplicative_noise_rgb(image):
    """Check MultiplicativeNoise on an RGB image across every combination
    of the ``elementwise`` / ``per_channel`` flags.

    Each stage reassigns ``image`` to the clipped expected output, so the
    next stage intentionally operates on the previous stage's result.
    """
    dtype = image.dtype

    # Scalar multiplier: output must equal clip(image * m).
    m = 0.5
    aug = A.MultiplicativeNoise(m, p=1)
    result = aug(image=image)["image"]
    image = F.clip(image * m, dtype, F.MAX_VALUES_BY_DTYPE[dtype])
    assert np.allclose(image, result)

    # elementwise=True: one multiplier per pixel, broadcast over channels.
    aug = A.MultiplicativeNoise(elementwise=True, p=1)
    params = aug.get_params_dependent_on_targets({"image": image})
    mul = params["multiplier"]
    assert mul.shape == image.shape[:2] + (1,)
    result = aug.apply(image, mul)
    image = F.clip(image.astype(np.float32) * mul, dtype, F.MAX_VALUES_BY_DTYPE[dtype])
    assert np.allclose(image, result)

    # per_channel=True: one multiplier per channel.
    aug = A.MultiplicativeNoise(per_channel=True, p=1)
    params = aug.get_params_dependent_on_targets({"image": image})
    mul = params["multiplier"]
    assert mul.shape == (3,)
    result = aug.apply(image, mul)
    image = F.clip(image.astype(np.float32) * mul, dtype, F.MAX_VALUES_BY_DTYPE[dtype])
    assert np.allclose(image, result)

    # elementwise + per_channel: independent multiplier for every value.
    aug = A.MultiplicativeNoise(elementwise=True, per_channel=True, p=1)
    params = aug.get_params_dependent_on_targets({"image": image})
    mul = params["multiplier"]
    assert mul.shape == image.shape
    result = aug.apply(image, mul)
    # Fix: use the cached `dtype` like every stage above instead of
    # re-reading `image.dtype` (same value, since F.clip restores the
    # original dtype, but the mixed style suggested otherwise).
    image = F.clip(image.astype(np.float32) * mul, dtype, F.MAX_VALUES_BY_DTYPE[dtype])
    assert np.allclose(image, result)
Example #2
0
    def __multiplicative_noise(self, img):
        """Run ``img`` through three chained MultiplicativeNoise transforms.

        The second transform is elementwise (per-pixel multipliers); the
        first and third draw a single multiplier for the whole image.
        """
        pipeline = A.Compose([
            A.MultiplicativeNoise(multiplier=(0.3, 3.5), elementwise=False),
            A.MultiplicativeNoise(multiplier=(2.3, 5.5), elementwise=True),
            A.MultiplicativeNoise(multiplier=(0.3, 1.5), elementwise=False),
        ])
        return pipeline(image=img)["image"]
Example #3
0
 def get_transforms(stage: str = None, mode: str = None):
     """Return the albumentations pipeline for the given ``mode``.

     ``mode`` must be 'train' (blur, geometry, distortion, noise, then
     normalize/GridMask/tensor) or 'valid' (normalize + tensor only);
     anything else raises ValueError. ``stage`` is unused here.

     NOTE(review): GridMask, TRAIN_MEAN/TRAIN_STD are defined elsewhere
     in the project — presumably a custom transform and dataset
     statistics; confirm against their definitions.
     """
     if mode == 'train':
         return albumentations.Compose([
             # blur
             albumentations.OneOf([
                 albumentations.Blur((1, 4), p=1.0),
                 albumentations.GaussianBlur(3, p=1.0),
                 albumentations.MedianBlur(blur_limit=5, p=1.0),
             ], p=3/4),
             # transformations
             albumentations.ShiftScaleRotate(scale_limit=0.2, rotate_limit=25, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.0),
             # cut and drop
             # distortion
             albumentations.OneOf([
                 albumentations.OpticalDistortion(0.6, p=1.0),
                 albumentations.GridDistortion(8, 0.06, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.0),
                 albumentations.ElasticTransform(sigma=10, alpha=1, alpha_affine=10, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.0),
             ], p=3/4),
             # add noise
             albumentations.OneOf([
                 albumentations.GaussNoise((0, 250), p=1.0),
                 albumentations.MultiplicativeNoise(p=1.0),
             ], p=2/3),
             # common
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             GridMask((3, 7), rotate=15, p=0.75),
             ToTensorV2(),
         ])
     elif mode == 'valid':
         return albumentations.Compose([
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             ToTensorV2(),
         ])
     else:
         raise ValueError('mode is %s' % mode)
Example #4
0
def get_transform(is_train):
    """Build the image pipeline: heavy augmentation when ``is_train`` is
    truthy, otherwise just resize + ImageNet normalization."""
    resize = albumentations.Resize(224, 224)
    normalize = albumentations.Normalize(
        mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0)

    if not is_train:
        return albumentations.Compose([resize, normalize])

    return albumentations.Compose([
        resize,
        # Quality degradation: compression artifacts or downscaling.
        albumentations.OneOf([
            albumentations.JpegCompression(quality_lower=20, quality_upper=70, p=0.5),
            albumentations.Downscale(scale_min=0.25, scale_max=0.50, interpolation=1, p=0.5),
        ], p=0.6),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.GaussNoise(p=0.2),
        albumentations.RandomBrightnessContrast(0.3, 0.3, p=0.7),
        albumentations.RandomGamma(p=0.2),
        albumentations.CLAHE(p=0.2),
        albumentations.ChannelShuffle(p=0.2),
        albumentations.MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True, p=0.3),
        albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.7),
        normalize,
    ])
Example #5
0
    def __init__(self, outputs=6):
        """Set up a ResNet34 backbone + linear head, load the PANDA train
        split and build train/valid augmentation pipelines.

        NOTE(review): dataset CSV and image paths are hard-coded absolute
        paths — this constructor reads from disk at import/instantiation
        time and will fail outside the original machine.
        """
        super().__init__()
        # Pretrained ResNet34 with an extra projection to `outputs` classes.
        self.net = models.resnet34(True)
        self.linear = Linear(1000, outputs)

        df = pd.read_csv("/home/dipet/kaggle/prostate/input/prostate-cancer-grade-assessment/train.csv")
        # 80/20 random train/validation split of the metadata.
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        self.data_dir = "/datasets/panda/train_64_100"

        # Augmentations fire as one group with p=0.5, then scale to float.
        self.train_transforms = A.Compose(
            [
                A.Compose(
                    [
                        A.OneOf([A.GaussNoise(), A.MultiplicativeNoise(elementwise=True)]),
                        A.RandomBrightnessContrast(0.02, 0.02),
                        A.HueSaturationValue(0, 10, 10),
                        A.Flip(),
                        A.RandomGridShuffle(grid=(10, 10)),
                        A.GridDistortion(),
                        A.Rotate()
                    ],
                    p=0.5,
                ),
                A.ToFloat(),
            ]
        )
        self.valid_transforms = A.Compose([A.ToFloat()])
    def augment_img(self, img, mask, mat, idx_item=None):
        """ img already in float32 BGR format, not uint8

        Optionally flips image/mask/regression-target horizontally around
        the camera principal point, desaturates, then applies gamma and
        multiplicative-noise augmentations to the image only.

        Returns (augmented image, flipped mask, flipped mat).
        """

        # horizontal flip
        # NOTE(review): np.random.uniform() is in [0, 1), so `p_flip > 1`
        # is never true — the flip branch is effectively disabled (the
        # "# 0.33:" remnant suggests it was once enabled with p≈0.67).
        p_flip = np.random.uniform()  # in [0,1)
        if p_flip > 1:  # 0.33:
            uv_cx = np.array([1686.2379, 0])
            IMG_SHAPE = (2710, 3384, 3)  # img.shape = h,w,c
            uv_cx_new = self.convert_uv_to_uv_preprocessed(uv_cx, IMG_SHAPE)
            cx_mat = uv_cx_new[0]
            cx_img = cx_mat * self.factor_downsample
            img_flipped = scripts.flip_image_hor.flip_hor_at_u(img, cx_img)
            mask_flipped = scripts.flip_image_hor.flip_hor_at_u(mask, cx_img)
            mat_flipped = scripts.flip_image_hor.flip_hor_at_u(mat, cx_mat)
            # Mirror the pose channels so targets stay consistent after flip.
            mat_flipped[:, :, 4] *= -1  # x
            mat_flipped[:, :, 2] *= -1  # sin(yaw)
            mat_flipped[:, :, 3] *= -1  # roll
        else:
            img_flipped = img
            mask_flipped = mask
            mat_flipped = mat

        # grayish - change HSV values
        # NOTE(review): same pattern — `p_sat > 1` never fires, so
        # desaturation is disabled as written.
        p_sat = np.random.uniform()  # in [0,1)
        if p_sat > 1:
            img_desat = reduce_saturation(img_flipped, sat_shift_range=(-0.15, 0))
        else:
            img_desat = img_flipped

        # gamma change
        aug_gamma = albumentations.RandomGamma(gamma_limit=(85, 115),
                                               p=0.33,
                                               )

        # multiplicative noise (comment previously said "gaussian")
        aug_noise = albumentations.MultiplicativeNoise(multiplier=(0.90, 1.00),
                                                       elementwise=True,
                                                       per_channel=True,
                                                       p=0.33,
                                                       )

        # apply all augmentations to image
        aug_tot = albumentations.Compose([aug_gamma, aug_noise], p=1)
        img_augmented = aug_tot(image=img_desat)['image']

        # for debugging purposes — permanently disabled debug plotting
        if False:
            fig, ax = plt.subplots(3, 2, figsize=(9, 6))
            ax[0][0].imshow(img[:, :, ::-1])
            ax[0][1].imshow(img_augmented[:, :, ::-1])
            ax[1][0].imshow(mat[:, :, 0])
            ax[1][1].imshow(mat_flipped[:, :, 0])
            ax[2][0].imshow(mat[:, :, 4])  # x
            ax[2][1].imshow(mat_flipped[:, :, 4])
            # fig.tight_layout()
            plt.show()
            fig.savefig('plots_aug/{:05d}.png'.format(idx_item))

        return img_augmented, mask_flipped, mat_flipped
Example #7
0
 def __init__(self):
     """Pair an albumentations transform with its imgaug counterpart."""
     # Per-pixel, per-channel multiplicative noise, always applied.
     self.aug = A.MultiplicativeNoise(
         (0, 1), per_channel=True, elementwise=True, p=1)
     # Equivalent imgaug transform, used for comparison.
     self.imgaug_transform = iaa.MultiplyElementwise(
         mul=(0, 1), per_channel=True)
Example #8
0
def r_ss_roate_m_blur(p=1.0):
    """Compose ShiftScaleRotate, MedianBlur and MultiplicativeNoise,
    each — and the composition itself — applied with probability ``p``."""
    steps = [
        albumentations.ShiftScaleRotate(p=p),
        albumentations.MedianBlur(p=p),
        albumentations.MultiplicativeNoise(p=p),
    ]
    return albumentations.Compose(steps, p=p)
Example #9
0
 def __init__(self):
     """Build the noise-augmentation pipeline; each transform fires with
     probability 0.8 and the composition always runs (p=1).

     NOTE(review): IAAAdditiveGaussianNoise is deprecated/removed in newer
     albumentations releases; kept verbatim for behavioral compatibility.
     """
     self.aug = A.Compose([
         A.IAAAdditiveGaussianNoise(scale=(1., 5.), p=0.8),
         A.GaussNoise(var_limit=(10.0, 300.0), p=0.8),
         A.MultiplicativeNoise(p=0.8),
     ],
                          p=1)
     # Removed a dead trailing `pass` statement (no effect after the
     # assignment above).
Example #10
0
def pet_augmentation():
    """Resize to 320x320, random horizontal flip, elementwise
    multiplicative noise and a light Gaussian blur."""
    # NOTE(review): this definition is shadowed by the second
    # `pet_augmentation` defined immediately below it, so as written
    # this version is dead code — confirm which one is intended.
    transform_list = [
        albu.Resize(320, 320),
        albu.HorizontalFlip(p=0.5),
        albu.MultiplicativeNoise(p=0.7, multiplier=(0.8, 1.2), elementwise=True),
        albu.GaussianBlur(p=0.5, blur_limit=3)
    ]
    return albu.Compose(transform_list)
def pet_augmentation():
    """Resize to 320x320 with flip, stronger blur and a darkening
    elementwise multiplicative noise (multiplier below 1)."""
    return albu.Compose([
        albu.Resize(320, 320),
        albu.HorizontalFlip(p=0.5),
        albu.GaussianBlur(blur_limit=7, p=0.3, always_apply=False),
        albu.MultiplicativeNoise(p=0.7,
                                 multiplier=(0.85, 0.9),
                                 elementwise=True),
    ])
def get_train_transforms(p=1.0):
    """Training pipeline: optional center crop, resize to IMG_SIZE,
    flips/transpose, GridMask, blur, geometric warp, color jitter,
    multiplicative noise, normalize and tensor conversion.

    ``p`` is the probability of applying the whole composition.

    NOTE(review): GridMask, IMG_SIZE, mean and std come from elsewhere in
    the module — confirm their definitions before reuse.
    """
    return A.Compose(
        [
            # Occasionally crop to 2/3 or 3/4 of the image before resizing.
            A.OneOf(
                [
                    A.CenterCrop(2 * IMG_SIZE // 3, 2 * IMG_SIZE // 3, p=0.5),
                    A.CenterCrop(3 * IMG_SIZE // 4, 3 * IMG_SIZE // 4, p=0.5),
                ],
                p=0.33,
            ),
            A.Resize(
                IMG_SIZE, IMG_SIZE, interpolation=1, always_apply=True, p=1),
            A.Flip(),
            A.Transpose(),
            GridMask(num_grid=(1, 4), rotate=15, p=0.33),
            # Light blur.
            A.OneOf(
                [
                    A.MedianBlur(blur_limit=3, p=0.5),
                    A.Blur(blur_limit=3, p=0.5),
                ],
                p=0.5,
            ),
            # Geometric jitter.
            A.OneOf(
                [
                    A.ShiftScaleRotate(
                        interpolation=1,
                        shift_limit=0.05,
                        scale_limit=0.1,
                        rotate_limit=15,
                        p=0.5,
                    ),
                    A.IAAPiecewiseAffine(scale=(0.02, 0.04), p=0.5),
                ],
                p=0.33,
            ),
            # Color jitter.
            A.OneOf(
                [
                    A.HueSaturationValue(
                        hue_shift_limit=20,
                        sat_shift_limit=30,
                        val_shift_limit=20,
                        p=0.5,
                    ),
                    A.RandomBrightnessContrast(p=0.5),
                ],
                p=0.5,
            ),
            A.MultiplicativeNoise(
                multiplier=[0.9, 1.1], elementwise=True, p=0.3),
            A.Normalize(mean, std, max_pixel_value=255.0, always_apply=True),
            ToTensorV2(p=1.0),
        ],
        p=p,
    )
Example #13
0
def augment(img_size, mean=(0.5,) * 3, std=(0.5,) * 3):
    """Build the training augmentation pipeline.

    Args:
        img_size: side length for the initial square resize.
        mean, std: per-channel normalization statistics. Defaults changed
            from the original mutable ``[0.5] * 3`` lists to immutable
            tuples (same values) — mutable default arguments are a Python
            footgun.

    Returns:
        An ``A.Compose`` applying resize, flips, brightness/contrast,
        color shifts, one noise/blur transform, then normalization.
    """
    # Exactly one geometric flip/transpose fires.
    aug_seq1 = A.OneOf([
        A.HorizontalFlip(p=1.0),
        A.VerticalFlip(p=1.0),
        A.Transpose(p=1.0),
    ],
                       p=1.0)
    # Brightness/contrast jitter (single-member OneOf kept for parity).
    aug_seq2 = A.OneOf(
        [
            A.RandomBrightnessContrast(always_apply=False,
                                       p=1.0,
                                       brightness_limit=(-0.2, 0.2),
                                       contrast_limit=(-0.2, 0.2),
                                       brightness_by_max=True),
        ],
        p=1.0)
    # Color shifts.
    aug_seq3 = A.OneOf([
        A.RGBShift(always_apply=False,
                   p=1.0,
                   r_shift_limit=(-10, 10),
                   g_shift_limit=(-10, 10),
                   b_shift_limit=(-10, 10)),
        A.HueSaturationValue(always_apply=False,
                             p=1.0,
                             hue_shift_limit=(-4, 4),
                             sat_shift_limit=(-30, 30),
                             val_shift_limit=(-20, 20)),
    ],
                       p=1.0)
    # Noise / blur. The multiplier bounds are float32 representations of
    # (0.9, 1.1); kept verbatim to preserve behavior exactly.
    aug_seq4 = A.OneOf([
        A.MultiplicativeNoise(
            always_apply=False,
            p=1.0,
            multiplier=(0.8999999761581421, 1.100000023841858),
            per_channel=True,
            elementwise=True),
        A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
        A.GaussNoise(always_apply=False, p=1.0, var_limit=(10.0, 50.0)),
        A.Blur(always_apply=False, p=1.0, blur_limit=(3, 7)),
    ],
                       p=1.0)
    aug_seq = A.Compose([
        A.Resize(img_size, img_size),
        aug_seq1,
        aug_seq2,
        aug_seq3,
        aug_seq4,
        A.Normalize(mean=mean, std=std),
    ])
    return aug_seq
Example #14
0
 def get_transforms(stage: str = None, mode: str = None):
     """Return the pipeline for ``mode``: 'train' applies geometry,
     distortion, a custom MorphologyGradient, noise, normalize, GridMask
     and tensor conversion; 'valid' applies MorphologyGradient + normalize
     + tensor only. Any other mode raises ValueError. ``stage`` is unused.

     NOTE(review): MorphologyGradient and GridMask are project-local
     transforms; TRAIN_MEAN/TRAIN_STD are defined elsewhere — verify.
     """
     if mode == 'train':
         return albumentations.Compose([
             # transformations
             albumentations.ShiftScaleRotate(
                 scale_limit=0.2,
                 rotate_limit=25,
                 border_mode=cv2.BORDER_CONSTANT,
                 value=0,
                 p=1.0),
             # distortion
             albumentations.OneOf([
                 albumentations.OpticalDistortion(1.2, p=1.0),
                 albumentations.GridDistortion(
                     8,
                     0.06,
                     border_mode=cv2.BORDER_CONSTANT,
                     value=0,
                     p=1.0),
                 albumentations.ElasticTransform(
                     sigma=10,
                     alpha=1,
                     alpha_affine=10,
                     border_mode=cv2.BORDER_CONSTANT,
                     value=0,
                     p=1.0),
             ],
                                  p=3 / 4),
             # custom
             MorphologyGradient(binarize=True, p=1.0),
             # add noise
             albumentations.OneOf([
                 albumentations.GaussNoise((0, 150), p=1.0),
                 albumentations.MultiplicativeNoise(p=1.0),
             ],
                                  p=2 / 3),
             # common
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             GridMask(5, rotate=45, p=0.9),
             ToTensorV2(),
         ])
     elif mode == 'valid':
         return albumentations.Compose([
             MorphologyGradient(binarize=True, p=1.0),
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             ToTensorV2(),
         ])
     else:
         raise ValueError('mode is %s' % mode)
def hard_color_augmentations():
    """Aggressive color augmentations: brightness/contrast, gamma, one
    noise variant, one hue/RGB shift variant, and random fog."""
    noise_choice = A.OneOf(
        [A.NoOp(),
         A.MultiplicativeNoise(),
         A.GaussNoise(),
         A.ISONoise()])
    color_choice = A.OneOf([A.RGBShift(), A.HueSaturationValue(),
                            A.NoOp()])
    return A.Compose([
        A.RandomBrightnessContrast(brightness_limit=0.3,
                                   contrast_limit=0.3,
                                   brightness_by_max=True),
        A.RandomGamma(gamma_limit=(90, 110)),
        noise_choice,
        color_choice,
        A.RandomFog(fog_coef_lower=0.05, fog_coef_upper=0.3),
    ])
Example #16
0
def get_augmentation(save_path=None, load_path=None):
    """Return an augmentation pipeline.

    If ``load_path`` is given, deserialize and return it via ``A.load``.
    Otherwise build the default pipeline, optionally serialize it to
    ``save_path`` with ``A.save``, and return it.
    """
    if load_path:
        return A.load(load_path)

    # Geometric transforms — built but deliberately left out of the final
    # Compose, matching the original (disabled) configuration.
    aug_seq1 = A.OneOf([
        A.Rotate(limit=(-90, 90), p=1.0),
        A.Flip(p=1.0),
        A.OpticalDistortion(always_apply=False, p=1.0, distort_limit=(-0.3, 0.3),
                            shift_limit=(-0.05, 0.05), interpolation=3,
                            border_mode=3, value=(0, 0, 0), mask_value=None),
    ], p=1.0)

    # Color shifts / brightness-contrast.
    aug_seq2 = A.OneOf([
        A.RGBShift(r_shift_limit=15, g_shift_limit=15,
                   b_shift_limit=15, p=1.0),
        A.RandomBrightnessContrast(always_apply=False, p=1.0,
                                   brightness_limit=(-0.2, 0.2),
                                   contrast_limit=(-0.2, 0.2),
                                   brightness_by_max=True),
    ], p=1.0)

    # Noise variants.
    aug_seq3 = A.OneOf([
        A.GaussNoise(always_apply=False, p=1.0, var_limit=(10, 50)),
        A.ISONoise(always_apply=False, p=1.0,
                   intensity=(0.1, 1.0), color_shift=(0.01, 0.3)),
        A.MultiplicativeNoise(always_apply=False, p=1.0,
                              multiplier=(0.8, 1.6),
                              per_channel=True, elementwise=True),
    ], p=1.0)

    # Equalize / invert / motion blur / fog.
    aug_seq4 = A.OneOf([
        A.Equalize(always_apply=False, p=1.0,
                   mode='pil', by_channels=True),
        A.InvertImg(always_apply=False, p=1.0),
        A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
        A.RandomFog(always_apply=False, p=1.0,
                    fog_coef_lower=0.01, fog_coef_upper=0.2, alpha_coef=0.2),
    ], p=1.0)

    aug_seq = A.Compose([
        aug_seq2,
        aug_seq3,
        aug_seq4,
    ])
    if save_path:
        A.save(aug_seq, save_path)
    return aug_seq
Example #17
0
def main():
  """Resize train image/mask pairs and write them into sharded TFRecord
  files (one shard per batch-size split, capped at 4 shards).

  NOTE(review): `color_dic`, `empty` and `transforms` are built but never
  used — the augmentation calls below are commented out. Also,
  `save_path` (under 'dataset/') is computed but the writer opens
  `tfrecord_fname` in the current directory instead — likely a bug;
  confirm the intended output location.
  """
  size = (432,432)
  color_dic = {1:[255,255,255]}

  img_paths = [p.replace('\\', '/') for p in glob('dataset/train/img_aug/**', recursive=True) if os.path.isfile(p)]
  mask_paths = list(map(lambda x: x.replace('/img_aug/', '/mask_aug/'), img_paths))

  batch_size = 16
  splits = math.ceil(len(img_paths)/batch_size)

  empty = []

  # albumentation
  # https://qiita.com/kurilab/items/b69e1be8d0224ae139ad
  transforms = albu.OneOf([
                  albu.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=90),
                  albu.GaussNoise(),
                  albu.ISONoise(intensity=(0.7,0.9)),
                  albu.Downscale(),
                  albu.ElasticTransform(),
                  albu.GaussianBlur(),
                  albu.MultiplicativeNoise(multiplier=(2.0,3.0)),
                  ])

  for i in range(splits):
    tfrecord_fname = '_record_' + str(i) + '.tfrecord'
    save_path = os.path.join('dataset', tfrecord_fname)

    # Create one TFRecord file per (number of images / batch size) split.
    with tf.io.TFRecordWriter(tfrecord_fname) as writer:
      for img_d, mask_d in zip(img_paths[i::splits], mask_paths[i::splits]):
        # Image transformation.
        img = cv2.imread(img_d)
        mask = cv2.imread(mask_d)
        #augmented = transforms(image=img, mask=mask)
        #img, mask = augmented['image'], augmented['mask']
        img = cv2.resize(img, (size[0], size[1]), cv2.INTER_NEAREST)
        mask = cv2.resize(mask, (size[0], size[1]), cv2.INTER_NEAREST)
        # Convert to byte strings.
        img = np2byte(img)
        mask = np2byte(mask)
        #img = np2byte(np.float32(img/127.5 - 1))
        #mask = np2byte(convert_mask(mask, color_dic))
        # Serialize and write out.
        proto = serialize_example(img, mask)
        writer.write(proto.SerializeToString())
    if i>2 : break
Example #18
0
def dataset_transforms():
    """Light training transforms: brightness/contrast, one blur variant,
    one noise variant, then tensor conversion."""
    prob = 1. / 3.
    blur_choice = A.OneOf([
        A.MotionBlur(blur_limit=3),
        A.MedianBlur(blur_limit=3),
        A.Blur(blur_limit=3),
    ], p=prob)
    noise_choice = A.OneOf([
        A.MultiplicativeNoise(),
        A.GaussNoise(),
    ], p=prob)
    return A.Compose([
        A.RandomBrightnessContrast(p=prob),
        blur_choice,
        noise_choice,
        ToTensorV2(),
    ])
def hard_transforms(crop_size=512):
    """Heavy segmentation-training augmentations: rotate, non-empty-mask
    crop of ``crop_size``, shadows, hue shift, noise, blur, contrast,
    JPEG compression and cutout."""
    return albu.Compose([
        # Rotation without shift, constant black border for image and mask.
        albu.ShiftScaleRotate(shift_limit=0,
                              scale_limit=0.1,
                              rotate_limit=180,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=0,
                              mask_value=0),
        albu.CropNonEmptyMaskIfExists(crop_size, crop_size, p=1),
        albu.RandomShadow(shadow_roi=(0, 0, 1, 1),
                          num_shadows_lower=1,
                          num_shadows_upper=4,
                          shadow_dimension=7,
                          always_apply=False,
                          p=0.5),
        albu.HueSaturationValue(p=0.3),
        # One noise variant.
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(),
            albu.GaussNoise(),
            albu.MultiplicativeNoise(
                multiplier=[0.5, 1.5], per_channel=True, p=1)
        ],
                   p=0.3),
        # One blur variant.
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
                   p=0.2),
        # One contrast/sharpness variant.
        albu.OneOf([
            albu.CLAHE(clip_limit=2),
            albu.IAASharpen(),
            albu.IAAEmboss(),
            albu.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.3),
            albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        ],
                   p=0.3),
        albu.JpegCompression(quality_lower=40, quality_upper=100, p=0.5),
        albu.Cutout(
            num_holes=25, max_h_size=5, max_w_size=5, fill_value=0, p=0.3),
    ],
                        p=1)
Example #20
0
def get_train_transforms_v2():
    """Training transforms v2: resize to config.SIZE plus mild noise,
    contrast jitter, optical distortion, blur and grid shuffle."""
    noise_choice = A.OneOf(
        [
            A.GaussNoise(var_limit=1.15),
            A.MultiplicativeNoise(multiplier=1.1),
        ],
        p=0.2,
    )
    return A.Compose(
        [
            A.Resize(config.SIZE, config.SIZE),
            noise_choice,
            A.RandomBrightnessContrast(
                contrast_limit=0.12, brightness_limit=0.12, p=0.2),
            A.OpticalDistortion(distort_limit=0.07, shift_limit=0.07, p=0.25),
            A.GaussianBlur(p=0.15),
            A.RandomGridShuffle(grid=(4, 4), p=0.2),
            ToTensorV2(),
        ]
    )
def even_more_transform(height, width, mappings, p=2 / 3):
    """Quality-degradation + color augmentation pipeline applied with
    overall probability 0.9 and extended to ``mappings`` targets.

    NOTE(review): ``scale`` is sampled once when the pipeline is BUILT,
    so the Resize downscale factor is fixed for the pipeline's lifetime
    rather than re-drawn per image — confirm that is intended.
    NOTE(review): the ``p`` parameter is accepted but not used anywhere
    in the body (the Compose hard-codes p=0.9).
    """
    scale = random.randint(2, 4)
    return Compose([
        # One of: JPEG artifacts, downscale+upscale, or plain downsize.
        OneOf([
            JpegCompression(quality_lower=20, quality_upper=70, p=0.5),
            Downscale(scale_min=0.25, scale_max=0.50, interpolation=1, p=0.5),
            Resize(height // scale, width // scale, interpolation=1, p=1.0)
        ],
              p=0.6),
        HorizontalFlip(p=0.5),
        A.augmentations.transforms.GaussNoise(p=0.2),
        A.RandomBrightnessContrast(p=0.2),
        A.RandomGamma(p=0.2),
        A.CLAHE(p=0.2),
        A.ChannelShuffle(p=0.2),
        A.MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True, p=0.1),
        A.HueSaturationValue(
            hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.2),
    ],
                   p=0.9,
                   additional_targets=mappings)
Example #22
0
def get_image_augmentation():
    """ Augmentations just for input and output images (not for masks) """
    # Quality degradation: blur, sensor noise, compression artifacts.
    degradation = albu.OneOf([
        albu.Blur(p=0.2, blur_limit=(3, 5)),
        albu.GaussNoise(p=0.2, var_limit=(10.0, 50.0)),
        albu.ISONoise(p=0.2, intensity=(0.1, 0.5), color_shift=(0.01, 0.05)),
        albu.ImageCompression(p=0.2, quality_lower=90, quality_upper=100, compression_type=0),
        albu.MultiplicativeNoise(p=0.2, multiplier=(0.9, 1.1), per_channel=True, elementwise=True),
    ], p=1)
    # Color adjustments: hue/brightness/gamma shifts or tone conversion.
    color = albu.OneOf([
        albu.HueSaturationValue(p=0.2, hue_shift_limit=(-10, 10), sat_shift_limit=(-10, 10), val_shift_limit=(-10, 10)),
        albu.RandomBrightness(p=0.3, limit=(-0.1, 0.1)),
        albu.RandomGamma(p=0.3, gamma_limit=(80, 100), eps=1e-07),
        albu.ToGray(p=0.1),
        albu.ToSepia(p=0.1),
    ], p=1)
    extra_targets = {
        'image1': 'image',
        'image2': 'image'
    }
    return albu.Compose([degradation, color], additional_targets=extra_targets)
Example #23
0
    def __init__(self, folds, img_height, img_width, mean, std):
        """Load the fold-filtered Bengali-grapheme training metadata and
        build the augmentation pipeline.

        Args:
            folds: fold ids to keep. A single fold selects the light
                (validation) pipeline; multiple folds select the heavy
                training pipeline.
            img_height, img_width: target resize dimensions.
            mean, std: normalization statistics.
        """
        df = pd.read_csv('../input/train_folds.csv')
        df = df[[
            'image_id', 'grapheme_root', 'vowel_diacritic',
            'consonant_diacritic', 'kfold'
        ]]

        df = df[df.kfold.isin(folds)].reset_index(drop=True)

        self.image_ids = df.image_id.values

        self.grapheme_root = df.grapheme_root.values
        self.vowel_diacritic = df.vowel_diacritic.values
        # Fix: `.values` was missing here, leaving a pandas Series while
        # the sibling label attributes above are numpy arrays.
        self.consonant_diacritic = df.consonant_diacritic.values

        if len(folds) == 1:
            self.aug = albumentations.Compose([
                albumentations.Resize(img_height, img_width),
                albumentations.Normalize(mean, std, always_apply=True)
            ])
        else:
            self.aug = albumentations.Compose([
                albumentations.Resize(img_height, img_width),
                albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                                scale_limit=0.1,
                                                rotate_limit=5,
                                                p=0.9),
                albumentations.Rotate(limit=5),
                albumentations.RandomContrast(limit=0.2),
                albumentations.GaussianBlur(blur_limit=7),
                albumentations.RandomGamma(),
                albumentations.RandomShadow(),
                albumentations.GaussNoise(),
                albumentations.ChannelShuffle(),
                #albumentations.Cutout(),
                albumentations.Equalize(),
                albumentations.MultiplicativeNoise(),
                albumentations.Normalize(mean, std, always_apply=True)
            ])
Example #24
0
 def get_transforms(stage: str = None, mode: str = None):
     """Return the pipeline for ``mode``: 'train' applies every
     augmentation unconditionally (all p=1.0), 'valid' only
     normalize + tensor; any other mode raises ValueError.
     ``stage`` is unused.

     NOTE(review): SIZE, TRAIN_MEAN and TRAIN_STD are defined elsewhere
     in the module — verify their values before reuse.
     """
     if mode == 'train':
         return albumentations.Compose([
             # blur
             albumentations.Blur((1, 4), p=1.0),
             # transformations
             albumentations.ShiftScaleRotate(
                 scale_limit=0.2,
                 rotate_limit=25,
                 border_mode=cv2.BORDER_CONSTANT,
                 value=0,
                 p=1.0),
             # cut and drop
             albumentations.Cutout(num_holes=10,
                                   max_h_size=SIZE // 6,
                                   max_w_size=SIZE // 6,
                                   p=1.0),
             # distortion
             albumentations.OpticalDistortion(0.6, p=1.0),
             albumentations.GridDistortion(8,
                                           0.06,
                                           border_mode=cv2.BORDER_CONSTANT,
                                           value=0,
                                           p=1.0),
             # add noise
             albumentations.GaussNoise((0, 250), p=1.0),
             albumentations.MultiplicativeNoise(p=1.0),
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             ToTensorV2(),
         ])
     elif mode == 'valid':
         return albumentations.Compose([
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             ToTensorV2(),
         ])
     else:
         raise ValueError('mode is %s' % mode)
Example #25
0
def hard_transforms():
    """Heavy bbox-safe augmentations: rotation, safe crop, inversion,
    hue shift, noise, blur, contrast, JPEG compression and cutout.
    Bounding boxes are carried through in pascal_voc format."""
    noise_choice = albu.OneOf([
        albu.IAAAdditiveGaussianNoise(),
        albu.GaussNoise(),
        albu.MultiplicativeNoise(
            multiplier=[0.5, 1.5], per_channel=True, p=1),
    ], p=0.3)
    blur_choice = albu.OneOf([
        albu.MotionBlur(p=0.2),
        albu.MedianBlur(blur_limit=3, p=0.1),
        albu.Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    contrast_choice = albu.OneOf([
        albu.CLAHE(clip_limit=2),
        albu.IAASharpen(),
        albu.IAAEmboss(),
        albu.RandomBrightnessContrast(
            brightness_limit=0.2, contrast_limit=0.2, p=0.3),
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
    ], p=0.3)
    return albu.Compose([
        albu.Rotate(limit=30,
                    interpolation=cv2.INTER_LINEAR,
                    border_mode=cv2.BORDER_CONSTANT,
                    value=(0, 0, 0)),
        albu.RandomSizedBBoxSafeCrop(width=64, height=64, erosion_rate=0.2),
        albu.InvertImg(p=0.3),
        albu.HueSaturationValue(p=0.3),
        noise_choice,
        blur_choice,
        contrast_choice,
        albu.JpegCompression(quality_lower=30, quality_upper=100, p=0.5),
        albu.Cutout(
            num_holes=10, max_h_size=5, max_w_size=5, fill_value=0, p=0.5),
    ], p=1, bbox_params=albu.BboxParams(format='pascal_voc'))
Example #26
0
def training_augmentation():
    """Build the training augmentation pipeline.

    Returns an ``A.Compose`` that always applies additive Gaussian noise and
    a fixed 1.5x multiplicative brightening, then (each with p=0.5) one of
    several contrast adjustments and one of sharpen/blur, and finally a
    horizontal flip with p=0.7.

    Fix: the original passed ``p=1.5`` to two transforms. A probability must
    lie in [0, 1]; albumentations tests ``random() < p``, so 1.5 behaved as
    "always apply" — clamped to the valid ``p=1`` with identical effect.
    """
    train_transform = [
        # p=1: always apply (was the invalid p=1.5).
        A.IAAAdditiveGaussianNoise(p=1),
        A.MultiplicativeNoise(multiplier=1.5, p=1),

        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.5,
        ),

        A.OneOf(
            [
                # p=1: always apply when selected (was the invalid p=1.5).
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
            ],
            p=0.5,
        ),
        A.HorizontalFlip(p=0.7)
    ]
    return A.Compose(train_transform)
Example #27
0
    A.RGBShift(always_apply=False,
               p=1.0,
               r_shift_limit=(-10, 10),
               g_shift_limit=(-10, 10),
               b_shift_limit=(-10, 10)),
    A.HueSaturationValue(always_apply=False,
                         p=1.0,
                         hue_shift_limit=(-4, 4),
                         sat_shift_limit=(-30, 30),
                         val_shift_limit=(-20, 20)),
],
                   p=1.0)
aug_seq4 = A.OneOf([
    A.MultiplicativeNoise(always_apply=False,
                          p=1.0,
                          multiplier=(0.8999999761581421, 1.100000023841858),
                          per_channel=True,
                          elementwise=True),
    A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
    A.GaussNoise(always_apply=False, p=1.0, var_limit=(10.0, 50.0)),
    A.Blur(always_apply=False, p=1.0, blur_limit=(3, 7)),
],
                   p=1.0)
aug_seq = A.Compose([
    A.Resize(img_size, img_size),
    # aug_seq1,
    # aug_seq2,
    # aug_seq3,
    # aug_seq4,
    A.Normalize(mean=mean, std=std),
    # A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    def __call__(self, example):
        """Preprocess and randomly augment one dataset example.

        Parameters
        ----------
        example :
            In train mode (``self.train``), a ``(x, y)`` pair; otherwise the
            image ``x`` alone. ``x`` must end up 2-D grayscale before the
            albumentations stage (asserted below).

        Returns
        -------
        ``(x, y)`` with ``y`` cast to int64 in train mode, else just ``x``.
        The image is returned as float32 with a leading channel axis
        ``(1, H, W)``.

        NOTE(review): each ``*_ratio`` attribute gates its augmentation via
        ``_evaluate_ratio`` — presumably a Bernoulli draw; confirm in its
        definition.
        """
        if self.train:
            x, y = example
        else:
            x = example

        # AugMix needs a 3-channel image: convert gray -> RGB, apply,
        # then convert back to grayscale.
        if _evaluate_ratio(self.augmix_ratio):
            x = cv2.cvtColor(x, cv2.COLOR_GRAY2RGB)
            x = apply_aug(
                RandomAugMix(severity=3,
                             width=3,
                             depth=-1,
                             alpha=1.,
                             always_apply=False,
                             p=1), x)
            x = cv2.cvtColor(x.astype(np.uint8), cv2.COLOR_RGB2GRAY)

        # --- Train/Test common preprocessing ---
        # Optional crop-to-content, resize (rescaled to [0, 1] via /255),
        # and additive Gaussian noise.
        if self.crop:
            x = crop_char_image(x, threshold=self.threshold)
        if self.size is not None:
            x = resize(x, size=self.size) / 255
        if self.sigma > 0.:
            x = add_gaussian_noise(x, sigma=self.sigma)

        # --- Augmentation ---
        if self.affine:
            x = affine_image(x)

        # albumentations stage: requires a single-channel float32 image.
        x = x.astype(np.float32)
        assert x.ndim == 2

        # One of four blur variants, chosen uniformly (25% each).
        if _evaluate_ratio(self.blur_ratio):
            r = np.random.uniform()
            if r < 0.25:
                x = apply_aug(A.Blur(p=1.0), x)
            elif r < 0.5:
                x = apply_aug(A.MedianBlur(blur_limit=5, p=1.0), x)
            elif r < 0.75:
                x = apply_aug(A.GaussianBlur(p=1.0), x)
            else:
                x = apply_aug(A.MotionBlur(p=1.0), x)

        # Additive Gaussian noise or multiplicative noise (50/50).
        if _evaluate_ratio(self.noise_ratio):
            r = np.random.uniform()
            if r < 0.50:
                x = apply_aug(A.GaussNoise(var_limit=5. / 255., p=1.0), x)
            else:
                x = apply_aug(A.MultiplicativeNoise(p=1.0), x)

        if _evaluate_ratio(self.cutout_ratio):
            # A.Cutout(num_holes=2,  max_h_size=2, max_w_size=2, p=1.0)  # Deprecated...
            x = apply_aug(
                A.CoarseDropout(max_holes=10, max_height=8, max_width=8,
                                p=1.0), x)

        if _evaluate_ratio(self.grid_distortion_ratio):
            x = apply_aug(A.GridDistortion(p=1.0), x)

        if _evaluate_ratio(self.elastic_distortion_ratio):
            x = apply_aug(
                A.ElasticTransform(sigma=50, alpha=1, alpha_affine=10, p=1.0),
                x)

        if _evaluate_ratio(self.random_brightness_ratio):
            # A.RandomBrightness(p=1.0)  # Deprecated...
            # A.RandomContrast(p=1.0)    # Deprecated...
            x = apply_aug(A.RandomBrightnessContrast(p=1.0), x)

        if _evaluate_ratio(self.piece_affine_ratio):
            x = apply_aug(A.IAAPiecewiseAffine(p=1.0), x)

        # Shift/scale/rotate as the final geometric augmentation.
        if _evaluate_ratio(self.ssr_ratio):
            x = apply_aug(
                A.ShiftScaleRotate(shift_limit=0.0625,
                                   scale_limit=0.1,
                                   rotate_limit=30,
                                   p=1.0), x)

        # Standardize with fixed constants — presumably the dataset
        # mean/std; confirm against the training statistics.
        if self.normalize:
            x = (x.astype(np.float32) - 0.0692) / 0.2051
        # Add the channel axis expected by the model: (H, W) -> (1, H, W).
        if x.ndim == 2:
            x = x[None, :, :]
        x = x.astype(np.float32)
        if self.train:
            y = y.astype(np.int64)
            return x, y
        else:
            return x
Example #29
0
 def get_transforms(stage: str = None, mode: str = None):
     """Return the albumentations pipeline for the requested *mode*.

     'train' -> geometric jitter, cut/drop, blur, distortion and noise,
     followed by normalization and tensor conversion; 'valid' -> only
     normalization and tensor conversion. Any other mode raises ValueError.
     """
     # Shared tail of both pipelines.
     finalize = [
         albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
         ToTensorV2(),
     ]

     if mode == 'valid':
         return albumentations.Compose(finalize)
     if mode != 'train':
         raise ValueError('mode is %s' % mode)

     # Occlusion: one of cutout / coarse-dropout / grid mask.
     cut_and_drop = albumentations.OneOf([
         albumentations.Cutout(num_holes=10,
                               max_h_size=SIZE // 10,
                               max_w_size=SIZE // 10,
                               p=1.0),
         albumentations.CoarseDropout(max_holes=10,
                                      max_height=SIZE // 10,
                                      max_width=SIZE // 10,
                                      p=1.0),
         GridMask((3, 5), rotate=45, p=1.0),
     ],
                                        p=0.9)
     # Blur: one of box / Gaussian / median.
     blur = albumentations.OneOf([
         albumentations.Blur((1, 4), p=1.0),
         albumentations.GaussianBlur(3, p=1.0),
         albumentations.MedianBlur(blur_limit=5, p=1.0),
     ],
                                 p=0.5)
     # Warp: one of optical / grid / elastic distortion.
     distortion = albumentations.OneOf([
         albumentations.OpticalDistortion(
             0.6, border_mode=cv2.BORDER_CONSTANT, p=1.0),
         albumentations.GridDistortion(
             8,
             0.06,
             border_mode=cv2.BORDER_CONSTANT,
             value=0,
             p=1.0),
         albumentations.ElasticTransform(
             sigma=10,
             alpha=1,
             alpha_affine=10,
             border_mode=cv2.BORDER_CONSTANT,
             value=0,
             p=1.0),
     ],
                                       p=0.9)
     # Noise: additive Gaussian or multiplicative.
     noise = albumentations.OneOf([
         albumentations.GaussNoise((0, 50), p=1.0),
         albumentations.MultiplicativeNoise(p=1.0),
     ],
                                  p=0.8)

     return albumentations.Compose([
         albumentations.ShiftScaleRotate(
             scale_limit=0.2,
             rotate_limit=10,
             border_mode=cv2.BORDER_CONSTANT,
             value=0,
             p=1.0),
         cut_and_drop,
         blur,
         distortion,
         noise,
     ] + finalize)
Example #30
0
def get_transform_imagenet(use_albu_aug):
    """Create the (train, test) transform pair for ImageNet-style input.

    When *use_albu_aug* is truthy, the train pipeline is an albumentations
    Compose with a rich menu of OneOf augmentation groups; otherwise a plain
    torchvision pipeline is used. Either way the train transform is wrapped
    in the project's multi-view transform class before being returned.
    """
    if use_albu_aug:
        # Each group below fires with p=0.1 inside the outer OneOf menu.
        geometry = al.OneOf(
            [
                al.ShiftScaleRotate(
                    border_mode=cv2.BORDER_CONSTANT,
                    rotate_limit=30),
                al.OpticalDistortion(
                    border_mode=cv2.BORDER_CONSTANT,
                    distort_limit=5.0,
                    shift_limit=0.1),
                al.GridDistortion(border_mode=cv2.BORDER_CONSTANT),
                al.ElasticTransform(
                    border_mode=cv2.BORDER_CONSTANT,
                    alpha_affine=15),
            ],
            p=0.1)
        color = al.OneOf(
            [
                al.RandomGamma(),
                al.HueSaturationValue(),
                al.RGBShift(),
                al.CLAHE(),
                al.ChannelShuffle(),
                al.InvertImg(),
            ],
            p=0.1)
        weather = al.OneOf(
            [
                al.RandomSnow(),
                al.RandomRain(),
                al.RandomFog(),
                al.RandomSunFlare(num_flare_circles_lower=1,
                                  num_flare_circles_upper=2,
                                  src_radius=110),
                al.RandomShadow(),
            ],
            p=0.1)
        noise = al.OneOf(
            [
                al.GaussNoise(),
                al.ISONoise(),
                al.MultiplicativeNoise(),
            ],
            p=0.1)
        tone = al.OneOf(
            [
                al.ToGray(),
                al.ToSepia(),
                al.Solarize(),
                al.Equalize(),
                al.Posterize(),
                al.FancyPCA(),
            ],
            p=0.1)
        blur = al.OneOf(
            [
                # al.MotionBlur(blur_limit=1),
                al.Blur(blur_limit=[3, 5]),
                al.MedianBlur(blur_limit=[3, 5]),
                al.GaussianBlur(blur_limit=[3, 5]),
            ],
            p=0.1)
        dropout = al.OneOf(
            [
                al.CoarseDropout(),
                al.Cutout(),
                al.GridDropout(),
                al.ChannelDropout(),
                al.RandomGridShuffle(),
            ],
            p=0.1)
        degrade = al.OneOf(
            [
                al.Downscale(),
                al.ImageCompression(quality_lower=60),
            ],
            p=0.1)

        train_transform = al.Compose([
            # al.Flip(p=0.5),
            al.Resize(256, 256, interpolation=2),
            al.RandomResizedCrop(224,
                                 224,
                                 scale=(0.08, 1.0),
                                 ratio=(3. / 4., 4. / 3.),
                                 interpolation=2),
            al.HorizontalFlip(),
            # With p=0.5, pick exactly one augmentation group.
            al.OneOf(
                [
                    geometry,
                    color,
                    weather,
                    al.RandomBrightnessContrast(p=0.1),
                    noise,
                    tone,
                    blur,
                    dropout,
                    degrade,
                ],
                p=0.5),
            al.Normalize(),
            ToTensorV2()
        ])
    else:
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])

    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    # Wrap the train pipeline in the matching multi-data transform class.
    if use_albu_aug:
        train_transform = MultiDataTransformAlbu(train_transform)
    else:
        train_transform = MultiDataTransform(train_transform)

    return train_transform, test_transform