def strong_aug(image, p=0.5):
    # `image` supplies the reference size for the RandomCrop below.
    return Compose(
        [
            ShiftScaleRotate(shift_limit=0.0125, scale_limit=0.03, rotate_limit=0.5, p=0.8),
            RandomGamma(gamma_limit=(80, 120)),
            RandomBrightnessContrast(),
            OneOf(
                [
                    MotionBlur(p=0.6),
                    MedianBlur(blur_limit=3, p=0.5),
                    MedianBlur(blur_limit=(5, 7), p=0.3),
                    Blur(blur_limit=3, p=0.5),
                    Blur(blur_limit=(5, 7), p=0.3),
                ],
                p=0.6,
            ),
            RandomCrop(image.shape[0] - 10, image.shape[1] - 6, p=0.6),
            OneOf(
                [
                    OpticalDistortion(),
                ],
                p=0.8,
            ),
        ],
        p=p,
    )
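# Minimal usage sketch for the pipeline above. The input here is a dummy HxWx3
# uint8 array (an assumption for illustration); any image loaded with cv2.imread
# works the same way, and the random parameters are re-sampled on every call.
import numpy as np

img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
aug = strong_aug(img, p=0.5)
augmented = aug(image=img)["image"]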
def augment_flips_gray(p=.5):
    '''
    usage:
        au = augment_flips_gray()
        res = au(image=image, mask=mask)
        res_img, res_mask = res['image'], res['mask']
    '''
    return Compose(
        [
            # CLAHE(),
            OneOf([
                RandomRotate90(),
                ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT, value=0)
            ], p=0.35),
            RandomGamma(gamma_limit=(75, 140)),
            Blur(blur_limit=3),
            # HueSaturationValue(),
            OneOf([
                MotionBlur(p=.2),
                MedianBlur(blur_limit=3, p=0.1),
                Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2),
        ], p=p)
def strong_aug2(p=1.0):
    return Compose(
        [
            Flip(p=0.75),  # ok
            # RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
            RandomBrightnessContrast(brightness_limit=(-0.2, 0.2), contrast_limit=0.2,
                                     p=1.0, brightness_by_max=False),
            ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT, p=0.2),
            OneOf([
                IAASharpen(p=1),
                Blur(blur_limit=5, p=1.0),
                MedianBlur(blur_limit=5, p=1.0),
                MotionBlur(p=1.0),
            ], p=0.6),
            OneOf([
                HueSaturationValue(hue_shift_limit=10, sat_shift_limit=30,
                                   val_shift_limit=20, p=1.0),
                RGBShift(p=1.0),
                RandomGamma(p=1),
            ], p=0.3),
            IAAAdditiveGaussianNoise(p=.2),
        ], p=p)
def aug_train(resolution, p=1):
    return Compose([
        Resize(resolution, resolution),
        OneOf([
            HorizontalFlip(),
            VerticalFlip(),
            RandomRotate90(),
            Transpose()
        ], p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.5),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.5),
        HueSaturationValue(p=0.3),
        Normalize()
    ], p=p)
def blend_original(img):
    img = img.copy()
    h, w = img.shape[:2]
    rect = detector(img)
    if len(rect) == 0:
        return img
    else:
        rect = rect[0]
        sp = predictor(img, rect)
        landmarks = np.array([[p.x, p.y] for p in sp.parts()])
        # face outline: jaw line (points 0-16) plus the eyebrow points traversed right-to-left
        outline = landmarks[[*range(17), *range(26, 16, -1)]]
        Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0])
        raw_mask = np.zeros(img.shape[:2], dtype=np.uint8)
        raw_mask[Y, X] = 1
        face = img * np.expand_dims(raw_mask, -1)
        # add warping: resample the face region by a random, sufficiently large factor
        h1 = random.randint(h - h // 2, h + h // 2)
        w1 = random.randint(w - w // 2, w + w // 2)
        while abs(h1 - h) < h // 3 and abs(w1 - w) < w // 3:
            h1 = random.randint(h - h // 2, h + h // 2)
            w1 = random.randint(w - w // 2, w + w // 2)
        face = cv2.resize(face, (w1, h1),
                          interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]))
        face = cv2.resize(face, (w, h),
                          interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]))
        # shrink the mask and paste the warped face back into the original frame
        raw_mask = binary_erosion(raw_mask, iterations=random.randint(4, 10))
        img[raw_mask, :] = face[raw_mask, :]
        if random.random() < 0.2:
            img = OneOf([GaussianBlur(), Blur()], p=0.5)(image=img)["image"]
        # image compression
        if random.random() < 0.5:
            img = ImageCompression(quality_lower=40, quality_upper=95)(image=img)["image"]
        return img
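# blend_original above relies on module-level objects that are not defined in this
# snippet. A plausible setup sketch, assuming dlib's standard 68-point landmark model
# is available locally (the .dat path is an assumption):
import random

import cv2
import dlib
import numpy as np
import skimage.draw
from scipy.ndimage import binary_erosion
from albumentations import Blur, GaussianBlur, ImageCompression, OneOf

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")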
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ], p=0.3),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(),
        Cutout(num_holes=20, max_h_size=16, max_w_size=16)
    ], p=p)
def strong_aug(p=0.5):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
def data_augmentation(self, original_image):
    """
    Apply random augmentation to a sample image.

    Args:
        original_image: the original image
    Return:
        image_aug: the augmented image
    """
    augmentations = Compose([
        HorizontalFlip(p=0.4),
        ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
        RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.3),
        # brightness / contrast
        RandomGamma(gamma_limit=(80, 120), p=0.1),
        RandomBrightnessContrast(p=0.1),
        # blur
        OneOf([
            MotionBlur(p=0.1),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.3),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2)
    ])
    augmented = augmentations(image=original_image)
    image_aug = augmented['image']
    return image_aug
def medium_aug(p=1.0):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(p=0.75, shift_limit=0.1, scale_limit=0.2, rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT),
            RandomBrightnessContrast(brightness_limit=0.6, contrast_limit=0.6, p=0.5),
            OneOf([
                HueSaturationValue(p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0)
            ], p=0.5),
            OneOf([
                Blur(p=1.0),
                MedianBlur(p=1.0),
                MotionBlur(p=1.0),
            ], p=0.3),
            OneOf([GridDistortion(p=1.0), ElasticTransform(p=1.0)], p=0.3),
            OneOf([
                CLAHE(p=1.0),
                IAASharpen(p=1.0),
            ], p=0.3),
            IAAAdditiveGaussianNoise(p=0.5)
            # ToGray(p=1.0),
        ], p=p)
def __getitem__(self, idx):
    file = self.files[idx]
    file_path = os.path.join(os.path.join(PATH, self.mode + '_images'), file)
    image = cv2.imread(file_path)
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    train_aug = Compose([
        OneOf([
            VerticalFlip(),
            HorizontalFlip(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=0.4),
            MedianBlur(p=0.4, blur_limit=3),
            Blur(p=0.5, blur_limit=3)
        ], p=0.4),
        OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.4),
        Normalize(mean=mean, std=std, p=1),
        # CLAHE(p=0.5),
        ToTensor()
    ])
    augmented = train_aug(image=image)
    image = augmented['image']
    label = np.array(self.labels[idx])
    label = torch.tensor(label, dtype=torch.float32)
    return (image, label)
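# The __getitem__ above returns an (image_tensor, label_tensor) pair, so the owning
# Dataset class (not shown here) can be fed straight into a DataLoader. A minimal
# sketch, assuming `dataset` is an instance of that class:
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
images, labels = next(iter(loader))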
def gettraintransforms(self, mean, std, p=1):
    # Train Phase transformations
    albumentations_transform = Compose([
        # RandomRotate90(),
        PadIfNeeded(72, 72, border_mode=cv2.BORDER_REFLECT, always_apply=True),
        RandomCrop(64, 64, True),
        Flip(),
        GaussNoise(p=0.8, mean=mean),
        OneOf([
            MotionBlur(p=0.4),
            MedianBlur(blur_limit=3, p=0.2),
            Blur(blur_limit=3, p=0.2),
        ], p=0.4),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.6),
        OneOf([
            OpticalDistortion(p=0.8),
            GridDistortion(p=0.4),
        ], p=0.6),
        HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.6),
        CoarseDropout(always_apply=True, max_holes=1, min_holes=1,
                      max_height=16, max_width=16, min_height=16, min_width=16,
                      fill_value=(255 * .6)),
        Normalize(mean=mean, std=std, always_apply=True),
        pytorch.ToTensorV2(always_apply=True),
    ], p=p)

    return albumentations_transform
def get_transforms():
    return Compose([
        RandomRotate90(p=0.5),
        Flip(p=0.5),
        Transpose(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ])
def __call__(self, original_image):
    self.augmentation_pipeline = Compose(
        [
            Resize(650, 650, always_apply=True),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(rotate_limit=25.0, p=0.7),
            OneOf([IAAEmboss(p=1), IAASharpen(p=1), Blur(p=1)], p=0.5),
            IAAPiecewiseAffine(p=0.5),
            Resize(self.height, self.width, always_apply=True),
            Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
                always_apply=True
            ),
            ToTensor()
        ]
    )
    augmented = self.augmentation_pipeline(image=original_image)
    image = augmented["image"]
    return image
def strong_aug(config, aug_prob):
    return Compose(
        [
            # Resize(config.image_height, config.image_width, always_apply=True),
            RandomSizedCrop(
                p=config.random_sized_crop_prob,
                min_max_height=(int(config.image_height * config.min_max_height),
                                config.image_height),
                height=config.image_height,
                width=config.image_width,
                w2h_ratio=config.image_width / config.image_height),
            HorizontalFlip(p=config.horizontal_flip_prob),
            RandomGamma(p=config.random_gamma_prob),
            RandomContrast(p=config.random_contrast_prob, limit=config.random_contrast_limit),
            RandomBrightness(p=config.random_brightness_prob, limit=config.random_brightness_limit),
            OneOf([
                MotionBlur(p=config.motion_blur_prob),
                MedianBlur(blur_limit=config.median_blur_limit, p=config.median_blur_prob),
                Blur(blur_limit=config.blur_limit, p=config.blur_prob),
            ], p=config.one_of_blur_prob),
            CLAHE(clip_limit=config.clahe_limit, p=config.clahe_prob),
            IAAEmboss(p=config.iaaemboss_prob),
            HueSaturationValue(p=config.hue_saturation_value_prob,
                               hue_shift_limit=config.hue_shift_limit,
                               sat_shift_limit=config.sat_shift_limit,
                               val_shift_limit=config.val_shift_limit)
        ], p=aug_prob)
def hard_augmentations2(use_d4=False):
    d4 = Compose([RandomRotate90(p=1), HorizontalFlip(p=1), VerticalFlip(p=1)])
    fliplr = HorizontalFlip()
    aug = Compose(
        [
            RandomBrightness(p=1),
            # OneOf([IAAAdditiveGaussianNoise(), , ], p=0.2),
            # GaussNoise(p=1),
            IAAAdditiveGaussianNoise(p=1),
            OneOf([
                MotionBlur(p=1),
                MedianBlur(blur_limit=3, p=1),
                Blur(blur_limit=3, p=1)
            ], p=0.5),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=5, p=1),
            # OneOf([GridDistortion(p=0.1), IAAPiecewiseAffine(p=0.3)], p=0.2),
            # GridDistortion(p=1),
            # IAAPiecewiseAffine(p=1)
        ], p=1)
    default = default_transforms()
    return Compose([d4 if use_d4 else fliplr, aug, default])
def strong_aug(p=1):
    return Compose([
        ToFloat(),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        FromFloat(dtype='uint16', max_value=65535.0)
    ], p=p)
def get_photometric(self):
    coeff = int(3 * self.strength)
    # largest odd blur-kernel limit allowed at this augmentation strength (at least 1)
    k = max(1, coeff if coeff % 2 else coeff - 1)
    return Compose([
        OneOf(
            [
                # CLAHE(clip_limit=2, p=.4),
                IAASharpen(p=.5),
                IAAEmboss(p=.5),
            ], p=0.2),
        OneOf([
            IAAAdditiveGaussianNoise(p=.3),
            GaussNoise(p=.7),
        ], p=.2),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=k, p=.3),
            Blur(blur_limit=k, p=.5),
        ], p=.2),
        OneOf([
            RandomContrast(),
            RandomBrightness(),
        ], p=.2)
    ])
def get_train_transforms():
    augmentations = Compose([
        Resize(236, 236),
        Flip(),
        OneOf([
            IAAAdditiveGaussianNoise(p=.5),
            GaussNoise(p=.4),
        ], p=0.4),
        OneOf([
            MotionBlur(p=0.6),
            Blur(blur_limit=3, p=0.2),
        ], p=0.4),
        ShiftScaleRotate(shift_limit=0.0725, scale_limit=0.2, rotate_limit=45, p=0.6),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.4),
            IAAPiecewiseAffine(p=0.2),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.25),
        HueSaturationValue(p=0.3),
        CenterCrop(224, 224),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
        ToTensor()
    ])
    return lambda img: augmentations(image=np.array(img))
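# The callable returned by get_train_transforms takes a PIL image (as torchvision
# datasets provide) and returns the albumentations result dict, so the tensor still
# has to be pulled out with ["image"]. A hedged wiring sketch; "data/train" is a
# placeholder path:
from torchvision.datasets import ImageFolder

train_tf = get_train_transforms()
train_ds = ImageFolder("data/train", transform=lambda img: train_tf(img)["image"])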
def strong_aug(p=1):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
def __init__(self, root_dir, partition, augment=True):
    self.root_dir = root_dir
    self.list_IDs = os.listdir(
        os.path.join(self.root_dir, 'x_{}'.format(partition)))
    self.partition = partition
    self.augment = augment
    self.augmentator = Compose([
        # Non destructive transformations
        VerticalFlip(p=0.6),
        HorizontalFlip(p=0.6),
        RandomRotate90(),
        Transpose(p=0.6),
        ShiftScaleRotate(p=0.45, scale_limit=(0.1, 0.3)),
        # Non-rigid transformations
        ElasticTransform(p=0.25, alpha=160, sigma=180 * 0.05, alpha_affine=120 * 0.03),
        Blur(blur_limit=3, p=0.2),
        # Color augmentation
        RandomBrightness(p=0.5),
        RandomContrast(p=0.5),
        RandomGamma(p=0.5),
        CLAHE(p=0.5)
    ])
def train_pipeline(cache, mask_db, path):
    image, mask = read_image_and_mask_cached(cache, mask_db, (101, 101), path)
    args = Compose([
        LabelMaskBorder(),
        HorizontalFlip(p=0.5),
        OneOf([
            ShiftScaleRotate(rotate_limit=15, border_mode=cv2.BORDER_REPLICATE),
            RandomSizedCrop(min_max_height=(70, 100), height=101, width=101)
        ], p=0.2),
        GaussNoise(p=0.2),
        OneOf([
            RandomBrightness(limit=0.4),
            RandomGamma(),
        ], p=0.5),
        OneOf([Blur(), MedianBlur(), MotionBlur()], p=0.2),
        OneOf([
            ElasticTransform(alpha=10, sigma=10, alpha_affine=10),
            GridDistortion()
        ], p=0.2),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        PadIfNeeded(128, 128, cv2.BORDER_REPLICATE),
        ChannelsFirst()
    ])(image=image, mask=mask)
    return args['image'], args.get('mask')
def strong_aug(p=.5):
    return Compose([
        JpegCompression(p=0.9),
        HorizontalFlip(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ], p=0.5),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=15, p=.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.5),
        HueSaturationValue(p=0.5),
    ], p=p)
def augmentation_pipeline(self, p=0.5):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2),
            OneOf(
                [
                    MotionBlur(p=0.2),
                    # MedianBlur(blur_limit=3, p=0.1),
                    Blur(blur_limit=3, p=0.1),
                ], p=0.1),
            OneOf([
                ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=5, p=0.9),
                IAAPerspective(scale=(.02, .05))
            ], p=0.3)
        ], p=p)
def get_corrupter(self):
    distortion_augs = OneOf([OpticalDistortion(p=1), GridDistortion(p=1)], p=1)
    effects_augs = OneOf([
        IAASharpen(p=1),
        IAAEmboss(p=1),
        IAAPiecewiseAffine(p=1),
        IAAPerspective(p=1),
        CLAHE(p=1)
    ], p=1)
    misc_augs = OneOf([
        ShiftScaleRotate(p=1),
        HueSaturationValue(p=1),
        RandomBrightnessContrast(p=1)
    ], p=1)
    blur_augs = OneOf(
        [Blur(p=1), MotionBlur(p=1), MedianBlur(p=1), GaussNoise(p=1)], p=1)
    aug = Compose([distortion_augs, effects_augs, misc_augs, blur_augs])
    return aug
def alb_transform_train(imsize=256, p=1):
    albumentations_transform = Compose([
        # RandomCrop(imsize),
        # RandomRotate90(),
        Flip(),
        # Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            # CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
    ], p=p)
    return albumentations_transform
def strong_aug(p=1):
    return Compose([
        OneOf([
            RandomRotate90(p=1),
            Flip(p=1),
        ], p=1),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=45,
                         p=1, value=0, border_mode=2),  # border_mode=2 is cv2.BORDER_REFLECT
        OneOf([
            IAAAdditiveGaussianNoise(p=0.7),
            GaussNoise(p=0.7),
        ], p=1),
        OneOf([
            MotionBlur(p=0.7),
            MedianBlur(blur_limit=3, p=0.7),
            Blur(blur_limit=3, p=0.7),
        ], p=1),
        RandomBrightnessContrast(p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(p=0.7),
        ], p=1)
    ], p=p)
def transform(config, image, mask):
    try:
        p = config["train"]["dap"]["p"]
    except (KeyError, TypeError):
        p = 1
    assert 0 <= p <= 1
    # Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    return Compose([
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1)
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)(image=image, mask=mask)
def generate_color_augmentation(aug_cfg: CfgNode) -> Union[Compose, None]:
    """
    Generate a color augmentation object.

    :param aug_cfg: augmentation config
    :return color_aug: color augmentation object, or None if nothing is enabled
    """
    color_aug_list = []
    if aug_cfg.BRIGHTNESS_CONTRAST_PROB > 0:
        color_aug_list.append(
            RandomBrightnessContrast(p=aug_cfg.BRIGHTNESS_CONTRAST_PROB))

    if aug_cfg.BLURRING_PROB > 0:
        blurring = OneOf([
            MotionBlur(aug_cfg.BLUR_LIMIT, p=1),
            MedianBlur(aug_cfg.BLUR_LIMIT, p=1),
            Blur(aug_cfg.BLUR_LIMIT, p=1),
        ], p=aug_cfg.BLURRING_PROB)
        color_aug_list.append(blurring)

    if aug_cfg.GAUSS_NOISE_PROB > 0:
        color_aug_list.append(GaussNoise(p=aug_cfg.GAUSS_NOISE_PROB))

    if aug_cfg.GRID_MASK_PROB > 0:
        color_aug_list.append(
            GridMask(num_grid=(3, 7), p=aug_cfg.GRID_MASK_PROB))

    if len(color_aug_list) > 0:
        color_aug = Compose(color_aug_list, p=1)
        return color_aug
    else:
        return None
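# Hypothetical usage of generate_color_augmentation, assuming a yacs-style CfgNode
# whose field names match those read in the function above; the dummy frame stands
# in for a real HxWx3 uint8 image.
import numpy as np
from yacs.config import CfgNode

aug_cfg = CfgNode()
aug_cfg.BRIGHTNESS_CONTRAST_PROB = 0.3
aug_cfg.BLURRING_PROB = 0.3
aug_cfg.BLUR_LIMIT = 3
aug_cfg.GAUSS_NOISE_PROB = 0.2
aug_cfg.GRID_MASK_PROB = 0.0  # GridMask is a custom transform; disabled in this sketch

frame = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
color_aug = generate_color_augmentation(aug_cfg)
if color_aug is not None:
    frame = color_aug(image=frame)["image"]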
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(p=0.5),
        ToGray(p=0.1),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ], p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
def gettraintransforms(self, mean, std, p=1):
    # Train Phase transformations
    albumentations_transform = Compose([
        RandomRotate90(),
        Flip(),
        GaussNoise(p=0.6, mean=mean),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.3),
        OneOf([
            OpticalDistortion(p=0.4),
            GridDistortion(p=0.2),
        ], p=0.3),
        HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3),
        Cutout(always_apply=True, num_holes=2, max_h_size=10, max_w_size=10, fill_value=(255 * .6)),
        Normalize(mean=mean, std=std, always_apply=True),
        pytorch.ToTensorV2(always_apply=True),
    ], p=p)

    return albumentations_transform