def transform_v3(config):
    """Return (train_transforms, test_transforms) pipelines.

    Both pipelines resize to ``config.image_size`` and apply noise, blur
    and a random color perturbation; the train pipeline additionally
    applies JPEG compression, horizontal flip and shift/scale/rotate.
    """
    side = config.image_size
    color_jitter = [RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()]
    train_transforms = Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        HorizontalFlip(),
        Resize(side, side),
        OneOf(color_jitter, p=1),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT, p=1),
        ToTensor(),
    ])
    test_transforms = Compose([
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        Resize(side, side),
        OneOf([RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()], p=1),
        ToTensor(),
    ])
    return train_transforms, test_transforms
def strong_aug(p=1):
    """Aggressive augmentation pipeline, applied with probability ``p``."""
    geometry = [
        OneOf([RandomRotate90(p=1), Flip(p=1)], p=1),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=45,
                         p=1, value=0, border_mode=2),
    ]
    noise = OneOf([IAAAdditiveGaussianNoise(p=0.7), GaussNoise(p=0.7)], p=1)
    blur = OneOf([
        MotionBlur(p=0.7),
        MedianBlur(blur_limit=3, p=0.7),
        Blur(blur_limit=3, p=0.7),
    ], p=1)
    contrast = [
        RandomBrightnessContrast(p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(p=0.7),
        ], p=1),
    ]
    return Compose(geometry + [noise, blur] + contrast, p=p)
def get_transforms(phase, mean, std):
    """Build train/eval transforms normalized with ``mean``/``std``.

    NOTE(review): reads a module-level ``crop_image_size``; cropping is
    enabled only when it is set and ``phase == "train"``.
    """
    transforms = []
    if phase == "train":
        if crop_image_size is not None:
            transforms.append(
                CropNonEmptyMaskIfExists(crop_image_size[0], crop_image_size[1],
                                         p=0.85))
        transforms += [
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            RandomBrightnessContrast(p=0.1, brightness_limit=0.1,
                                     contrast_limit=0.1),
        ]
    transforms += [
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(transforms)
def __init__(self, brightness_limit=0.1, contrast_limit=0.1, brightness_by_max=True, always_apply=False, p=0.5, p_asym=0.2):
    """Initialise both parent classes of this asymmetric stereo transform.

    p_asym: probability of applying the transform asymmetrically;
    consumed by StereoTransformAsym only.
    """
    # Explicit parent __init__ calls (not super()) because the two bases
    # take different argument lists.
    StereoTransformAsym.__init__(self, always_apply, p, p_asym)
    RandomBrightnessContrast.__init__(self, brightness_limit, contrast_limit, brightness_by_max, always_apply, p)
def train_multi_augment12(image, bboxes=None, category_id=None):
    """Augment ``image`` (and, optionally, its pascal_voc ``bboxes``).

    Args:
        image: HxWxC numpy image.
        bboxes: optional pascal_voc boxes; when given, bbox params are
            attached so boxes are transformed alongside the image.
        category_id: labels matching ``bboxes`` (required with bboxes).
    Returns:
        The albumentations result dict from applying the pipeline.
    """
    h, w = image.shape[0], image.shape[1]
    with_boxes = bboxes is not None
    # Previously the whole pipeline was duplicated in both branches,
    # differing only in brightness_limit — define it once to avoid drift.
    transforms = [
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(
            shift_limit=0.05,
            scale_limit=0.05,
            rotate_limit=5,
            border_mode=cv2.BORDER_REPLICATE,
            p=1,
        ),
        RandomSizedCrop(min_max_height=(int(h * 0.9), h), height=h, width=w,
                        p=0.25),
        RandomBrightnessContrast(
            # The bbox variant historically disabled brightness jitter.
            brightness_limit=0.0 if with_boxes else 0.3,
            contrast_limit=0.3,
            p=0.25,
        ),
    ]
    if with_boxes:
        aug = Compose(
            transforms,
            p=1,
            bbox_params=BboxParams(format="pascal_voc",
                                   label_fields=["category_id"]),
        )
        return aug(image=image, bboxes=bboxes, category_id=category_id)
    aug = Compose(transforms, p=1)
    return aug(image=image)
def transform(config, image, mask):
    """Apply a random augmentation pipeline to an image/mask pair.

    The overall application probability comes from
    ``config["train"]["dap"]["p"]``, defaulting to 1 when absent.
    Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    """
    try:
        p = config["train"]["dap"]["p"]
    except (KeyError, TypeError):  # was a bare except; narrowed to lookup errors
        p = 1
    assert 0 <= p <= 1
    # BUG FIX: ``p`` must be given to Compose at construction time.
    # Previously it was passed to the call (``aug(..., p=p)``), where it is
    # treated as an extra data target instead of a pipeline probability.
    pipeline = Compose([
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45,
                         p=0.2),
        OneOf([IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
    return pipeline(image=image, mask=mask)
def get_transforms(*, data):
    """Return the 'train' (heavy augmentation) or 'valid' (resize-only)
    pipeline; any other ``data`` value falls through to None."""
    imagenet_norm = Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
    )
    if data == 'train':
        return Compose([
            RandomResizedCrop(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(p=0.5),
            HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                               val_shift_limit=0.2, p=0.5),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1), p=0.5),
            imagenet_norm,
            ToTensorV2(),
        ])
    if data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            imagenet_norm,
            ToTensorV2(),
        ])
def data_augmentation(self, original_image):
    """Randomly augment a sample image.

    Args:
        original_image: the original image.
    Returns:
        image_aug: the augmented image.
    """
    augmentations = Compose([
        HorizontalFlip(p=0.4),
        ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
        RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.3),
        # Brightness / contrast
        RandomGamma(gamma_limit=(80, 120), p=0.1),
        RandomBrightnessContrast(p=0.1),
        # Blurring
        OneOf([
            MotionBlur(p=0.1),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.3),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2)
    ])
    augmented = augmentations(image=original_image)
    image_aug = augmented['image']
    return image_aug
def strong_aug(p=1):
    """Classic albumentations 'strong' pipeline (rotate/flip + noise +
    blur + contrast/sharpen + HSV), applied with probability ``p``."""
    noise_group = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur_group = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    contrast_group = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise_group,
        blur_group,
        contrast_group,
        HueSaturationValue(p=0.3),
    ], p=p)
def __getitem__(self, index):
    """Load one sample; returns (img, label) when labels exist, else img."""
    path = os.path.join(self.data_dir, self.filename[index] + self.suffix)
    # Labels are optional (inference datasets have self.ids = None).
    label = None if self.ids is None else self.ids[index]
    img = Image.open(path)
    img = img.convert("RGB")
    if self.transform is not None:
        img = self.transform(img)
    else:
        if self.augumentation:
            img = np.asarray(img)
            task = [
                HorizontalFlip(p=0.5),
                VerticalFlip(p=0.5),
                RandomGamma(),
                RandomBrightnessContrast(p=0.5),
                PadIfNeeded(self.img_size, self.img_size),
                ShiftScaleRotate(scale_limit=0.5, p=0.5)
            ]
            aug = Compose(task)
            aug_data = aug(image=img)
            img = aug_data["image"]
            #img = cv2.resize(img,(self.img_size,self.img_size))
            img = Image.fromarray(img)
            # NOTE(review): this branch is reached only when self.transform
            # is None, so the call below raises TypeError; presumably the
            # augmented image should be returned as-is (or a default
            # transform applied). Confirm intended behavior with callers.
            img = self.transform(img)
    if label is not None:
        return img, label
    else:
        return img
def __getitem__(self, index):
    """Return an (image_tensor, mask_tensor) pair for ``index``."""
    img_path = self.image_paths[index]
    mask_path = self.label_paths[index]
    # Samples may be stored either as .npy arrays or as image files.
    img = np.load(img_path) if img_path.endswith(".npy") else cv2.imread(img_path)
    if mask_path.endswith(".npy"):
        mask = np.load(mask_path)
    else:
        mask = cv2.imread(mask_path, 0)  # grayscale read
    if self.augmentation:
        pipeline = Compose([
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            RandomGamma(),
            RandomBrightnessContrast(p=0.5),
            PadIfNeeded(self.img_size, self.img_size),
            ShiftScaleRotate(scale_limit=0.5, p=0.5),
            #Normalize(mean=[0.210, 0.210, 0.210], std=[0.196, 0.196, 0.196], always_apply=True)
        ])
        out = pipeline(image=img, mask=mask)
        img, mask = out["image"], out["mask"]
    img = self._normalize(img)
    img = cv2.resize(img, (self.img_size, self.img_size))
    mask = cv2.resize(mask, (self.img_size, self.img_size))
    mask = mask // 255.0  # collapse a 0/255 mask to 0.0/1.0
    # Channels-first layout for torch; grayscale gets an explicit channel dim.
    if img.ndim < 3:
        img = np.expand_dims(img, 0)
    else:
        img = np.transpose(img, axes=[2, 0, 1])
    return torch.from_numpy(img), torch.from_numpy(mask)
def hard_aug(original_height=128, original_width=128, k=4):
    """Heavy geometric + photometric pipeline.

    ``k`` bounds the smallest random crop height (original_height // k).
    """
    crop_or_pad = OneOf([
        RandomSizedCrop(
            min_max_height=(original_height // k, original_height),
            height=original_height, width=original_width, p=0.5),
        PadIfNeeded(min_height=original_height, min_width=original_width,
                    p=0.5),
    ], p=1)
    warp = OneOf([
        ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
    ], p=0.8)
    return Compose([
        crop_or_pad,
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        warp,
        CLAHE(p=0.8),
        RandomBrightnessContrast(p=0.8),
        RandomGamma(p=0.8),
    ])
def seg_transforms(phase, resize=(512, 512), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Get segmentation albumentation transforms.

    Args:
        phase: 'train', 'valid' or 'test'.
        resize: (height, width) input image shape into the model.
        mean, std: normalization statistics.
    Returns:
        An albumentations Compose of the phase-appropriate transforms.
    """
    assert (phase in ['train', 'valid', 'test'])
    transforms_list = []
    if phase == 'train':
        transforms_list.extend([
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(rotate_limit=20, border_mode=0, p=0.2),
            RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1,
                                     p=0.2),
            # BUG FIX: the interpolation used to be Image.BILINEAR, a PIL
            # constant equal to 2 — which albumentations/OpenCV interpret
            # as INTER_CUBIC. Use the cv2 flag for true bilinear resizing.
            Resize(resize[0] + 64, resize[1] + 64,
                   interpolation=cv2.INTER_LINEAR),
            Normalize(mean=mean, std=std, p=1),
            # Crop back down to the target size after the +64 oversize.
            RandomCrop(resize[0], resize[1]),
            ToTensor(),
        ])
    else:
        transforms_list.extend([
            Resize(resize[0], resize[1], interpolation=cv2.INTER_LINEAR),
            Normalize(mean=mean, std=std, p=1),
            ToTensor(),
        ])
    transforms = Compose(transforms_list)
    return transforms
def just_brightness_flip(p=1.0):
    """Minimal pipeline: a random flip plus brightness/contrast jitter.

    Heavier stages (shift/scale/rotate, blur, HSV/RGB shift, noise, grid
    or elastic distortion, CLAHE/sharpen) were previously present but
    disabled; only flip + brightness remain active.
    """
    active = [
        Flip(p=0.75),
        RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2,
                                 p=1.0, brightness_by_max=False),
    ]
    return Compose(active, p=p)
def strong_aug2(p=1.0):
    """Flip + brightness jitter always, then probabilistic geometry,
    sharpen/blur, color shift and additive noise; gated by ``p``."""
    sharpen_or_blur = OneOf([
        IAASharpen(p=1),
        Blur(blur_limit=5, p=1.0),
        MedianBlur(blur_limit=5, p=1.0),
        MotionBlur(p=1.0),
    ], p=0.6)
    color_shift = OneOf([
        HueSaturationValue(hue_shift_limit=10, sat_shift_limit=30,
                           val_shift_limit=20, p=1.0),
        RGBShift(p=1.0),
        RandomGamma(p=1),
    ], p=0.3)
    return Compose([
        Flip(p=0.75),
        RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                 contrast_limit=0.2, p=1.0,
                                 brightness_by_max=False),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=45,
                         border_mode=cv2.BORDER_CONSTANT, p=0.2),
        sharpen_or_blur,
        color_shift,
        IAAAdditiveGaussianNoise(p=.2),
    ], p=p)
def strong_aug(p=0.5):
    """Strong augmentation pipeline, applied with probability ``p``.

    NOTE(review): RandomCrop below reads a module-level ``image`` for its
    target size, so this function raises NameError unless a global
    ``image`` exists when it is called — consider passing the shape in.
    """
    return Compose(
        [
            ShiftScaleRotate(shift_limit=0.0125, scale_limit=0.03, rotate_limit=0.5, p=0.8),
            RandomGamma(gamma_limit=(80, 120)),
            RandomBrightnessContrast(),
            OneOf(
                [
                    MotionBlur(p=0.6),
                    # Duplicate blur entries with different limits act as a
                    # weighted choice between mild and stronger blur.
                    MedianBlur(blur_limit=3, p=0.5),
                    MedianBlur(blur_limit=(5, 7), p=0.3),
                    Blur(blur_limit=3, p=0.5),
                    Blur(blur_limit=(5, 7), p=0.3),
                ],
                p=0.6,
            ),
            # Crop size is derived from the (global) image's shape.
            RandomCrop(image.shape[0] - 10, image.shape[1] - 6, p=0.6),
            OneOf(
                [
                    OpticalDistortion(),
                ],
                p=0.8,
            ),
        ],
        p=p,
    )
def init_augmentations(self):
    """Build the three pipelines used by this dataset: random-crop train
    aug, resize train aug, and a plain resize for evaluation."""
    h, w = self.params.input_height, self.params.input_width
    common = [
        HorizontalFlip(),
        Rotate(limit=10),
        RandomBrightnessContrast(),
        ToGray(p=0.05)
    ]
    # Variant 1: random scaled crop, then the shared photometric augs.
    self.crop_aug = self.get_aug(
        [RandomResizedCrop(height=h, width=w, scale=(0.35, 1.0))] + common,
        min_visibility=0.5)
    # Variant 2: deterministic resize, then the same shared augs.
    self.resize_aug = self.get_aug(
        [Resize(height=h, width=w)] + common,
        min_visibility=0.5)
    # Evaluation: resize only, no augmentation.
    self.just_resize = self.get_aug([Resize(height=h, width=w)])
def hard_transform(image_size: int = 256, p: float = 0.5):
    """Hard augmentations: geometry, perspective, a random color-space
    perturbation, contrast/gamma/CLAHE and JPEG artefacts, each gated by p.

    NOTE: ``image_size`` is accepted for API symmetry but unused here.
    """
    color_space = OneOf([
        HueSaturationValue(p=p),
        ToGray(p=p),
        RGBShift(p=p),
        ChannelShuffle(p=p),
    ])
    return Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        color_space,
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5, p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
def get_transforms1(*, data, CFG):
    """Train/valid pipelines driven by CFG.augmentation probabilities.

    Disabled stages (RandomCrop, GaussNoise, RandomGamma, GaussianBlur)
    were removed from the active list in an earlier revision.
    """
    imagenet = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if data == 'train':
        aug = CFG.augmentation
        return Compose([
            HorizontalFlip(p=aug.augmix_p),
            VerticalFlip(p=aug.augmix_p),
            RandomContrast(p=aug.contrast_p),
            RandomRotate90(p=aug.rotate_90_p),
            RandomBrightnessContrast(p=aug.bright_contrast_p),
            RandomAugMix(severity=aug.augmix_s, width=3, alpha=1.,
                         p=aug.augmix_p),
            GridMask(num_grid=aug.grdimask_n, p=aug.grdimask_p),
            Cutout(p=aug.cutout_p, max_h_size=aug.cutout_h,
                   max_w_size=aug.cutout_w),
            Normalize(**imagenet)
        ])
    if data == 'valid':
        return Compose([
            Normalize(**imagenet)
        ])
def strong_aug(p=0.8):
    """Strong augmentation pipeline.

    See https://github.com/albu/albumentations for each transform.
    Probability layers multiply: ``p`` gates the whole Compose, each
    transform carries its own probability, and OneOf gates its children
    (the final OneOf keeps the default p=0.5).
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.3)
    enhance_or_flip = OneOf([
        CLAHE(clip_limit=2, p=0.4),
        IAASharpen(p=0.4),
        IAAEmboss(p=0.4),
        RandomBrightnessContrast(p=0.6),
        HorizontalFlip(p=0.5)
    ])
    return Compose([
        ShiftScaleRotate(shift_limit=0.2, scale_limit=0.3, rotate_limit=45,
                         p=0.8, border_mode=cv2.BORDER_CONSTANT),
        noise,
        MedianBlur(blur_limit=3, p=0.7),
        enhance_or_flip,
    ], p=p)
def get_transforms(*, data):
    """Return the 'train' or 'valid' pipeline sized by CFG.size."""
    norm = Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
    )
    if data == 'train':
        return Compose([
            RandomResizedCrop(CFG.size, CFG.size, scale=(0.85, 1.0)),
            HorizontalFlip(p=0.5),
            RandomBrightnessContrast(p=0.2, brightness_limit=(-0.2, 0.2),
                                     contrast_limit=(-0.2, 0.2)),
            HueSaturationValue(p=0.2, hue_shift_limit=0.2, sat_shift_limit=0.2,
                               val_shift_limit=0.2),
            ShiftScaleRotate(p=0.2, shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=20),
            CoarseDropout(p=0.2),
            Cutout(p=0.2, max_h_size=16, max_w_size=16,
                   fill_value=(0., 0., 0.), num_holes=16),
            norm,
            ToTensorV2(),
        ])
    if data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            norm,
            ToTensorV2(),
        ])
def get_train_transforms():
    """Training pipeline for args.img_size square inputs (tensor output).

    Note: CoarseDropout is placed after Normalize, so dropout happens in
    normalized space.
    """
    return Compose(
        [
            RandomResizedCrop(args.img_size, args.img_size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.25),
            ShiftScaleRotate(p=0.25),
            HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                               val_shift_limit=0.2, p=0.25),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1), p=0.5),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
                      max_pixel_value=255.0, p=1.0),
            CoarseDropout(p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.)
def get_transforms(phase):
    """Build train/eval transforms.

    NOTE(review): relies on module-level ``original_height`` and
    ``original_width`` for crop/pad/resize geometry.
    """
    ops = []
    if phase == "train":
        ops += [
            OneOf([
                RandomSizedCrop(min_max_height=(50, 101),
                                height=original_height,
                                width=original_width, p=0.5),
                PadIfNeeded(min_height=original_height,
                            min_width=original_width, p=0.5)
            ], p=1),
            VerticalFlip(p=0.5),
            OneOf([
                ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            ], p=0.8),
            CLAHE(p=0.8),
            RandomBrightnessContrast(p=0.8),
            RandomGamma(p=0.8),
        ]
    ops += [
        # Downscale to quarter resolution; INTER_NEAREST avoids
        # interpolating between discrete pixel values.
        Resize(height=int(original_height / 4),
               width=int(original_width / 4),
               interpolation=cv2.INTER_NEAREST),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ]
    return Compose(ops)
def get_corrupter(self):
    """Return a pipeline that applies one transform from each corruption
    family: distortion, effects, misc color/geometry, and blur/noise."""
    distortion = OneOf([OpticalDistortion(p=1), GridDistortion(p=1)], p=1)
    effects = OneOf([
        IAASharpen(p=1),
        IAAEmboss(p=1),
        IAAPiecewiseAffine(p=1),
        IAAPerspective(p=1),
        CLAHE(p=1)
    ], p=1)
    misc = OneOf([
        ShiftScaleRotate(p=1),
        HueSaturationValue(p=1),
        RandomBrightnessContrast(p=1)
    ], p=1)
    blur = OneOf(
        [Blur(p=1), MotionBlur(p=1), MedianBlur(p=1), GaussNoise(p=1)], p=1)
    return Compose([distortion, effects, misc, blur])
def medium_aug(p=1.0):
    """Medium-strength pipeline: flip + affine, then probabilistic color,
    blur, distortion, sharpen and additive-noise stages; gated by ``p``."""
    color = OneOf([
        HueSaturationValue(p=1.0),
        RGBShift(p=1.0),
        ChannelShuffle(p=1.0)
    ], p=0.5)
    blur = OneOf([
        Blur(p=1.0),
        MedianBlur(p=1.0),
        MotionBlur(p=1.0),
    ], p=0.3)
    warp = OneOf([GridDistortion(p=1.0), ElasticTransform(p=1.0)], p=0.3)
    sharpen = OneOf([
        CLAHE(p=1.0),
        IAASharpen(p=1.0),
    ], p=0.3)
    return Compose([
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(p=0.75, shift_limit=0.1, scale_limit=0.2,
                         rotate_limit=45, border_mode=cv2.BORDER_CONSTANT),
        RandomBrightnessContrast(brightness_limit=0.6, contrast_limit=0.6,
                                 p=0.5),
        color,
        blur,
        warp,
        sharpen,
        IAAAdditiveGaussianNoise(p=0.5)
    ], p=p)
def __init__(self, is_train: bool, to_pytorch: bool, preprocess):
    """Wrap ``preprocess`` with optional training-time augmentation.

    Args:
        is_train: when True, chain random degradations after
            ``preprocess``; otherwise ``preprocess`` is used unchanged.
        to_pytorch: stored on self._need_to_pytorch for later tensor
            conversion (consumed elsewhere in this class).
        preprocess: base transform applied first in all cases.
    """
    if is_train:
        self._aug = Compose([
            preprocess,
            # OneOf picks either the heavy degradation chain below or a
            # plain horizontal flip.
            OneOf([
                Compose([
                    HorizontalFlip(p=0.5),
                    GaussNoise(p=0.5),
                    OneOf([
                        RandomBrightnessContrast(),
                        RandomGamma(),
                    ], p=0.5),
                    Rotate(limit=20, border_mode=cv2.BORDER_CONSTANT),
                    ImageCompression(),
                    CLAHE(),
                    Downscale(scale_min=0.2, scale_max=0.9, p=0.5),
                    ISONoise(p=0.5),
                    MotionBlur(p=0.5)
                ]),
                HorizontalFlip(p=0.5)
            ])
        ], p=1)
    else:
        self._aug = preprocess
    self._need_to_pytorch = to_pytorch
def create_train_transforms(size=300):
    """Training pipeline: compression/noise/blur, isotropic resize to
    ``size`` (randomly chosen interpolation) padded square, then color
    jitter, occasional grayscale and a mild shift/scale/rotate."""
    resize_variants = OneOf([
        IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA,
                        interpolation_up=cv2.INTER_CUBIC),
        IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA,
                        interpolation_up=cv2.INTER_LINEAR),
        IsotropicResize(max_side=size, interpolation_down=cv2.INTER_LINEAR,
                        interpolation_up=cv2.INTER_LINEAR),
    ], p=1)
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        resize_variants,
        PadIfNeeded(min_height=size, min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()],
              p=0.7),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT, p=0.5),
    ])
def generate_color_augmentation(aug_cfg: CfgNode) -> Union[Compose, None]:
    """Generate the color augmentation pipeline described by ``aug_cfg``.

    :param aug_cfg: augmentation config; *_PROB fields <= 0 disable the
        corresponding transform.
    :return: a Compose of the enabled transforms, or None when none are
        enabled.
    """
    color_aug_list = []
    if aug_cfg.BRIGHTNESS_CONTRAST_PROB > 0:
        color_aug_list.append(
            RandomBrightnessContrast(p=aug_cfg.BRIGHTNESS_CONTRAST_PROB))
    if aug_cfg.BLURRING_PROB > 0:
        # One blur flavor is chosen at random whenever blurring fires.
        blurring = OneOf([
            MotionBlur(aug_cfg.BLUR_LIMIT, p=1),
            MedianBlur(aug_cfg.BLUR_LIMIT, p=1),
            Blur(aug_cfg.BLUR_LIMIT, p=1),
        ], p=aug_cfg.BLURRING_PROB)
        color_aug_list.append(blurring)
    if aug_cfg.GAUSS_NOISE_PROB > 0:
        color_aug_list.append(GaussNoise(p=aug_cfg.GAUSS_NOISE_PROB))
    if aug_cfg.GRID_MASK_PROB > 0:
        color_aug_list.append(
            GridMask(num_grid=(3, 7), p=aug_cfg.GRID_MASK_PROB))
    # Idiomatic truthiness check (was: len(color_aug_list) > 0).
    if color_aug_list:
        return Compose(color_aug_list, p=1)
    return None
def strong_aug(p=0.5):
    """Reference 'strong' pipeline (rotate/flip + noise + blur +
    shift/scale/rotate + distortion + contrast + HSV), gated by ``p``."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    warp = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    contrast = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45,
                         p=0.2),
        warp,
        contrast,
        HueSaturationValue(p=0.3),
    ], p=p)
def _create_aug_compose(p=1, pad=256, target_dim=(256, 256), targets=None):
    """Light geometric + contrast pipeline with optional additional targets.

    ``pad`` and ``target_dim`` are currently unused — the pad/crop stages
    are disabled — but kept for interface stability. Flip/transpose,
    hue, noise and distortion alternatives were also previously present
    and are disabled.
    """
    if targets is None:
        targets = {}
    return Compose([
        RandomRotate90(p=0.3),
        ShiftScaleRotate(p=0.8, rotate_limit=0, shift_limit=0.025,
                         scale_limit=0.1, value=0,
                         border_mode=cv2.BORDER_CONSTANT),
        GridDistortion(p=0.8, value=0, border_mode=cv2.BORDER_CONSTANT),
        RandomBrightnessContrast(brightness_limit=0.05, contrast_limit=0.05,
                                 always_apply=True)
    ], p=p, additional_targets=targets)