def get_aug(name='default', input_shape=[48, 48, 3]):
    if name == 'default':
        augmentations = A.Compose([
            A.RandomBrightnessContrast(p=0.4),
            A.RandomGamma(p=0.4),
            A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=30, p=0.4),
            A.CLAHE(p=0.4),
            A.Blur(blur_limit=1, p=0.3),
            A.GaussNoise(var_limit=(50, 80), p=0.3)
        ], p=1)
    elif name == 'plates':
        augmentations = A.Compose([
            A.RandomBrightnessContrast(p=0.4),
            A.RandomGamma(p=0.4),
            A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=30, p=0.4),
            A.CLAHE(p=0.4),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Blur(blur_limit=1, p=0.3),
            A.GaussNoise(var_limit=(50, 80), p=0.3),
            # Crop dimensions must be integers, so use floor division.
            A.RandomCrop(p=0.8, height=2 * input_shape[1] // 3, width=2 * input_shape[0] // 3)
        ], p=1)
    elif name == 'deepfake':
        augmentations = A.Compose([
            A.HorizontalFlip(p=0.5),
        ], p=1)
    elif name == 'plates2':
        augmentations = A.Compose([
            A.CLAHE(clip_limit=(1, 4), p=0.3),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomBrightness(limit=0.2, p=0.3),
            A.RandomContrast(limit=0.2, p=0.3),
            # A.Rotate(limit=360, p=0.9),
            A.RandomRotate90(p=0.3),
            A.HueSaturationValue(hue_shift_limit=(-50, 50), sat_shift_limit=(-15, 15), val_shift_limit=(-15, 15), p=0.5),
            # A.Blur(blur_limit=(5,7), p=0.3),
            A.GaussNoise(var_limit=(10, 50), p=0.3),
            A.CenterCrop(p=1, height=2 * input_shape[1] // 3, width=2 * input_shape[0] // 3),
            A.Resize(p=1, height=input_shape[1], width=input_shape[0])
        ], p=1)
    else:
        augmentations = None
    return augmentations
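# Sketch (not part of the original snippet): how a pipeline returned by get_aug is applied.
# An albumentations Compose is called with keyword arguments and returns a dict of results.
def _demo_get_aug():
    import numpy as np
    aug = get_aug('plates', input_shape=[48, 48, 3])
    img = np.random.randint(0, 256, (48, 48, 3), dtype=np.uint8)  # dummy HWC uint8 image
    return aug(image=img)['image']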
def get_transform(
    *,
    train: bool,
    test_size: int,
    normalize: bool = True,
    no_cutmix: bool = True,
    max_height: int,
    max_width: int,
) -> Callable:
    if train and no_cutmix:
        transforms = [
            A.Resize(height=test_size, width=test_size),
            A.CoarseDropout(
                max_holes=8, max_height=max_height, max_width=max_width,
                fill_value=255, p=0.7),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, p=0.5
            ),
            A.RandomBrightnessContrast(),
            A.GaussianBlur(),
            A.GaussNoise(),
        ]
    elif train:
        transforms = [
            A.Resize(height=test_size, width=test_size),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, p=0.5
            ),
            A.RandomBrightnessContrast(),
            A.GaussianBlur(),
            A.GaussNoise(),
        ]
    else:
        transforms = [
            A.Resize(height=test_size, width=test_size),
        ]
    if normalize:
        transforms.append(A.Normalize())
    transforms.extend([
        ToTensorV2(),
    ])
    return A.Compose(transforms)
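# Sketch (assumption): every argument of get_transform above is keyword-only; applying the
# result to an HWC uint8 array yields a normalized CHW torch.Tensor via ToTensorV2.
def _demo_get_transform():
    import numpy as np
    tfm = get_transform(train=True, test_size=224, max_height=32, max_width=32)
    img = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)  # dummy input image
    return tfm(image=img)['image']  # tensor of shape (3, 224, 224)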
def strong_aug(p=.5):
    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=.1),
            A.Blur(blur_limit=3, p=.1),
        ], p=0.2),
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomContrast(),
            A.RandomBrightness(),
        ], p=0.3),
        A.HueSaturationValue(p=0.3),
    ], p=p)
def generate_train_transformes(target_size=(50, 50)):
    ColorEffectP = 0.5  # assumed default; the original references this name without defining it in this snippet
    transforms = [
        A.Resize(height=target_size[0], width=target_size[1]),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.25),
        A.Rotate(limit=(-90, 90), p=1.),
        # width uses target_size[1] so non-square target sizes stay consistent with Resize above
        A.RandomResizedCrop(height=target_size[0], width=target_size[1], scale=(0.5, 1.), p=0.5),
        A.OneOf([
            A.Blur(p=ColorEffectP),
            A.GaussNoise(p=ColorEffectP),
            A.Downscale(p=ColorEffectP),
            A.RGBShift(p=ColorEffectP, r_shift_limit=20, g_shift_limit=20, b_shift_limit=20),
            A.RandomGamma(p=ColorEffectP),
            A.RandomBrightnessContrast(p=ColorEffectP)
        ], p=0.5),
        ToTensor()
    ]
    return A.Compose(transforms)
def get_individual_transforms():
    transforms = A.Compose([
        A.OneOf(
            [
                A.Transpose(p=1.0),
                A.VerticalFlip(p=1.0),
                A.HorizontalFlip(p=1.0),
                A.RandomRotate90(p=1.0),
                A.NoOp(),
            ],
            p=1.0,
        ),
        A.OneOf(
            [
                A.ElasticTransform(p=1.0),
                A.GridDistortion(p=1.0),
                A.OpticalDistortion(p=1.0),
                A.NoOp(),
            ],
            p=1.0,
        ),
        A.OneOf(
            [
                A.GaussNoise(p=1.0),
                A.GaussianBlur(p=1.0),
                A.ISONoise(p=1.0),
                A.CoarseDropout(
                    p=1.0, max_holes=16, max_height=16, max_width=16),
                A.NoOp(),
            ],
            p=1.0,
        ),
    ])
    return transforms
def image_augment(p=.5, cut_size=8):
    imgaugment = A.Compose(
        [
            A.HorizontalFlip(p=0.3),
            A.GaussNoise(p=.1),
            # A.OneOf([
            #     A.Blur(blur_limit=3, p=.1),
            #     A.GaussNoise(p=.1),
            # ], p=0.2),
            A.ShiftScaleRotate(shift_limit=0.0625,
                               scale_limit=0.1,
                               rotate_limit=10,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=(0, 0, 0),
                               p=.3),
            A.RandomBrightnessContrast(p=0.3),
            A.HueSaturationValue(hue_shift_limit=20,
                                 sat_shift_limit=20,
                                 val_shift_limit=20,
                                 p=0.1),
            A.Cutout(
                num_holes=1, max_h_size=cut_size, max_w_size=cut_size, p=0.3)
        ],
        p=p)
    return imgaugment
def get_train_transform():
    crop_height = 256
    crop_width = 256
    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=0.5),
            albu.GaussNoise(p=0.5),
        ], p=0.2),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        albu.OneOf([
            albu.CLAHE(clip_limit=2, p=0.5),
            albu.IAASharpen(p=0.5),
            albu.IAAEmboss(p=0.5),
            albu.RandomBrightnessContrast(p=0.5),
        ], p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1)
    ], p=1)
def get_transforms(image_size):
    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=5),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.GaussianBlur(blur_limit=5),
            albumentations.GaussNoise(var_limit=(5.0, 30.0)),
        ], p=0.7),
        albumentations.OneOf([
            albumentations.OpticalDistortion(distort_limit=1.0),
            albumentations.GridDistortion(num_steps=5, distort_limit=1.),
            albumentations.ElasticTransform(alpha=3),
        ], p=0.7),
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
        albumentations.Normalize()
    ])
    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize()
    ])
    return transforms_train, transforms_val
def get_transforms(type="albumentations"):
    if type == "albumentations":
        train_transforms = albumentations.Compose([
            albumentations.Transpose(p=0.5),
            albumentations.OneOf([
                albumentations.VerticalFlip(p=0.5),
                albumentations.HorizontalFlip(p=0.5),
            ]),
            albumentations.OneOf([
                albumentations.RandomBrightness(limit=0.2, p=0.75),
                albumentations.RandomContrast(limit=0.2, p=0.75),
            ]),
            albumentations.OneOf([
                albumentations.MotionBlur(blur_limit=5),
                albumentations.MedianBlur(blur_limit=5),
                albumentations.GaussianBlur(blur_limit=5),
                albumentations.GaussNoise(var_limit=(5.0, 30.0)),
            ], p=0.7),
            albumentations.OneOf([
                albumentations.OpticalDistortion(distort_limit=1.0),
                albumentations.GridDistortion(num_steps=5, distort_limit=1.),
                albumentations.ElasticTransform(alpha=3),
            ], p=0.7),
            # albumentations.OneOf([
            #     albumentations.CLAHE(clip_limit=4.0, p=0.7),
            #     albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
            #     albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0,
            #                                     p=0.85),
            # ]),
            albumentations.Resize(256, 256),
            # albumentations.Cutout(max_h_size=int(256 * 0.375), max_w_size=int(256 * 0.375), num_holes=1, p=0.7),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        test_transforms = albumentations.Compose([
            albumentations.Resize(256, 256),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    else:
        train_transforms = transforms.Compose([
            # AdvancedHairAugmentation(hairs_folder='/kaggle/input/melanoma-hairs'),
            transforms.RandomResizedCrop(size=256, scale=(0.9, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            Microscope(p=0.5),
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        test_transforms = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    return train_transforms, test_transforms
def get_transform(image_size, base_size=366):
    if image_size > base_size:
        resize = albumentations.Resize(image_size, image_size)
    else:
        resize = albumentations.CenterCrop(image_size, image_size)
    train_transform = albumentations.Compose([
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        # albumentations.Equalize(p=0.3),
        # albumentations.OneOf([
        #     albumentations.RandomContrast(),
        #     albumentations.RandomBrightness(),
        #     albumentations.CLAHE(),
        # ], p=0.3),
        albumentations.OneOf([
            albumentations.GaussianBlur(blur_limit=3),
            albumentations.GaussNoise(var_limit=(3, 10)),
            albumentations.MedianBlur(blur_limit=3)
        ], p=0.5),
        resize,
        # albumentations.Cutout(max_h_size=int(image_size * 0.1), max_w_size=int(image_size * 0.1), num_holes=3, p=0.3),
        albumentations.Normalize(),
        ToTensorV2()
    ])
    val_transform = albumentations.Compose([
        resize,
        albumentations.Normalize(),
        # always use V2, following https://albumentations.ai/docs/faq/#which-transformation-should-i-use-to-convert-a-numpy-array-with-an-image-or-a-mask-to-a-pytorch-tensor-totensor-or-totensorv2
        ToTensorV2()
    ])
    return train_transform, val_transform
def __init__(self, data_dir, mode):
    assert mode in ['train', 'val', 'test']
    self.mode = mode
    self.fn = list(Path(data_dir).rglob('*.jpg'))
    a_transforms_list = [A.Resize(350, 350), A.RandomCrop(350, 350)]
    if mode == 'train':
        a_transforms_list.extend([
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.HueSaturationValue(),
            A.ShiftScaleRotate(),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2)
        ])
    a_transforms_list.extend([ToTensor()])
    self.transforms = A.Compose(a_transforms_list)
def on_epoch_start(self, trainer, pl_module):
    if pl_module.hparams.progressive:
        ind = int(trainer.current_epoch / 3) if trainer.current_epoch < 3 * 7 else 7
        prog = [256, 512, 768, 1024, 1280, 1536, 1792, 2048]
        batch = [32, 32, 32, 16, 8, 8, 4, 4]
        # For Progressive Resizing
        train_transform = A.Compose([
            A.RandomResizedCrop(height=prog[ind], width=prog[ind], scale=(0.8, 1.0),
                                ratio=(0.75, 1.3333333333333333), interpolation=1,
                                always_apply=False, p=1.0),
            A.Flip(always_apply=False, p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2,
                                       brightness_by_max=True, always_apply=False, p=0.5),
            A.GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5),
            # A.Rotate(limit=90, interpolation=1, border_mode=4, value=None, mask_value=None, always_apply=False, p=0.5),
            A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=45,
                               interpolation=1, border_mode=4, value=None, mask_value=None,
                               always_apply=False, p=0.5),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225),
                        max_pixel_value=255.0, always_apply=False, p=1.0)
        ])
        valid_transform = A.Compose([
            A.Resize(height=prog[ind], width=prog[ind], interpolation=1, always_apply=False, p=1.0),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225),
                        max_pixel_value=255.0, always_apply=False, p=1.0)
        ])
        pl_module.train_dataset = PANDADataset(pl_module.train_df, pl_module.hparams.data_dir,
                                               pl_module.hparams.image_format,
                                               transform=train_transform,
                                               tile=pl_module.hparams.tile,
                                               layer=pl_module.hparams.image_layer)
        pl_module.val_dataset = PANDADataset(pl_module.val_df, pl_module.hparams.data_dir,
                                             pl_module.hparams.image_format,
                                             transform=valid_transform,
                                             tile=pl_module.hparams.tile,
                                             layer=pl_module.hparams.image_layer)
        trainer.train_dataloader = DataLoader(pl_module.train_dataset, batch_size=batch[ind],
                                              shuffle=True, num_workers=4, drop_last=True)
        trainer.val_dataloaders = [DataLoader(pl_module.val_dataset, batch_size=batch[ind],
                                              shuffle=True, num_workers=4, drop_last=True)]
        trainer.num_training_batches = len(trainer.train_dataloader)  # float('inf')
        trainer.num_val_batches = len(trainer.val_dataloaders[0])  # float('inf')
        trainer.val_check_batch = trainer.num_training_batches
def get_training_augmentation(min_area=0., min_visibility=0.):
    train_transform = [
        albu.OneOf([
            albu.ISONoise(p=.5),
            albu.GaussNoise(p=0.4),
            albu.Blur(blur_limit=3, p=0.1),
        ]),
        albu.OneOf([
            albu.CLAHE(clip_limit=2),
        ], p=0.2),
        albu.OneOf([
            albu.RandomSnow(snow_point_lower=0., snow_point_upper=0.2, brightness_coeff=2., p=0.5),
            albu.RandomSunFlare(p=0.5),
        ]),
        albu.OneOf([
            albu.RGBShift(p=0.1),
            albu.ChannelShuffle(p=0.2),
        ])
    ]
    return albu.Compose(train_transform, bbox_params={
        'format': 'coco',
        'min_area': min_area,
        'min_visibility': min_visibility,
        'label_fields': ['category_id']
    })
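# Sketch (assumption): because bbox_params declares the COCO format and a 'category_id'
# label field, the pipeline is called with image, bboxes ([x_min, y_min, width, height])
# and category_id together, and returns the transformed versions under the same keys.
def _demo_training_augmentation():
    import numpy as np
    aug = get_training_augmentation()
    img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # dummy image
    out = aug(image=img, bboxes=[[10, 20, 50, 60]], category_id=[1])
    return out['image'], out['bboxes'], out['category_id']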
def get_augmentations(p=0.5, image_size=224):
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.4, rotate_limit=45, p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])
    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])
    return train_tfms, valid_tfms
def __init__(self, path, _, autoaugment):
    self.imgs_pre, self.lbls_pre = load_data(path, "pre")
    self.imgs_post, self.lbls_post = load_data(path, "post")
    assert len(self.imgs_pre) == len(self.imgs_post)
    assert len(self.imgs_post) == len(self.lbls_post)
    data_frame = pd.read_csv("/workspace/xview2/utils/index.csv")
    self.idx = []
    self.idx.extend(data_frame[data_frame["1"] == 1]["idx"].values.tolist())
    self.idx.extend(data_frame[data_frame["2"] == 1]["idx"].values.tolist())
    self.idx.extend(data_frame[data_frame["3"] == 1]["idx"].values.tolist())
    self.idx.extend(data_frame[data_frame["4"] == 1]["idx"].values.tolist())
    self.idx = sorted(list(set(self.idx)))
    self.crop = A.CropNonEmptyMaskIfExists(p=1, width=512, height=512)
    self.zoom = A.RandomScale(p=0.2, scale_limit=(0, 0.3), interpolation=cv2.INTER_CUBIC)
    self.hflip = A.HorizontalFlip(p=0.33)
    self.vflip = A.VerticalFlip(p=0.33)
    self.noise = A.GaussNoise(p=0.1)
    self.brctr = A.RandomBrightnessContrast(p=0.2)
    self.normalize = A.Normalize()
    self.use_autoaugment = autoaugment
    if self.use_autoaugment:
        self.autoaugment = ImageNetPolicy()
def GaussNoise(cls, image, target, p=0.5):
    if MTransform.check(p):
        aug = albu.GaussNoise(var_limit=(20, 100), always_apply=False, p=0.5)
        result = aug(image=image, target=target)
        return result["image"], result["target"]
    else:
        return image, target
def h_flip_blur(p=1.0):
    return albumentations.Compose([
        albumentations.HorizontalFlip(p=p),
        albumentations.Blur(p=p),
        albumentations.GaussNoise(p=p)
    ], p=p)
def aug_medium(prob=1):
    return aug.Compose([
        aug.Flip(),
        aug.OneOf([
            aug.CLAHE(clip_limit=2, p=.5),
            aug.IAASharpen(p=.25),
        ], p=0.35),
        aug.OneOf([
            aug.RandomContrast(),
            aug.RandomGamma(),
            aug.RandomBrightness(),
        ], p=0.3),
        aug.OneOf([
            aug.ElasticTransform(
                alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            aug.GridDistortion(),
            aug.OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ], p=0.3),
        aug.ShiftScaleRotate(rotate_limit=12),
        aug.OneOf([
            aug.GaussNoise(p=.35),
            SaltPepperNoise(level_limit=0.0002, p=.7),
            aug.ISONoise(p=.7),
        ], p=.5),
        aug.Cutout(num_holes=3, p=.25),
    ], p=prob)
def train_get_transforms():
    return A.Compose([
        # A.Resize(random.randint(config.img_size, config.img_size + 128), random.randint(config.img_size, config.img_size + 128)),
        # A.RandomCrop(config.img_size, config.img_size),
        # A.crops.transforms.CenterCrop(256, 256, p=1.0),
        # A.Resize(config.img_size, config.img_size),
        # A.crops.transforms.CenterCrop(224, 224, p=1.0),
        A.OneOf([
            A.MotionBlur(blur_limit=5),
            A.MedianBlur(blur_limit=5),
            A.GaussianBlur(blur_limit=5),
            A.GaussNoise(var_limit=(5.0, 30.0))
        ], p=0.8),
        A.RandomBrightness(limit=0.1, p=0.5),
        A.RandomContrast(limit=[0.9, 1.1], p=0.5),
        A.Transpose(p=0.5),
        A.Rotate(limit=90, interpolation=1, border_mode=4, always_apply=False, p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(p=0.5),
        # A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
        # A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5),
        # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        # A.CoarseDropout(p=0.5),
        # A.Cutout(max_h_size=int(config.img_size * 0.5), max_w_size=int(config.img_size * 0.5), num_holes=1, p=0.8),
        ToTensorV2()
    ])
def augment_image(self, image):
    transform = A.Compose([
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.3),
        A.OneOf([
            A.MotionBlur(p=.4),
            A.MedianBlur(blur_limit=3, p=0.3),
            A.Blur(blur_limit=3, p=0.3),
        ], p=0.4),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),
        ], p=0.3),
        A.HueSaturationValue(p=0.3),
    ])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    augmented_image = transform(image=image)['image']
    augmented_image = cv2.cvtColor(augmented_image, cv2.COLOR_RGB2BGR)
    return augmented_image
def augment_image(self, image, annotations):
    new_annotations = annotations
    if annotations['bboxes'].any():
        annotation = {
            'image': image,
            'bboxes': annotations['bboxes'],
            'category_id': annotations['labels']
        }
        aug = self.get_aug([
            A.VerticalFlip(),
            A.HorizontalFlip(),
            A.RGBShift(),
            A.Blur(blur_limit=7),
            A.GaussNoise(),
            A.OpticalDistortion(distort_limit=0.2),
            A.GridDistortion(),
            A.ShiftScaleRotate(
                p=0.75, shift_limit=0.1, rotate_limit=45, scale_limit=0.2),
        ], min_area=(1024 * 0.05) ** 2)
        augmented = aug(**annotation)
        image = augmented['image']
        annotations['bboxes'] = np.array(augmented['bboxes'])
        annotations['labels'] = np.array(augmented['category_id'])
    return image, annotations
def __data_generation(self, training_temp, label_temp):
    'Generates data containing batch_size samples'
    # X : (n_samples, *dim, n_channels)
    # Initialization
    X = np.empty((self.batch_size, *self.dim, self.n_channels))
    y = np.empty((self.batch_size), dtype=int)

    # Generate data
    for i, img_label in enumerate(zip(training_temp, label_temp)):
        # Store sample
        training_img, training_label = img_label

        # If for training, add AUGMENTATION
        if self.augmentation:
            transform = A.Compose([
                A.GaussianBlur(p=0.2),  # gaussian blur
                A.RandomBrightnessContrast(p=0.5),  # brightness contrast change
                A.GaussNoise(p=0.5),  # inject gaussian noise
                A.GridDistortion(p=0.2),  # grid distortion
                A.ShiftScaleRotate(shift_limit=0.1, rotate_limit=5, p=0.5),
            ])
            training_img = transform(image=training_img)['image']
            self.add_line(training_img, p=0.5)

        training_img = training_img.astype("float32") / 255.0
        training_img = np.expand_dims(training_img, axis=2)
        X[i,] = training_img

        # Store class
        y[i] = training_label

    return X, tf.keras.utils.to_categorical(y, num_classes=self.n_classes)
def get_train_transforms(cfg):
    image_size = cfg["image_size"]
    return A.Compose([
        A.RandomSizedCrop(min_max_height=(image_size - 200, image_size - 200),
                          height=image_size, width=image_size, p=0.5),
        A.OneOf([
            A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                                 val_shift_limit=0.2, p=0.9),
            A.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.9),
            # var_limit is (min, max); the original had the bounds swapped
            A.GaussNoise(
                var_limit=(0.005, 0.01), mean=0, always_apply=False, p=0.6),
        ], p=0.9),
        A.ToGray(p=0.01),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Resize(height=image_size, width=image_size, p=1),
        A.CoarseDropout(
            max_holes=8, max_height=32, max_width=128, fill_value=0, p=0.3),
        A.CoarseDropout(
            max_holes=8, max_height=128, max_width=32, fill_value=0, p=0.3),
        ToTensorV2(p=1.0),
    ], p=1.0,
       bbox_params=A.BboxParams(format='pascal_voc', min_area=0,
                                min_visibility=0, label_fields=['labels']))
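# Sketch (assumption): with format='pascal_voc' and label_fields=['labels'], the pipeline
# above is called with boxes as [x_min, y_min, x_max, y_max] plus a parallel list of labels.
def _demo_train_transforms():
    import numpy as np
    cfg = {"image_size": 512}  # hypothetical config dict
    aug = get_train_transforms(cfg)
    img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
    out = aug(image=img, bboxes=[[30, 40, 120, 160]], labels=[0])
    return out['image'], out['bboxes'], out['labels']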
def __init__(self, image_size):
    self.data_transform = {
        'train_transform': A.Compose([
            A.Transpose(p=0.5),
            A.VerticalFlip(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.RandomBrightness(limit=0.2, p=0.75),
            A.RandomContrast(limit=0.2, p=0.75),
            A.OneOf([
                A.MotionBlur(blur_limit=5),
                A.MedianBlur(blur_limit=5),
                A.GaussianBlur(blur_limit=5),
                A.GaussNoise(var_limit=(5.0, 30.0)),
            ], p=0.7),
            A.OneOf([
                A.OpticalDistortion(distort_limit=1.0),
                A.GridDistortion(num_steps=5, distort_limit=1.),
                A.ElasticTransform(alpha=3),
            ], p=0.7),
            A.CLAHE(clip_limit=4.0, p=0.7),
            A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
            A.Resize(image_size, image_size),
            A.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
            A.Normalize()
        ]),
        # The original repeated A.Resize after Normalize; a single Resize is sufficient.
        'test_transform': A.Compose([
            A.Resize(image_size, image_size),
            A.Normalize()
        ])
    }
def cifar_alb_trainData():
    '''Apply Albumentations data transforms to the dataset and returns iterable'''
    mean = (0.491, 0.482, 0.446)
    std = (0.247, 0.243, 0.261)
    train_transform = [
        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=15, p=0.5),
        A.RandomCrop(height=32, width=32),
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=0.5),
        A.RandomBrightnessContrast(p=0.5),
        A.GaussNoise(),
        A.ElasticTransform(),
        # A.MaskDropout((10, 15), p=1),
        A.Cutout(num_holes=1, max_h_size=16, max_w_size=16, fill_value=mean, always_apply=False, p=0.5),
        A.Normalize(mean=mean, std=std),
        ToTensorV2()
    ]
    transforms_result = A.Compose(train_transform)
    return lambda img: transforms_result(image=np.array(img))["image"]
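# Sketch (assumption): the returned callable accepts a PIL image (converted to a NumPy array
# internally), so it can be passed directly as the `transform` of a torchvision dataset.
def _demo_cifar_alb_trainData():
    import torchvision
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                             transform=cifar_alb_trainData())
    img, label = train_set[0]  # img is a (3, 32, 32) tensor after Normalize + ToTensorV2
    return img, label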
def __init__(self,
             scale_range: Tuple[float, float] = (0.35, 0.65),  # assumes `from typing import Tuple`
             input_size: Tuple[int, int] = (416, 416),
             augmentation: bool = False) -> None:
    if augmentation:
        self.crop_func = RandomCropAndResize(scale_range, input_size)
        self.aug_func = alb.Compose([
            alb.OneOf([
                alb.RGBShift(),
                alb.ToGray(),
                alb.NoOp(),
            ]),
            alb.RandomBrightnessContrast(),
            alb.OneOf([
                alb.GaussNoise(),
                alb.IAAAdditiveGaussianNoise(),
                alb.CoarseDropout(fill_value=100),
            ])
        ])
    else:
        scale = (scale_range[0] + scale_range[1]) / 2.
        self.crop_func = CenterCropAndResize(scale, input_size)
        self.aug_func = None
    self.heatmap_stride = 4
    self.heatmap_size = (input_size[0] // self.heatmap_stride,
                         input_size[1] // self.heatmap_stride)
def albumentation():
    transform = albumentations.Compose([
        albumentations.OneOf([
            albumentations.GaussNoise(),
            albumentations.IAAAdditiveGaussianNoise()
        ]),
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=3, p=0.2),
            albumentations.MedianBlur(blur_limit=3, p=0.1),
            albumentations.Blur(blur_limit=2, p=0.1)
        ]),
        albumentations.OneOf([
            albumentations.RandomBrightness(limit=(0.1, 0.4)),
            albumentations.HueSaturationValue(hue_shift_limit=(0, 128),
                                              sat_shift_limit=(0, 60),
                                              val_shift_limit=(0, 20)),
            albumentations.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30)
        ]),
        albumentations.OneOf([
            albumentations.CLAHE(),
            albumentations.ChannelShuffle(),
            albumentations.IAASharpen(),
            albumentations.IAAEmboss(),
            albumentations.RandomBrightnessContrast(),
        ]),
        albumentations.OneOf([
            albumentations.RandomGamma(gamma_limit=(35, 255)),
            albumentations.OpticalDistortion(),
            albumentations.GridDistortion(),
            albumentations.IAAPiecewiseAffine()
        ]),
        A_torch.ToTensor(normalize={
            "mean": [0.485, 0.456, 0.406],
            "std": [0.229, 0.224, 0.225]
        })
    ])
    return transform
def __init__(self, outputs=6):
    super().__init__()
    self.net = models.resnet34(True)
    self.linear = Linear(1000, outputs)

    df = pd.read_csv("/home/dipet/kaggle/prostate/input/prostate-cancer-grade-assessment/train.csv")
    self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
    self.data_dir = "/datasets/panda/train_64_100"

    self.train_transforms = A.Compose(
        [
            A.Compose(
                [
                    A.OneOf([A.GaussNoise(), A.MultiplicativeNoise(elementwise=True)]),
                    A.RandomBrightnessContrast(0.02, 0.02),
                    A.HueSaturationValue(0, 10, 10),
                    A.Flip(),
                    A.RandomGridShuffle(grid=(10, 10)),
                    A.GridDistortion(),
                    A.Rotate()
                ],
                p=0.5,
            ),
            A.ToFloat(),
        ]
    )
    self.valid_transforms = A.Compose([A.ToFloat()])
def __init__(self, data_dir, is_train=True):
    self.paths = sorted(glob.glob(data_dir + '/*/*'))
    self.transform_train = A.Compose([
        A.Resize(height=1000, width=1000),
        A.RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1, p=0.5),
        A.RandomGamma(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.MotionBlur(blur_limit=3),
            A.GlassBlur(max_delta=3),
            A.GaussianBlur(blur_limit=3)
        ], p=0.5),
        A.GaussNoise(p=0.5),
        A.Normalize(mean=(0.446, 0.469, 0.472), std=(0.326, 0.330, 0.338),
                    max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ])
    self.transform_valid = A.Compose([
        A.Resize(height=1000, width=1000),
        A.Normalize(mean=(0.446, 0.469, 0.472), std=(0.326, 0.330, 0.338),
                    max_pixel_value=255.0, p=1.0),
        ToTensorV2(p=1.0),
    ])
    if is_train:
        self.data_transforms = self.transform_train
    else:
        self.data_transforms = self.transform_valid
def get_train_transforms():
    return A.Compose([
        A.RandomSizedCrop((140, 140), width=160, height=160, p=0.25),
        A.OneOf([
            A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                                 val_shift_limit=0.2, p=0.9),
            A.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.9),
        ], p=0.9),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Rotate(limit=45, p=1),
        A.ChannelShuffle(p=0.05),
        A.FancyPCA(),
        A.GaussNoise(p=0.25),
        A.Blur(blur_limit=4, p=0.1),
        A.Cutout(num_holes=8, max_h_size=4, max_w_size=4, fill_value=0, p=0.1),
        A.Resize(160, 160),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(p=1.0),
    ], p=1.0)