def strong_aug(self):
    """Build a strong augmentation pipeline with random-colored Cutout holes.

    Fewer holes get larger maximum sizes, so the total occluded area stays
    roughly comparable between the one-hole and two-hole cases.
    """
    # random.randint is inclusive on BOTH ends; 255 is the largest valid
    # 8-bit channel value (the original upper bound of 256 could yield an
    # out-of-range fill color).
    color_r = random.randint(0, 255)
    color_g = random.randint(0, 255)
    color_b = random.randint(0, 255)
    num_holes = random.randint(1, 2)
    if num_holes == 2:
        max_h_size = random.randint(15, 30)
        max_w_size = random.randint(15, 30)
    else:
        max_h_size = random.randint(30, 60)
        max_w_size = random.randint(30, 60)
    return Compose([
        OneOf([
            OneOf([
                MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True,
                                    per_channel=True, p=0.2),
                IAAAdditiveGaussianNoise(),
                GaussNoise()
            ]),
            OneOf([InvertImg(), ToSepia()]),
            OneOf([
                ChannelDropout(channel_drop_range=(1, 1), fill_value=0),
                ChannelShuffle()
            ]),
            HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30,
                               val_shift_limit=20, p=0.1)
        ], p=0.25),
        Cutout(num_holes=num_holes, max_h_size=max_h_size,
               max_w_size=max_w_size,
               fill_value=[color_r, color_g, color_b], p=0.9),
    ])
def get_transforms(phase_config):
    """Assemble an augmentation pipeline from the flags on *phase_config*.

    Each enabled flag (``Noise``, ``Contrast``, ``Blur``, ``Distort``)
    contributes one OneOf group; normalization and tensor conversion are
    always appended at the end.
    """
    steps = []
    if phase_config.Noise:
        steps.append(OneOf([
            GaussNoise(),
            IAAAdditiveGaussianNoise(),
        ], p=0.5))
    if phase_config.Contrast:
        steps.append(OneOf([
            RandomContrast(0.5),
            RandomGamma(),
            RandomBrightness(),
        ], p=0.5))
    if phase_config.Blur:
        steps.append(OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.5))
    if phase_config.Distort:
        steps.append(OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.5))
    steps.append(Normalize(mean=phase_config.mean, std=phase_config.std, p=1))
    steps.append(ToTensor())
    return Compose(steps)
def get_transforms(phase):
    """Return the albumentations pipeline for *phase*.

    The ``"train"`` phase prepends heavy spatial and photometric
    augmentation; every phase shares the final resize + normalize + tensor
    conversion tail.
    """
    full_h, full_w = 256, 1600
    pipeline = []
    if phase == "train":
        pipeline += [
            OneOf([
                RandomSizedCrop(min_max_height=(50, 101), height=full_h,
                                width=full_w, p=0.5),
                PadIfNeeded(min_height=full_h, min_width=full_w, p=0.5),
            ], p=1),
            VerticalFlip(p=0.5),
            # RandomRotate90(p=0.5),
            OneOf([
                ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
            ], p=0.8),
            CLAHE(p=0.8),
            RandomBrightnessContrast(p=0.8),
            RandomGamma(p=0.8),
        ]
    pipeline += [
        Resize(height=128, width=800, interpolation=cv2.INTER_NEAREST),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ]
    return Compose(pipeline)
def data_augmentation(original_image, original_mask, crop=False, height=None, width=None):
    """Augment an image and its mask in lockstep.

    When ``crop`` is true, a mandatory RandomCrop to (height, width) is
    applied first; the flip/rotate/contrast/blur/noise pipeline follows.
    Returns the (image, mask) pair after augmentation.
    """
    if crop:
        assert height and width
        cropper = RandomCrop(height=height, width=width, always_apply=True)
        cropped = cropper(image=original_image, mask=original_mask)
        original_image = cropped['image']
        original_mask = cropped['mask']
    pipeline = Compose([
        HorizontalFlip(p=0.4),
        VerticalFlip(p=0.4),
        ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
        CLAHE(p=0.3),
        RandomGamma(gamma_limit=(80, 120), p=0.1),
        RandomBrightnessContrast(p=0.1),
        OneOf([
            MotionBlur(p=0.1),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.3),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
    ])
    result = pipeline(image=original_image, mask=original_mask)
    return result['image'], result['mask']
def augment(img):
    """Augment a float image (assumed in [0, 1] — TODO confirm) and return
    the augmented uint8 image.

    The input is scaled to 0-255 and cast to uint8 before the pipeline runs.
    The leftover debug ``print(img.shape)`` was removed.
    """
    img = np.array(img * 255, dtype=np.uint8)
    generator = Compose([
        Resize(240, 240),
        HorizontalFlip(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=75, p=0.5),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        RandomCrop(224, 224),
        HueSaturationValue(p=0.3),
    ], p=1)
    return generator(image=img)['image']
def __init__(self, target_size):
    """Build the training transform pipeline for square images of
    *target_size* (resize, random crop, geometric distortion, noise/blur,
    then normalize + tensor conversion).

    The leftover debug ``print(transformation)`` was removed.
    """
    transformation = []
    transformation += [Resize(target_size, target_size)]
    transformation += [
        RandomResizedCrop(target_size, target_size, scale=(0.8, 1.0)),
        OneOf([
            ShiftScaleRotate(),
            GridDistortion(),
            OpticalDistortion(),
            ElasticTransform(approximate=True)
        ], p=0.3),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
            MedianBlur(blur_limit=3),
            Blur(blur_limit=3)
        ], p=0.3)
    ]
    transformation += [Normalize(), ToTensor()]
    # composed pipeline used by the dataset
    self.transform = Compose(transformation)
def get_transforms():
    """Return the (train, validation) augmentation pipelines.

    Training gets a horizontal flip, one photometric jitter, and one
    geometric warp; validation only rescales pixel values to [0, 1].
    """
    photometric = OneOf([
        RandomContrast(),
        RandomGamma(),
        RandomBrightness(),
    ], p=0.3)
    geometric = OneOf([
        ElasticTransform(alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(),
        OpticalDistortion(distort_limit=2, shift_limit=0.5),
    ], p=0.3)
    aug_train = Compose([
        HorizontalFlip(p=0.5),
        photometric,
        geometric,
        ToFloat(max_value=1),
    ], p=1)
    aug_val = Compose([ToFloat(max_value=1)], p=1)
    return aug_train, aug_val
def aug_daniel_part1(prob=1.0, img_size=224):
    """Photometric + flip/rotate training augmentations (part 1).

    NOTE(review): ``img_size`` is not used anywhere in this pipeline —
    confirm whether it was meant to drive a crop/resize step.
    """
    enhance = OneOf(
        [CLAHE(clip_limit=2, p=.6),
         IAASharpen(p=.2),
         IAAEmboss(p=.2)],
        p=.7)
    noise = OneOf([
        IAAAdditiveGaussianNoise(p=.3),
        GaussNoise(p=.7),
    ], p=.5)
    return Compose([
        enhance,
        noise,
        RandomRotate90(p=0.5),
        Flip(p=0.5),
        Transpose(p=0.5),
        RandomBrightnessContrast(p=.5),
    ], p=prob)
def aug_daniel(prob=0.8):
    """Flip/rotate plus noise, sharpening/contrast and hue augmentations,
    applied as a whole with probability *prob*."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.3)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        OneOf([
            RandomContrast(),
            RandomBrightness(),
        ]),
    ], p=0.5)
    return Compose([
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        Flip(p=0.5),
        noise,
        enhance,
        HueSaturationValue(p=0.5),
    ], p=prob)
def _augmentation(self):
    """Set ``self.transformation`` for the current split.

    Only the 'train' split gets the stereo augmentation pipeline; every
    other split gets ``None`` (no augmentation).
    """
    if self.split != 'train':
        self.transformation = None
        return
    self.transformation = Compose([
        RandomShiftRotate(always_apply=True),
        RGBShiftStereo(always_apply=True, p_asym=0.3),
        OneOf([
            GaussNoiseStereo(always_apply=True, p_asym=1.0),
            RandomBrightnessContrastStereo(always_apply=True, p_asym=0.5)
        ], p=1.0)
    ])
def get_dataset(self): """ Loads and wraps training and validation datasets Returns: torch.utils.data.TensorDataset: trainset, valset """ # Split dataset to train and validate sets to evaluate the model train_ids, val_ids = train_test_split(self.images_df, test_size=self.test_size) self.val_ids = val_ids # Drop small images (mostly just clouds, water or corrupted) train_ids['file_size_kb'] = train_ids['ImageId'].map( lambda c_img_id: os.stat(os.path.join(self.path_train, c_img_id) ).st_size / 1024) train_ids = train_ids[train_ids['file_size_kb'] > 40] # keep only >40kb files # Undersample empty images to balance the dataset ships = train_ids[train_ids['has_ship'] == 1] no_ships = train_ids[train_ids['has_ship'] == 0].sample( frac=self.empty_frac) # Take only this fraction of empty images self.train_ids = pd.concat([ships, no_ships], axis=0) # Define transformations for augmentation and without it self.transform_no_aug = transforms.Compose([ transforms.Resize((int(768 / self.resize_factor), int(768 / self.resize_factor))), transforms.ToTensor() ]) if self.aug: self.transform = Compose([ Resize(height=int(768 / self.resize_factor), width=int(768 / self.resize_factor)), OneOf([RandomRotate90(), Transpose(), Flip()], p=0.3) ]) else: self.transform = self.transform_no_aug # TensorDataset wrapper trainset = AirbusDS_train(self.path_train, self.aug, self.transform, self.train_ids, self.masks) valset = AirbusDS_val(self.path_train, False, self.transform_no_aug, self.val_ids, self.masks) testset = AirbusDS_test(self.path_test, self.transform) return trainset, valset, testset
def create_train_transforms(conf):
    """Photometric training augmentations.

    The crop size is read from *conf* (and validates the keys exist), but
    the crop step itself is currently disabled.
    """
    height = conf['crop_height']
    width = conf['crop_width']
    color_shift = OneOf([RGBShift(), HueSaturationValue()], p=0.2)
    return Compose([
        # OneOf([
        #     RandomSizedCrop(min_max_height=(int(height * 0.8), int(height * 1.2)), w2h_ratio=1., height=height,
        #                     width=width, p=0.8),
        #     RandomCrop(height=height, width=width, p=0.2)], p=1),
        HorizontalFlip(),
        ImageCompression(p=0.1),
        RandomBrightnessContrast(p=0.4),
        RandomGamma(p=0.4),
        color_shift,
    ])
def strong_aug(p=1):
    """Compression/downscale artifacts, brightness jitter and a horizontal
    flip, applied as a whole with probability *p*."""
    degrade = OneOf([
        JpegCompression(quality_lower=15, quality_upper=40, p=1),
        Downscale(scale_min=0.5, scale_max=0.9, p=1),
    ], p=0.5)
    return Compose([
        degrade,
        RandomBrightness(p=0.15),
        HorizontalFlip(p=0.5),
    ], p=p)
def strong_aug(p=.5):
    """Classic 'strong' augmentation: flips/rotations plus one noise, one
    blur, and one enhancement transform, with probability *p* overall."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=.2)
    blur = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=.1),
        Blur(blur_limit=3, p=.1),
    ], p=.2)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=.3)
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        enhance,
        HueSaturationValue(p=.3),
    ], p=p)
def pixel_aug(p=.5):
    """Pixel-level degradation pipeline: JPEG compression, noise, blur and
    color jitter, applied as a whole with probability ``p``."""
    print('[DATA]: pixel aug')
    from albumentations import JpegCompression, Blur, Downscale, CLAHE, HueSaturationValue, \
        RandomBrightnessContrast, IAAAdditiveGaussianNoise, GaussNoise, GaussianBlur, MedianBlur, MotionBlur, \
        Compose, OneOf
    from random import sample, randint, uniform
    return Compose(
        [
            # Jpeg Compression artifacts
            OneOf([JpegCompression(quality_lower=20, quality_upper=99,
                                   p=1)], p=0.2),
            # Gaussian Noise
            # NOTE(review): randint()/uniform() run once, when this pipeline
            # is *built* — the noise loc/mean are frozen for every image the
            # returned Compose processes, not re-drawn per sample. Confirm
            # this is intended.
            OneOf([
                IAAAdditiveGaussianNoise(loc=randint(1, 9), p=1),
                GaussNoise(mean=uniform(0, 10.0), p=1),
            ], p=0.3),
            # Blur / resolution degradation
            OneOf([
                GaussianBlur(blur_limit=15, p=1),
                MotionBlur(blur_limit=19, p=1),
                Downscale(scale_min=0.3, scale_max=0.99, p=1),
                Blur(blur_limit=15, p=1),
                MedianBlur(blur_limit=9, p=1)
            ], p=0.4),
            # Color / contrast
            OneOf([
                CLAHE(clip_limit=4.0, p=1),
                HueSaturationValue(p=1),
                RandomBrightnessContrast(p=1),
            ], p=0.1)
        ], p=p)
def StrongAug(self, image, p=1):
    """Run a strong augmentation pipeline on *image*.

    The input is cast to uint8 for albumentations and the augmented result
    is returned as float32.
    """
    frame = image.astype(np.uint8)
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=.1),
        Blur(blur_limit=3, p=.1),
    ], p=0.2)
    distort = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    pipeline = Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        distort,
        enhance,
        HueSaturationValue(p=0.3),
    ], p=p)
    augmented = pipeline(image=frame)['image']
    return augmented.astype(np.float32)
def strong_aug(img_shape, p=0.5):
    """Return a *list* of strong augmentations sized relative to *img_shape*.

    NOTE(review): ``p`` is not applied anywhere in this list — confirm
    whether the caller wraps the result in ``Compose(..., p=p)``.
    """
    h, w = img_shape[0], img_shape[1]
    return [
        RandomRotate90(),
        Flip(),
        # holes between 5% and 10% of the image size
        CoarseDropout(2, int(h * 0.1), int(w * 0.1),
                      1, int(h * 0.05), int(w * 0.05),
                      p=1.),
        OneOf([
            # default scale: (0.01 * 255, 0.05 * 255)
            IAAAdditiveGaussianNoise(scale=(0.05 * 255, 0.1 * 255)),
            GaussNoise(),
        ], p=1.),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=1.),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=(0.0, 0.3),
                         rotate_limit=25,
                         p=0.35),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomBrightnessContrast(brightness_limit=0.3),  # default: 0.2
        ], p=1.),
    ]
def strong_aug(image, p=0.5):
    """Apply a strong photometric pipeline to *image* and return the
    augmented image (whole pipeline applied with probability *p*)."""
    # add noise
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
        ISONoise(),
    ], p=0.2)
    # blur / compression artifacts
    blur = OneOf([
        MotionBlur(p=0.1),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
        JpegCompression(p=.1),
    ], p=0.2)
    # sharpening / contrast enhancement
    sharpen = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    # histogram equalization, contrast and hue shifts
    color = OneOf([
        HueSaturationValue(),
        RandomBrightnessContrast(),
        Equalize(),
        # FancyPCA(),
    ], p=0.3)
    pipeline = Compose([noise, blur, sharpen, color, ToGray(p=0.1)], p=p)
    return pipeline(image=image)['image']
def generate_transforms(image_size):
    """Build the train/val transform dictionary.

    NOTE(review): ``image_size`` only feeds the commented-out Resize steps —
    confirm resizing happens elsewhere in the loading path.
    """
    train_transform = Compose([
        #Resize(height=image_size[0], width=image_size[1]),
        OneOf([RandomBrightness(limit=0.1, p=1),
               RandomContrast(limit=0.1, p=1)]),
        OneOf([
            MotionBlur(blur_limit=3),
            MedianBlur(blur_limit=3),
            GaussianBlur(blur_limit=3),
        ], p=0.5),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(
            shift_limit=0.2,
            scale_limit=0.2,
            rotate_limit=20,
            interpolation=cv2.INTER_LINEAR,
            border_mode=cv2.BORDER_REFLECT_101,
            p=1,
        ),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225),
                  max_pixel_value=255.0,
                  p=1.0),
    ])
    val_transform = Compose([
        #Resize(height=image_size[0], width=image_size[1]),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225),
                  max_pixel_value=255.0,
                  p=1.0),
    ])
    return {"train": train_transform, "val": val_transform}
def strong_aug(p=0.5):
    """Strong augmentation with a mandatory elastic transform plus the usual
    flip/noise/blur/distortion groups, applied with probability *p*."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    distort = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    enhance = OneOf([
        # CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        ElasticTransform(p=1.0),
        noise,
        blur,
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        distort,
        enhance,
        # HueSaturationValue(p=0.3),
    ], p=p)
def strong_aug(p=.5, config=None):
    """Strong augmentation ending in a mandatory 224x224 random crop.

    NOTE(review): neither ``p`` nor ``config`` is used — the outer Compose
    runs with its default probability (1.0). Honoring ``p`` would also skip
    the mandatory RandomCrop, so confirm intent before wiring it up.
    NOTE(review): the distortion group below is a Compose (all three applied
    together with p=0.2) while sibling pipelines use OneOf — confirm intent.
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=.1),
        Blur(blur_limit=3, p=.1),
    ], p=0.2)
    distort = Compose([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    return Compose([
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.001, scale_limit=0.1,
                         rotate_limit=20, p=.2),
        distort,
        enhance,
        HueSaturationValue(p=0.3),
        RandomCrop(height=224, width=224, p=1.0),
    ])
def strong_aug(p=1):
    """Aggressive augmentation: rotation, heavy noise/blur, distortion and a
    mandatory enhancement group, applied with probability *p*."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.7)
    blur = OneOf([
        MotionBlur(p=.7),
        MedianBlur(blur_limit=7, p=0.7),
        Blur(blur_limit=7, p=0.7),
    ], p=0.4)
    distort = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.4)
    enhance = OneOf([
        CLAHE(),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(0.3, 0.7),
        JpegCompression(70),
        RandomBrightness(-0.6)
    ], p=1)
    return Compose([
        Rotate(limit=20, p=0.8),
        # RandomRotate90(),
        # HorizontalFlip(p=0.4),
        # Transpose(),
        noise,
        blur,
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=35, p=0.2),
        distort,
        enhance,
        HueSaturationValue(p=0.3),
    ], p=p)
def __init__(self, image_ids, transform=True, preprocessing_fn=None):
    """
    Dataset class for segmentation problem
    :param image_ids: ids of the images, list
    :param transform: True/False, no transform in validation
    :param preprocessing_fn: a function for preprocessing image
    """
    # we create a empty dictionary to store image
    # and mask paths, keyed by a running integer index
    self.data = defaultdict(dict)
    # for augmentations
    self.transform = transform
    # preprocessing function to normalize images
    self.preprocessing_fn = preprocessing_fn
    # albumentation augmentations: shift/scale/rotate applied with 80%
    # probability, then one of gamma or brightness/contrast with 50%
    # probability; albumentations applies the same spatial transform to
    # both image and mask
    self.aug = Compose([
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.1,
                         rotate_limit=10, p=0.8),
        OneOf(
            [
                RandomGamma(gamma_limit=(90, 110)),
                RandomBrightnessContrast(brightness_limit=0.1,
                                         contrast_limit=0.1),
            ],
            p=0.5,
        ),
    ])
    # Fix: the original loop indexed self.data with an undefined name
    # `counter` (NameError at runtime); enumerate supplies the running
    # index. The unused glob() lookup was also dropped.
    # TODO: TRAIN_PATH comes from module scope — confirm it is defined.
    for counter, imgid in enumerate(image_ids):
        self.data[counter] = {
            'img_path': os.path.join(TRAIN_PATH, imgid + '.png'),
            'mask_path': os.path.join(TRAIN_PATH, imgid + '_mask.png'),
        }
def create_train_image_generator(X_train, y_train, batch=BATCH, supervision=False):
    """Infinite generator yielding augmented (image, mask) training batches.

    Each pass shuffles images and masks in unison (same seed), augments each
    pair with albumentations, scales values to [0, 1], and yields batches of
    size *batch*. With ``supervision=True`` the mask batch is duplicated for
    four deep-supervision outputs.

    Fix: removed the unreachable ``return train_generator`` that followed the
    infinite loop — it referenced an undefined name and could never run.
    """
    aug = Compose([
        VerticalFlip(p=0.25),
        HorizontalFlip(p=0.5),
        OneOf([
            ElasticTransform(p=0.5, alpha=1, sigma=50, alpha_affine=50),
            GridDistortion(p=0.5),
            ShiftScaleRotate(p=0.5),
        ], p=0.5),
        CLAHE(p=0.5),
        RandomContrast(p=0.5),
        RandomBrightness(p=0.5),
        RandomGamma(p=0.5),
        JpegCompression(p=0.5),
        Blur(p=0.5)
    ])
    while True:
        image_rgb = []
        image_mask = []
        # shuffle images and masks with the same seed so pairs stay aligned
        # (note: shuffles X_train / y_train in place)
        k = np.random.randint(0, 100)
        np.random.seed(k)
        np.random.shuffle(X_train)
        np.random.seed(k)
        np.random.shuffle(y_train)
        for i in range(X_train.shape[0]):
            augmented = aug(image=X_train[i], mask=y_train[i, ..., 0])
            image_rgb += [augmented['image']]
            image_mask += [augmented['mask']]
            if len(image_rgb) == batch:
                if supervision:
                    m = np.expand_dims(np.stack(image_mask, 0), -1) / 255.
                    yield np.stack(image_rgb, 0) / 255., {
                        'output_1': m,
                        'output_2': m,
                        'output_3': m,
                        'output_4': m
                    }
                else:
                    yield np.stack(image_rgb, 0) / 255., np.expand_dims(
                        np.stack(image_mask, 0), -1) / 255.
                image_rgb, image_mask = [], []
def get_photometric(self):
    """Photometric-only augmentation pipeline (no geometric changes).

    Blur kernel limits come from ``self.k``.
    """
    enhance = OneOf([
        CLAHE(clip_limit=2, p=.8),
        IAASharpen(p=.8),
        IAAEmboss(p=.8),
    ], p=0.6)
    noise = OneOf([
        IAAAdditiveGaussianNoise(p=.6),
        GaussNoise(p=.7),
    ], p=.5)
    blur = OneOf([
        MotionBlur(p=.5),
        MedianBlur(blur_limit=self.k, p=.3),
        Blur(blur_limit=self.k, p=.5),
    ], p=.5)
    brightness = OneOf([
        RandomContrast(),
        RandomBrightness(),
    ], p=.8)
    return Compose([enhance, noise, blur, brightness], p=0.95)
def vanilla_transform(p):
    """Baseline augmentation: flips, constant-border shift/scale/rotate,
    noise, perspective, and one pick each from brightness, blur/sharpen and
    color groups. Applied as a whole with probability *p*."""
    brightness = OneOf(
        [
            CLAHE(p=1),
            RandomBrightness(p=1),
            RandomGamma(p=1),
        ],
        p=0.5,
    )
    sharpness = OneOf(
        [
            IAASharpen(p=1),
            Blur(blur_limit=3, p=1),
            MotionBlur(blur_limit=3, p=1),
        ],
        p=0.5,
    )
    color = OneOf(
        [
            RandomContrast(p=1),
            HueSaturationValue(p=1),
        ],
        p=0.5,
    )
    return Compose([
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(rotate_limit=30,
                         scale_limit=0.15,
                         border_mode=cv2.BORDER_CONSTANT,
                         value=[0, 0, 0],
                         p=0.5),
        IAAAdditiveGaussianNoise(p=0.2),
        IAAPerspective(p=0.5),
        brightness,
        sharpness,
        color,
    ], p=p)
def strong_aug(p=0.5):
    """Photometric-heavy augmentation (flip, noise, blur, hue, enhancement,
    occasional sepia), applied as a whole with probability *p*."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.25),
        GaussianBlur(p=0.5),
        Blur(blur_limit=3, p=0.25),
    ], p=0.2)
    enhance = OneOf([
        RandomBrightness(),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.6)
    return Compose([
        HorizontalFlip(),
        noise,
        blur,
        HueSaturationValue(p=0.2),
        enhance,
        ToSepia(p=0.1),
    ], p=p)
def get_aug(p=1.0):
    """Flips and rotations, reflect-border shift/scale/rotate, and one pick
    each from the distortion and color groups; applied with probability *p*."""
    distort = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.3)
    color = OneOf([
        HueSaturationValue(10, 15, 10),
        CLAHE(clip_limit=2),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=15,
                         p=0.9,
                         border_mode=cv2.BORDER_REFLECT),
        distort,
        color,
    ], p=p)
def shiftscalerotate_aug():
    """Geometric jitter (one of shift/scale/rotate, affine shear, or
    perspective) followed by normalization and tensor conversion."""
    geometric = OneOf([
        ShiftScaleRotate(scale_limit=.15,
                         rotate_limit=15,
                         border_mode=cv2.BORDER_REPLICATE,
                         p=0.5),
        IAAAffine(shear=20, p=0.5),
        IAAPerspective(p=0.5),
    ], p=0.5)
    return Compose([geometric, Normalize(), ToTensorV2()], p=1)
def strong_aug(p=1):
    """With probability *p*, apply exactly one of: vertical flip, grid
    distortion, or optical distortion.

    Several other candidates (shift/scale/rotate, sharpen/emboss, contrast,
    elastic transform, flip combinations) were disabled in the original and
    are intentionally omitted here.
    """
    return OneOf([
        VerticalFlip(p=1),
        GridDistortion(p=1),
        OpticalDistortion(p=1),
    ], p=p)