def get_transforms(*, data_type):
    """Build the albumentations pipeline for a dataset split.

    Args:
        data_type: one of ``"light_train"``, ``"train"`` or ``"valid"``.

    Returns:
        An albumentations ``Compose`` pipeline. The ``"train"`` pipeline also
        augments the extra channel images registered via ``additional_targets``
        (keys ``r``/``g``/``b``/``y``) with the same spatial transforms.

    Raises:
        ValueError: if ``data_type`` is not a recognised split name.
        (Previously an unknown value silently returned ``None``.)
    """
    if data_type == "light_train":
        # Cheap geometric-only augmentation; scale_limit=(0, 0) keeps scale
        # fixed while still applying the default shift/rotate jitter.
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(scale_limit=(0, 0), p=0.5),
            ToTensorV2(),
        ])
    if data_type == "train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            # Mild non-rigid warps, rarely applied.
            albumentations.OneOf([
                albumentations.ElasticTransform(
                    alpha=1, sigma=20, alpha_affine=10),
                albumentations.GridDistortion(num_steps=6, distort_limit=0.1),
                albumentations.OpticalDistortion(distort_limit=0.05,
                                                 shift_limit=0.05),
            ], p=0.2),
            # Blur each channel independently (PerChannel always applies;
            # the inner OneOf probabilities keep actual blurring rare).
            albumentations.core.composition.PerChannel(albumentations.OneOf([
                albumentations.MotionBlur(p=.05),
                albumentations.MedianBlur(blur_limit=3, p=.05),
                albumentations.Blur(blur_limit=3, p=.05),
            ]), p=1.0),
            # Occlusion-style regularisation.
            albumentations.OneOf([
                albumentations.CoarseDropout(max_holes=16,
                                             max_height=CFG.size // 16,
                                             max_width=CFG.size // 16,
                                             fill_value=0,
                                             p=0.5),
                albumentations.GridDropout(ratio=0.09, p=0.5),
                albumentations.Cutout(num_holes=8,
                                      max_h_size=CFG.size // 16,
                                      max_w_size=CFG.size // 16,
                                      p=0.2),
            ], p=0.5),
            albumentations.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5),
            ToTensorV2(),
        ], additional_targets={
            'r': 'image',
            'g': 'image',
            'b': 'image',
            'y': 'image',
        })
    if data_type == 'valid':
        # Validation: deterministic resize only.
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])
    raise ValueError(f"unknown data_type: {data_type!r}")
def aug(self, image):
    """Resize ``image`` to ``self.input_shape`` and, in the 'train' phase,
    apply a randomized augmentation stack; returns the tensorized image.
    """
    resize = transforms.Resize(height=self.input_shape[0],
                               width=self.input_shape[1])
    if self.phase == 'train':
        pipeline = Compose([
            resize,
            RandomRotate90(),
            Flip(),
            Transpose(),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2),
            OneOf([
                MotionBlur(p=.2),
                MedianBlur(blur_limit=3, p=.1),
                Blur(blur_limit=3, p=.1),
            ], p=0.2),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=45, p=.2),
            ToTensorV2(),
        ], p=1)
    else:
        # Eval phase: deterministic resize + tensor conversion only.
        pipeline = Compose([resize, ToTensorV2()])
    return pipeline(image=image)['image']
def get_train_transforms():
    """Training pipeline: random crop, flips, colour jitter, ImageNet
    normalization, coarse dropout, tensor conversion.
    """
    steps = [
        RandomResizedCrop(args.img_size, args.img_size),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.25),
        ShiftScaleRotate(p=0.25),
        HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                           val_shift_limit=0.2, p=0.25),
        RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                 contrast_limit=(-0.1, 0.1), p=0.5),
        # ImageNet statistics; inputs assumed to be 8-bit (max_pixel_value=255).
        Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225],
                  max_pixel_value=255.0,
                  p=1.0),
        CoarseDropout(p=0.5),
        ToTensorV2(p=1.0),
    ]
    return Compose(steps, p=1.)
def aug_daniel_prepadded(prob=1.0, image_size=448):
    """Augmentation pipeline for images pre-padded beyond ``image_size``.

    D4-symmetry flips plus one of three rotate/crop strategies, followed by
    light sharpening/noise. Applied as a whole with probability ``prob``.
    Assumes the input is padded enough that the small rotations do not pull
    nodata pixels into the final crop — TODO confirm against the caller.
    """
    return Compose([
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        Flip(p=0.5),
        # Exactly one crop strategy is picked per call (outer p=1.0).
        OneOf([
            # Strategy 1: plain random crop, no rotation.
            RandomCrop(height=image_size, width=image_size, p=0.3),
            Compose([
                # Original author's note: "3.94 determined by largest angle
                # possible rotatable without introducing nodata pixels into
                # center crop area" — the 236/224 intermediate crop leaves
                # margin for the +/-6 degree rotation.
                ShiftScaleRotate(shift_limit=0.0, scale_limit=0.0,
                                 rotate_limit=6,
                                 border_mode=cv2.BORDER_CONSTANT, p=1.0),
                CenterCrop(height=int(round(236/224*image_size)),
                           width=int(round(236/224*image_size)), p=1.0),
                RandomCrop(height=image_size, width=image_size, p=1.0)
            ], p=0.4),
            Compose([
                # Strategy 3: larger rotation (+/-12 deg) straight to a
                # center crop; same nodata-margin caveat as above.
                ShiftScaleRotate(shift_limit=0.0, scale_limit=0.0,
                                 rotate_limit=12,
                                 border_mode=cv2.BORDER_CONSTANT, p=1.0),
                CenterCrop(height=image_size, width=image_size, p=1.0)
            ], p=0.3)
        ], p=1.0),
        #OneOf([
        #IAASharpen(),
        #IAAEmboss(),
        # RandomBrightnessContrast(brightness_limit=0.01, contrast_limit=0.01) # This causes a blackout for some reason
        #Blur(),
        #GaussNoise()
        #], p=0.5),
        IAASharpen(p=0.2),
        IAAAdditiveGaussianNoise(p=0.2),
        # HueSaturationValue(p=0.3)
        #ShiftScaleRotate(shift_limit=0.0, scale_limit=0.0, rotate_limit=2, border_mode=cv2.BORDER_CONSTANT, p=.75),
        #ChannelShuffle(p=0.33)
    ], p=prob)
def hard_aug(original_height=128, original_width=128, k=4):
    """Heavy augmentation: crop-or-pad, full D4 symmetry, non-rigid warps,
    then CLAHE / brightness-contrast / gamma jitter.
    """
    crop_or_pad = OneOf([
        RandomSizedCrop(
            min_max_height=(original_height // k, original_height),
            height=original_height,
            width=original_width,
            p=0.5),
        PadIfNeeded(
            min_height=original_height, min_width=original_width, p=0.5),
    ], p=1)
    nonrigid = OneOf([
        ElasticTransform(
            p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
    ], p=0.8)
    return Compose([
        crop_or_pad,
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        nonrigid,
        CLAHE(p=0.8),
        RandomBrightnessContrast(p=0.8),
        RandomGamma(p=0.8),
    ])
def __init__(self, root_dir, partition, augment=True):
    """Index samples under ``<root_dir>/x_<partition>`` and build the
    augmentation pipeline used when ``augment`` is enabled.
    """
    self.root_dir = root_dir
    self.partition = partition
    self.augment = augment
    sample_dir = os.path.join(self.root_dir, 'x_{}'.format(partition))
    self.list_IDs = os.listdir(sample_dir)
    self.augmentator = Compose([
        # Non destructive transformations
        VerticalFlip(p=0.6),
        HorizontalFlip(p=0.6),
        RandomRotate90(),
        Transpose(p=0.6),
        ShiftScaleRotate(p=0.45, scale_limit=(0.1, 0.3)),
        # # Non-rigid transformations
        ElasticTransform(p=0.25,
                         alpha=160,
                         sigma=180 * 0.05,
                         alpha_affine=120 * 0.03),
        Blur(blur_limit=3, p=0.2),
        # Color augmentation
        RandomBrightness(p=0.5),
        RandomContrast(p=0.5),
        RandomGamma(p=0.5),
        CLAHE(p=0.5),
    ])
def aug_daniel(prob=0.8):
    """D4 flips + noise + one sharpening/contrast enhancement + hue jitter,
    applied as a whole with probability ``prob``.
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
        #Blur(),
    ], p=0.3)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        OneOf([
            RandomContrast(),
            RandomBrightness(),
        ]),
        #Blur(),
        #GaussNoise()
    ], p=0.5)
    return Compose([
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        Flip(p=0.5),
        noise,
        enhance,
        HueSaturationValue(p=0.5),
    ], p=prob)
def transform(config, image, mask):
    """Apply a randomized augmentation pipeline to an image/mask pair.

    The overall application probability is read from
    ``config["train"]["dap"]["p"]`` and defaults to 1 when the key is
    missing (or ``config`` is not subscriptable that deep).

    Returns the augmented ``{'image': ..., 'mask': ...}`` dict.
    """
    try:
        p = config["train"]["dap"]["p"]
    # BUG FIX: was a bare ``except:`` that swallowed everything, including
    # KeyboardInterrupt; only a missing key / wrong type should default.
    except (KeyError, TypeError):
        p = 1
    assert 0 <= p <= 1
    # Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    # BUG FIX: ``p`` belongs on the Compose constructor. It was previously
    # passed to the Compose *call*, where it is treated as an (unknown) data
    # target rather than an application probability.
    return Compose([
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1)
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)(image=image, mask=mask)
def create_train_transforms(conf):
    """Training pipeline sized from ``conf['crop_height']``/``conf['crop_width']``;
    also augments the paired target registered as ``image1``.
    """
    height = conf['crop_height']
    width = conf['crop_width']
    crop = OneOf([
        RandomSizedCrop(
            min_max_height=(int(height * 0.7), int(height * 1.3)),
            w2h_ratio=1.,
            height=height,
            width=width,
            p=0.8),
        RandomCrop(height=height, width=width, p=0.2),
    ], p=1)
    steps = [
        SafeRotate(45, p=0.4, border_mode=cv2.BORDER_CONSTANT),
        crop,
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        ImageCompression(p=0.1),
        Lighting(alphastd=0.3),
        RandomBrightnessContrast(p=0.4),
        RandomGamma(p=0.4),
        OneOf([RGBShift(), HueSaturationValue()], p=0.2),
    ]
    return Compose(steps, additional_targets={'image1': 'image'})
def strong_aug(p=1):
    """Strong augmentation for 16-bit imagery: works in float space
    (ToFloat/FromFloat round-trip with max_value 65535).
    """
    steps = [
        ToFloat(),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        # Convert back to the original 16-bit range.
        FromFloat(dtype='uint16', max_value=65535.0),
    ]
    return Compose(steps, p=p)
def aug_train(resolution, p=1):
    """Resize to ``resolution`` then apply geometric, noise, blur, distortion
    and colour augmentations; ends with Normalize (no tensor conversion).
    """
    geometric = OneOf([
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose()], p=0.5)
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.5)
    blur = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.5)
    distortion = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.5)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.5)
    return Compose([
        Resize(resolution, resolution),
        geometric,
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        distortion,
        enhance,
        HueSaturationValue(p=0.3),
        Normalize(),
    ], p=p)
def get_light_augmentations(width, height):
    """Return a list of light augmentations: always-on flips/transpose and a
    near-full-size random crop resized back to (height, width).
    """
    crop = RandomSizedCrop((height - 4, height - 2), height, width)
    return [
        HorizontalFlip(p=1),
        VerticalFlip(p=1),
        Transpose(p=1),
        crop,
    ]
def get_train_transforms():
    """Training pipeline: shortest-side resize to 512, random 320x320 crop,
    one flip/transpose, colour jitter, ImageNet normalize, tensorize.
    NOTE(review): this shadows the other ``get_train_transforms`` if both
    definitions live in the same module — confirm intended.
    """
    steps = [
        SmallestMaxSize(max_size=512),
        RandomCrop(320, 320),
        OneOf([
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
        ]),
        HueSaturationValue(hue_shift_limit=0.2,
                           sat_shift_limit=0.2,
                           val_shift_limit=0.2,
                           p=0.5),
        RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                 contrast_limit=(-0.1, 0.1),
                                 p=0.5),
        Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225],
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2(p=1.0),
    ]
    return Compose(steps, p=1.)
def aug_with_crop(width=640, height=480, crop_prob=1):
    """Geometric + photometric + non-rigid augmentation pipeline.

    NOTE(review): ``width``, ``height`` and ``crop_prob`` are currently
    unused — the RandomCrop they fed is disabled; kept for signature
    compatibility.
    """
    warps = OneOf([
        ElasticTransform(p=0.5,
                         alpha=120,
                         sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
    ], p=0.8)
    steps = [
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        ShiftScaleRotate(
            shift_limit=0.01, scale_limit=0.04, rotate_limit=0, p=0.25),
        RandomBrightnessContrast(p=0.5),
        RandomGamma(p=0.25),
        IAAEmboss(p=0.25),
        Blur(p=0.01, blur_limit=3),
        warps,
    ]
    return Compose(steps, p=1)
def get_inference_transforms():
    """Randomized inference-time pipeline (presumably for test-time
    augmentation, given the random crops/flips — confirm with caller).
    """
    steps = [
        RandomResizedCrop(CFG['img_size'], CFG['img_size']),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        HueSaturationValue(hue_shift_limit=0.2,
                           sat_shift_limit=0.2,
                           val_shift_limit=0.2,
                           p=0.5),
        RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                 contrast_limit=(-0.1, 0.1),
                                 p=0.5),
        Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225],
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2(p=1.0),
    ]
    return Compose(steps, p=1.)
def flow(data_dir, Timage, Tmask, batch, size1, size2, augument=False):
    """Infinite generator yielding (x_batch, y_batch) pairs for training.

    Images from ``data_dir/Timage`` are rescaled to even dimensions; the
    corresponding masks from ``data_dir/Tmask`` are resized to twice those
    dimensions (a super-resolution-style pairing — confirm with caller).
    Both are scaled to [-1, 1] via ``x / 127.5 - 1``. Unreadable samples are
    skipped best-effort.

    NOTE(review): ``size1`` and ``size2`` are accepted but unused here.
    """
    images_ = os.listdir(data_dir + Timage)
    shuffle(images_)
    ids_int = list(range(len(images_)))
    NORMALIZE = 127.5
    while True:
        try:
            for start in range(0, len(ids_int), batch):
                x_batch = []
                y_batch = []
                end = min(start + batch, len(images_))
                batch_create = ids_int[start:end]
                jbs = dict()
                for loads in batch_create:
                    try:
                        img = cv2.imread(
                            os.path.join(data_dir, Timage, images_[loads]))
                        img = image_resize(img, width=param_maps["scale"])
                        height_o_image, width_o_image = img.shape[
                            0], img.shape[1]
                        # Force even dims so the 2x mask size is exact.
                        if height_o_image % 2 != 0:
                            height_o_image = height_o_image - 1
                        if width_o_image % 2 != 0:
                            width_o_image = width_o_image - 1
                        jbs["width"] = width_o_image * 2
                        jbs["height"] = height_o_image * 2
                        img = cv2.resize(img,
                                         (width_o_image, height_o_image))
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                        masks = cv2.imread(
                            os.path.join(data_dir, Tmask, images_[loads]))
                        masks = cv2.resize(masks,
                                           (jbs["width"], jbs["height"]))
                    # BUG FIX: was a bare ``except:``; narrow to Exception so
                    # KeyboardInterrupt/SystemExit propagate. Best-effort skip
                    # of unreadable/mismatched samples is preserved.
                    except Exception:
                        continue
                    if augument:
                        aug = Compose([
                            VerticalFlip(p=0.1),
                            Transpose(p=0.01),
                            RandomGamma(p=0.06),
                            OpticalDistortion(
                                p=0.00, distort_limit=0.7, shift_limit=0.3)
                        ])
                        augmented = aug(image=img, mask=masks)
                        x_batch.append(augmented['image'])
                        y_batch.append(augmented['mask'])
                    else:
                        x_batch.append(img)
                        y_batch.append(masks)
                x_batch = np.array(x_batch) / NORMALIZE
                x_batch = x_batch - 1
                y_batch = np.array(y_batch) / NORMALIZE
                y_batch = y_batch - 1
                yield x_batch, y_batch
        # BUG FIX: the outer clause was also a bare ``except:`` wrapping the
        # ``yield``. A bare except catches GeneratorExit, so closing this
        # generator raised "generator ignored GeneratorExit". Catch only
        # Exception and restart the epoch loop.
        except Exception:
            continue
def get_raw_tta():
    """Return the four TTA pipelines at RAW_CROP_SIZE:
    identity, horizontal flip, vertical flip, transpose.
    """
    def build(*extras):
        # Every pipeline shares the resize front and tensor back end.
        return Compose([Resize(RAW_CROP_SIZE, RAW_CROP_SIZE),
                        *extras,
                        ToTensor()])

    return [
        build(),
        build(HorizontalFlip(p=1, always_apply=True)),
        build(VerticalFlip(p=1, always_apply=True)),
        build(Transpose()),
    ]
def get_transforms():
    """Light geometric + hue augmentation (noise/blur/enhancement stages are
    intentionally disabled in this variant).
    """
    steps = [
        RandomRotate90(p=0.5),
        Flip(p=0.5),
        Transpose(p=0.5),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.2),
        HueSaturationValue(p=0.3),
    ]
    return Compose(steps)
def strong_aug(p=0.5):
    """Classic albumentations 'strong' recipe: D4 flips, noise, blur,
    affine jitter, distortions, enhancement, hue jitter.
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    distortion = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        distortion,
        enhance,
        HueSaturationValue(p=0.3),
    ], p=p)
def __init__(self, root_dir, annotation_lines, class_number, transform=None, loader=default_loader):
    """Store dataset configuration and build the always-applied 'strong'
    augmentation (sized crop + one D4 op + elastic warp) at 512px.
    """
    self.root_dir = root_dir
    self.annotation_lines = annotation_lines
    self.class_number = class_number
    self.transform = transform
    self.loader = loader
    curr_size = 512
    # Crop source height varies from half-size up to just under full size.
    min_max_height = (curr_size - curr_size // 2, curr_size - 1)
    self.transform_strong = Compose([
        RandomSizedCrop(min_max_height=min_max_height,
                        height=curr_size,
                        width=curr_size,
                        p=1.0),
        OneOf([
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Rotate(p=0.5),
        ], p=1.0),
        ElasticTransform(alpha=curr_size,
                         sigma=curr_size * 0.05,
                         alpha_affine=10,
                         p=1.0),
    ])
def get_transforms(*, data):
    """Build the albumentations pipeline for a dataset split.

    Args:
        data: ``'train'`` or ``'valid'``.

    Returns:
        An albumentations ``Compose`` pipeline ending in ``ToTensorV2``.

    Raises:
        ValueError: if ``data`` is not a recognised split name.
        (Previously an unknown value silently returned ``None``.)
    """
    if data == 'train':
        return Compose([
            RandomResizedCrop(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(p=0.5),
            HueSaturationValue(hue_shift_limit=0.2,
                               sat_shift_limit=0.2,
                               val_shift_limit=0.2,
                               p=0.5),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1),
                                     p=0.5),
            # ImageNet statistics.
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),
        ])
    elif data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),
        ])
    raise ValueError(f"unknown data split: {data!r}")
def test_transpose_both_image_and_mask():
    """Transpose(p=1) must swap the H and W axes of image and mask alike."""
    image = np.ones((8, 6, 3))
    mask = np.ones((8, 6))
    result = Transpose(p=1)(image=image, mask=mask)
    assert result['image'].shape == (6, 8, 3)
    assert result['mask'].shape == (6, 8)
def strong_aug(p=1):
    """Strong-ish recipe without affine/distortion: D4 flips, noise, blur,
    enhancement, hue jitter.
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    enhance = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        enhance,
        HueSaturationValue(p=0.3),
    ], p=p)
def strong_aug(p=0.5, crop_size=(512, 512)):
    """Aggressive augmentation pipeline applied with probability ``p``.

    Always random-resize-crops to ``crop_size`` (interpolation=4, i.e.
    Lanczos in OpenCV's enum — TODO confirm), then stacks D4 flips, noise,
    blur, a strong affine, distortions, enhancement, simulated weather
    (rain/snow/shadow/fog), RGB shift and hue jitter.
    """
    return Compose([
        # Crop first so every later transform works at crop_size.
        RandomResizedCrop(crop_size[0],
                          crop_size[1],
                          scale=(0.3, 1.0),
                          ratio=(0.75, 1.3),
                          interpolation=4,
                          p=1.0),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.8),
        OneOf([
            MotionBlur(p=0.5),
            MedianBlur(blur_limit=3, p=0.5),
            Blur(blur_limit=3, p=0.5),
        ], p=0.3),
        # Strong affine: up to half-scale change and full-circle rotation.
        ShiftScaleRotate(
            shift_limit=0.2, scale_limit=0.5, rotate_limit=180, p=0.8),
        OneOf([
            OpticalDistortion(p=0.5),
            GridDistortion(p=0.5),
            IAAPiecewiseAffine(p=0.5),
            ElasticTransform(p=0.5),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        # Simulated weather effects (plus extra GaussNoise as one option).
        OneOf([
            GaussNoise(),
            RandomRain(
                p=0.2, brightness_coefficient=0.9, drop_width=1, blur_value=5),
            RandomSnow(p=0.4,
                       brightness_coeff=0.5,
                       snow_point_lower=0.1,
                       snow_point_upper=0.3),
            RandomShadow(p=0.2,
                         num_shadows_lower=1,
                         num_shadows_upper=1,
                         shadow_dimension=5,
                         shadow_roi=(0, 0.5, 1, 1)),
            RandomFog(
                p=0.5, fog_coef_lower=0.3, fog_coef_upper=0.5, alpha_coef=0.1)
        ], p=0.3),
        RGBShift(),
        HueSaturationValue(p=0.9),
    ], p=p)
def light_aug_detection(p=1.0):
    """Light detection-time augmentation; bboxes in pascal_voc format are
    transformed along with the image (labels in ``category_ids``).
    """
    steps = [
        Flip(p=1),
        Transpose(p=0.8),
        ShiftScaleRotate(shift_limit=0.01,
                         scale_limit=0.01,
                         rotate_limit=30,
                         p=1),
        RandomBrightnessContrast(brightness_limit=(-0.01, 0.01),
                                 contrast_limit=(-0.01, 0.01),
                                 p=p),
    ]
    return Compose(steps,
                   bbox_params=BboxParams(format='pascal_voc',
                                          label_fields=['category_ids']),
                   p=p)
def strong_aug(p=1):
    """Moderate augmentation: flips/transpose, small affine jitter, mild
    brightness/contrast and hue/saturation changes.
    """
    steps = [
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        Transpose(p=0.5),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.1,
                         rotate_limit=20,
                         p=0.5),
        RandomBrightnessContrast(brightness_limit=0.15,
                                 contrast_limit=0.15,
                                 p=0.2),
        HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, p=0.2),
    ]
    return Compose(steps, p=p)
def transpose(image, mask):
    """Transpose ``image`` and ``mask`` together (swap H/W axes).

    Returns:
        (transposed_image, transposed_mask)
    """
    result = Transpose(p=1)(image=image, mask=mask)
    return result['image'], result['mask']
def get_png_tta():
    """Four TTA pipelines for PNG inputs: identity, h-flip, v-flip, transpose
    (no resize; tensor conversion only).
    """
    variants = [
        [],
        [HorizontalFlip(p=1, always_apply=True)],
        [VerticalFlip(p=1, always_apply=True)],
        [Transpose()],
    ]
    return [Compose([*ops, ToTensor()]) for ops in variants]
def augment_flips_color(p=.5):
    """Flips/rotations, a fairly strong ShiftScaleRotate (scale up to 50%),
    and a light blur; applied as a whole with probability ``p``.
    """
    steps = [
        RandomRotate90(),
        Transpose(),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.50,
                         rotate_limit=45,
                         p=.75),
        Blur(blur_limit=3),
        VerticalFlip(),
        HorizontalFlip(),
    ]
    return Compose(steps, p=p)
def strong_aug(self, p=.5):
    """Build the instance's augmentation pipeline: transpose, affine jitter,
    hue jitter, horizontal flip and a single small cutout hole.
    """
    steps = [
        Transpose(),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.2),
        HueSaturationValue(p=0.3),
        HorizontalFlip(always_apply=False, p=0.5),
        # Grey fill (0.5 * 255) for the dropped region.
        Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=0.5 * 255),
    ]
    return Compose(steps, p=p)