def __init__(self, r_shift_limit=20, g_shift_limit=20, b_shift_limit=20,
             always_apply=False, p=0.5, p_asym=0.2):
    """Initialize both parent transforms.

    Explicit base-class ``__init__`` calls are used (rather than ``super()``)
    because the two parents take different argument lists.
    """
    StereoTransformAsym.__init__(self, always_apply, p, p_asym)
    RGBShift.__init__(self, r_shift_limit, g_shift_limit, b_shift_limit,
                      always_apply, p)
def __init__(self, root_dir, partition, augment):
    """Record dataset location/flags and build the augmentation pipeline.

    Sample IDs are taken from the 'y_<partition>' directory listing.
    """
    self.root_dir = root_dir
    self.list_IDs = os.listdir(
        os.path.join(self.root_dir, 'y_{}'.format(partition)))
    self.partition = partition
    self.augment = augment
    self.to_tensor = transforms.ToTensor()
    self.augmentator = Compose([
        # Non-destructive geometric transformations
        VerticalFlip(p=0.6),
        HorizontalFlip(p=0.6),
        RandomRotate90(),
        Transpose(p=0.6),
        ShiftScaleRotate(p=0.45, scale_limit=(0.1, 0.3)),
        Blur(blur_limit=3, p=0.2),
        # Color augmentation
        RandomBrightness(p=0.5),
        RandomContrast(p=0.5),
        RGBShift(p=0.3),
        RandomGamma(p=0.5),
        CLAHE(p=0.5),
    ])
def hard_transform(image_size: int = 256, p: float = 0.5, **kwargs):
    """Hard augmentations (on training)."""
    _add_transform_default_params(kwargs)
    geometric = [
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
    ]
    # Exactly one color-space perturbation is picked per application.
    color_choice = OneOf([
        HueSaturationValue(p=p),
        ToGray(p=p),
        RGBShift(p=p),
        ChannelShuffle(p=p),
    ])
    photometric = [
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5, p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ]
    pipeline = geometric + [color_choice] + photometric + [
        PadIfNeeded(image_size, image_size, border_mode=cv2.BORDER_CONSTANT),
    ]
    return Compose(pipeline, **kwargs)
def medium_aug(p=1.0):
    """Medium-strength augmentation pipeline (flips, affine, color, blur, noise)."""
    return Compose(
        [
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(p=0.75, shift_limit=0.1, scale_limit=0.2,
                             rotate_limit=45, border_mode=cv2.BORDER_CONSTANT),
            RandomBrightnessContrast(
                brightness_limit=0.6, contrast_limit=0.6, p=0.5),
            # One color perturbation, half of the time.
            OneOf([
                HueSaturationValue(p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0),
            ], p=0.5),
            # One blur variant, 30% of the time.
            OneOf([
                Blur(p=1.0),
                MedianBlur(p=1.0),
                MotionBlur(p=1.0),
            ], p=0.3),
            OneOf([GridDistortion(p=1.0), ElasticTransform(p=1.0)], p=0.3),
            OneOf([
                CLAHE(p=1.0),
                IAASharpen(p=1.0),
            ], p=0.3),
            IAAAdditiveGaussianNoise(p=0.5),
        ],
        p=p)
def create_train_transforms(conf):
    """Build training augmentations from config keys 'crop_height'/'crop_width'.

    The pipeline is also applied to a second image via the 'image1' target.
    """
    height = conf['crop_height']
    width = conf['crop_width']
    crop_choice = OneOf([
        RandomSizedCrop(
            min_max_height=(int(height * 0.7), int(height * 1.3)),
            w2h_ratio=1., height=height, width=width, p=0.8),
        RandomCrop(height=height, width=width, p=0.2),
    ], p=1)
    return Compose(
        [
            SafeRotate(45, p=0.4, border_mode=cv2.BORDER_CONSTANT),
            crop_choice,
            HorizontalFlip(),
            VerticalFlip(),
            RandomRotate90(),
            Transpose(),
            ImageCompression(p=0.1),
            Lighting(alphastd=0.3),
            RandomBrightnessContrast(p=0.4),
            RandomGamma(p=0.4),
            OneOf([RGBShift(), HueSaturationValue()], p=0.2),
        ],
        additional_targets={'image1': 'image'})
def __call__(self, image, mask):
    """Apply an RGB shift jointly to the image and its mask.

    Returns the (image, mask) pair after augmentation.
    """
    augmentation = Compose([RGBShift(p=1.0)], p=1.0)
    augmented = augmentation(image=image, mask=mask)
    return augmented["image"], augmented["mask"]
def make(p=0.5):
    """Noise / blur / affine / color augmentation pipeline, applied with prob. p."""
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise(), ISONoise()], p=0.9)
    sharpen_or_contrast = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose(
        [
            noise,
            MotionBlur(p=0.3),
            ShiftScaleRotate(shift_limit=0.0925, scale_limit=0.4,
                             rotate_limit=7, border_mode=cv2.BORDER_CONSTANT,
                             value=0, p=0.6),
            sharpen_or_contrast,
            HueSaturationValue(p=0.3),
            RGBShift(40, 40, 40),
        ],
        p=p)
def get_input_pair(self, image_info):
    """Load an (image, mask) pair, augment both, and return tensors.

    Returns a dict with 'features' (augmented image tensor) and 'targets'
    (augmented mask as a long tensor, squeezed).
    """
    dataset_path = self.dataset_path
    stem = image_info["name"] + '_' + image_info["position"]
    img_path = os.path.join(dataset_path, image_info["dataset_folder"],
                            self.images_folder,
                            stem + '.' + self.image_type)
    mask_path = os.path.join(dataset_path, image_info["dataset_folder"],
                             self.masks_folder,
                             stem + '.' + self.mask_type)
    img_array = np.array(Image.open(img_path))
    mask_array = np.array(Image.open(mask_path)).astype(np.float32)
    aug = Compose([
        RandomCrop(224, 224),
        RandomRotate90(),
        Flip(),
        OneOf([
            RGBShift(),
            CLAHE(clip_limit=2),
        ], p=0.4),
        ToTensor(),
    ], p=1)
    augmented = aug(image=img_array, mask=mask_array)
    augmented_img = augmented['image']
    augmented_mask = augmented['mask'].squeeze().long()
    return {"features": augmented_img, "targets": augmented_mask}
def augmentation(img, n):
    """Make random augmentations with image n times.

    :rtype: ndarray
    :param img: image in matrix form
    :param n: how many augmentations need to apply
    :return: list of augmented versions of image
    """
    base_methods = [
        ElasticTransform(**elastic_params),
        RandomGamma(**gamma_params),
        GridDistortion(**all_other),
        RGBShift(**r_shift_params),
        Rotate(**rotate_params),
        RandomBrightness(**brightness_params),
    ]
    # Wrap each transform in a Compose so it is applied unconditionally
    # (p=1) when chosen below.
    methods = [Compose([m], p=1) for m in base_methods]
    chosen = np.random.choice(methods, replace=False, size=n)
    # `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `object` is the correct dtype for a ragged array of images.
    augmented = np.empty((n,), dtype=object)
    for i, method in enumerate(chosen):
        transformed = transform_image(method(image=img)["image"])
        if to_normalize:
            transformed = normalize(transformed)
        augmented[i] = transformed
    return augmented
def transforms_train(aug_proba=1.):
    """Training augmentations; the 'trimap' target is transformed as a mask."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(p=1),
        GaussNoise(p=1),
    ], p=0.2)
    color = OneOf([
        HueSaturationValue(hue_shift_limit=10, sat_shift_limit=15,
                           val_shift_limit=10, p=1),
        RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=1),
    ])
    brightness = OneOf([RandomContrast(p=1), RandomBrightness(p=1)], p=0.3)
    return Compose(
        transforms=[
            HorizontalFlip(p=0.5),
            Rotate(limit=25, p=0.5, border_mode=cv2.BORDER_CONSTANT,
                   value=0, interpolation=cv2.INTER_CUBIC),
            noise,
            color,
            brightness,
            OpticalDistortion(p=0.1),
            Resize(*SIZE),
            Normalize(),
        ],
        p=aug_proba,
        additional_targets={'trimap': 'mask'})
def main():
    """Demo loop: RGB-shift a synthetic image until Esc (keycode 27) is pressed."""
    image = zeros([200, 200, 3])
    image[75:125, 75:125] = 255  # white square on a black background
    transform = RGBShift(40, 40, 40, always_apply=True)
    while True:
        transformed = transform(image=image)['image']
        if imshowWait(image, transformed) == 27:
            break
def albumentations_transforms(p=1.0, is_train=False):
    """Return a callable mapping a PIL image to a normalized tensor.

    Training mode prepends pad/crop/flip/rotate/RGB-shift augmentations.
    """
    # Mean and standard deviation of the train dataset
    mean = np.array([0.4914, 0.4822, 0.4465])
    std = np.array([0.2023, 0.1994, 0.2010])
    transforms_list = []
    if is_train:
        # Data augmentation is applied to training data only.
        transforms_list += [
            PadIfNeeded(min_height=72, min_width=72, p=1.0),
            RandomCrop(height=64, width=64, p=1.0),
            HorizontalFlip(p=0.25),
            Rotate(limit=15, p=0.25),
            RGBShift(r_shift_limit=20, g_shift_limit=20,
                     b_shift_limit=20, p=0.25),
        ]
    transforms_list += [
        Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
        ToTensor(),
    ]
    data_transforms = Compose(transforms_list, p=p)
    return lambda img: data_transforms(image=np.array(img))["image"]
def get_input_pair(self, image_info):
    """Load and augment a single image; features and targets are the same tensor.

    NOTE(review): 'targets' mirrors 'features' — presumably an
    autoencoder/reconstruction task; confirm against the training loop.
    """
    dataset_path = self.dataset_path
    img_path = os.path.join(
        dataset_path, image_info["dataset_folder"], self.images_folder,
        image_info["name"] + '_' + image_info["position"] + '.' +
        self.image_type)
    img_array = np.array(Image.open(img_path))
    augm = Compose([
        RandomCrop(224, 224),
        RandomRotate90(),
        Flip(),
        OneOf([RGBShift(), CLAHE(clip_limit=2)], p=0.4),
        ToTensor(),
    ], p=1)
    augmented_img = augm(image=img_array)['image']
    return {"features": augmented_img, "targets": augmented_img}
def hard_transform(image_size=224, p=0.5):
    """Hard augmentation pipeline: cutout, affine, perspective, color, JPEG."""
    hole = image_size // 4  # cutout holes are a quarter of the image side
    steps = [
        Cutout(num_holes=4, max_w_size=hole, max_h_size=hole, p=p),
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5, p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ]
    return Compose(steps)
def data_augmentation(self, original_image):
    """Apply random augmentation to a sample image.

    Args:
        original_image: the input image.
    Returns:
        image_aug: the augmented image.
    """
    pipeline = Compose([
        HorizontalFlip(p=0.4),
        ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
        RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.3),
        # Brightness / contrast
        RandomGamma(gamma_limit=(80, 120), p=0.1),
        RandomBrightnessContrast(p=0.1),
        # Blur
        OneOf([
            MotionBlur(p=0.1),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.3),
        # Noise
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
    ])
    return pipeline(image=original_image)['image']
def hard_transform(image_size: int = 256, p: float = 0.5):
    """Hard augmentations."""
    affine = ShiftScaleRotate(
        shift_limit=0.1,
        scale_limit=0.1,
        rotate_limit=15,
        border_mode=cv2.BORDER_REFLECT,
        p=p,
    )
    color = OneOf([
        HueSaturationValue(p=p),
        ToGray(p=p),
        RGBShift(p=p),
        ChannelShuffle(p=p),
    ])
    return Compose([
        affine,
        IAAPerspective(scale=(0.02, 0.05), p=p),
        color,
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5, p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
def strong_aug2(p=1.0):
    """Strong augmentation: flips, brightness/contrast, affine, blur, color, noise."""
    blur_or_sharpen = OneOf([
        IAASharpen(p=1),
        Blur(blur_limit=5, p=1.0),
        MedianBlur(blur_limit=5, p=1.0),
        MotionBlur(p=1.0),
    ], p=0.6)
    color = OneOf([
        HueSaturationValue(hue_shift_limit=10, sat_shift_limit=30,
                           val_shift_limit=20, p=1.0),
        RGBShift(p=1.0),
        RandomGamma(p=1),
    ], p=0.3)
    return Compose(
        [
            Flip(p=0.75),
            RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                     contrast_limit=0.2, p=1.0,
                                     brightness_by_max=False),
            ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT, p=0.2),
            blur_or_sharpen,
            color,
            IAAAdditiveGaussianNoise(p=.2),
        ],
        p=p)
def color_aug(p=0.5):
    """Color-only augmentation: brightness, gamma and RGB shift."""
    steps = [
        RandomBrightness(p=0.5),
        RandomGamma(p=0.5),
        RGBShift(p=0.5),
    ]
    return Compose(steps, p=p)
def strong_aug(p=0.5, crop_size=(512, 512)):
    """Heavy augmentation: crop, flips, noise, blur, affine, distortion,
    contrast, weather effects and color shifts."""
    crop_h, crop_w = crop_size[0], crop_size[1]
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.8)
    blur = OneOf([
        MotionBlur(p=0.5),
        MedianBlur(blur_limit=3, p=0.5),
        Blur(blur_limit=3, p=0.5),
    ], p=0.3)
    distortion = OneOf([
        OpticalDistortion(p=0.5),
        GridDistortion(p=0.5),
        IAAPiecewiseAffine(p=0.5),
        ElasticTransform(p=0.5),
    ], p=0.3)
    contrast = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    weather = OneOf([
        GaussNoise(),
        RandomRain(p=0.2, brightness_coefficient=0.9, drop_width=1,
                   blur_value=5),
        RandomSnow(p=0.4, brightness_coeff=0.5, snow_point_lower=0.1,
                   snow_point_upper=0.3),
        RandomShadow(p=0.2, num_shadows_lower=1, num_shadows_upper=1,
                     shadow_dimension=5, shadow_roi=(0, 0.5, 1, 1)),
        RandomFog(p=0.5, fog_coef_lower=0.3, fog_coef_upper=0.5,
                  alpha_coef=0.1),
    ], p=0.3)
    return Compose([
        RandomResizedCrop(crop_h, crop_w, scale=(0.3, 1.0),
                          ratio=(0.75, 1.3), interpolation=4, p=1.0),
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5,
                         rotate_limit=180, p=0.8),
        distortion,
        contrast,
        weather,
        RGBShift(),
        HueSaturationValue(p=0.9),
    ], p=p)
def __init__(self):
    """Set up the albumentations pipeline: rotate, flip, RGB shift, cutout,
    CIFAR-style normalization, tensor conversion."""
    # Cutout holes are filled with the dataset mean (scaled to 0-255).
    fill = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255]
    self.alb_transform = Compose([
        Rotate((-30.0, 30.0)),
        HorizontalFlip(),
        RGBShift(r_shift_limit=50, g_shift_limit=50, b_shift_limit=50, p=0.5),
        Cutout(num_holes=8, max_h_size=8, max_w_size=8,
               fill_value=fill, always_apply=False, p=0.7),
        Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ToTensor(),
    ])
def augmentation_color(p=0.5):
    """Color augmentation pipeline: CLAHE, brightness/contrast, RGB and hue shifts."""
    steps = [
        CLAHE(clip_limit=3.0, tile_grid_size=(2, 2),
              always_apply=False, p=0.2),
        RandomBrightnessContrast(brightness_limit=(-0.3, 0.3),
                                 contrast_limit=(-0.2, 0.2), p=0.3),
        RGBShift(r_shift_limit=(-10, 10), g_shift_limit=(0, 10),
                 b_shift_limit=(0, 20), p=0.3),
        HueSaturationValue(hue_shift_limit=(0, 20), sat_shift_limit=0,
                           val_shift_limit=0, p=0.3),
    ]
    return Compose(steps, p=p)
def train_val_dataloaders(train_path: str, val_path: str, augment: bool,
                          batch_size: int):
    """Form the dataloaders for training and validation and store them in a dict.

    :param train_path: path to images for training
    :param val_path: path to images for validation
    :param augment: whether to apply augmentation to the training set
    :param batch_size: size of the batch
    :return: dictionary with 'train' and 'val' dataloaders
    """
    eval_transform = Compose([Resize(224, 224), Normalize(), ToTensorV2()])
    if augment:
        train_transform = Compose([
            Blur(p=0.1),
            ChannelDropout(p=0.1),
            Flip(p=0.5),
            GaussNoise((10.0, 30.0), 25.0, p=0.1),
            HueSaturationValue(p=0.1),
            RandomBrightnessContrast(brightness_limit=(-0.20, 0.50), p=0.1),
            RandomGamma(p=0.1),
            RandomRotate90(p=0.5),
            RGBShift(p=0.1),
            Transpose(p=0.25),
            Resize(224, 224, p=1.0),
            Normalize(),
            ToTensorV2(),
        ])
    else:
        train_transform = Compose(
            [Resize(224, 224), Normalize(), ToTensorV2()])
    train_dataset = AlbumentationsImageFolder(train_path, train_transform)
    val_dataset = AlbumentationsImageFolder(val_path, eval_transform)
    dataloader = {
        "train": torch.utils.data.DataLoader(
            dataset=train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=4,
            drop_last=True,
        ),
        "val": torch.utils.data.DataLoader(
            dataset=val_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=4,
            drop_last=True,
        ),
    }
    return dataloader
def get_input_pair(self, data_info_row):
    """Load a multi-channel image and mask, augment them, and return tensors.

    If the first channel group is 'rgb', color augmentation (RGBShift/CLAHE)
    is applied to the first three channels only; the remaining channels are
    concatenated back unchanged.

    Raises:
        Exception: if no channels are configured.
    """
    if not self.channels:
        raise Exception('You have to specify at least one channel.')
    instance_name = '_'.join(
        [data_info_row['name'], data_info_row['position']])
    image_path = get_filepath(self.dataset_path,
                              data_info_row['dataset_folder'],
                              self.images_folder, instance_name,
                              file_type=self.image_type)
    mask_path = get_filepath(self.dataset_path,
                             data_info_row['dataset_folder'],
                             self.masks_folder, instance_name,
                             file_type=self.mask_type)
    images_array = filter_by_channels(read_tensor(image_path), self.channels)
    if images_array.ndim == 2:
        images_array = np.expand_dims(images_array, -1)
    masks_array = read_tensor(mask_path)
    if self.channels[0] == 'rgb':
        # Color-augment only the first 3 (RGB) channels.
        rgb_tensor = images_array[:, :, :3].astype(np.uint8)
        rgb_aug = Compose(
            [OneOf([RGBShift(), CLAHE(clip_limit=2)], p=0.4)], p=0.9)
        augmented_rgb = rgb_aug(image=rgb_tensor, mask=masks_array)
        images_array = np.concatenate(
            [augmented_rgb['image'], images_array[:, :, 3:]], axis=2)
        masks_array = augmented_rgb['mask']
    aug = Compose([
        RandomRotate90(),
        Flip(),
        OneOf([
            RandomSizedCrop(
                min_max_height=(int(self.image_size * 0.7), self.image_size),
                height=self.image_size, width=self.image_size)
        ], p=0.4),
        ToTensor(),
    ])
    augmented = aug(image=images_array, mask=masks_array)
    return {'features': augmented['image'], 'targets': augmented['mask']}
def __init__(self):
    """Build the albumentations pipeline: flip, RGB shift, rotation,
    CIFAR-style normalization, cutout, tensor conversion."""
    self.alb_transform = Compose([
        HorizontalFlip(p=.5),
        RGBShift(50, 50, 50),
        Rotate(p=.5, border_mode=cv2.BORDER_CONSTANT),
        Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        Cutout(num_holes=3),
        ToTensor(),
    ])
def augmentator(p=0.5):
    """With probability p, apply exactly one of several photometric transforms."""
    candidates = [
        Blur(blur_limit=5, p=1.),
        RandomGamma(gamma_limit=(50, 150), p=1.),
        HueSaturationValue(
            hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=1.),
        RGBShift(r_shift_limit=15, g_shift_limit=5, b_shift_limit=15, p=1.),
        RandomBrightness(limit=.25, p=1.),
        RandomContrast(limit=.25, p=1.),
        MedianBlur(blur_limit=5, p=1.),
        CLAHE(clip_limit=2.0, tile_grid_size=(8, 8), p=1.),
    ]
    return OneOf(candidates, p=p)
def augument():
    """Return a color/texture augmentation pipeline applied with probability 0.5."""
    return Compose([
        RGBShift(),
        RandomBrightness(),
        RandomContrast(),
        HueSaturationValue(p=0.2),
        ChannelShuffle(),
        CLAHE(),
        Blur(),
        ToGray(),
        CoarseDropout(),
    ], p=0.5)
def create_train_transforms(conf):
    """Build lightweight training augmentations (flip, compression, color).

    NOTE(review): `conf['crop_height']`/`conf['crop_width']` are read but
    unused — the cropping stage appears to have been disabled.
    """
    height = conf['crop_height']
    width = conf['crop_width']
    return Compose([
        HorizontalFlip(),
        ImageCompression(p=0.1),
        RandomBrightnessContrast(p=0.4),
        RandomGamma(p=0.4),
        OneOf([RGBShift(), HueSaturationValue()], p=0.2),
    ])
def __init__(self, box_format='coco'):
    """Build a bbox-aware augmentation pipeline.

    Boxes keep their labels via 'labels' and are dropped when less than
    75% visible after the transform.
    """
    bbox_cfg = BboxParams(format=box_format,
                          min_visibility=0.75,
                          label_fields=['labels'])
    self.tsfm = Compose(
        [
            HorizontalFlip(),
            RandomBrightnessContrast(0.4, 0.4),
            GaussNoise(),
            RGBShift(),
            CLAHE(),
            RandomGamma(),
        ],
        bbox_params=bbox_cfg)
def get_random_aug(min_area=0., min_visibility=0.):
    """Build a bbox-aware augmentation pipeline with randomized probabilities.

    Each transform's probability is sampled once, at construction time.

    Fix: the transforms were previously wrapped in an inner Compose that was
    then passed bare (not inside a list) to a second Compose; a single
    Compose carrying bbox_params is the correct, equivalent form.

    :param min_area: minimum bbox area to keep after the transform
    :param min_visibility: minimum visible bbox fraction to keep
    :return: an albumentations Compose
    """
    return Compose(
        [
            RandomRotate90(p=random.uniform(0, 1)),
            GaussianBlur(p=random.uniform(0, 1)),
            GaussNoise(p=random.uniform(0, 1)),
            HueSaturationValue(p=random.uniform(0, 1)),
            RGBShift(p=random.uniform(0, 1)),
        ],
        bbox_params={
            'format': 'pascal_voc',
            'min_area': min_area,
            'min_visibility': min_visibility,
            'label_fields': ['category_id'],
        })
def get_augmentations(img_size):
    """Full augmentation stack sized for img_size: resize+crop, flips, noise,
    blur, affine, distortion, contrast/color, sepia, cutout, rescale."""
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.4)
    blur = OneOf([
        GlassBlur(p=1),
        GaussianBlur(p=1),
        MotionBlur(p=1),
        MedianBlur(blur_limit=3, p=1),
        Blur(blur_limit=3, p=1),
    ], p=0.4)
    distortion = OneOf([
        OpticalDistortion(p=1),
        ElasticTransform(),
        GridDistortion(p=1),
        IAAPiecewiseAffine(p=1),
    ], p=0.4)
    contrast_color = OneOf([
        CLAHE(clip_limit=2),  # histogram equalization
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
        RGBShift(),
    ], p=0.4)
    return Compose([
        Resize(height=int(img_size * 1.5), width=int(img_size * 1.5), p=1),
        RandomSizedCrop(min_max_height=(int(img_size * 0.9), img_size),
                        height=img_size, width=img_size,
                        always_apply=True, p=1),
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        distortion,
        contrast_color,
        HueSaturationValue(p=0.3),
        ToSepia(p=0.2),
        Cutout(p=0.2),
        RandomScale(p=0.2),
    ])