def data_aug(images):
    """Apply a randomized photometric augmentation to a batch of images.

    With probability 0.5 the batch passes through unchanged (Identity);
    otherwise, with another 0.5 probability, a chain of noise/blur,
    brightness and contrast perturbations is applied.

    Args:
        images: Batch of images in the format accepted by imgaug.

    Returns:
        The augmented batch.
    """
    # One randomly chosen corruption: additive noise, pixel replacement, or blur.
    noise_or_blur = iaa.OneOf([
        iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255)),
        iaa.AdditiveLaplaceNoise(scale=(0, 0.1 * 255)),
        iaa.ReplaceElementwise(0.03, [0, 255]),
        iaa.GaussianBlur(sigma=(0.0, 3.0)),
        iaa.BilateralBlur(d=(3, 10), sigma_color=(10, 250),
                          sigma_space=(10, 250)),
    ])
    # One randomly chosen brightness change.
    brightness = iaa.OneOf([
        iaa.Add((-40, 40)),
        iaa.AddElementwise((-20, 20)),
        iaa.pillike.EnhanceBrightness(),
    ])
    # One randomly chosen contrast change.
    contrast = iaa.OneOf([
        iaa.GammaContrast((0.2, 2.0)),
        iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)),
        iaa.LogContrast(gain=(0.6, 1.4)),
        iaa.AllChannelsCLAHE(),
        iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0)),
    ])
    # noise_or_blur itself only fires half the time; brightness and
    # contrast always fire once the chain is entered.
    chain = iaa.Sequential([
        iaa.Sometimes(0.5, noise_or_blur),
        brightness,
        contrast,
    ])
    # Outer gate: 50% Identity, else 50% chance of running the chain.
    augmenter = iaa.Sometimes(0.5, iaa.Identity(),
                              iaa.Sometimes(0.5, chain))
    return augmenter(images=images)
def aug_probaV(bg_color=255):
    """Build the augmentation pipeline for ProbaV-style imagery.

    Currently a single AllChannelsCLAHE step, which also works on
    16-bit images.

    Args:
        bg_color: Background color value. NOTE(review): currently unused
            by this pipeline — kept for interface compatibility.

    Returns:
        An ``augmenters.Sequential`` applying the configured steps in order.
    """
    clahe = augmenters.AllChannelsCLAHE(clip_limit=(10, 80),
                                        tile_grid_size_px=(4, 8))
    return augmenters.Sequential([clahe], random_order=False)
def chapter_augmenters_allchannelsclahe():
    """Render documentation example grids for ``iaa.AllChannelsCLAHE``.

    Generates three 4x2 quokka grids: default parameters, a randomized
    clip limit, and per-channel application.
    """
    fn_start = "contrast/allchannelsclahe"
    variants = [
        ("", iaa.AllChannelsCLAHE()),
        ("_random_clip_limit", iaa.AllChannelsCLAHE(clip_limit=(1, 10))),
        ("_per_channel",
         iaa.AllChannelsCLAHE(clip_limit=(1, 10), per_channel=True)),
    ]
    for suffix, aug in variants:
        run_and_save_augseq(
            fn_start + suffix + ".jpg", aug,
            [ia.quokka(size=(128, 128)) for _ in range(4 * 2)],
            cols=4, rows=2)
def main():
    """Visual sweep over imgaug contrast augmenters.

    Builds a list of (label, augmenter) pairs covering GammaContrast,
    SigmoidContrast, LogContrast, LinearContrast, histogram equalization
    variants and (AllChannels)CLAHE with assorted parameter values, then
    shows a 4x4 grid of augmented astronaut images for each one.

    Command line:
        --per_channel: sample contrast parameters per channel.
    """
    parser = argparse.ArgumentParser(description="Contrast check script")
    parser.add_argument("--per_channel", dest="per_channel",
                        action="store_true")
    args = parser.parse_args()
    augs = []
    # Scalars, a tuple (uniform range) and a list (random choice) are all
    # valid imgaug stochastic parameters.
    for p in [0.25, 0.5, 1.0, 2.0, (0.5, 1.5), [0.5, 1.0, 1.5]]:
        augs.append(("GammaContrast " + str(p),
                     iaa.GammaContrast(p, per_channel=args.per_channel)))
    for cutoff in [0.25, 0.5, 0.75]:
        for gain in [5, 10, 15, 20, 25]:
            augs.append(("SigmoidContrast " + str(cutoff) + " " + str(gain),
                         iaa.SigmoidContrast(gain, cutoff,
                                             per_channel=args.per_channel)))
    for gain in [0.0, 0.25, 0.5, 1.0, 2.0, (0.5, 1.5), [0.5, 1.0, 1.5]]:
        augs.append(("LogContrast " + str(gain),
                     iaa.LogContrast(gain, per_channel=args.per_channel)))
    # NOTE(review): 0.5 appears twice and there is no -0.5; the second 0.5
    # was possibly meant to be -0.5 — confirm against the original script.
    for alpha in [-1.0, 0.5, 0, 0.5, 1.0, 2.0, (0.5, 1.5), [0.5, 1.0, 1.5]]:
        augs.append(("LinearContrast " + str(alpha),
                     iaa.LinearContrast(alpha, per_channel=args.per_channel)))
    augs.append(("AllChannelsHistogramEqualization",
                 iaa.AllChannelsHistogramEqualization()))
    augs.append(("HistogramEqualization (Lab)",
                 iaa.HistogramEqualization(
                     to_colorspace=iaa.HistogramEqualization.Lab)))
    augs.append(("HistogramEqualization (HSV)",
                 iaa.HistogramEqualization(
                     to_colorspace=iaa.HistogramEqualization.HSV)))
    augs.append(("HistogramEqualization (HLS)",
                 iaa.HistogramEqualization(
                     to_colorspace=iaa.HistogramEqualization.HLS)))
    # NOTE(review): "%d" truncates clip_limit=0.1 to "0" in the label; the
    # augmenter itself still receives 0.1.
    for clip_limit in [0.1, 1, 5, 10]:
        for tile_grid_size_px in [3, 7]:
            augs.append(("AllChannelsCLAHE %d %dx%d"
                         % (clip_limit, tile_grid_size_px, tile_grid_size_px),
                         iaa.AllChannelsCLAHE(
                             clip_limit=clip_limit,
                             tile_grid_size_px=tile_grid_size_px,
                             per_channel=args.per_channel)))
    for clip_limit in [1, 5, 10, 100, 200]:
        for tile_grid_size_px in [3, 7, 15]:
            augs.append(("CLAHE %d %dx%d"
                         % (clip_limit, tile_grid_size_px, tile_grid_size_px),
                         iaa.CLAHE(clip_limit=clip_limit,
                                   tile_grid_size_px=tile_grid_size_px)))
    # 16 identical test images, resized to 128x128.
    images = [data.astronaut()] * 16
    images = ia.imresize_many_images(np.uint8(images), (128, 128))
    for name, aug in augs:
        print("-----------")
        print(name)
        print("-----------")
        images_aug = aug.augment_images(images)
        # Keep the first cell unaugmented as a visual reference.
        images_aug[0] = images[0]
        grid = ia.draw_grid(images_aug, rows=4, cols=4)
        ia.imshow(grid)
"Histogram_Equalization": iaa.HistogramEqualization(), # Augmenter to perform standard histogram equalization on images, applied to all channels of each input image "All_Channels_Histogram_Equalization": iaa.AllChannelsHistogramEqualization(), # Contrast Limited Adaptive Histogram Equalization (CLAHE). This augmenter applies CLAHE to images, a form of # histogram equalization that normalizes within local image patches. # Creates a CLAHE augmenter with clip limit uniformly sampled from [cl_lo..cl_hi], i.e. 1 is rather low contrast # and 50 is rather high contrast. Kernel sizes of SxS, where S is uniformly sampled from [t_lo..t_hi]. # Sampling happens once per image. (Note: more parameters are available for further specification) "CLAHE": lambda cl_lo, cl_hi, t_lo, t_hi: iaa.CLAHE(clip_limit=(cl_lo, cl_hi), tile_grid_size_px=(t_lo, t_hi)), # Contrast Limited Adaptive Histogram Equalization (refer above), applied to all channels of the input images. # CLAHE performs histogram equalization within image patches, i.e. over local neighbourhoods "All_Channels_CLAHE": lambda cl_lo, cl_hi, t_lo, t_hi: iaa.AllChannelsCLAHE(clip_limit=(cl_lo, cl_hi), tile_grid_size_px=(t_lo, t_hi)), # Augmenter that changes the contrast of images using a unique formula (using gamma). # Multiplier for gamma function is between lo and hi,, sampled randomly per image (higher values darken image) # For percent of all images values are sampled independently per channel. "Gamma_Contrast": lambda lo, hi, percent: iaa.GammaContrast((lo, hi), per_channel=percent), # Augmenter that changes the contrast of images using a unique formula (linear). # Multiplier for linear function is between lo and hi, sampled randomly per image # For percent of all images values are sampled independently per channel. "Linear_Contrast": lambda lo, hi, percent: iaa.LinearContrast((lo, hi), per_channel=percent), # Augmenter that changes the contrast of images using a unique formula (using log). 
# Multiplier for the log function is between lo and hi, sampled randomly per image.
# For percent of all images, values are sampled independently per channel.
# Values around 1.0 lead to contrast-adjusted images. Values above 1.0 quickly lead to partially broken
import random
from imgaug import augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
import cv2
from verifier import fitCoords

# Module-level augmentation pipeline, applied in fixed order.
# NOTE(review): random.randint(0, 360) is evaluated ONCE at import time,
# so the MotionBlur angle is fixed for the whole process, not resampled
# per image — confirm this is intended.
aug = iaa.Sequential(
    [
        iaa.Sometimes(0.5, iaa.Crop(percent=(0.1, 0.3), keep_size=False)),
        iaa.Sometimes(0.5, iaa.MotionBlur(15, random.randint(0, 360))),
        iaa.OneOf([
            iaa.AllChannelsCLAHE(clip_limit=10),
            iaa.AdditiveGaussianNoise(scale=(10, 35)),
            iaa.FastSnowyLandscape(lightness_threshold=(50, 115),
                                   from_colorspace="BGR")
        ]),
        # iaa.Sometimes(0.25, iaa.Affine(scale={"x": (1.0, 1.2), "y": (1.0, 1.2)})),
        iaa.Sometimes(0.25, iaa.Multiply((0.85, 1.15))),
        iaa.Sometimes(0.25, iaa.ContrastNormalization((0.85, 1.15))),
        # iaa.Affine(rotate=(0, 360))
    ],
    random_order=False
)


def customAugmentations(image, box):
    """Augment an image together with its single bounding box.

    Args:
        image: Input image (BGR, per the FastSnowyLandscape colorspace above).
        box: Bounding box as (y1, x1, y2, x2).

    NOTE(review): no return statement is visible here — the computed
    augImage/augBox appear to be dropped. Confirm whether a trailing
    ``return augImage, augBox`` was lost.
    """
    y1, x1, y2, x2 = box
    bb = BoundingBox(x1=x1, x2=x2, y1=y1, y2=y2)
    augImage, augBox = aug(image=image, bounding_boxes=bb)
    # Clamp augmented box coordinates to the image bounds.
    augBox = fitCoords([augBox.y1_int, augBox.x1_int,
                        augBox.y2_int, augBox.x2_int], augImage.shape[:2])
def transform(self, image: np.ndarray, target: str, condition: int) -> Tuple[torch.Tensor, torch.Tensor, int]: """Transforms and normalizes the data. If in training mode the data is augmentated. Args: image (np.ndarray): Image to transform target (str): Training target condition (int): Condition Returns: Tuple[torch.Tensor, torch.Tensor, int]: Augmented image, target and condition """ # Resize resize = iaa.Resize({"height": 224, "width": 224}) image = resize.augment_image(image) # Random horizontal flipping and erase if self.train: if random.random() > 0.5: # flip image flip = iaa.HorizontalFlip(1.0) image = flip.augment_image(image) # flip class if target == "a": target = "d" elif target == "d": target = "a" # flip condition if condition == 2: condition = 4 elif condition == 4: condition = 2 #imgaug seq = iaa.Sequential([ iaa.Sometimes(0.5, iaa.Affine(rotate=(-15, 15))), iaa.Sometimes(0.3, iaa.EdgeDetect(alpha=(0.3, 0.8))), iaa.Sometimes(0.5, iaa.MotionBlur(k=iap.Choice([3, 5, 7]))), iaa.OneOf([ iaa.Dropout(p=(0, 0.3), per_channel=0.5), iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.09)) ]), iaa.Sometimes(0.5, iaa.AllChannelsCLAHE(clip_limit=(1, 10))) ]) image = seq.augment_image(image) # Transform to tensor image = TF.to_tensor(image) # Transform to one hot encoding target = torch.tensor(self.target_dict[target]) #normalize image to fit pretrained vgg model normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) image = normalize(image) return image, target, condition
def build_augmentation_pipeline(self, apply_prob=0.5):
    """Assemble the training augmentation pipeline from ``self.cfg``.

    Args:
        apply_prob: Probability with which each optional augmenter fires.

    Returns:
        An ``iaa.Sequential`` containing resize/crop, geometric,
        photometric, contrast and convolution augmenters, each enabled
        by the corresponding config key.
    """
    cfg = self.cfg
    sometimes = lambda aug: iaa.Sometimes(apply_prob, aug)
    pipeline = iaa.Sequential(random_order=False)
    pre_resize = cfg.get("pre_resize")
    crop_sampling = cfg.get("crop_sampling", "hybrid")
    if pre_resize:
        width, height = pre_resize
        pipeline.add(iaa.Resize({"height": height, "width": width}))
        if crop_sampling == "none":
            self.default_size = width, height
    if crop_sampling != "none":
        # Add smart, keypoint-aware image cropping.
        pipeline.add(iaa.PadToFixedSize(*self.default_size))
        pipeline.add(
            augmentation.KeypointAwareCropToFixedSize(
                *self.default_size,
                cfg.get("max_shift", 0.4),
                crop_sampling,
            ))
    # Each option accepts either a numeric value or a bare True that
    # falls back to a default parameterization.
    if cfg.get("fliplr", False):
        opt = cfg.get("fliplr", False)
        if type(opt) == int:
            pipeline.add(sometimes(iaa.Fliplr(opt)))
        else:
            pipeline.add(sometimes(iaa.Fliplr(0.5)))
    if cfg.get("rotation", False):
        opt = cfg.get("rotation", False)
        if type(opt) == int:
            pipeline.add(sometimes(iaa.Affine(rotate=(-opt, opt))))
        else:
            pipeline.add(sometimes(iaa.Affine(rotate=(-10, 10))))
    if cfg.get("hist_eq", False):
        pipeline.add(sometimes(iaa.AllChannelsHistogramEqualization()))
    if cfg.get("motion_blur", False):
        opts = cfg.get("motion_blur", False)
        if type(opts) == list:
            # List of (key, value) pairs -> kwargs for MotionBlur.
            opts = dict(opts)
            pipeline.add(sometimes(iaa.MotionBlur(**opts)))
        else:
            pipeline.add(sometimes(iaa.MotionBlur(k=7, angle=(-90, 90))))
    if cfg.get("covering", False):
        pipeline.add(
            sometimes(
                iaa.CoarseDropout(
                    (0, 0.02),
                    size_percent=(0.01, 0.05))))  # , per_channel=0.5)))
    if cfg.get("elastic_transform", False):
        pipeline.add(sometimes(iaa.ElasticTransformation(sigma=5)))
    if cfg.get("gaussian_noise", False):
        opt = cfg.get("gaussian_noise", False)
        if type(opt) == int or type(opt) == float:
            pipeline.add(
                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, opt),
                                              per_channel=0.5)))
        else:
            pipeline.add(
                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, 0.05 * 255),
                                              per_channel=0.5)))
    if cfg.get("grayscale", False):
        pipeline.add(sometimes(iaa.Grayscale(alpha=(0.5, 1.0))))

    def get_aug_param(cfg_value):
        # A dict config value is forwarded as kwargs; anything else
        # (e.g. True) means "use the augmenter's defaults".
        if isinstance(cfg_value, dict):
            opt = cfg_value
        else:
            opt = {}
        return opt

    cfg_cnt = cfg.get("contrast", {})
    cfg_cnv = cfg.get("convolution", {})
    # Normalize the contrast/convolution sub-configs: ensure every known
    # augmenter key exists and each enabled one has a "<name>ratio"
    # (apply probability, default 0.1).
    contrast_aug = ["histeq", "clahe", "gamma", "sigmoid", "log", "linear"]
    for aug in contrast_aug:
        aug_val = cfg_cnt.get(aug, False)
        cfg_cnt[aug] = aug_val
        if aug_val:
            cfg_cnt[aug + "ratio"] = cfg_cnt.get(aug + "ratio", 0.1)
    convolution_aug = ["sharpen", "emboss", "edge"]
    for aug in convolution_aug:
        aug_val = cfg_cnv.get(aug, False)
        cfg_cnv[aug] = aug_val
        if aug_val:
            cfg_cnv[aug + "ratio"] = cfg_cnv.get(aug + "ratio", 0.1)
    if cfg_cnt["histeq"]:
        opt = get_aug_param(cfg_cnt["histeq"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["histeqratio"],
                          iaa.AllChannelsHistogramEqualization(**opt)))
    if cfg_cnt["clahe"]:
        opt = get_aug_param(cfg_cnt["clahe"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["claheratio"],
                          iaa.AllChannelsCLAHE(**opt)))
    if cfg_cnt["log"]:
        opt = get_aug_param(cfg_cnt["log"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["logratio"], iaa.LogContrast(**opt)))
    if cfg_cnt["linear"]:
        opt = get_aug_param(cfg_cnt["linear"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["linearratio"], iaa.LinearContrast(**opt)))
    if cfg_cnt["sigmoid"]:
        opt = get_aug_param(cfg_cnt["sigmoid"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["sigmoidratio"],
                          iaa.SigmoidContrast(**opt)))
    if cfg_cnt["gamma"]:
        opt = get_aug_param(cfg_cnt["gamma"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["gammaratio"], iaa.GammaContrast(**opt)))
    if cfg_cnv["sharpen"]:
        opt = get_aug_param(cfg_cnv["sharpen"])
        pipeline.add(
            iaa.Sometimes(cfg_cnv["sharpenratio"], iaa.Sharpen(**opt)))
    if cfg_cnv["emboss"]:
        opt = get_aug_param(cfg_cnv["emboss"])
        pipeline.add(
            iaa.Sometimes(cfg_cnv["embossratio"], iaa.Emboss(**opt)))
    if cfg_cnv["edge"]:
        opt = get_aug_param(cfg_cnv["edge"])
        pipeline.add(
            iaa.Sometimes(cfg_cnv["edgeratio"], iaa.EdgeDetect(**opt)))
    return pipeline
y_min = bb_box.y1 x_max = bb_box.x2 y_max = bb_box.y2 cls_id = bb_box.label x_cen, y_cen, w, h = xyxy2xywh(x_min, y_min, x_max, y_max) f.write("%d %.06f %.06f %.06f %.06f\n" % (cls_id, x_cen, y_cen, w, h)) Width = 640 Height = 640 blur = iaa.AverageBlur(k=(2, 11)) #! 2~11 random emboss = iaa.Emboss(alpha=(1.0, 1.0), strength=(2.0, 2.0)) gray = iaa.RemoveSaturation(from_colorspace=iaa.CSPACE_BGR) contrast = iaa.AllChannelsCLAHE(clip_limit=(10, 10), per_channel=True) bright = iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30)) color = iaa.pillike.EnhanceColor() sharpen = iaa.Sharpen(alpha=(0.5, 1.0)) #! 0.5 ~ 1.0 random edge = iaa.pillike.FilterEdgeEnhance() augmentations = [[bright], [emboss], [color], [edge]] #! choice augmentation ## rotates = [[iaa.Affine(rotate=90)], [iaa.Affine(rotate=180)], [iaa.Affine(rotate=270)]] flip = iaa.Fliplr(1.0) #! 100% left & right dir = "C:\\Users\\jeongseokoon\\AI-hub\\data\\original\\" save_aug_dir = "C:\\Users\\jeongseokoon\\AI-hub\\data\\images\\" #! Absolute path
def data_aug(images):
    """Heavily randomized augmentation for a batch of images.

    Structure: with probability 0.5 the batch is untouched (Identity);
    otherwise, with probability 0.5 a hand-built Sequential chain runs,
    and with the remaining probability a RandAugment-based fallback runs
    (the else-branch of the inner Sometimes).

    Args:
        images: Batch of images in the format accepted by imgaug.

    Returns:
        The augmented batch.
    """
    seq = iaa.Sometimes(
        0.5,
        iaa.Identity(),
        iaa.Sometimes(
            0.5,
            iaa.Sequential([
                iaa.Fliplr(0.5),
                # 50% chance of one additive-noise style corruption.
                iaa.Sometimes(
                    0.5,
                    iaa.OneOf([
                        iaa.Add((-40, 40)),
                        iaa.AddElementwise((-40, 40)),
                        iaa.AdditiveGaussianNoise(scale=(0, 0.2 * 255)),
                        iaa.AdditiveLaplaceNoise(scale=(0, 0.2 * 255)),
                        iaa.AdditivePoissonNoise((0, 40)),
                        iaa.MultiplyElementwise((0.5, 1.5)),
                        iaa.ReplaceElementwise(0.1, [0, 255]),
                        iaa.SaltAndPepper(0.1)
                    ])),
                # Exactly one dropout/blur/geometric perturbation.
                iaa.OneOf([
                    iaa.Cutout(nb_iterations=2, size=0.15, cval=0,
                               squared=False),
                    iaa.CoarseDropout((0.0, 0.05), size_percent=(0.02, 0.25)),
                    iaa.Dropout(p=(0, 0.2)),
                    iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.1)),
                    iaa.Cartoon(),
                    iaa.BlendAlphaVerticalLinearGradient(iaa.TotalDropout(1.0),
                                                         min_value=0.2,
                                                         max_value=0.8),
                    iaa.GaussianBlur(sigma=(0.0, 3.0)),
                    iaa.AverageBlur(k=(2, 11)),
                    iaa.MedianBlur(k=(3, 11)),
                    iaa.BilateralBlur(d=(3, 10), sigma_color=(10, 250),
                                      sigma_space=(10, 250)),
                    iaa.MotionBlur(k=20),
                    iaa.AllChannelsCLAHE(),
                    iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0)),
                    iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5)),
                    iaa.Affine(scale=(0.5, 1.5)),
                    iaa.Affine(translate_px={
                        "x": (-20, 20),
                        "y": (-20, 20)
                    }),
                    iaa.Affine(shear=(-16, 16)),
                    iaa.pillike.EnhanceSharpness()
                ]),
                # Exactly one contrast/brightness adjustment.
                iaa.OneOf([
                    iaa.GammaContrast((0.5, 2.0)),
                    iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)),
                    iaa.LogContrast(gain=(0.6, 1.4)),
                    iaa.LinearContrast((0.4, 1.6)),
                    iaa.pillike.EnhanceBrightness()
                ])
            ]),
            # Else-branch: RandAugment(n=2, m=9) half the time, otherwise
            # RandAugment with randomized n and m.
            iaa.Sometimes(0.5, iaa.RandAugment(n=2, m=9),
                          iaa.RandAugment(n=(0, 3), m=(0, 9)))))
    images = seq(images=images)
    return images
def build_augmentation_pipeline(self, height=None, width=None,
                                apply_prob=0.5):
    """Assemble the training augmentation pipeline from ``self.cfg``.

    Args:
        height: Target output height; crop+resize is appended only when
            both height and width are given.
        width: Target output width.
        apply_prob: Probability with which each optional augmenter fires.

    Returns:
        An ``iaa.Sequential`` of the enabled augmenters.
    """
    sometimes = lambda aug: iaa.Sometimes(apply_prob, aug)
    pipeline = iaa.Sequential(random_order=False)
    cfg = self.cfg
    if cfg["mirror"]:
        opt = cfg["mirror"]  # fliplr
        # An int value is the flip probability; bare True uses 0.5.
        if type(opt) == int:
            pipeline.add(sometimes(iaa.Fliplr(opt)))
        else:
            pipeline.add(sometimes(iaa.Fliplr(0.5)))
    if cfg["rotation"] > 0:
        # Rotation uses its own apply probability, "rotratio".
        pipeline.add(
            iaa.Sometimes(
                cfg["rotratio"],
                iaa.Affine(rotate=(-cfg["rotation"], cfg["rotation"])),
            ))
    if cfg["motion_blur"]:
        opts = cfg["motion_blur_params"]
        pipeline.add(sometimes(iaa.MotionBlur(**opts)))
    if cfg["covering"]:
        pipeline.add(
            sometimes(
                iaa.CoarseDropout(0.02, size_percent=0.3, per_channel=0.5)))
    if cfg["elastic_transform"]:
        pipeline.add(sometimes(iaa.ElasticTransformation(sigma=5)))
    if cfg.get("gaussian_noise", False):
        opt = cfg.get("gaussian_noise", False)
        # Numeric value is the max noise scale; bare True uses 5% of 255.
        if type(opt) == int or type(opt) == float:
            pipeline.add(
                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, opt),
                                              per_channel=0.5)))
        else:
            pipeline.add(
                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, 0.05 * 255),
                                              per_channel=0.5)))
    if cfg.get("grayscale", False):
        pipeline.add(sometimes(iaa.Grayscale(alpha=(0.5, 1.0))))

    def get_aug_param(cfg_value):
        # A dict config value is forwarded as kwargs; anything else
        # (e.g. True) means "use the augmenter's defaults".
        if isinstance(cfg_value, dict):
            opt = cfg_value
        else:
            opt = {}
        return opt

    cfg_cnt = cfg.get("contrast", {})
    cfg_cnv = cfg.get("convolution", {})
    # Normalize the sub-configs: ensure every known augmenter key exists
    # and each enabled one has a "<name>ratio" (apply prob, default 0.1).
    contrast_aug = ["histeq", "clahe", "gamma", "sigmoid", "log", "linear"]
    for aug in contrast_aug:
        aug_val = cfg_cnt.get(aug, False)
        cfg_cnt[aug] = aug_val
        if aug_val:
            cfg_cnt[aug + "ratio"] = cfg_cnt.get(aug + "ratio", 0.1)
    convolution_aug = ["sharpen", "emboss", "edge"]
    for aug in convolution_aug:
        aug_val = cfg_cnv.get(aug, False)
        cfg_cnv[aug] = aug_val
        if aug_val:
            cfg_cnv[aug + "ratio"] = cfg_cnv.get(aug + "ratio", 0.1)
    if cfg_cnt["histeq"]:
        opt = get_aug_param(cfg_cnt["histeq"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["histeqratio"],
                          iaa.AllChannelsHistogramEqualization(**opt)))
    if cfg_cnt["clahe"]:
        opt = get_aug_param(cfg_cnt["clahe"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["claheratio"], iaa.AllChannelsCLAHE(**opt)))
    if cfg_cnt["log"]:
        opt = get_aug_param(cfg_cnt["log"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["logratio"], iaa.LogContrast(**opt)))
    if cfg_cnt["linear"]:
        opt = get_aug_param(cfg_cnt["linear"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["linearratio"], iaa.LinearContrast(**opt)))
    if cfg_cnt["sigmoid"]:
        opt = get_aug_param(cfg_cnt["sigmoid"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["sigmoidratio"],
                          iaa.SigmoidContrast(**opt)))
    if cfg_cnt["gamma"]:
        opt = get_aug_param(cfg_cnt["gamma"])
        pipeline.add(
            iaa.Sometimes(cfg_cnt["gammaratio"], iaa.GammaContrast(**opt)))
    if cfg_cnv["sharpen"]:
        opt = get_aug_param(cfg_cnv["sharpen"])
        pipeline.add(
            iaa.Sometimes(cfg_cnv["sharpenratio"], iaa.Sharpen(**opt)))
    if cfg_cnv["emboss"]:
        opt = get_aug_param(cfg_cnv["emboss"])
        pipeline.add(
            iaa.Sometimes(cfg_cnv["embossratio"], iaa.Emboss(**opt)))
    if cfg_cnv["edge"]:
        opt = get_aug_param(cfg_cnv["edge"])
        pipeline.add(
            iaa.Sometimes(cfg_cnv["edgeratio"], iaa.EdgeDetect(**opt)))
    if height is not None and width is not None:
        # Random crop-and-pad up to crop_by (default 15%), then resize to
        # the requested output size.
        if not cfg.get("crop_by", False):
            crop_by = 0.15
        else:
            crop_by = cfg.get("crop_by", False)
        pipeline.add(
            iaa.Sometimes(
                cfg.get("cropratio", 0.4),
                iaa.CropAndPad(percent=(-crop_by, crop_by), keep_size=False),
            ))
        pipeline.add(iaa.Resize({"height": height, "width": width}))
    return pipeline
def augmentation_of_image(self, test_image, output_path):
    """Run a fixed battery of 14 augmentations over a test image.

    Builds a large catalog of imgaug augmenters (geometric, photometric,
    per-channel, color and contrast), groups them into 14 composite
    augmentors, applies each to a 14-copy batch of the image, and writes
    one output image per augmentor into ``output_path``.

    Args:
        test_image: Path of the image to read (passed to cv2.imread).
        output_path: Directory the augmented images are written to.
    """
    self.test_image = test_image
    self.output_path = output_path
    # Define the augmenters.
    # A range of values means one number is randomly chosen for every
    # augmentation in every batch.
    # Affine transformations.
    rotate = iaa.Affine(rotate=(-90, 90))
    scale = iaa.Affine(scale={
        "x": (0.5, 0.9),
        "y": (0.5, 0.9)
    })
    translation = iaa.Affine(translate_percent={
        "x": (-0.15, 0.15),
        "y": (-0.15, 0.15)
    })
    shear = iaa.Affine(shear=(-2, 2))  # shear within (-2, 2) degrees
    # NOTE(review): zoom, padding, contrast_channels, red_rot/green_rot/
    # blue_rot and blend are defined but never used below.
    zoom = iaa.PerspectiveTransform(
        scale=(0.01, 0.15), keep_size=True)  # output size unchanged
    h_flip = iaa.Fliplr(1.0)  # flip horizontally all images (100%)
    v_flip = iaa.Flipud(1.0)  # flip vertically all images
    padding = iaa.KeepSizeByResize(
        iaa.CropAndPad(percent=(0.05, 0.25))
    )  # pad 5%-25% of the image, keeping the original output size
    # More augmentations.
    blur = iaa.GaussianBlur(
        sigma=(0, 1.22)
    )  # low sigma values suggested for this application
    contrast = iaa.contrast.LinearContrast((0.75, 1.5))
    # Contrast factor 0.75-1.5, sampled per image.
    contrast_channels = iaa.LinearContrast(
        (0.75, 1.5), per_channel=True
    )  # also sampled independently per channel
    sharpen = iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5))
    # alpha 0 (no sharpening) to 1 (full); lightness 0.75-1.5.
    gauss_noise = iaa.AdditiveGaussianNoise(
        scale=0.111 * 255, per_channel=True
    )  # mimics sensor noise seen in poor-quality cell images
    laplace_noise = iaa.AdditiveLaplaceNoise(
        scale=(0, 0.111 * 255)
    )  # deliberately a small range for cell images
    # Brightness.
    brightness = iaa.Multiply(
        (0.35, 1.65
         ))  # 35%-165% of the original image brightness
    brightness_channels = iaa.Multiply(
        (0.5, 1.5), per_channel=0.75
    )  # 25% of images changed globally; the other 75% channel-wise
    # Per-channel operations, channels (R, G, B) = (0, 1, 2).
    red = iaa.WithChannels(0, iaa.Add(
        (10, 100)))  # increase red pixel values by 10-100
    red_rot = iaa.WithChannels(0, iaa.Affine(
        rotate=(0, 45)))  # rotate only the red channel by 0-45 degrees
    green = iaa.WithChannels(1, iaa.Add(
        (10, 100)))  # increase green pixel values by 10-100
    green_rot = iaa.WithChannels(1, iaa.Affine(
        rotate=(0, 45)))  # rotate only the green channel by 0-45 degrees
    blue = iaa.WithChannels(2, iaa.Add(
        (10, 100)))  # increase blue pixel values by 10-100
    blue_rot = iaa.WithChannels(2, iaa.Affine(
        rotate=(0, 45)))  # rotate only the blue channel by 0-45 degrees
    # Colors.
    channel_shuffle = iaa.ChannelShuffle(1.0)  # shuffle channels of all images
    grayscale = iaa.Grayscale(1.0)
    hue_n_saturation = iaa.MultiplyHueAndSaturation(
        (0.5, 1.5), per_channel=True
    )  # scale hue and saturation independently
    add_hue_saturation = iaa.AddToHueAndSaturation(
        (-50, 50), per_channel=True)  # additive hue/saturation shift
    # Quantize colors using k-Means clustering.
    kmeans_color = iaa.KMeansColorQuantization(
        n_colors=(4, 16)
    )  # quantize to 4-16 colors (randomly chosen)
    # Alpha blending.
    blend = iaa.AlphaElementwise((0, 1.0), iaa.Grayscale((0, 1.0)))
    # Contrast augmentors.
    clahe = iaa.CLAHE(tile_grid_size_px=((3, 21), [
        0, 2, 3, 4, 5, 6, 7
    ]))  # CLAHE with kernel H in (3, 21) and W from the listed values
    histogram = iaa.HistogramEqualization()  # standard histogram equalization
    # Composite augmentors used below.
    OneofRed = iaa.OneOf([red])
    OneofGreen = iaa.OneOf([green])
    OneofBlue = iaa.OneOf([blue])
    contrast_n_shit = iaa.OneOf(
        [contrast, brightness, brightness_channels])
    SomeAug = iaa.SomeOf(
        2, [rotate, scale, translation, shear, h_flip, v_flip],
        random_order=True)  # two random geometric transforms
    SomeClahe = iaa.SomeOf(
        2, [
            clahe,
            iaa.CLAHE(clip_limit=(1, 10)),
            iaa.CLAHE(tile_grid_size_px=(3, 21)),
            iaa.GammaContrast((0.5, 2.0)),
            iaa.AllChannelsCLAHE(),
            iaa.AllChannelsCLAHE(clip_limit=(1, 10), per_channel=True)
        ],
        random_order=True)  # two random CLAHE/contrast augmentors
    edgedetection = iaa.OneOf([
        iaa.EdgeDetect(alpha=(0, 0.7)),
        iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0))
    ])  # mark all or directed edges, overlaid with alpha 0-0.7
    canny_filter = iaa.OneOf([
        iaa.Canny(),
        iaa.Canny(alpha=(0.5, 1.0), sobel_kernel_size=[3, 7])
    ])  # one of two Canny filter variants
    OneofNoise = iaa.OneOf([blur, gauss_noise, laplace_noise])
    Color_1 = iaa.OneOf([
        channel_shuffle, grayscale, hue_n_saturation, add_hue_saturation,
        kmeans_color
    ])
    Color_2 = iaa.OneOf([
        channel_shuffle, grayscale, hue_n_saturation, add_hue_saturation,
        kmeans_color
    ])
    Flip = iaa.OneOf([histogram, v_flip, h_flip])
    # The 14 augmentors applied in order (SomeClahe appears twice).
    Augmentors = [
        SomeAug, SomeClahe, SomeClahe, edgedetection, sharpen, canny_filter,
        OneofRed, OneofGreen, OneofBlue, OneofNoise, Color_1, Color_2, Flip,
        contrast_n_shit
    ]
    for i in range(0, 14):
        # NOTE(review): the image is re-read on every iteration and a
        # 14-copy batch is augmented although only element i is saved.
        img = cv2.imread(test_image)  # read the image
        images = np.array(
            [img for _ in range(14)], dtype=np.uint8
        )  # batch of 14 identical copies
        images_aug = Augmentors[i].augment_images(
            images
        )  # alternate between the different augmentors
        cv2.imwrite(
            os.path.join(output_path, test_image + "new" + str(i) + '.jpg'),
            images_aug[i])  # write one augmented image per augmentor
def build_augmentation_pipeline(self, height=None, width=None,
                                apply_prob=0.5):
    """Assemble the training augmentation pipeline from ``self.cfg``.

    Args:
        height: Target output height; crop+resize is appended only when
            both height and width are given.
        width: Target output width.
        apply_prob: Probability with which each optional augmenter fires.

    Returns:
        An ``iaa.Sequential`` of the enabled augmenters.
    """
    sometimes = lambda aug: iaa.Sometimes(apply_prob, aug)
    pipeline = iaa.Sequential(random_order=False)
    cfg = self.cfg
    # Each option accepts either a numeric value or a bare True that
    # falls back to a default parameterization.
    if cfg.get('fliplr', False):
        opt = cfg.get('fliplr', False)
        if type(opt) == int:
            pipeline.add(sometimes(iaa.Fliplr(opt)))
        else:
            pipeline.add(sometimes(iaa.Fliplr(0.5)))
    if cfg.get('rotation', False):
        opt = cfg.get('rotation', False)
        if type(opt) == int:
            pipeline.add(sometimes(iaa.Affine(rotate=(-opt, opt))))
        else:
            pipeline.add(sometimes(iaa.Affine(rotate=(-10, 10))))
    if cfg.get('motion_blur', False):
        opts = cfg.get('motion_blur', False)
        if type(opts) == list:
            # List of (key, value) pairs -> kwargs for MotionBlur.
            opts = dict(opts)
            pipeline.add(sometimes(iaa.MotionBlur(**opts)))
        else:
            pipeline.add(sometimes(iaa.MotionBlur(k=7, angle=(-90, 90))))
    if cfg.get('covering', False):
        pipeline.add(
            sometimes(
                iaa.CoarseDropout(0.02, size_percent=0.3, per_channel=0.5)))
    if cfg.get('elastic_transform', False):
        pipeline.add(sometimes(iaa.ElasticTransformation(sigma=5)))
    if cfg.get('gaussian_noise', False):
        opt = cfg.get('gaussian_noise', False)
        # Numeric value is the max noise scale; bare True uses 5% of 255.
        if type(opt) == int or type(opt) == float:
            pipeline.add(
                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, opt),
                                              per_channel=0.5)))
        else:
            pipeline.add(
                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, 0.05 * 255),
                                              per_channel=0.5)))
    if cfg.get('grayscale', False):
        pipeline.add(sometimes(iaa.Grayscale(alpha=(0.5, 1.0))))
    if cfg.get('hist_eq', False):
        pipeline.add(sometimes(iaa.AllChannelsHistogramEqualization()))
    if height is not None and width is not None:
        # Random crop-and-pad up to crop_by (default 15%), then resize to
        # the requested output size.
        if not cfg.get('crop_by', False):
            crop_by = 0.15
        else:
            crop_by = cfg.get('crop_by', False)
        # BUGFIX: was `cfg.cropratio` (attribute access), which raises
        # AttributeError on a plain dict config; every other option in
        # this method uses cfg.get(...), and the sibling implementation
        # reads cfg.get("cropratio", 0.4).
        pipeline.add(
            iaa.Sometimes(
                cfg.get('cropratio', 0.4),
                iaa.CropAndPad(percent=(-crop_by, crop_by),
                               keep_size=False)))
        pipeline.add(iaa.Resize({"height": height, "width": width}))
    if cfg.get('gamma', False):
        pipeline.add(
            sometimes(iaa.GammaContrast((0.5, 2.0), per_channel=True)))
    if cfg.get('logcontrast', False):
        pipeline.add(
            sometimes(iaa.LogContrast(gain=(0.6, 1.4), per_channel=True)))
    if cfg.get('allchannelsclahe', False):
        pipeline.add(
            sometimes(
                iaa.AllChannelsCLAHE(clip_limit=(1, 10), per_channel=True)))
    return pipeline
transformed_image = transform(image=image) elif augmentation == 'linear_contrast': transform = iaa.LinearContrast((0.4, 1.6)) transformed_image = transform(image=image) elif augmentation == 'histogram_equalization': transform = iaa.HistogramEqualization() transformed_image = transform(image=image) elif augmentation == 'all_channels_he': transform = iaa.AllChannelsHistogramEqualization() transformed_image = transform(image=image) elif augmentation == 'all_channels_clahe': transform = iaa.AllChannelsCLAHE() transformed_image = transform(image=image) ## Compression elif augmentation == 'image_compression': transform = ImageCompression(always_apply=True, quality_lower=10) transformed_image = transform(image=image)['image'] elif augmentation == 'downscale': transform = Downscale(always_apply=True) transformed_image = transform(image=image)['image'] elif augmentation == 'pixelate': transform = iaa.imgcorruptlike.Pixelate(severity=4) transformed_image = transform(image=image)
def augment(img_data, config, augment=True):
    """Load an image and apply the configured geometric/color augmentation.

    Flips and 90-degree rotations are applied directly with cv2 and the
    bounding boxes in ``img_data_aug`` are remapped accordingly; color and
    contrast perturbations are collected into an imgaug pipeline that does
    not move pixels, so boxes stay valid.

    Args:
        img_data: Dict with at least 'filepath', 'bboxes', 'width', 'height'.
        config: Options object with use_horizontal_flips, use_vertical_flips,
            rot_90, color and contrast flags (plus palette settings).
        augment: When False, the image is only loaded and passed through.

    Returns:
        Tuple of (deep-copied img_data with updated boxes/size, image array).
    """
    assert 'filepath' in img_data
    assert 'bboxes' in img_data
    assert 'width' in img_data
    assert 'height' in img_data
    # Never mutate the caller's record.
    img_data_aug = copy.deepcopy(img_data)
    aug_list = []
    img = cv2.imread(img_data_aug['filepath'])
    if augment:
        rows, cols = img.shape[:2]
        # [START] Palette Augmentation (mutates img / img_data_aug in place).
        pallete_augmentation(img=img, img_data=img_data_aug, config=config)
        # [END] Palette Augmentation
        # Horizontal flip with 50% chance; mirror box x-coordinates.
        if config.use_horizontal_flips and np.random.randint(0, 2) == 0:
            img = cv2.flip(img, 1)
            for bbox in img_data_aug['bboxes']:
                x1 = bbox['x1']
                x2 = bbox['x2']
                bbox['x2'] = cols - x1
                bbox['x1'] = cols - x2
        # Vertical flip with 50% chance; mirror box y-coordinates.
        if config.use_vertical_flips and np.random.randint(0, 2) == 0:
            img = cv2.flip(img, 0)
            for bbox in img_data_aug['bboxes']:
                y1 = bbox['y1']
                y2 = bbox['y2']
                bbox['y2'] = rows - y1
                bbox['y1'] = rows - y2
        # Random rotation by a multiple of 90 degrees, implemented as
        # transpose+flip; boxes are remapped to the rotated frame.
        if config.rot_90:
            angle = np.random.choice([0, 90, 180, 270], 1)[0]
            if angle == 270:
                img = np.transpose(img, (1, 0, 2))
                img = cv2.flip(img, 0)
            elif angle == 180:
                img = cv2.flip(img, -1)
            elif angle == 90:
                img = np.transpose(img, (1, 0, 2))
                img = cv2.flip(img, 1)
            elif angle == 0:
                pass
            for bbox in img_data_aug['bboxes']:
                x1 = bbox['x1']
                x2 = bbox['x2']
                y1 = bbox['y1']
                y2 = bbox['y2']
                if angle == 270:
                    bbox['x1'] = y1
                    bbox['x2'] = y2
                    bbox['y1'] = cols - x2
                    bbox['y2'] = cols - x1
                elif angle == 180:
                    bbox['x2'] = cols - x1
                    bbox['x1'] = cols - x2
                    bbox['y2'] = rows - y1
                    bbox['y1'] = rows - y2
                elif angle == 90:
                    bbox['x1'] = rows - y2
                    bbox['x2'] = rows - y1
                    bbox['y1'] = x1
                    bbox['y2'] = x2
                elif angle == 0:
                    pass
        # One randomly chosen color perturbation.
        if config.color:
            aug_list.append(
                np.random.choice([
                    iaa.MultiplyHueAndSaturation((0.5, 1.5),
                                                 per_channel=True),
                    iaa.AddToHueAndSaturation((-50, 50), per_channel=True),
                    iaa.KMeansColorQuantization(),
                    iaa.UniformColorQuantization(),
                    iaa.Grayscale(alpha=(0.0, 1.0))
                ]))
        # One randomly chosen contrast perturbation.
        if config.contrast:
            aug_list.append(
                np.random.choice([
                    iaa.GammaContrast((0.5, 2.0), per_channel=True),
                    iaa.SigmoidContrast(gain=(3, 10),
                                        cutoff=(0.4, 0.6),
                                        per_channel=True),
                    iaa.LogContrast(gain=(0.6, 1.4), per_channel=True),
                    iaa.LinearContrast((0.4, 1.6), per_channel=True),
                    iaa.AllChannelsCLAHE(clip_limit=(1, 10),
                                         per_channel=True),
                    iaa.AllChannelsHistogramEqualization(),
                    iaa.HistogramEqualization()
                ]))
    ## Augmentation
    # Apply a random subset (possibly none) of the collected augmenters.
    aug = iaa.SomeOf((0, None), aug_list, random_order=True)
    seq = iaa.Sequential(aug)
    img = seq.augment_image(img)
    ##
    # Record the (possibly transposed) output size.
    img_data_aug['width'] = img.shape[1]
    img_data_aug['height'] = img.shape[0]
    return img_data_aug, img
iaa.OneOf([ iaa.MultiplyAndAddToBrightness(mul=(0.3, 1.6), add=(-50, 50)), iaa.MultiplyHueAndSaturation((0.5, 1.5), per_channel=True), iaa.ChannelShuffle(0.5), iaa.RemoveSaturation(), iaa.Grayscale(alpha=(0.0, 1.0)), iaa.ChangeColorTemperature((1100, 35000)), ]), iaa.OneOf([ iaa.MedianBlur(k=(3, 7)), iaa.BilateralBlur( d=(3, 10), sigma_color=(10, 250), sigma_space=(10, 250)), iaa.MotionBlur(k=(3, 9), angle=[-45, 45]), iaa.MeanShiftBlur(spatial_radius=(5.0, 10.0), color_radius=(5.0, 10.0)), iaa.AllChannelsCLAHE(clip_limit=(1, 10)), iaa.AllChannelsHistogramEqualization(), iaa.GammaContrast((0.5, 1.5), per_channel=True), iaa.GammaContrast((0.5, 1.5)), iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6), per_channel=True), iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)), iaa.HistogramEqualization(), iaa.Sharpen(alpha=0.5) ]), iaa.OneOf([ iaa.AveragePooling([2, 3]), iaa.MaxPooling(([2, 3], [2, 3])), ]), iaa.OneOf([ iaa.Clouds(), iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05)),
def create_augmenters(height, width, height_augmentable, width_augmentable,
                      only_augmenters):
    """Build the full catalogue of imgaug augmenters used for benchmarking.

    Args:
        height, width: input image size; used to parameterize the size-family
            augmenters (Pad/CropToFixedSize work with ``width +/- 10`` and
            ``height +/- 10``).
        height_augmentable, width_augmentable: expected augmentable shape;
            only consumed by ``iaa.AssertShape``.
        only_augmenters: ``None`` to keep everything, or an iterable of regex
            patterns — only augmenters whose ``.name`` matches at least one
            pattern (via ``re.search``) are returned.

    Returns:
        Flat list of configured augmenters, grouped by imgaug submodule.
    """
    # Identity callbacks for the iaa.Lambda benchmark entry.
    def lambda_func_images(images, random_state, parents, hooks):
        return images

    def lambda_func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def lambda_func_keypoints(keypoints, random_state, parents, hooks):
        return keypoints

    # Always-true callbacks for the iaa.AssertLambda benchmark entry.
    def assertlambda_func_images(images, random_state, parents, hooks):
        return True

    def assertlambda_func_heatmaps(heatmaps, random_state, parents, hooks):
        return True

    def assertlambda_func_keypoints(keypoints, random_state, parents, hooks):
        return True

    # Meta augmenters: containers and no-ops measuring framework overhead.
    augmenters_meta = [
        iaa.Sequential([iaa.Noop(), iaa.Noop()],
                       random_order=False,
                       name="Sequential_2xNoop"),
        iaa.Sequential([iaa.Noop(), iaa.Noop()],
                       random_order=True,
                       name="Sequential_2xNoop_random_order"),
        iaa.SomeOf((1, 3), [iaa.Noop(), iaa.Noop(), iaa.Noop()],
                   random_order=False,
                   name="SomeOf_3xNoop"),
        iaa.SomeOf((1, 3), [iaa.Noop(), iaa.Noop(), iaa.Noop()],
                   random_order=True,
                   name="SomeOf_3xNoop_random_order"),
        iaa.OneOf([iaa.Noop(), iaa.Noop(), iaa.Noop()], name="OneOf_3xNoop"),
        iaa.Sometimes(0.5, iaa.Noop(), name="Sometimes_Noop"),
        iaa.WithChannels([1, 2], iaa.Noop(), name="WithChannels_1_and_2_Noop"),
        iaa.Noop(name="Noop"),
        iaa.Lambda(func_images=lambda_func_images,
                   func_heatmaps=lambda_func_heatmaps,
                   func_keypoints=lambda_func_keypoints,
                   name="Lambda"),
        iaa.AssertLambda(func_images=assertlambda_func_images,
                         func_heatmaps=assertlambda_func_heatmaps,
                         func_keypoints=assertlambda_func_keypoints,
                         name="AssertLambda"),
        iaa.AssertShape((None, height_augmentable, width_augmentable, None),
                        name="AssertShape"),
        iaa.ChannelShuffle(0.5, name="ChannelShuffle")
    ]
    # Pixel-arithmetic augmenters (noise, dropout, inversion, compression).
    augmenters_arithmetic = [
        iaa.Add((-10, 10), name="Add"),
        iaa.AddElementwise((-10, 10), name="AddElementwise"),
        #iaa.AddElementwise((-500, 500), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(scale=(5, 10), name="AdditiveGaussianNoise"),
        iaa.AdditiveLaplaceNoise(scale=(5, 10), name="AdditiveLaplaceNoise"),
        iaa.AdditivePoissonNoise(lam=(1, 5), name="AdditivePoissonNoise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.Dropout((0.01, 0.05), name="Dropout"),
        iaa.CoarseDropout((0.01, 0.05),
                          size_percent=(0.01, 0.1),
                          name="CoarseDropout"),
        iaa.ReplaceElementwise((0.01, 0.05), (0, 255),
                               name="ReplaceElementwise"),
        #iaa.ReplaceElementwise((0.95, 0.99), (0, 255), name="ReplaceElementwise"),
        iaa.SaltAndPepper((0.01, 0.05), name="SaltAndPepper"),
        iaa.ImpulseNoise((0.01, 0.05), name="ImpulseNoise"),
        iaa.CoarseSaltAndPepper((0.01, 0.05),
                                size_percent=(0.01, 0.1),
                                name="CoarseSaltAndPepper"),
        iaa.Salt((0.01, 0.05), name="Salt"),
        iaa.CoarseSalt((0.01, 0.05),
                       size_percent=(0.01, 0.1),
                       name="CoarseSalt"),
        iaa.Pepper((0.01, 0.05), name="Pepper"),
        iaa.CoarsePepper((0.01, 0.05),
                         size_percent=(0.01, 0.1),
                         name="CoarsePepper"),
        iaa.Invert(0.1, name="Invert"),
        # ContrastNormalization
        iaa.JpegCompression((50, 99), name="JpegCompression")
    ]
    # Alpha-blending augmenters (all blended with a no-op child).
    augmenters_blend = [
        iaa.Alpha((0.01, 0.99), iaa.Noop(), name="Alpha"),
        iaa.AlphaElementwise((0.01, 0.99), iaa.Noop(),
                             name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Noop(), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha((-2.0, 2.0), iaa.Noop(),
                                name="FrequencyNoiseAlpha")
    ]
    # Blur augmenters.
    augmenters_blur = [
        iaa.GaussianBlur(sigma=(1.0, 5.0), name="GaussianBlur"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.BilateralBlur(d=(3, 11), name="BilateralBlur"),
        iaa.MotionBlur(k=(3, 11), name="MotionBlur")
    ]
    # Colorspace / hue-saturation / quantization augmenters.
    augmenters_color = [
        # InColorspace (deprecated)
        iaa.WithColorspace(to_colorspace="HSV",
                           children=iaa.Noop(),
                           name="WithColorspace"),
        iaa.WithHueAndSaturation(children=iaa.Noop(),
                                 name="WithHueAndSaturation"),
        iaa.MultiplyHueAndSaturation((0.8, 1.2),
                                     name="MultiplyHueAndSaturation"),
        iaa.MultiplyHue((-1.0, 1.0), name="MultiplyHue"),
        iaa.MultiplySaturation((0.8, 1.2), name="MultiplySaturation"),
        iaa.AddToHueAndSaturation((-10, 10), name="AddToHueAndSaturation"),
        iaa.AddToHue((-10, 10), name="AddToHue"),
        iaa.AddToSaturation((-10, 10), name="AddToSaturation"),
        iaa.ChangeColorspace(to_colorspace="HSV", name="ChangeColorspace"),
        iaa.Grayscale((0.01, 0.99), name="Grayscale"),
        iaa.KMeansColorQuantization((2, 16), name="KMeansColorQuantization"),
        iaa.UniformColorQuantization((2, 16), name="UniformColorQuantization")
    ]
    # Contrast augmenters.
    augmenters_contrast = [
        iaa.GammaContrast(gamma=(0.5, 2.0), name="GammaContrast"),
        iaa.SigmoidContrast(gain=(5, 20),
                            cutoff=(0.25, 0.75),
                            name="SigmoidContrast"),
        iaa.LogContrast(gain=(0.7, 1.0), name="LogContrast"),
        iaa.LinearContrast((0.5, 1.5), name="LinearContrast"),
        iaa.AllChannelsCLAHE(clip_limit=(2, 10),
                             tile_grid_size_px=(3, 11),
                             name="AllChannelsCLAHE"),
        iaa.CLAHE(clip_limit=(2, 10),
                  tile_grid_size_px=(3, 11),
                  to_colorspace="HSV",
                  name="CLAHE"),
        iaa.AllChannelsHistogramEqualization(
            name="AllChannelsHistogramEqualization"),
        iaa.HistogramEqualization(to_colorspace="HSV",
                                  name="HistogramEqualization"),
    ]
    # Convolution-based augmenters (identity kernel for Convolve).
    augmenters_convolutional = [
        iaa.Convolve(np.float32([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
                     name="Convolve_3x3"),
        iaa.Sharpen(alpha=(0.01, 0.99), lightness=(0.5, 2), name="Sharpen"),
        iaa.Emboss(alpha=(0.01, 0.99), strength=(0, 2), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.01, 0.99), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.01, 0.99), name="DirectedEdgeDetect")
    ]
    augmenters_edges = [iaa.Canny(alpha=(0.01, 0.99), name="Canny")]
    # Deterministic flips (probability 1.0).
    augmenters_flip = [
        iaa.Fliplr(1.0, name="Fliplr"),
        iaa.Flipud(1.0, name="Flipud")
    ]
    # Geometric augmenters: Affine variants differ in order/mode/backend.
    augmenters_geometric = [
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=0,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_0_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_1_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=3,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_3_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="edge",
                   cval=(0, 255),
                   name="Affine_order_1_edge"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="constant",
                   cval=(0, 255),
                   backend="skimage",
                   name="Affine_order_1_constant_skimage"),
        # TODO AffineCv2
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=1,
                            mode="constant",
                            name="PiecewiseAffine_4x4_order_1_constant"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=0,
                            mode="constant",
                            name="PiecewiseAffine_4x4_order_0_constant"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=1,
                            mode="edge",
                            name="PiecewiseAffine_4x4_order_1_edge"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=8,
                            nb_cols=8,
                            order=1,
                            mode="constant",
                            name="PiecewiseAffine_8x8_order_1_constant"),
        iaa.PerspectiveTransform(scale=(0.01, 0.05),
                                 keep_size=False,
                                 name="PerspectiveTransform"),
        iaa.PerspectiveTransform(scale=(0.01, 0.05),
                                 keep_size=True,
                                 name="PerspectiveTransform_keep_size"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=0,
            mode="constant",
            cval=0,
            name="ElasticTransformation_order_0_constant"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="constant",
            cval=0,
            name="ElasticTransformation_order_1_constant"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="nearest",
            cval=0,
            name="ElasticTransformation_order_1_nearest"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="reflect",
            cval=0,
            name="ElasticTransformation_order_1_reflect"),
        iaa.Rot90((1, 3), keep_size=False, name="Rot90"),
        iaa.Rot90((1, 3), keep_size=True, name="Rot90_keep_size")
    ]
    # Pooling augmenters, with and without resizing back to input size.
    augmenters_pooling = [
        iaa.AveragePooling(kernel_size=(1, 16),
                           keep_size=False,
                           name="AveragePooling"),
        iaa.AveragePooling(kernel_size=(1, 16),
                           keep_size=True,
                           name="AveragePooling_keep_size"),
        iaa.MaxPooling(kernel_size=(1, 16),
                       keep_size=False,
                       name="MaxPooling"),
        iaa.MaxPooling(kernel_size=(1, 16),
                       keep_size=True,
                       name="MaxPooling_keep_size"),
        iaa.MinPooling(kernel_size=(1, 16),
                       keep_size=False,
                       name="MinPooling"),
        iaa.MinPooling(kernel_size=(1, 16),
                       keep_size=True,
                       name="MinPooling_keep_size"),
        iaa.MedianPooling(kernel_size=(1, 16),
                          keep_size=False,
                          name="MedianPooling"),
        iaa.MedianPooling(kernel_size=(1, 16),
                          keep_size=True,
                          name="MedianPooling_keep_size")
    ]
    # Segmentation augmenters (superpixels, voronoi variants).
    augmenters_segmentation = [
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=64,
                        interpolation="cubic",
                        name="Superpixels_max_size_64_cubic"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=64,
                        interpolation="linear",
                        name="Superpixels_max_size_64_linear"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=128,
                        interpolation="linear",
                        name="Superpixels_max_size_128_linear"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=224,
                        interpolation="linear",
                        name="Superpixels_max_size_224_linear"),
        iaa.UniformVoronoi(n_points=(250, 1000), name="UniformVoronoi"),
        iaa.RegularGridVoronoi(n_rows=(16, 31),
                               n_cols=(16, 31),
                               name="RegularGridVoronoi"),
        iaa.RelativeRegularGridVoronoi(n_rows_frac=(0.07, 0.14),
                                       n_cols_frac=(0.07, 0.14),
                                       name="RelativeRegularGridVoronoi"),
    ]
    # Size augmenters; fixed-size variants derive targets from width/height.
    augmenters_size = [
        iaa.Resize((0.8, 1.2), interpolation="nearest", name="Resize_nearest"),
        iaa.Resize((0.8, 1.2), interpolation="linear", name="Resize_linear"),
        iaa.Resize((0.8, 1.2), interpolation="cubic", name="Resize_cubic"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="constant",
                       pad_cval=(0, 255),
                       keep_size=False,
                       name="CropAndPad"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="edge",
                       pad_cval=(0, 255),
                       keep_size=False,
                       name="CropAndPad_edge"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="constant",
                       pad_cval=(0, 255),
                       name="CropAndPad_keep_size"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="constant",
                pad_cval=(0, 255),
                keep_size=False,
                name="Pad"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="edge",
                pad_cval=(0, 255),
                keep_size=False,
                name="Pad_edge"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="constant",
                pad_cval=(0, 255),
                name="Pad_keep_size"),
        iaa.Crop(percent=(0.05, 0.2), keep_size=False, name="Crop"),
        iaa.Crop(percent=(0.05, 0.2), name="Crop_keep_size"),
        iaa.PadToFixedSize(width=width + 10,
                           height=height + 10,
                           pad_mode="constant",
                           pad_cval=(0, 255),
                           name="PadToFixedSize"),
        iaa.CropToFixedSize(width=width - 10,
                            height=height - 10,
                            name="CropToFixedSize"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="nearest",
                             name="KeepSizeByResize_CropToFixedSize_nearest"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="linear",
                             name="KeepSizeByResize_CropToFixedSize_linear"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="cubic",
                             name="KeepSizeByResize_CropToFixedSize_cubic"),
    ]
    # Weather augmenters.
    augmenters_weather = [
        iaa.FastSnowyLandscape(lightness_threshold=(100, 255),
                               lightness_multiplier=(1.0, 4.0),
                               name="FastSnowyLandscape"),
        iaa.Clouds(name="Clouds"),
        iaa.Fog(name="Fog"),
        iaa.CloudLayer(intensity_mean=(196, 255),
                       intensity_freq_exponent=(-2.5, -2.0),
                       intensity_coarse_scale=10,
                       alpha_min=0,
                       alpha_multiplier=(0.25, 0.75),
                       alpha_size_px_max=(2, 8),
                       alpha_freq_exponent=(-2.5, -2.0),
                       sparsity=(0.8, 1.0),
                       density_multiplier=(0.5, 1.0),
                       name="CloudLayer"),
        iaa.Snowflakes(name="Snowflakes"),
        iaa.SnowflakesLayer(density=(0.005, 0.075),
                            density_uniformity=(0.3, 0.9),
                            flake_size=(0.2, 0.7),
                            flake_size_uniformity=(0.4, 0.8),
                            angle=(-30, 30),
                            speed=(0.007, 0.03),
                            blur_sigma_fraction=(0.0001, 0.001),
                            name="SnowflakesLayer")
    ]
    # Concatenate all families into a single flat list.
    augmenters = (augmenters_meta + augmenters_arithmetic + augmenters_blend +
                  augmenters_blur + augmenters_color + augmenters_contrast +
                  augmenters_convolutional + augmenters_edges +
                  augmenters_flip + augmenters_geometric +
                  augmenters_pooling + augmenters_segmentation +
                  augmenters_size + augmenters_weather)
    # Optional regex filter on augmenter names (substring search, not match).
    if only_augmenters is not None:
        augmenters_reduced = []
        for augmenter in augmenters:
            if any([
                    re.search(pattern, augmenter.name)
                    for pattern in only_augmenters
            ]):
                augmenters_reduced.append(augmenter)
        augmenters = augmenters_reduced
    return augmenters
def __init__(self, AllChannelsCLAHE_ratio=None): self.AllChannelsCLAHE_ratio = AllChannelsCLAHE_ratio self.seq = iaa.Sequential([ iaa.AllChannelsCLAHE(clip_limit=(0.8, 0.8)), ])
class AugmentationScheme:
    """Catalogue of imgaug augmentations plus a callable augmentation scheme.

    ``Augmentations`` maps a human-readable name to either a ready-built
    augmenter or a lambda that constructs one from range parameters
    (usually ``lo``/``hi`` bounds; ``percent`` feeds ``per_channel``).
    Instances apply a random subset of ``self.augs`` to an image.
    """

    # Dictionary containing all possible augmentation functions
    Augmentations = {
        # Convert to HSV, then add a value in [lo, hi] to one channel
        # [H=0, S=1, V=2], and convert back to RGB:
        "HSV":
        lambda channel, lo, hi: iaa.WithColorspace(
            to_colorspace="HSV",
            from_colorspace="RGB",
            children=iaa.WithChannels(channel, iaa.Add((lo, hi)))),

        # Add random values in [lo, hi] to hue and saturation
        # (independently per channel, same value for all pixels in it):
        "Add_To_Hue_And_Saturation":
        lambda lo, hi: iaa.AddToHueAndSaturation((lo, hi), per_channel=True),

        # Increase one RGB channel [R=0, G=1, B=2] by a value in [lo, hi]:
        "Increase_Channel":
        lambda channel, lo, hi: iaa.WithChannels(channel, iaa.Add((lo, hi))),

        # Rotate one RGB channel by [lo, hi] degrees:
        "Rotate_Channel":
        lambda channel, lo, hi: iaa.WithChannels(channel,
                                                 iaa.Affine(rotate=(lo, hi))),

        # Augmenter that never changes input images ("no operation").
        "No_Operation":
        iaa.Noop(),

        # Pad by [lo, hi] percent of the original size (range [0, 1]).
        # If s_i is False the value is sampled once and used for all sides.
        # NOTE: keep_size=True resizes back to the original size afterwards.
        "Pad_Percent":
        lambda lo, hi, s_i: iaa.Pad(
            percent=(lo, hi), keep_size=True, sample_independently=s_i),

        # Pad by [lo, hi] pixels; s_i as above.
        "Pad_Pixels":
        lambda lo, hi, s_i: iaa.Pad(
            px=(lo, hi), keep_size=True, sample_independently=s_i),

        # Crop by [lo, hi] percent of the original size (range [0, 1]);
        # s_i as above; resized back to the original size afterwards.
        "Crop_Percent":
        lambda lo, hi, s_i: iaa.Crop(
            percent=(lo, hi), keep_size=True, sample_independently=s_i),

        # Crop by [lo, hi] pixels; s_i as above.
        "Crop_Pixels":
        lambda lo, hi, s_i: iaa.Crop(
            px=(lo, hi), keep_size=True, sample_independently=s_i),

        # Mirror all images horizontally (probability 1).
        "Flip_lr":
        iaa.Fliplr(1),

        # Mirror all images vertically (probability 1).
        "Flip_ud":
        iaa.Flipud(1),

        # Replace superpixels (s_pix_lo..s_pix_hi segments per image) by
        # their average color with probability in [prob_lo, prob_hi]:
        "Superpixels":
        lambda prob_lo, prob_hi, s_pix_lo, s_pix_hi: iaa.Superpixels(
            p_replace=(prob_lo, prob_hi), n_segments=(s_pix_lo, s_pix_hi)),

        # Overlay a grayscale version, removing [alpha_lo, alpha_hi] of color:
        "Grayscale":
        lambda alpha_lo, alpha_hi: iaa.Grayscale(alpha=(alpha_lo, alpha_hi)),

        # Gaussian blur with sigma in [sigma_lo, sigma_hi]:
        "Gaussian_Blur":
        lambda sigma_lo, sigma_hi: iaa.GaussianBlur(
            sigma=(sigma_lo, sigma_hi)),

        # Mean blur over neighbourhoods with random height/width ranges:
        "Average_Blur":
        lambda h_lo, h_hi, w_lo, w_hi: iaa.AverageBlur(
            k=((h_lo, h_hi), (w_lo, w_hi))),

        # Median blur over neighbourhoods sized [lo, hi] x [lo, hi]:
        "Median_Blur":
        lambda lo, hi: iaa.MedianBlur(k=(lo, hi)),

        # Sharpen, then alpha-blend with the original:
        "Sharpen":
        lambda alpha_lo, alpha_hi, lightness_lo, lightness_hi: iaa.Sharpen(
            alpha=(alpha_lo, alpha_hi),
            lightness=(lightness_lo, lightness_hi)),

        # Emboss, then alpha-blend with the original:
        "Emboss":
        lambda alpha_lo, alpha_hi, strength_lo, strength_hi: iaa.Emboss(
            alpha=(alpha_lo, alpha_hi), strength=(strength_lo, strength_hi)),

        # Edge detection, blended with the original via a random alpha:
        "Detect_Edges":
        lambda alpha_lo, alpha_hi: iaa.EdgeDetect(alpha=(alpha_lo, alpha_hi)),

        # Directed edge detection (dir in [0, 1] maps to 0..360 degrees),
        # blended with the original via a random alpha:
        "Directed_edge_Detect":
        lambda alpha_lo, alpha_hi, dir_lo, dir_hi: iaa.DirectedEdgeDetect(
            alpha=(alpha_lo, alpha_hi), direction=(dir_lo, dir_hi)),

        # Add values in [lo, hi]; per-channel for `percent` of images:
        "Add":
        lambda lo, hi, percent: iaa.Add((lo, hi), per_channel=percent),

        # As above, sampled per pixel:
        "Add_Element_Wise":
        lambda lo, hi, percent: iaa.AddElementwise(
            (lo, hi), per_channel=percent),

        # Gaussian (white) noise N(0, s), s sampled per image from [lo, hi]:
        "Additive_Gaussian_Noise":
        lambda lo, hi, percent: iaa.AdditiveGaussianNoise(
            scale=(lo, hi), per_channel=percent),

        # Multiply each pixel by a value in [lo, hi]:
        "Multiply":
        lambda lo, hi, percent: iaa.Multiply((lo, hi), per_channel=percent),

        # As above, sampled per pixel.
        # BUG FIX: previously hard-coded (0.5, 1.5)/per_channel=0.5 and
        # silently ignored all three parameters.
        "Multiply_Element_Wise":
        lambda lo, hi, percent: iaa.MultiplyElementwise(
            (lo, hi), per_channel=percent),

        # Drop p percent of pixels (p in [lo, hi]), i.e. set them to zero;
        # per-channel in `percent` of all images:
        "Dropout":
        lambda lo, hi, percent: iaa.Dropout(p=(lo, hi), per_channel=percent),

        # Coarse dropout: drop [d_lo, d_hi] percent of pixels on a
        # lower-resolution mask sized [s_lo, s_hi] percent of the original.
        # BUG FIX: size_percent previously used (s_hi, s_hi), ignoring s_lo.
        "Coarse_Dropout":
        lambda d_lo, d_hi, s_lo, s_hi, percent: iaa.CoarseDropout(
            (d_lo, d_hi), size_percent=(s_lo, s_hi), per_channel=percent),

        # Invert pixels (v -> 255-v) with probability i_percent;
        # channel-wise for c_percent of all images:
        "Invert":
        lambda i_percent, c_percent: iaa.Invert(i_percent,
                                                per_channel=c_percent),

        # Normalize contrast by a factor in [lo, hi]:
        "Contrast_Normalisation":
        lambda lo, hi, percent: iaa.ContrastNormalization(
            (lo, hi), per_channel=percent),

        # Scale to [lo, hi] percent of original size, per axis:
        "Scale":
        lambda x_lo, x_hi, y_lo, y_hi: iaa.Affine(scale={
            "x": (x_lo, x_hi),
            "y": (y_lo, y_hi)
        }),

        # Translate by [lo, hi] percent on each axis independently:
        "Translate_Percent":
        lambda x_lo, x_hi, y_lo, y_hi: iaa.Affine(translate_percent={
            "x": (x_lo, x_hi),
            "y": (y_lo, y_hi)
        }),

        # Translate by [lo, hi] pixels on each axis independently:
        "Translate_Pixels":
        lambda x_lo, x_hi, y_lo, y_hi: iaa.Affine(translate_px={
            "x": (x_lo, x_hi),
            "y": (y_lo, y_hi)
        }),

        # Rotate by [lo, hi] degrees:
        "Rotate":
        lambda lo, hi: iaa.Affine(rotate=(lo, hi)),

        # Shear by [lo, hi] degrees:
        "Shear":
        lambda lo, hi: iaa.Affine(shear=(lo, hi)),

        # Local distortions via a grid of points moved by N(0, z),
        # z sampled per image from [lo, hi]:
        "Piecewise_Affine":
        lambda lo, hi: iaa.PiecewiseAffine(scale=(lo, hi)),

        # Elastic distortion field with the given alpha/sigma ranges:
        "Elastic_Transformation":
        lambda alpha_lo, alpha_hi, sigma_lo, sigma_hi: iaa.
        ElasticTransformation(alpha=(alpha_lo, alpha_hi),
                              sigma=(sigma_lo, sigma_hi)),

        # Weather augmenters are computationally expensive and may not work
        # effectively on certain data sets.
        # Draw clouds in images.
        "Clouds":
        iaa.Clouds(),

        # Draw fog in images.
        "Fog":
        iaa.Fog(),

        # Add falling snowflakes to images.
        "Snowflakes":
        iaa.Snowflakes(),

        # Replace `percent` of all pixels by either x or y:
        "Replace_Element_Wise":
        lambda percent, x, y: iaa.ReplaceElementwise(percent, [x, y]),

        # Laplace noise Laplace(0, s), s sampled per image from [lo, hi]:
        "Additive_Laplace_Noise":
        lambda lo, hi, percent: iaa.AdditiveLaplaceNoise(
            scale=(lo, hi), per_channel=percent),

        # Poisson noise Poisson(s), s sampled per image from [lo, hi]:
        "Additive_Poisson_Noise":
        lambda lo, hi, percent: iaa.AdditivePoissonNoise(
            lam=(lo, hi), per_channel=percent),

        # Replace `percent` of all pixels with salt-and-pepper noise:
        "Salt_And_Pepper":
        lambda percent: iaa.SaltAndPepper(percent),

        # Coarse salt-and-pepper: noisy rectangles on a downscaled mask
        # sized [lo, hi] percent of the input:
        "Coarse_Salt_And_Pepper":
        lambda percent, lo, hi: iaa.CoarseSaltAndPepper(
            percent, size_percent=(lo, hi)),

        # Replace `percent` of all pixels with salt (white-ish) noise:
        "Salt":
        lambda percent: iaa.Salt(percent),

        # Coarse salt noise on a downscaled mask:
        "Coarse_Salt":
        lambda percent, lo, hi: iaa.CoarseSalt(percent,
                                               size_percent=(lo, hi)),

        # Replace `percent` of all pixels with pepper (black-ish) noise:
        "Pepper":
        lambda percent: iaa.Pepper(percent),

        # Coarse pepper noise on a downscaled mask:
        "Coarse_Pepper":
        lambda percent, lo, hi: iaa.CoarsePepper(percent,
                                                 size_percent=(lo, hi)),

        # Naive alpha blend: a * A_ij + (1-a) * B_ij per pixel.
        # Images must be numpy arrays of shape (height, width, channel).
        "Blend_Alpha":
        lambda image_fg, image_bg, alpha: iaa.blend_alpha(
            image_fg, image_bg, alpha),

        # Bilateral filter: blurs homogeneous/textured areas while trying to
        # preserve edges; max distance and sigma ranges as given:
        "Bilateral_Blur":
        lambda d_lo, d_hi, sc_lo, sc_hi, ss_lo, ss_hi: iaa.BilateralBlur(
            d=(d_lo, d_hi),
            sigma_color=(sc_lo, sc_hi),
            sigma_space=(ss_lo, ss_hi)),

        # Motion blur with a (kernel x kernel) kernel and angle x or y
        # degrees (randomly picked per image):
        "Motion_Blur":
        lambda kernel, x, y: iaa.MotionBlur(k=kernel, angle=[x, y]),

        # Standard histogram equalization (similar to CLAHE):
        "Histogram_Equalization":
        iaa.HistogramEqualization(),

        # Standard histogram equalization applied to every channel:
        "All_Channels_Histogram_Equalization":
        iaa.AllChannelsHistogramEqualization(),

        # CLAHE: histogram equalization within local image patches.
        # Clip limit sampled from [cl_lo, cl_hi] (1 = low contrast, 50 =
        # high); SxS kernel with S sampled from [t_lo, t_hi], once per image:
        "CLAHE":
        lambda cl_lo, cl_hi, t_lo, t_hi: iaa.CLAHE(
            clip_limit=(cl_lo, cl_hi), tile_grid_size_px=(t_lo, t_hi)),

        # CLAHE applied to every channel of the input images:
        "All_Channels_CLAHE":
        lambda cl_lo, cl_hi, t_lo, t_hi: iaa.AllChannelsCLAHE(
            clip_limit=(cl_lo, cl_hi), tile_grid_size_px=(t_lo, t_hi)),

        # Gamma contrast; multiplier in [lo, hi] (higher darkens),
        # per-channel for `percent` of all images:
        "Gamma_Contrast":
        lambda lo, hi, percent: iaa.GammaContrast(
            (lo, hi), per_channel=percent),

        # Linear contrast; multiplier in [lo, hi]:
        "Linear_Contrast":
        lambda lo, hi, percent: iaa.LinearContrast(
            (lo, hi), per_channel=percent),

        # Log contrast; multiplier in [lo, hi]. Values above 1.0 can quickly
        # break images by exceeding the dtype's value range:
        "Log_Contrast":
        lambda lo, hi, percent: iaa.LogContrast((lo, hi),
                                                per_channel=percent),

        # Sigmoid contrast; gain in [lo, hi], cutoff in [c_lo, c_hi]
        # (higher cutoff keeps pixels darker longer):
        "Sigmoid_Contrast":
        lambda lo, hi, c_lo, c_hi, percent: iaa.SigmoidContrast(
            (lo, hi), (c_lo, c_hi), per_channel=percent),

        # Custom Canny edge extraction via iaa.Lambda (see CO module).
        # Good default values for min and max are 100 and 200.
        'Custom_Canny_Edges':
        lambda min_val, max_val: iaa.Lambda(func_images=CO.Edges(
            min_value=min_val, max_value=max_val)),
    }

    # AugmentationScheme objects require images and labels.
    # 'augs' is a list that contains all data augmentations in the scheme
    def __init__(self):
        self.augs = [iaa.Flipud(1)]

    def __call__(self, image):
        """Apply, with probability 0.5, a random subset of ``self.augs``."""
        image = np.array(image)
        aug_scheme = iaa.Sometimes(
            0.5,
            iaa.SomeOf(random.randrange(1, len(self.augs) + 1),
                       self.augs,
                       random_order=True))
        # BUG FIX: the scheme is the local `aug_scheme`, not an instance
        # attribute — `self.aug_scheme` raised AttributeError at call time.
        aug_img = aug_scheme.augment_image(image)
        # fixes negative strides: the subtraction forces a fresh positively-
        # strided copy (the `::1` slice itself is a no-op).
        aug_img = aug_img[..., ::1] - np.zeros_like(aug_img)
        return aug_img