def main():
    """Show grids of quokka images JPEG-compressed at various strengths.

    Covers fixed compression values and (min, max) ranges sampled per image.
    """
    strengths = [0, 1, 25, 50, 75, 99, 100, (0, 50), (50, 100), (0, 100)]
    # str() of an int or tuple reproduces the original labels exactly,
    # e.g. "25" and "(0, 50)".
    augs = [(str(s), iaa.JpegCompression(compression=s)) for s in strengths]

    base = ia.quokka(size=(256, 256), extract="square")
    images = np.uint8([base] * (5 * 5))

    for idx, (name, aug) in enumerate(augs):
        print(idx, name)
        grid = ia.draw_grid(aug.augment_images(images), cols=5, rows=5)
        ia.imshow(grid)
def new_gen_train_trans(image, mask):
    """Random train-time augmentation for an (image, mask) pair.

    Applies a random-scale crop, photometric jitter (brightness, hue, JPEG
    compression) on the image, a joint random horizontal flip, then resizes
    both to ``args.train_size`` and converts to tensors (the image is also
    normalized with the dataset statistics).
    """
    image = np.array(image)
    mask = np.array(mask)

    full_h, full_w = mask.shape
    out_h, out_w = args.train_size

    # Augmentation hyper-parameters.
    crop_scales = [1.0, 0.875, 0.75, 0.625, 0.5]
    hue_factor = 0.6
    brightness_factor = 0.6  # was 0.5
    p_flip = 0.5
    jpeg_scale = (0, 80)  # was 70
    p_erase_class = 0.5  # NOTE(review): currently unused in this function

    # Random crop at one of the predefined scales.
    scale = np.random.choice(crop_scales)
    crop_h = int(full_h * scale)
    crop_w = int(full_w * scale)
    top = np.random.randint(0, full_h - crop_h + 1)
    left = np.random.randint(0, full_w - crop_w + 1)
    image = image[top:top + crop_h, left:left + crop_w, :]
    mask = mask[top:top + crop_h, left:left + crop_w]

    # Photometric jitter is applied to the image only, never the mask.
    jitter = iaa.Sequential([
        iaa.MultiplyBrightness(
            (1 - brightness_factor, 1 + brightness_factor)),
        iaa.MultiplyHue((1 - hue_factor, 1 + hue_factor)),
        iaa.JpegCompression(compression=jpeg_scale),
    ])
    image = jitter(image=image)

    # Joint horizontal flip keeps image and mask aligned.
    if np.random.rand() < p_flip:
        image = np.flip(image, axis=1)
        mask = np.flip(mask, axis=1)

    image = Image.fromarray(image)
    mask = Image.fromarray(mask)
    # interpolation=1 is Image.LANCZOS; interpolation=0 is Image.NEAREST
    # (nearest keeps mask labels intact).
    image = TF.resize(image, (out_h, out_w), interpolation=1)
    mask = TF.resize(mask, (out_h, out_w), interpolation=0)

    image = TF.to_tensor(image)
    image = TF.normalize(image, args.dataset_mean, args.dataset_std)

    # Mask stays an integer label map (uint8) rather than a float tensor.
    mask = torch.from_numpy(np.array(mask, np.uint8))
    return image, mask
def __init__(self):
    """Build a photometric + geometric augmentation pipeline.

    Between 1 and 5 of the listed augmenters are applied per image (in
    random order), and each augmenter is itself only applied with
    probability 0.3.
    """
    def sometimes(aug):
        return iaa.Sometimes(0.3, aug)

    # Blur variants.
    blur_ops = [
        sometimes(iaa.OneOf([
            iaa.GaussianBlur(sigma=(0, 1.0)),
            iaa.MotionBlur(k=3),
        ])),
    ]

    # Color / intensity variants.
    color_ops = [
        # sometimes(iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)),
        sometimes(iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6),
                                      per_channel=True)),
        sometimes(iaa.Invert(0.25, per_channel=0.5)),
        sometimes(iaa.Solarize(0.5, threshold=(32, 128))),
        sometimes(iaa.Dropout2d(p=0.5)),
        sometimes(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
        sometimes(iaa.Add((-40, 40), per_channel=0.5)),
        sometimes(iaa.JpegCompression(compression=(5, 80))),
    ]

    # Geometric distortions.
    distort_ops = [
        sometimes(iaa.Crop(percent=(0.01, 0.05), sample_independently=True)),
        # NOTE(review): scale=(0.01, 0.01) is a degenerate range — verify
        # whether a wider range was intended.
        sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.01))),
        sometimes(iaa.Affine(
            scale=(0.7, 1.3),
            translate_percent=(-0.1, 0.1),
            # rotate=(-5, 5),
            shear=(-5, 5),
            order=[0, 1],
            cval=(0, 255),
            mode=ia.ALL)),
        sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.01))),
        sometimes(iaa.OneOf([
            iaa.Dropout(p=(0, 0.1)),
            iaa.CoarseDropout(p=(0, 0.1), size_percent=(0.02, 0.25)),
        ])),
    ]

    self.aug = iaa.Sequential(
        iaa.SomeOf((1, 5),
                   blur_ops + color_ops + distort_ops,
                   random_order=True),
        random_order=True)
def __init__(self, phase="test", real_world_aug=False):
    """Store ImageNet normalization stats and, optionally, a
    "real-world" augmentation pipeline.

    Parameters
    ----------
    phase : str
        Dataset phase label (e.g. "test").
    real_world_aug : bool
        When True, ``self.RWA`` applies 1..all of contrast jitter, JPEG
        compression, Gaussian blur and Gaussian noise in random order;
        otherwise ``self.RWA`` is None.
    """
    # ImageNet channel-wise mean/std, shaped for broadcasting over CHW.
    self.mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    self.std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
    self.phase = phase

    if not real_world_aug:
        self.RWA = None
    else:
        self.RWA = iaa.SomeOf(
            (1, None),
            [
                iaa.LinearContrast((0.6, 1.4)),
                iaa.JpegCompression(compression=(0, 60)),
                iaa.GaussianBlur(sigma=(0.0, 3.0)),
                iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255)),
            ],
            random_order=True,
        )
def __init__(self, args, samples_directory, basemodel_preprocess, generator_type, shuffle):
    """Configure the age/gender data generator.

    Reads model/batch settings from the *args* dict (with optional keys
    ``predict_gender`` and ``range_mode``), builds the imgaug augmentation
    pipeline, then loads the sample file list and shuffles indexes for the
    first epoch.
    """
    self.samples_directory = samples_directory
    self.model_type = args["type"]
    self.base_model = args["base_model"]
    self.basemodel_preprocess = basemodel_preprocess
    self.batch_size = args["batch_size"]
    self.sample_files = []
    # Dimensions that images get resized into when loaded (square).
    self.img_dims = (args["img_dim"], args["img_dim"])
    self.age_deviation = args["age_deviation"]
    # Optional flags default to False when absent from args.
    self.predict_gender = args["predict_gender"] if "predict_gender" in args else False
    self.range_mode = args["range_mode"] if "range_mode" in args else False
    # Number of output classes: age ranges vs individual ages.
    self.age_classes_number = age_ranges_number() if self.range_mode else AGES_NUMBER
    self.dataset_size = None  # filled in by load_sample_files()
    self.generator_type = generator_type
    self.shuffle = shuffle

    # Helper: apply the given augmenter in 50% of all cases.
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)
    # Augmentation pipeline; the listed ops run in random order per image.
    self.seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.ChannelShuffle(0.25),
            sometimes(
                iaa.OneOf([
                    # randomly remove up to 10% of the pixels
                    iaa.Dropout((0.01, 0.1), per_channel=0.5),
                    iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.2), per_channel=True)
                ])
            ),
            iaa.OneOf([
                iaa.GaussianBlur((0, 0.4)),
                iaa.MedianBlur((1,3)),
                iaa.MotionBlur(k=(3,5), angle=(0,360))
            ]),
            iaa.JpegCompression((0,50)),
            iaa.OneOf([
                iaa.Multiply((0.7, 1.4), per_channel=0.5),
                iaa.GammaContrast((0.7, 1.4), per_channel=0.5)
            ]),
            iaa.Grayscale(alpha=(0.0, 1.0)),
            sometimes(iaa.CropAndPad(
                percent=(-0.05, 0.1),
                pad_mode=imgaug.ALL,
                pad_cval=(0, 255)
            ))
        ],
        random_order=True
    )

    self.load_sample_files()
    self.indexes = np.arange(self.dataset_size)
    # For training data: ensures samples are shuffled in the first epoch
    # if shuffle is set to True.
    self.on_epoch_end()
def dataAug(imgPath, txtPath):
    """Load one image and its bounding boxes, then augment both jointly.

    Returns the single augmented image and its transformed bounding boxes.
    """
    images, bbs = loadData(imgPath, txtPath)

    pipeline = iaa.Sequential([
        iaa.Sometimes(0.25, iaa.AdditiveGaussianNoise(scale=0.05 * 255)),
        iaa.Affine(translate_px={"x": (1, 5)}),
        iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30)),
        iaa.Sometimes(0.25, iaa.imgcorruptlike.MotionBlur(severity=(1, 2))),
        iaa.Resize({"height": (0.75, 1.25), "width": (0.75, 1.25)}),
        iaa.CropAndPad(percent=(-0.25, 0.25)),
        iaa.JpegCompression(compression=(0, 66)),
    ])

    aug_images, aug_bbs = pipeline(images=images, bounding_boxes=bbs)
    return aug_images[0], aug_bbs[0]
def train(model):
    """Train the model."""
    # Training and validation splits.
    dataset_train = CharacterDataset()
    dataset_train.load_characters("train")
    dataset_train.prepare()

    dataset_val = CharacterDataset()
    dataset_val.load_characters("val")
    dataset_val.prepare()

    # Apply exactly 2 of these augmenters per image.
    aug = iaa.SomeOf(2, [
        iaa.AdditiveGaussianNoise(scale=(0, 0.10 * 255)),
        iaa.MotionBlur(),
        iaa.GaussianBlur(sigma=(0.0, 2.0)),
        iaa.RemoveSaturation(mul=(0, 0.5)),
        iaa.GammaContrast(),
        iaa.Rotate(rotate=(-45, 45)),
        iaa.PerspectiveTransform(scale=(0.01, 0.15)),
        iaa.JpegCompression(compression=(0, 75)),
        iaa.imgcorruptlike.Spatter(severity=(1, 4)),
        iaa.Rain(speed=(0.1, 0.3)),
        iaa.Fog(),
    ])

    custom_callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5,
                          verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                      verbose=1),
    ]

    # Small dataset + COCO-pretrained weights: training only the head
    # layers is sufficient.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=100,
                layers='heads',
                augmentation=aug,
                custom_callbacks=custom_callbacks)
def return_augmenter(parameter, method):
    """Build a single-step augmentation pipeline for *method*.

    Parameters
    ----------
    parameter : number or tuple
        Strength value forwarded to the underlying imgaug augmenter.
    method : str
        One of 'subtract', 'add', 'gauss', 'compress', 'subtract_hsv',
        'add_hsv', 'contrast'.

    Returns
    -------
    iaa.Sequential
        A one-element pipeline wrapping the selected augmenter.

    Raises
    ------
    ValueError
        If *method* is not a supported name.  (The original chain of
        independent ``if`` statements left ``augmenter`` unbound for an
        unknown method, crashing with UnboundLocalError at the return.)
    """
    # 'subtract'/'add' (and the *_hsv pair) intentionally share the same
    # augmenter: the sign of *parameter* distinguishes them at the call site.
    factories = {
        'subtract': lambda: iaa.Add(parameter, True),
        'add': lambda: iaa.Add(parameter, True),
        'gauss': lambda: iaa.AdditiveGaussianNoise(0, parameter, True),
        'compress': lambda: iaa.JpegCompression(parameter),
        'subtract_hsv': lambda: iaa.AddToHueAndSaturation(parameter, True),
        'add_hsv': lambda: iaa.AddToHueAndSaturation(parameter, True),
        'contrast': lambda: iaa.GammaContrast(parameter, False),
    }
    try:
        factory = factories[method]
    except KeyError:
        raise ValueError('unknown augmentation method: {!r}'.format(method))
    return iaa.Sequential([factory()])
def __init__(self, ):
    """Register named degradation pipelines.

    Templates: 4x/4x-8x super-resolution degradation, denoise, deblur,
    JPEG artifacts, 16x mosaic, plus a combined 'face_renov' pipeline
    that chains deblur/denoise/jpeg/sr4x8x in random order.
    """
    import imgaug.augmenters as ia

    templates = {
        'sr4x': ia.Sequential([
            # Nearly equivalent to a 4x bicubic down/up-sampling round-trip.
            ia.Resize((0.25000, 0.25001), cv2.INTER_AREA),
            ia.Resize({'height': 512, 'width': 512}, cv2.INTER_CUBIC),
        ]),
        'sr4x8x': ia.Sequential([
            ia.Resize((0.125, 0.25), cv2.INTER_AREA),
            ia.Resize({'height': 512, 'width': 512}, cv2.INTER_CUBIC),
        ]),
        'denoise': ia.OneOf([
            ia.AdditiveGaussianNoise(scale=(20, 40), per_channel=True),
            ia.AdditiveLaplaceNoise(scale=(20, 40), per_channel=True),
            ia.AdditivePoissonNoise(lam=(15, 30), per_channel=True),
        ]),
        'deblur': ia.OneOf([
            ia.MotionBlur(k=(10, 20)),
            ia.GaussianBlur((3.0, 8.0)),
        ]),
        'jpeg': ia.JpegCompression(compression=(50, 85)),
        '16x': Mosaic16x(),
    }

    # Combined pipeline built from the templates above.
    templates['face_renov'] = ia.Sequential(
        [
            templates['deblur'],
            templates['denoise'],
            templates['jpeg'],
            templates['sr4x8x'],
        ],
        random_order=True)

    self.default_deg_templates = templates
def train(model):
    """Train the model."""
    # Training and validation splits.
    dataset_train = PlateDataset()
    dataset_train.load_plates("train")
    dataset_train.prepare()

    dataset_val = PlateDataset()
    dataset_val.load_plates("val")
    dataset_val.prepare()

    # Apply exactly one of these augmenters per image.
    aug = iaa.OneOf([
        iaa.GaussianBlur(sigma=(0, 1.0)),
        iaa.MotionBlur(),
        iaa.RemoveSaturation((0.0, 0.5)),
        iaa.GammaContrast(),
        iaa.Rotate(rotate=(-45, 45)),
        iaa.PerspectiveTransform(scale=(0.01, 0.15)),
        iaa.SaltAndPepper(),
        iaa.JpegCompression(compression=(0, 75)),
        iaa.imgcorruptlike.Spatter(severity=(1, 4)),
        iaa.imgcorruptlike.DefocusBlur(severity=1),
    ])

    custom_callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5,
                          verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                      verbose=1),
    ]

    # NOTE(review): the message says "heads" but layers='all' trains the
    # whole network — confirm which is intended.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=100,
                layers='all',
                augmentation=aug,
                custom_callbacks=custom_callbacks)
def __init__(self, trainset, name='no_name'):
    """Configure the model from a *trainset* description dict.

    Expects ``trainset['classes']`` (dict of foreground classes) and
    ``trainset['mean_pixel']``.
    """
    self.NAME = name
    # +1 accounts for the implicit background class (id 0), which the
    # classes dict does not list.
    self.NUM_CLASSES = 1 + len(trainset['classes'])
    # No ground truth available, so validation is disabled.
    self.VALIDATION_STEPS = 0
    # Mean pixel of the training images.
    self.MEAN_PIXEL = np.array(trainset['mean_pixel'])
    # Apply any subset (possibly none) of these augmenters, in random order.
    self.AUGMENTATION = iaa.SomeOf(
        (0, None),
        [
            iaa.Fliplr(1.0),
            iaa.Flipud(1.0),
            iaa.Affine(rotate=[90, 180, 270]),
            iaa.GaussianBlur(sigma=(1.0, 2.0)),
            iaa.JpegCompression(compression=(25, 50)),
        ],
        random_order=True)
    super().__init__()
def get_augmenter(self):
    """Return a pipeline that applies 2-6 randomly chosen degradations."""
    degradations = [
        iaa.Flipud(0.5),
        iaa.Rot90(k=(0, 3)),
        iaa.GaussianBlur(sigma=(0.0, 3.0)),
        iaa.MotionBlur(angle=(0, 360), k=(3, 8)),
        iaa.Add((-50, 5)),
        iaa.AddElementwise((-20, 2)),
        iaa.AdditiveGaussianNoise(scale=0.05 * 255),
        iaa.Multiply((0.3, 1.05)),
        iaa.SaltAndPepper(p=(0.1, 0.3)),
        iaa.JpegCompression(compression=(20, 90)),
        iaa.Affine(shear=(-15, 15)),
        iaa.Affine(rotate=(-10, 10)),
    ]
    return iaa.Sequential([iaa.SomeOf((2, 6), degradations)])
def get_simple_ill_seq(self):
    """Return a mild illumination/noise augmentation pipeline.

    Each stage fires independently with probability 0.5.
    """
    light_change = 20

    # Global adjustments, including colour-space tweaks.
    global_adjust = iaa.Sometimes(
        0.5,
        iaa.OneOf([
            iaa.WithColorspace(
                to_colorspace="HSV",
                from_colorspace="RGB",
                children=iaa.OneOf([
                    iaa.WithChannels(0, iaa.Add((-5, 5))),
                    iaa.WithChannels(1, iaa.Add((-20, 20))),
                    iaa.WithChannels(
                        2, iaa.Add((-light_change, light_change))),
                ])),
            iaa.Grayscale((0.2, 0.6)),
            iaa.Add((-light_change, light_change)),
            iaa.Multiply((0.8, 1.2)),
        ]))

    # Salt-and-pepper noise, alpha-blended with the source.
    pepper = iaa.Sometimes(
        0.5,
        iaa.OneOf([iaa.Alpha((0.2, 0.6), iaa.SaltAndPepper((0.01, 0.03)))]))

    # Contrast adjustment.
    contrast = iaa.Sometimes(
        0.5,
        iaa.OneOf([iaa.ContrastNormalization((0.8, 1.2))]))

    # Additive noise, compression artifacts, light blurs.
    noise = iaa.Sometimes(
        0.5,
        iaa.OneOf([
            iaa.AdditiveGaussianNoise(0, 1),
            iaa.AdditivePoissonNoise(1),
            iaa.JpegCompression((30, 60)),
            iaa.GaussianBlur(sigma=1),
            iaa.AverageBlur(1),
            iaa.MedianBlur(1),
        ]))

    return iaa.Sequential([global_adjust, pepper, contrast, noise])
def complex_imgaug(x, org_size, scale_size):
    """Degrade a single RGB PIL image.

    Randomly blurs, resizes down to *scale_size*, injects noise and JPEG
    artifacts, then resizes back to *org_size*.  Returns an ndarray.
    """
    batch = np.array(x)[np.newaxis, ...]  # imgaug expects a batch axis

    pipeline = iaa.Sequential([
        iaa.Sometimes(0.5, iaa.OneOf([
            iaa.GaussianBlur((3, 15)),
            iaa.AverageBlur(k=(3, 15)),
            iaa.MedianBlur(k=(3, 15)),
            iaa.MotionBlur((5, 25)),
        ])),
        iaa.Resize(scale_size, interpolation=ia.ALL),
        iaa.Sometimes(0.2, iaa.AdditiveGaussianNoise(
            loc=0, scale=(0.0, 0.1 * 255), per_channel=0.5)),
        iaa.Sometimes(0.7, iaa.JpegCompression(compression=(10, 65))),
        iaa.Resize(org_size),
    ])

    return pipeline(images=batch)[0]
def augmentation(self, img):
    """Apply light photometric augmentation to *img*.

    Each step (JPEG artifacts, blur, hue jitter, Gaussian noise) fires
    independently with probability 0.5, in random order.
    """
    def maybe(aug):
        return iaa.Sometimes(0.5, aug)

    pipeline = iaa.Sequential(
        [
            maybe(iaa.JpegCompression(compression=(1, 30))),
            maybe(iaa.GaussianBlur(sigma=(0.2, 1.0))),
            maybe(iaa.MultiplyHue((0.9, 1.1))),
            maybe(iaa.AdditiveGaussianNoise(scale=0.01 * 255,
                                            per_channel=0.5)),
        ],
        random_order=True)

    return pipeline(image=img)
def _load_augmentation_aug_non_geometric():
    """Photometric-only augmentation pipeline (no geometric changes).

    Each (probability, augmenter) pair is applied independently via
    ``iaa.Sometimes``.
    """
    prob_augmenters = [
        (0.3, iaa.Multiply((0.5, 1.5), per_channel=0.5)),
        (0.2, iaa.JpegCompression(compression=(70, 99))),
        (0.2, iaa.GaussianBlur(sigma=(0, 3.0))),
        (0.2, iaa.MotionBlur(k=15, angle=[-45, 45])),
        (0.2, iaa.MultiplyHue((0.5, 1.5))),
        (0.2, iaa.MultiplySaturation((0.5, 1.5))),
        (0.34, iaa.MultiplyHueAndSaturation((0.5, 1.5), per_channel=True)),
        (0.34, iaa.Grayscale(alpha=(0.0, 1.0))),
        (0.2, iaa.ChangeColorTemperature((1100, 10000))),
        (0.1, iaa.GammaContrast((0.5, 2.0))),
        (0.2, iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6))),
        (0.1, iaa.CLAHE()),
        (0.1, iaa.HistogramEqualization()),
        (0.2, iaa.LinearContrast((0.5, 2.0), per_channel=0.5)),
        (0.1, iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0))),
    ]
    return iaa.Sequential(
        [iaa.Sometimes(p, aug) for p, aug in prob_augmenters])
def create_augmentation_seq(self):
    """Build ``self.aug_seq``: a fixed 32x32 centre crop followed by
    random-quality JPEG compression.

    Affine jitter and horizontal/vertical flips existed previously but
    are currently disabled.
    """
    steps = [
        # Crop out a 32x32 patch from the image centre.
        augmenters.CropToFixedSize(width=32, height=32, position=(0.5, 0.5)),
        # Random quality compression in [0, 85].
        augmenters.JpegCompression(compression=[0, 85]),
    ]
    self.aug_seq = augmenters.Sequential(steps, random_order=False)
def __init__(self, da):
    """Build the data-augmentation pipeline from the parameter dict *da*.

    *da* must contain every key in ``keys_data_aug`` (checked by
    ``check_data_aug``): geometric ranges (scale/translation/rotation/
    shear/perspective), camera-style ranges (blur, motion blur, JPEG,
    contrast, colour) and cutout settings.  Suffixes ``m``/``M`` denote
    the min/max of each sampled range.
    """
    assert check_data_aug(da), 'Parameters for custom data augmentation missing. Should have: {}'.format(
        keys_data_aug)
    # Geometric transforms; out-of-frame pixels are filled with FILL_COLOR.
    aug_geometric = [iaa.Affine(scale=(da['scalem'], da['scaleM']),
                                translate_percent={'x': (-da['trans'], da['trans']),
                                                   'y': (-da['trans'], da['trans'])},
                                rotate=(-da['rot'], da['rot']),
                                shear=(-da['shear'], da['shear']),
                                cval=FILL_COLOR),
                     iaa.PerspectiveTransform(scale=(0, da['pers']), cval=FILL_COLOR, keep_size=True)]
    # Camera/photometric degradations: blur, motion blur, JPEG artifacts,
    # contrast and colour jitter (colour ops operate in BGR).
    aug_camera = [iaa.GaussianBlur(sigma=(0, da['sigma'])),
                  iaa.MotionBlur(k=(da['mot_km'], da['mot_kM']),
                                 angle=(-da['mot_an'], da['mot_an']),
                                 direction=(da['mot_dm'], da['mot_dM'])),
                  iaa.JpegCompression(compression=(da['jpegm'], da['jpegM'])),
                  iaa.LinearContrast(alpha=(da['con_alpham'], da['con_alphaM']), per_channel=da['con_chan']),
                  iaa.MultiplyHueAndSaturation(mul=(da['col_mulm'], da['col_mulM']),
                                               per_channel=da['col_chan'],
                                               from_colorspace='BGR'),
                  iaa.AddToHueAndSaturation((da['col_addm'], da['col_addM']), per_channel=da['col_chan'])
                  ]
    # Rectangular occlusions filled with FILL_COLOR.
    cutout = iaa.Cutout(nb_iterations=(0, da['co_num']),
                        size=(da['co_sm'], da['co_sM']),
                        squared=False, cval=FILL_COLOR)
    # Create a mix of all others
    self.augmenter = iaa.Sequential([iaa.SomeOf((0, 1), aug_geometric),  # none or 1
                                     iaa.SomeOf((0, len(aug_camera) - 2), aug_camera),  # from none to all-2
                                     cutout],
                                    random_order=True)  # mix the apply order
def get_jpeg(img, degrade_dict):
    """Apply JPEG-compression degradation to *img*.

    Parameters
    ----------
    img : ndarray
        Input image; cast to uint8 before compression.
    degrade_dict : dict
        Must contain key "qf": the imgaug compression strength
        (0-100, higher means stronger degradation).

    Returns
    -------
    ndarray
        The JPEG-degraded uint8 image.
    """
    qf = degrade_dict["qf"]
    trans = ia.JpegCompression(compression=qf)
    # The original wrapped augment_image in a one-use lambda; calling it
    # directly is equivalent and clearer.
    return trans.augment_image(img.astype(np.uint8))
image_files = [f for f in listdir(IMAGE_DIR) if isfile(join(IMAGE_DIR, f))] shuffled_image_files = random.sample(image_files, len(image_files)) shuffled_image_files = random.sample(image_files, len(shuffled_image_files))[:MAX] # Kjørt de her modifikasjonene en om gangen for å få 750 bilder av hver augmentasjon. seq = iaa.Sequential([ # For å lag noise # iaa.SaltAndPepper((0.12, 0.15), per_channel=0.1) # For å lag bildan lyser, # iaa.MultiplyAndAddToBrightness(mul=(1.5), add=(-30, 30)) # Brukt den her for å gjør dem mørkan # iaa.Multiply((0.25, 0.35), per_channel=0.2) # Brukt den her for å lag store firkanta på bilda # iaa.CoarseDropout((0.1, 0.15), size_percent=(0.01, 0.02)) iaa.JpegCompression(compression=(96, 98)) ]) for image in tqdm(shuffled_image_files): if len(image) > 0: # Åpne bildet im = Image.open(join(IMAGE_DIR, image)) image_brightness = get_brightness(im) # Gjøre om til array med type uint8, (1920, 1080, 3) im = np.asarray(im).astype(np.uint8) # Ekspandere arrayet til å se ut som (1, 1920, 1080, 3), nødvendig siden iaa forventer en 4D matrise im = np.expand_dims(im, 0) # Augmentere bildet
path = 'idcard/' sqe_list = [ iaa.ChangeColorspace(from_colorspace="RGB", to_colorspace="HSV"), iaa.WithChannels(0, iaa.Add((-50, 50))), iaa.WithChannels(1, iaa.Add((-50, 50))), iaa.WithChannels(2, iaa.Add((-50, 50))), iaa.ChangeColorspace(from_colorspace="HSV", to_colorspace="RGB"), iaa.Add((-80, 80), per_channel=0.5), iaa.Multiply((0.5, 1.5), per_channel=0.5), iaa.AverageBlur(k=((5), (1, 3))), iaa.AveragePooling(2), iaa.AddElementwise((-20, -5)), iaa.AdditiveGaussianNoise(scale=(0, 0.05 * 255)), iaa.JpegCompression(compression=(50, 99)), iaa.MultiplyHueAndSaturation(mul_hue=(0.5, 1.5)), iaa.WithBrightnessChannels(iaa.Add((-50, 50))), iaa.WithBrightnessChannels(iaa.Add((-50, 50)), to_colorspace=[iaa.CSPACE_Lab, iaa.CSPACE_HSV]), iaa.MaxPooling(2), iaa.MinPooling((1, 2)), # iaa.Superpixels(p_replace=(0.1, 0.2), n_segments=(16, 128)), iaa.Clouds(), iaa.Fog(), iaa.AdditiveGaussianNoise(scale=0.1 * 255, per_channel=True), iaa.Dropout(p=(0, 0.2)), # iaa.WithChannels(0, iaa.Affine(rotate=(0, 0))), iaa.ChannelShuffle(0.35), iaa.WithColorspace(to_colorspace="HSV",
def get_jpeg():
    """Return a JpegCompression augmenter with strength sampled from [50, 85]."""
    compression_range = (50, 85)
    return ia.JpegCompression(compression=compression_range)
iaa.Invert(0.01, per_channel=0.5), iaa.AddToHueAndSaturation((-1, 1)), iaa.MultiplyHueAndSaturation((-1, 1)) ]), # Change brightness and contrast iaa.OneOf([ iaa.Add((-10, 10), per_channel=0.5), iaa.Multiply((0.5, 1.5), per_channel=0.5), iaa.GammaContrast(gamma=(0.5, 1.75), per_channel=0.5), iaa.SigmoidContrast(cutoff=(0, 1), per_channel=0.5), iaa.LogContrast(gain=(0.5, 1), per_channel=0.5), iaa.LinearContrast(alpha=(0.25, 1.75), per_channel=0.5), iaa.HistogramEqualization() ]), sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths) sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))), # sometimes move parts of the image around sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.2))), iaa.JpegCompression((0.1, 1)) ] ), # With 10 % probability apply one the of the weather conditions iaa.Sometimes(0.2, iaa.OneOf([ iaa.Clouds(), iaa.Fog(), iaa.Snowflakes() ])) ])
def __init__(self):
    """Build a one-step pipeline applying maximum-strength (100) JPEG compression."""
    max_jpeg = iaa.JpegCompression(100)
    self.aug = iaa.Sequential([max_jpeg])
[[0, 0], [max_width, 0], [max_width, max_height], [0, max_height]], dtype='float32') M = cv2.getPerspectiveTransform(rect, dst) warped = cv2.warpPerspective(image, M, (max_width, max_height)) return warped ocr_synthetic_transforms = iaa.Sequential([ iaa.Resize({ 'height': 64, 'width': 320 }), iaa.PerspectiveTransform(scale=(0.01, 0.05)), iaa.Sometimes(0.5, iaa.Multiply((0.5, 1.5))), iaa.Sometimes(0.5, iaa.JpegCompression(compression=(70, 99))), iaa.Affine( scale={ 'x': (0.95, 1.02), 'y': (0.95, 1.02) }, translate_percent={ 'x': (-0.02, 0.02), 'y': (-0.02, 0.02) }, rotate=(-3, 3), shear=(-5, 5), ) ]) pad_transform = iaa.PadToAspectRatio(1.0, position='center')
def fuckery():
    """Return an augmenter applying heavy JPEG compression (strength 80-97).

    An Invert option existed previously but is currently disabled, so the
    OneOf has a single choice.
    """
    choices = [
        # iaa.Invert(0.1, per_channel=0.5),
        iaa.JpegCompression(compression=(80, 97)),
    ]
    return iaa.OneOf(choices)
def __init__(self, list_file, train, transform, device, little_train=False, S=7):
    """Set up a YOLO-style detection dataset.

    Reads image file names from *list_file* (first whitespace-separated
    token per line) and builds an imgaug pipeline that, with probability
    0.5, applies 1-6 photometric degradations per image.

    Parameters: *S* is the output grid size; B=2 boxes per cell and C=20
    classes are fixed here.  *little_train* truncates the list to 64
    samples for quick runs.
    """
    print('data init')
    self.train = train
    self.transform = transform
    self.fnames = []
    self.boxes = []
    self.labels = []
    self.S = S
    self.B = 2   # predicted boxes per grid cell
    self.C = 20  # number of object classes
    self.device = device
    # Photometric-only augmentation (flips are disabled so the bounding
    # boxes stay valid without coordinate updates).
    self.augmentation = iaa.Sometimes(
        0.5,
        iaa.SomeOf(
            (1, 6),
            [
                iaa.Dropout([0.05, 0.2]),  # drop 5% or 20% of all pixels
                iaa.Sharpen((0.1, 1.0)),  # sharpen the image
                iaa.GaussianBlur(sigma=(2., 3.5)),
                iaa.OneOf([
                    iaa.GaussianBlur(sigma=(2., 3.5)),
                    iaa.AverageBlur(k=(2, 5)),
                    iaa.BilateralBlur(d=(7, 12), sigma_color=(10, 250),
                                      sigma_space=(10, 250)),
                    iaa.MedianBlur(k=(3, 7)),
                ]),
                # iaa.Fliplr(1.0),
                # iaa.Flipud(1.0),
                iaa.AddElementwise((-50, 50)),
                iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255)),
                iaa.JpegCompression(compression=(80, 95)),
                iaa.Multiply((0.5, 1.5)),
                iaa.MultiplyElementwise((0.5, 1.5)),
                iaa.ReplaceElementwise(0.05, [0, 255]),
                # Brightness jitter on the HSV value channel.
                iaa.WithColorspace(to_colorspace="HSV",
                                   from_colorspace="RGB",
                                   children=iaa.WithChannels(
                                       2, iaa.Add((-10, 50)))),
                # Saturation or brightness jitter (one of the two).
                iaa.OneOf([
                    iaa.WithColorspace(to_colorspace="HSV",
                                       from_colorspace="RGB",
                                       children=iaa.WithChannels(
                                           1, iaa.Add((-10, 50)))),
                    iaa.WithColorspace(to_colorspace="HSV",
                                       from_colorspace="RGB",
                                       children=iaa.WithChannels(
                                           2, iaa.Add((-10, 50)))),
                ]),
            ],
            random_order=True))
    # Fixed seed for reproducible sampling order.
    torch.manual_seed(23)
    with open(list_file) as f:
        lines = f.readlines()
    if little_train:
        lines = lines[:64]
    for line in lines:
        splited = line.strip().split()
        self.fnames.append(splited[0])
    self.num_samples = len(self.fnames)
from imgaug.augmentables.segmaps import SegmentationMapsOnImage import skimage import numpy as np ##path to dataset path_to_images = '/home/avaneesh/Desktop/Data_Set_15_09_2020_12_08_03/images' path_to_masks = '/home/avaneesh/Desktop/Data_Set_15_09_2020_12_08_03/masks' # list of names image_name_list = os.listdir(path_to_images) mask_name_list = os.listdir(path_to_masks) ##augmentation functions used in sequences pipeline seq1 = iaa.Sequential([ iaa.AdditiveGaussianNoise(scale=(0, 0.3), per_channel=True), iaa.Add((-25, 60)), iaa.JpegCompression(compression=(30, 87)), iaa.MedianBlur(k=(1, 3)), iaa.PiecewiseAffine(scale=(0.01, 0.05)), iaa.Fliplr(0.5), iaa.Flipud(0.5), iaa.CoarseDropout((0.0, 0.09), size_percent=(0.02, 0.15)) ], random_order=True) seq2 = iaa.Sequential([ iaa.GaussianBlur(sigma=(0, 3)), iaa.MultiplyHueAndSaturation((0.5, 1.5)), iaa.JpegCompression(compression=(45, 87)), iaa.PiecewiseAffine(scale=(0.01, 0.05)), iaa.SaltAndPepper(p=(0.1, 0.15), per_channel=True), iaa.Fliplr(0.5),
def __init__(self, base_data_path, train, transform, id_name_path, device, little_train=False, read_mode='jpeg4py', input_size=224, C=2048, test_mode=False):
    """Set up a classification dataset.

    Scans *base_data_path* for samples, builds a class-id/name map from
    *id_name_path*, and constructs an augmentation pipeline mixing
    photometric degradations (applied with probability 0.5) with
    horizontal/vertical flips.

    Parameters: *input_size* is the resize target, *C* the feature/class
    dimension, *read_mode* the image decoding backend, *little_train* /
    *test_mode* toggle reduced-data behaviour elsewhere in the class.
    """
    print('data init')
    self.train = train
    self.base_data_path = base_data_path
    self.transform = transform
    self.fnames = []
    self.resize = input_size
    self.little_train = little_train
    self.id_name_path = id_name_path
    self.C = C
    self.read_mode = read_mode
    self.device = device
    self._test = test_mode
    self.fnames = self.get_data_list(base_data_path)
    self.num_samples = len(self.fnames)
    self.get_id_map()
    self.cls_path_map = self.get_cls_pathlist_map()

    # Helper: apply the given augmenter in 50% of all cases.
    self.img_augsometimes = lambda aug: iaa.Sometimes(0.5, aug)
    self.augmentation = iaa.Sequential(
        [
            # augment without change bboxes
            self.img_augsometimes(
                iaa.SomeOf(
                    (1, 4),
                    [
                        iaa.Dropout([0.05, 0.2
                                     ]),  # drop 5% or 20% of all pixels
                        iaa.Sharpen((0.1, .8)),  # sharpen the image
                        # iaa.GaussianBlur(sigma=(2., 3.5)),
                        iaa.OneOf([
                            iaa.GaussianBlur(sigma=(2., 3.5)),
                            iaa.AverageBlur(k=(2, 5)),
                            iaa.BilateralBlur(d=(7, 12),
                                              sigma_color=(10, 250),
                                              sigma_space=(10, 250)),
                            iaa.MedianBlur(k=(3, 7)),
                        ]),
                        iaa.AddElementwise((-50, 50)),
                        iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255)),
                        iaa.JpegCompression(compression=(80, 95)),
                        iaa.Multiply((0.5, 1.5)),
                        iaa.MultiplyElementwise((0.5, 1.5)),
                        iaa.ReplaceElementwise(0.05, [0, 255]),
                        # iaa.WithColorspace(to_colorspace="HSV", from_colorspace="RGB",
                        #                    children=iaa.WithChannels(2, iaa.Add((-10, 50)))),
                        # Saturation or brightness jitter in HSV space.
                        iaa.OneOf([
                            iaa.WithColorspace(to_colorspace="HSV",
                                               from_colorspace="RGB",
                                               children=iaa.WithChannels(
                                                   1, iaa.Add((-10, 50)))),
                            iaa.WithColorspace(to_colorspace="HSV",
                                               from_colorspace="RGB",
                                               children=iaa.WithChannels(
                                                   2, iaa.Add((-10, 50)))),
                        ]),
                        iaa.Affine(scale={
                            "x": (0.8, 1.2),
                            "y": (0.8, 1.2)
                        },
                                   translate_percent={
                                       "x": (-0.2, 0.2),
                                       "y": (-0.2, 0.2)
                                   },
                                   rotate=(-25, 25),
                                   shear=(-8, 8))
                    ],
                    random_order=True)),
            iaa.Fliplr(.5),
            iaa.Flipud(.25),
        ],
        random_order=True)
def __init__(self, rgb_mean, randomImg, insize): sometimes = lambda aug: iaa.Sometimes(0.7, aug) self.rand_img_dir = randomImg self.rgb_mean = rgb_mean self.inp_dim = insize # self.randomImgList = glob.glob( randomImg + '*.jpg') self.aug = iaa.Sequential([ sometimes(iaa.Affine( scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis) rotate=(-25, 25), # rotate by -45 to +45 degrees shear=(-6, 6), # shear by -16 to +16 degrees order=[0, 1], # use nearest neighbour or bilinear interpolation (fast) cval=(0, 255), # if mode is constant, use a cval between 0 and 255 mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples) )), iaa.OneOf([ iaa.Fliplr(0.5), iaa.GaussianBlur( sigma=iap.Uniform(0.0, 1.0) ), iaa.BlendAlphaSimplexNoise( foreground=iaa.BlendAlphaSimplexNoise( foreground=iaa.EdgeDetect(1.0), background=iaa.LinearContrast((0.1, .8)), per_channel=True ), background=iaa.BlendAlphaFrequencyNoise( exponent=(-.5, -.1), foreground=iaa.Affine( rotate=(-10, 10), translate_px={"x": (-1, 1), "y": (-1, 1)} ), # background=iaa.AddToHueAndSaturation((-4, 4)), # per_channel=True ), per_channel=True, aggregation_method="max", sigmoid=False ), iaa.BlendAlpha( factor=(0.2, 0.8), foreground=iaa.Sharpen(1.0, lightness=2), background=iaa.CoarseDropout(p=0.1, size_px=8) ), iaa.BlendAlpha( factor=(0.2, 0.8), foreground=iaa.Affine(rotate=(-5, 5)), per_channel=True ), iaa.MotionBlur(k=15, angle=[-5, 5]), iaa.BlendAlphaCheckerboard(nb_rows=2, nb_cols=(1, 4), foreground=iaa.AddToHue((-10, 10))), iaa.BlendAlphaElementwise((0, 1.0), iaa.AddToHue(10)), iaa.BilateralBlur( d=(3, 10), sigma_color=(1, 5), sigma_space=(1, 5)), iaa.AdditiveGaussianNoise(scale=0.02 * 255), iaa.AddElementwise((-5, 5), per_channel=0.5), iaa.AdditiveLaplaceNoise(scale=0.01 * 255), iaa.AdditivePoissonNoise(20), 
iaa.Cutout(fill_mode="gaussian", fill_per_channel=True), iaa.CoarseDropout(0.02, size_percent=0.1), iaa.SaltAndPepper(0.1, per_channel=True), iaa.JpegCompression(compression=(70, 99)), iaa.ImpulseNoise(0.02), iaa.Dropout(p=(0, 0.04)), iaa.Sharpen(alpha=0.1), ]) # oneof ])