def get_corrupter(self):
    distortion_augs = OneOf([
        OpticalDistortion(p=1),
        GridDistortion(p=1),
    ], p=1)
    effects_augs = OneOf([
        IAASharpen(p=1),
        IAAEmboss(p=1),
        IAAPiecewiseAffine(p=1),
        IAAPerspective(p=1),
        CLAHE(p=1),
    ], p=1)
    misc_augs = OneOf([
        ShiftScaleRotate(p=1),
        HueSaturationValue(p=1),
        RandomBrightnessContrast(p=1),
    ], p=1)
    blur_augs = OneOf([
        Blur(p=1),
        MotionBlur(p=1),
        MedianBlur(p=1),
        GaussNoise(p=1),
    ], p=1)
    aug = Compose([distortion_augs, effects_augs, misc_augs, blur_augs])
    return aug
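Because `self` is never used inside `get_corrupter`, the pipeline can be smoke-tested standalone. A minimal sketch (assuming `OneOf`, `Compose`, and the transforms above are imported from an albumentations release that still ships the IAA* wrappers):

import numpy as np

corrupter = get_corrupter(None)  # `self` is unused, so None works for a quick test
img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
corrupted = corrupter(image=img)["image"]  # albumentations pipelines take keyword targets
print(corrupted.shape, corrupted.dtype)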
Example #2
def hard_transform(image_size: int = 256, p: float = 0.5, **kwargs):
    """Hard augmentations (on training)"""
    _add_transform_default_params(kwargs)

    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5,
                                 p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
        PadIfNeeded(image_size, image_size, border_mode=cv2.BORDER_CONSTANT),
    ], **kwargs)
    return transforms
def hard_transform(image_size=224, p=0.5):
    transforms = [
        Cutout(
            num_holes=4,
            max_w_size=image_size // 4,
            max_h_size=image_size // 4,
            p=p
        ),
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf(
            [
                HueSaturationValue(p=p),
                ToGray(p=p),
                RGBShift(p=p),
                ChannelShuffle(p=p),
            ]
        ),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ]
    transforms = Compose(transforms)
    return transforms
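Both `hard_transform` variants return a standard albumentations `Compose` that is called with keyword targets. A short usage sketch (hypothetical data; assumes the same albumentations/cv2 imports as the snippets above):

import numpy as np

augs = hard_transform(image_size=224, p=0.5)
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
mask = np.zeros((224, 224), dtype=np.uint8)

sample = augs(image=image, mask=mask)  # spatial transforms are applied to both targets
aug_image, aug_mask = sample["image"], sample["mask"]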
Example #4
    def __init__(self, root_dir= r"C:\Users\Indy-Windows\Desktop\carvana\carvana\data\train", transform=None, image_size=(512, 512)):

        # Initialize the directory tree from the default root path if no directory is provided
        self.root_dir = root_dir
        self.img_dir = os.path.join(self.root_dir, 'images')
        self.mask_dir = os.path.join(self.root_dir, 'masks')

        self.img_transform = transform
        self.num_img = len(os.listdir(self.img_dir))
        self.num_mask = len(os.listdir(self.mask_dir))

        self.img_list = os.listdir(self.img_dir)
        self.mask_list = os.listdir(self.mask_dir)

        self.image_height = image_size[1]
        self.image_width = image_size[0]

        self.transform = transform

        self.album_transform = Compose([
            HorizontalFlip(),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            OneOf([
                ElasticTransform(p=.2),
                IAAPerspective(p=.35),
            ], p=.35)
        ])

        if self.transform is None:
            self.transform = self.album_transform
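The snippet stops at `__init__`; a hypothetical `__getitem__` (not part of the original) showing how `self.transform` would typically be applied jointly to an image and its mask, assuming OpenCV is used for loading:

    def __getitem__(self, idx):
        # Load the image (BGR -> RGB) and its grayscale mask.
        img_path = os.path.join(self.img_dir, self.img_list[idx])
        mask_path = os.path.join(self.mask_dir, self.mask_list[idx])
        image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

        # Passing both targets keeps ShiftScaleRotate / ElasticTransform /
        # IAAPerspective aligned between image and mask.
        augmented = self.transform(image=image, mask=mask)
        return augmented["image"], augmented["mask"]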
Example #5
 def augmentation_pipeline(self, p=0.5):
     return Compose(
         [
             HorizontalFlip(p=0.5),
             OneOf([
                 IAAAdditiveGaussianNoise(),
                 GaussNoise(),
             ], p=0.2),
             OneOf(
                 [
                     MotionBlur(p=0.2),
                     #MedianBlur(blur_limit=3, p=0.1),
                     Blur(blur_limit=3, p=0.1),
                 ],
                 p=0.1),
             OneOf([
                 ShiftScaleRotate(shift_limit=0.0625,
                                  scale_limit=0.2,
                                  rotate_limit=5,
                                  p=0.9),
                 IAAPerspective(scale=(.02, .05))
             ],
                   p=0.3)
         ],
         p=p)
Example #6
def hard_transform(image_size: int = 256, p: float = 0.5):
    """Hard augmentations"""
    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
    return transforms
Example #7
def main():
    a = IAAPerspective(keep_size=False, always_apply=True)
    img = np.zeros([100, 100])
    img[25:75, 25:75] = 255
    key = 0
    while key != 27:
        transformedImg = a(image=img)['image']
        key = imshowWait(img=img,
                         transformedImg=(transformedImg, transformedImg.shape))
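`imshowWait` is a project-specific helper not shown here; the same preview loop written with plain OpenCV calls (a sketch, using a uint8 test image so `cv2.imshow` renders it directly):

import cv2
import numpy as np
from albumentations import IAAPerspective

a = IAAPerspective(keep_size=False, always_apply=True)
img = np.zeros((100, 100), dtype=np.uint8)
img[25:75, 25:75] = 255
while True:
    transformedImg = a(image=img)["image"]
    cv2.imshow("img", img)
    cv2.imshow("transformedImg", transformedImg)
    if cv2.waitKey(0) == 27:  # Esc quits, matching the original loop
        break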
Example #8
def get_transforms(*, data):

    if data == 'train':
        return Compose([
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
            Transpose(p=0.5),
            ShiftScaleRotate(scale_limit=0.2,
                             rotate_limit=0,
                             shift_limit=0.2,
                             p=0.2,
                             border_mode=0),
            IAAAdditiveGaussianNoise(p=0.2),
            IAAPerspective(p=0.5),
            OneOf(
                [
                    CLAHE(p=1),
                    RandomBrightness(p=1),
                    RandomGamma(p=1),
                ],
                p=0.9,
            ),
            OneOf(
                [
                    IAASharpen(p=1),
                    Blur(blur_limit=3, p=1),
                    MotionBlur(blur_limit=3, p=1),
                ],
                p=0.9,
            ),
            OneOf(
                [
                    RandomContrast(p=1),
                    HueSaturationValue(p=1),
                ],
                p=0.9,
            ),
            Compose([VerticalFlip(p=0.5),
                     RandomRotate90(p=0.5)]),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),  #ToTensor(num_classes=2),
        ])
    elif data == 'valid':
        return Compose([
            Resize(256, 256),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),
        ])
def strong_aug(p=0.5):
    return Compose([
        Resize(IMG_SIZE, IMG_SIZE),
        HorizontalFlip(),
        OneOf([
            IAAPerspective(),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.05, rotate_limit=0, p=0.2),
        ]),
        #RandomBrightnessContrast(),
        Normalize(),
    ])
Example #10
def hard_transforms(image_size):
    min_holes, max_holes = 1, 2
    size = 30

    return [
        # Random shifts, scales and rotations (p=0.1)
        ShiftScaleRotate(
            shift_limit=0.2,
            scale_limit=0.2,
            rotate_limit=180,
            border_mode=BORDER_CONSTANT,
            p=0.1
        ),

        IAAPerspective(scale=(0.02, 0.05), p=0.1),

        # Random brightness / contrast (p=0.1)
        RandomBrightnessContrast(
            brightness_limit=0.2, contrast_limit=0.2, p=0.1
        ),

        OneOf([
            GaussNoise(var_limit=1.0, p=1.0),
            MultiplicativeNoise(multiplier=(0.9, 1), p=1.0)
        ], p=0.1),

        OneOf([
            GaussianBlur(blur_limit=3, p=1.0),
            Blur(p=1.0),
        ], p=0.1),

        # CoarseDropout(
        #     min_holes=min_holes,
        #     max_holes=max_holes,
        #     # min_height=image_height // 4,
        #     # max_height=image_height // 4,
        #     # min_width=image_width // 4,
        #     # max_width=image_width // 4,
        #     min_height=size,
        #     max_height=size,
        #     min_width=size,
        #     max_width=size,
        #     fill_value=0,
        #     p=1.0
        # ),

        # Random gamma changes (p=0.1)
        RandomGamma(gamma_limit=(85, 115), p=0.1),
        ImageCompression(
            quality_lower=70,
            quality_upper=100,
            p=0.1
        ),
    ]
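Unlike most examples on this page, `hard_transforms` returns a plain list, so it still has to be wrapped before use. A sketch of the typical wiring (the trailing `Normalize` is illustrative, not part of the original; assumes the transforms referenced above are imported):

from albumentations import Compose, Normalize

train_augs = Compose(hard_transforms(image_size=256) + [Normalize()])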
Example #11
 def get_geoometric(self):
     geometric = [
         ShiftScaleRotate(shift_limit=0.0625,
                          scale_limit=(-.1, .1),
                          rotate_limit=45,
                          p=.5),
         IAAPerspective(scale=(.05, .2), keep_size=True, p=.1),
         #             OneOf([
         #                 OpticalDistortion(p=0.3),
         #                 GridDistortion(p=0.3),
         #                 IAAPiecewiseAffine(p=0.3),
         #             ], p=0.65)
     ]
     return Compose(geometric)
def shiftscalerotate_aug():
    augs_list = [
        OneOf([
            ShiftScaleRotate(scale_limit=.15,
                             rotate_limit=15,
                             border_mode=cv2.BORDER_REPLICATE,
                             p=0.5),
            IAAAffine(shear=20, p=0.5),
            IAAPerspective(p=0.5),
        ],
              p=0.5),
        Normalize(),
        ToTensorV2()
    ]
    return Compose(augs_list, p=1)
 def strong_aug(p=1.0):
     return Compose(
         [
             RandomSizedCrop((100, HEIGHT), HEIGHT, WIDTH, w2h_ratio=1.0, p=1.0),
             Compose(
                 [
                     Flip(),
                     RandomRotate90(),
                     Transpose(),
                     OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
                     OneOf(
                         [MedianBlur(blur_limit=3), Blur(blur_limit=3), MotionBlur()]
                     ),
                     ShiftScaleRotate(args.shift, args.scale, args.rotate),
                     # min_max_height: (height of crop before resizing)
                     # crop_height = randint(min_height, max_height), endpoints included
                     # crop_width = crop_height * w2h_ratio
                     # height, width: height/width after crop and resize, for convenience, just use args for resize
                     OneOf(
                         [
                             GridDistortion(p=0.5),
                             ElasticTransform(p=0.5),
                             IAAPerspective(),
                             IAAPiecewiseAffine(),
                         ]
                     ),
                     OneOf(
                         [
                             RGBShift(args.r_shift, args.g_shift, args.b_shift),
                             HueSaturationValue(
                                 args.hue_shift, args.sat_shift, args.val_shift
                             ),
                             #                     ChannelShuffle(),
                             CLAHE(args.clip),
                             RandomBrightnessContrast(
                                 args.brightness, args.contrast
                             ),
                             RandomGamma(gamma_limit=(80, 120)),
                             #                     ToGray(),
                             ImageCompression(quality_lower=75, quality_upper=100),
                         ]
                     ),
                 ],
                 p=p,
             ),
             ToFloat(max_value=255),
         ]
     )
Example #14
def strong_aug(p=0.5):
    return Compose(
        [
            RandomRotate90(),
            Flip(),
            Transpose(),
            IAAPerspective(),
            OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
            OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1)], p=0.2),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            OneOf([OpticalDistortion(p=0.3), GridDistortion(p=0.1), IAAPiecewiseAffine(p=0.3)], p=0.2),
            OneOf([CLAHE(clip_limit=2), IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
            HueSaturationValue(p=0.3),
            Resize(256, 256, p=1, always_apply=True),
        ],
        p=p,
    )
Example #15
def train_trasforms_standard(conf):
    height = conf['crop_height']
    width = conf['crop_width']
    return Compose([
        RandomSizedCrop2x(height, width, scale_shift=0.5, p=1),
        OneOf([IAAPiecewiseAffine(),
               IAAPerspective(),
               OpticalDistortion(border_mode=cv2.BORDER_CONSTANT),
               GridDistortion(border_mode=cv2.BORDER_CONSTANT),
               ElasticTransform(border_mode=cv2.BORDER_CONSTANT)],
              p=0.3),
        RandomBrightnessContrast(),
        RandomGamma(),
        OneOf([MedianBlur(), GaussianBlur()], p=0.2),
        OneOf([IAAAdditiveGaussianNoise(per_channel=True),GaussNoise()])
    ]
    )
Example #16
def watermark_with_transparency(image, new_object, input_size):
    random_angle = random.randint(-30, 30)
    new_object = imutils.rotate_bound(new_object, random_angle)
    image = Image.fromarray(image)
    new_object = Image.fromarray(new_object)

    width, height = image.size
    mask_width, mask_height = new_object.size
    
    position = random.randint(0,width - mask_width), random.randint(0,height - mask_height)

    mask = np.array(new_object)
    mask_1 = mask < 240
    mask_2 = mask > 5
    mask = np.array(mask_1 & mask_2, dtype='uint8')*255
    #return mask
    mask = Image.fromarray(mask[:,:,0])

    new_image = Image.new('RGB', (width, height), (0,0,0))
    new_mask = Image.new('RGB', (width, height), (0,0,0))
    
    new_mask.paste(mask, position)
    new_image.paste(image, (0,0))
    new_image.paste(new_object, position, mask=mask)
    
    # Blur the pasted-object region and write it back into the composite
    # (the original code computed the blurred patch but never assigned it,
    # and mixed up width/height; the extents below assume position = (x, y)).
    new_image = np.array(new_image)
    y0, x0 = position[1], position[0]
    patch = new_image[y0:y0 + mask_height, x0:x0 + mask_width]
    new_image[y0:y0 + mask_height, x0:x0 + mask_width] = cv2.GaussianBlur(patch, (9, 9), 0)
    
    new_image, new_mask = augment(
        [IAAPerspective(p=1), Resize(p=1, height=input_size[0], width=input_size[1])],
        np.array(new_image),
        np.array(new_mask)
    )
    new_mask = Image.fromarray(new_mask).convert('L')
    new_mask = np.array(new_mask)
    new_mask = cv2.dilate(new_mask,(5,5),iterations = 3)
    new_mask = cv2.morphologyEx(new_mask, cv2.MORPH_CLOSE, (4, 4), 5)
    new_mask = cv2.erode(new_mask,(3, 3),iterations = 3)
    return new_image, new_mask
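`augment` is not defined in this snippet; a plausible sketch of the helper, assuming it simply composes the given transform list and applies it to the image and mask together:

from albumentations import Compose

def augment(transforms, image, mask):
    # Compose once, then apply jointly so the perspective warp and resize
    # hit the image and its mask identically.
    augmented = Compose(transforms)(image=image, mask=mask)
    return augmented["image"], augmented["mask"]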
Example #17
def hard_transform():
    transforms = [
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.1,
                         rotate_limit=15,
                         border_mode=cv2.BORDER_REFLECT,
                         p=0.5),
        IAAPerspective(scale=(0.02, 0.05), p=0.3),
        RandomBrightnessContrast(brightness_limit=0.2,
                                 contrast_limit=0.2,
                                 p=0.3),
        RandomGamma(gamma_limit=(85, 115), p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(p=0.5),
        ToGray(p=0.2),
        CLAHE(p=0.3),
        RGBShift(p=0.3),
        JpegCompression(quality_lower=50),
    ]
    transforms = Compose(transforms)
    return transforms
Example #18
def strong_aug(p=0.8):
    return Compose([
        # RandomRotate90(),
        # Flip(),
        # Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([  # blur
            MotionBlur(p=0.5),
            MedianBlur(blur_limit=3, p=0.5),
            Blur(blur_limit=3, p=0.5),
            JpegCompression(p=1,quality_lower=7,quality_upper=40)
        ], p=1),
        # ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([ 
            IAAPiecewiseAffine(p=1,scale=(0.005, 0.01), nb_rows=4, nb_cols=4),
            IAAPerspective(p=1,scale=(random.uniform(0.01,0.03),random.uniform(0.01, 0.03))),
            ElasticTransform(p=1,alpha=random.randint(50,100), sigma=random.randint(8,13), alpha_affine=0,border_mode=3),
        ], p=0.2),
        OneOf([ 
            ElasticTransform(p=1,alpha=random.randint(50,100), sigma=random.randint(8,13), alpha_affine=0,border_mode=3),
        ], p=0.6),
        OneOf([ 
            OpticalDistortion(p=1,distort_limit=0.2,border_mode=3),
            # GridDistortion(p=1,distort_limit=0.1,border_mode=3),
        ], p=0.1),        
        OneOf([
            CLAHE(clip_limit=2,p=0.5),
            # IAASharpen(),
            IAAEmboss(p=0.5),
            RandomBrightnessContrast(p=1),  # randomly adjust brightness/contrast; how does this differ from the next one?
            HueSaturationValue(p=1),  # randomly adjust HSV values
            RGBShift(p=0.5),  # randomly shift RGB values
            ChannelShuffle(p=0.5),  # shuffle the RGB channels
            InvertImg(p=0.1),  # invert the image (255 - pixel value)
        ], p=0.5),    

    ], p=p) 
Example #19
def watermark_with_transparency(image, new_object, input_size):
    width, height = image.size
    mask_width, mask_height = new_object.size

    position = random.randint(0,width - mask_width), random.randint(0,height - mask_height)

    mask = new_object.point(thresh)

    new_image = Image.new('L', (width, height))
    new_mask = Image.new('L', (width, height))

    new_mask.paste(mask, position)
    new_image.paste(image, (0,0))
    new_image.paste(new_object, position, mask=mask)

    new_image, new_mask = augment(
        [IAAPerspective(p=1), Resize(p=1, height=input_size[0], width=input_size[1])],
        np.array(new_image),
        np.array(new_mask)
    )

    return new_image, new_mask
Example #20
def vanilla_transform(p):
    return Compose([
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(rotate_limit=30,
                         scale_limit=0.15,
                         border_mode=cv2.BORDER_CONSTANT,
                         value=[0, 0, 0],
                         p=0.5),
        IAAAdditiveGaussianNoise(p=0.2),
        IAAPerspective(p=0.5),
        OneOf(
            [
                CLAHE(p=1),
                RandomBrightness(p=1),
                RandomGamma(p=1),
            ],
            p=0.5,
        ),
        OneOf(
            [
                IAASharpen(p=1),
                Blur(blur_limit=3, p=1),
                MotionBlur(blur_limit=3, p=1),
            ],
            p=0.5,
        ),
        OneOf(
            [
                RandomContrast(p=1),
                HueSaturationValue(p=1),
            ],
            p=0.5,
        ),
    ],
                   p=p)
Example #21
def aug_mega_hardcore(p=.95):
    return Compose([
        OneOf([CLAHE(clip_limit=2),
               IAASharpen(p=.25),
               IAAEmboss(p=.25)],
              p=.35),
        OneOf([
            IAAAdditiveGaussianNoise(p=.3),
            GaussNoise(p=.7),
        ], p=.5),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.3),
            Blur(blur_limit=3, p=.5),
        ],
              p=.4),
        OneOf([
            RandomContrast(p=.5),
            RandomBrightness(p=.5),
        ], p=.4),
        ShiftScaleRotate(
            shift_limit=.0, scale_limit=.45, rotate_limit=45, p=.7),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.2),
            ElasticTransform(p=.2),
            IAAPerspective(p=.2),
            IAAPiecewiseAffine(p=.3),
        ],
              p=.6),
        HueSaturationValue(p=.5)
    ],
                   p=p)
def transform(image, mask, image_name, mask_name):

    x, y = image, mask

    rand = random.uniform(0, 1)
    if (rand > 0.5):

        images_name = [f"{image_name}"]
        masks_name = [f"{mask_name}"]
        images_aug = [x]
        masks_aug = [y]

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

        return imagedict, masksdict

    mask_density = np.count_nonzero(y)

    ## Augmenting only images with Gloms
    if (mask_density > 0):
        try:
            h, w, c = x.shape
        except Exception as e:
            image = image[:-1]
            x, y = image, mask
            h, w, c = x.shape

        aug = Blur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x0 = augmented['image']
        y0 = augmented['mask']

        #    aug = CenterCrop(p=1, height=32, width=32)
        #    augmented = aug(image=x, mask=y)
        #    x1 = augmented['image']
        #    y1 = augmented['mask']

        ## Horizontal Flip
        aug = HorizontalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x2 = augmented['image']
        y2 = augmented['mask']

        aug = VerticalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x3 = augmented['image']
        y3 = augmented['mask']

        #      aug = Normalize(p=1)
        #      augmented = aug(image=x, mask=y)
        #      x4 = augmented['image']
        #      y4 = augmented['mask']

        aug = Transpose(p=1)
        augmented = aug(image=x, mask=y)
        x5 = augmented['image']
        y5 = augmented['mask']

        aug = RandomGamma(p=1)
        augmented = aug(image=x, mask=y)
        x6 = augmented['image']
        y6 = augmented['mask']

        ## Optical Distortion
        aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
        augmented = aug(image=x, mask=y)
        x7 = augmented['image']
        y7 = augmented['mask']

        ## Grid Distortion
        aug = GridDistortion(p=1)
        augmented = aug(image=x, mask=y)
        x8 = augmented['image']
        y8 = augmented['mask']

        aug = RandomGridShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x9 = augmented['image']
        y9 = augmented['mask']

        aug = HueSaturationValue(p=1)
        augmented = aug(image=x, mask=y)
        x10 = augmented['image']
        y10 = augmented['mask']

        #        aug = PadIfNeeded(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x11 = augmented['image']
        #        y11 = augmented['mask']

        aug = RGBShift(p=1)
        augmented = aug(image=x, mask=y)
        x12 = augmented['image']
        y12 = augmented['mask']

        ## Random Brightness
        aug = RandomBrightness(p=1)
        augmented = aug(image=x, mask=y)
        x13 = augmented['image']
        y13 = augmented['mask']

        ## Random  Contrast
        aug = RandomContrast(p=1)
        augmented = aug(image=x, mask=y)
        x14 = augmented['image']
        y14 = augmented['mask']

        #aug = MotionBlur(p=1)
        #augmented = aug(image=x, mask=y)
        #   x15 = augmented['image']
        #  y15 = augmented['mask']

        aug = MedianBlur(p=1, blur_limit=5)
        augmented = aug(image=x, mask=y)
        x16 = augmented['image']
        y16 = augmented['mask']

        aug = GaussianBlur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x17 = augmented['image']
        y17 = augmented['mask']

        aug = GaussNoise(p=1)
        augmented = aug(image=x, mask=y)
        x18 = augmented['image']
        y18 = augmented['mask']

        aug = GlassBlur(p=1)
        augmented = aug(image=x, mask=y)
        x19 = augmented['image']
        y19 = augmented['mask']

        aug = CLAHE(clip_limit=1.0,
                    tile_grid_size=(8, 8),
                    always_apply=False,
                    p=1)
        augmented = aug(image=x, mask=y)
        x20 = augmented['image']
        y20 = augmented['mask']

        aug = ChannelShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x21 = augmented['image']
        y21 = augmented['mask']

        aug = ToGray(p=1)
        augmented = aug(image=x, mask=y)
        x22 = augmented['image']
        y22 = augmented['mask']

        aug = ToSepia(p=1)
        augmented = aug(image=x, mask=y)
        x23 = augmented['image']
        y23 = augmented['mask']

        aug = JpegCompression(p=1)
        augmented = aug(image=x, mask=y)
        x24 = augmented['image']
        y24 = augmented['mask']

        aug = ImageCompression(p=1)
        augmented = aug(image=x, mask=y)
        x25 = augmented['image']
        y25 = augmented['mask']

        aug = Cutout(p=1)
        augmented = aug(image=x, mask=y)
        x26 = augmented['image']
        y26 = augmented['mask']

        #       aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
        #       augmented = aug(image=x, mask=y)
        #       x27 = augmented['image']
        #       y27 = augmented['mask']

        #       aug = ToFloat(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x28 = augmented['image']
        #       y28 = augmented['mask']

        aug = FromFloat(p=1)
        augmented = aug(image=x, mask=y)
        x29 = augmented['image']
        y29 = augmented['mask']

        ## Random Brightness and Contrast
        aug = RandomBrightnessContrast(p=1)
        augmented = aug(image=x, mask=y)
        x30 = augmented['image']
        y30 = augmented['mask']

        aug = RandomSnow(p=1)
        augmented = aug(image=x, mask=y)
        x31 = augmented['image']
        y31 = augmented['mask']

        aug = RandomRain(p=1)
        augmented = aug(image=x, mask=y)
        x32 = augmented['image']
        y32 = augmented['mask']

        aug = RandomFog(p=1)
        augmented = aug(image=x, mask=y)
        x33 = augmented['image']
        y33 = augmented['mask']

        aug = RandomSunFlare(p=1)
        augmented = aug(image=x, mask=y)
        x34 = augmented['image']
        y34 = augmented['mask']

        aug = RandomShadow(p=1)
        augmented = aug(image=x, mask=y)
        x35 = augmented['image']
        y35 = augmented['mask']

        aug = Lambda(p=1)
        augmented = aug(image=x, mask=y)
        x36 = augmented['image']
        y36 = augmented['mask']

        aug = ChannelDropout(p=1)
        augmented = aug(image=x, mask=y)
        x37 = augmented['image']
        y37 = augmented['mask']

        aug = ISONoise(p=1)
        augmented = aug(image=x, mask=y)
        x38 = augmented['image']
        y38 = augmented['mask']

        aug = Solarize(p=1)
        augmented = aug(image=x, mask=y)
        x39 = augmented['image']
        y39 = augmented['mask']

        aug = Equalize(p=1)
        augmented = aug(image=x, mask=y)
        x40 = augmented['image']
        y40 = augmented['mask']

        aug = Posterize(p=1)
        augmented = aug(image=x, mask=y)
        x41 = augmented['image']
        y41 = augmented['mask']

        aug = Downscale(p=1)
        augmented = aug(image=x, mask=y)
        x42 = augmented['image']
        y42 = augmented['mask']

        aug = MultiplicativeNoise(p=1)
        augmented = aug(image=x, mask=y)
        x43 = augmented['image']
        y43 = augmented['mask']

        aug = FancyPCA(p=1)
        augmented = aug(image=x, mask=y)
        x44 = augmented['image']
        y44 = augmented['mask']

        #       aug = MaskDropout(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x45 = augmented['image']
        #       y45 = augmented['mask']

        aug = GridDropout(p=1)
        augmented = aug(image=x, mask=y)
        x46 = augmented['image']
        y46 = augmented['mask']

        aug = ColorJitter(p=1)
        augmented = aug(image=x, mask=y)
        x47 = augmented['image']
        y47 = augmented['mask']

        ## ElasticTransform
        aug = ElasticTransform(p=1,
                               alpha=120,
                               sigma=512 * 0.05,
                               alpha_affine=512 * 0.03)
        augmented = aug(image=x, mask=y)
        x50 = augmented['image']
        y50 = augmented['mask']

        aug = CropNonEmptyMaskIfExists(p=1, height=22, width=32)
        augmented = aug(image=x, mask=y)
        x51 = augmented['image']
        y51 = augmented['mask']

        aug = IAAAffine(p=1)
        augmented = aug(image=x, mask=y)
        x52 = augmented['image']
        y52 = augmented['mask']

        #        aug = IAACropAndPad(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x53 = augmented['image']
        #        y53 = augmented['mask']

        aug = IAAFliplr(p=1)
        augmented = aug(image=x, mask=y)
        x54 = augmented['image']
        y54 = augmented['mask']

        aug = IAAFlipud(p=1)
        augmented = aug(image=x, mask=y)
        x55 = augmented['image']
        y55 = augmented['mask']

        aug = IAAPerspective(p=1)
        augmented = aug(image=x, mask=y)
        x56 = augmented['image']
        y56 = augmented['mask']

        aug = IAAPiecewiseAffine(p=1)
        augmented = aug(image=x, mask=y)
        x57 = augmented['image']
        y57 = augmented['mask']

        aug = LongestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x58 = augmented['image']
        y58 = augmented['mask']

        aug = NoOp(p=1)
        augmented = aug(image=x, mask=y)
        x59 = augmented['image']
        y59 = augmented['mask']

        #       aug = RandomCrop(p=1, height=22, width=22)
        #       augmented = aug(image=x, mask=y)
        #       x61 = augmented['image']
        #       y61 = augmented['mask']

        #      aug = RandomResizedCrop(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x63 = augmented['image']
        #      y63 = augmented['mask']

        aug = RandomScale(p=1)
        augmented = aug(image=x, mask=y)
        x64 = augmented['image']
        y64 = augmented['mask']

        #      aug = RandomSizedCrop(p=1, height=22, width=20, min_max_height = [32,32])
        #      augmented = aug(image=x, mask=y)
        #      x66 = augmented['image']
        #      y66 = augmented['mask']

        #      aug = Resize(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x67 = augmented['image']
        #      y67 = augmented['mask']

        aug = Rotate(p=1)
        augmented = aug(image=x, mask=y)
        x68 = augmented['image']
        y68 = augmented['mask']

        aug = ShiftScaleRotate(p=1)
        augmented = aug(image=x, mask=y)
        x69 = augmented['image']
        y69 = augmented['mask']

        aug = SmallestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x70 = augmented['image']
        y70 = augmented['mask']

        images_aug.extend([
            x, x0, x2, x3, x5, x6, x7, x8, x9, x10, x12, x13, x14, x16, x17,
            x18, x19, x20, x21, x22, x23, x24, x25, x26, x29, x30, x31, x32,
            x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x46,
            x47, x50, x51, x52, x54, x55, x56, x57, x58, x59, x64, x68, x69,
            x70
        ])

        masks_aug.extend([
            y, y0, y2, y3, y5, y6, y7, y8, y9, y10, y12, y13, y14, y16, y17,
            y18, y19, y20, y21, y22, y23, y24, y25, y26, y29, y30, y31, y32,
            y33, y34, y35, y36, y37, y38, y39, y40, y41, y42, y43, y44, y46,
            y47, y50, y51, y52, y54, y55, y56, y57, y58, y59, y64, y68, y69,
            y70
        ])

        # `smalllist` is assumed to be a module-level list of name suffixes
        # (one per augmentation) defined elsewhere in the script.
        idx = -1
        images_name = []
        masks_name = []
        for i, m in zip(images_aug, masks_aug):
            if idx == -1:
                tmp_image_name = f"{image_name}"
                tmp_mask_name = f"{mask_name}"
            else:
                tmp_image_name = f"{image_name}_{smalllist[idx]}"
                tmp_mask_name = f"{mask_name}_{smalllist[idx]}"
            images_name.append(tmp_image_name)
            masks_name.append(tmp_mask_name)
            idx += 1

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

    return imagedict, masksdict
train_df = train_df.append(holdout)
print('train_df.shape:', train_df.shape)

nunique = list(train_df.nunique())[1:-2]
print('nunique:', nunique)

# --- IMAGE DATABUNCH ---
if not config['FINETUNE']:
    train_tf = Compose([
        OneOf([
            ShiftScaleRotate(shift_limit=0.0625,
                             scale_limit=0.1,
                             rotate_limit=15,
                             always_apply=True),
            IAAPerspective(always_apply=True),
            IAAPiecewiseAffine(always_apply=True)
        ],
              p=0.5)
    ])

    def new_open_image(fn: PathOrStr,
                       div: bool = True,
                       convert_mode: str = 'L',
                       after_open: Callable = None,
                       transforms=True) -> Image:
        "Return `Image` object created from image in file `fn`."
        with warnings.catch_warnings():
            warnings.simplefilter("ignore",
                                  UserWarning)  # EXIF warning from TiffPlugin
            x = PIL.Image.open(fn).convert(convert_mode)
Example #24
def compose_augmentations(img_height,
                          img_width,
                          flip_p=0.5,
                          translate_p=0.5,
                          distort_p=0.5,
                          color_p=0.5,
                          overlays_p=0.15,
                          blur_p=0.25,
                          noise_p=0.25):
    # Resize
    resize_p = 1 if img_height != 1024 else 0

    # Random sized crop
    if img_height == 1024:
        min_max_height = (896, 960)
    elif img_height == 512:
        min_max_height = (448, 480)
    elif img_height == 256:
        min_max_height = (224, 240)
    else:
        raise NotImplementedError

    return Compose([
        Resize(p=resize_p, height=img_height, width=img_width),
        OneOf([
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Transpose(p=0.5),
            RandomRotate90(p=0.5),
        ],
              p=flip_p),
        OneOf([
            Rotate(p=0.25, limit=10),
            RandomSizedCrop(p=0.5,
                            min_max_height=min_max_height,
                            height=img_height,
                            width=img_width),
            OneOrOther(IAAAffine(p=0.1, translate_percent=0.05),
                       IAAPerspective(p=0.1)),
        ],
              p=translate_p),
        OneOf([
            ElasticTransform(p=0.5,
                             alpha=10,
                             sigma=img_height * 0.05,
                             alpha_affine=img_height * 0.03,
                             approximate=True),
            GridDistortion(p=0.5),
            OpticalDistortion(p=0.5),
            IAAPiecewiseAffine(p=0.25, scale=(0.01, 0.03)),
        ],
              p=distort_p),
        OneOrOther(
            OneOf([
                CLAHE(p=0.5),
                RandomGamma(p=0.5),
                RandomContrast(p=0.5),
                RandomBrightness(p=0.5),
                RandomBrightnessContrast(p=0.5),
            ],
                  p=color_p),
            OneOf([IAAEmboss(p=0.1),
                   IAASharpen(p=0.1),
                   IAASuperpixels(p=0)],
                  p=overlays_p)),
        OneOrOther(
            OneOf([
                Blur(p=0.2),
                MedianBlur(p=0.1),
                MotionBlur(p=0.1),
                GaussianBlur(p=0.1),
            ],
                  p=blur_p),
            OneOf([GaussNoise(p=0.2),
                   IAAAdditiveGaussianNoise(p=0.1)],
                  p=noise_p)),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensor(sigmoid=False),
    ])
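A usage sketch for `compose_augmentations` (assumptions: `ToTensor` comes from `albumentations.pytorch`, and the inputs are a uint8 RGB image with an optional mask):

import numpy as np

train_augs = compose_augmentations(img_height=512, img_width=512)
image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8)

sample = train_augs(image=image, mask=mask)
tensor_image = sample["image"]  # normalized and converted to a tensor by ToTensor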
Example #25
def do_augmentation(dataset_dir, output_dir, file_ext, strAugs):
    print("hi")

    for indDataset in dataset_dir:
        files_list = os.listdir(indDataset)
        imagesList = filterImages(files_list, file_ext)

        for augstr in strAugs:
            for image_name in imagesList:
                try:
                    base_name = os.path.splitext(image_name)[0]
                    image = cv2.imread(indDataset + image_name)
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

                    tree = ET.parse(indDataset + base_name + ".xml")
                    root = tree.getroot()

                    bbox = []
                    label = []
                    idx = 0
                    xminxml = []
                    yminxml = []
                    xmaxxml = []
                    ymaxxml = []  # save to a new array to prevent out-of-order access

                    for xmin in root.iter('xmin'):
                        bbox.append([int(float(xmin.text))])
                        xminxml.append(xmin)

                    for ymin in root.iter('ymin'):
                        bbox[idx].append(int(float(ymin.text)))
                        idx += 1
                        yminxml.append(ymin)

                    idx = 0
                    for xmax in root.iter('xmax'):
                        bbox[idx].append(int(float(xmax.text)))
                        idx += 1
                        xmaxxml.append(xmax)

                    idx = 0
                    for ymax in root.iter('ymax'):
                        bbox[idx].append(int(float(ymax.text)))
                        idx += 1
                        ymaxxml.append(ymax)

                    idx = 0
                    for name in root.iter('name'):
                        label.append(name.text)
                        idx += 1

                    height, width, channels = image.shape
                    annotations = {
                        'image': image.copy(),
                        'bboxes': bbox,
                        'category_id': label
                    }
                    # category_id_to_name = {'car': 'car', 'tree': 'tree', 'house': 'house', 'pool':'pool'}
                    # category_id_to_name = {'car': 'car', 'truck': 'truck', 'bike': 'bike'}
                    category_id_to_name = {'cigarette': 'cigarette'}
                    # classNames = {1: 'parking sign',
                    #               2: 'stop sign',
                    #               3: 'tunnel sign',}
                    # visualize(annotations, category_id_to_name)
                    # plt.show()
                    ###augment the image
                    ###random crop
                    aug = get_aug([
                        HorizontalFlip(p=0.5),
                        # RandomSizedBBoxSafeCrop(height=300, width=300, p=0.3),
                        RandomBrightnessContrast(brightness_limit=0.1,
                                                 contrast_limit=0.1,
                                                 p=0.3),
                        # RandomRain(blur_value=2, p=0.5),
                        RandomSunFlare(p=0.3, src_radius=50),
                        # Cutout(max_h_size=20, max_w_size=20, p=0.4),
                        IAAPerspective(scale=(0.1, 0.1), p=0.4),
                        # ShiftScaleRotate(scale_limit=0.2, border_mode=cv2.BORDER_CONSTANT, p=1.0),
                        # RandomScale(p=0.3),
                        # Rotate(p=0.3, border_mode=cv2.BORDER_CONSTANT, limit=30),
                        # RandomGamma(p=1.0),
                        IAAPiecewiseAffine(scale=(0.01, 0.01), p=0.4)
                    ])
                    #
                    # aug = get_aug([HorizontalFlip(p=1)])
                    augmented = aug(**annotations)
                    # print(augmented)
                    # visualize(augmented, category_id_to_name)
                    # plt.show()
                    # print(augmented)
                    if augmented["bboxes"] == []:
                        continue  # if it could not generate labels
                    print(len(augmented["bboxes"]))
                    idx = 0
                    for xmin in xminxml:
                        xmin.text = str(int(augmented['bboxes'][idx][0]))
                        idx += 1

                    idx = 0
                    for ymin in yminxml:
                        ymin.text = str(int(augmented['bboxes'][idx][1]))
                        idx += 1

                    idx = 0
                    for xmax in xmaxxml:
                        xmax.text = str(int(augmented['bboxes'][idx][2]))
                        idx += 1

                    idx = 0
                    for ymax in ymaxxml:
                        ymax.text = str(int(augmented['bboxes'][idx][3]))
                        idx += 1

                    for fileName in root.iter('filename'):
                        fileName.text = base_name + augstr + file_ext
                    # plt.show()

                    # write to file
                    tree.write(output_dir + base_name + augstr + ".xml")
                    image = cv2.cvtColor(augmented['image'], cv2.COLOR_RGB2BGR)
                    cv2.imwrite(output_dir + base_name + augstr + file_ext,
                                image)
                    print("saved image ", base_name + augstr + file_ext)

                except Exception as e:
                    print(e)
                    print("Exception: ", augmented)
                    # cv2.imshow("augmented", augmented["image"])
                    # cv2.waitKey(0)
    #
    ##Create validation file###
    # create_validation_file(output_dir)
    # print("Created Validation Files")
    #
    print("copying original")
    ###Copy original into source directory
    for folder in dataset_dir:
        filenames = os.listdir(folder)
        print(filenames)
        for name in filenames:
            print("in: ", folder + name)
            print("out: ", output_dir + name)
            print("name:", name)
            shutil.copy(folder + name, output_dir + name)
            print("moved: " + name)
    print("Done")
import numpy as np
import cv2
from matplotlib import pyplot as plt
from albumentations import (HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
                            Transpose, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
                            IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
                            IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose)

image = cv2.imread("test.jpg", 1)  # BGR
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
aug = HorizontalFlip(p=1)
img_horizontalflip = aug(image=image)['image']
aug = IAAPerspective(p=1)
img_IAAPerspective = aug(image=image)['image']
aug = ShiftScaleRotate(p=1)
img_ShiftScaleRotate = aug(image=image)['image']

aug = GaussNoise(var_limit=10, p=1)
img_gaussNoise = aug(image=image)['image']


def strong_aug(p=.5):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=.2),
        OneOf([
Example #27
def apply_image_augmentation(augmentator, image):
    augmented_image = augmentator(image=image)["image"]
    return augmented_image


def horizontal_stack_images(image_1, image_2):
    return np.hstack((image_1, image_2))


if __name__ == "__main__":
    print("[INFO] Augmentation example, press 'q' to finish the example")
    # Instantiate the augmentation object
    augmentator = Compose([
        Rotate(15),
        Blur(5),
        IAAPerspective(scale=(0.025, 0.05), p=0.8),
        RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.35)
    ],
                          p=0.7)

    dataset, (characters_images,
              characters_labels) = read_hdf5_dataset("characters_dataset.hdf5")

    for idx, image in enumerate(characters_images):
        augmented_image = apply_image_augmentation(augmentator, image)
        cv2.imshow("Character x Augmented character",
                   horizontal_stack_images(image, augmented_image))
        print("[INFO] Label => ", characters_labels[idx])

        key = cv2.waitKey(0) & 0xff
        if key == ord('q'):
# %%
# Image Compression
aug = ImageCompression(quality_lower=50, quality_upper=50)
image8 = (image * 256).astype("uint8")
augmented = aug(image=image8, mask=mask)

image_scaled = augmented["image"]
mask_scaled = augmented["mask"]

visualize(image_scaled, mask_scaled, original_image=image8, original_mask=mask)


# %%
# IAAPerspective
aug = IAAPerspective()
image8 = (image * 256).astype("uint8")
mask8 = mask.astype("uint8")
augmented = aug(image=image8, mask=mask8)

image_scaled = augmented["image"]
mask_scaled = augmented["mask"]

visualize(
    image_scaled, mask_scaled, original_image=image8, original_mask=mask8
)


# %%
# MultiplicativeNoise
aug = MultiplicativeNoise(multiplier=(0.8, 1.2))
Example #29
def perspective(image, mask):
    aug = IAAPerspective(p=1.)
    augmented = aug(image=image, mask=mask)
    return augmented['image'], augmented['mask']
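Calling the helper above on a matching image/mask pair (a sketch; random arrays stand in for real data):

import numpy as np

image = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
mask = (np.random.rand(128, 128) > 0.5).astype(np.uint8)

warped_image, warped_mask = perspective(image, mask)  # both get the same warp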
Example #30
 def IAAP(self, image, scale=0.2, p=1):
     image = image.astype(np.uint8)
     aug = IAAPerspective(scale=scale, p=p)
     output = aug(image=image)['image']
     return output.astype(np.float32)