Example #1
def hard_transform(image_size: int = 256, p: float = 0.5, **kwargs):
    """Hard augmentations (on training)"""
    _add_transform_default_params(kwargs)

    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5,
                                 p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
        PadIfNeeded(image_size, image_size, border_mode=cv2.BORDER_CONSTANT),
    ], **kwargs)
    return transforms
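All of these snippets return an albumentations Compose pipeline, which is applied by calling it with keyword targets and reading the result out of the returned dict. Note that _add_transform_default_params in Example #1 is a helper from the surrounding project (it fills in default keyword arguments for Compose) and is not part of albumentations itself. A minimal usage sketch, with a dummy image standing in for real data:

import numpy as np

transform = hard_transform(image_size=256, p=0.5)
image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # dummy input
augmented = transform(image=image)["image"]  # Compose returns a dict of results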
Example #2
    def train_aug(self, image, label):
        aug = Compose(
            [
                OneOf(
                    [CLAHE(), IAASharpen(), IAAEmboss()], p=0.5),
                # OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
                # OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1)], p=0.2),
                RandomContrast(),
                RandomBrightness(),
                # ChannelShuffle(),
                RandomRotate90(),
                Flip(),
                # RandomScale(scale_limit=(0.0, 0.1)),
                OneOf([
                    ElasticTransform(),
                    OpticalDistortion(),
                    GridDistortion(),
                    IAAPiecewiseAffine()
                ],
                      p=0.5),
                # HueSaturationValue(p=0.3),
            ],
            p=0.9)
        augmented = aug(image=image, mask=label)
        augmented = ToGray(p=1)(image=augmented['image'],
                                mask=augmented['mask'])
        augmented = RandomCrop(256, 256)(image=augmented['image'],
                                         mask=augmented['mask'])
        image, label = augmented['image'], augmented['mask']

        return image, label
Example #3
    def init_augmentations(self):
        common = [
            HorizontalFlip(),
            Rotate(limit=10),
            RandomBrightnessContrast(),
            ToGray(p=0.05)
        ]

        random_crop_aug = [
            RandomResizedCrop(height=self.params.input_height,
                              width=self.params.input_width,
                              scale=(0.35, 1.0))
        ]
        random_crop_aug.extend(common)

        simple_resize_aug = [
            Resize(height=self.params.input_height,
                   width=self.params.input_width)
        ]
        simple_resize_aug.extend(common)

        crop = self.get_aug(random_crop_aug, min_visibility=0.5)

        resize = self.get_aug(simple_resize_aug, min_visibility=0.5)

        just_resize = self.get_aug([
            Resize(height=self.params.input_height,
                   width=self.params.input_width)
        ])

        self.crop_aug = crop
        self.resize_aug = resize
        self.just_resize = just_resize
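The init_augmentations snippet above relies on a self.get_aug(...) helper that is not shown. Given the min_visibility argument, it presumably wraps the transform list in Compose together with bounding-box handling; a minimal sketch under that assumption (the bbox format and label_fields name are guesses, not the original code):

from albumentations import BboxParams, Compose

def get_aug(transform_list, min_visibility=0.0):
    # Hypothetical reconstruction: compose the transforms and attach bbox
    # parameters so boxes that end up mostly invisible after cropping are dropped.
    return Compose(
        transform_list,
        bbox_params=BboxParams(format="pascal_voc",
                               label_fields=["labels"],
                               min_visibility=min_visibility),
    )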
Example #4
def hard_transform(image_size: int = 256, p: float = 0.5):
    """Hard augmentations"""
    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
    return transforms
Example #5
 def apply(self, img, **params):
     thr, _ = cv2.threshold(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
     self.solarize = Solarize(thr, p=1, always_apply=True)
     img = ToGray(p=1)(image=img, force_apply=True)["image"]
     img = self.solarize(image=img, force_apply=True)["image"]
     img = cv2.medianBlur(img, 3)
     return img
Example #6
def create_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=0.7),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
    ])
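Several examples on this page use IsotropicResize, which is not an albumentations built-in but a custom transform defined in the surrounding projects. A sketch of what such a transform might look like, assuming it rescales the longer side to max_side while preserving aspect ratio and switches interpolation depending on whether it is down- or up-scaling (an illustrative reconstruction, not the original implementation):

import cv2
from albumentations.core.transforms_interface import ImageOnlyTransform

class IsotropicResize(ImageOnlyTransform):
    def __init__(self, max_side, interpolation_down=cv2.INTER_AREA,
                 interpolation_up=cv2.INTER_CUBIC, always_apply=False, p=1.0):
        super().__init__(always_apply=always_apply, p=p)
        self.max_side = max_side
        self.interpolation_down = interpolation_down
        self.interpolation_up = interpolation_up

    def apply(self, img, **params):
        h, w = img.shape[:2]
        scale = self.max_side / max(h, w)
        if scale == 1.0:
            return img
        interpolation = (self.interpolation_down if scale < 1.0
                         else self.interpolation_up)
        return cv2.resize(img, (int(w * scale), int(h * scale)),
                          interpolation=interpolation)

    def get_transform_init_args_names(self):
        return ("max_side", "interpolation_down", "interpolation_up")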
Example #7
def hard_transform(image_size=224, p=0.5):
    transforms = [
        Cutout(
            num_holes=4,
            max_w_size=image_size // 4,
            max_h_size=image_size // 4,
            p=p
        ),
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf(
            [
                HueSaturationValue(p=p),
                ToGray(p=p),
                RGBShift(p=p),
                ChannelShuffle(p=p),
            ]
        ),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ]
    transforms = Compose(transforms)
    return transforms
Example #8
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(p=0.5),
        ToGray(p=0.1),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
    ],
                   p=p)
Example #9
def generate_aug_images_gray_scale(path=PATH_TO_IMAGES):
    """
    Generates grayscale-augmented images for a specific folder
    Augmentations:
        - Convert the input RGB image to grayscale
    """
    log.info('Generating grayscale augmentations of the images...')
    log.info('Path of the images: ' + path)
    for i, pig_name in enumerate(os.listdir(path)):
        img_path = os.path.join(path, pig_name)
        image_names = glob.glob(os.path.join(img_path, 'DSC*'))
        for image_name in image_names:
            image_name = os.path.basename(image_name)
            img_orig = cv2.imread(os.path.join(img_path, image_name))
            img_orig = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
            aug = ToGray(p=0.5)
            pig_img_aug3 = aug.apply(img_orig)
            save_aug_image(image_name, img_path, pig_img_aug3, 'GS-')
        log.info('Augmentation in process GS: ' + str(i))
    log.info('Augmentation process is finished')
Example #10
def generate_aug_images(path=PATH_TO_IMAGES):
    """
    Generates augmented images for a specific folder
    Augmentations:
        - Randomly change brightness and contrast of the input image
        - Apply Contrast Limited Adaptive Histogram Equalization to the input image
        - Convert the input RGB image to grayscale
        - Blur the input image using a random-sized kernel
        - Simulate fog on the input image
        - Randomly change hue, saturation and value of the input image
    """
    log.info('Generating augmentations of the images...')
    log.info('Path of the images: ' + path)
    for i, pig_name in enumerate(os.listdir(path)):
        img_path = os.path.join(path, pig_name)
        image_names = glob.glob(os.path.join(img_path, 'DSC*'))
        for image_name in image_names:
            image_name = os.path.basename(image_name)
            img_orig = cv2.imread(os.path.join(img_path, image_name))
            img_orig = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)

            alpha = 1.2
            aug = RandomBrightnessContrast(p=1)
            pig_img_aug1 = aug.apply(img_orig, alpha=alpha)
            save_aug_image(image_name, img_path, pig_img_aug1, 'A1-')

            aug = CLAHE(p=1.0)
            pig_img_aug2 = aug.apply(img_orig)
            save_aug_image(image_name, img_path, pig_img_aug2, 'A2-')

            aug = ToGray(p=0.5)
            pig_img_aug3 = aug.apply(img_orig)
            save_aug_image(image_name, img_path, pig_img_aug3, 'A3-')

            aug = Blur(p=0.5, blur_limit=7)
            pig_img_aug4 = aug.apply(img_orig)
            save_aug_image(image_name, img_path, pig_img_aug4, 'A4-')

            aug = RandomFog(p=1,
                            fog_coef_lower=0.1,
                            fog_coef_upper=0.1,
                            alpha_coef=0.8)
            pig_img_aug5 = aug.apply(img_orig)
            save_aug_image(image_name, img_path, pig_img_aug5, 'A5-')

            aug = HueSaturationValue(hue_shift_limit=200,
                                     sat_shift_limit=70,
                                     val_shift_limit=27,
                                     p=1)
            pig_img_aug6 = aug.apply(img_orig)
            save_aug_image(image_name, img_path, pig_img_aug6, 'A6-')

        print("augmentation in process A1: " + str(i))
    print('augmentation finished (sharpness)')
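The two generate_aug_images* examples above call aug.apply(img_orig) directly. That works for image-only transforms, but it bypasses the probability p and the usual target handling, so ToGray(p=0.5) here runs on every image regardless of p. The more idiomatic pattern, used in most of the other examples, is to call the transform instance itself:

aug = ToGray(p=0.5)
result = aug(image=img_orig)                   # applied with probability 0.5
gray = ToGray(p=1.0)(image=img_orig)["image"]  # always applied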
Example #11
def augument():
    augm = Compose([
        RGBShift(),
        RandomBrightness(),
        RandomContrast(),
        HueSaturationValue(p=0.2),
        ChannelShuffle(),
        CLAHE(),
        Blur(),
        ToGray(),
        CoarseDropout()
    ],
                   p=0.5)
    return augm
Example #12
 def get_tensor(self, path, grayscale=False):
     try:
         img = cv2.imread(path)
         _, name = os.path.split(path)
         #             top_left, bottom_right = self.boxes[name]
         #             top_left = (max(0, top_left[0] - 30), max(0, top_left[1] - 30))
         #             bottom_right = (min(img.shape[0], bottom_right[0] + 30), min(img.shape[1], bottom_right[1] + 30))
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         #             img = img[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
         img = self.transform(image=img)['image']
         if grayscale:
             img = ToGray(p=1.)(image=img)['image']
         img_tensor = torch.from_numpy(
             np.transpose(img, (2, 0, 1)).astype('float32'))
     except Exception:  # fall back to plain loading if anything above fails
         img = cv2.imread(path)
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         img = self.transform(image=img)['image']
         if grayscale:
             img = ToGray(p=1.)(image=img)['image']
         img_tensor = torch.from_numpy(
             np.transpose(img, (2, 0, 1)).astype('float32'))
     return img_tensor
Example #13
def create_train_transforms(size=300):
    # defining an augmentation pipeline
    # this will return a transform function that will perform image augmentation.
    return Compose([
        # Decrease Jpeg, WebP compression of an image
        # with the quality_lower parameter as the lower bound on the image quality
        # and the quality_upper as the upper bound on the image quality
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        # used to apply Gaussian noise to the input picture
        # with p as the probability of applying the transform
        GaussNoise(p=0.1),
        # used to blur the input image using a Gaussian filter with a random kernel size
        # with the blur_limit as the maximum Gaussian kernel size for blurring the input image
        GaussianBlur(blur_limit=3, p=0.05),
        # flips the input image horizontally around the y-axis
        HorizontalFlip(),
        # Select one of transforms to apply
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        # Pad side of the image / max if side is less than desired number
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        # Select one of the following transforms to apply:
        # RandomBrightnessContrast: used to randomly change brightness and contrast of the input image
        # FancyPCA: Augment RGB image using FancyPCA
        # HueSaturationValue: Randomly change hue, saturation and value of the input image
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=0.7),
        # this converts the input RGB image to grayscale. If the mean pixel value for the resulting image is greater than 127, invert the resulting grayscale image.
        ToGray(p=0.2),
        # this randomly apply affine transforms: translate, scale and rotate the input.
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
    ])
Example #14
 def __init__(self, root_path):
     self.folder_name = [
         name for name in os.listdir(root_path)
         if os.path.isdir(os.path.join(root_path, name))
     ]  #os.listdir(root_path)[:-1]
     self.root = root_path
     #self.image_paths = list(Path(self.root).rglob('*.jpg'))
     self.json_paths = os.path.join(root_path, 'metadata.json')  # 1
     with open(self.json_paths) as json_file:
         self.json_data = json.load(json_file)
     self.transform = Compose([
         Resize(size, size),
         ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
         GaussNoise(p=0.1),
         GaussianBlur(blur_limit=3, p=0.05),
         HorizontalFlip(p=0.5),
         OneOf([
             IsotropicResize(max_side=size,
                             interpolation_down=cv2.INTER_AREA,
                             interpolation_up=cv2.INTER_CUBIC),
             IsotropicResize(max_side=size,
                             interpolation_down=cv2.INTER_AREA,
                             interpolation_up=cv2.INTER_LINEAR),
             IsotropicResize(max_side=size,
                             interpolation_down=cv2.INTER_LINEAR,
                             interpolation_up=cv2.INTER_LINEAR),
         ],
               p=0.7),
         PadIfNeeded(min_height=size,
                     min_width=size,
                     border_mode=cv2.BORDER_CONSTANT),
         OneOf(
             [RandomBrightnessContrast(),
              FancyPCA(),
              HueSaturationValue()],
             p=0.7),
         ToGray(p=0.1),
         ShiftScaleRotate(shift_limit=0.1,
                          scale_limit=0.2,
                          rotate_limit=10,
                          border_mode=cv2.BORDER_CONSTANT,
                          p=0.5),
     ])
     self.normalize = {
         "mean": [0.485, 0.456, 0.406],
         "std": [0.229, 0.224, 0.225]
     }
     #self.len = len(self.image_paths) #folder len
     self.len = len(self.folder_name)
Example #15
    def __init__(
        self,
        root_dir="",
    ):
        self.root_dir = os.path.join(root_dir, "images")
        ann_path = os.path.join(root_dir, "label.json")

        assert os.path.exists(ann_path), os.listdir(os.path.dirname(ann_path))
        super(TableBank, self).__init__(root=self.root_dir, annFile=ann_path)

        self.albumentation_transforms = Compose([
            OneOf([
                ImageCompression(quality_lower=5, quality_upper=100, p=1.),
                Blur(blur_limit=(3, 5), p=1.),
                GaussNoise(
                    var_limit=(10.0, 151.0), mean=0, always_apply=False, p=1.),
            ],
                  p=0.5),
            IAAAdditiveGaussianNoise(loc=0,
                                     scale=(10.55, 50.75),
                                     per_channel=False,
                                     always_apply=False,
                                     p=0.2),
            OneOf([
                RGBShift(r_shift_limit=105,
                         g_shift_limit=45,
                         b_shift_limit=40,
                         p=1.),
                ToGray(p=0.8),
                RandomBrightnessContrast(brightness_limit=0.2,
                                         contrast_limit=0.2,
                                         brightness_by_max=False,
                                         p=.3),
            ],
                  p=0.5),
            ToTensor()
        ])

        self.h_flip = RandomHorizontalFlip(0.5)

        self.new_ids = []

        for img_id in self.ids:
            path = self.coco.loadImgs(img_id)[0]["file_name"]
            # print(path)
            if os.path.exists(os.path.join(self.root_dir, path)):
                self.new_ids.append(img_id)
Example #16
def strong_aug(p=0.6):
    return Compose([
        RandomShadow(shadow_roi=(0, 0, 1, 1), p=0.75),
        OneOf([MotionBlur(), GaussianBlur()]),
        OneOf([ToGray(), ToSepia()]),
        OneOf([
            InvertImg(),
            RandomBrightnessContrast(brightness_limit=0.75, p=0.75),
            RandomGamma(),
            HueSaturationValue()
        ],
              p=0.75)
    ],
                   bbox_params=BboxParams("pascal_voc",
                                          label_fields=["category_id"],
                                          min_area=0.0,
                                          min_visibility=0.0),
                   p=p)
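Because the Compose above is built with BboxParams("pascal_voc", label_fields=["category_id"], ...), the resulting pipeline must be called with bounding boxes and their labels as well. A short usage sketch with dummy data:

import numpy as np

image = np.zeros((256, 256, 3), dtype=np.uint8)  # dummy input
aug = strong_aug(p=0.6)
out = aug(image=image,
          bboxes=[[20, 30, 120, 160]],  # pascal_voc: x_min, y_min, x_max, y_max
          category_id=[1])
aug_image, aug_boxes = out["image"], out["bboxes"]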
Example #17
def get_strong_augmentations(width, height):
    # TODO maybe consider more augmentations and play with params
    return [
        HorizontalFlip(p=1),
        VerticalFlip(p=1),
        RandomRotate90(p=1),
        Transpose(p=1),
        ToGray(p=1),
        ShiftScaleRotate(p=1),
        RandomCrop(height, width, p=1),
        CenterCrop(height, width, p=1),
        RandomSizedCrop((height - 6, height - 2), height, width, p=1),
        RandomContrast(p=1),
        RandomBrightness(p=1),
        RandomGamma(p=1),
        CLAHE(p=1),
        Blur(blur_limit=3, p=1),
        GaussNoise(p=1)
    ]
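get_strong_augmentations returns a plain list of transforms that each fire with p=1 rather than a composed pipeline, so presumably the caller applies them one at a time, e.g. to produce one augmented variant per transform. A sketch under that assumption, with a dummy image:

import numpy as np

image = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)  # dummy input
variants = [t(image=image)["image"]
            for t in get_strong_augmentations(width=128, height=128)]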
Example #18
def create_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        ToGray(p=0.2)
    ])
Example #19
def hard_transform():
    transforms = [
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.1,
                         rotate_limit=15,
                         border_mode=cv2.BORDER_REFLECT,
                         p=0.5),
        IAAPerspective(scale=(0.02, 0.05), p=0.3),
        RandomBrightnessContrast(brightness_limit=0.2,
                                 contrast_limit=0.2,
                                 p=0.3),
        RandomGamma(gamma_limit=(85, 115), p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(p=0.5),
        ToGray(p=0.2),
        CLAHE(p=0.3),
        RGBShift(p=0.3),
        JpegCompression(quality_lower=50),
    ]
    transforms = Compose(transforms)
    return transforms
Example #20
def train_transformations(prob=1.0):
    return Compose([
        PadIfNeeded(min_height=384,
                    min_width=1280,
                    border_mode=cv2.BORDER_CONSTANT,
                    value=(0, 0, 0),
                    always_apply=True),
        OneOf([HorizontalFlip(p=0.5),
               Rotate(limit=20, p=0.3)], p=0.5),
        OneOf([
            ToGray(p=0.3),
            RandomBrightnessContrast(p=0.5),
            CLAHE(p=0.5),
            IAASharpen(p=0.45)
        ],
              p=0.5),
        RandomShadow(p=0.4),
        HueSaturationValue(p=0.3),
        Normalize(always_apply=True)
    ],
                   p=prob)
Example #21
def strong_aug(p=1):
    return Compose(
        [
            # RandomRotate90(),
            # Flip(),
            # Transpose(),
            HorizontalFlip(p=0.65),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.6),
            # ElasticTransform(p=1),
            OneOf([
                MotionBlur(p=.4),
                MedianBlur(blur_limit=3, p=.5),
                Blur(blur_limit=3, p=.5),
            ],
                  p=0.5),
            ShiftScaleRotate(
                shift_limit=0.01, scale_limit=0.01, rotate_limit=30, p=.8),
            OneOf(
                [
                    OpticalDistortion(p=0.4),
                    # GridDistortion(p=.1),
                    # IAAPiecewiseAffine(p=0.3),
                ],
                p=0.9),
            OneOf(
                [
                    CLAHE(clip_limit=2),
                    IAASharpen(),
                    # IAAEmboss(),
                    RandomContrast(),
                    RandomBrightness(),
                ],
                p=0.6),
            HueSaturationValue(p=0.3),
            ToGray(p=0.75),
        ],
        p=p)
Example #22
def get_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        Resize(height=size, width=size),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(),
               HueSaturationValue()], p=0.5),  # FancyPCA(),
        OneOf([CoarseDropout(), GridDropout()], p=0.2),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])
Example #23
def strong_aug(p=0.75):
    return Compose(
        [
            ShiftScaleRotate(scale_limit=0.1, rotate_limit=90),
            Transpose(),
            #IAAAffine(shear=0.1),
            #IAAPerspective(),
            Cutout(num_holes=20, max_h_size=8, max_w_size=8),
            HorizontalFlip(),
            VerticalFlip(),
            GaussNoise(),
            JpegCompression(),
            #RandomShadow(shadow_roi=(0, 0, 1, 1), p=0.75),
            OneOf([MotionBlur(), GaussianBlur()]),
            OneOf([ToGray(), ToSepia()]),
            RandomBrightnessContrast(brightness_limit=0.75, p=0.75)
        ],
        bbox_params=BboxParams("pascal_voc",
                               label_fields=["category_id"],
                               min_area=0.0,
                               min_visibility=0.5),
        p=p)
Example #24
def mix_transform2(resize):
    return Compose([
        pre_transform(resize=resize),
        OneOf([
            GaussNoise(p=.9),
            MotionBlur(p=.9),
            MedianBlur(p=.9),
        ], p=.6),
        OneOf([
            RandomContrast(p=.9),
            RandomBrightness(p=.9),
        ], p=.6),
        OneOf([ToGray(p=.9), JpegCompression(p=.9)], p=.6),
        HorizontalFlip(p=.6),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0,
                         rotate_limit=10,
                         interpolation=1,
                         border_mode=4,
                         p=0.6),
        post_transform(),
    ])
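mix_transform2 builds on pre_transform and post_transform helpers that are not shown here. In pipelines of this shape the pre-step is typically a resize and the post-step normalization plus tensor conversion; the sketch below is written under that assumption (names and parameter values are guesses, not the original helpers):

from albumentations import Compose, Normalize, Resize
from albumentations.pytorch import ToTensorV2

def pre_transform(resize):
    # Hypothetical: bring every image to a fixed square size first.
    return Resize(resize, resize)

def post_transform():
    # Hypothetical: ImageNet normalization followed by conversion to a tensor.
    return Compose([
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])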
Example #25
 def _transform_fn_(one_batch):
     # augmentation
     #w, h = find_new_size(one_batch['image'].shape)
     aug = get_aug([
         Rotate(limit=20, p=0.3),
         ToGray(p=0.3),
         HueSaturationValue(20, 30, 20, p=0.2),
         Resize(1152, 896)
     ])
     if self.train is True:
         augmented = aug(**one_batch)
     else:
         augmented = one_batch
     new_one_batch = {}
     # image
     new_one_batch['image'] = augmented['image'].astype(
         np.float32) / 255.0
     # annotation
     '''
     pad_num = 15 - len(augmented['bboxes'])
     pad = np.ones((pad_num, 4)) * -1
     new_one_batch['bbox'] = np.concatenate([augmented['bboxes'], pad], axis=0)
     new_one_batch['cls'] = np.array(augmented['category_id']+[-1 for _ in range(pad_num)])
     return new_one_batch
     '''
     new_one_batch['annot'] = []
     for bbox, cat in zip(augmented['bboxes'],
                          augmented['category_id']):
         bbox = [int(x) for x in bbox]
         anno = bbox + [cat]
         new_one_batch['annot'].append(anno)
     new_one_batch['annot'] = np.array(new_one_batch['annot'])
     pad_num = 15 - new_one_batch['annot'].shape[0]
     pad = np.ones((pad_num, 5)) * -1
     new_one_batch['annot'] = np.concatenate(
         [new_one_batch['annot'], pad], axis=0)
     return new_one_batch
Example #26
def strong_aug(image, p=0.5):
    image2 = Compose(
        [  # add noise
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
                ISONoise(),
            ],
                  p=0.2),
            OneOf(
                [  # blur
                    MotionBlur(p=0.1),
                    MedianBlur(blur_limit=3, p=0.1),
                    Blur(blur_limit=3, p=0.1),
                    JpegCompression(p=.1),
                ],
                p=0.2),
            OneOf(
                [  # sharpen
                    CLAHE(clip_limit=2),
                    IAASharpen(),
                    IAAEmboss(),
                    RandomBrightnessContrast(),
                ],
                p=0.3),
            OneOf(
                [  # histogram equalization, contrast, hue shift, PCA
                    HueSaturationValue(),
                    RandomBrightnessContrast(),
                    Equalize(),
                    # FancyPCA(),
                ],
                p=0.3),
            ToGray(p=0.1),
        ],
        p=p)(image=image)['image']
    return image2
Example #27
    def __init__(self,
                 image_fns,
                 gt_boxes=None,
                 label_to_int=None,
                 augment=False,
                 train_image_dir='train_images',
                 test_image_dir='test_images',
                 height=1536,
                 width=1536,
                 feature_scale=0.25):
        self.image_fns = image_fns
        self.gt_boxes = gt_boxes
        self.label_to_int = label_to_int
        self.augment = augment
        self.aug = Compose([
            ShiftScaleRotate(p=0.9,
                             rotate_limit=10,
                             scale_limit=0.2,
                             border_mode=cv2.BORDER_CONSTANT),
            RandomCrop(512, 512, p=1.0),
            ToGray(),
            CLAHE(),
            GaussNoise(),
            GaussianBlur(),
            RandomBrightnessContrast(),
            RandomGamma(),
            RGBShift(),
            HueSaturationValue(),
        ],
                           bbox_params=BboxParams(format='coco',
                                                  min_visibility=0.75))

        self.encoded_cache = None
        self.height = height
        self.width = width
        self.feature_scale = feature_scale
Example #28
            OneOf([
                CLAHE(),
                Solarize(),
                RandomBrightness(),
                RandomContrast(limit=0.2),
                RandomBrightnessContrast(),
            ]),
            JpegCompression(quality_lower=20, quality_upper=100),
            RandomShadow(num_shadows_lower=1, num_shadows_upper=2),
            PadIfNeeded(min_height=64,
                        min_width=100,
                        border_mode=cv2.BORDER_CONSTANT,
                        p=0.5),
            Cutout(num_holes=8, max_h_size=16, max_w_size=16),
            InvertImg(p=0.3),
            ToGray()
            #            GridDistortion(num_steps=10, distort_limit = (0, 0.1),  border_mode = cv2.BORDER_CONSTANT)
            #               OneOf([
            #                   GridDistortion(p=0.1),
            #                   IAAPiecewiseAffine(p=0.3),
            #               ], p=1),
        ])

        charset = open('../data/characters', 'r').read()
        dataset = RealImageLMDB('../data/train_lh_xyl.lmdb',
                                transform=tf,
                                transform2=tf2,
                                character=charset)

        data_loader = DataLoader(dataset,
                                 num_workers=2,
Example #29
    MedianBlur(blur_limit=(3, 7), p=0.6),
    IAAEmboss(strength=(0.2, 0.99), p=0.4)
],
                  p=0.3)

WeatherTfms = RandomSunFlare(src_radius=80, p=0.1)

NoiseTfms = OneOf(
    [
        GaussNoise(p=0.6),
        IAAAdditiveGaussianNoise(p=0.4),  # stronger
        JpegCompression(quality_lower=25, quality_upper=55, p=0.2)
    ],
    p=0.25)

ColorTonesTfms = OneOf([ToSepia(), ToGray()], p=0.3)

ColorChannelTfms = OneOf(
    [ChannelShuffle(),
     HueSaturationValue(val_shift_limit=5),
     RGBShift()],
    p=0.3)

LightingTfms = OneOf(
    [RandomContrast(p=0.1),
     RandomBrightness(p=0.1),
     CLAHE(p=0.8)], p=0.3)

OtherTfms = FancyPCA(alpha=0.4, p=0.4)

# Cell
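Example #29 only defines named transform groups (and the fragment starts partway through an emboss/blur group whose variable name is cut off); the step that combines them is not shown. A sketch of how such groups are typically chained, assuming they are simply listed inside one Compose:

from albumentations import Compose

train_tfms = Compose([
    WeatherTfms,
    NoiseTfms,
    ColorTonesTfms,
    ColorChannelTfms,
    LightingTfms,
    OtherTfms,
])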
Example #30

class SlowAugs(str, Enum):
    rotate = "rotate"
    shift_scale_rotate = "shift_scale_rotate"
    shift_hsv = "shift_hsv"
    equalize = "equalize"
    to_gray = "to_gray"
    resize512 = "resize512"
    resize300 = "resize300"
    resize256 = "resize256"
    resize224 = "resize224"
    contrast = "contrast"
    crop = "crop",
    bright = "bright"


SLOW_AUGS_DICT = frozendict(
    rotate=Rotate(always_apply=True),
    shift_scale_rotate=ShiftScaleRotate(always_apply=True),
    shift_hsv=HueSaturationValue(always_apply=True),
    equalize=Equalize(always_apply=True),
    to_gray=ToGray(always_apply=True),
    resize512=Resize(512, 512, always_apply=True),
    resize300=Resize(300, 300, always_apply=True),
    resize256=Resize(256, 256, always_apply=True),
    resize224=Resize(224, 224, always_apply=True),
    contrast=RandomContrast(always_apply=True),
    crop=RandomCrop(64, 64, always_apply=True),
    bright=RandomBrightness(always_apply=True))
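Since SlowAugs is a str-backed Enum and SLOW_AUGS_DICT is keyed by the same strings, a transform can be looked up by enum member and applied like any other albumentations transform. A short usage sketch with a dummy image:

import numpy as np

image = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)  # dummy input
aug = SLOW_AUGS_DICT[SlowAugs.to_gray.value]  # equivalent to SLOW_AUGS_DICT["to_gray"]
gray = aug(image=image)["image"]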