def box_segmentation_aug():
     return Compose([
         OneOf([
             RandomBrightnessContrast(brightness_limit=0.2, p=0.5),
              RandomGamma(gamma_limit=(50, 150), p=0.5),  # (low, high) tuple; a bare int expands to (-50, 50) and can yield negative gamma
             ChannelShuffle(p=0.5)
         ]),
         OneOf([
             ImageCompression(quality_lower=0, quality_upper=20, p=0.5),
             MultiplicativeNoise(multiplier=(0.3, 0.8),
                                 elementwise=True,
                                 per_channel=True,
                                 p=0.5),
             Blur(blur_limit=(15, 15), p=0.5)
         ]),
         OneOf([
             CenterCrop(height=1000, width=1000, p=0.1),
             RandomGridShuffle(grid=(3, 3), p=0.2),
             CoarseDropout(max_holes=20,
                           max_height=100,
                           max_width=100,
                           fill_value=53,
                           p=0.2)
         ]),
         OneOf([
             GridDistortion(p=0.5, num_steps=2, distort_limit=0.2),
             ElasticTransform(alpha=157, sigma=80, alpha_affine=196, p=0.5),
             OpticalDistortion(distort_limit=0.5, shift_limit=0.5, p=0.5)
         ]),
         OneOf([
             VerticalFlip(p=0.5),
             HorizontalFlip(p=0.5),
             Rotate(limit=44, p=0.5)
         ])
     ])
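For reference, a minimal usage sketch (not part of the original snippet; the arrays are placeholders): an albumentations Compose is called with keyword targets and returns a dict, with "image" and "mask" handled by default.

import numpy as np

aug = box_segmentation_aug()
image = np.random.randint(0, 255, (1024, 1024, 3), dtype=np.uint8)  # placeholder image
mask = np.zeros((1024, 1024), dtype=np.uint8)                       # placeholder mask
out = aug(image=image, mask=mask)
aug_image, aug_mask = out["image"], out["mask"]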
def create_val_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=20, quality_upper=20, p=1),
        # IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
        # PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
        # transforms.resize(size)
    ])
def create_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=0.7),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
    ])
Example #4
def transform_v2(config):
    train_transforms = Compose([
        HorizontalFlip(p=0.5),
        ImageCompression(quality_lower=99, quality_upper=100),
        ShiftScaleRotate(shift_limit=0.25,
                         scale_limit=0.25,
                         rotate_limit=10,
                         border_mode=0,
                         p=0.7),
        Resize(config.image_size, config.image_size),
        Cutout(max_h_size=int(config.image_size * 0.6),
               max_w_size=int(config.image_size * 0.6),
               num_holes=1,
               p=0.5),
        Normalize(),
        ToTensor()
    ])

    test_transforms = Compose([
        Resize(config.image_size, config.image_size),
        Normalize(),
        ToTensor()
    ])

    return train_transforms, test_transforms
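A hedged sketch of how the transform_v2 pipelines are typically consumed inside a PyTorch Dataset; the FaceDataset name and the paths/labels fields are assumptions, not from the original code:

import cv2
import torch

class FaceDataset(torch.utils.data.Dataset):
    def __init__(self, paths, labels, transforms):
        self.paths, self.labels, self.transforms = paths, labels, transforms

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        img = cv2.cvtColor(cv2.imread(self.paths[idx]), cv2.COLOR_BGR2RGB)
        img = self.transforms(image=img)["image"]  # already a tensor, since the pipeline ends in ToTensor()
        return img, self.labels[idx]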
def blend_original(img):
    img = img.copy()
    h, w = img.shape[:2]
    rect = detector(img)
    if len(rect) == 0:
        return img
    else:
        rect = rect[0]
    sp = predictor(img, rect)
    landmarks = np.array([[p.x, p.y] for p in sp.parts()])
    outline = landmarks[[*range(17), *range(26, 16, -1)]]
    Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0])
    raw_mask = np.zeros(img.shape[:2], dtype=np.uint8)
    raw_mask[Y, X] = 1
    face = img * np.expand_dims(raw_mask, -1)

    # add warping
    h1 = random.randint(h - h // 2, h + h // 2)
    w1 = random.randint(w - w // 2, w + w // 2)
    while abs(h1 - h) < h // 3 and abs(w1 - w) < w // 3:
        h1 = random.randint(h - h // 2, h + h // 2)
        w1 = random.randint(w - w // 2, w + w // 2)
    face = cv2.resize(face, (w1, h1), interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]))
    face = cv2.resize(face, (w, h), interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]))

    raw_mask = binary_erosion(raw_mask, iterations=random.randint(4, 10))
    img[raw_mask, :] = face[raw_mask, :]
    if random.random() < 0.2:
        img = OneOf([GaussianBlur(), Blur()], p=0.5)(image=img)["image"]
    # image compression
    if random.random() < 0.5:
        img = ImageCompression(quality_lower=40, quality_upper=95)(image=img)["image"]
    return img
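blend_original relies on module-level detector and predictor objects that are not shown here. Given the 68-landmark indexing above, a plausible setup is dlib (an assumption, not confirmed by the snippet):

import dlib
from scipy.ndimage import binary_erosion  # assumed source of the binary_erosion used above

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # assumed model path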
Example #6
    def __init__(self, is_train: bool, to_pytorch: bool, preprocess):
        if is_train:
            self._aug = Compose([
                preprocess,
                OneOf([
                    Compose([
                        HorizontalFlip(p=0.5),
                        GaussNoise(p=0.5),
                        OneOf([
                            RandomBrightnessContrast(),
                            RandomGamma(),
                        ],
                              p=0.5),
                        Rotate(limit=20, border_mode=cv2.BORDER_CONSTANT),
                        ImageCompression(),
                        CLAHE(),
                        Downscale(scale_min=0.2, scale_max=0.9, p=0.5),
                        ISONoise(p=0.5),
                        MotionBlur(p=0.5)
                    ]),
                    HorizontalFlip(p=0.5)
                ])
            ],
                                p=1)
        else:
            self._aug = preprocess

        self._need_to_pytorch = to_pytorch
def create_train_transforms(conf):
    height = conf['crop_height']
    width = conf['crop_width']
    return Compose([
        SafeRotate(45, p=0.4, border_mode=cv2.BORDER_CONSTANT),
        OneOf([
            RandomSizedCrop(min_max_height=(int(height * 0.7), int(
                height * 1.3)),
                            w2h_ratio=1.,
                            height=height,
                            width=width,
                            p=0.8),
            RandomCrop(height=height, width=width, p=0.2)
        ],
              p=1),
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        ImageCompression(p=0.1),
        Lighting(alphastd=0.3),
        RandomBrightnessContrast(p=0.4),
        RandomGamma(p=0.4),
        OneOf([RGBShift(), HueSaturationValue()], p=0.2)
    ],
                   additional_targets={'image1': 'image'})
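Because this Compose declares additional_targets={'image1': 'image'}, a second image can be run through the same sampled parameters, which is useful for paired inputs. A minimal sketch (frame_a and frame_b are assumed uint8 arrays):

aug = create_train_transforms({'crop_height': 256, 'crop_width': 256})
out = aug(image=frame_a, image1=frame_b)
aug_a, aug_b = out["image"], out["image1"]  # both receive identical augmentation parameters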
Example #8
def transform_v3(config):
    train_transforms = Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        HorizontalFlip(),
        Resize(config.image_size, config.image_size),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=1),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=1),
        ToTensor()
    ])

    test_transforms = Compose([
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        Resize(config.image_size, config.image_size),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=1),
        ToTensor()
    ])

    return train_transforms, test_transforms
Example #9
def hard_transforms(image_size):
    min_holes, max_holes = 1, 2
    size = 30

    return [
        # Random shifts, stretches and turns with a 10% probability
        ShiftScaleRotate(
            shift_limit=0.2,
            scale_limit=0.2,
            rotate_limit=180,
            border_mode=BORDER_CONSTANT,
            p=0.1
        ),

        IAAPerspective(scale=(0.02, 0.05), p=0.1),

        # Random brightness / contrast with a 10% probability
        RandomBrightnessContrast(
            brightness_limit=0.2, contrast_limit=0.2, p=0.1
        ),

        OneOf([
            GaussNoise(var_limit=1.0, p=1.0),
            MultiplicativeNoise(multiplier=(0.9, 1), p=1.0)
        ], p=0.1),

        OneOf([
            GaussianBlur(blur_limit=3, p=1.0),
            Blur(p=1.0),
        ], p=0.1),

        # CoarseDropout(
        #     min_holes=min_holes,
        #     max_holes=max_holes,
        #     # min_height=image_height // 4,
        #     # max_height=image_height // 4,
        #     # min_width=image_width // 4,
        #     # max_width=image_width // 4,
        #     min_height=size,
        #     max_height=size,
        #     min_width=size,
        #     max_width=size,
        #     fill_value=0,
        #     p=1.0
        # ),

        # Random gamma changes with a 10% probability
        RandomGamma(gamma_limit=(85, 115), p=0.1),
        ImageCompression(
            quality_lower=70,
            quality_upper=100,
            p=0.1
        ),
    ]
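hard_transforms returns a plain list rather than a Compose, so it is normally wrapped before use; a minimal sketch under that assumption:

from albumentations import Compose

train_aug = Compose(hard_transforms(image_size=256))
augmented = train_aug(image=image)["image"]  # `image` is an assumed HxWx3 uint8 array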
Example #10
def create_train_transforms(conf):
    height = conf['crop_height']
    width = conf['crop_width']
    return Compose([
        # OneOf([
        #     RandomSizedCrop(min_max_height=(int(height * 0.8), int(height * 1.2)), w2h_ratio=1., height=height,
        #                     width=width, p=0.8),
        #     RandomCrop(height=height, width=width, p=0.2)], p=1),
        HorizontalFlip(),
        ImageCompression(p=0.1),
        RandomBrightnessContrast(p=0.4),
        RandomGamma(p=0.4),
        OneOf([RGBShift(), HueSaturationValue()], p=0.2)
    ])
Example #11
def create_train_transforms(size=300):
    # defining an augmentation pipeline
    # this will return a transform function that will perform image augmentation.
    return Compose([
        # Decrease Jpeg, WebP compression of an image
        # with the quality_lower parameter as the lower bound on the image quality
        # and the quality_upper as the upper bound on the image quality
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        # used to apply Gaussian noise to the input picture
        # with p as the probability of applying the transform
        GaussNoise(p=0.1),
        # used to blur the input image using a Gaussian filter with a random kernel size
        # with the blur_limit as the maximum Gaussian kernel size for blurring the input image
        GaussianBlur(blur_limit=3, p=0.05),
        # flips the input image horizontally around the y-axis
        HorizontalFlip(),
        # Select one of transforms to apply
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        # Pad side of the image / max if side is less than desired number
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        # Select one of the following transforms to apply:
        # RandomBrightnessContrast: used to randomly change brightness and contrast of the input image
        # FancyPCA: Augment RGB image using FancyPCA
        # HueSaturationValue: Randomly change hue, saturation and value of the input image
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=0.7),
        # this converts the input RGB image to grayscale. If the mean pixel value for the resulting image is greater than 127, invert the resulting grayscale image.
        ToGray(p=0.2),
        # randomly applies affine transforms: translate, scale and rotate the input.
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
    ])
Example #12
 def __init__(self, root_path):
     self.folder_name = [
         name for name in os.listdir(root_path)
         if os.path.isdir(os.path.join(root_path, name))
     ]  #os.listdir(root_path)[:-1]
     self.root = root_path
     #self.image_paths = list(Path(self.root).rglob('*.jpg'))
     self.json_paths = os.path.join(root_path, 'metadata.json')  # 1
     with open(self.json_paths) as json_file:
         self.json_data = json.load(json_file)
     self.transform = Compose([
         Resize(size, size),
         ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
         GaussNoise(p=0.1),
         GaussianBlur(blur_limit=3, p=0.05),
         HorizontalFlip(p=0.5),
         OneOf([
             IsotropicResize(max_side=size,
                             interpolation_down=cv2.INTER_AREA,
                             interpolation_up=cv2.INTER_CUBIC),
             IsotropicResize(max_side=size,
                             interpolation_down=cv2.INTER_AREA,
                             interpolation_up=cv2.INTER_LINEAR),
             IsotropicResize(max_side=size,
                             interpolation_down=cv2.INTER_LINEAR,
                             interpolation_up=cv2.INTER_LINEAR),
         ],
               p=0.7),
         PadIfNeeded(min_height=size,
                     min_width=size,
                     border_mode=cv2.BORDER_CONSTANT),
         OneOf(
             [RandomBrightnessContrast(),
              FancyPCA(),
              HueSaturationValue()],
             p=0.7),
         ToGray(p=0.1),
         ShiftScaleRotate(shift_limit=0.1,
                          scale_limit=0.2,
                          rotate_limit=10,
                          border_mode=cv2.BORDER_CONSTANT,
                          p=0.5),
     ])
     self.normalize = {
         "mean": [0.485, 0.456, 0.406],
         "std": [0.229, 0.224, 0.225]
     }
     #self.len = len(self.image_paths) #folder len
     self.len = len(self.folder_name)
Example #13
 def strong_aug(p=1.0):
     return Compose(
         [
             RandomSizedCrop((100, HEIGHT), HEIGHT, WIDTH, w2h_ratio=1.0, p=1.0),
             Compose(
                 [
                     Flip(),
                     RandomRotate90(),
                     Transpose(),
                     OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
                     OneOf(
                         [MedianBlur(blur_limit=3), Blur(blur_limit=3), MotionBlur()]
                     ),
                     ShiftScaleRotate(args.shift, args.scale, args.rotate),
                     # min_max_height: (height of crop before resizing)
                     # crop_height = randint(min_height, max_height), endpoints included
                     # crop_width = crop_height * w2h_ratio
                     # height, width: height/width after crop and resize, for convenience, just use args for resize
                     OneOf(
                         [
                             GridDistortion(p=0.5),
                             ElasticTransform(p=0.5),
                             IAAPerspective(),
                             IAAPiecewiseAffine(),
                         ]
                     ),
                     OneOf(
                         [
                             RGBShift(args.r_shift, args.g_shift, args.b_shift),
                             HueSaturationValue(
                                 args.hue_shift, args.sat_shift, args.val_shift
                             ),
                             #                     ChannelShuffle(),
                             CLAHE(args.clip),
                             RandomBrightnessContrast(
                                 args.brightness, args.contrast
                             ),
                             RandomGamma(gamma_limit=(80, 120)),
                             #                     ToGray(),
                             ImageCompression(quality_lower=75, quality_upper=100),
                         ]
                     ),
                 ],
                 p=p,
             ),
             ToFloat(max_value=255),
         ]
     )
Example #14
    def __init__(
        self,
        root_dir="",
    ):
        self.root_dir = os.path.join(root_dir, "images")
        ann_path = os.path.join(root_dir, "label.json")

        assert os.path.exists(ann_path), os.listdir(os.path.dirname(ann_path))
        super(TableBank, self).__init__(root=self.root_dir, annFile=ann_path)

        self.albumentation_transforms = Compose([
            OneOf([
                ImageCompression(quality_lower=5, quality_upper=100, p=1.),
                Blur(blur_limit=(3, 5), p=1.),
                GaussNoise(
                    var_limit=(10.0, 151.0), mean=0, always_apply=False, p=1.),
            ],
                  p=0.5),
            IAAAdditiveGaussianNoise(loc=0,
                                     scale=(10.55, 50.75),
                                     per_channel=False,
                                     always_apply=False,
                                     p=0.2),
            OneOf([
                RGBShift(r_shift_limit=105,
                         g_shift_limit=45,
                         b_shift_limit=40,
                         p=1.),
                ToGray(p=0.8),
                RandomBrightnessContrast(brightness_limit=0.2,
                                         contrast_limit=0.2,
                                         brightness_by_max=False,
                                         p=.3),
            ],
                  p=0.5),
            ToTensor()
        ])

        self.h_flip = RandomHorizontalFlip(0.5)

        self.new_ids = []

        for img_id in self.ids:
            path = self.coco.loadImgs(img_id)[0]["file_name"]
            # print(path)
            if os.path.exists(os.path.join(self.root_dir, path)):
                self.new_ids.append(img_id)
Example #15
 def __init__(self, width, height):
     self.aug = Compose([
         ShiftScaleRotate(p=0.5,
                          rotate_limit=5,
                          scale_limit=0.05,
                          border_mode=cv2.BORDER_CONSTANT),
         ImageCompression(quality_lower=95, quality_upper=100, p=1),
         IAAAffine(shear=0.2, always_apply=False, p=0.3),
         LongestMaxSize(max_size=width if width > height else height),
         PadIfNeeded(min_height=height,
                     min_width=width,
                     border_mode=cv2.BORDER_CONSTANT)
     ],
                        keypoint_params=KeypointParams(
                            format='xy',
                            label_fields=['pose_id', "join_id"],
                            remove_invisible=True))
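Because KeypointParams declares label_fields=['pose_id', 'join_id'], every call must pass those lists alongside the keypoints. A hedged usage sketch (the enclosing class is not shown, so PoseAug is a placeholder name):

augmenter = PoseAug(width=640, height=480)            # placeholder class name
out = augmenter.aug(image=frame,                      # assumed uint8 frame
                    keypoints=[(120.0, 200.0), (130.0, 210.0)],
                    pose_id=[0, 0],
                    join_id=[5, 6])
kept_keypoints = out["keypoints"]  # keypoints left after remove_invisible filtering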
Example #16
def blend_original(img):
    img = img.copy()
    h, w = img.shape[:2]
    # detect faces in image
    rect = detector(img)
    # if there is no faces return the image
    if len(rect) == 0:
        return img
    else:
        rect = rect[0]
    # predict the landmarks in the image
    sp = predictor(img, rect)
    landmarks = np.array([[p.x, p.y] for p in sp.parts()])
    outline = landmarks[[*range(17), *range(26, 16, -1)]]
    # draw the outline of the landmarks on the image
    Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0])
    raw_mask = np.zeros(img.shape[:2], dtype=np.uint8)
    raw_mask[Y, X] = 1
    face = img * np.expand_dims(raw_mask, -1)

    # add warping
    h1 = random.randint(h - h // 2, h + h // 2)
    w1 = random.randint(w - w // 2, w + w // 2)
    while abs(h1 - h) < h // 3 and abs(w1 - w) < w // 3:
        h1 = random.randint(h - h // 2, h + h // 2)
        w1 = random.randint(w - w // 2, w + w // 2)
    face = cv2.resize(face, (w1, h1),
                      interpolation=random.choice(
                          [cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]))
    face = cv2.resize(face, (w, h),
                      interpolation=random.choice(
                          [cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]))
    # Erosion is a mathematical morphology operation that uses a structuring element for shrinking the shapes in an image.
    # this function performs multi-dimensional binary erosion with a given structuring element.
    raw_mask = binary_erosion(raw_mask, iterations=random.randint(4, 10))
    img[raw_mask, :] = face[raw_mask, :]
    if random.random() < 0.2:
        img = OneOf([GaussianBlur(), Blur()], p=0.5)(image=img)["image"]
    # image compression
    if random.random() < 0.5:
        img = ImageCompression(quality_lower=40,
                               quality_upper=95)(image=img)["image"]
    return img
def create_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        ToGray(p=0.2)
    ])
def get_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        Resize(height=size, width=size),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(),
               HueSaturationValue()], p=0.5),  # FancyPCA(),
        OneOf([CoarseDropout(), GridDropout()], p=0.2),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])
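Since get_train_transforms ends with Normalize and ToTensorV2, the output is already a float32 CHW tensor; a short sketch (frame is an assumed uint8 array):

aug = get_train_transforms(size=300)
tensor = aug(image=frame)["image"]   # shape (3, 300, 300), float32
batch = tensor.unsqueeze(0)          # add a batch dimension before feeding a model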
Example #19
            bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
            target["boxes"] = bbox
            if "masks" in target:
                target["masks"] = target["masks"].flip(-1)
            if "keypoints" in target:
                keypoints = target["keypoints"]
                keypoints = _flip_coco_person_keypoints(keypoints, width)
                target["keypoints"] = keypoints
        return image, target


logger = logging.getLogger(__name__)

table_transforms = Compose([
    OneOf([
        ImageCompression(quality_lower=5, quality_upper=100, p=1.),
        Blur(blur_limit=(3, 5), p=1.),
        GaussNoise(var_limit=(10.0, 151.0), mean=0, always_apply=False, p=1.),
    ],
          p=0.5),
    IAAAdditiveGaussianNoise(loc=0,
                             scale=(10.55, 50.75),
                             per_channel=False,
                             always_apply=False,
                             p=0.2),
    OneOf([
        RGBShift(r_shift_limit=105, g_shift_limit=45, b_shift_limit=40, p=1.),
        ToGray(p=0.8),
        RandomBrightnessContrast(brightness_limit=0.2,
                                 contrast_limit=0.2,
                                 brightness_by_max=False,
                                 p=.3),
    ],
          p=0.5),
    ToTensor()
])

def transform(image, mask, image_name, mask_name):

    x, y = image, mask

    rand = random.uniform(0, 1)
    if (rand > 0.5):

        images_name = [f"{image_name}"]
        masks_name = [f"{mask_name}"]
        images_aug = [x]
        masks_aug = [y]

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

        return imagedict, masksdict

    # Default to returning the original pair so the empty-mask path below
    # does not fall through to undefined dicts.
    imagedict = {f"{image_name}": x}
    masksdict = {f"{mask_name}": y}

    mask_density = np.count_nonzero(y)

    ## Augmenting only images with Gloms
    if (mask_density > 0):
        try:
            h, w, c = x.shape
        except Exception as e:
            image = image[:-1]
            x, y = image, mask
            h, w, c = x.shape

        aug = Blur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x0 = augmented['image']
        y0 = augmented['mask']

        #    aug = CenterCrop(p=1, height=32, width=32)
        #    augmented = aug(image=x, mask=y)
        #    x1 = augmented['image']
        #    y1 = augmented['mask']

        ## Horizontal Flip
        aug = HorizontalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x2 = augmented['image']
        y2 = augmented['mask']

        aug = VerticalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x3 = augmented['image']
        y3 = augmented['mask']

        #      aug = Normalize(p=1)
        #      augmented = aug(image=x, mask=y)
        #      x4 = augmented['image']
        #      y4 = augmented['mask']

        aug = Transpose(p=1)
        augmented = aug(image=x, mask=y)
        x5 = augmented['image']
        y5 = augmented['mask']

        aug = RandomGamma(p=1)
        augmented = aug(image=x, mask=y)
        x6 = augmented['image']
        y6 = augmented['mask']

        ## Optical Distortion
        aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
        augmented = aug(image=x, mask=y)
        x7 = augmented['image']
        y7 = augmented['mask']

        ## Grid Distortion
        aug = GridDistortion(p=1)
        augmented = aug(image=x, mask=y)
        x8 = augmented['image']
        y8 = augmented['mask']

        aug = RandomGridShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x9 = augmented['image']
        y9 = augmented['mask']

        aug = HueSaturationValue(p=1)
        augmented = aug(image=x, mask=y)
        x10 = augmented['image']
        y10 = augmented['mask']

        #        aug = PadIfNeeded(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x11 = augmented['image']
        #        y11 = augmented['mask']

        aug = RGBShift(p=1)
        augmented = aug(image=x, mask=y)
        x12 = augmented['image']
        y12 = augmented['mask']

        ## Random Brightness
        aug = RandomBrightness(p=1)
        augmented = aug(image=x, mask=y)
        x13 = augmented['image']
        y13 = augmented['mask']

        ## Random  Contrast
        aug = RandomContrast(p=1)
        augmented = aug(image=x, mask=y)
        x14 = augmented['image']
        y14 = augmented['mask']

        #aug = MotionBlur(p=1)
        #augmented = aug(image=x, mask=y)
        #   x15 = augmented['image']
        #  y15 = augmented['mask']

        aug = MedianBlur(p=1, blur_limit=5)
        augmented = aug(image=x, mask=y)
        x16 = augmented['image']
        y16 = augmented['mask']

        aug = GaussianBlur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x17 = augmented['image']
        y17 = augmented['mask']

        aug = GaussNoise(p=1)
        augmented = aug(image=x, mask=y)
        x18 = augmented['image']
        y18 = augmented['mask']

        aug = GlassBlur(p=1)
        augmented = aug(image=x, mask=y)
        x19 = augmented['image']
        y19 = augmented['mask']

        aug = CLAHE(clip_limit=1.0,
                    tile_grid_size=(8, 8),
                    always_apply=False,
                    p=1)
        augmented = aug(image=x, mask=y)
        x20 = augmented['image']
        y20 = augmented['mask']

        aug = ChannelShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x21 = augmented['image']
        y21 = augmented['mask']

        aug = ToGray(p=1)
        augmented = aug(image=x, mask=y)
        x22 = augmented['image']
        y22 = augmented['mask']

        aug = ToSepia(p=1)
        augmented = aug(image=x, mask=y)
        x23 = augmented['image']
        y23 = augmented['mask']

        aug = JpegCompression(p=1)
        augmented = aug(image=x, mask=y)
        x24 = augmented['image']
        y24 = augmented['mask']

        aug = ImageCompression(p=1)
        augmented = aug(image=x, mask=y)
        x25 = augmented['image']
        y25 = augmented['mask']

        aug = Cutout(p=1)
        augmented = aug(image=x, mask=y)
        x26 = augmented['image']
        y26 = augmented['mask']

        #       aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
        #       augmented = aug(image=x, mask=y)
        #       x27 = augmented['image']
        #       y27 = augmented['mask']

        #       aug = ToFloat(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x28 = augmented['image']
        #       y28 = augmented['mask']

        aug = FromFloat(p=1)
        augmented = aug(image=x, mask=y)
        x29 = augmented['image']
        y29 = augmented['mask']

        ## Random Brightness and Contrast
        aug = RandomBrightnessContrast(p=1)
        augmented = aug(image=x, mask=y)
        x30 = augmented['image']
        y30 = augmented['mask']

        aug = RandomSnow(p=1)
        augmented = aug(image=x, mask=y)
        x31 = augmented['image']
        y31 = augmented['mask']

        aug = RandomRain(p=1)
        augmented = aug(image=x, mask=y)
        x32 = augmented['image']
        y32 = augmented['mask']

        aug = RandomFog(p=1)
        augmented = aug(image=x, mask=y)
        x33 = augmented['image']
        y33 = augmented['mask']

        aug = RandomSunFlare(p=1)
        augmented = aug(image=x, mask=y)
        x34 = augmented['image']
        y34 = augmented['mask']

        aug = RandomShadow(p=1)
        augmented = aug(image=x, mask=y)
        x35 = augmented['image']
        y35 = augmented['mask']

        aug = Lambda(p=1)
        augmented = aug(image=x, mask=y)
        x36 = augmented['image']
        y36 = augmented['mask']

        aug = ChannelDropout(p=1)
        augmented = aug(image=x, mask=y)
        x37 = augmented['image']
        y37 = augmented['mask']

        aug = ISONoise(p=1)
        augmented = aug(image=x, mask=y)
        x38 = augmented['image']
        y38 = augmented['mask']

        aug = Solarize(p=1)
        augmented = aug(image=x, mask=y)
        x39 = augmented['image']
        y39 = augmented['mask']

        aug = Equalize(p=1)
        augmented = aug(image=x, mask=y)
        x40 = augmented['image']
        y40 = augmented['mask']

        aug = Posterize(p=1)
        augmented = aug(image=x, mask=y)
        x41 = augmented['image']
        y41 = augmented['mask']

        aug = Downscale(p=1)
        augmented = aug(image=x, mask=y)
        x42 = augmented['image']
        y42 = augmented['mask']

        aug = MultiplicativeNoise(p=1)
        augmented = aug(image=x, mask=y)
        x43 = augmented['image']
        y43 = augmented['mask']

        aug = FancyPCA(p=1)
        augmented = aug(image=x, mask=y)
        x44 = augmented['image']
        y44 = augmented['mask']

        #       aug = MaskDropout(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x45 = augmented['image']
        #       y45 = augmented['mask']

        aug = GridDropout(p=1)
        augmented = aug(image=x, mask=y)
        x46 = augmented['image']
        y46 = augmented['mask']

        aug = ColorJitter(p=1)
        augmented = aug(image=x, mask=y)
        x47 = augmented['image']
        y47 = augmented['mask']

        ## ElasticTransform
        aug = ElasticTransform(p=1,
                               alpha=120,
                               sigma=512 * 0.05,
                               alpha_affine=512 * 0.03)
        augmented = aug(image=x, mask=y)
        x50 = augmented['image']
        y50 = augmented['mask']

        aug = CropNonEmptyMaskIfExists(p=1, height=22, width=32)
        augmented = aug(image=x, mask=y)
        x51 = augmented['image']
        y51 = augmented['mask']

        aug = IAAAffine(p=1)
        augmented = aug(image=x, mask=y)
        x52 = augmented['image']
        y52 = augmented['mask']

        #        aug = IAACropAndPad(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x53 = augmented['image']
        #        y53 = augmented['mask']

        aug = IAAFliplr(p=1)
        augmented = aug(image=x, mask=y)
        x54 = augmented['image']
        y54 = augmented['mask']

        aug = IAAFlipud(p=1)
        augmented = aug(image=x, mask=y)
        x55 = augmented['image']
        y55 = augmented['mask']

        aug = IAAPerspective(p=1)
        augmented = aug(image=x, mask=y)
        x56 = augmented['image']
        y56 = augmented['mask']

        aug = IAAPiecewiseAffine(p=1)
        augmented = aug(image=x, mask=y)
        x57 = augmented['image']
        y57 = augmented['mask']

        aug = LongestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x58 = augmented['image']
        y58 = augmented['mask']

        aug = NoOp(p=1)
        augmented = aug(image=x, mask=y)
        x59 = augmented['image']
        y59 = augmented['mask']

        #       aug = RandomCrop(p=1, height=22, width=22)
        #       augmented = aug(image=x, mask=y)
        #       x61 = augmented['image']
        #       y61 = augmented['mask']

        #      aug = RandomResizedCrop(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x63 = augmented['image']
        #      y63 = augmented['mask']

        aug = RandomScale(p=1)
        augmented = aug(image=x, mask=y)
        x64 = augmented['image']
        y64 = augmented['mask']

        #      aug = RandomSizedCrop(p=1, height=22, width=20, min_max_height = [32,32])
        #      augmented = aug(image=x, mask=y)
        #      x66 = augmented['image']
        #      y66 = augmented['mask']

        #      aug = Resize(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x67 = augmented['image']
        #      y67 = augmented['mask']

        aug = Rotate(p=1)
        augmented = aug(image=x, mask=y)
        x68 = augmented['image']
        y68 = augmented['mask']

        aug = ShiftScaleRotate(p=1)
        augmented = aug(image=x, mask=y)
        x69 = augmented['image']
        y69 = augmented['mask']

        aug = SmallestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x70 = augmented['image']
        y70 = augmented['mask']

        images_aug.extend([
            x, x0, x2, x3, x5, x6, x7, x8, x9, x10, x12, x13, x14, x16, x17,
            x18, x19, x20, x21, x22, x23, x24, x25, x26, x29, x30, x31, x32,
            x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x46,
            x47, x50, x51, x52, x54, x55, x56, x57, x58, x59, x64, x68, x69,
            x70
        ])

        masks_aug.extend([
            y, y0, y2, y3, y5, y6, y7, y8, y9, y10, y12, y13, y14, y16, y17,
            y18, y19, y20, y21, y22, y23, y24, y25, y26, y29, y30, y31, y32,
            y33, y34, y35, y36, y37, y38, y39, y40, y41, y42, y43, y44, y46,
            y47, y50, y51, y52, y54, y55, y56, y57, y58, y59, y64, y68, y69,
            y70
        ])

        idx = -1
        images_name = []
        masks_name = []
        for i, m in zip(images_aug, masks_aug):
            if idx == -1:
                tmp_image_name = f"{image_name}"
                tmp_mask_name = f"{mask_name}"
            else:
                tmp_image_name = f"{image_name}_{smalllist[idx]}"
                tmp_mask_name = f"{mask_name}_{smalllist[idx]}"
            images_name.append(tmp_image_name)
            masks_name.append(tmp_mask_name)
            idx += 1

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

    return imagedict, masksdict
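A hedged driver sketch for transform(): load a pair, run the augmentation, and write out every augmented image/mask by name (the file paths and output directory are assumptions):

import os
import cv2

image = cv2.imread("img_001.png")                        # assumed input image
mask = cv2.imread("mask_001.png", cv2.IMREAD_GRAYSCALE)  # assumed input mask
os.makedirs("aug", exist_ok=True)
imagedict, masksdict = transform(image, mask, "img_001", "mask_001")
for (img_name, img_arr), (msk_name, msk_arr) in zip(imagedict.items(), masksdict.items()):
    cv2.imwrite(os.path.join("aug", f"{img_name}.png"), img_arr)
    cv2.imwrite(os.path.join("aug", f"{msk_name}.png"), msk_arr)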
Example #21
    elif augmentation == 'histogram_equalization':
        transform = iaa.HistogramEqualization()
        transformed_image = transform(image=image)

    elif augmentation == 'all_channels_he':
        transform = iaa.AllChannelsHistogramEqualization()
        transformed_image = transform(image=image)

    elif augmentation == 'all_channels_clahe':
        transform = iaa.AllChannelsCLAHE()
        transformed_image = transform(image=image)

    ## Compression

    elif augmentation == 'image_compression':
        transform = ImageCompression(always_apply=True, quality_lower=10)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'downscale':
        transform = Downscale(always_apply=True)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'pixelate':
        transform = iaa.imgcorruptlike.Pixelate(severity=4)
        transformed_image = transform(image=image)

    ## Convolutional

    elif augmentation == 'sharpen':
        transform = iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))
        transformed_image = transform(image=image)
# %%
# Motion Blur
aug = MotionBlur(blur_limit=5)
image8 = (image * 255).astype("uint8")  # 255, not 256, so 1.0 maps to 255 instead of wrapping to 0
augmented = aug(image=image8, mask=mask)

image_scaled = augmented["image"]
mask_scaled = augmented["mask"]

visualize(image_scaled, mask_scaled, original_image=image8, original_mask=mask)


# %%
# Image Compression
aug = ImageCompression(quality_lower=50, quality_upper=50)
image8 = (image * 255).astype("uint8")
augmented = aug(image=image8, mask=mask)

image_scaled = augmented["image"]
mask_scaled = augmented["mask"]

visualize(image_scaled, mask_scaled, original_image=image8, original_mask=mask)


# %%
# IAAPerspective
aug = IAAPerspective()
image8 = (image * 255).astype("uint8")
mask8 = mask.astype("uint8")
augmented = aug(image=image8, mask=mask8)
from albumentations import (
    Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion,
    HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur,
    MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine, IAASharpen,
    IAAEmboss, Flip, OneOf, Compose, Resize, ImageCompression,
    MultiplicativeNoise, ChannelDropout, IAASuperpixels, GaussianBlur,
    HorizontalFlip, RandomGamma, VerticalFlip, CLAHE
)

import numpy as np
import torch
from torchvision import transforms

augmentation_techniques_pool = {
    'RandomBrightnessContrast': RandomBrightnessContrast(brightness_limit=0.05,
                                                         contrast_limit=0.05,
                                                         p=1),
    'Blur': Blur(blur_limit=2, p=1),
    'OpticalDistortion': OpticalDistortion(p=1),
    'ImageCompression': ImageCompression(p=1),
    'MultiplicativeNoise': MultiplicativeNoise(p=1),
    'IAASharpen': IAASharpen(alpha=(0, 0.2), p=1),
    'IAAEmboss': IAAEmboss(alpha=(0, 0.3), p=1),
    'MotionBlur': MotionBlur(blur_limit=3, p=1),
    'MedianBlur': MedianBlur(blur_limit=3, p=1),
    'HorizontalFlip': HorizontalFlip(p=1),
    'GaussNoise': GaussNoise(),
    'RandomGamma': RandomGamma(p=1),
    'VerticalFlip': VerticalFlip(p=1),
    'ShiftScaleRotate': ShiftScaleRotate(),
    'HueSaturationValue': HueSaturationValue(),
    'CLAHE': CLAHE(),
}
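One way to consume the pool is to sample a few techniques and compose them into a single pipeline; the sampling policy below is an assumption, not part of the original snippet:

import random
from albumentations import Compose

chosen = random.sample(list(augmentation_techniques_pool), k=3)
pipeline = Compose([augmentation_techniques_pool[name] for name in chosen])
augmented = pipeline(image=image)["image"]  # `image` is an assumed uint8 array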
Example #24
device = torch.device("cuda")
output_directory = (args.output_dir + "/" + str(args.lr_scheduler) + "-" +
                    str(args.augmentations) + "-" + str(args.dropout) + "-" +
                    str(args.losses) + "-" +
                    datetime.date.today().strftime("%b-%d-%Y") + "/")

pathlib.Path(output_directory).mkdir(parents=True, exist_ok=False)

transformations = [
    Transformation(VerticalFlip(), probability=0.25, apply_to_mask=True),
    Transformation(HorizontalFlip(), probability=0.25, apply_to_mask=True),
    Transformation(Transpose(), probability=0.25, apply_to_mask=True),
    Transformation(RandomBrightnessContrast(),
                   probability=0.25,
                   apply_to_mask=False),
    Transformation(ImageCompression(quality_lower=50),
                   probability=0.25,
                   apply_to_mask=False),
    Transformation(RGBShift(), probability=0.25, apply_to_mask=False),
]

if args.augmentations == "General":
    transformations.extend([
        # Transformation(ElasticTransform(), probability=0.25, apply_to_mask=True),
        Transformation(ShiftScaleRotate(),
                       probability=0.25,
                       apply_to_mask=True),
        Transformation(Blur(), probability=0.25, apply_to_mask=True),
    ])

# Datasets
Example #25
from albumentations import Compose, RandomBrightnessContrast, \
    HorizontalFlip, FancyPCA, HueSaturationValue, OneOf, ToGray, \
    ShiftScaleRotate, ImageCompression, PadIfNeeded, GaussNoise, GaussianBlur
import cv2
from albu import IsotropicResize
size = 256
# Declare an augmentation pipeline

transform = Compose([
    ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
    GaussNoise(p=0.1),
    GaussianBlur(blur_limit=3, p=0.05),
    HorizontalFlip(p=0.5),
    OneOf([
        IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
        IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR),
        IsotropicResize(max_side=size, interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
    ], p=0.7),
    PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
    OneOf([RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()], p=0.7),
    ToGray(p=0.2),
    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=10, border_mode=cv2.BORDER_CONSTANT, p=0.5),
])


# Read an image with OpenCV (note: the BGR-to-RGB conversion is left commented out below)
image = cv2.imread("/home/ubuntu/dataset/dfdc_image/train/dfdc_train_part_0/aaqaifqrwn/frame0.jpg")
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Augment an image
transformed = transform(image=image)
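The call returns a dict keyed by target name, so the augmented frame is retrieved with:

augmented_image = transformed["image"]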
Example #26
        RandomGamma(),
        CLAHE()
    ],
          p=0.4),
    OneOf([
        ElasticTransform(
            p=0.2, alpha=120, sigma=120 * 0.1, alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(border_mode=0,
                          distort_limit=0.05,
                          interpolation=1,
                          shift_limit=0.05,
                          p=1.0),
    ],
          p=0.25),
    ImageCompression(quality_lower=50, p=0.5),
    Normalize(),
    ToTensor(num_classes=NUM_CLASSES, sigmoid=True)
],
                              p=1)

AUGMENTATIONS_TEST = Compose([
    Resize(height=TRAIN_SHAPE[0], width=TRAIN_SHAPE[1], always_apply=True),
    Normalize(),
    ToTensor(num_classes=NUM_CLASSES, sigmoid=True)
],
                             p=1)

AUGMENTATIONS_TRAIN_CROP = Compose([
    CropNonEmptyMaskIfExists(height=256, width=448, always_apply=True),
    HorizontalFlip(p=0.5),
Example #27
    'g_shift_limit': range(-150, 140, 40),
    'b_shift_limit': range(-150, 140, 40),
    'image_compression_limit': range(5, 46, 20),
    'motion_blur_limit': range(7, 26, 1),
    'gauss_noise_limit': range(10, 70, 10)
}
aug = get_aug([
    RandomBrightnessContrast(contrast_limit=augmentation_configuration.get(
        'random_brightness_contrast_limit'),
                             p=0.5),
    RGBShift(r_shift_limit=augmentation_configuration.get('r_shift_limit'),
             g_shift_limit=augmentation_configuration.get('g_shift_limit'),
             b_shift_limit=augmentation_configuration.get('b_shift_limit'),
             p=0.5),
    ImageCompression(quality_lower=augmentation_configuration.get(
        'image_compression_limit'),
                     quality_upper=100,
                     p=0.5),
    MotionBlur(blur_limit=augmentation_configuration.get('motion_blur_limit'),
               p=0.5),
    GaussNoise(var_limit=augmentation_configuration.get('gauss_noise_limit'),
               p=0.5),
    CoarseDropout(max_holes=18,
                  max_height=22,
                  max_width=22,
                  min_holes=8,
                  min_height=10,
                  min_width=10,
                  p=0.5),
])