def __init__(self, data_root='data', split_file='', size=(256, 256), fold=0, resize=False):
     self.data_root = data_root
     pkl_data = pickle.load(open(split_file, 'rb'))
     if fold == -1:
         self.path_list = pkl_data[0]['train']
         self.path_list.extend(pkl_data[0]['val'])
     else:
         self.path_list = pkl_data[fold]['train']
     self.len = len(self.path_list)
     if resize:
         self.transforms = Compose([Resize(height=size[0], width=size[1], interpolation=cv2.INTER_NEAREST),
                                    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=20, p=0.7,
                                                     border_mode=cv2.BORDER_CONSTANT, value=0),
                                    HorizontalFlip(p=0.5),
                                    OneOf([ElasticTransform(p=1, alpha=50, sigma=30, alpha_affine=30,
                                                            border_mode=cv2.BORDER_CONSTANT, value=0),
                                           OpticalDistortion(p=1, distort_limit=0.5, shift_limit=0.1,
                                                             border_mode=cv2.BORDER_CONSTANT, value=0)], p=0.5),
                                    RandomGamma(gamma_limit=(80, 120), p=0.5),
                                    GaussNoise(var_limit=(0.02, 0.1), mean=0, p=0.5)
                                    ])
     else:
         self.transforms = Compose([LongestMaxSize(max_size=max(size)),
                                    PadIfNeeded(min_height=size[0], min_width=size[1], value=0,
                                                border_mode=cv2.BORDER_CONSTANT),
                                    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=20, p=0.7,
                                                     border_mode=cv2.BORDER_CONSTANT, value=0),
                                    HorizontalFlip(p=0.5),
                                    OneOf([ElasticTransform(p=1, alpha=50, sigma=30, alpha_affine=30,
                                                            border_mode=cv2.BORDER_CONSTANT, value=0),
                                           OpticalDistortion(p=1, distort_limit=0.5, shift_limit=0.1,
                                                             border_mode=cv2.BORDER_CONSTANT, value=0)], p=0.5),
                                    RandomGamma(gamma_limit=(80, 120), p=0.5),
                                    GaussNoise(var_limit=(0.02, 0.1), mean=0, p=0.5)
                                    ])
Example #2
def get_augmentations():
    crop_p = 0.4
    blur_p = 0.4
    distort1_p = 0.2
    distort2_p = 0.4
    flip_p = 0.4

    crop = RandomSizedCropAlbuAug(crop_p)

    gauss_blur = GaussianBlurAug(p=blur_p, kernel_sizes=range(7, 16, 2))
    motion_blur = MotionBlurAug(p=blur_p, kernel_sizes=(3, 5))

    optical_distort = AlbuAug(
        OpticalDistortion(p=distort1_p, distort_limit=1, shift_limit=0.5))
    grid_distort = AlbuAug(GridDistortion(p=distort1_p))
    elastic1 = AlbuAug(
        ElasticTransform(p=distort2_p,
                         alpha=40,
                         sigma=90 * 0.05,
                         alpha_affine=90 * 0.05))
    elastic2 = AlbuAug(ElasticTransform(p=distort1_p))
    blur_aug = PickOne([gauss_blur, motion_blur])
    distort_aug = PickOne([optical_distort, grid_distort, elastic1, elastic2])
    flip_aug = FlipAug(p=flip_p)

    return ApplyAll([crop, blur_aug, distort_aug, flip_aug])
Example #3
def augment(patch_size=patch_size):
    return Compose([
        VerticalFlip(p=.5),
        HorizontalFlip(p=.5),
        HueSaturationValue(hue_shift_limit=(-15, 15),
                           sat_shift_limit=0,
                           val_shift_limit=0,
                           p=.5),
        HueSaturationValue(hue_shift_limit=(-10, 10),
                           sat_shift_limit=(-20, 20),
                           val_shift_limit=0,
                           p=.5),
        Rotate(limit=(0, 359), p=.5, border_mode=cv2.BORDER_CONSTANT),
        RandomBrightnessContrast(brightness_limit=0.15,
                                 contrast_limit=0.1,
                                 always_apply=False,
                                 p=0.5),
        ElasticTransform(always_apply=True,
                         approximate=True,
                         alpha=20,
                         sigma=10,
                         alpha_affine=0,
                         border_mode=cv2.BORDER_CONSTANT),
        GridDistortion(num_steps=16,
                       distort_limit=0.5,
                       border_mode=cv2.BORDER_CONSTANT,
                       always_apply=False,
                       p=0.5),
    ])
Example #4
    def augmentation(image,
                     mask,
                     noise=False,
                     transform=False,
                     clahe=True,
                     r_bright=True,
                     r_gamma=True):
        aug_list = [
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            RandomRotate90(p=0.5),
        ]
        if r_bright:
            aug_list += [RandomBrightnessContrast(p=.5)]
        if r_gamma:
            aug_list += [RandomGamma(p=.5)]
        if clahe:
            aug_list += [CLAHE(p=1., always_apply=True)]
        if noise:
            aug_list += [GaussNoise(p=.5, var_limit=1.)]
        if transform:
            aug_list += [
                ElasticTransform(p=.5,
                                 sigma=1.,
                                 alpha_affine=20,
                                 border_mode=0)
            ]
        aug = Compose(aug_list)

        augmented = aug(image=image, mask=mask)
        image_heavy = augmented['image']
        mask_heavy = augmented['mask']
        return image_heavy, mask_heavy
Example #5
def aug_with_crop(width=640, height=480, crop_prob=1):
    return Compose(
        [
            # RandomCrop(width=480, height=640, p=crop_prob),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
            Transpose(p=0.5),
            ShiftScaleRotate(
                shift_limit=0.01, scale_limit=0.04, rotate_limit=0, p=0.25),
            RandomBrightnessContrast(p=0.5),
            RandomGamma(p=0.25),
            IAAEmboss(p=0.25),
            Blur(p=0.01, blur_limit=3),
            OneOf([
                ElasticTransform(p=0.5,
                                 alpha=120,
                                 sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            ],
                  p=0.8)
        ],
        p=1)
Example #6
 def __init__(self, df, image_idx, mode='train'):
     self.imglist = df['image_path'].values
     self.labellist = df['label'].values
     self.index = image_idx
     self.mode = mode
     self.train_transformation = Compose([
         # RandomRotate90(),
         GridDistortion(p=0.6),
         HorizontalFlip(p=0.6),
         ElasticTransform(alpha=1, sigma=25, alpha_affine=50, p=0.75),
         OneOf([
             IAAAdditiveGaussianNoise(),
             GaussNoise(),
         ], p=0.5),
         Cutout(num_holes=30,
                max_h_size=9,
                max_w_size=11,
                fill_value=128,
                p=0.75),
         ShiftScaleRotate(shift_limit=0.0625,
                          scale_limit=0.15,
                          rotate_limit=15,
                          p=0.75),
         # Normalize(),
         # ToTensor(),
     ])
     self.valid_transformation = Compose([
         # Normalize(),
         # ToTensor(),
     ])
Example #7
def train_transform(upside_down=False):
    return Compose(
        [
            Resize(202, 202, interpolation=cv2.INTER_NEAREST),
            PadIfNeeded(min_height=SIZE,
                        min_width=SIZE,
                        border_mode=cv2.BORDER_REPLICATE),
            VerticalFlip(p=int(upside_down)),
            HorizontalFlip(p=0.5),
            Cutout(p=0.5, num_holes=5, max_h_size=5, max_w_size=5),
            OneOf([
                Blur(),
                IAAAdditiveGaussianNoise(),
            ], p=0.1),
            ElasticTransform(
                p=0.25,
                alpha=1,
                sigma=30,  # TODO
                alpha_affine=30),  # TODO
            ShiftScaleRotate(
                p=0.25,
                rotate_limit=.15,  # TODO
                shift_limit=.15,  # TODO
                scale_limit=.15,  # TODO
                interpolation=cv2.INTER_CUBIC,
                #border_mode=cv2.BORDER_REFLECT_101),
                border_mode=cv2.BORDER_REPLICATE),
            Normalize(),
        ],
        p=1)
Example #8
def get_augmenter(p=1.0):
    return Compose([
        ElasticTransform(
            p=0.8, alpha_affine=10, border_mode=cv2.BORDER_REPLICATE),
        RandomGamma(p=0.8, gamma_limit=(50, 150)),
    ],
                   p=p)
Example #9
    def mask_transformation(mask):
        """
        Simulate deformation noise: affine transform + non-rigid deformation (TPS) + dilation.
        This follows the paper: `during offline training we generate input masks by deforming the
        annotated masks via affine transformation as well as non-rigid deformations via thin-plate
        splines [4], followed by a coarsening step (dilation morphological operation) to remove
        details of the object contour.`

        :param mask: np.array of shape (h, w)
        :return: np.array of shape (h, w) after deformation noise.
        """
        aug = Compose([
            # add another mask transformation ?
            IAAPiecewiseAffine(p=0.4),  # affine + non-rigid deformation
            ElasticTransform(p=0.6,
                             alpha=50,
                             sigma=50,
                             alpha_affine=20,
                             border_mode=cv2.BORDER_CONSTANT,
                             always_apply=True),
        ])
        augmented = aug(image=mask)
        mask = augmented['image']
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=random.randrange(1, 10))
        return mask
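A minimal usage sketch for the deformation above, assuming mask_transformation is reachable as a plain function (it takes no self argument) and that numpy is imported; the toy square mask is purely illustrative:

import numpy as np

# hypothetical 128x128 binary mask with a filled square region
toy_mask = np.zeros((128, 128), dtype=np.uint8)
toy_mask[32:96, 32:96] = 1

# deform it the same way the offline-training input masks are generated above
noisy_mask = mask_transformation(toy_mask)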
Example #10
 def box_segmentation_aug():
     return Compose([
         OneOf([
             RandomBrightnessContrast(brightness_limit=0.2, p=0.5),
             RandomGamma(gamma_limit=50, p=0.5),
             ChannelShuffle(p=0.5)
         ]),
         OneOf([
             ImageCompression(quality_lower=0, quality_upper=20, p=0.5),
             MultiplicativeNoise(multiplier=(0.3, 0.8),
                                 elementwise=True,
                                 per_channel=True,
                                 p=0.5),
             Blur(blur_limit=(15, 15), p=0.5)
         ]),
         OneOf([
             CenterCrop(height=1000, width=1000, p=0.1),
             RandomGridShuffle(grid=(3, 3), p=0.2),
             CoarseDropout(max_holes=20,
                           max_height=100,
                           max_width=100,
                           fill_value=53,
                           p=0.2)
         ]),
         OneOf([
             GridDistortion(p=0.5, num_steps=2, distort_limit=0.2),
             ElasticTransform(alpha=157, sigma=80, alpha_affine=196, p=0.5),
             OpticalDistortion(distort_limit=0.5, shift_limit=0.5, p=0.5)
         ]),
         OneOf([
             VerticalFlip(p=0.5),
             HorizontalFlip(p=0.5),
             Rotate(limit=44, p=0.5)
         ])
     ])
Example #11
    def __init__(self, root_dir= r"C:\Users\Indy-Windows\Desktop\carvana\carvana\data\train", transform=None, image_size=(512, 512)):

        #Initialize Directory Tree from current working directory if no directory is provided
        self.root_dir = root_dir
        self.img_dir = os.path.join(self.root_dir, 'images')
        self.mask_dir = os.path.join(self.root_dir, 'masks')

        self.img_transform = transform
        self.num_img = len(os.listdir(self.img_dir))
        self.num_mask = len(os.listdir(self.mask_dir))

        self.img_list = os.listdir(self.img_dir)
        self.mask_list = os.listdir(self.mask_dir)

        self.image_height = image_size[1]
        self.image_width = image_size[0]

        self.transform = transform

        self.album_transform = Compose([
            HorizontalFlip(),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            OneOf([
                ElasticTransform(p=.2),
                IAAPerspective(p=.35),
            ], p=.35)
        ])

        if self.transform is None:
            self.transform = self.album_transform
Example #12
    def initialize_elements(self):
        self.using_roi = hasattr(self.params, "roi_crop")
        self.resizer = self.plain_resize

        if hasattr(self.params, "random_crop_scale"):
            self.resizer = RandomResizedCrop(
                height=self.params.default_height,
                width=self.params.default_width,
                scale=self.params.random_crop_scale,
                ratio=self.params.random_crop_ratio)

        if self.using_roi:
            self.roi_resize = Resize(height=self.params.roi_height,
                                     width=self.params.roi_width)

        starting_aug = [Rotate(limit=15), HorizontalFlip(p=0.5)]

        heavy_aug = [
            # RandomGamma(p=0.1),
            ElasticTransform(p=0.1,
                             alpha=120,
                             sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GaussNoise(p=0.05),
            GaussianBlur(p=0.05)
        ]

        if self.params.data_augmentation == constants.heavy_augmentation:
            starting_aug.extend(heavy_aug)
        self.aug = Compose(starting_aug)
Example #13
def test_elastic_transform_interpolation(monkeypatch, interpolation):
    image = np.random.randint(low=0,
                              high=256,
                              size=(100, 100, 3),
                              dtype=np.uint8)
    mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)
    monkeypatch.setattr(
        'albumentations.augmentations.transforms.ElasticTransform.get_params',
        lambda *_: {'random_state': 1111})
    aug = ElasticTransform(alpha=1,
                           sigma=50,
                           alpha_affine=50,
                           interpolation=interpolation,
                           p=1)
    data = aug(image=image, mask=mask)
    expected_image = F.elastic_transform_fast(
        image,
        alpha=1,
        sigma=50,
        alpha_affine=50,
        interpolation=interpolation,
        border_mode=cv2.BORDER_REFLECT_101,
        random_state=np.random.RandomState(1111))
    expected_mask = F.elastic_transform_fast(
        mask,
        alpha=1,
        sigma=50,
        alpha_affine=50,
        interpolation=cv2.INTER_NEAREST,
        border_mode=cv2.BORDER_REFLECT_101,
        random_state=np.random.RandomState(1111))
    assert np.array_equal(data['image'], expected_image)
    assert np.array_equal(data['mask'], expected_mask)
Example #14
def medium_aug(p=1.0):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(p=0.75,
                             shift_limit=0.1,
                             scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT),
            RandomBrightnessContrast(
                brightness_limit=0.6, contrast_limit=0.6, p=0.5),
            OneOf([
                HueSaturationValue(p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0)
            ],
                  p=0.5),
            OneOf([
                Blur(p=1.0),
                MedianBlur(p=1.0),
                MotionBlur(p=1.0),
            ], p=0.3),
            OneOf([GridDistortion(p=1.0),
                   ElasticTransform(p=1.0)], p=0.3),
            OneOf([
                CLAHE(p=1.0),
                IAASharpen(p=1.0),
            ], p=0.3),
            IAAAdditiveGaussianNoise(p=0.5)
            # ToGray(p=1.0),
        ],
        p=p)
Example #15
 def __init__(self,
              root_dir,
              annotation_lines,
              class_number,
              transform=None,
              loader=default_loader):
     self.annotation_lines = annotation_lines
     self.class_number = class_number
     self.transform = transform
     self.loader = loader
     self.root_dir = root_dir
     curr_size = 512
     min_max_height = (curr_size - curr_size // 2, curr_size - 1)
     self.transform_strong = Compose([
         RandomSizedCrop(min_max_height=min_max_height,
                         height=curr_size,
                         width=curr_size,
                         p=1.0),
         OneOf([
             Transpose(p=0.5),
             HorizontalFlip(p=0.5),
             VerticalFlip(p=0.5),
             Rotate(p=0.5),
         ],
               p=1.0),
         ElasticTransform(alpha=curr_size,
                          sigma=curr_size * 0.05,
                          alpha_affine=10,
                          p=1.0)
     ])
Example #16
def train_pipeline(cache, mask_db, path):
    image, mask = read_image_and_mask_cached(cache, mask_db, (101, 101), path)
    args = Compose([
        LabelMaskBorder(),
        HorizontalFlip(p=0.5),
        OneOf([
            ShiftScaleRotate(rotate_limit=15,
                             border_mode=cv2.BORDER_REPLICATE),
            RandomSizedCrop(min_max_height=(70, 100), height=101, width=101)
        ],
              p=0.2),
        GaussNoise(p=0.2),
        OneOf([
            RandomBrightness(limit=0.4),
            RandomGamma(),
        ], p=0.5),
        OneOf([Blur(), MedianBlur(), MotionBlur()], p=0.2),
        OneOf([
            ElasticTransform(alpha=10, sigma=10, alpha_affine=10),
            GridDistortion()
        ],
              p=0.2),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        PadIfNeeded(128, 128, cv2.BORDER_REPLICATE),
        ChannelsFirst()
    ])(image=image, mask=mask)
    return args['image'], args.get('mask')
Example #17
def strong_aug(p=1):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            OneOf([
                RandomCrop(94, 94, p=0.6),
                ShiftScaleRotate(shift_limit=(0.1, 0.1),
                                 scale_limit=(0.05, 0.05),
                                 rotate_limit=10,
                                 p=0.4),
            ],
                  p=0.6),
            OneOf([
                ElasticTransform(p=0.2,
                                 alpha=120,
                                 sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                IAAPiecewiseAffine(p=.4),
                GridDistortion(p=0.4),
            ],
                  p=.4),
            OneOf(
                [
                    # CLAHE(clip_limit=2),
                    RandomGamma((90, 110)),
                    IAAEmboss((0.1, 0.4), (0.1, 0.6)),
                    RandomContrast(0.1),
                    RandomBrightness(0.1),
                ],
                p=0.5),
        ],
        p=p)
Example #18
 def train_transform(p=1):
     return Compose([
         HorizontalFlip(p=0.5),
         OneOf([
                 RandomSizedCrop((92, 98), 101, 101,  p=0.6),
                 ShiftScaleRotate(shift_limit=(0, 0.1), scale_limit=(0, 0.05), rotate_limit=10, p=0.4),
         ], p=0.6),
         #OneOf([
         #    IAAAdditiveGaussianNoise(), #may by
         #    GaussNoise(),#may by
         #], p=0.2),
         #OneOf([
         #    MotionBlur(p=0.2),
         #    MedianBlur(blur_limit=3, p=0.3),
         #    Blur(blur_limit=3, p=0.5),
         #], p=0.4),
         OneOf([
             ElasticTransform(p=0.2, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
             IAAPiecewiseAffine(p=.4),
             GridDistortion(p=0.4),
         ], p=.4),
         OneOf([
             #CLAHE(clip_limit=2),
             RandomGamma((90,110)),
             ShiftBrightness((5, 20)),
             IAAEmboss((0.1, 0.4), (0.1, 0.6)),
             RandomContrast(0.08),
             RandomBrightness(0.08),
         ], p=0.5),
     ], p=p)
Example #19
    def __init__(self, root_dir, partition, augment=True):
        self.root_dir = root_dir
        self.list_IDs = os.listdir(
            os.path.join(self.root_dir, 'x_{}'.format(partition)))
        self.partition = partition
        self.augment = augment
        self.augmentator = Compose([
            # Non destructive transformations
            VerticalFlip(p=0.6),
            HorizontalFlip(p=0.6),
            RandomRotate90(),
            Transpose(p=0.6),
            ShiftScaleRotate(p=0.45, scale_limit=(0.1, 0.3)),

            #     # Non-rigid transformations
            ElasticTransform(p=0.25,
                             alpha=160,
                             sigma=180 * 0.05,
                             alpha_affine=120 * 0.03),
            Blur(blur_limit=3, p=0.2),

            #     Color augmentation
            RandomBrightness(p=0.5),
            RandomContrast(p=0.5),
            RandomGamma(p=0.5),
            CLAHE(p=0.5)
        ])
Example #20
def strong_aug(p=0.6, im_height=700, im_width=1200):
    dropout_w = int(im_width / 82)
    dropout_h = int(im_height / 9.)
    return Compose([
        Rotate(limit=2, p=0.5),
        RandomCrop(
            height=int(im_height * 0.95), width=int(im_width * 0.9), p=0.3),
        ElasticTransform(p=0.8),
        HorizontalFlip(p=0.5),
        CoarseDropout(max_holes=8,
                      max_height=dropout_h,
                      max_width=dropout_w,
                      min_holes=1,
                      min_height=5,
                      min_width=5,
                      fill_value=0,
                      always_apply=False,
                      p=0.85),
        OneOf([
            MotionBlur(p=0.8),
            Blur(blur_limit=20, p=0.8),
        ], p=0.35),
        Resize(height=256, width=256, p=1)
    ],
                   p=p)
Example #21
 def __init__(self):
     self.transformer = Compose([
         HorizontalFlip(p=0.5),
         OneOf([
             GridDistortion(
                 num_steps=5, distort_limit=(-0.46, 0.40), value=(0, 0, 0)),
             ElasticTransform(alpha=1.68,
                              sigma=48.32,
                              alpha_affine=44.97,
                              value=(0, 0, 0)),
             RandomResizedCrop(height=512,
                               width=512,
                               scale=(0.08, 1.0),
                               ratio=(0.75, 1.33))
         ],
               p=.3),
         ShiftScaleRotate(shift_limit=(-0.06, 0.06),
                          scale_limit=(-0.1, 0.1),
                          rotate_limit=(-90, 90),
                          p=0.3),
         Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                   std=[0.2108204, 0.20766491, 0.21656131],
                   max_pixel_value=255.0,
                   p=1.0),
         ToTensorV2(),
     ])
Example #22
 def augment(self, image, mask):
     aug = Compose([
         OneOf([
             RandomSizedCrop(min_max_height=(50, 101),
                             height=self.out_size,
                             width=self.out_size,
                             p=0.5),
             PadIfNeeded(
                 min_height=self.out_size, min_width=self.out_size, p=0.5)
         ],
               p=1),
         VerticalFlip(p=0.5),
         RandomRotate90(p=0.5),
         OneOf([
             ElasticTransform(p=0.5,
                              alpha=120,
                              sigma=120 * 0.05,
                              alpha_affine=120 * 0.03),
             GridDistortion(p=0.5),
             OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
         ],
               p=0.8),
         CLAHE(p=0.8),
         RandomBrightnessContrast(p=0.8),
         RandomGamma(p=0.8)
     ])
     augmented = aug(image=image, mask=mask)
     image_heavy = augmented['image']
     mask_heavy = augmented['mask']
     return image_heavy, mask_heavy
Example #23
    def train_aug(self, image, label):
        aug = Compose(
            [
                OneOf(
                    [CLAHE(), IAASharpen(), IAAEmboss()], p=0.5),
                # OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
                # OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1)], p=0.2),
                RandomContrast(),
                RandomBrightness(),
                # ChannelShuffle(),
                RandomRotate90(),
                Flip(),
                # RandomScale(scale_limit=(0.0, 0.1)),
                OneOf([
                    ElasticTransform(),
                    OpticalDistortion(),
                    GridDistortion(),
                    IAAPiecewiseAffine()
                ],
                      p=0.5),
                # HueSaturationValue(p=0.3),
            ],
            p=0.9)
        augmented = aug(image=image, mask=label)
        augmented = ToGray(p=1)(image=augmented['image'],
                                mask=augmented['mask'])
        augmented = RandomCrop(256, 256)(image=augmented['image'],
                                         mask=augmented['mask'])
        image, label = augmented['image'], augmented['mask']

        return image, label
Example #24
def augmentation(img, n):
    """
    Apply n randomly chosen augmentations to an image.
    :rtype: ndarray
    :param img: image as a numpy array
    :param n: number of distinct augmentations to apply
    :return: array of n augmented versions of the image
    """
    methods = [ElasticTransform(**elastic_params),
               RandomGamma(**gamma_params),
               GridDistortion(**all_other),
               RGBShift(**r_shift_params),
               Rotate(**rotate_params),
               RandomBrightness(**brightness_params)
               ]
    for i in range(len(methods)):
        methods[i] = Compose([methods[i], ], p=1)
    chosen = np.random.choice(methods, replace=False, size=n)
    augmented = np.empty((n,), dtype=object)  # np.object was removed in NumPy 1.24; the builtin object dtype is equivalent
    for i, method in enumerate(chosen):
        transformed = transform_image(method(image=img)["image"])
        if to_normalize:
            transformed = normalize(transformed)
        augmented[i] = transformed
    return augmented
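The helper above depends on parameter dicts (elastic_params, gamma_params, and so on) and on transform_image / normalize defined elsewhere in the source. As a self-contained sketch of the same pick-n-distinct-transforms idea, with purely illustrative parameter values:

import numpy as np
from albumentations import ElasticTransform, GridDistortion, RandomGamma, Rotate

def augment_n_times(img, n):
    # illustrative transforms; the original code draws its parameters from config dicts
    methods = [
        ElasticTransform(alpha=50, sigma=10, alpha_affine=10, p=1),
        RandomGamma(gamma_limit=(80, 120), p=1),
        GridDistortion(p=1),
        Rotate(limit=30, p=1),
    ]
    # pick n distinct transforms and apply each one to the image independently
    chosen = np.random.choice(methods, replace=False, size=n)
    return [m(image=img)["image"] for m in chosen]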
Example #25
def get_transforms(phase):
    list_transforms = []
    if phase == "train":
        list_transforms.extend(
            [
                OneOf([
                    RandomSizedCrop(min_max_height=(50, 101), height=original_height, width=original_width, p=0.5),
                    PadIfNeeded(min_height=original_height, min_width=original_width, p=0.5)
                ], p=1),
                VerticalFlip(p=0.5),
                # RandomRotate90(p=0.5),
                OneOf([
                    ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                    GridDistortion(p=0.5),
                    OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
                ], p=0.8),
                CLAHE(p=0.8),
                RandomBrightnessContrast(p=0.8),
                RandomGamma(p=0.8),
            ]
        )
    list_transforms.extend(
        [
            Resize(height=int(original_height/4), width=int(original_width/4),  interpolation=cv2.INTER_NEAREST),
            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
            ToTensor(),
        ]
    )
    list_trfms = Compose(list_transforms)
    return list_trfms
Example #26
def hard_aug(original_height=128, original_width=128, k=4):
    aug = Compose([
        OneOf([
            RandomSizedCrop(
                min_max_height=(original_height // k, original_height),
                height=original_height,
                width=original_width,
                p=0.5),
            PadIfNeeded(
                min_height=original_height, min_width=original_width, p=0.5)
        ],
              p=1),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        OneOf([
            ElasticTransform(
                p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            GridDistortion(p=0.5),
            OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
        ],
              p=0.8),
        CLAHE(p=0.8),
        RandomBrightnessContrast(p=0.8),
        RandomGamma(p=0.8)
    ])
    return aug
Example #27
def random_elastic_transform(image, label):
    size = random.randrange(100, 500)
    aug = ElasticTransform(p=1, alpha=size, sigma=size *
                           0.05, alpha_affine=size * 0.03)
    augmented = aug(image=image, mask=label)
    image = augmented['image']
    label = augmented['mask']
    return image, label
Example #28
def strong_aug(p=0.5, crop_size=(512, 512)):
    return Compose([
        RandomResizedCrop(crop_size[0],
                          crop_size[1],
                          scale=(0.3, 1.0),
                          ratio=(0.75, 1.3),
                          interpolation=4,
                          p=1.0),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.8),
        OneOf([
            MotionBlur(p=0.5),
            MedianBlur(blur_limit=3, p=0.5),
            Blur(blur_limit=3, p=0.5),
        ],
              p=0.3),
        ShiftScaleRotate(
            shift_limit=0.2, scale_limit=0.5, rotate_limit=180, p=0.8),
        OneOf([
            OpticalDistortion(p=0.5),
            GridDistortion(p=0.5),
            IAAPiecewiseAffine(p=0.5),
            ElasticTransform(p=0.5),
        ],
              p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
              p=0.3),
        OneOf([
            GaussNoise(),
            RandomRain(
                p=0.2, brightness_coefficient=0.9, drop_width=1, blur_value=5),
            RandomSnow(p=0.4,
                       brightness_coeff=0.5,
                       snow_point_lower=0.1,
                       snow_point_upper=0.3),
            RandomShadow(p=0.2,
                         num_shadows_lower=1,
                         num_shadows_upper=1,
                         shadow_dimension=5,
                         shadow_roi=(0, 0.5, 1, 1)),
            RandomFog(
                p=0.5, fog_coef_lower=0.3, fog_coef_upper=0.5, alpha_coef=0.1)
        ],
              p=0.3),
        RGBShift(),
        HueSaturationValue(p=0.9),
    ],
                   p=p)
Example #29
def augment_data(save_dir):
    """
    A helper that implements the data augmentation pipeline.
    :param save_dir: directory in which to save the augmented data
    :return:
    """

    seed = 1337
    random.seed(seed)
    start_time = time.time()
    print(f"====== Augmenting data. Seed set at {seed} ======")

    data_file = h5py.File(os.path.join(save_dir, 'data_file.h5'), 'r')
    data_shape = data_file['data/data'].shape

    data_aug = np.zeros(shape=data_shape, dtype=np.float32)

    n_samples = data_shape[0]
    img_channels, img_height, img_width, img_depth = data_shape[1:5]

    try:
        aug = alb.load(os.path.join(save_dir, 'aug_pipeline_1.json'))
    except FileNotFoundError:
        print("Pipeline not found. Generating One ...")
        aug = Compose([
            OneOf([VerticalFlip(p=1), HorizontalFlip(p=1)], p=1),
            OneOf([
                ElasticTransform(p=1, sigma=6, alpha_affine=4, alpha=75),
                GridDistortion(p=1),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            ],
                  p=0.8)
        ])

        alb.save(aug, os.path.join(save_dir, 'aug_pipeline_1.json'))

    for data_idx in np.arange(n_samples):
        img = data_file['data/data'][data_idx, ...]
        img = img.reshape(img_channels, img_height, img_width, -1)
        img_aug = aug(image=img[0, ...])['image'].reshape(
            img_channels, img_height, img_width, img_depth, -1)

        data_aug[data_idx, ...] = img_aug

        del img_aug
        del img

    data_file.close()

    with h5py.File(os.path.join(save_dir, 'data_aug.h5'), 'w') as file:
        file.create_dataset('data/data', data=data_aug, dtype=np.float32)

    print(
        f"====== Finished augmentation. Time taken: {time.time() - start_time}s ======"
    )
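A minimal call sketch, assuming save_dir already contains the data_file.h5 written earlier in the pipeline; the directory name below is purely illustrative:

if __name__ == '__main__':
    augment_data(save_dir='output/run_1')  # hypothetical directory holding data_file.h5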
Example #30
def warp(image, mask):
    alpha = randrange(30, 45)
    sigma = randrange(5, 7)
    aug = ElasticTransform(alpha=alpha,
                           sigma=sigma,
                           p=1.,
                           border_mode=cv2.BORDER_CONSTANT)
    augmented = aug(image=image, mask=mask)
    return augmented['image'], augmented['mask']