Example #1
def get_strong_train_transform():
    return A.Compose(
        [
            #A.Resize(height=IMG_SIZE, width=IMG_SIZE, p=1),
            A.RandomSizedBBoxSafeCrop(
                IMG_SIZE, IMG_SIZE, interpolation=1, p=0.33),
            A.HorizontalFlip(),
            A.ShiftScaleRotate(shift_limit=0.05,
                               scale_limit=0.1,
                               rotate_limit=10,
                               interpolation=1,
                               p=0.5),
            A.OneOf([
                A.Blur(blur_limit=(1, 3), p=0.33),
                A.MedianBlur(blur_limit=3, p=0.33),
                A.ImageCompression(quality_lower=50, p=0.33),
            ],
                    p=0.5),
            A.OneOf([
                A.RandomGamma(gamma_limit=(85, 115), p=0.33),
                A.RandomBrightnessContrast(brightness_limit=0.2, p=0.33),
                A.HueSaturationValue(hue_shift_limit=25,
                                     sat_shift_limit=25,
                                     val_shift_limit=30,
                                     p=0.5)
            ],
                    p=0.34),
            A.CLAHE(clip_limit=2.5, p=0.5),
            A.Normalize(always_apply=True, p=1.0),
            ToTensorV2(p=1.0)
        ],
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=5,
                                 min_visibility=0.1,
                                 label_fields=['labels']))
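A Compose built with bbox_params, as above, is called with keyword arguments, and boxes that fall below min_area or min_visibility after cropping are dropped. A minimal usage sketch, assuming IMG_SIZE is defined at module level and using made-up sample data:

import numpy as np

IMG_SIZE = 512  # assumption: the snippet expects this name at module level
transform = get_strong_train_transform()

image = np.random.randint(0, 256, (768, 1024, 3), dtype=np.uint8)  # hypothetical HWC uint8 image
bboxes = [[100, 150, 400, 500]]  # pascal_voc format: x_min, y_min, x_max, y_max in pixels
labels = [1]

out = transform(image=image, bboxes=bboxes, labels=labels)
img_tensor = out['image']     # CHW tensor after Normalize + ToTensorV2
kept_boxes = out['bboxes']    # boxes that survived the min_area / min_visibility filter
kept_labels = out['labels']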
Example #2
def cifar_alb_trainData():
    '''Build the Albumentations training transforms and return a callable that maps a PIL image to a transformed tensor.'''

    train_transform = [
        A.HorizontalFlip(p=0.15),
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.05,
                           rotate_limit=15,
                           p=0.25),
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15,
                   p=0.5),
        A.RandomBrightnessContrast(p=0.25),
        A.RandomGamma(p=0.25),
        A.CLAHE(p=0.25),
        A.ChannelShuffle(p=0.1),
        A.ElasticTransform(p=0.1),
        A.MotionBlur(blur_limit=17, p=0.1),
        A.Cutout(num_holes=1,
                 max_h_size=16,
                 max_w_size=16,
                 fill_value=mean,
                 always_apply=False,
                 p=0.5),
        A.Normalize(mean=mean, std=std),
        ToTensor()
    ]

    transforms_result = A.Compose(train_transform)
    return lambda img: transforms_result(image=np.array(img))["image"]
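Because the returned lambda converts the incoming PIL image with np.array before calling the Compose, it can be passed directly to a torchvision dataset as its transform. A small sketch under that assumption (mean, std and the imports come from the surrounding module):

from torchvision import datasets

train_set = datasets.CIFAR10(root='./data', train=True, download=True,
                             transform=cifar_alb_trainData())
img_tensor, label = train_set[0]  # the transform returns a tensor, ready for a DataLoader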
Example #3
def aug_medium(prob=1):
    return aug.Compose([
        aug.Flip(),
        aug.OneOf([
            aug.CLAHE(clip_limit=2, p=.5),
            aug.IAASharpen(p=.25),
        ],
                  p=0.35),
        aug.OneOf([
            aug.RandomContrast(),
            aug.RandomGamma(),
            aug.RandomBrightness(),
        ],
                  p=0.3),
        aug.OneOf([
            aug.ElasticTransform(
                alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            aug.GridDistortion(),
            aug.OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ],
                  p=0.3),
        aug.ShiftScaleRotate(rotate_limit=12),
        aug.OneOf([
            aug.GaussNoise(p=.35),
            SaltPepperNoise(level_limit=0.0002, p=.7),
            aug.ISONoise(p=.7),
        ],
                  p=.5),
        aug.Cutout(num_holes=3, p=.25),
    ],
                       p=prob)
Example #4
    def __init__(self, used_img_size, final_img_size, transform_params):
        # Use the larger of the height/width ratios so the resized image fully covers the final crop.
        self._ratio = max(
            float(final_img_size[0]) / used_img_size[0],
            float(final_img_size[1]) / used_img_size[1])
        self._final_img_size = final_img_size
        self._scale_compose = [
            albumentations.Resize(height=int(used_img_size[0] * self._ratio),
                                  width=int(used_img_size[1] * self._ratio),
                                  always_apply=True),
            albumentations.CenterCrop(height=self._final_img_size[0],
                                      width=self._final_img_size[1],
                                      always_apply=True,
                                      p=1),
        ]
        self._normalize_transform = albumentations.Normalize()
        self._normalize_no_transform = albumentations.Normalize(mean=(0, 0, 0),
                                                                std=(1, 1, 1))
        self._train_compose = list(self._scale_compose)  # copy, so appending Normalize below cannot mutate _scale_compose
        if transform_params["filters"]:
            self._train_compose = [
                albumentations.RandomBrightnessContrast(
                    brightness_limit=(-0.1, 0.1),
                    contrast_limit=(-0.1, 0.1),
                    p=0.2),
                albumentations.RandomGamma(gamma_limit=(90, 110), p=0.2),
                albumentations.ChannelShuffle(p=0.2),
            ] + self._scale_compose

        if transform_params["normalize"]:
            self._train_compose.append(albumentations.Normalize())
        else:
            self._train_compose.append(
                albumentations.Normalize(mean=(0, 0, 0), std=(1, 1, 1)))
Example #5
def det_train_augs(height: int, width: int) -> albu.Compose:
    return albu.Compose([
        albu.Resize(height=height, width=width),
        albu.ShiftScaleRotate(shift_limit=0.025,
                              scale_limit=0.1,
                              rotate_limit=10),
        albu.Flip(),
        albu.RandomRotate90(),
        albu.OneOf(
            [
                albu.HueSaturationValue(p=1.0),
                albu.IAAAdditiveGaussianNoise(p=1.0),
                albu.IAASharpen(p=1.0),
                albu.RandomBrightnessContrast(
                    brightness_limit=0.1, contrast_limit=0.1, p=1.0),
                albu.RandomGamma(p=1.0),
            ],
            p=1.0,
        ),
        albu.OneOf(
            [
                albu.Blur(blur_limit=3, p=1.0),
                albu.MotionBlur(blur_limit=3, p=1.0)
            ],
            p=1.0,
        ),
        albu.Normalize(),
    ])
Example #6
def get_train_transforms():
    return A.Compose([
        A.HueSaturationValue(hue_shift_limit=0.4,
                             sat_shift_limit=0.4,
                             val_shift_limit=0.4,
                             p=0.5),
        A.OneOf([
            A.RandomGamma(p=0.5),
            A.RandomBrightnessContrast(
                brightness_limit=0.4, contrast_limit=0.85, p=0.5)
        ],
                p=0.5),
        A.Blur(p=0.5),
        A.ToGray(p=0.1),
        A.RandomRotate90(p=0.5),
        A.Resize(height=1024, width=1024, p=1),
        A.Cutout(
            num_holes=10, max_h_size=64, max_w_size=64, fill_value=0, p=0.5),
        ToTensorV2(p=1.0),
    ],
                     p=1.0,
                     bbox_params=A.BboxParams(format='pascal_voc',
                                              min_area=0,
                                              min_visibility=0,
                                              label_fields=['labels']))
Example #7
def get_next_augmentation():
    train_transform = [
        albu.ChannelShuffle(p=0.1),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #8
def get_training_augmentation(max_dim, crop=True, flip=False, light=False):
    train_transform = [
        A.PadIfNeeded(min_height=max_dim, min_width=max_dim, border_mode=0)
    ]

    if crop:
        train_transform.append(
            A.OneOf([
                A.RandomSizedCrop(min_max_height=(min(max_dim, 256),
                                                  min(max_dim, 256)),
                                  height=SHAPE,
                                  width=SHAPE,
                                  p=1),
                A.RandomSizedCrop(min_max_height=(max_dim, max_dim),
                                  height=SHAPE,
                                  width=SHAPE,
                                  p=1)
            ],
                    p=1))
    else:
        train_transform.append(
            A.Resize(height=SHAPE,
                     width=SHAPE,
                     interpolation=1,
                     always_apply=True,
                     p=1))

    if flip:
        train_transform.append(A.VerticalFlip(p=.5))
        train_transform.append(A.RandomRotate90(p=.5))
    if light:
        train_transform.append(A.CLAHE(p=0.8))
        train_transform.append(A.RandomBrightnessContrast(p=0.8))
        train_transform.append(A.RandomGamma(p=0.8))
    return A.Compose(train_transform)
Example #9
def gentle_transform(p):
    return albu.Compose(
        [
            # p=0.5
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.OneOf(
                [
                    albu.IAASharpen(p=1),
                    albu.Blur(blur_limit=3, p=1),
                    albu.MotionBlur(blur_limit=3, p=1),
                ],
                p=0.5,
            ),
            albu.OneOf(
                [
                    albu.RandomBrightness(p=1),
                    albu.RandomGamma(p=1),
                ],
                p=0.5,
            ),
            # p=0.2
            albu.ShiftScaleRotate(rotate_limit=30,
                                  scale_limit=0.15,
                                  border_mode=cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0],
                                  p=0.2),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.2),
        ],
        p=p,
        additional_targets={
            'image{}'.format(_): 'image'
            for _ in range(1, 65)
        })
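The additional_targets mapping registers image1 through image64 as extra image targets, so a single call augments up to 65 frames with the same random parameters. A minimal sketch with hypothetical data:

import numpy as np

aug = gentle_transform(p=1.0)
frames = [np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8) for _ in range(65)]

targets = {'image': frames[0]}
targets.update({'image{}'.format(i): frames[i] for i in range(1, 65)})

out = aug(**targets)
aug_frames = [out['image']] + [out['image{}'.format(i)] for i in range(1, 65)]  # identically augmented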
Example #10
def get_transform(is_train):
    if is_train:
        return albumentations.Compose(
        [   
            albumentations.Resize(224,224),
            albumentations.OneOf([
                albumentations.JpegCompression(quality_lower=20, quality_upper=70, p=0.5),
                albumentations.Downscale(scale_min=0.25, scale_max=0.50, interpolation=1, p=0.5),
            ], p=0.6),
            albumentations.HorizontalFlip(p=0.5),
            albumentations.VerticalFlip(p=0.5),
#             albumentations.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=45),
            albumentations.GaussNoise(p=0.2),
            albumentations.RandomBrightnessContrast(0.3,0.3, p=0.7),
            albumentations.RandomGamma(p=0.2),    
            albumentations.CLAHE(p=0.2),
            albumentations.ChannelShuffle(p=0.2),
            albumentations.MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True, p=0.3),
            albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.7),     
            albumentations.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0)
        ])
    else:
        return albumentations.Compose(
        [
            albumentations.Resize(224,224),
            albumentations.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0)
        ])
Example #11
def hard_transforms():
    result = [
        # random flip
        albu.RandomRotate90(),
        # Random shifts, stretches and turns with a 50% probability
        albu.ShiftScaleRotate(shift_limit=0.1,
                              scale_limit=0.1,
                              rotate_limit=15,
                              border_mode=0,
                              p=0.5),
        # add random brightness and contrast, 30% prob
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2,
                                      p=0.3),
        # Random gamma changes with a 30% probability
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        # Randomly changes the hue, saturation, and color value of the input image
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(quality_lower=80),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
                   p=0.2),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ],
                   p=0.2),
    ]

    return result
Example #12
    def __init__(self):
        import albumentations
        self.transform = albumentations.Compose([
            albumentations.RandomBrightnessContrast(),
            albumentations.RandomGamma(gamma_limit=(80, 120)),
            albumentations.CLAHE(),
        ])
Example #13
def augment(image):
    transform = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=15,
                           p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ],
                p=0.5),
        A.HueSaturationValue(p=0.3),
    ])
    augmented_image = transform(image=image)['image']
    return augmented_image
Example #14
    def setup(self, stage: Optional[str] = None):
        df = load_df()
        df_train, df_val = split_df(df, self.params)

        self.train_dataset = MaskDataset(
            df_train,
            transform=A.Compose([
                A.RandomResizedCrop(
                    self.params.img_size,
                    self.params.img_size,
                ),
                A.Rotate(13),
                A.HorizontalFlip(),
                A.RandomBrightnessContrast(),
                A.HueSaturationValue(),
                A.RGBShift(),
                A.RandomGamma(),
                MyCoarseDropout(
                    min_holes=1,
                    max_holes=8,
                    max_height=32,
                    max_width=32,
                ),
            ]),
        )
        self.val_dataset = MaskDataset(
            df_val,
            transform=A.Compose([
                A.Resize(
                    self.params.img_size,
                    self.params.img_size,
                ),
            ]),
        )
Example #15
def randomgamma(gamma=(80, 120), p=0.5):
    """
    :param gamma: gamma_limit range passed to RandomGamma; default (80, 120).
    :param p: probability of applying the transform.
    :return: an albumentations RandomGamma transform.
    """
    return album.RandomGamma(gamma_limit=gamma, p=p)
Example #16
def get_transforms(phase, crop_type=0, size=512):
    list_transforms = []
    if phase == "train":
        list_transforms.extend([
            aug.Flip(),
            aug.Cutout(num_holes=4, p=0.5),
            aug.OneOf([
                aug.RandomContrast(),
                aug.RandomGamma(),
                aug.RandomBrightness(),
            ],
                      p=1),
            aug.ShiftScaleRotate(rotate_limit=90),
            aug.OneOf([
                aug.GaussNoise(p=.35),
            ], p=.5),
        ])
        if crop_type == 0:
            list_transforms.extend([
                CropNonEmptyMaskIfExists(size, size),
            ])

        elif crop_type == 1:
            list_transforms.extend([
                RandomCrop(size, size, p=1.0),
            ])

    list_transforms.extend([
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], p=1),
        ToTensor(),
    ])
    list_trfms = Compose(list_transforms)
    return list_trfms
Example #17
    def __init__(self):
        self.transform = None
        try:
            import albumentations as A
            check_version(A.__version__, '1.0.3',
                          hard=True)  # version requirement

            self.transform = A.Compose([
                A.Blur(p=0.01),
                A.MedianBlur(p=0.01),
                A.ToGray(p=0.01),
                A.CLAHE(p=0.01),
                A.RandomBrightnessContrast(p=0.0),
                A.RandomGamma(p=0.0),
                A.ImageCompression(quality_lower=75, p=0.0)
            ],
                                       bbox_params=A.BboxParams(
                                           format='yolo',
                                           label_fields=['class_labels']))

            LOGGER.info(
                colorstr('albumentations: ') +
                ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            LOGGER.info(colorstr('albumentations: ') + f'{e}')
Example #18
def get_light_augmentations(image_size):
    return A.Compose([
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.1,
                           rotate_limit=15,
                           border_mode=cv2.BORDER_CONSTANT,
                           value=0),
        A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.85),
                                          image_size[0]),
                          height=image_size[0],
                          width=image_size[1],
                          p=0.3),
        ZeroTopAndBottom(p=0.3),
        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.25,
                                       contrast_limit=0.2),
            IndependentRandomBrightnessContrast(brightness_limit=0.1,
                                                contrast_limit=0.1),
            A.RandomGamma(gamma_limit=(75, 125)),
            A.NoOp()
        ]),
        A.OneOf([ChannelIndependentCLAHE(p=0.5),
                 A.CLAHE(),
                 A.NoOp()]),
        A.HorizontalFlip(p=0.5),
    ])
Example #19
def get_medium_augmentations(image_size):
    return A.Compose([
        A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.05,
                               scale_limit=0.1,
                               rotate_limit=15,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.OpticalDistortion(distort_limit=0.11,
                                shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.NoOp()
        ]),
        A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.85),
                                          image_size[0]),
                          height=image_size[0],
                          width=image_size[1],
                          p=0.3),
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),
        A.OneOf([
            A.RGBShift(r_shift_limit=20, b_shift_limit=15, g_shift_limit=15),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
            A.NoOp()
        ]),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5)
    ])
Example #20
    def augment_img(self, img, mask, mat, idx_item=None):
        """ img already in float32 BGR format, not uint8
        """

        # horizontal flip
        p_flip = np.random.uniform()  # in [0,1)
        if p_flip > 1:  # flip is effectively disabled (p_flip is always < 1); the original threshold was 0.33
            uv_cx = np.array([1686.2379, 0])
            IMG_SHAPE = (2710, 3384, 3)  # img.shape = h,w,c
            uv_cx_new = self.convert_uv_to_uv_preprocessed(uv_cx, IMG_SHAPE)
            cx_mat = uv_cx_new[0]
            cx_img = cx_mat * self.factor_downsample
            img_flipped = scripts.flip_image_hor.flip_hor_at_u(img, cx_img)
            mask_flipped = scripts.flip_image_hor.flip_hor_at_u(mask, cx_img)
            mat_flipped = scripts.flip_image_hor.flip_hor_at_u(mat, cx_mat)
            mat_flipped[:, :, 4] *= -1  # x
            mat_flipped[:, :, 2] *= -1  # sin(yaw)
            mat_flipped[:, :, 3] *= -1  # roll
        else:
            img_flipped = img
            mask_flipped = mask
            mat_flipped = mat

        # grayish - change HSV values
        p_sat = np.random.uniform()  # in [0,1)
        if p_sat > 1:  # desaturation is effectively disabled (p_sat is always < 1)
            img_desat = reduce_saturation(img_flipped, sat_shift_range=(-0.15, 0))
        else:
            img_desat = img_flipped

        # gamma change
        aug_gamma = albumentations.RandomGamma(gamma_limit=(85, 115),
                                               p=0.33,
                                               )

        # gaussian noise
        aug_noise = albumentations.MultiplicativeNoise(multiplier=(0.90, 1.00),
                                                       elementwise=True,
                                                       per_channel=True,
                                                       p=0.33,
                                                       )

        # apply all augmentations to image
        aug_tot = albumentations.Compose([aug_gamma, aug_noise], p=1)
        img_augmented = aug_tot(image=img_desat)['image']

        # for debugging purposes
        if False:
            fig, ax = plt.subplots(3, 2, figsize=(9, 6))
            ax[0][0].imshow(img[:, :, ::-1])
            ax[0][1].imshow(img_augmented[:, :, ::-1])
            ax[1][0].imshow(mat[:, :, 0])
            ax[1][1].imshow(mat_flipped[:, :, 0])
            ax[2][0].imshow(mat[:, :, 4])  # x
            ax[2][1].imshow(mat_flipped[:, :, 4])
            # fig.tight_layout()
            plt.show()
            fig.savefig('plots_aug/{:05d}.png'.format(idx_item))

        return img_augmented, mask_flipped, mat_flipped
Example #21
def augmentation(train: bool) -> Callable:

    initial_size = 2048
    crop_min_max_height = (400, 533)
    crop_width = 512
    crop_height = 384
    if train:
        aug = [
            albu.LongestMaxSize(max_size=initial_size),
            albu.RandomSizedCrop(min_max_height=crop_min_max_height,
                                 width=crop_width,
                                 height=crop_height,
                                 w2h_ratio=crop_width / crop_height),
            albu.HueSaturationValue(hue_shift_limit=7,
                                    sat_shift_limit=10,
                                    val_shift_limit=10),
            albu.RandomBrightnessContrast(),
            albu.RandomGamma()
        ]
    else:
        test_size = int(initial_size * crop_height /
                        np.mean(crop_min_max_height))
        print('Test image max size is {} pixels'.format(test_size))
        # for tta probably
        aug = [albu.LongestMaxSize(max_size=test_size)]

    aug.extend([ToTensor()])
    return albu.Compose(aug,
                        bbox_params={
                            'format': 'coco',
                            'min_area': 0,
                            'min_visibility': 0.5,
                            'label_fields': ['labels']
                        })
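A brief usage note: with format='coco' each box is [x_min, y_min, width, height], and boxes left less than half visible after the random crop are discarded (min_visibility=0.5). A sketch with hypothetical data:

import numpy as np

train_aug = augmentation(train=True)
image = np.random.randint(0, 256, (1536, 2048, 3), dtype=np.uint8)  # hypothetical frame
out = train_aug(image=image, bboxes=[[120, 200, 300, 180]], labels=[2])
img_tensor, boxes, labels = out['image'], out['bboxes'], out['labels']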
Example #22
def tr_da_fn(height, width):
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.10, rotate_limit=7, shift_limit=0.10, border_mode=cv2.BORDER_CONSTANT, p=1.0),
        A.Perspective(scale=(0.025, 0.04), p=0.3),
        A.RandomResizedCrop(height=height, width=width, scale=(0.9, 1.0), p=0.3),

        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
                A.RandomContrast(limit=0.2, p=1.0),
            ],
            p=0.5,
        ),

        A.OneOf(
            [
                A.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1.0),
                A.Blur(blur_limit=[2, 3], p=1.0),
                A.GaussNoise(var_limit=(5, 25), p=1.0),
                # A.MotionBlur(blur_limit=3, p=1.0),
            ],
            p=0.5,
        ),

        A.Lambda(image=_da_negative, p=0.2),

        A.LongestMaxSize(max_size=max(height, width), always_apply=True),
        A.PadIfNeeded(min_height=height, min_width=width, border_mode=cv2.BORDER_CONSTANT, always_apply=True),
    ]
    return A.Compose(train_transform)
Example #23
def get_transforms(phase, size, mean, std):
    list_transforms = []
    if phase == "train":
        list_transforms.extend(
            [
                albu.HorizontalFlip(),
                albu.OneOf([
                    albu.RandomContrast(),
                    albu.RandomGamma(),
                    albu.RandomBrightness(),
                    ], p=0.3),
                albu.OneOf([
                    albu.ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                    albu.GridDistortion(),
                    albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
                    ], p=0.3), 
                albu.ShiftScaleRotate(),
            ]
        )
    list_transforms.extend(
        [
            albu.Normalize(mean=mean, std=std, p=1),
            albu.Resize(size, size),
            ToTensorV2(),
        ]
    )

    list_trfms = albu.Compose(list_transforms)
    return list_trfms
Example #24
    def get_transform(self):
      """Get the Albumentations transform based on self.aug"""
      transform_list = [A.Resize(self.H, self.W)] # fix size of image to stabilize dimension

      if 'light' in self.aug:
        # Rigid transformations that preserve image content (Light)
        light_transform_list = [
          A.RandomCrop(width = int(0.7*self.W), height = int(0.7*self.H), p = 0.5),
          A.HorizontalFlip(p = 0.5),
          A.VerticalFlip(p = 0.5),
          A.Resize(self.H, self.W)
        ]
        transform_list += light_transform_list

      if 'medium' in self.aug:
        # Non-rigid transformations: distort the image slightly (Medium)
        medium_transform_list = [
          A.GridDistortion(p=0.5),
          A.RandomGamma(gamma_limit=(90, 110), p=0.5)
        ]
        transform_list += medium_transform_list
      
      # Normalize the image after augmentation (the mask is not normalized by default).
      # max_pixel_value is needed to mimic torchvision.transforms.Normalize
      transform_list += [ A.Normalize(mean = self.IMG_MEAN, std = self.IMG_STD, max_pixel_value = 1) ]

      transform = A.Compose(transform_list)
      return transform
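For a segmentation dataset like this, the composed transform is normally called with both the image and its mask so the spatial augmentations stay in sync, while Normalize only touches the image. A sketch assuming ds is an instance of the class above with suitable H, W, aug, IMG_MEAN and IMG_STD:

import numpy as np

t = ds.get_transform()  # ds: hypothetical instance of the class above
image = np.random.rand(480, 640, 3).astype(np.float32)  # inputs already scaled to [0, 1] (max_pixel_value=1)
mask = np.random.randint(0, 2, (480, 640), dtype=np.uint8)

out = t(image=image, mask=mask)
image_aug, mask_aug = out['image'], out['mask']  # the mask only receives the spatial transforms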
Example #25
def apply_training_augmentation():
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        A.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0),
        A.RandomCrop(height=320, width=320, always_apply=True),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
Example #26
def get_training_augmentation():
    train_transform = [
        A.RandomSizedCrop(min_max_height=(300, 360), height=320, width=320,
                          always_apply=True),
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.CLAHE(),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
            A.HueSaturationValue(),
            A.NoOp()
        ]),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(p=0.2),
            A.IAASharpen(),
            A.Blur(blur_limit=3),
            A.MotionBlur(blur_limit=3),
            A.NoOp()
        ]),
        A.OneOf([
            A.RandomFog(),
            A.RandomSunFlare(),
            A.RandomRain(),
            A.RandomSnow(),
            A.NoOp()
        ]),
        A.Normalize(),
    ]
    return A.Compose(train_transform)
Example #27
def build_transforms(args, n_classes):
    train_transforms = A.Compose([
        A.HorizontalFlip(),
        A.RandomResizedCrop(width=args.image_size,
                            height=args.image_size,
                            scale=(0.7, 1.2)),
        A.CoarseDropout(max_height=int(args.image_size / 5),
                        max_width=int(args.image_size / 5)),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
            A.Blur(blur_limit=5),
        ]),
        ToTensorV2(),
    ])
    train_transforms = AlbumentationsSegmentationWrapperTransform(
        train_transforms, class_num=n_classes, ignore_indices=[
            255,
        ])
    test_transforms = A.Compose([
        A.Resize(width=args.image_size, height=args.image_size),
        ToTensorV2(),
    ])
    test_transforms = AlbumentationsSegmentationWrapperTransform(
        test_transforms, class_num=n_classes, ignore_indices=[
            255,
        ])
    return train_transforms, test_transforms
Example #28
def get_training_augmentation():
    train_transform = albu.Compose([
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ],
                                   additional_targets={'depth': 'mask'})

    return train_transform
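Registering 'depth' as a mask-type target means the depth map is warped together with the image by the spatial transforms (here the perspective warp) but skipped by the pixel-level ones. A minimal sketch with made-up arrays:

import numpy as np

aug = get_training_augmentation()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # hypothetical RGB frame
depth = np.random.rand(480, 640).astype(np.float32)               # hypothetical depth map

out = aug(image=image, depth=depth)
image_aug, depth_aug = out['image'], out['depth']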
Example #29
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.PadIfNeeded(min_height=1216,
                         min_width=512,
                         always_apply=True,
                         border_mode=0),
        # With probability 0.9, apply one of the transforms inside OneOf; each is picked with probability 1/3 (since 1/(1+1+1))
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Example #30
def get_train_transforms():
    return A.Compose(
        [
            A.RandomSizedCrop(
                min_max_height=(650, 1024), height=1024, width=1024, p=0.5),
            A.OneOf([
                A.HueSaturationValue(hue_shift_limit=0.68,
                                     sat_shift_limit=0.68,
                                     val_shift_limit=0.1,
                                     p=0.9),
                A.RandomGamma(p=0.9),
                A.RandomBrightnessContrast(
                    brightness_limit=0.1, contrast_limit=0.1, p=0.9),
            ],
                    p=0.9),
            # A.CLAHE(p=1.0),
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Resize(height=512, width=512, p=1),
            A.CoarseDropout(max_holes=20,
                            max_height=32,
                            max_width=32,
                            fill_value=0,
                            p=0.25),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']))