Example no. 1
def transform_v3(config):
    train_transforms = Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        HorizontalFlip(),
        Resize(config.image_size, config.image_size),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=1),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=1),
        ToTensor()
    ])

    test_transforms = Compose([
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        Resize(config.image_size, config.image_size),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=1),
        ToTensor()
    ])

    return train_transforms, test_transforms
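A minimal sketch of how this first pipeline could be wired up, assuming a pre-1.0 Albumentations release (where these transforms are importable from the top-level package and ToTensor lives in albumentations.pytorch); the Config class and image path below are hypothetical:

import cv2
from albumentations import (Compose, ImageCompression, GaussNoise, GaussianBlur,
                            HorizontalFlip, Resize, OneOf, RandomBrightnessContrast,
                            FancyPCA, HueSaturationValue, ShiftScaleRotate)
from albumentations.pytorch import ToTensor

class Config:  # hypothetical config object; only image_size is read by transform_v3
    image_size = 224

train_transforms, test_transforms = transform_v3(Config())
image = cv2.imread("sample.jpg")                 # uint8 BGR array
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)   # Albumentations expects RGB
augmented = train_transforms(image=image)        # Compose returns a dict
tensor = augmented["image"]                      # a torch.Tensor after ToTensor()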
Example no. 2
def augment(patch_size=patch_size):
    return Compose([
        VerticalFlip(p=.5),
        HorizontalFlip(p=.5),
        HueSaturationValue(hue_shift_limit=(-15, 15),
                           sat_shift_limit=0,
                           val_shift_limit=0,
                           p=.5),
        HueSaturationValue(hue_shift_limit=(-10, 10),
                           sat_shift_limit=(-20, 20),
                           val_shift_limit=0,
                           p=.5),
        Rotate(limit=(0, 359), p=.5, border_mode=cv2.BORDER_CONSTANT),
        RandomBrightnessContrast(brightness_limit=0.15,
                                 contrast_limit=0.1,
                                 always_apply=False,
                                 p=0.5),
        ElasticTransform(always_apply=True,
                         approximate=True,
                         alpha=20,
                         sigma=10,
                         alpha_affine=0,
                         border_mode=cv2.BORDER_CONSTANT),
        GridDistortion(num_steps=16,
                       distort_limit=0.5,
                       border_mode=cv2.BORDER_CONSTANT,
                       always_apply=False,
                       p=0.5),
    ])
def aug_train(resolution, p=1):
    return Compose([Resize(resolution, resolution),
                    OneOf([
                        HorizontalFlip(),
                        VerticalFlip(),
                        RandomRotate90(),
                        Transpose()], p=0.5),
                    OneOf([
                        IAAAdditiveGaussianNoise(),
                        GaussNoise(),
                    ], p=0.5),
                    OneOf([
                        MotionBlur(p=.2),
                        MedianBlur(blur_limit=3, p=0.1),
                        Blur(blur_limit=3, p=0.1),
                    ], p=0.5),
                    ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
                    OneOf([
                        OpticalDistortion(p=0.3),
                        GridDistortion(p=.1),
                        IAAPiecewiseAffine(p=0.3),
                    ], p=0.5),
                    OneOf([
                        CLAHE(clip_limit=2),
                        IAASharpen(),
                        IAAEmboss(),
                        RandomBrightnessContrast(),
                    ], p=0.5),
                    HueSaturationValue(p=0.3),
                    Normalize()
                    ], p=p)
def strong_aug(p=0.5):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
Example no. 5
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.3),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(),
        Cutout(num_holes=20, max_h_size=16, max_w_size=16)
    ],
                   p=p)
def get_transforms(*, data):

    if data == 'train':
        return Compose([
            RandomResizedCrop(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(p=0.5),
            HueSaturationValue(hue_shift_limit=0.2,
                               sat_shift_limit=0.2,
                               val_shift_limit=0.2,
                               p=0.5),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1),
                                     p=0.5),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),
        ])

    elif data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),
        ])
Example no. 7
def medium_aug(p=1.0):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(p=0.75,
                             shift_limit=0.1,
                             scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT),
            RandomBrightnessContrast(
                brightness_limit=0.6, contrast_limit=0.6, p=0.5),
            OneOf([
                HueSaturationValue(p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0)
            ],
                  p=0.5),
            OneOf([
                Blur(p=1.0),
                MedianBlur(p=1.0),
                MotionBlur(p=1.0),
            ], p=0.3),
            OneOf([GridDistortion(p=1.0),
                   ElasticTransform(p=1.0)], p=0.3),
            OneOf([
                CLAHE(p=1.0),
                IAASharpen(p=1.0),
            ], p=0.3),
            IAAAdditiveGaussianNoise(p=0.5)
            # ToGray(p=1.0),
        ],
        p=p)
    def gettraintransforms(self, mean, std, p=1):
        # Train Phase transformations

        albumentations_transform = Compose([
            # RandomRotate90(),
            PadIfNeeded(72, 72, border_mode=cv2.BORDER_REFLECT, always_apply=True),
            RandomCrop(64, 64, True),
            Flip(),
            GaussNoise(p=0.8, mean=mean),
            OneOf([
                MotionBlur(p=0.4),
                MedianBlur(blur_limit=3, p=0.2),
                Blur(blur_limit=3, p=0.2),
            ], p=0.4),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.6),
            OneOf([
                OpticalDistortion(p=0.8),
                GridDistortion(p=0.4),
            ], p=0.6),
            HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.6),
            CoarseDropout(always_apply=True, max_holes=1, min_holes=1, max_height=16, max_width=16,
                          fill_value=(255 * .6), min_height=16, min_width=16),
            Normalize(mean=mean, std=std, always_apply=True),
            pytorch.ToTensorV2(always_apply=True),

        ], p=p)

        return albumentations_transform
Example no. 9
def HSV(img, v):  # v in [-1, 1]
    assert -1 <= v <= 1
    if random.random() < 0.5:
        v = -v
    # A scalar *_shift_limit is interpreted by Albumentations as the symmetric range (-v, v).
    transform = Compose([HueSaturationValue(hue_shift_limit=v, sat_shift_limit=v, val_shift_limit=v)])
    Aug_img = transform(image=img)  # Compose returns a dict; the augmented array is Aug_img["image"]
    return Aug_img
def make(p=0.5):
    return Compose(
        [
            OneOf([IAAAdditiveGaussianNoise(),
                   GaussNoise(),
                   ISONoise()],
                  p=0.9),
            MotionBlur(p=0.3),
            ShiftScaleRotate(shift_limit=0.0925,
                             scale_limit=0.4,
                             rotate_limit=7,
                             border_mode=cv2.BORDER_CONSTANT,
                             value=0,
                             p=0.6),
            # IAAPerspective(scale=(.055, .060), keep_size=False, p=.2),
            # OpticalDistortion(p=0.2),
            OneOf([
                CLAHE(clip_limit=2),
                IAASharpen(),
                IAAEmboss(),
                RandomBrightnessContrast(),
            ],
                  p=0.3),
            HueSaturationValue(p=0.3),
            RGBShift(40, 40, 40)
        ],
        p=p)
Example no. 11
def hard_transform(image_size: int = 256, p: float = 0.5, **kwargs):
    """Hard augmentations (on training)"""
    _add_transform_default_params(kwargs)

    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5,
                                 p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
        PadIfNeeded(image_size, image_size, border_mode=cv2.BORDER_CONSTANT),
    ], **kwargs)
    return transforms
def create_train_transforms(conf):
    height = conf['crop_height']
    width = conf['crop_width']
    return Compose([
        SafeRotate(45, p=0.4, border_mode=cv2.BORDER_CONSTANT),
        OneOf([
            RandomSizedCrop(min_max_height=(int(height * 0.7), int(
                height * 1.3)),
                            w2h_ratio=1.,
                            height=height,
                            width=width,
                            p=0.8),
            RandomCrop(height=height, width=width, p=0.2)
        ],
              p=1),
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        ImageCompression(p=0.1),
        Lighting(alphastd=0.3),
        RandomBrightnessContrast(p=0.4),
        RandomGamma(p=0.4),
        OneOf([RGBShift(), HueSaturationValue()], p=0.2)
    ],
                   additional_targets={'image1': 'image'})
Example no. 13
def strong_aug(config, aug_prob):
    return Compose(
        [
            # Resize(config.image_height, config.image_width, always_apply=True),
            RandomSizedCrop(
                p=config.random_sized_crop_prob,
                min_max_height=(int(
                    config.image_height * config.min_max_height),
                                config.image_height),
                height=config.image_height,
                width=config.image_width,
                w2h_ratio=config.image_width / config.image_height),
            HorizontalFlip(p=config.horizontal_flip_prob),
            RandomGamma(p=config.random_gamma_prob),
            RandomContrast(p=config.random_contrast_prob,
                           limit=config.random_contrast_limit),
            RandomBrightness(p=config.random_brightness_prob,
                             limit=config.random_brightness_limit),
            OneOf([
                MotionBlur(p=config.motion_blur_prob),
                MedianBlur(blur_limit=config.median_blur_limit,
                           p=config.median_blur_prob),
                Blur(blur_limit=config.blur_limit, p=config.blur_prob),
            ],
                  p=config.one_of_blur_prob),
            CLAHE(clip_limit=config.clahe_limit, p=config.clahe_prob),
            IAAEmboss(p=config.iaaemboss_prob),
            HueSaturationValue(p=config.hue_saturation_value_prob,
                               hue_shift_limit=config.hue_shift_limit,
                               sat_shift_limit=config.sat_shift_limit,
                               val_shift_limit=config.val_shift_limit)
        ],
        p=aug_prob)
def train_transform(p=1):
    return Compose(
        [
            #            Rescale(SIZE),
            RandomCrop(SIZE, SIZE),  # RandomCrop needs height and width; assuming SIZE is a single side length
            RandomBrightness(0.2),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.15),
            #            OneOf([
            #                OpticalDistortion(p=0.3),
            #                GridDistortion(p=.1),
            #                IAAPiecewiseAffine(p=0.3),
            #            ], p=0.1),
            #            OneOf([
            #                IAASharpen(),
            #                IAAEmboss(),
            #                RandomContrast(),
            #                RandomBrightness(),
            #            ], p=0.15),
            HueSaturationValue(p=0.15),
            HorizontalFlip(p=0.5),
            Normalize(p=1),
        ],
        p=p)
Example no. 15
def get_train_transforms():
    augmentations = Compose([
        Resize(236,236),
        Flip(),
        OneOf([
            IAAAdditiveGaussianNoise(p=.5),
            GaussNoise(p=.4),
        ], p=0.4),
        OneOf([
            MotionBlur(p=0.6),
            Blur(blur_limit=3, p=0.2),
        ], p=0.4),
        ShiftScaleRotate(shift_limit=0.0725, scale_limit=0.2, rotate_limit=45, p=0.6),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.4),
            IAAPiecewiseAffine(p=0.2),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.25),
        HueSaturationValue(p=0.3),
        CenterCrop(224,224),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
        ToTensor()
    ])

    return lambda img: augmentations(image=np.array(img))["image"]  # unwrap the dict returned by Compose
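Because the Compose result is unwrapped inside the lambda, the function above can be dropped straight into a torchvision-style dataset, which passes a PIL image to its transform; a small usage sketch under that assumption (the dataset path is hypothetical):

from torchvision import datasets

train_dataset = datasets.ImageFolder("data/train", transform=get_train_transforms())
image_tensor, label = train_dataset[0]  # the lambda converts the PIL image to a NumPy array, augments it, and returns the tensor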
Example no. 16
def transforms_train(aug_proba=1.):
    return Compose(transforms=[
        HorizontalFlip(p=0.5),
        Rotate(limit=25,
               p=0.5,
               border_mode=cv2.BORDER_CONSTANT,
               value=0,
               interpolation=cv2.INTER_CUBIC),
        OneOf([
            IAAAdditiveGaussianNoise(p=1),
            GaussNoise(p=1),
        ], p=0.2),
        OneOf([
            HueSaturationValue(hue_shift_limit=10,
                               sat_shift_limit=15,
                               val_shift_limit=10,
                               p=1),
            RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=1)
        ]),
        OneOf([RandomContrast(p=1), RandomBrightness(p=1)], p=0.3),
        OpticalDistortion(p=0.1),
        Resize(*SIZE),
        Normalize()
    ],
                   p=aug_proba,
                   additional_targets={'trimap': 'mask'})
Example no. 17
    def Auger(self):
        List_Transforms = []
        if self.is_train:
            List_Transforms.extend([
                HueSaturationValue(10, 10, 10, p=0.3),
                HorizontalFlip(p=0.3),  # pass p explicitly; a bare positional would set always_apply instead
                VerticalFlip(p=0.3),

                # This may not be desired: ToFloat rescales [0, 255] -> [0.0, 1.0]
                ToFloat(always_apply=True),
                ShiftScaleRotate(
                    shift_limit=0.1,  # no resizing
                    scale_limit=0.1,
                    rotate_limit=3,  # rotate
                    p=0.5,
                    border_mode=cv2.BORDER_REFLECT),
                PadIfNeeded(self.padshape, self.padshape),
            ])
        List_Transforms.extend([
            # [0.12110683835022196, 0.1308642819666743, 0.14265566800591103]
            #Normalize(mean=(0.397657144,0.351649219,0.305031406),std=(0.12110683835022196, 0.1308642819666743, 0.14265566800591103)),
            RandomCrop(self.shape, self.shape),
            ToTensor(),
        ])
        TR = Compose(List_Transforms)
        return TR
Example no. 18
def aug_daniel(prob=0.8):
    return Compose(
        [
            RandomRotate90(p=0.5),
            Transpose(p=0.5),
            Flip(p=0.5),
            OneOf(
                [
                    IAAAdditiveGaussianNoise(),
                    GaussNoise(),
                    #Blur(),
                ],
                p=0.3),
            OneOf(
                [
                    CLAHE(clip_limit=2),
                    IAASharpen(),
                    IAAEmboss(),
                    OneOf([
                        RandomContrast(),
                        RandomBrightness(),
                    ]),
                    #Blur(),
                    #GaussNoise()
                ],
                p=0.5),
            HueSaturationValue(p=0.5)
        ],
        p=prob)
def strong_aug(p=1):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
    ],
                   p=p)
Example no. 20
def strong_aug2(p=1.0):
    return Compose(
        [
            Flip(p=0.75),  # ok
            # RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
            RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                     contrast_limit=0.2,
                                     p=1.0,
                                     brightness_by_max=False),
            ShiftScaleRotate(shift_limit=0.1,
                             scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT,
                             p=0.2),
            OneOf([
                IAASharpen(p=1),
                Blur(blur_limit=5, p=1.0),
                MedianBlur(blur_limit=5, p=1.0),
                MotionBlur(p=1.0),
            ],
                  p=0.6),
            OneOf([
                HueSaturationValue(hue_shift_limit=10,
                                   sat_shift_limit=30,
                                   val_shift_limit=20,
                                   p=1.0),
                RGBShift(p=1.0),
                RandomGamma(p=1),
            ],
                  p=0.3),
            IAAAdditiveGaussianNoise(p=.2),
        ],
        p=p)
def get_transforms(*, data):
    
    if data == 'train':
        return Compose([
            #Resize(CFG.size, CFG.size),
            RandomResizedCrop(CFG.size, CFG.size, scale=(0.85, 1.0)),
            HorizontalFlip(p=0.5),
            RandomBrightnessContrast(p=0.2, brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2)),
            HueSaturationValue(p=0.2, hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2),
            ShiftScaleRotate(p=0.2, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20),
            CoarseDropout(p=0.2),
            Cutout(p=0.2, max_h_size=16, max_w_size=16, fill_value=(0., 0., 0.), num_holes=16),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),
        ])
    
    elif data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2(),

        ])
Example no. 22
def hard_transform(image_size: int = 256, p: float = 0.5):
    """Hard augmentations"""
    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
    return transforms
Example no. 23
def get_transforms():
    return Compose([
        RandomRotate90(p=0.5),
        Flip(p=0.5),
        Transpose(p=0.5),
        # OneOf([
        #     IAAAdditiveGaussianNoise(),
        #     GaussNoise(),
        # ], p=0.2),
        # OneOf([
        #     MotionBlur(p=.2),
        #     MedianBlur(blur_limit=3, p=0.1),
        #     Blur(blur_limit=3, p=0.1),
        # ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.2),
        # OneOf([
        #     CLAHE(clip_limit=2),
        #     IAASharpen(),
        #     IAAEmboss(),
        #     RandomBrightnessContrast(),
        # ], p=0.3),
        HueSaturationValue(p=0.3),
    ])
Example no. 24
def train_transform(sz, downscale=1, p=1):
    augmentation = Compose(
        [
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            RandomRotate90(p=0.5),
            # GridDistortion(p=0.5, border_mode=cv2.BORDER_CONSTANT),
            # RandomGamma(p=0.9, gamma_limit=(80, 150)),
            HueSaturationValue(p=0.9,
                               hue_shift_limit=10,
                               sat_shift_limit=20,
                               val_shift_limit=10),
        ],
        p=p)

    def transform_fun(img_path):
        img, mask = read_img_mask(img_path, sz, downscale=downscale)
        data = {"image": img, "mask": mask}
        augmented = augmentation(**data)
        img, mask = augmented["image"], augmented["mask"]

        img = norm_fun(img)
        mask = mask_from_rgb(mask)
        return img, mask

    return transform_fun
def create_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=0.7),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
    ])
Example no. 26
def strong_aug(p=.5):
    return Compose([
        JpegCompression(p=0.9),
        HorizontalFlip(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.5),
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=15, p=.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.5),
        HueSaturationValue(p=0.5),
    ],
                   p=p)
Example no. 27
def transform(config, image, mask):

    try:
        p = config["train"]["dap"]["p"]
    except (KeyError, TypeError):
        p = 1

    assert 0 <= p <= 1

    # Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    return Compose([
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(),
               GaussNoise()], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1)
        ],
              p=0.2),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.2),
        OneOf([IAASharpen(),
               IAAEmboss(),
               RandomBrightnessContrast()], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)(image=image, mask=mask)  # p must go to Compose; the call itself takes only the targets
Example no. 28
def get_train_transforms():
    return Compose(
        [
            #Resize(args.img_size, args.img_size),
            RandomResizedCrop(args.img_size, args.img_size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.25),
            ShiftScaleRotate(p=0.25),
            HueSaturationValue(hue_shift_limit=0.2,
                               sat_shift_limit=0.2,
                               val_shift_limit=0.2,
                               p=0.25),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1),
                                     p=0.5),
            Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225],
                      max_pixel_value=255.0,
                      p=1.0),
            CoarseDropout(p=0.5),
            # Cutout(p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.)
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(p=0.5),
        ToGray(p=0.1),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
    ],
                   p=p)
    def get_corrupter(self):
        distortion_augs = OneOf([
            OpticalDistortion(p=1),
            GridDistortion(p=1),
        ], p=1)
        effects_augs = OneOf([
            IAASharpen(p=1),
            IAAEmboss(p=1),
            IAAPiecewiseAffine(p=1),
            IAAPerspective(p=1),
            CLAHE(p=1),
        ], p=1)
        misc_augs = OneOf([
            ShiftScaleRotate(p=1),
            HueSaturationValue(p=1),
            RandomBrightnessContrast(p=1),
        ], p=1)
        blur_augs = OneOf([
            Blur(p=1),
            MotionBlur(p=1),
            MedianBlur(p=1),
            GaussNoise(p=1),
        ], p=1)
        aug = Compose([distortion_augs, effects_augs, misc_augs, blur_augs])
        return aug