def __init__(self, data_root='data', split_file='', size=(256, 256), fold=0, resize=False):
     self.data_root = data_root
     # Load the cross-validation split description from the pickle file.
     with open(split_file, 'rb') as f:
         pkl_data = pickle.load(f)
     if fold == -1:
         # fold == -1 combines the train and val paths of the first fold.
         self.path_list = pkl_data[0]['train']
         self.path_list.extend(pkl_data[0]['val'])
     else:
         self.path_list = pkl_data[fold]['train']
     self.len = len(self.path_list)
     if resize:
         self.transforms = Compose([Resize(height=size[0], width=size[1], interpolation=cv2.INTER_NEAREST),
                                    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=20, p=0.7,
                                                     border_mode=cv2.BORDER_CONSTANT, value=0),
                                    HorizontalFlip(p=0.5),
                                    OneOf([ElasticTransform(p=1, alpha=50, sigma=30, alpha_affine=30,
                                                            border_mode=cv2.BORDER_CONSTANT, value=0),
                                           OpticalDistortion(p=1, distort_limit=0.5, shift_limit=0.1,
                                                             border_mode=cv2.BORDER_CONSTANT, value=0)], p=0.5),
                                    RandomGamma(gamma_limit=(80, 120), p=0.5),
                                    GaussNoise(var_limit=(0.02, 0.1), mean=0, p=0.5)
                                    ])
     else:
         self.transforms = Compose([LongestMaxSize(max_size=max(size)),
                                    PadIfNeeded(min_height=size[0], min_width=size[1], value=0,
                                                border_mode=cv2.BORDER_CONSTANT),
                                    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=20, p=0.7,
                                                     border_mode=cv2.BORDER_CONSTANT, value=0),
                                    HorizontalFlip(p=0.5),
                                    OneOf([ElasticTransform(p=1, alpha=50, sigma=30, alpha_affine=30,
                                                            border_mode=cv2.BORDER_CONSTANT, value=0),
                                           OpticalDistortion(p=1, distort_limit=0.5, shift_limit=0.1,
                                                             border_mode=cv2.BORDER_CONSTANT, value=0)], p=0.5),
                                    RandomGamma(gamma_limit=(80, 120), p=0.5),
                                    GaussNoise(var_limit=(0.02, 0.1), mean=0, p=0.5)
                                    ])
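# Usage sketch (the dataset class name is hypothetical; the snippet above is its __init__):
#   ds = SegmentationDataset(data_root='data', split_file='splits.pkl', fold=0, resize=True)
#   fold=-1 trains on train+val of the first fold; resize=False keeps the aspect ratio
#   via LongestMaxSize + PadIfNeeded instead of a plain Resize.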
Example #2
def transform_v3(config):
    train_transforms = Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        HorizontalFlip(),
        Resize(config.image_size, config.image_size),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=1),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=1),
        ToTensor()
    ])

    test_transforms = Compose([
        GaussNoise(p=1),
        GaussianBlur(blur_limit=3, p=1),
        Resize(config.image_size, config.image_size),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=1),
        ToTensor()
    ])

    return train_transforms, test_transforms
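# Usage sketch (assumes a config object exposing an `image_size` attribute):
#   train_tfms, test_tfms = transform_v3(config)
#   sample = train_tfms(image=img)    # img: HWC uint8 numpy array
#   tensor = sample['image']          # ToTensor() yields a CHW torch tensor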
 def __init__(self,
              var_limit=(10.0, 50.0),
              mean=0,
              always_apply=False,
              p=0.5,
              p_asym=0.2):
     # Initialize both base classes explicitly: StereoTransformAsym handles the
     # asymmetric-application probability, GaussNoise the noise parameters.
     StereoTransformAsym.__init__(self, always_apply, p, p_asym)
     GaussNoise.__init__(self, var_limit, mean, always_apply, p)
Example #4
def strong_aug(p=0.5, crop_size=(512, 512)):
    return Compose([
        RandomResizedCrop(crop_size[0],
                          crop_size[1],
                          scale=(0.3, 1.0),
                          ratio=(0.75, 1.3),
                          interpolation=4,  # cv2.INTER_LANCZOS4
                          p=1.0),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.8),
        OneOf([
            MotionBlur(p=0.5),
            MedianBlur(blur_limit=3, p=0.5),
            Blur(blur_limit=3, p=0.5),
        ],
              p=0.3),
        ShiftScaleRotate(
            shift_limit=0.2, scale_limit=0.5, rotate_limit=180, p=0.8),
        OneOf([
            OpticalDistortion(p=0.5),
            GridDistortion(p=0.5),
            IAAPiecewiseAffine(p=0.5),
            ElasticTransform(p=0.5),
        ],
              p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
              p=0.3),
        OneOf([
            GaussNoise(),
            RandomRain(
                p=0.2, brightness_coefficient=0.9, drop_width=1, blur_value=5),
            RandomSnow(p=0.4,
                       brightness_coeff=0.5,
                       snow_point_lower=0.1,
                       snow_point_upper=0.3),
            RandomShadow(p=0.2,
                         num_shadows_lower=1,
                         num_shadows_upper=1,
                         shadow_dimension=5,
                         shadow_roi=(0, 0.5, 1, 1)),
            RandomFog(
                p=0.5, fog_coef_lower=0.3, fog_coef_upper=0.5, alpha_coef=0.1)
        ],
              p=0.3),
        RGBShift(),
        HueSaturationValue(p=0.9),
    ],
                   p=p)
def strong_aug(p=1):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
    ],
                   p=p)
Example #6
 def __getitem__(self, idx):
     file = self.files[idx]
     file_path = os.path.join(PATH, self.mode + '_images', file)
     image = cv2.imread(file_path)
     mean = (0.485, 0.456, 0.406)
     std = (0.229, 0.224, 0.225)
     train_aug = Compose([
         OneOf([
             VerticalFlip(),
             HorizontalFlip(),
         ], p=0.5),
         OneOf([
             MotionBlur(p=0.4),
             MedianBlur(p=0.4, blur_limit=3),
             Blur(p=0.5, blur_limit=3)
         ],
               p=0.4),
         OneOf([IAAAdditiveGaussianNoise(),
                GaussNoise()], p=0.4),
         Normalize(mean=mean, std=std, p=1),
         # CLAHE(p=0.5),
         ToTensor()
     ])
     augmented = train_aug(image=image)
     image = augmented['image']
     label = np.array(self.labels[idx])
     label = torch.tensor(label, dtype=torch.float32)
     return (image, label)
Example #7
    def __init__(self,
                 df,
                 rgb_channel=False,
                 img_size=512,
                 grid_size=32,
                 is_train=True):
        self.df = df
        self.is_train = is_train
        self.img_size = img_size
        self.rgb_channel = rgb_channel
        self.clahe = cv2.createCLAHE(clipLimit=2.0,
                                     tileGridSize=(grid_size, grid_size))

        # Transforms
        if self.is_train:
            self.transforms = Compose([
                Resize(img_size, img_size),
                HorizontalFlip(p=0.5),
                ShiftScaleRotate(shift_limit=0,
                                 scale_limit=0.1,
                                 rotate_limit=10,
                                 p=0.5,
                                 border_mode=cv2.BORDER_CONSTANT),
                GaussNoise(),
                MultiplicativeNoise(),
                ToTensor()
            ])
        else:
            self.transforms = Compose([Resize(img_size, img_size), ToTensor()])
def strong_aug(p=0.5):
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
Example #9
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.3),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(),
        Cutout(num_holes=20, max_h_size=16, max_w_size=16)
    ],
                   p=p)
Example #10
    def __init__(self, is_train: bool, to_pytorch: bool):
        preprocess = OneOf([
            Resize(height=DATA_HEIGHT, width=DATA_WIDTH),
            Compose([
                Resize(height=int(DATA_HEIGHT * 1.2),
                       width=int(DATA_WIDTH * 1.2)),
                RandomCrop(height=DATA_HEIGHT, width=DATA_WIDTH)
            ])
        ],
                           p=1)

        if is_train:
            self._aug = Compose(
                [
                    preprocess,
                    HorizontalFlip(p=0.5),
                    GaussNoise(p=0.3),
                    # OneOf([
                    #     RandomBrightnessContrast(),
                    #     RandomGamma(),
                    # ], p=0.3),
                    # OneOf([
                    #     ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                    #     GridDistortion(),
                    #     OpticalDistortion(distort_limit=2, shift_limit=0.5),
                    # ], p=0.3),
                    Rotate(limit=20),
                ],
                p=1)
        else:
            self._aug = preprocess

        self._need_to_pytorch = to_pytorch
Example #11
    def data_augmentation(self, original_image):
        """ 进行样本和掩膜的随机增强
        Args:
            original_image: 原始图片
        Return:
            image_aug: 增强后的图片
        """
        augmentations = Compose([
            HorizontalFlip(p=0.4),
            ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),

            RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.3),

            # Brightness / contrast
            RandomGamma(gamma_limit=(80, 120), p=0.1),
            RandomBrightnessContrast(p=0.1),
            
            # Blur
            OneOf([
                    MotionBlur(p=0.1),
                    MedianBlur(blur_limit=3, p=0.1),
                    Blur(blur_limit=3, p=0.1),
                ], p=0.3),
            
            OneOf([
                    IAAAdditiveGaussianNoise(),
                    GaussNoise(),
                ], p=0.2)
        ])
        
        augmented = augmentations(image=original_image)
        image_aug = augmented['image']

        return image_aug
Example #12
def strong_aug(p=1):
    return Compose([
        OneOf([
            RandomRotate90(p=1),
            Flip(p=1),
        ], p=1),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.1,
                         rotate_limit=45,
                         p=1,
                         value=0,
                         border_mode=2),  # cv2.BORDER_REFLECT
        OneOf([
            IAAAdditiveGaussianNoise(p=0.7),
            GaussNoise(p=0.7),
        ], p=1),
        OneOf([
            MotionBlur(p=0.7),
            MedianBlur(blur_limit=3, p=0.7),
            Blur(blur_limit=3, p=0.7),
        ],
              p=1),
        RandomBrightnessContrast(p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(p=0.7),
        ],
              p=1)
    ],
                   p=p)
Example #13
def get_transforms(phase, size, mean, std):
    list_transforms = []

    if phase == "train":
        list_transforms.extend(
            [
                HorizontalFlip(p=0.5),
                ShiftScaleRotate(
                    shift_limit=0,  # no shifting
                    scale_limit=0.1,
                    rotate_limit=10,  # rotate by up to ±10 degrees
                    p=0.5,
                    border_mode=cv2.BORDER_CONSTANT,
                ),
                GaussNoise(),
                #A.MultiplicativeNoise(multiplier=1.5, p=1),
            ]
        )
    list_transforms.extend(
        [Resize(size, size), Normalize(mean=mean, std=std, p=1), ToTensor(), ]
    )

    list_trfms = Compose(list_transforms)

    return list_trfms
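# Usage sketch:
#   tfms = get_transforms("train", size=224, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
#   sample = tfms(image=img)          # img: HWC uint8 numpy array
#   tensor = sample['image']          # normalized CHW tensor from ToTensor()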
Example #14
    def __init__(self, is_train: bool, to_pytorch: bool, preprocess):
        if is_train:
            self._aug = Compose([
                preprocess,
                OneOf([
                    Compose([
                        HorizontalFlip(p=0.5),
                        GaussNoise(p=0.5),
                        OneOf([
                            RandomBrightnessContrast(),
                            RandomGamma(),
                        ],
                              p=0.5),
                        Rotate(limit=20, border_mode=cv2.BORDER_CONSTANT),
                        ImageCompression(),
                        CLAHE(),
                        Downscale(scale_min=0.2, scale_max=0.9, p=0.5),
                        ISONoise(p=0.5),
                        MotionBlur(p=0.5)
                    ]),
                    HorizontalFlip(p=0.5)
                ])
            ],
                                p=1)
        else:
            self._aug = preprocess

        self._need_to_pytorch = to_pytorch
Example #15
 def get_corrupter(self):
     distortion_augs = OneOf([OpticalDistortion(p=1),
                              GridDistortion(p=1)],
                             p=1)
     effects_augs = OneOf([
         IAASharpen(p=1),
         IAAEmboss(p=1),
         IAAPiecewiseAffine(p=1),
         IAAPerspective(p=1),
         CLAHE(p=1)
     ],
                          p=1)
     misc_augs = OneOf([
         ShiftScaleRotate(p=1),
         HueSaturationValue(p=1),
         RandomBrightnessContrast(p=1)
     ],
                       p=1)
     blur_augs = OneOf(
         [Blur(p=1),
          MotionBlur(p=1),
          MedianBlur(p=1),
          GaussNoise(p=1)],
         p=1)
     aug = Compose([distortion_augs, effects_augs, misc_augs, blur_augs])
     return aug
Example #16
def strong_aug(p=0.8):
    """Find all the description of each function:https://github.com/albu/albumentations
	Probabilities:
	p1: decides if this augmentation will be applied. The most common case is p1=1 means that we always apply the transformations from above. p1=0 will mean that the transformation block will be ignored.
	p2: every augmentation has an option to be applied with some probability.
	p3: decide if OneOf will be applied.
	In the final run all the p1-p3 probabilities are multiplied.
	"""
    return Compose([
        ShiftScaleRotate(shift_limit=0.2,
                         scale_limit=0.3,
                         rotate_limit=45,
                         p=0.8,
                         border_mode=cv2.BORDER_CONSTANT),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.3),
        MedianBlur(blur_limit=3, p=0.7),
        OneOf([
            CLAHE(clip_limit=2, p=0.4),
            IAASharpen(p=0.4),
            IAAEmboss(p=0.4),
            RandomBrightnessContrast(p=0.6),
            HorizontalFlip(p=0.5)
        ])
    ],
                   p=p)
Example #17
def strong_aug(p=.5):
    return Compose([
        JpegCompression(p=0.9),
        HorizontalFlip(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.5),
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=15, p=.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.5),
        HueSaturationValue(p=0.5),
    ],
                   p=p)
    def gettraintransforms(self, mean, std, p=1):
        # Train Phase transformations

        albumentations_transform = Compose([
            # RandomRotate90(),
            PadIfNeeded(72, 72, border_mode=cv2.BORDER_REFLECT, always_apply=True),
            RandomCrop(64, 64, True),
            Flip(),
            GaussNoise(p=0.8, mean=mean),
            OneOf([
                MotionBlur(p=0.4),
                MedianBlur(blur_limit=3, p=0.2),
                Blur(blur_limit=3, p=0.2),
            ], p=0.4),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.6),
            OneOf([
                OpticalDistortion(p=0.8),
                GridDistortion(p=0.4),
            ], p=0.6),
            HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.6),
            CoarseDropout(always_apply=True, max_holes=1, min_holes=1, max_height=16, max_width=16,
                          fill_value=(255 * .6), min_height=16, min_width=16),
            Normalize(mean=mean, std=std, always_apply=True),
            pytorch.ToTensorV2(always_apply=True),

        ], p=p)

        return albumentations_transform
Example #19
def augment_flips_gray(p=.5):
    '''
    usage:
     au = augment_flips_gray()
     res = au(image=image, mask=mask)
     res_img, res_mask = res['image'], res['mask']
    '''
    return Compose(
        [
            # CLAHE(),
            OneOf([
                RandomRotate90(),
                ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT, value=0)
            ],
                  p=0.35),
            RandomGamma(gamma_limit=(75, 140)),
            Blur(blur_limit=3),
            # HueSaturationValue(),
            OneOf([
                MotionBlur(p=.2),
                MedianBlur(blur_limit=3, p=0.1),
                Blur(blur_limit=3, p=0.1),
            ],
                  p=0.2),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2),
        ],
        p=p)
def alb_transform_train(imsize=256, p=1):
    albumentations_transform = Compose([
    # RandomCrop(imsize),
    # RandomRotate90(),
    Flip(),
    # Transpose(),
    OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
    OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ], p=0.2),
    ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
    OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
    OneOf([
        # CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
        ], p=0.3),
    Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
        )
    ], p=p)
    return albumentations_transform
Example #21
 def augmentation_pipeline(self, p=0.5):
     return Compose(
         [
             HorizontalFlip(p=0.5),
             OneOf([
                 IAAAdditiveGaussianNoise(),
                 GaussNoise(),
             ], p=0.2),
             OneOf(
                 [
                     MotionBlur(p=0.2),
                     #MedianBlur(blur_limit=3, p=0.1),
                     Blur(blur_limit=3, p=0.1),
                 ],
                 p=0.1),
             OneOf([
                 ShiftScaleRotate(shift_limit=0.0625,
                                  scale_limit=0.2,
                                  rotate_limit=5,
                                  p=0.9),
                 IAAPerspective(scale=(.02, .05))
             ],
                   p=0.3)
         ],
         p=p)
Example #22
def train_pipeline(cache, mask_db, path):
    image, mask = read_image_and_mask_cached(cache, mask_db, (101, 101), path)
    args = Compose([
        LabelMaskBorder(),
        HorizontalFlip(p=0.5),
        OneOf([
            ShiftScaleRotate(rotate_limit=15,
                             border_mode=cv2.BORDER_REPLICATE),
            RandomSizedCrop(min_max_height=(70, 100), height=101, width=101)
        ],
              p=0.2),
        GaussNoise(p=0.2),
        OneOf([
            RandomBrightness(limit=0.4),
            RandomGamma(),
        ], p=0.5),
        OneOf([Blur(), MedianBlur(), MotionBlur()], p=0.2),
        OneOf([
            ElasticTransform(alpha=10, sigma=10, alpha_affine=10),
            GridDistortion()
        ],
              p=0.2),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        PadIfNeeded(128, 128, cv2.BORDER_REPLICATE),
        ChannelsFirst()
    ])(image=image, mask=mask)
    return args['image'], args.get('mask')
Example #23
def strong_aug(p=1.0):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(p=0.75,
                             shift_limit=0.1,
                             scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT),
            RandomBrightnessContrast(
                brightness_limit=0.8, contrast_limit=0.8, p=1.0),
            OneOf([
                HueSaturationValue(p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0)
            ],
                  p=1.0),
            OneOf([
                Blur(p=1.0),
                MedianBlur(p=1.0),
                MotionBlur(p=1.0),
            ], p=0.5),
            OneOf([GridDistortion(p=1.0),
                   ElasticTransform(p=1.0)], p=0.5),
            OneOf([
                CLAHE(p=1.0),
                IAASharpen(p=1.0),
            ], p=0.5),
            GaussNoise(p=0.5)
            # ToGray(p=1.0),
        ],
        p=p)
    def generate_color_augmentation(aug_cfg: CfgNode) -> Union[Compose, None]:
        """
        generate color augmentation object
        :param aug_cfg: augmentation config
        :return color_aug: color augmentation object
        """
        color_aug_list = []
        if aug_cfg.BRIGHTNESS_CONTRAST_PROB > 0:
            color_aug_list.append(
                RandomBrightnessContrast(p=aug_cfg.BRIGHTNESS_CONTRAST_PROB))

        if aug_cfg.BLURRING_PROB > 0:
            blurring = OneOf([
                MotionBlur(aug_cfg.BLUR_LIMIT, p=1),
                MedianBlur(aug_cfg.BLUR_LIMIT, p=1),
                Blur(aug_cfg.BLUR_LIMIT, p=1),
            ],
                             p=aug_cfg.BLURRING_PROB)
            color_aug_list.append(blurring)

        if aug_cfg.GAUSS_NOISE_PROB > 0:
            color_aug_list.append(GaussNoise(p=aug_cfg.GAUSS_NOISE_PROB))
        if aug_cfg.GRID_MASK_PROB > 0:
            color_aug_list.append(
                GridMask(num_grid=(3, 7), p=aug_cfg.GRID_MASK_PROB))
        if len(color_aug_list) > 0:
            color_aug = Compose(color_aug_list, p=1)
            return color_aug
        else:
            return None
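    # Usage sketch (assumed CfgNode field values, matching the names used above):
    # with BRIGHTNESS_CONTRAST_PROB=0.3, BLURRING_PROB=0.2, BLUR_LIMIT=3,
    # GAUSS_NOISE_PROB=0.2 and GRID_MASK_PROB=0, the returned Compose applies
    # brightness/contrast, one of the three blurs, and Gaussian noise, each with its
    # configured probability; if every probability is zero the function returns None.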
def create_train_transforms(size=300):
    return Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
              p=1),
        PadIfNeeded(min_height=size,
                    min_width=size,
                    border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(),
               FancyPCA(),
               HueSaturationValue()],
              p=0.7),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
    ])
Example #26
def transforms_train(aug_proba=1.):
    return Compose(transforms=[
        HorizontalFlip(p=0.5),
        Rotate(limit=25,
               p=0.5,
               border_mode=cv2.BORDER_CONSTANT,
               value=0,
               interpolation=cv2.INTER_CUBIC),
        OneOf([
            IAAAdditiveGaussianNoise(p=1),
            GaussNoise(p=1),
        ], p=0.2),
        OneOf([
            HueSaturationValue(hue_shift_limit=10,
                               sat_shift_limit=15,
                               val_shift_limit=10,
                               p=1),
            RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=1)
        ]),
        OneOf([RandomContrast(p=1), RandomBrightness(p=1)], p=0.3),
        OpticalDistortion(p=0.1),
        Resize(*SIZE),
        Normalize()
    ],
                   p=aug_proba,
                   additional_targets={'trimap': 'mask'})
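# Usage sketch: thanks to additional_targets, the trimap is handled with mask
# semantics (spatial transforms only, no photometric changes):
#   out = transforms_train()(image=image, trimap=trimap)
#   image_aug, trimap_aug = out['image'], out['trimap']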
Example #27
def transform(config, image, mask):

    try:
        p = config["train"]["dap"]["p"]
    except (KeyError, TypeError):
        # Fall back to always applying the augmentation if the config entry is missing.
        p = 1

    assert 0 <= p <= 1

    # Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    return Compose([
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(),
               GaussNoise()], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1)
        ],
              p=0.2),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.2),
        OneOf([IAASharpen(),
               IAAEmboss(),
               RandomBrightnessContrast()], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)(image=image, mask=mask)
Example #28
    def initialize_elements(self):
        self.using_roi = hasattr(self.params, "roi_crop")
        self.resizer = self.plain_resize

        if hasattr(self.params, "random_crop_scale"):
            self.resizer = RandomResizedCrop(
                height=self.params.default_height,
                width=self.params.default_width,
                scale=self.params.random_crop_scale,
                ratio=self.params.random_crop_ratio)

        if self.using_roi:
            self.roi_resize = Resize(height=self.params.roi_height,
                                     width=self.params.roi_width)

        starting_aug = [Rotate(limit=15), HorizontalFlip(p=0.5)]

        heavy_aug = [
            # RandomGamma(p=0.1),
            ElasticTransform(p=0.1,
                             alpha=120,
                             sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GaussNoise(p=0.05),
            GaussianBlur(p=0.05)
        ]

        if self.params.data_augmentation == constants.heavy_augmentation:
            starting_aug.extend(heavy_aug)
        self.aug = Compose(starting_aug)
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(p=0.5),
        ToGray(p=0.1),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
    ],
                   p=p)
Example #30
def aug_daniel(prob=0.8):
    return Compose(
        [
            RandomRotate90(p=0.5),
            Transpose(p=0.5),
            Flip(p=0.5),
            OneOf(
                [
                    IAAAdditiveGaussianNoise(),
                    GaussNoise(),
                    #Blur(),
                ],
                p=0.3),
            OneOf(
                [
                    CLAHE(clip_limit=2),
                    IAASharpen(),
                    IAAEmboss(),
                    OneOf([
                        RandomContrast(),
                        RandomBrightness(),
                    ]),
                    #Blur(),
                    #GaussNoise()
                ],
                p=0.5),
            HueSaturationValue(p=0.5)
        ],
        p=prob)