Пример #1
0
def augmentations(image_size: int):
    """Build the detection training augmentation pipeline.

    Args:
        image_size: Target image size; currently only used by the
            commented-out ``pre_transform`` hook.

    Returns:
        An ``A.Compose`` pipeline configured with ``BBOX_PARAMS`` for bboxes.
    """
    # Colour-channel alternatives, one of which is picked below.
    channel_augs = [
        A.HueSaturationValue(p=0.5),
        A.ChannelShuffle(p=0.5),
    ]

    result = [
        # *pre_transform(image_size),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.5),
        A.OneOf([
            A.MotionBlur(blur_limit=3, p=0.7),
            A.MedianBlur(blur_limit=3, p=1.0),
            A.Blur(blur_limit=3, p=0.7),
        ],
                p=0.5),
        A.OneOf(channel_augs),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
        ],
                p=0.5),
        A.RandomBrightnessContrast(brightness_limit=0.5,
                                   contrast_limit=0.5,
                                   p=0.5),
        # Fix: the original listed RandomGamma twice (once without limits) and
        # repeated the median/motion-blur OneOf already applied above — almost
        # certainly copy-paste. Keep a single RandomGamma with explicit limits.
        A.RandomGamma(gamma_limit=(85, 115), p=0.5),
    ]
    return A.Compose(result, bbox_params=BBOX_PARAMS)
Пример #2
0
 def album(self): # image augmentation
     """Apply a random photometric/geometric augmentation to ``self.srcResize``.

     Converts the BGR source frame to RGB, runs the albumentations pipeline,
     and pushes the augmented image through ``self.update``.
     """
     transform = A.Compose([
         #A.RandomRotate90(),
         A.Flip(p=0.2),
         #A.Transpose(),
         A.ChannelShuffle(p=0.3),
         A.ElasticTransform(p=0.3,border_mode=cv2.BORDER_REFLECT_101,alpha_affine=40),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.2),
             A.MedianBlur(blur_limit=3, p=0.1),
             A.Blur(blur_limit=3, p=0.1),
         ], p=0.2),
         A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.1),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
             A.RandomBrightnessContrast(),
         ], p=0.3),
         A.HueSaturationValue(p=0.3),
     ])
     # albumentations expects RGB input; the source frame is OpenCV BGR.
     image = cv2.cvtColor(self.srcResize, cv2.COLOR_BGR2RGB)
     transformed = transform(image=image)['image']
     self.update(transformed)
Пример #3
0
    def __init__(self, root_dir, is_train):
        """Face-landmark dataset: 256px inputs, 68 keypoints, 99/1 train/val split.

        Args:
            root_dir: Directory containing ``annot.pkl`` holding the (X, Y)
                annotation pair.
            is_train: If True, use the first 99% of samples with heavy
                photometric/geometric augmentation; otherwise the last 1%
                with only resize/normalize.
        """
        super(FaceDataset, self).__init__()

        #self.local_rank = local_rank
        self.is_train = is_train
        self.input_size = 256
        self.num_kps = 68
        transform_list = []
        if is_train:
            transform_list += \
                [
                    A.ColorJitter(brightness=0.8, contrast=0.5, p=0.5),
                    A.ToGray(p=0.1),
                    A.ISONoise(p=0.1),
                    A.MedianBlur(blur_limit=(1,7), p=0.1),
                    A.GaussianBlur(blur_limit=(1,7), p=0.1),
                    A.MotionBlur(blur_limit=(5,12), p=0.1),
                    A.ImageCompression(quality_lower=50, quality_upper=90, p=0.05),
                    A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=40, interpolation=cv2.INTER_LINEAR,
                        border_mode=cv2.BORDER_CONSTANT, value=0, mask_value=0, p=0.8),
                    A.HorizontalFlip(p=0.5),
                    RectangleBorderAugmentation(limit=0.33, fill_value=0, p=0.2),
                ]
        transform_list += \
            [
                A.geometric.resize.Resize(self.input_size, self.input_size, interpolation=cv2.INTER_LINEAR, always_apply=True),
                A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ToTensorV2(),
            ]
        # ReplayCompose so the sampled parameters can be replayed (e.g. onto
        # keypoints); keypoints are plain (x, y), off-image ones are kept.
        self.transform = A.ReplayCompose(transform_list,
                                         keypoint_params=A.KeypointParams(
                                             format='xy',
                                             remove_invisible=False))
        self.root_dir = root_dir
        with open(osp.join(root_dir, 'annot.pkl'), 'rb') as f:
            annot = pickle.load(f)
            self.X, self.Y = annot
        train_size = int(len(self.X) * 0.99)

        if is_train:
            self.X = self.X[:train_size]
            self.Y = self.Y[:train_size]
        else:
            self.X = self.X[train_size:]
            self.Y = self.Y[train_size:]
        #if local_rank==0:
        #    logging.info('data_transform_list:%s'%transform_list)
        # 1-based landmark index pairs that swap under a horizontal flip
        # (standard 68-point annotation scheme).
        flip_parts = ([1, 17], [2, 16], [3, 15], [4, 14], [5, 13], [6, 12],
                      [7, 11], [8, 10], [18, 27], [19, 26], [20, 25], [21, 24],
                      [22, 23], [32, 36], [33, 35], [37, 46], [38, 45],
                      [39, 44], [40, 43], [41, 48], [42, 47], [49,
                                                               55], [50, 54],
                      [51, 53], [62, 64], [61, 65], [68, 66], [59,
                                                               57], [60, 56])
        # Build the 0-based permutation used to reorder keypoints after a flip.
        self.flip_order = np.arange(self.num_kps)
        for pair in flip_parts:
            self.flip_order[pair[1] - 1] = pair[0] - 1
            self.flip_order[pair[0] - 1] = pair[1] - 1
        logging.info('len:%d' % len(self.X))
        print('!!!len:%d' % len(self.X))
def get_next_augmentation():
    """Return a colour/blur-only augmentation pipeline (no geometric changes)."""
    # Tone adjustments — exactly one is picked 90% of the time.
    tone = albu.OneOf(
        [albu.CLAHE(p=1), albu.RandomBrightness(p=1), albu.RandomGamma(p=1)],
        p=0.9,
    )
    # Sharpen-or-blur alternatives.
    sharpness = albu.OneOf(
        [
            albu.IAASharpen(p=1),
            albu.Blur(blur_limit=3, p=1),
            albu.MotionBlur(blur_limit=3, p=1),
        ],
        p=0.9,
    )
    # Contrast / hue-saturation alternatives.
    colour = albu.OneOf(
        [albu.RandomContrast(p=1), albu.HueSaturationValue(p=1)],
        p=0.9,
    )
    return albu.Compose([
        albu.ChannelShuffle(p=0.1),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        tone,
        sharpness,
        colour,
    ])
Пример #5
0
def get_training_augmentation():
    """Training pipeline: flip, pad to 320x320, then noise/perspective/colour."""
    # Tone adjustments — one chosen with probability 0.9.
    tone = albu.OneOf(
        [albu.CLAHE(p=1), albu.RandomBrightness(p=1), albu.RandomGamma(p=1)],
        p=0.9,
    )
    # Sharpen-or-blur alternatives.
    sharpness = albu.OneOf(
        [
            albu.IAASharpen(p=1),
            albu.Blur(blur_limit=3, p=1),
            albu.MotionBlur(blur_limit=3, p=1),
        ],
        p=0.9,
    )
    # Contrast / hue-saturation alternatives.
    colour = albu.OneOf(
        [albu.RandomContrast(p=1), albu.HueSaturationValue(p=1)],
        p=0.9,
    )
    return albu.Compose([
        albu.HorizontalFlip(p=0.5),
        #         albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        albu.PadIfNeeded(min_height=320, min_width=320, always_apply=True),
        #         albu.RandomCrop(height=1000, width=1000, always_apply=True),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        tone,
        sharpness,
        colour,
    ])
Пример #6
0
def train_get_transforms():
    """Training transforms: blur/noise, brightness/contrast, flips/rotations, tensor."""
    # Image degradation — one member picked 80% of the time.
    degrade = A.OneOf([
        A.MotionBlur(blur_limit=5),
        A.MedianBlur(blur_limit=5),
        A.GaussianBlur(blur_limit=5),
        A.GaussNoise(var_limit=(5.0, 30.0))
    ], p=0.8)
    # Geometric augmentations, each applied independently with p=0.5.
    geometry = [
        A.Transpose(p=0.5),
        A.Rotate(limit=90,
                 interpolation=1,
                 border_mode=4,
                 always_apply=False,
                 p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(p=0.5),
    ]
    return A.Compose([
        degrade,
        A.RandomBrightness(limit=0.1, p=0.5),
        A.RandomContrast(limit=[0.9, 1.1], p=0.5),
        *geometry,
        ToTensorV2(),
    ])
Пример #7
0
def rgd_shuffle_mo_blur(p=1.0):
    """Channel shuffle + motion blur + brightness jitter pipeline.

    Args:
        p: Probability applied to the Compose, the channel shuffle, and the
            motion blur.

    NOTE(review): ``RandomBrightness`` is hard-coded to ``p=1`` while the
    other two transforms use ``p=p`` — possibly an oversight; confirm with
    the author before changing.
    """
    return albumentations.Compose([
        albumentations.ChannelShuffle(p=p),
        albumentations.MotionBlur(p=p),
        albumentations.RandomBrightness(p=1)
    ],
                                  p=p)
Пример #8
0
def get_train_transforms(
        height: int = 14 * 32,  # 14*32 then 28*32
        width: int = 18 * 32):  # 18*32 then 37*32
    """Training augmentations ending in a resize to (height, width)."""
    # Tone alternatives — one picked half the time.
    tone = A.OneOf(
        [A.CLAHE(p=1.0), A.RandomBrightness(p=1.0), A.RandomGamma(p=1.0)],
        p=0.5)
    # Sharpen-or-blur alternatives.
    sharpness = A.OneOf(
        [A.IAASharpen(p=1.0),
         A.Blur(blur_limit=3, p=1.0),
         A.MotionBlur(blur_limit=3, p=1.0)],
        p=0.5)
    # Contrast / hue-saturation alternatives.
    colour = A.OneOf(
        [A.RandomContrast(p=1.0), A.HueSaturationValue(p=1.0)],
        p=0.5)
    steps = [
        A.HorizontalFlip(p=0.5),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.4),
        tone,
        sharpness,
        colour,
        A.Resize(height=height, width=width, p=1.0),
    ]
    return A.Compose(steps, p=1.0)
Пример #9
0
def det_train_augs(height: int, width: int) -> albu.Compose:
    """Detection training pipeline: resize, geometry, colour, blur, normalize."""
    # Exactly one colour/noise/sharpen transform is always applied.
    colour_choice = albu.OneOf(
        [
            albu.HueSaturationValue(p=1.0),
            albu.IAAAdditiveGaussianNoise(p=1.0),
            albu.IAASharpen(p=1.0),
            albu.RandomBrightnessContrast(
                brightness_limit=0.1, contrast_limit=0.1, p=1.0),
            albu.RandomGamma(p=1.0),
        ],
        p=1.0,
    )
    # Exactly one blur is always applied.
    blur_choice = albu.OneOf(
        [
            albu.Blur(blur_limit=3, p=1.0),
            albu.MotionBlur(blur_limit=3, p=1.0),
        ],
        p=1.0,
    )
    return albu.Compose([
        albu.Resize(height=height, width=width),
        albu.ShiftScaleRotate(shift_limit=0.025,
                              scale_limit=0.1,
                              rotate_limit=10),
        albu.Flip(),
        albu.RandomRotate90(),
        colour_choice,
        blur_choice,
        albu.Normalize(),
    ])
def get_transforms(type="albumentations"):
    """Return ``(train_transforms, test_transforms)`` for 256x256 images.

    Args:
        type: ``"albumentations"`` selects an albumentations pipeline; any
            other value selects a torchvision pipeline (with the external
            ``Microscope`` augmentation).

    NOTE(review): the parameter name shadows the builtin ``type``; renaming
    would break keyword callers, so it is left as-is.
    """
    if type == "albumentations":
        train_transforms = albumentations.Compose([
            albumentations.Transpose(p=0.5),
            albumentations.OneOf([
                albumentations.VerticalFlip(p=0.5),
                albumentations.HorizontalFlip(p=0.5),
            ]),
            albumentations.OneOf([
                albumentations.RandomBrightness(limit=0.2, p=0.75),
                albumentations.RandomContrast(limit=0.2, p=0.75),
            ]),
            albumentations.OneOf([
                albumentations.MotionBlur(blur_limit=5),
                albumentations.MedianBlur(blur_limit=5),
                albumentations.GaussianBlur(blur_limit=5),
                albumentations.GaussNoise(var_limit=(5.0, 30.0)),
            ],
                                 p=0.7),
            albumentations.OneOf([
                albumentations.OpticalDistortion(distort_limit=1.0),
                albumentations.GridDistortion(num_steps=5, distort_limit=1.),
                albumentations.ElasticTransform(alpha=3),
            ],
                                 p=0.7),

            # albumentations.OneOf([
            #     albumentations.CLAHE(clip_limit=4.0, p=0.7),
            #     albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
            #     albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0,
            #                                     p=0.85),
            # ]),
            albumentations.Resize(256, 256),
            # albumentations.Cutout(max_h_size=int(256 * 0.375), max_w_size=int(256 * 0.375), num_holes=1, p=0.7),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        test_transforms = albumentations.Compose([
            albumentations.Resize(256, 256),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    else:
        train_transforms = transforms.Compose([
            # AdvancedHairAugmentation(hairs_folder='/kaggle/input/melanoma-hairs'),
            transforms.RandomResizedCrop(size=256, scale=(0.9, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            Microscope(p=0.5),
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        test_transforms = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return train_transforms, test_transforms
Пример #11
0
        def __init__(self, data_dir, mode):
            """Dataset over all ``*.jpg`` files under ``data_dir``.

            Args:
                data_dir: Root directory searched recursively for jpegs.
                mode: One of ``'train'`` / ``'val'`` / ``'test'``; extra
                    augmentations are added only for ``'train'``.
            """
            assert mode in ['train', 'val', 'test']
            self.mode = mode
            self.fn = list(Path(data_dir).rglob('*.jpg'))

            # Base pipeline shared by all modes: resize then crop to 350x350.
            a_transforms_list = [A.Resize(350, 350), A.RandomCrop(350, 350)]
            if mode == 'train':
                a_transforms_list.extend([
                    A.HorizontalFlip(),
                    A.VerticalFlip(),
                    A.HueSaturationValue(),
                    A.ShiftScaleRotate(),
                    A.OneOf([
                        A.IAAAdditiveGaussianNoise(),
                        A.GaussNoise(),
                    ],
                            p=0.2),
                    A.OneOf([
                        A.MotionBlur(p=.2),
                        A.MedianBlur(blur_limit=3, p=0.1),
                        A.Blur(blur_limit=3, p=0.1),
                    ],
                            p=0.2)
                ])
            a_transforms_list.extend([ToTensor()])
            self.transforms = A.Compose(a_transforms_list)
Пример #12
0
def get_augmentations(p=0.5, image_size=224):
    """Return ``(train_tfms, valid_tfms)`` normalized with ImageNet statistics.

    Args:
        p: Probability applied to most training augmentation groups.
        image_size: Side length for crops.
    """
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15,
                           scale_limit=0.4,
                           rotate_limit=45,
                           p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])

    # Validation: deterministic center crop + normalization only.
    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])

    return train_tfms, valid_tfms
Пример #13
0
def strong_aug(p=.5):
    """Heavy augmentation: flips, noise, blur, distortion, colour (applied with p)."""
    noise = A.OneOf([
        A.IAAAdditiveGaussianNoise(),
        A.GaussNoise(),
    ], p=0.2)
    blur = A.OneOf([
        A.MotionBlur(p=.2),
        A.MedianBlur(blur_limit=3, p=.1),
        A.Blur(blur_limit=3, p=.1),
    ], p=0.2)
    warp = A.OneOf([
        A.OpticalDistortion(p=0.3),
        A.GridDistortion(p=.1),
        A.IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    tone = A.OneOf([
        A.CLAHE(clip_limit=2),
        A.IAASharpen(),
        A.IAAEmboss(),
        A.RandomContrast(),
        A.RandomBrightness(),
    ], p=0.3)
    steps = [
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        noise,
        blur,
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        warp,
        tone,
        A.HueSaturationValue(p=0.3),
    ]
    return A.Compose(steps, p=p)
Пример #14
0
def get_training_augmentation():
    """Noise/perspective plus colour and blur groups; 'depth' handled as a mask."""
    tone = albu.OneOf(
        [albu.CLAHE(p=1), albu.RandomBrightness(p=1), albu.RandomGamma(p=1)],
        p=0.9)
    sharpness = albu.OneOf(
        [albu.IAASharpen(p=1),
         albu.Blur(blur_limit=3, p=1),
         albu.MotionBlur(blur_limit=3, p=1)],
        p=0.9)
    colour = albu.OneOf(
        [albu.RandomContrast(p=1), albu.HueSaturationValue(p=1)],
        p=0.9)
    # The extra 'depth' target receives mask-style (nearest / no-photometric)
    # treatment.
    train_transform = albu.Compose(
        [
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.5),
            tone,
            sharpness,
            colour,
        ],
        additional_targets={'depth': 'mask'})

    return train_transform
Пример #15
0
def get_training_augmentation():
    """Random 320x320 crop, flip, then optional colour / blur / weather effects."""
    colour = A.OneOf([
        A.CLAHE(),
        A.RandomBrightnessContrast(),
        A.RandomGamma(),
        A.HueSaturationValue(),
        A.NoOp(),
    ])
    sharp_or_blur = A.OneOf([
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAASharpen(),
        A.Blur(blur_limit=3),
        A.MotionBlur(blur_limit=3),
        A.NoOp(),
    ])
    weather = A.OneOf([
        A.RandomFog(),
        A.RandomSunFlare(),
        A.RandomRain(),
        A.RandomSnow(),
        A.NoOp(),
    ])
    return A.Compose([
        A.RandomSizedCrop(min_max_height=(300, 360), height=320, width=320,
                          always_apply=True),
        A.HorizontalFlip(p=0.5),
        colour,
        sharp_or_blur,
        weather,
        A.Normalize(),
    ])
def Albumentations(params):
    """Build the training augmentation pipeline for COCO-format bounding boxes.

    Args:
        params: Config object providing ``mean`` and ``std`` for normalization.

    Returns:
        An ``A.Compose`` with flips, colour jitter, mild blur/sharpen, a small
        shift/scale/rotate, and final normalization; bboxes below 20%
        visibility are dropped.
    """
    augmentation = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Transpose(p=0.5),
        A.OneOf([
            A.CLAHE(p=1.),
            A.RandomBrightnessContrast(0.15, 0.15, p=1.),
            A.RandomGamma(p=1.),
            A.HueSaturationValue(p=1.)],
            p=0.5),
        A.OneOf([
            A.Blur(3, p=1.),
            A.MotionBlur(3, p=1.),
            A.Sharpen(p=1.)],
            p=0.5),
        # Fix: the original was missing the trailing comma here, which made
        # the list a SyntaxError once A.Normalize was uncommented.
        A.ShiftScaleRotate(
            shift_limit=0.05, scale_limit=0.05, rotate_limit=15,
            border_mode=cv2.BORDER_CONSTANT, p=0.75),
        A.Normalize(mean=params.mean, std=params.std, max_pixel_value=255., always_apply=True)
    ], bbox_params=A.BboxParams(format='coco', label_fields=['category_ids'], min_visibility=0.2))

    return augmentation
Пример #17
0
def get_training_augmentation():
    """Always shift/scale (no rotation), add noise, then tone/sharpen/colour groups."""
    tone = albu.OneOf(
        [albu.CLAHE(p=1), albu.RandomBrightness(p=1), albu.RandomGamma(p=1)],
        p=0.6)
    sharpness = albu.OneOf(
        [albu.IAASharpen(p=1),
         albu.Blur(blur_limit=3, p=1),
         albu.MotionBlur(blur_limit=3, p=1)],
        p=0.6)
    colour = albu.OneOf(
        [albu.RandomContrast(p=1), albu.HueSaturationValue(p=1)],
        p=0.6)
    return albu.Compose([
        albu.ShiftScaleRotate(scale_limit=0.1,
                              rotate_limit=0.,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        tone,
        sharpness,
        colour,
    ])
def apply_training_augmentation():
    """Segmentation training pipeline ending in 320x320 crops.

    The final ``A.Lambda`` applies the module-level ``round_clip_0_1`` to the
    mask so augmented masks stay binary in [0, 1].
    """
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        A.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0),
        A.RandomCrop(height=320, width=320, always_apply=True),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
Пример #19
0
def get_training_augmentation():
    """Build the random training transforms (homography-aware variants included).

    Uses the project-local ``HorizontalFlipWithHomo`` and ``RandomCropWithHomo``,
    which presumably keep an associated homography in sync with the image —
    TODO confirm against their definitions.

    Returns:
        An ``A.Compose`` pipeline to pass images through.
    """
    train_transform = [
        HorizontalFlipWithHomo(p=0.5),
        A.IAAAdditiveGaussianNoise(p=0.2),
        # Shadow restricted to the lower half of the image.
        A.augmentations.transforms.RandomShadow(
            shadow_roi=(0, 0.5, 1, 1),
            num_shadows_lower=1,
            num_shadows_upper=1,
            shadow_dimension=3,
            always_apply=False,
            p=0.5,
        ),
        A.OneOf([A.RandomBrightness(p=1),], p=0.3,),
        A.OneOf([A.Blur(blur_limit=3, p=1), A.MotionBlur(blur_limit=3, p=1),], p=0.3,),
        A.OneOf([A.RandomContrast(p=1), A.HueSaturationValue(p=1),], p=0.3,),
        RandomCropWithHomo(height=256, width=256, always_apply=True),
    ]
    return A.Compose(train_transform)
Пример #20
0
def cifar_alb_trainData():
    '''Apply Albumentations data transforms to the dataset and returns iterable.

    Returns a callable mapping a PIL image / array to its augmented array.
    NOTE(review): ``mean`` and ``std`` are module-level globals — presumably
    CIFAR channel statistics; verify where they are defined.
    '''

    train_transform = [
        A.HorizontalFlip(p=0.15),
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.05,
                           rotate_limit=15,
                           p=0.25),
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15,
                   p=0.5),
        A.RandomBrightnessContrast(p=0.25),
        A.RandomGamma(p=0.25),
        A.CLAHE(p=0.25),
        A.ChannelShuffle(p=0.1),
        A.ElasticTransform(p=0.1),
        A.MotionBlur(blur_limit=17, p=0.1),
        # Cutout hole filled with the dataset mean rather than zero.
        A.Cutout(num_holes=1,
                 max_h_size=16,
                 max_w_size=16,
                 fill_value=mean,
                 always_apply=False,
                 p=0.5),
        A.Normalize(mean=mean, std=std),
        ToTensor()
    ]

    transforms_result = A.Compose(train_transform)
    return lambda img: transforms_result(image=np.array(img))["image"]
Пример #21
0
def get_transforms(image_size):
    """Return ``(transforms_train, transforms_val)`` for ``image_size`` inputs."""
    # Blur/noise degradation — one member picked 70% of the time.
    degrade = albumentations.OneOf([
        albumentations.MotionBlur(blur_limit=5),
        albumentations.MedianBlur(blur_limit=5),
        albumentations.GaussianBlur(blur_limit=5),
        albumentations.GaussNoise(var_limit=(5.0, 30.0)),
    ], p=0.7)
    # Elastic-style spatial warps.
    warp = albumentations.OneOf([
        albumentations.OpticalDistortion(distort_limit=1.0),
        albumentations.GridDistortion(num_steps=5, distort_limit=1.),
        albumentations.ElasticTransform(alpha=3),
    ], p=0.7)

    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        degrade,
        warp,
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
        albumentations.Normalize(),
    ])

    # Validation: deterministic resize + normalize only.
    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize(),
    ])

    return transforms_train, transforms_val
Пример #22
0
 def augment_image(self, image):
     """Return a randomly augmented copy of a BGR ``image``.

     The image is converted BGR→RGB for albumentations, augmented with
     noise/blur/tone/colour transforms, then converted back to BGR.
     """
     transform = A.Compose([
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.3),
         A.OneOf([
             A.MotionBlur(p=.4),
             A.MedianBlur(blur_limit=3, p=0.3),
             A.Blur(blur_limit=3, p=0.3),
         ],
                 p=0.4),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
             A.RandomBrightnessContrast(),
         ],
                 p=0.3),
         A.HueSaturationValue(p=0.3),
     ])
     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     augmented_image = transform(image=image)['image']
     augmented_image = cv2.cvtColor(augmented_image, cv2.COLOR_RGB2BGR)
     return augmented_image
Пример #23
0
def get_train_transform():
    """256x256 random-crop training pipeline: geometry, noise, colour, JPEG artefacts."""
    crop_height = 256
    crop_width = 256

    noise = albu.OneOf([
        albu.IAAAdditiveGaussianNoise(p=0.5),
        albu.GaussNoise(p=0.5),
    ], p=0.2)
    blur = albu.OneOf([
        albu.MotionBlur(p=0.2),
        albu.MedianBlur(blur_limit=3, p=0.1),
        albu.Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    warp = albu.OneOf([
        albu.OpticalDistortion(p=0.3),
        albu.GridDistortion(p=0.1),
        albu.IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    tone = albu.OneOf([
        albu.CLAHE(clip_limit=2, p=0.5),
        albu.IAASharpen(p=0.5),
        albu.IAAEmboss(p=0.5),
        albu.RandomBrightnessContrast(p=0.5),
    ], p=0.3)

    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        noise,
        blur,
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        warp,
        tone,
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1),
    ], p=1)
Пример #24
0
def get_training_augmentation():
    """Training pipeline: flip, noise, perspective, pad to 1216x512, colour groups."""
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.PadIfNeeded(min_height=1216,
                         min_width=512,
                         always_apply=True,
                         border_mode=0),
        # With probability 0.9, pick one member of the OneOf; each member is
        # equally likely at 1/3 (weights 1/(1+1+1)).
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Пример #25
0
def hard_transforms():
    """Return a list of aggressive photometric augmentations (no resize/crop)."""
    result = [
        # random flip
        albu.Flip(),
        # add random brightness and contrast, 70% prob
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2,
                                      p=0.7),
        # Random gamma changes with a 30% probability
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        # Randomly changes the hue, saturation, and color value of the input image
        albu.HueSaturationValue(p=0.3),
        # apply compression to lower the image quality (quality >= 80)
        albu.JpegCompression(quality_lower=80),
        # add cutout with 0.5 prob on the image
        albu.Cutout(num_holes=15,
                    max_h_size=20,
                    max_w_size=20,
                    fill_value=5,
                    p=0.5),
        # randomly select one of these blur operations
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
                   p=0.2),
    ]

    return result
Пример #26
0
 def __init__(self, image_size):
     """Build train/test albumentations pipelines in ``self.data_transform``.

     Args:
         image_size: Square side length the images are resized to.
     """
     self.data_transform = {
         'train_transform':A.Compose([
           A.Transpose(p=0.5),
           A.VerticalFlip(p=0.5),
           A.HorizontalFlip(p=0.5),
           A.RandomBrightness(limit=0.2, p=0.75),
           A.RandomContrast(limit=0.2, p=0.75),
           A.OneOf([
               A.MotionBlur(blur_limit=5),
               A.MedianBlur(blur_limit=5),
               A.GaussianBlur(blur_limit=5),
               A.GaussNoise(var_limit=(5.0, 30.0)),], p=0.7),
           A.OneOf([
               A.OpticalDistortion(distort_limit=1.0),
               A.GridDistortion(num_steps=5, distort_limit=1.),
               A.ElasticTransform(alpha=3),], p=0.7),
           A.CLAHE(clip_limit=4.0, p=0.7),
           A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
           A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
           A.Resize(image_size, image_size),
           A.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
           A.Normalize()
           ]),
         # Fix: the original applied A.Resize a second time after Normalize;
         # the image is already image_size at that point, so the duplicate
         # was a redundant no-op and has been removed.
         'test_transform': A.Compose([
           A.Resize(image_size, image_size),
           A.Normalize()
           ])}
def albumentation():
    """Return an albumentations pipeline ending in an ImageNet-normalized tensor."""
    transform = albumentations.Compose([
                    # Additive noise.
                    albumentations.OneOf([
                        albumentations.GaussNoise(),
                        albumentations.IAAAdditiveGaussianNoise()
                    ]),
                    # Mild blurring.
                    albumentations.OneOf([
                        albumentations.MotionBlur(blur_limit=3, p=0.2),
                        albumentations.MedianBlur(blur_limit=3, p=0.1),
                        albumentations.Blur(blur_limit=2, p=0.1)
                    ]),
                    # Colour/brightness shifts.
                    albumentations.OneOf([
                        albumentations.RandomBrightness(limit=(0.1, 0.4)),
                        albumentations.HueSaturationValue(hue_shift_limit=(0, 128), sat_shift_limit=(0, 60), val_shift_limit=(0, 20)),
                        albumentations.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30)
                    ]),
                    # Contrast / sharpening / channel operations.
                    albumentations.OneOf([
                        albumentations.CLAHE(),
                        albumentations.ChannelShuffle(),
                        albumentations.IAASharpen(),
                        albumentations.IAAEmboss(),
                        albumentations.RandomBrightnessContrast(),
                    ]),                
                    # Gamma / geometric distortions.
                    albumentations.OneOf([
                        albumentations.RandomGamma(gamma_limit=(35,255)),
                        albumentations.OpticalDistortion(),
                        albumentations.GridDistortion(),
                        albumentations.IAAPiecewiseAffine()
                    ]),                
                    # To tensor with ImageNet mean/std normalization.
                    A_torch.ToTensor(normalize={
                        "mean": [0.485, 0.456, 0.406],
                        "std" : [0.229, 0.224, 0.225]})
                    ])
    return transform
Пример #28
0
def get_transforms(*, data_type):
    """Return the albumentations pipeline for the requested phase.

    Args:
        data_type: one of ``"light_train"`` (geometric-only augmentation),
            ``"train"`` (full augmentation with per-channel blur and
            dropout), or ``"valid"`` (resize only).

    Returns:
        An albumentations ``Compose`` pipeline ending in ``ToTensorV2``.

    Raises:
        ValueError: if ``data_type`` is not a recognized phase name.
            (Previously an unknown value silently returned ``None``.)
    """
    if data_type == "light_train":
        # Cheap geometric-only augmentation for quick experiments.
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(scale_limit=(0, 0), p=0.5),
            ToTensorV2(),
        ])

    elif data_type == "train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            # Mild non-rigid warps, rarely applied.
            albumentations.OneOf([
                albumentations.ElasticTransform(
                    alpha=1, sigma=20, alpha_affine=10),
                albumentations.GridDistortion(num_steps=6, distort_limit=0.1),
                albumentations.OpticalDistortion(distort_limit=0.05,
                                                 shift_limit=0.05),
            ],
                                 p=0.2),
            # Blur each channel independently.
            albumentations.core.composition.PerChannel(albumentations.OneOf([
                albumentations.MotionBlur(p=.05),
                albumentations.MedianBlur(blur_limit=3, p=.05),
                albumentations.Blur(blur_limit=3, p=.05),
            ]),
                                                       p=1.0),
            # One style of region dropout, half of the time.
            albumentations.OneOf([
                albumentations.CoarseDropout(max_holes=16,
                                             max_height=CFG.size // 16,
                                             max_width=CFG.size // 16,
                                             fill_value=0,
                                             p=0.5),
                albumentations.GridDropout(ratio=0.09, p=0.5),
                albumentations.Cutout(num_holes=8,
                                      max_h_size=CFG.size // 16,
                                      max_w_size=CFG.size // 16,
                                      p=0.2),
            ],
                                 p=0.5),
            albumentations.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5),
            ToTensorV2(),
        ],
                       # Apply the identical augmentation to every channel
                       # image passed alongside the main image.
                       additional_targets={
                           'r': 'image',
                           'g': 'image',
                           'b': 'image',
                           'y': 'image',
                       })

    elif data_type == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])

    raise ValueError(f"Unknown data_type: {data_type!r}")
Пример #29
0
    def __init__(self, data_dir, is_train=True):
        """Index all samples under *data_dir* and pick a pipeline.

        Args:
            data_dir: root directory; samples are matched two levels deep
                via ``<data_dir>/*/*``.
            is_train: when True, use the augmented training pipeline,
                otherwise the plain normalization pipeline.
        """
        self.paths = sorted(glob.glob(data_dir + '/*/*'))

        # Shared constants between the two pipelines.
        size = 1000
        # NOTE(review): presumably per-channel dataset statistics — verify.
        mean = (0.446, 0.469, 0.472)
        std = (0.326, 0.330, 0.338)

        self.transform_train = A.Compose([
            A.Resize(height=size, width=size),
            A.RandomBrightnessContrast(brightness_limit=0.1,
                                       contrast_limit=0.1,
                                       p=0.5),
            A.RandomGamma(p=0.5),
            A.HorizontalFlip(p=0.5),
            # Exactly one blur flavor, half of the time.
            A.OneOf(
                [A.MotionBlur(blur_limit=3),
                 A.GlassBlur(max_delta=3),
                 A.GaussianBlur(blur_limit=3)],
                p=0.5),
            A.GaussNoise(p=0.5),
            A.Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
            ToTensorV2(p=1.0),
        ])

        self.transform_valid = A.Compose([
            A.Resize(height=size, width=size),
            A.Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
            ToTensorV2(p=1.0),
        ])

        self.data_transforms = (self.transform_train if is_train
                                else self.transform_valid)
def train_transformation():
    """Training-time augmentation pipeline.

    Resizes to 512x512, applies light photometric jitter, a single blur,
    random flips and a shift/scale/rotate, then ImageNet normalization
    and conversion to a torch tensor.

    Returns:
        An ``A.Compose`` pipeline.
    """
    # Exactly one of brightness or contrast jitter is applied.
    photometric = A.OneOf([
        A.RandomBrightness(limit=0.1, p=1),
        A.RandomContrast(limit=0.1, p=1),
    ])
    # One blur flavor, half of the time.
    single_blur = A.OneOf(
        [A.MotionBlur(blur_limit=3),
         A.MedianBlur(blur_limit=3),
         A.GaussianBlur(blur_limit=3)],
        p=0.5)
    affine = A.ShiftScaleRotate(
        shift_limit=0.2,
        scale_limit=0.2,
        rotate_limit=20,
        interpolation=cv2.INTER_LINEAR,
        border_mode=cv2.BORDER_REFLECT_101,
        p=1,
    )
    return A.Compose([
        A.Resize(512, 512, interpolation=cv2.INTER_AREA),
        photometric,
        single_blur,
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        affine,
        # ImageNet mean/std.
        A.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        A.pytorch.transforms.ToTensorV2(),
    ])