Code example #1
def augmentation_simple(image, p=1., sub_p=0.3):
    augmentation_fun = al.Compose([
        al.OneOf([
            al.IAAAdditiveGaussianNoise(),
            al.GaussNoise(),
        ], p=sub_p),
        al.OneOf([
            al.MotionBlur(p=sub_p),
            al.MedianBlur(blur_limit=3, p=sub_p),
            al.Blur(blur_limit=3, p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.OpticalDistortion(p=sub_p),
            al.GridDistortion(p=sub_p),
            al.IAAPiecewiseAffine(p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.CLAHE(clip_limit=3),
            al.IAASharpen(),
            al.IAAEmboss(),
            al.RandomBrightnessContrast()
        ],
                 p=sub_p)
    ],
                                  p=p)
    return augmentation_fun(image=image)['image']
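A minimal usage sketch (the NumPy test image and the al import alias are assumptions, not part of the original snippet). Compose pipelines must be called with named arguments and return a dict, which is why the function above reads the result back through ['image']:

import numpy as np
import albumentations as al  # an older release that still ships the IAA* transforms

dummy = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # hypothetical RGB test image
augmented = augmentation_simple(dummy)  # ndarray with the same shape as the input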
Code example #2
File: model.py  Project: eugenn/bd_torch
def get_train_transforms():
    return A.Compose([
        A.JpegCompression(p=0.5),
        A.Rotate(limit=80, p=1.0),
        A.OneOf([
            A.OpticalDistortion(),
            A.GridDistortion(),
            A.IAAPiecewiseAffine(),
        ]),
        A.RandomSizedCrop(min_max_height=(int(resolution * 0.7), input_res),
                          height=resolution,
                          width=resolution,
                          p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.GaussianBlur(p=0.3),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.HueSaturationValue(),
        ]),
        A.Cutout(num_holes=8,
                 max_h_size=resolution // 8,
                 max_w_size=resolution // 8,
                 fill_value=0,
                 p=0.3),
        A.Normalize(),
        ToTensorV2(),
    ],
                     p=1.0)
def generate_ds(size):
    trfm = A.Compose([
        A.Resize(size, size, p=1.0),
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.RandomRotate90(),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=20,
                           p=0.9,
                           border_mode=cv2.BORDER_REFLECT),
        A.OneOf([
            A.OpticalDistortion(p=0.4),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.4),
        ],
                p=0.3),
        A.OneOf([
            A.HueSaturationValue(10, 15, 10),
            A.CLAHE(clip_limit=3),
            A.RandomBrightnessContrast(),
        ],
                p=0.5)
    ],
                     p=1.0)

    return HubDataset(DATA_PATH,
                      window=WINDOW,
                      overlap=MIN_OVERLAP,
                      transform=trfm)
Code example #4
def predefined_transform() -> A.Compose:
    """
    Example from docs:
    https://github.com/albumentations-team/albumentations_examples/blob/master/notebooks/example.ipynb
    :return: composed augmentation pipeline
    """

    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),            
        ], p=0.3),
        A.HueSaturationValue(p=0.3),
    ])
Code example #5
def _strong_aug(p=0.5):
    import albumentations
    return albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                        scale_limit=0.2,
                                        rotate_limit=0,
                                        p=0.5,
                                        border_mode=cv2.BORDER_CONSTANT),
        albumentations.OneOf([
            albumentations.OpticalDistortion(p=0.5,
                                             border_mode=cv2.BORDER_CONSTANT),
            albumentations.GridDistortion(p=0.5,
                                          border_mode=cv2.BORDER_CONSTANT),
            albumentations.IAAPiecewiseAffine(p=0.5),
            albumentations.ElasticTransform(p=0.5,
                                            border_mode=cv2.BORDER_CONSTANT),
        ],
                             p=0.5),
        albumentations.OneOf([
            albumentations.CLAHE(clip_limit=2),
            albumentations.IAASharpen(),
            albumentations.IAAEmboss(),
        ],
                             p=0.5),
        albumentations.OneOf([
            albumentations.RandomBrightnessContrast(p=0.5),
        ],
                             p=0.4),
        albumentations.HueSaturationValue(p=0.5),
    ],
                                  p=p)
Code example #6
def strong_aug(p=.5):
    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=.1),
            A.Blur(blur_limit=3, p=.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ],
                p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomContrast(),
            A.RandomBrightness(),
        ],
                p=0.3),
        A.HueSaturationValue(p=0.3),
    ],
                     p=p)
Code example #7
def augmentation_hard(image, p=1., sub_p=0.3):
    augmentation_fun = al.Compose([
        al.ShiftScaleRotate(shift_limit=0.1,
                            scale_limit=0.1,
                            rotate_limit=8,
                            p=sub_p,
                            border_mode=cv2.BORDER_CONSTANT),
        al.ElasticTransform(p=sub_p),
        al.OneOf([
            al.IAAAdditiveGaussianNoise(),
            al.GaussNoise(),
        ], p=sub_p),
        al.OneOf([
            al.MotionBlur(p=sub_p),
            al.MedianBlur(blur_limit=3, p=sub_p),
            al.Blur(blur_limit=3, p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.OpticalDistortion(p=sub_p),
            al.GridDistortion(p=sub_p),
            al.IAAPiecewiseAffine(p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.CLAHE(clip_limit=3),
            al.IAASharpen(),
            al.IAAEmboss(),
            al.RandomBrightnessContrast()
        ],
                 p=sub_p)
    ],
                                  p=p)
    return augmentation_fun(image=image)['image']
Code example #8
 def album(self):  # image transformation
     transform = A.Compose([
         #A.RandomRotate90(),
         A.Flip(p=0.2),
         #A.Transpose(),
         A.ChannelShuffle(p=0.3),
         A.ElasticTransform(p=0.3,border_mode=cv2.BORDER_REFLECT_101,alpha_affine=40),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.2),
             A.MedianBlur(blur_limit=3, p=0.1),
             A.Blur(blur_limit=3, p=0.1),
         ], p=0.2),
         A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.1),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
             A.RandomBrightnessContrast(),
         ], p=0.3),
         A.HueSaturationValue(p=0.3),
     ])
     image = cv2.cvtColor(self.srcResize, cv2.COLOR_BGR2RGB)
     transformed = transform(image=image)['image']
     self.update(transformed)
Code example #9
    def get_aug(mode="train"):
        if mode == "Nor":
            aug = A.Compose([
                ToTensor(),
            ])
        elif mode == "train":
            aug = A.Compose([
                A.Flip(),
                A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),
                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ],
                        p=0.5),
                # Affine
                A.OneOf(
                    [A.ElasticTransform(p=1.0),
                     A.IAAPiecewiseAffine(p=1.0)],
                    p=0.5),
                A.Normalize(p=1.0),
                ToTensor(),
            ])
        else:
            aug = A.Compose([
                A.Normalize(p=1.0),
                ToTensor(),
            ])

        return aug
Code example #10
def get_augmentations_transform(crop_size=128, p=0.5, phase="train"):
    imagenet_stats = {'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225]}
    if phase == "train" or "test":
        aug_factor_list = [
            A.RandomResizedCrop(height=crop_size, width=crop_size, scale=(0.8, 1.0)),
            A.Cutout(num_holes=8, p=p),
            A.RandomRotate90(p=p),
            A.HorizontalFlip(p=p),
            A.VerticalFlip(p=p),
            A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=p),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=p),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=p),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ], p=p),
            ToTensor(normalize=imagenet_stats)
        ]
        transformed_image = A.Compose(aug_factor_list)
        return transformed_image
    elif phase == "valid":
        transformed_image = A.Compose([ToTensor(normalize=imagenet_stats)])
        return transformed_image
    else:
        TypeError("Invalid phase type.")
Code example #11
File: alpha_base.py  Project: dreyk/emt
 def _strong_aug(p=0.5):
     return albumentations.Compose([
         albumentations.HorizontalFlip(),
         albumentations.VerticalFlip(),
         albumentations.ShiftScaleRotate(
             shift_limit=0, scale_limit=0, rotate_limit=15, p=0.3),
         albumentations.OneOf([
             albumentations.OpticalDistortion(p=0.3),
             albumentations.GridDistortion(p=0.1),
             albumentations.IAAPiecewiseAffine(p=0.3),
         ],
                              p=0.2),
         albumentations.OneOf([
             albumentations.CLAHE(clip_limit=2),
             albumentations.IAASharpen(),
             albumentations.IAAEmboss(),
         ],
                              p=0.3),
         albumentations.OneOf([
             albumentations.RandomBrightnessContrast(p=0.3),
         ],
                              p=0.4),
         albumentations.HueSaturationValue(p=0.3),
     ],
                                   p=p)
Code example #12
File: augmentations.py  Project: kqf/hubmap
def transform(train=True, mean=None, std=None):
    normalize = alb.Compose([
        alb.Normalize(mean=mean or _mean, std=std or _std),
        ToTensorV2(),
    ])

    if not train:
        return normalize

    return alb.Compose([
        alb.HorizontalFlip(),
        alb.VerticalFlip(),
        alb.RandomRotate90(),
        alb.ShiftScaleRotate(shift_limit=0.0625,
                             scale_limit=0.2,
                             rotate_limit=15,
                             p=0.9,
                             border_mode=cv2.BORDER_REFLECT),
        alb.OneOf([
            alb.OpticalDistortion(p=0.3),
            alb.GridDistortion(p=.1),
            alb.IAAPiecewiseAffine(p=0.3),
        ],
                  p=0.3),
        alb.OneOf([
            alb.HueSaturationValue(10, 15, 10),
            alb.CLAHE(clip_limit=2),
            alb.RandomBrightnessContrast(),
        ],
                  p=0.3),
        normalize,
    ])
Code example #13
    def add_transforms(self):
        if self.train:
            self.transforms += [
                A.Resize(int(self.img_size[0] * 1.1), int(self.img_size[1] * 1.1)),
                A.RandomCrop(self.img_size[0], self.img_size[1]),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.Rotate(p=0.5, border_mode=BORDER_REFLECT, value=0),

                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),

                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),
            ]
        else:
            self.transforms += [
                A.Resize(self.img_size[0], self.img_size[1]),
            ]
Code example #14
def hard_transforms():
    result = [
        # random 90-degree rotations
        albu.RandomRotate90(),
        # Random shifts, stretches and turns with a 50% probability
        albu.ShiftScaleRotate(shift_limit=0.1,
                              scale_limit=0.1,
                              rotate_limit=15,
                              border_mode=0,
                              p=0.5),
        # add random brightness and contrast, 30% prob
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2,
                                      p=0.3),
        # Random gamma changes with a 30% probability
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        # Randomly changes the hue, saturation, and color value of the input image
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(quality_lower=80),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
                   p=0.2),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ],
                   p=0.2),
    ]

    return result
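Since hard_transforms() returns a plain list rather than a composed pipeline, it still has to be wrapped before use; a minimal sketch (the train_aug name and the image variable are illustrative):

train_aug = albu.Compose(hard_transforms(), p=1.0)
augmented = train_aug(image=image)['image']  # image: HxWxC uint8 NumPy array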
Code example #15
File: augmentations.py  Project: ternaus/giana
def get_train_transform():
    crop_height = 256
    crop_width = 256

    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=0.5),
            albu.GaussNoise(p=0.5),
        ], p=0.2),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        albu.OneOf([
            albu.CLAHE(clip_limit=2, p=0.5),
            albu.IAASharpen(p=0.5),
            albu.IAAEmboss(p=0.5),
            albu.RandomBrightnessContrast(p=0.5),
        ], p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1)
    ], p=1)
Code example #16
def albumentation():
    transform = albumentations.Compose([          
                    albumentations.OneOf([
                        albumentations.GaussNoise(),
                        albumentations.IAAAdditiveGaussianNoise()
                    ]),
                    albumentations.OneOf([
                        albumentations.MotionBlur(blur_limit=3, p=0.2),
                        albumentations.MedianBlur(blur_limit=3, p=0.1),
                        albumentations.Blur(blur_limit=2, p=0.1)
                    ]),
                    albumentations.OneOf([
                        albumentations.RandomBrightness(limit=(0.1, 0.4)),
                        albumentations.HueSaturationValue(hue_shift_limit=(0, 128), sat_shift_limit=(0, 60), val_shift_limit=(0, 20)),
                        albumentations.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30)
                    ]),
                    albumentations.OneOf([
                        albumentations.CLAHE(),
                        albumentations.ChannelShuffle(),
                        albumentations.IAASharpen(),
                        albumentations.IAAEmboss(),
                        albumentations.RandomBrightnessContrast(),
                    ]),                
                    albumentations.OneOf([
                        albumentations.RandomGamma(gamma_limit=(35,255)),
                        albumentations.OpticalDistortion(),
                        albumentations.GridDistortion(),
                        albumentations.IAAPiecewiseAffine()
                    ]),                
                    A_torch.ToTensor(normalize={
                        "mean": [0.485, 0.456, 0.406],
                        "std" : [0.229, 0.224, 0.225]})
                    ])
    return transform
Code example #17
    def get_aug(mode="train"):
        if mode=="Nor":
            aug=A.Compose([
                ToTensor(),
            ])
        elif mode =="train":
            print("train aug")
            mean = (0.485,0.456,0.406)
            std = (0.229,0.224,0.225)
            aug=A.Compose([
                A.Flip(),
                A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),
                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),
                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),

                A.Normalize(mean=mean,std=std,max_pixel_value=255.0,always_apply=True),
            ])
        else:
            print("valid/test aug")
            mean = (0.485,0.456,0.406)
            std = (0.229,0.224,0.225)
            aug=A.Compose([
                A.Normalize(mean=mean,std=std,max_pixel_value=255.0,always_apply=True),
            ])

        return aug 
Code example #18
def get_augmentations(p=0.5, image_size=224):
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15,
                           scale_limit=0.4,
                           rotate_limit=45,
                           p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])

    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])

    return train_tfms, valid_tfms
Code example #19
 def __init__(self, p=0.5):
     self.p = p
     self.scr = albu.ShiftScaleRotate(shift_limit=0.1,
                                      scale_limit=0.0,
                                      rotate_limit=15,
                                      p=self.p)
     self.ig = albu.IAAAdditiveGaussianNoise(p=self.p)
     self.ipa = albu.IAAPiecewiseAffine(p=self.p)
def get_train_transforms(p=1.0):
    return A.Compose(
        [
            A.OneOf(
                [
                    A.CenterCrop(2 * IMG_SIZE // 3, 2 * IMG_SIZE // 3, p=0.5),
                    A.CenterCrop(3 * IMG_SIZE // 4, 3 * IMG_SIZE // 4, p=0.5),
                ],
                p=0.33,
            ),
            A.Resize(
                IMG_SIZE, IMG_SIZE, interpolation=1, always_apply=True, p=1),
            A.Flip(),
            A.Transpose(),
            GridMask(num_grid=(1, 4), rotate=15, p=0.33),
            A.OneOf(
                [
                    A.MedianBlur(blur_limit=3, p=0.5),
                    A.Blur(blur_limit=3, p=0.5),
                ],
                p=0.5,
            ),
            A.OneOf(
                [
                    A.ShiftScaleRotate(
                        interpolation=1,
                        shift_limit=0.05,
                        scale_limit=0.1,
                        rotate_limit=15,
                        p=0.5,
                    ),
                    A.IAAPiecewiseAffine(scale=(0.02, 0.04), p=0.5),
                ],
                p=0.33,
            ),
            A.OneOf(
                [
                    A.HueSaturationValue(
                        hue_shift_limit=20,
                        sat_shift_limit=30,
                        val_shift_limit=20,
                        p=0.5,
                    ),
                    A.RandomBrightnessContrast(p=0.5),
                ],
                p=0.5,
            ),
            A.MultiplicativeNoise(
                multiplier=[0.9, 1.1], elementwise=True, p=0.3),
            A.Normalize(mean, std, max_pixel_value=255.0, always_apply=True),
            ToTensorV2(p=1.0),
        ],
        p=p,
    )
Code example #21
def spatial_and_noise(p):
    return albu.Compose([
        albu.ShiftScaleRotate(rotate_limit=30,
                              scale_limit=15,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=-1024,
                              p=0.5),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.2),
        albu.IAAPiecewiseAffine(p=0.2)
    ], p=p, additional_targets={'image{}'.format(_) : 'image' for _ in range(1, 101)})
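The additional_targets mapping registers the keys image1 ... image100 as extra image targets, so a single random draw of the transforms is replayed identically on every slice passed in. A minimal call sketch with three slices (variable names are illustrative):

aug = spatial_and_noise(p=0.8)
out = aug(image=slices[0], image1=slices[1], image2=slices[2])
augmented_stack = [out['image'], out['image1'], out['image2']]  # same parameters applied to each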
Code example #22
def DistortionBase():
    return A.Compose([
        A.OneOf(
            [
                A.IAAPerspective(),
                A.IAAPiecewiseAffine(),
                A.GridDistortion(),
                A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
            p=0.50,
        )
    ])
Code example #23
def create_train_transform(flip,
        noise,
        cutout,
        resize,
        size = 112,
        bright = True):
    
    translist = []
    
    if flip:
        translist+=[albumentations.OneOf([
                albumentations.Rotate(limit=30),
                albumentations.IAAPiecewiseAffine(),
                albumentations.ShiftScaleRotate(
                shift_limit=0.02,
                scale_limit=0.3,
                rotate_limit=10,
                ),
                albumentations.HorizontalFlip()],p=0.7)]

    if noise:
        translist+=[albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=6),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.OpticalDistortion(),
            albumentations.CLAHE(),
            albumentations.GaussNoise(var_limit=(5.0,20.0))], p=0.75)]

    if bright:
        translist+=[albumentations.OneOf([
          albumentations.RandomBrightness(limit=0.6),
          #albumentations.Sharpen(),
          albumentations.ColorJitter(),
          albumentations.RandomBrightnessContrast(brightness_limit=0.6, contrast_limit=0.6)],p=0.7)]

    if cutout:
        translist+=[albumentations.OneOf([
            albumentations.CoarseDropout(),
            albumentations.Cutout(max_h_size = int(size * np.random.rand(1)*0.5), max_w_size = int(size * np.random.rand(1)*0.5), num_holes=np.random.randint(1,3))
            ],p=0.75)]
            
    if resize:
        translist+=[albumentations.Resize(size+10, size+10, interpolation=2)]
        translist+=[albumentations.RandomCrop(size,size,always_apply=True)]

    #translist+=[albumentations.Normalize(mean=(0.2481, 0.2292, 0.2131), std = (0.2167,0.2071,0.2014))]
    #translist+=[albumentations.Normalize(mean=(0.2248, 0.2080, 0.1929), std = (0.2231, 0.2140, 0.2083))]
    #trainlist+=[albumentations.Normalize(mean=(0.2539, 0.2348, 0.2189), std = (0.2195,0.2110,0.2061))]
    #translist+=[albumentations.Normalize(mean=(0.2580, 0.2360, 0.2215), std = (0.2235, 0.2132, 0.2100))]
    translist+=[albumentations.Normalize(mean=(0.1977, 0.2115, 0.2275), std = (0.2177, 0.2227, 0.2317))]
    #translist+=[albumentations.Normalize(mean=(0.2527, 0.2343, 0.2177), std = (0.2171, 0.2082, 0.2026))]
    transform = albumentations.Compose(translist)
    return transform
Code example #24
def hard_transforms_2():
    black = (0, 0, 0)

    result = [
        albu.Flip(),
        albu.RandomRotate90(),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(),
            albu.GaussNoise(),
        ],
                   p=0.2),
        albu.CoarseDropout(),
        albu.OneOf(
            [
                #   albu.MotionBlur(p=0.2),
                albu.MedianBlur(p=0.1),
                albu.Blur(blur_limit=3, p=0.1)
            ],
            p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625,
                              scale_limit=0.2,
                              rotate_limit=45,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=black,
                              mask_value=black,
                              p=0.2),
        albu.OneOf(
            [
                albu.CLAHE(clip_limit=2),
                albu.IAASharpen(),
                #   IAAEmboss(),
                albu.RandomBrightnessContrast(brightness_limit=0.2,
                                              contrast_limit=0.2),
            ],
            p=0.3),
        albu.OneOf([
            albu.OpticalDistortion(border_mode=cv2.BORDER_CONSTANT,
                                   value=black,
                                   mask_value=black,
                                   p=0.3),
            albu.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                                value=black,
                                mask_value=black,
                                p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ],
                   p=0.2),
        albu.HueSaturationValue(p=0.3)
    ]

    return result
Code example #25
def data_total():
  return A.Compose([
    A.HorizontalFlip(p=0.5),
    A.ShiftScaleRotate(),
    A.RandomBrightnessContrast(brightness_limit=0.15, contrast_limit=0.2, p=0.5),
    A.RandomResizedCrop(512,512,scale = (0.5,0.8)),
    A.VerticalFlip(p=0.5),
    A.OneOf([
            A.OpticalDistortion(p=0.45),
            A.GridDistortion(p=0.1),
            A.IAAPiecewiseAffine(p=0.45)
        ], p=0.5),
    ToTensorV2()
  ])
Code example #26
 def __init__(self, p=0.5):
     self.zhiguang = True
     if self.zhiguang:
         self.blur = albu.Blur(blur_limit=3, p=0.5)
         self.chanelshuffle = albu.ChannelShuffle(p=0.5)
         self.hueSaturation = albu.HueSaturationValue(p=0.5)
         #albu.InvertImg(p=0.2),
         self.togray = albu.ToGray(p=0.2)
     else:
         self.p = p
         self.scr = albu.ShiftScaleRotate(shift_limit=0.1,
                                          scale_limit=0.0,
                                          rotate_limit=15,
                                          p=self.p)
         self.ig = albu.IAAAdditiveGaussianNoise(p=self.p)
         self.ipa = albu.IAAPiecewiseAffine(p=self.p)
Code example #27
 def __init__(self):
     self.policy = A.Compose([
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
Code example #28
def hard_transforms():
    result = [
        # Random shifts, stretches and turns with a 50% probability
        A.Flip(),
        A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),

        # Pixels
        A.OneOf([
            A.IAAEmboss(p=1.0),
            A.IAASharpen(p=1.0),
            A.Blur(p=1.0),
        ], p=0.5),

        # Affine
        A.OneOf([
            A.ElasticTransform(p=1.0),
            A.IAAPiecewiseAffine(p=1.0)
        ], p=0.5),
    ]

    return result
Code example #29
def data_augmentation(image_path, aug_num):
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    augmentation = A.Compose(
        [
            A.RandomRotate90(),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
                    p=0.2),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
                    p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
                A.RandomBrightnessContrast(),
            ],
                    p=0.3),
            # A.HueSaturationValue(p=0.3),
        ],
        p=0.5)
    patches = []
    for _ in range(aug_num):
        patches.append(augmentation(image=image)['image'])
    return patches
Code example #30
    def _setup_transform(self, cfg):
        # Albumentation example: https://albumentations.readthedocs.io/en/latest/examples.html
        self.img_mask_transform = A.Compose([
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=175, p=0.8, border_mode=cv2.BORDER_CONSTANT),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.ElasticTransform(),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.IAAPiecewiseAffine(),
            ]),
            A.OneOf([
                    A.RandomCrop(height=self.size_crop,width=self.size_crop,p=0.5),  
                    A.CenterCrop(height=self.size_crop,width=self.size_crop,p=0.5)
            ]),            
            A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0,p=0.5),
            ],p=0.8)

        self.img_pixel_transform = A.Compose([
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.OneOf([
                A.IAASharpen(),
                A.IAAEmboss(),
                # A.RandomBrightnessContrast(),            
            ], p=0.3),
            A.HueSaturationValue(hue_shift_limit=3,sat_shift_limit=20,val_shift_limit=3 ,p=0.2),
        ],p=0.5)
        # Torch transform
        self.resize_transform = transforms.Resize(cfg.MODEL.IMAGE_SIZE, Image.NEAREST)
        self.to_tensor_transform = transforms.ToTensor()
        self.normalize_transform = transforms.Normalize(mean=cfg.TRAIN.NORMALIZE_MEAN, std=cfg.TRAIN.NORMALIZE_STD)
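A sketch of how these stages might be chained inside a dataset's __getitem__ (variable names are assumptions): the first Compose receives both image and mask so the geometric transforms stay aligned, while the pixel-level Compose touches only the image.

augmented = self.img_mask_transform(image=image, mask=mask)
image, mask = augmented['image'], augmented['mask']
image = self.img_pixel_transform(image=image)['image']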