Example #1
def predefined_transform() -> A.Compose:
    """
    Example from the Albumentations docs:
    https://github.com/albumentations-team/albumentations_examples/blob/master/notebooks/example.ipynb
    Note: the IAA* transforms require imgaug and albumentations < 1.0.
    :return: a composed augmentation pipeline
    """

    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),            
        ], p=0.3),
        A.HueSaturationValue(p=0.3),
    ])
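The function above builds the pipeline but never applies it. A minimal usage sketch (the imports, the file name and the RGB conversion are assumptions added here, not part of the original example):

import albumentations as A
import cv2

transform = predefined_transform()
image = cv2.imread("some_image.jpg")            # hypothetical input file
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Albumentations works on RGB numpy arrays
augmented = transform(image=image)["image"]     # Compose is called with keyword targets and returns a dict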
Example #2
 def album(self):  # image transformation
     transform = A.Compose([
         #A.RandomRotate90(),
         A.Flip(p=0.2),
         #A.Transpose(),
         A.ChannelShuffle(p=0.3),
         A.ElasticTransform(p=0.3,border_mode=cv2.BORDER_REFLECT_101,alpha_affine=40),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.2),
             A.MedianBlur(blur_limit=3, p=0.1),
             A.Blur(blur_limit=3, p=0.1),
         ], p=0.2),
         A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.1),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
             A.RandomBrightnessContrast(),
         ], p=0.3),
         A.HueSaturationValue(p=0.3),
     ])
     image = cv2.cvtColor(self.srcResize, cv2.COLOR_BGR2RGB)
     transformed = transform(image=image)['image']
     self.update(transformed)
Example #3
def augmentation_hard(image, p=1., sub_p=0.3):
    augmentation_fun = al.Compose([
        al.ShiftScaleRotate(shift_limit=0.1,
                            scale_limit=0.1,
                            rotate_limit=8,
                            p=sub_p,
                            border_mode=cv2.BORDER_CONSTANT),
        al.ElasticTransform(p=sub_p),  # keyword needed: the first positional arg of ElasticTransform is alpha, not p
        al.OneOf([
            al.IAAAdditiveGaussianNoise(),
            al.GaussNoise(),
        ], p=sub_p),
        al.OneOf([
            al.MotionBlur(p=sub_p),
            al.MedianBlur(blur_limit=3, p=sub_p),
            al.Blur(blur_limit=3, p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.OpticalDistortion(p=sub_p),
            al.GridDistortion(p=sub_p),
            al.IAAPiecewiseAffine(p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.CLAHE(clip_limit=3),
            al.IAASharpen(),
            al.IAAEmboss(),
            al.RandomBrightnessContrast()
        ],
                 p=sub_p)
    ], p=p)
    return augmentation_fun(image=image)['image']  # Compose takes named targets and returns a dict
Example #4
    def get_aug(mode="train"):
        if mode=="Nor":
            aug=A.Compose([
                ToTensor(),
            ])
        elif mode =="train":
            print("train aug")
            mean = (0.485,0.456,0.406)
            std = (0.229,0.224,0.225)
            aug=A.Compose([
                A.Flip(),
                A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),
                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),
                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),

                A.Normalize(mean=mean,std=std,max_pixel_value=255.0,always_apply=True),
            ])
        else:
            print("valid/test aug")
            mean = (0.485,0.456,0.406)
            std = (0.229,0.224,0.225)
            aug=A.Compose([
                A.Normalize(mean=mean,std=std,max_pixel_value=255.0,always_apply=True),
            ])

        return aug 
Example #5
 def augment_image(self, image):
     transform = A.Compose([
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.3),
         A.OneOf([
             A.MotionBlur(p=.4),
             A.MedianBlur(blur_limit=3, p=0.3),
             A.Blur(blur_limit=3, p=0.3),
         ],
                 p=0.4),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
             A.RandomBrightnessContrast(),
         ],
                 p=0.3),
         A.HueSaturationValue(p=0.3),
     ])
     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     augmented_image = transform(image=image)['image']
     augmented_image = cv2.cvtColor(augmented_image, cv2.COLOR_RGB2BGR)
     return augmented_image
Example #6
def augmentation_simple(image, p=1., sub_p=0.3):
    augmentation_fun = al.Compose([
        al.OneOf([
            al.IAAAdditiveGaussianNoise(),
            al.GaussNoise(),
        ], p=sub_p),
        al.OneOf([
            al.MotionBlur(p=sub_p),
            al.MedianBlur(blur_limit=3, p=sub_p),
            al.Blur(blur_limit=3, p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.OpticalDistortion(p=sub_p),
            al.GridDistortion(p=sub_p),
            al.IAAPiecewiseAffine(p=sub_p),
        ],
                 p=sub_p),
        al.OneOf([
            al.CLAHE(clip_limit=3),
            al.IAASharpen(),
            al.IAAEmboss(),
            al.RandomBrightnessContrast()
        ],
                 p=sub_p)
    ], p=p)
    return augmentation_fun(image=image)['image']  # Compose takes named targets and returns a dict
Example #7
 def _strong_aug(p=0.5):
     return albumentations.Compose([
         albumentations.HorizontalFlip(),
         albumentations.VerticalFlip(),
         albumentations.ShiftScaleRotate(
             shift_limit=0, scale_limit=0, rotate_limit=15, p=0.3),
         albumentations.OneOf([
             albumentations.OpticalDistortion(p=0.3),
             albumentations.GridDistortion(p=0.1),
             albumentations.IAAPiecewiseAffine(p=0.3),
         ],
                              p=0.2),
         albumentations.OneOf([
             albumentations.CLAHE(clip_limit=2),
             albumentations.IAASharpen(),
             albumentations.IAAEmboss(),
         ],
                              p=0.3),
         albumentations.OneOf([
             albumentations.RandomBrightnessContrast(p=0.3),
         ],
                              p=0.4),
         albumentations.HueSaturationValue(p=0.3),
     ],
                                   p=p)
Example #8
def _strong_aug(p=0.5):
    import albumentations
    return albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                        scale_limit=0.2,
                                        rotate_limit=0,
                                        p=0.5,
                                        border_mode=cv2.BORDER_CONSTANT),
        albumentations.OneOf([
            albumentations.OpticalDistortion(p=0.5,
                                             border_mode=cv2.BORDER_CONSTANT),
            albumentations.GridDistortion(p=0.5,
                                          border_mode=cv2.BORDER_CONSTANT),
            albumentations.IAAPiecewiseAffine(p=0.5),
            albumentations.ElasticTransform(p=0.5,
                                            border_mode=cv2.BORDER_CONSTANT),
        ],
                             p=0.5),
        albumentations.OneOf([
            albumentations.CLAHE(clip_limit=2),
            albumentations.IAASharpen(),
            albumentations.IAAEmboss(),
        ],
                             p=0.5),
        albumentations.OneOf([
            albumentations.RandomBrightnessContrast(p=0.5),
        ],
                             p=0.4),
        albumentations.HueSaturationValue(p=0.5),
    ],
                                  p=p)
Example #9
def get_train_transform():
    crop_height = 256
    crop_width = 256

    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=0.5),
            albu.GaussNoise(p=0.5),
        ], p=0.2),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        albu.OneOf([
            albu.CLAHE(clip_limit=2, p=0.5),
            albu.IAASharpen(p=0.5),
            albu.IAAEmboss(p=0.5),
            albu.RandomBrightnessContrast(p=0.5),
        ], p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1)
    ], p=1)
Example #10
    def add_transforms(self):
        if self.train:
            self.transforms += [
                A.Resize(int(self.img_size[0] * 1.1), int(self.img_size[1] * 1.1)),
                A.RandomCrop(self.img_size[0], self.img_size[1]),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.Rotate(p=0.5, border_mode=BORDER_REFLECT, value=0),

                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),

                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),
            ]
        else:
            self.transforms += [
                A.Resize(self.img_size[0], self.img_size[1]),
            ]
Example #11
def augment(image):
    transform = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=15,
                           p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ],
                p=0.5),
        A.HueSaturationValue(p=0.3),
    ])
    augmented_image = transform(image=image)['image']
    return augmented_image
Example #12
def albumentation():
    transform = albumentations.Compose([          
                    albumentations.OneOf([
                        albumentations.GaussNoise(),
                        albumentations.IAAAdditiveGaussianNoise()
                    ]),
                    albumentations.OneOf([
                        albumentations.MotionBlur(blur_limit=3, p=0.2),
                        albumentations.MedianBlur(blur_limit=3, p=0.1),
                        albumentations.Blur(blur_limit=2, p=0.1)
                    ]),
                    albumentations.OneOf([
                        albumentations.RandomBrightness(limit=(0.1, 0.4)),
                        albumentations.HueSaturationValue(hue_shift_limit=(0, 128), sat_shift_limit=(0, 60), val_shift_limit=(0, 20)),
                        albumentations.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30)
                    ]),
                    albumentations.OneOf([
                        albumentations.CLAHE(),
                        albumentations.ChannelShuffle(),
                        albumentations.IAASharpen(),
                        albumentations.IAAEmboss(),
                        albumentations.RandomBrightnessContrast(),
                    ]),                
                    albumentations.OneOf([
                        albumentations.RandomGamma(gamma_limit=(35,255)),
                        albumentations.OpticalDistortion(),
                        albumentations.GridDistortion(),
                        albumentations.IAAPiecewiseAffine()
                    ]),                
                    A_torch.ToTensor(normalize={
                        "mean": [0.485, 0.456, 0.406],
                        "std" : [0.229, 0.224, 0.225]})
                    ])
    return transform
Example #13
def augmentations(image_size: int):
    channel_augs = [
        A.HueSaturationValue(p=0.5),
        A.ChannelShuffle(p=0.5),
    ]

    result = [
        # *pre_transform(image_size),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.5),
        A.OneOf([
            A.MotionBlur(blur_limit=3, p=0.7),
            A.MedianBlur(blur_limit=3, p=1.0),
            A.Blur(blur_limit=3, p=0.7),
        ],
                p=0.5),
        A.OneOf(channel_augs),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
        ],
                p=0.5),
        A.RandomBrightnessContrast(brightness_limit=0.5,
                                   contrast_limit=0.5,
                                   p=0.5),
        A.RandomGamma(p=0.5),
        A.OneOf([A.MedianBlur(p=0.5), A.MotionBlur(p=0.5)]),
        A.RandomGamma(gamma_limit=(85, 115), p=0.5),
    ]
    return A.Compose(result, bbox_params=BBOX_PARAMS)
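BBOX_PARAMS is defined outside this snippet. A plausible definition, purely as an assumption since the real values are not shown, would use A.BboxParams:

import albumentations as A

# Hypothetical bbox settings; the actual BBOX_PARAMS used by this code is not shown above.
BBOX_PARAMS = A.BboxParams(
    format="pascal_voc",      # assumed box format
    label_fields=["labels"],  # assumed key for per-box labels
    min_area=0.0,
    min_visibility=0.0,
)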
Example #14
 def weak_aug(self, p=0.5):
     '''Create a weakly augmented image framework'''
     return A.Compose([
         A.HorizontalFlip(),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=0.2),
             A.MedianBlur(blur_limit=3, p=0.1),
             A.Blur(blur_limit=3, p=0.1),
         ],
                 p=0.2),
         A.ShiftScaleRotate(
             shift_limit=0.0625, scale_limit=0.2, rotate_limit=10, p=0.2),
         A.OpticalDistortion(p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ],
                 p=0.3),
     ],
                      p=p)
Example #15
def augment_image(image):
    # Works with single image
    transform = A.Compose([
        A.HorizontalFlip(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(),
            A.MedianBlur(blur_limit=3),
            A.Blur(blur_limit=3),
        ],
                p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=45,
                           p=0.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
        ], p=0.2),
        A.OneOf([
            A.IAASharpen(p=1.),
            A.IAAEmboss(p=1.),
            A.RandomBrightnessContrast(p=1.),
        ],
                p=0.3),
        A.HueSaturationValue(hue_shift_limit=5,
                             sat_shift_limit=5,
                             val_shift_limit=5,
                             p=0.3),
    ])

    return transform(image=image)['image']
Example #16
def strong_aug(p=.5):
    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=.1),
            A.Blur(blur_limit=3, p=.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ],
                p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomContrast(),
            A.RandomBrightness(),
        ],
                p=0.3),
        A.HueSaturationValue(p=0.3),
    ],
                     p=p)
Example #17
    def get_aug(mode="train"):
        if mode == "Nor":
            aug = A.Compose([
                ToTensor(),
            ])
        elif mode == "train":
            aug = A.Compose([
                A.Flip(),
                A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),
                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ],
                        p=0.5),
                # Affine
                A.OneOf(
                    [A.ElasticTransform(p=1.0),
                     A.IAAPiecewiseAffine(p=1.0)],
                    p=0.5),
                A.Normalize(p=1.0),
                ToTensor(),
            ])
        else:
            aug = A.Compose([
                A.Normalize(p=1.0),
                ToTensor(),
            ])

        return aug
Example #18
def augmentation(image_size, train=True):
    max_crop = image_size // 5
    if train:
        data_transform = A.Compose([
            A.Resize(image_size, image_size),
            A.Compose([
                A.OneOf([
                    A.RandomRain(p=0.1),
                    A.GaussNoise(mean=15),
                    A.GaussianBlur(blur_limit=10, p=0.4),
                    A.MotionBlur(p=0.2)
                ]),
                A.OneOf([
                    A.RGBShift(p=1.0,
                               r_shift_limit=(-10, 10),
                               g_shift_limit=(-10, 10),
                               b_shift_limit=(-10, 10)),
                    A.RandomBrightnessContrast(
                        brightness_limit=0.3, contrast_limit=0.1, p=1),
                    A.HueSaturationValue(hue_shift_limit=20, p=1),
                ],
                        p=0.6),
                A.OneOf([
                    A.CLAHE(clip_limit=2),
                    A.IAASharpen(),
                    A.IAAEmboss(),
                ]),
                A.OneOf([A.IAAPerspective(p=0.3),
                         A.ElasticTransform(p=0.1)]),
                A.OneOf([
                    A.Rotate(limit=25, p=0.6),
                    A.IAAAffine(
                        scale=0.9,
                        translate_px=15,
                        rotate=25,
                        shear=0.2,
                    )
                ],
                        p=1),
                A.Cutout(num_holes=1,
                         max_h_size=max_crop,
                         max_w_size=max_crop,
                         p=0.2)
            ],
                      p=1),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            AT.ToTensor()
        ])
    else:
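        # Note: this branch builds a torchvision pipeline, so it is called as
        # data_transform(image), whereas the train branch above returns an
        # Albumentations Compose called as data_transform(image=image)['image'].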
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return data_transform
Example #19
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    augs = {
        'strong':
        albu.Compose([
            albu.HorizontalFlip(),
            albu.ShiftScaleRotate(shift_limit=0.0,
                                  scale_limit=0.2,
                                  rotate_limit=20,
                                  p=.4),
            albu.ElasticTransform(),
            albu.OpticalDistortion(),
            albu.OneOf([
                albu.CLAHE(clip_limit=2),
                albu.IAASharpen(),
                albu.IAAEmboss(),
                albu.RandomBrightnessContrast(),
                albu.RandomGamma()
            ],
                       p=0.5),
            albu.OneOf([
                albu.RGBShift(),
                albu.HueSaturationValue(),
            ], p=0.5),
        ]),
        'weak':
        albu.Compose([
            albu.HorizontalFlip(),
        ]),
        'geometric':
        albu.OneOf([
            albu.HorizontalFlip(always_apply=True),
            albu.ShiftScaleRotate(always_apply=True),
            albu.Transpose(always_apply=True),
            albu.OpticalDistortion(always_apply=True),
            albu.ElasticTransform(always_apply=True),
        ])
    }

    crop_fn = {
        'random': albu.RandomCrop(size, size, always_apply=True),
        'center': albu.CenterCrop(size, size, always_apply=True)
    }[crop]
    pad = albu.PadIfNeeded(size, size)
    if scope == "nothing":
        pipeline = albu.Compose([crop_fn, pad],
                                additional_targets={'target': 'image'})
    else:
        aug_fn = augs[scope]
        pipeline = albu.Compose([aug_fn, crop_fn, pad],
                                additional_targets={'target': 'image'})

    def process(a, b):
        r = pipeline(image=a, target=b)
        return r['image'], r['target']

    return process
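A short usage sketch for the returned callable (file names are placeholders): because the second image is registered via additional_targets, both inputs receive the same random parameters.

import cv2

process = get_transforms(size=256, scope='strong', crop='random')
degraded = cv2.imread('degraded.png')   # hypothetical input image
clean = cv2.imread('clean.png')         # hypothetical paired target image
degraded_aug, clean_aug = process(degraded, clean)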
Example #20
def get_training_augmentation(min_area=0., min_visibility=0.):
    train_transform = [
        albu.OneOf([
            albu.MotionBlur(p=.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
                   p=0.2),
        albu.ShiftScaleRotate(shift_limit=0,
                              scale_limit=0,
                              rotate_limit=15,
                              p=0.5),
        albu.OneOf([
            albu.CLAHE(clip_limit=2),
            albu.IAASharpen(),
            albu.IAAEmboss(),
        ],
                   p=0.3),
        albu.OneOf([
            albu.RandomFog(fog_coef_lower=0.1, fog_coef_upper=0.15, p=0.1),
            albu.RandomShadow(p=0.1),
            albu.RandomBrightness(limit=0.3, p=0.2),
            albu.RandomRain(slant_lower=0,
                            slant_upper=8,
                            drop_length=0,
                            blur_value=4,
                            brightness_coefficient=0.8,
                            rain_type='heavy',
                            p=0.1),
            albu.RandomSunFlare(p=0.2),
        ]),
        albu.OneOf([
            albu.RGBShift(p=0.1),
            albu.HueSaturationValue(p=0.3),
        ]),
        albu.OneOf([
            albu.HorizontalFlip(p=0.5),
            albu.RandomSizedCrop(min_max_height=(720, 1380),
                                 height=1380,
                                 width=720,
                                 interpolation=cv2.INTER_AREA)
        ],
                   p=0.2)
    ]
    return albu.Compose(train_transform,
                        bbox_params={
                            'format': 'coco',
                            'min_area': min_area,
                            'min_visibility': min_visibility,
                            'label_fields': ['category_id']
                        })
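A hedged usage sketch for the pipeline above (image, boxes and labels are placeholders): with format 'coco' the boxes are [x_min, y_min, width, height], and because label_fields=['category_id'] the labels are passed under that keyword.

transform = get_training_augmentation()
augmented = transform(
    image=image,                   # HxWx3 numpy array (placeholder)
    bboxes=[[10, 20, 100, 150]],   # COCO-format boxes (placeholder values)
    category_id=[1],               # one label per box
)
aug_image, aug_boxes = augmented['image'], augmented['bboxes']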
Example #21
def run_train(data_path: Path):
    cfg = BasicConfig(
        seed=444,
        name='test_center_vgg_retrain2',
        num_workers=4,
        gpus=(0,),
        batch_size=24,
        num_epochs=15,
        steps=(4, 9, np.inf),
        warmup_epoch=1,
        cooldown_epoch=2,
        train_augmentations=alb.Compose([
            alb.Resize(128, 128),
            alb.OneOf([
                alb.MotionBlur(blur_limit=5, p=0.2),
                alb.MedianBlur(blur_limit=3, p=0.1),
                alb.Blur(blur_limit=5, p=0.1)
            ], p=0.2),
            alb.OneOf([
                alb.ImageCompression(70, compression_type=alb.ImageCompression.ImageCompressionType.JPEG),
                alb.ImageCompression(70, compression_type=alb.ImageCompression.ImageCompressionType.WEBP)
            ], p=0.2),
            alb.OneOf([
                alb.CLAHE(clip_limit=2),
                alb.IAASharpen(),
                alb.IAAEmboss(),
                alb.RandomBrightnessContrast(),
            ], p=0.1),
            alb.Rotate(5, border_mode=cv2.BORDER_REFLECT, p=0.2),
            alb.OneOf([
                alb.RandomResizedCrop(112, 112, scale=(0.9, 1.0), ratio=(0.8, 1.1), p=0.5),
                alb.Resize(112, 112, p=0.5),
            ], p=1.0),
            alb.HorizontalFlip(p=0.5),
            alb.HueSaturationValue(p=0.7),
            alb.ChannelShuffle(p=0.5)
        ]),
        normalize=True,
        weight_normalize=True,
        uniform_subjects=True,
        classifier_mult=3,
        lr_factor=0.1,
        initial_lr=1e-2,
        # extra_rec=(Path('/run/media/andrey/Fast/FairFace/faces_emore/train.rec'),)
    )
    np.random.seed(cfg.seed)
    mx.random.seed(cfg.seed)
    train_df = load_info(data_path, Path('data/train_df.csv'))
    lib.train.train(cfg, train_df)
Example #22
def get_transforms(config):
    size = config['size']
    scope = config['scope']
    crop = config['crop']
    p = config['p']

    augs = {'strong': albu.Compose([albu.HorizontalFlip(),
                                    albu.VerticalFlip(),
                                    albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=20, p=.4),
                                    albu.ElasticTransform(),
                                    albu.OpticalDistortion(),
                                    albu.OneOf([
                                        albu.CLAHE(clip_limit=2),
                                        albu.IAASharpen(),
                                        albu.IAAEmboss(),
                                        albu.RandomBrightnessContrast(),
                                        albu.RandomGamma()
                                    ], p=0.5),
                                    albu.OneOf([
                                        albu.RGBShift(),
                                        albu.HueSaturationValue(),
                                    ], p=0.5),
                                    ]),
            'weak': albu.Compose([albu.HorizontalFlip(),
                                  ]),
            'geometric': albu.OneOf([albu.HorizontalFlip(p=p),
                                     albu.VerticalFlip(p=p),
                                     albu.ShiftScaleRotate(p=p),
                                     albu.Transpose(p=p),
                                     # albu.OpticalDistortion(p=p),
                                     # albu.ElasticTransform(p=p),
                                     ])
            }

    aug_fn = augs[scope]
    crop_fn = {'random': albu.RandomCrop(size[0], size[1], always_apply=True),
               'center': albu.CenterCrop(size[0], size[1], always_apply=True), 'None': None}[crop]
    pad = albu.PadIfNeeded(size[0], size[1])

    pipeline = albu.Compose([t for t in (aug_fn, crop_fn, pad) if t is not None])  # crop_fn is None when crop == 'None'

    def process(a, b):
        r = pipeline(image=a, mask=b)
        return r['image'], r['mask']

    return process
Example #23
 def __init__(self):
     self.policy = A.Compose([
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
Example #24
def hard_transforms(crop_size=512):
    return albu.Compose([
        albu.ShiftScaleRotate(shift_limit=0,
                              scale_limit=0.1,
                              rotate_limit=180,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=0,
                              mask_value=0),
        albu.CropNonEmptyMaskIfExists(crop_size, crop_size, p=1),
        albu.RandomShadow(shadow_roi=(0, 0, 1, 1),
                          num_shadows_lower=1,
                          num_shadows_upper=4,
                          shadow_dimension=7,
                          always_apply=False,
                          p=0.5),
        albu.HueSaturationValue(p=0.3),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(),
            albu.GaussNoise(),
            albu.MultiplicativeNoise(
                multiplier=[0.5, 1.5], per_channel=True, p=1)
        ],
                   p=0.3),
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
                   p=0.2),
        albu.OneOf([
            albu.CLAHE(clip_limit=2),
            albu.IAASharpen(),
            albu.IAAEmboss(),
            albu.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.3),
            albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        ],
                   p=0.3),
        albu.JpegCompression(quality_lower=40, quality_upper=100, p=0.5),
        albu.Cutout(
            num_holes=25, max_h_size=5, max_w_size=5, fill_value=0, p=0.3),
    ],
                        p=1)
Example #25
 def _pre_aug(p=0.5):
     return albumentations.Compose([
         albumentations.HorizontalFlip(),
         albumentations.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=15, p=0.8),
         albumentations.GridDistortion(distort_limit=0.3, p=0.3),
         albumentations.OneOf([
             albumentations.MotionBlur(p=0.5),
             albumentations.Blur(blur_limit=3, p=0.2),
         ], p=0.2),
         albumentations.OneOf([
             albumentations.CLAHE(clip_limit=2),
             albumentations.IAASharpen(),
             albumentations.IAAEmboss(),
         ], p=0.3),
         albumentations.OneOf([
             albumentations.RandomBrightnessContrast(p=0.3),
         ], p=0.4),
         albumentations.HueSaturationValue(p=0.3),
     ], p=p)
Example #26
    def __init__(self, phase, train_file, image_file_path, image_height, image_width, mean, std, binclass):
        self.image_file_path = image_file_path

        df = pd.read_csv(train_file)

        if binclass == 'A':
            class_map = {'A':1,'B':0,'C':0}

        elif binclass == 'B':
            class_map = {'A':0,'B':1,'C':0}

        elif binclass == 'C':
            class_map = {'A':0,'B':0,'C':1}

        else:
            class_map = {'A':0,'B':1,'C':2}

        self.img_id = df['image_id'].apply(lambda x: x.split('.')[0]).values # just take id of image_id
        self.labels = df['label'].apply(lambda x: x[-1]).map(class_map).values # encoding labels

        if phase == 'valid':
            # validation set
            self.aug = albumentations.Compose([
                albumentations.Resize(image_height, image_width),
                albumentations.Normalize(mean, std),
                # albumentations.ToFloat()
                ])
        elif phase == 'train':
            # training set
            self.aug = albumentations.Compose([
                albumentations.Resize(image_height, image_width),
                albumentations.RandomRotate90(p=0.5),
                albumentations.Transpose(p=0.5),
                albumentations.Flip(p=0.5),
                albumentations.OneOf([
                    albumentations.CLAHE(clip_limit=2), albumentations.IAASharpen(), albumentations.IAAEmboss(), 
                    albumentations.RandomBrightness(), albumentations.RandomContrast(),
                    albumentations.JpegCompression(), albumentations.Blur(), albumentations.GaussNoise()], p=0.5), 
                # albumentations.HueSaturationValue(p=0.5), 
                albumentations.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=45, p=0.5),
                albumentations.Normalize(mean, std),
                # albumentations.ToFloat()
                ])
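Only __init__ is shown above. A minimal __getitem__/__len__ sketch for such a dataset (the os/cv2 imports, the '.jpg' extension and the return types are assumptions):

    def __getitem__(self, idx):
        # Hypothetical loader; the original class does not show how images are read.
        path = os.path.join(self.image_file_path, self.img_id[idx] + '.jpg')
        image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
        image = self.aug(image=image)['image']
        return image, self.labels[idx]

    def __len__(self):
        return len(self.img_id)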
Example #27
def classification_augmentation():
    """Rationalized version of the one above.
    """

    return alb.Compose(
        [
            alb.HorizontalFlip(p=.5),  # 128 mus
            alb.OneOf(
                [
                    # These two do the same thing I think. Keeping only
                    # the faster one.
                    alb.IAAAdditiveGaussianNoise(p=1.),  # 484 mus
                    # alb.GaussNoise(p=1.),  # 1.11 ms
                ],
                p=0.2),  # 1.03 ms with both
            alb.OneOf([
                alb.MotionBlur(p=1.),
                alb.MedianBlur(blur_limit=3, p=1.),
                alb.Blur(blur_limit=3, p=1.),
            ],
                      p=0.2),  # 40 mus
            alb.ShiftScaleRotate(
                shift_limit=0.1, scale_limit=0.1, rotate_limit=30, p=0.8),
            # (above) 348 mus
            alb.OneOf(
                [
                    alb.OpticalDistortion(p=1.),  # 95 mus
                    alb.GridDistortion(p=1.),  # 101 mus
                    # alb.IAAPiecewiseAffine(p=1.),  # 5.61 ms
                ],
                p=0.2),  # 2.48 ms -> 113 mus with the 2 first ones
            alb.OneOf([
                alb.CLAHE(clip_limit=2, p=1.),
                alb.IAASharpen(p=1.),
                alb.IAAEmboss(p=1.),
                alb.RandomContrast(p=1.),
                alb.RandomBrightness(p=1.),
            ],
                      p=0.3),  # 257 mus
            alb.HueSaturationValue(p=0.3),  # 395 mus
        ],
        p=0.9)  # 3.84 ms -> 1.52 ms
Example #28
def hard_transforms():
    result = [
        # Random flips and shift/scale/rotate (A.Flip defaults to p=0.5, ShiftScaleRotate uses p=0.8)
        A.Flip(),
        A.ShiftScaleRotate(rotate_limit=1.0, p=0.8),

        # Pixels
        A.OneOf([
            A.IAAEmboss(p=1.0),
            A.IAASharpen(p=1.0),
            A.Blur(p=1.0),
        ], p=0.5),

        # Affine
        A.OneOf([
            A.ElasticTransform(p=1.0),
            A.IAAPiecewiseAffine(p=1.0)
        ], p=0.5),
    ]

    return result
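hard_transforms() returns a plain list, so it still has to be wrapped in a Compose before use. A minimal sketch (adding Normalize here is an assumption):

import albumentations as A

train_transform = A.Compose(hard_transforms() + [A.Normalize(p=1.0)])
augmented = train_transform(image=image)['image']  # image: HxWx3 numpy array (placeholder)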
Example #29
def data_augmentation(image_path, aug_num):
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    augmentation = A.Compose(
        [
            A.RandomRotate90(),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
                    p=0.2),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
                    p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
                A.RandomBrightnessContrast(),
            ],
                    p=0.3),
            # A.HueSaturationValue(p=0.3),
        ],
        p=0.5)
    patches = []
    for _ in range(aug_num):
        patches.append(augmentation(image=image)['image'])
    return patches
Example #30
    def _setup_transform(self, cfg):
        # Albumentation example: https://albumentations.readthedocs.io/en/latest/examples.html
        self.img_mask_transform = A.Compose([
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=175, p=0.8, border_mode=cv2.BORDER_CONSTANT),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.ElasticTransform(),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.IAAPiecewiseAffine(),
            ]),
            A.OneOf([
                    A.RandomCrop(height=self.size_crop,width=self.size_crop,p=0.5),  
                    A.CenterCrop(height=self.size_crop,width=self.size_crop,p=0.5)
            ]),            
            A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0,p=0.5),
            ],p=0.8)

        self.img_pixel_transform = A.Compose([
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.OneOf([
                A.IAASharpen(),
                A.IAAEmboss(),
                # A.RandomBrightnessContrast(),            
            ], p=0.3),
            A.HueSaturationValue(hue_shift_limit=3,sat_shift_limit=20,val_shift_limit=3 ,p=0.2),
        ],p=0.5)
        # Torch transform
        self.resize_transform = transforms.Resize(cfg.MODEL.IMAGE_SIZE, Image.NEAREST)
        self.to_tensor_transform = transforms.ToTensor()
        self.normalize_transform = transforms.Normalize(mean=cfg.TRAIN.NORMALIZE_MEAN, std=cfg.TRAIN.NORMALIZE_STD)
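A hedged sketch of how these two pipelines would typically be applied (variable names are placeholders; the actual call site is not shown above): the geometric transform is applied to image and mask together so they stay aligned, while the pixel-level transform touches the image only.

augmented = self.img_mask_transform(image=image, mask=mask)
image, mask = augmented['image'], augmented['mask']
image = self.img_pixel_transform(image=image)['image']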