Example #1
0
def train_compose(config):
    """Build the training augmentation pipeline.

    Resizes to 1.14x the target size, applies geometric and photometric
    augmentations, normalizes, and random-crops back to the target size.
    """
    size = config.train.img_size
    resize_to = int(1.14 * size)
    transforms = [
        Resize(resize_to, resize_to),
        HorizontalFlip(),
        Transpose(),
        CoarseDropout(p=0.3),
        OneOf(
            [
                RandomBrightnessContrast(brightness_limit=0.6),
                RandomGamma(),
            ],
            p=0.6,
        ),
        ShiftScaleRotate(rotate_limit=45),
        OneOf(
            [
                CLAHE(p=0.5),
                GaussianBlur(3, p=0.3),
                IAASharpen(alpha=(0.2, 0.3), p=0.3),
            ],
            p=0.8,
        ),
        # distortion-related transforms
        OneOf(
            [
                OpticalDistortion(p=0.3),
                GridDistortion(p=0.2),
                IAAPiecewiseAffine(p=0.3),
            ],
            p=0.2,
        ),
        OneOf(
            [
                MotionBlur(p=0.3),
                MedianBlur(blur_limit=3, p=0.3),
                Blur(blur_limit=3, p=0.3),
            ],
            p=0.8,
        ),
        Normalize(mean=config.train.mean, std=config.train.std),
        RandomCrop(size, size),
        ToTensorV2(),
    ]
    return Compose(transforms)
def face_aug(p=.5):
    """Light augmentation for face images; the whole pipeline fires with
    probability *p*."""
    noise = OneOf(
        [
            IAAAdditiveGaussianNoise(scale=(1, 3)),
            GaussNoise(var_limit=(1, 5)),
        ],
        p=0.2,
    )
    blur = OneOf(
        [
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
        p=0.2,
    )
    color = OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(alpha=(0.1, 0.2)),
            IAAEmboss(strength=(0.1, 0.3)),
            RandomContrast(limit=0.1),
            RandomBrightness(limit=0.15),
        ],
        p=0.3,
    )
    return Compose([HorizontalFlip(p=0.5), noise, blur, color], p=p)
Example #3
0
def strong_aug(p=.5):
    """Strong training-time augmentation; fires with probability *p*."""
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.5)
    blur = OneOf(
        [
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
        p=0.5,
    )
    color = OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ],
        p=0.5,
    )
    return Compose(
        [
            JpegCompression(p=0.9),
            HorizontalFlip(p=0.5),
            noise,
            blur,
            ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=15, p=.5),
            color,
            HueSaturationValue(p=0.5),
        ],
        p=p,
    )
Example #4
0
def get_train_transforms():
    """Return a callable that augments a PIL (or array-like) image.

    The returned function converts its argument with ``np.array`` and
    feeds it through the albumentations pipeline, so it can wrap PIL
    images directly.
    """
    noise = OneOf(
        [
            IAAAdditiveGaussianNoise(p=.5),
            GaussNoise(p=.4),
        ],
        p=0.6,
    )
    blur = OneOf(
        [
            MotionBlur(p=0.6),
            Blur(blur_limit=3, p=0.2),
        ],
        p=0.6,
    )
    distort = OneOf(
        [
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.4),
            IAAPiecewiseAffine(p=0.2),
        ],
        p=0.6,
    )
    sharpen = OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
        p=0.45,
    )
    augmentations = Compose([
        Resize(236, 236),
        HorizontalFlip(),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.0725,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.6),
        distort,
        sharpen,
        HueSaturationValue(p=0.3),
        CenterCrop(224, 224),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensor()
    ])

    return lambda img: augmentations(image=np.array(img))
def augmentation():
    """Keypoint-aware augmentation pipeline (keypoints in 'xy' format)."""
    pipeline = [
        IAAAdditiveGaussianNoise(p=0.2),
        OneOf(
            [RandomGamma(p=1),
             RandomBrightnessContrast(p=1),
             CLAHE(p=1)],
            p=0.9,
        ),
        OneOf(
            [IAASharpen(p=1),
             Blur(blur_limit=3, p=1),
             MotionBlur(blur_limit=3, p=1)],
            p=0.8,
        ),
        OneOf(
            [RandomContrast(p=1), HueSaturationValue(p=1)],
            p=0.8,
        ),
    ]
    # Keypoints are transformed alongside the image.
    return Compose(pipeline, p=1, keypoint_params=KeypointParams(format='xy'))
Example #6
0
 def get_photometric(self):
     """Return the photometric-only augmentation pipeline (always applied)."""
     sharpen = OneOf(
         [
             CLAHE(clip_limit=2, p=.8),
             IAASharpen(p=.8),
             IAAEmboss(p=.8),
         ],
         p=0.6,
     )
     noise = OneOf(
         [
             IAAAdditiveGaussianNoise(p=.6),
             GaussNoise(p=.7),
         ],
         p=.5,
     )
     # Blur kernel limit comes from the instance configuration (self.k).
     blur = OneOf(
         [
             MotionBlur(p=.5),
             MedianBlur(blur_limit=self.k, p=.3),
             Blur(blur_limit=self.k, p=.5),
         ],
         p=.5,
     )
     contrast = OneOf(
         [
             RandomContrast(),
             RandomBrightness(),
         ],
         p=.8,
     )
     return Compose([sharpen, noise, blur, contrast], p=1.)
def augment(p=0.5):
    """General-purpose augmentation pipeline; fires with probability *p*."""
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur = OneOf(
        [
            MotionBlur(p=0.25),
            GaussianBlur(p=0.5),
            Blur(blur_limit=3, p=0.25),
        ],
        p=0.2,
    )
    tone = OneOf(
        [
            RandomBrightness(),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
        p=0.6,
    )
    return Compose(
        [
            HorizontalFlip(),
            noise,
            blur,
            HueSaturationValue(p=0.2),
            tone,
            ToSepia(p=0.1),
        ],
        p=p,
    )
Example #8
0
def aug_mega_hardcore(p=.95):
    """Very aggressive augmentation pipeline; fires with probability *p*."""
    sharpen = OneOf(
        [CLAHE(clip_limit=2),
         IAASharpen(p=.25),
         IAAEmboss(p=.25)],
        p=.35,
    )
    noise = OneOf(
        [
            IAAAdditiveGaussianNoise(p=.3),
            GaussNoise(p=.7),
        ],
        p=.5,
    )
    blur = OneOf(
        [
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.3),
            Blur(blur_limit=3, p=.5),
        ],
        p=.4,
    )
    contrast = OneOf(
        [
            RandomContrast(p=.5),
            RandomBrightness(p=.5),
        ],
        p=.4,
    )
    warp = OneOf(
        [
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.2),
            ElasticTransform(p=.2),
            IAAPerspective(p=.2),
            IAAPiecewiseAffine(p=.3),
        ],
        p=.6,
    )
    return Compose(
        [
            sharpen,
            noise,
            RandomRotate90(),
            Flip(),
            Transpose(),
            blur,
            contrast,
            ShiftScaleRotate(
                shift_limit=.0, scale_limit=.45, rotate_limit=45, p=.7),
            warp,
            HueSaturationValue(p=.5),
        ],
        p=p,
    )
Example #9
0
    def __call__(self, original_image):
        """Augment *original_image* and return the transformed tensor.

        NOTE(review): the pipeline is rebuilt on every call; consider
        caching it in ``__init__`` if this becomes a hot path.
        """
        self.augmentation_pipeline = Compose([
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(rotate_limit=25.0, p=0.7),
            OneOf([IAASharpen(p=1), Blur(p=1)], p=0.5),
            IAAPiecewiseAffine(p=0.5),
            Resize(self.height, self.width, always_apply=True),
            Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                always_apply=True,
            ),
            ToTensor(),
        ])
        augmented = self.augmentation_pipeline(image=original_image)
        return augmented["image"]
def get_default_albumentations():
    """Albumentations analogue of get_default_imgaug (imgaug replacement).

    See https://albumentations.readthedocs.io/en/latest/examples.html
    """
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur = OneOf(
        [
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ],
        p=0.2,
    )
    distort = OneOf(
        [
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ],
        p=0.2,
    )
    color = OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
        p=0.3,
    )
    return Compose(
        [
            noise,
            blur,
            ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            distort,
            color,
            HueSaturationValue(p=0.3),
        ],
        p=0.9,
    )
Example #11
0
augmentation_pixel_techniques_pool = {
    "RandomBrightnessContrast":
    RandomBrightnessContrast(brightness_limit=(0.2, 0.6),
                             contrast_limit=0.4,
                             p=1),
    "Blur":
    Blur(blur_limit=2, p=1),
    "OpticalDistortion":
    OpticalDistortion(distort_limit=0.20, shift_limit=0.15, p=1),
    "ImageCompression":
    ImageCompression(p=1),
    "MultiplicativeNoise":
    MultiplicativeNoise(multiplier=(0.5, 5), p=1),
    "IAASharpen":
    IAASharpen(alpha=(0.2, 1), lightness=(0.5, 1.0), p=1),
    "IAAEmboss":
    IAAEmboss(alpha=(0.2, 1), strength=(0.5, 1.0), p=1),
    "MotionBlur":
    MotionBlur(blur_limit=15, p=1),
    "MedianBlur":
    MedianBlur(blur_limit=7, p=1),
    "GaussNoise":
    GaussNoise(var_limit=(10.0, 50.0), mean=0, p=1),
    "RandomGamma":
    RandomGamma(gamma_limit=(30, 120), p=1),
    "HueSaturationValue":
    HueSaturationValue(hue_shift_limit=20,
                       sat_shift_limit=30,
                       val_shift_limit=20,
                       p=1),
def _FastRandAugment2d(img_size, set='general'):
    """Return the named list of candidate transforms for FastRandAugment.

    Each entry is a single always-on transform (or a OneOf/Compose group);
    the caller's policy picks among them.

    NOTE(review): the ``set`` parameter shadows the builtin; the name is
    kept for interface compatibility.
    """
    pad_and_crop = Compose(
        [
            PadIfNeeded(min_height=img_size * 2,
                        min_width=img_size * 2,
                        border_mode=cv2.BORDER_WRAP,
                        p=1),
            RandomCrop(img_size, img_size, p=1),
        ],
        p=1,
    )
    # One GridDropout variant per (fill color, dropout ratio) combination.
    grid_dropouts = OneOf(
        [
            GridDropout(holes_number_x=4,
                        holes_number_y=4,
                        random_offset=True,
                        ratio=r,
                        fill_value=i,
                        p=1)
            for i in _color_fill
            for r in np.linspace(0.3, 0.7, 4)
        ],
        p=1,
    )
    # One half-image CoarseDropout per fill color.
    cutouts = OneOf(
        [
            CoarseDropout(max_holes=1,
                          max_height=img_size // 2,
                          max_width=img_size // 2,
                          fill_value=i,
                          p=1)
            for i in _color_fill
        ],
        p=1,
    )
    catalog = {
        'general': [
            OneOf([
                Blur(p=1),
                GaussNoise(p=1),
                MotionBlur(p=1),
                MedianBlur(p=1),
            ]),
            HorizontalFlip(p=1),
            pad_and_crop,
            RandomBrightnessContrast(p=1),
            InvertImg(p=1),
            ToGray(p=1),
            RGBShift(p=1),
            grid_dropouts,
            NoOp(p=1),
            Equalize(p=1),
            Rotate(45, border_mode=cv2.BORDER_WRAP, p=1),
            ShiftScaleRotate(shift_limit=0.25,
                             scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_WRAP,
                             p=1),
            Solarize(p=1),
            Posterize(p=1),
            IAAPerspective(p=1),
            HueSaturationValue(hue_shift_limit=20,
                               sat_shift_limit=50,
                               val_shift_limit=50,
                               p=1),
            RandomGamma(p=1),
            ChannelShuffle(p=1),
            cutouts,
            OneOf([
                CLAHE(clip_limit=2),
                IAASharpen(),
                IAAEmboss(),
            ], p=1),
        ],
    }
    return catalog[set]
Example #13
0
        GaussNoise(),
    ], p=0.2),
    OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2),
    ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=15, p=1),
    OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2),
    OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3),
    HueSaturationValue(p=0.3),
], p=1)

# Example: show nine random augmentations of the first training image in a
# 3x3 grid (each call to `augmentation` draws fresh random transforms).
plt.figure(figsize = (12,8))
for i in range(9):
    img = augmentation(image = images_train[0])['image']
    plt.subplot(3, 3, i + 1)
    plt.imshow(img)
    plt.axis('off')
plt.show()
    def initialize(self, config, filename):
        """Build the preprocessing pipeline and load annotations/bboxes.

        Args:
            config: dict-like run configuration; reads 'image_size' and
                config['datasets']['bboxs'].
            filename: CSV annotation file path; a name containing 'train'
                selects the augmented training pipeline.
        """
        self.config = config
        # Train/eval mode is inferred from the annotation file name.
        self.train = 'train' in filename
        self.size = config['image_size']

        if self.train:
            # Training: geometric + photometric augmentation, then normalize.
            self.transform = Compose([
                Resize(self.size[0], self.size[1]),
                ShiftScaleRotate(shift_limit=0.3,
                                 scale_limit=(0.05, 0.1),
                                 rotate_limit=10,
                                 p=.4),
                OneOf([
                    IAAAdditiveGaussianNoise(),
                    GaussNoise(),
                ], p=0.4),
                OneOf([
                    MotionBlur(p=.2),
                    MedianBlur(blur_limit=5, p=.5),
                    Blur(blur_limit=3, p=.5),
                ],
                      p=0.4),
                OpticalDistortion(p=0.4),
                OneOf([
                    CLAHE(clip_limit=3),
                    IAASharpen(),
                    IAAEmboss(),
                    RandomContrast(),
                    RandomBrightness(),
                    RandomGamma()
                ],
                      p=0.6),
                OneOf([
                    RGBShift(),
                    HueSaturationValue(),
                ], p=0.2),
                JpegCompression(quality_lower=30, quality_upper=100, p=0.4),
                Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                ),
            ])
        else:
            # Evaluation: deterministic resize + normalize only.
            self.transform = Compose([
                Resize(self.size[0], self.size[1]),
                Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                ),
            ])

        with open(filename, 'r') as f:
            reader = csv.reader(f)
            self.anno = list(reader)

        # Keep, per image id, the fixed bbox with the largest area.
        with open(self.config['datasets']['bboxs'], 'r') as bboxs_file:
            bboxs_json = json.load(bboxs_file)
            self.bboxs_dict = {}
            for item in bboxs_json:
                if item['image_id'] in self.bboxs_dict:
                    fixed_bbox_new, area_new = self.fix_bbox(item['bbox'])
                    # NOTE(review): the stored bbox already went through
                    # fix_bbox; re-fixing it here to get its area assumes
                    # fix_bbox is idempotent — confirm.
                    _, area_old = self.fix_bbox(
                        self.bboxs_dict[item['image_id']])
                    if area_new > area_old:
                        self.bboxs_dict[item['image_id']] = fixed_bbox_new
                else:
                    self.bboxs_dict[item['image_id']] = self.fix_bbox(
                        item['bbox'])[0]

        # Randomize sample order once at load time.
        random.shuffle(self.anno)

        self.anno_size = len(self.anno)
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, Flip, OneOf, Compose,Resize,ImageCompression,MultiplicativeNoise,ChannelDropout,IAASuperpixels,GaussianBlur,
HorizontalFlip,RandomGamma,VerticalFlip,ShiftScaleRotate,CLAHE
)

import numpy as np
import torch
from torchvision import transforms

# Pool of single-shot (p=1 where applicable) augmentations addressable by
# name, used to build per-sample augmentation combinations.
augmentation_techniques_pool = {
    'RandomBrightnessContrast': RandomBrightnessContrast(
        brightness_limit=0.05, contrast_limit=0.05, p=1),
    'Blur': Blur(blur_limit=2, p=1),
    'OpticalDistortion': OpticalDistortion(p=1),
    'ImageCompression': ImageCompression(p=1),
    'MultiplicativeNoise': MultiplicativeNoise(p=1),
    'IAASharpen': IAASharpen(alpha=(0, 0.2), p=1),
    'IAAEmboss': IAAEmboss(alpha=(0, 0.3), p=1),
    'MotionBlur': MotionBlur(blur_limit=3, p=1),
    'MedianBlur': MedianBlur(blur_limit=3, p=1),
    'HorizontalFlip': HorizontalFlip(p=1),
    'GaussNoise': GaussNoise(),
    'RandomGamma': RandomGamma(p=1),
    'VerticalFlip': VerticalFlip(p=1),
    'ShiftScaleRotate': ShiftScaleRotate(),
    'HueSaturationValue': HueSaturationValue(),
    'CLAHE': CLAHE(),
}


def Cutout(img , cfg):
Example #16
0
# Student-branch augmentations: optional geometric flips/rotations, noise,
# and photometric tweaks; each group fires with p=0.2.
aug_student = Compose([
    OneOf(
        [
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
        ],
        p=0.2,
    ),
    OneOf(
        [
            IAAAdditiveGaussianNoise(p=0.5),
            GaussNoise(p=0.5),
        ],
        p=0.2,
    ),
    OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(p=0.5),
            IAAEmboss(p=0.5),
            RandomBrightnessContrast(p=0.5),
        ],
        p=0.2,
    ),
    HueSaturationValue(p=0.2),
    RandomGamma(p=0.2),
])

aug_teacher = Compose([
    OneOf([
        IAAAdditiveGaussianNoise(p=0.5),
        GaussNoise(p=0.5),
    ], p=0.2),
    OneOf([
        CLAHE(clip_limit=2),
Example #17
0
def compose_augmentations(img_height,
                          img_width,
                          flip_p=0.5,
                          translate_p=0.5,
                          distort_p=0.5,
                          color_p=0.5,
                          overlays_p=0.15,
                          blur_p=0.25,
                          noise_p=0.25):
    """Build the full geometric + photometric training pipeline.

    Per-group probabilities are exposed as keyword arguments.
    Raises NotImplementedError for heights other than 1024/512/256.
    """
    # Only resize when the input is not already at the native 1024 size.
    resize_p = 0 if img_height == 1024 else 1

    # Random-sized-crop range per supported resolution.
    crop_ranges = {
        1024: (896, 960),
        512: (448, 480),
        256: (224, 240),
    }
    if img_height not in crop_ranges:
        raise NotImplementedError
    min_max_height = crop_ranges[img_height]

    flips = OneOf(
        [
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Transpose(p=0.5),
            RandomRotate90(p=0.5),
        ],
        p=flip_p,
    )
    translations = OneOf(
        [
            Rotate(p=0.25, limit=10),
            RandomSizedCrop(p=0.5,
                            min_max_height=min_max_height,
                            height=img_height,
                            width=img_width),
            OneOrOther(IAAAffine(p=0.1, translate_percent=0.05),
                       IAAPerspective(p=0.1)),
        ],
        p=translate_p,
    )
    distortions = OneOf(
        [
            ElasticTransform(p=0.5,
                             alpha=10,
                             sigma=img_height * 0.05,
                             alpha_affine=img_height * 0.03,
                             approximate=True),
            GridDistortion(p=0.5),
            OpticalDistortion(p=0.5),
            IAAPiecewiseAffine(p=0.25, scale=(0.01, 0.03)),
        ],
        p=distort_p,
    )
    color_or_overlay = OneOrOther(
        OneOf(
            [
                CLAHE(p=0.5),
                RandomGamma(p=0.5),
                RandomContrast(p=0.5),
                RandomBrightness(p=0.5),
                RandomBrightnessContrast(p=0.5),
            ],
            p=color_p,
        ),
        OneOf([IAAEmboss(p=0.1),
               IAASharpen(p=0.1),
               IAASuperpixels(p=0)],
              p=overlays_p),
    )
    blur_or_noise = OneOrOther(
        OneOf(
            [
                Blur(p=0.2),
                MedianBlur(p=0.1),
                MotionBlur(p=0.1),
                GaussianBlur(p=0.1),
            ],
            p=blur_p,
        ),
        OneOf([GaussNoise(p=0.2),
               IAAAdditiveGaussianNoise(p=0.1)],
              p=noise_p),
    )
    return Compose([
        Resize(p=resize_p, height=img_height, width=img_width),
        flips,
        translations,
        distortions,
        color_or_overlay,
        blur_or_noise,
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensor(sigmoid=False),
    ])
Example #18
0
def aug_test():
    """Smoke-test each augmentation on one sample image.

    Draws a reference rectangle on a mask, runs image+mask jointly through
    every augmentation in `augs`, recovers the rectangle from the warped
    mask, and writes the annotated result to ../DATA/aug_test/aug/.
    """
    def get_bb_points(msk):
        # Bounding box of mask pixels brighter than 200; defaults to the
        # full image extent when no bright pixel is found.
        h, w = msk.shape
        x0 = 0
        x1 = msk.shape[1]
        y0 = 0
        y1 = msk.shape[0]
        # leftmost bright column
        for i in range(w):
            if msk[:, i].max() > 200:
                x0 = i
                break
        # rightmost bright column (scan from the right edge)
        for i in range(w):
            if msk[:, msk.shape[1] - i - 1].max() > 200:
                x1 = msk.shape[1] - i - 1
                break
        # topmost bright row
        for i in range(h):
            if msk[i, :].max() > 200:
                y0 = i
                break
        # bottommost bright row (scan from the bottom edge)
        for i in range(h):
            if msk[msk.shape[0] - i - 1, :].max() > 200:
                y1 = msk.shape[0] - i - 1
                break
        return (x0, y0), (x1, y1)

    image_name = '7aea0b3e2.jpg'
    # Reference rectangle corners drawn on the mask before augmenting.
    p1, p2 = (12, 84), (391, 248)
    img = imread(f'../DATA/aug_test/src/{image_name}')

    # ElasticTransform parameters scaled from a nominal height of 300 px.
    h = 300
    alpha, sigma, alpha_affine = h * 2, h * 0.08, h * 0.08

    # One always-on (p=1.0) instance per augmentation; the numeric prefix
    # groups the output files by category (noise, blur/sharpen, color,
    # geometric distortion).
    augs = {
        '1_IAAAdditiveGaussianNoise':
        IAAAdditiveGaussianNoise(scale=(0.01 * 255, 0.05 * 255), p=1.0),
        '1_GaussNoise':
        GaussNoise(var_limit=(20, 120), p=1.0),
        '1_RandomGamma':
        RandomGamma(gamma_limit=(80, 120), p=1.0),
        '2_RandomBrightnessContrast':
        RandomBrightnessContrast(p=1.0),
        '2_MotionBlur':
        MotionBlur(p=1.0),
        '2_MedianBlur':
        # NOTE(review): MedianBlur conventionally takes an odd blur_limit;
        # confirm blur_limit=6 is accepted by the installed version.
        MedianBlur(blur_limit=6, p=1.0),
        '2_Blur':
        Blur(blur_limit=9, p=1.0),
        '2_IAASharpen':
        IAASharpen(p=1.0),
        '2_IAAEmboss':
        IAAEmboss(p=1.0),
        '2_IAASuperpixels':
        IAASuperpixels(n_segments=50, p_replace=0.05, p=1.0),
        '3_CLAHE':
        CLAHE(clip_limit=8, p=1.0),
        '3_RGBShift':
        RGBShift(p=1.0),
        '3_ChannelShuffle':
        ChannelShuffle(p=1.0),
        '3_HueSaturationValue':
        HueSaturationValue(p=1.0),
        '3_ToGray':
        ToGray(p=1.0),
        '4_OpticalDistortion':
        OpticalDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
        '4_GridDistortion':
        GridDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
        '4_IAAPiecewiseAffine':
        IAAPiecewiseAffine(nb_rows=4, nb_cols=4, p=1.0),
        '4_IAAPerspective':
        IAAPerspective(p=1.0),
        '4_IAAAffine':
        IAAAffine(mode='constant', p=1.0),
        '4_ElasticTransform':
        ElasticTransform(alpha=alpha,
                         sigma=sigma,
                         alpha_affine=alpha_affine,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=1.0)
    }

    # im_merge.shape[1] * 2, im_merge.shape[1] * 0.08, im_merge.shape[1] * 0.08

    for aug in augs:
        # Fresh mask with the reference rectangle for every augmentation.
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.rectangle(mask, p1, p2, 255, 2)
        data = {"image": img.copy(), 'mask': mask}
        augmented = augs[aug](**data)
        augimg = augmented['image']
        draw_shadow_text(augimg, f'{aug}', (5, 15), 0.5, (255, 255, 255), 1)
        # Recover the rectangle from the augmented mask; draw it in green.
        ap1, ap2 = get_bb_points(augmented['mask'])
        cv2.rectangle(augimg, ap1, ap2, (0, 255, 0), 2)
        imsave(f'../DATA/aug_test/aug/{aug}-{image_name}', augimg)
Example #19
0
             "IAASharpen": dict(),
             "IAAEmboss": dict(),
             "RandomContrast": dict(),
             "RandomBrightness": dict(),
             "HueSaturationValue": dict(p=0.3),
             "ShiftScaleRotate": dict(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
             "CLAHE": dict(clip_limit=2),
             "Noise_of": dict(transforms=[IAAAdditiveGaussianNoise(),
                                          GaussNoise()], p=0.2),
             "Blur_of": dict(transforms=[MotionBlur(p=.2),
                                         MedianBlur(blur_limit=3, p=.1),
                                         Blur(blur_limit=3, p=.1)], p=0.2),
             "Optical_of": dict(transforms=[OpticalDistortion(p=0.3),
                                            GridDistortion(p=.1),
                                            IAAPiecewiseAffine(p=0.3)], p=0.2),
             "Color_of": dict(transforms=[IAASharpen(),
                                          # CLAHE(clip_limit=2),
                                          IAAEmboss(),
                                          RandomContrast(),
                                          RandomBrightness()], p=0.3)}


def str2class(classname):
    """Resolve *classname* to the object of that name in this module.

    Raises AttributeError if no such name is defined here.
    """
    this_module = sys.modules[__name__]
    return getattr(this_module, classname)


class Auger(object):
    """Configuration-driven augmentation wrapper.

    Stores the global and the local (auger-specific) configuration and
    builds its pipeline via ``setup_aug`` (not visible in this chunk).
    """

    def __init__(self, config, local_config):
        self.config = config
        self.local_config = local_config
        # Construct the actual augmentation objects from the configs.
        self.setup_aug()
# Performs augmentation with albumentations; mainly solves the problem of
# producing multiple augmented copies.
Example #21
0
def strong_aug_pixel(p=.5):
    """Pixel-level-only strong augmentation; fires with probability *p*.

    Geometric transforms are intentionally excluded so labels/keypoints
    need no remapping.
    """
    print('[DATA]: strong aug pixel')

    from albumentations import (
        Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion,
        HueSaturationValue, MultiplicativeNoise, IAAAdditiveGaussianNoise,
        GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast,
        IAAPiecewiseAffine, IAASharpen, IAAEmboss, Flip, OneOf, Compose,
        JpegCompression, CLAHE)

    degrade = OneOf(
        [
            MultiplicativeNoise(multiplier=[0.5, 1.5], per_channel=True),
            JpegCompression(quality_lower=39, quality_upper=80)
        ],
        p=0.2,
    )
    noise = OneOf(
        [
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ],
        p=0.2,
    )
    blur = OneOf(
        [
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ],
        p=0.2,
    )
    color = OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
        p=0.3,
    )
    return Compose(
        [degrade, noise, blur, color, HueSaturationValue(p=0.3)],
        p=p)
# %%
# RandomGamma — random gamma-curve adjustment; the mask passes through
# geometrically unchanged.

aug = RandomGamma(gamma_limit=(40, 200))
# assumes `image` is float in [0, 1] — TODO confirm (a pixel of exactly 1.0
# would overflow uint8 after * 256)
image8 = (image * 256).astype("uint8")
augmented = aug(image=image8, mask=mask)

image_scaled = augmented["image"]
mask_scaled = augmented["mask"]

visualize(image_scaled, mask_scaled, original_image=image8, original_mask=mask)


# %%
# IAASharpen — sharpening overlay with mild alpha/lightness ranges.
aug = IAASharpen(alpha=(0.1, 0.2), lightness=(0.5, 0.7))
image8 = (image * 256).astype("uint8")
augmented = aug(image=image8, mask=mask)

image_scaled = augmented["image"]
mask_scaled = augmented["mask"]

visualize(image_scaled, mask_scaled, original_image=image8, original_mask=mask)


# %%
# Blur — simple blur with a small kernel limit.
aug = Blur(blur_limit=2)
image8 = (image * 256).astype("uint8")
augmented = aug(image=image8, mask=mask)
def _get_data_loader(imgs, trn_df, vld_df):
    """Build distributed train / plain valid DataLoaders for Bengali data.

    Training samples get the albumentations pipeline; validation gets only
    the tensor conversion. The train loader is sharded across ranks with a
    DistributedSampler.
    """

    import albumentations as A
    from albumentations import (
        Rotate, HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE,
        RandomRotate90, Transpose, ShiftScaleRotate, Blur, OpticalDistortion,
        GridDistortion, HueSaturationValue, IAAAdditiveGaussianNoise,
        GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast,
        IAAPiecewiseAffine, IAASharpen, IAAEmboss, Flip, OneOf, Compose)
    from albumentations.pytorch import ToTensor, ToTensorV2

    train_transforms = A.Compose([
        Rotate(20),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ],
              p=0.2),
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
        ToTensor()
    ],
                                 p=1.0)

    valid_transforms = A.Compose([ToTensor()])

    from torch.utils.data import Dataset, DataLoader
    trn_dataset = BangaliDataset(imgs=imgs,
                                 label_df=trn_df,
                                 transform=train_transforms)
    vld_dataset = BangaliDataset(imgs=imgs,
                                 label_df=vld_df,
                                 transform=valid_transforms)

    rank = dist.get_rank()
    world_size = dist.get_world_size()

    trn_sampler = torch.utils.data.distributed.DistributedSampler(
        trn_dataset,
        num_replicas=world_size,  # shard the dataset into world_size pieces
        rank=rank)

    # shuffle stays False: DataLoader forbids shuffle=True together with a
    # custom sampler (ordering is delegated to the sampler).
    trn_loader = DataLoader(trn_dataset,
                            shuffle=False,
                            num_workers=8,  # NOTE(review): hardcoded, unlike NUM_WORKERS below — confirm intended
                            pin_memory=True,
                            batch_size=BATCH_SIZE,
                            sampler=trn_sampler)

    vld_loader = DataLoader(vld_dataset,
                            shuffle=False,
                            num_workers=NUM_WORKERS,
                            batch_size=BATCH_SIZE)
    return trn_loader, vld_loader
Example #24
0
def build_databunch(data_dir, img_sz, batch_sz):
    """Build train/valid datasets and loaders for COCO-style data.

    One subpolicy from an AutoAugment-v3-style detection policy is sampled
    uniformly and applied (plus resize+tensor) to the training set; the
    validation set gets resize+tensor only.

    Returns a DataBunch(train_ds, train_dl, valid_ds, valid_dl, label_names).
    """
    # TODO This is to avoid freezing in the middle of the first epoch. Would be nice
    # to fix this.
    num_workers = 0

    train_dir = join(data_dir, 'train')
    train_anns = glob.glob(join(train_dir, '*.json'))
    valid_dir = join(data_dir, 'valid')
    valid_anns = glob.glob(join(valid_dir, '*.json'))

    label_names = get_label_names(train_anns[0])

    # Augmentations: each entry is a two-transform subpolicy.
    policy_v3 = [
        [
            Posterize(p=0.8),
            IAAAffine(translate_px=(10, 20), p=1.0),
        ],
        [
            RandomCropNearBBox(p=0.2),
            IAASharpen(p=0.5),
        ],
        [
            Rotate(p=0.6),
            Rotate(p=0.8),
        ],
        [
            Equalize(p=0.8),
            RandomContrast(p=0.2),
        ],
        [
            Solarize(p=0.2),
            IAAAffine(translate_px=(10, 20), p=0.2),
        ],
        [
            IAASharpen(p=0.0),
            ToGray(p=0.4),
        ],
        [
            Equalize(p=1.0),
            IAAAffine(translate_px=(10, 20), p=1.0),
        ],
        [
            Posterize(p=0.8),
            Rotate(p=0.0),
        ],
        [
            RandomContrast(p=0.6),
            Rotate(p=1.0),
        ],
        [
            Equalize(p=0.0),
            Cutout(p=0.8),
        ],
        [
            RandomBrightness(p=1.0),
            IAAAffine(translate_px=(10, 20), p=1.0),
        ],
        [
            RandomContrast(p=0.0),
            IAAAffine(shear=60.0, p=0.8),
        ],
        [
            RandomContrast(p=0.8),
            RandomContrast(p=0.2),
        ],
        [
            Rotate(p=1.0),
            Cutout(p=1.0),
        ],
        [
            Solarize(p=0.8),
            Equalize(p=0.8),
        ],
    ]
    # Sample one subpolicy uniformly. len(policy_v3) replaces the previous
    # hard-coded 15 so this stays in sync if the policy list changes, and a
    # scalar randint avoids the deprecated 1-element-array-to-int cast.
    selected_subpolicy = int(np.random.randint(0, len(policy_v3)))
    standard_transforms = [Resize(img_sz, img_sz), ToTensor()]
    # Concatenate instead of extend() so the sublist stored in policy_v3 is
    # not mutated in place.
    aug_transforms = policy_v3[selected_subpolicy] + standard_transforms

    bbox_params = BboxParams(format='coco',
                             min_area=0.,
                             min_visibility=0.2,
                             label_fields=['labels'])
    aug_transforms = Compose(aug_transforms, bbox_params=bbox_params)
    standard_transforms = Compose(standard_transforms, bbox_params=bbox_params)

    train_ds = CocoDataset(train_dir, train_anns, transforms=aug_transforms)
    valid_ds = CocoDataset(valid_dir,
                           valid_anns,
                           transforms=standard_transforms)
    train_ds.label_names = label_names
    valid_ds.label_names = label_names

    train_dl = DataLoader(train_ds,
                          collate_fn=collate_fn,
                          shuffle=True,
                          batch_size=batch_sz,
                          num_workers=num_workers,
                          pin_memory=True)
    valid_dl = DataLoader(valid_ds,
                          collate_fn=collate_fn,
                          batch_size=batch_sz,
                          num_workers=num_workers,
                          pin_memory=True)
    return DataBunch(train_ds, train_dl, valid_ds, valid_dl, label_names)