# Example 1
    def __init__(self, n, m):
        """Build a RandAugment-style pool of albumentations ops.

        Args:
            n: number of ops to sample per image; must not exceed the pool size.
            m: global magnitude in [0, 30], scaled into each op's own range.
        """
        self.n = n
        self.m = m

        # Normalize magnitude to [0, 1] (30 is the RandAugment maximum).
        m_ratio = self.m / 30.0
        self.augment_list = (
            A.CLAHE(always_apply=True),
            A.Equalize(always_apply=True),
            A.InvertImg(always_apply=True),
            A.Rotate(limit=30 * m_ratio, always_apply=True),
            A.Posterize(num_bits=int(4 * m_ratio), always_apply=True),
            # NOTE(review): threshold is in [0, 1] here, but albumentations
            # Solarize expects pixel units (0-255) for uint8 images — confirm.
            A.Solarize(threshold=m_ratio, always_apply=True),
            A.RGBShift(r_shift_limit=110 * m_ratio,
                       g_shift_limit=110 * m_ratio,
                       b_shift_limit=110 * m_ratio,
                       always_apply=True),
            A.HueSaturationValue(hue_shift_limit=20 * m_ratio,
                                 sat_shift_limit=30 * m_ratio,
                                 val_shift_limit=20 * m_ratio,
                                 always_apply=True),
            A.RandomContrast(limit=m_ratio, always_apply=True),
            A.RandomBrightness(limit=m_ratio, always_apply=True),
            #  A.Sharpen(always_apply=True), 0.1, 1.9),
            # Horizontal-only translation (y shift disabled).
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_y=0,
                               rotate_limit=0,
                               always_apply=True),
            # Vertical-only translation (x shift disabled).
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_x=0,
                               rotate_limit=0,
                               always_apply=True),
            A.Cutout(num_holes=int(8 * m_ratio), always_apply=True),
            A.IAAAffine(shear=0.3 * m_ratio, always_apply=True))

        # Sampling n ops without replacement requires n <= pool size.
        assert self.n <= len(self.augment_list)
# Example 2
def acase2_augs(name, **kwargs):
    """Return two always-applied pipelines sharing a common tail.

    Each pipeline starts with a distinct lead transform (Posterize or
    Downscale) followed by GridDistortion(num_steps=4) and Normalize.
    ``name`` and ``kwargs`` are accepted for interface compatibility but
    are not used.
    """
    return [
        A.Compose(
            [lead, A.GridDistortion(num_steps=4), A.Normalize()],
            p=1.0)
        for lead in (A.Posterize(), A.Downscale())
    ]
# Example 3
    def setup_augmentors(self, augmentations):
        """Build albumentations pipelines from a config mapping.

        Args:
            augmentations: mapping of augmentation name -> config dict.
                Recognized names: 'image_compression', 'posterize', 'blur',
                'median_blur', 'iso_noise'; unknown names are skipped.

        Side effects:
            Populates ``self.augmentors`` with ``albu.Compose`` objects,
            each tagged with ``name``, ``p`` and ``base`` attributes.
        """
        self.augmentors = []

        def get_albu(transforms):
            # Wrap a transform list in a Compose pipeline.
            return albu.Compose(transforms)

        for aug_name, aug_config in augmentations.items():
            aug = None
            # Per-transform probability. The original read the misspelled
            # key 'probabilty' in most branches, so user-supplied
            # 'probability' values were silently ignored.
            prob = aug_config.get('probability', 0.5)

            if aug_name == 'image_compression':
                aug = get_albu([
                    albu.ImageCompression(
                        quality_lower=aug_config.get('quality_lower', 90),
                        # Default to 100 (lossless bound); the original
                        # defaulted to None, which ImageCompression rejects.
                        quality_upper=aug_config.get('quality_upper', 100),
                        p=prob)
                ])
            elif aug_name == 'posterize':
                aug = get_albu([
                    albu.Posterize(num_bits=aug_config.get('num_bits', 4),
                                   p=prob)
                ])
            elif aug_name == 'blur':
                aug = get_albu([
                    albu.Blur(blur_limit=aug_config.get('blur_limit', 7),
                              p=prob)
                ])
            elif aug_name == 'median_blur':
                aug = get_albu([
                    albu.MedianBlur(blur_limit=aug_config.get('blur_limit', 7),
                                    p=prob)
                ])
            elif aug_name == 'iso_noise':
                aug = get_albu([
                    albu.ISONoise(
                        color_shift=(aug_config.get('min_color_shift', 0.01),
                                     aug_config.get('max_color_shift', 0.05)),
                        # Upper bound previously reused 'min_intensity' by
                        # mistake; it should come from 'max_intensity'.
                        intensity=(aug_config.get('min_intensity', 0.1),
                                   aug_config.get('max_intensity', 0.5)),
                        p=prob)
                ])
            if not aug:
                continue
            # Original indexed aug_config['probability'] directly, which
            # raised KeyError when the key was absent despite defaults above.
            aug.name, aug.p, aug.base = aug_name, prob, self

            self.augmentors.append(aug)

        return
# Example 4
def get_yolo_transform(img_size, mode='train'):
    """Build the YOLO data pipeline for the given split.

    Args:
        img_size: target square image size in pixels.
        mode: 'train' (heavy augmentation) or 'test' (resize + pad only).

    Returns:
        An ``A.Compose`` pipeline with YOLO-format bbox handling.

    Raises:
        ValueError: if ``mode`` is neither 'train' nor 'test'.
    """
    if mode not in ('train', 'test'):
        raise ValueError("'mode' can only accept 'train' or 'test'")

    bbox_params = A.BboxParams(format='yolo',
                               min_visibility=0.4,
                               label_fields=[])

    if mode == 'test':
        steps = [
            A.LongestMaxSize(max_size=img_size),
            A.PadIfNeeded(min_height=img_size,
                          min_width=img_size,
                          border_mode=cv2.BORDER_CONSTANT),
            A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255),
            ToTensorV2(),
        ]
        return A.Compose(steps, bbox_params=bbox_params)

    # Training: resize/pad to a slightly oversized canvas (scale 1.1),
    # then random-crop back down to img_size.
    padded = int(img_size * 1.1)
    steps = [
        A.LongestMaxSize(max_size=padded),
        A.PadIfNeeded(min_height=padded,
                      min_width=padded,
                      border_mode=cv2.BORDER_CONSTANT),
        A.RandomCrop(width=img_size, height=img_size),
        A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6,
                      hue=0.6, p=0.4),
        A.OneOf([
            A.ShiftScaleRotate(rotate_limit=10,
                               border_mode=cv2.BORDER_CONSTANT,
                               p=0.4),
            A.IAAAffine(shear=10, mode='constant', p=0.4),
        ], p=1.0),
        A.HorizontalFlip(p=0.5),
        A.Blur(p=0.1),
        A.CLAHE(p=0.1),
        A.Posterize(p=0.1),
        A.ToGray(p=0.1),
        A.ChannelShuffle(p=0.05),
        A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255),
        ToTensorV2(),
    ]
    return A.Compose(steps, bbox_params=bbox_params)
# Example 5
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Load and augment the image for ``index``; return (image, target).

        NOTE(review): the bboxes fed to the pipeline are a hard-coded
        placeholder and the returned target is a constant zero tensor —
        presumably work-in-progress; confirm before real training use.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        # Real COCO annotations; loaded but overwritten further down.
        bboxes = coco.loadAnns(ann_ids)

        path = coco.loadImgs(img_id)[0]['file_name']

        # image = Image.open(os.path.join(self.root, path)).convert('RGB')
        image = Image.open(os.path.join(self.root, path)).convert('RGB')
        image = np.array(image)

        IMAGE_SIZE = 416
        # NOTE(review): imports and the Compose pipeline are rebuilt on every
        # call — hoisting them to module scope would avoid per-item overhead.
        import albumentations as A
        from albumentations.pytorch import ToTensorV2
        import cv2
        transforms = A.Compose([
            A.LongestMaxSize(max_size=int(IMAGE_SIZE)),
            A.PadIfNeeded(min_height=int(IMAGE_SIZE),
                          min_width=int(IMAGE_SIZE),
                          border_mode=cv2.BORDER_CONSTANT),
            A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
            A.ColorJitter(
                brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.6),
            A.ShiftScaleRotate(
                rotate_limit=10, p=0.4, border_mode=cv2.BORDER_CONSTANT),
            A.HorizontalFlip(p=0.5),
            A.Blur(p=0.2),
            A.CLAHE(p=0.2),
            A.Posterize(p=0.2),
            A.ToGray(p=0.1),
            ToTensorV2()
        ],
                               bbox_params=A.BboxParams(format="coco",
                                                        min_visibility=0.4,
                                                        label_fields=[]))

        # Placeholder bbox; the real annotations in ``bboxes`` are ignored.
        argumentations = transforms(image=image, bboxes=[[12, 23, 43, 34]])
        image = argumentations['image']
        bboxes = argumentations['bboxes']

        # Constant dummy target.
        target = torch.tensor([0, 0, 0, 0])

        return image, target
 def __init__(self, k: int = 5, always_apply: bool = True, p: float = 1.0):
     """RandAugment-over-albumentations: pool of candidate ops.

     Args:
         k: number of candidate ops to sample per image.
         always_apply: forwarded to the albumentations base transform.
         p: overall application probability, forwarded to the base transform.
     """
     super(RandAugmentAlb, self).__init__(always_apply, p)
     self.k = k
     # Candidate op pool; each op applies unconditionally once selected.
     self.candidates = [
         AutoContrast(p=1.0),
         A.Equalize(p=1.0),
         A.InvertImg(p=1.0),
         Rotate(30., p=1.0),
         A.Posterize([4, 8], p=1.0),
         A.Solarize([0, 256], p=1.0),
         # Contrast-only jitter (brightness disabled).
         A.RandomBrightnessContrast(brightness_limit=0.,
                                    contrast_limit=(0.05, 0.95),
                                    p=1.0),
         # Brightness-only jitter (contrast disabled).
         A.RandomBrightnessContrast(brightness_limit=(0.05, 0.95),
                                    contrast_limit=0.,
                                    p=1.0),
         ShearX(0.3),
         ShearY(0.3),
         Translate(0.45),
     ]
# Example 7
    def __init__(
        self,
        transforms=None,
        mean=(0, 0, 0),
        std=(1, 1, 1),
        width=3,
        depth=-1,
        alpha=1.,
        p=1.,
    ):
        """AugMix-style mixer configuration.

        Args:
            transforms: op pool; when None, a default albumentations pool
                is built below.
            mean, std: normalization statistics.
            width: number of parallel augmentation chains.
            depth: ops per chain; -1 presumably means a random depth —
                TODO confirm against the mixing code.
            alpha: Dirichlet/Beta mixing parameter.
            p: application probability.
        """
        self.transforms = transforms
        self.mean = mean
        self.std = std
        self.width = width
        self.depth = depth
        self.alpha = alpha
        self.p = p

        # Default op pool, only built when the caller supplied none.
        if self.transforms is None:
            self.transforms = [
                AutoContrast(cutoff=0, p=1),
                albu.Equalize(mode='pil', p=1),
                albu.Posterize(num_bits=(3, 4), p=1),
                albu.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),  # rotate
                albu.Solarize(threshold=77, p=1),
                RandomShear(shear_x=0.09, shear_y=0, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                RandomShear(shear_x=0, shear_y=0.09, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                VerticalShift(shift_limit=0.09, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                HorizontalShift(shift_limit=0.09, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                # ImageNet-C
                albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=(-36, 0), val_shift_limit=0, p=1),  # saturation
                albu.RandomContrast(limit=(-0.36, 0), p=1),
                albu.RandomBrightness(limit=(-0.36, 0), p=1),
                albu.OneOf([  # sharpness
                    albu.IAASharpen(alpha=(0.1, 0.5), lightness=0, p=1),
                    albu.Blur(blur_limit=7, p=1),
                ], p=0.5),
            ]
# Example 8
            hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5, p=1),
        A.RandomBrightnessContrast(p=1),
    ],
            p=0.5),
    A.JpegCompression(p=0.2),
    A.OneOf([
        A.MedianBlur(p=1),
        A.Blur(p=1),
    ], p=0.3)
],
                        p=1)

# NOTE(review): this composition is immediately overwritten by the next
# assignment and never used.
composition = A.Compose([
    A.OneOf([
        A.JpegCompression(p=1),
        A.MedianBlur(p=1),
        A.Blur(p=1),
    ], p=0.8)
],
                        p=1)

composition = A.Compose([
    A.Posterize(p=0.5, num_bits=4),
], p=1)

# Apply the pipeline four times and collect image/mask pairs for plotting.
imglist = []
for i in range(4):
    transformed = composition(image=img, mask=mask[:, :, 0:3])
    imglist += [transformed['image'], transformed['mask']]
plot_img(tuple(imglist), nrow=2)
# Example 9
# Checkpointing flags: save the model during training / resume from disk.
SAVE_MODEL = True
LOAD_MODEL = True

# Data augmentation for images
train_transforms = A.Compose(
    [
        A.Resize(width=96, height=96),
        A.Rotate(limit=15, border_mode=cv2.BORDER_CONSTANT, p=0.8),
        A.IAAAffine(shear=15, scale=1.0, mode="constant", p=0.2),
        A.RandomBrightnessContrast(contrast_limit=0.5, brightness_limit=0.5, p=0.2),
        A.OneOf([
            A.GaussNoise(p=0.8),
            A.CLAHE(p=0.8),
            A.ImageCompression(p=0.8),
            A.RandomGamma(p=0.8),
            A.Posterize(p=0.8),
            A.Blur(p=0.8),
        ], p=1.0),
        A.OneOf([
            A.GaussNoise(p=0.8),
            A.CLAHE(p=0.8),
            A.ImageCompression(p=0.8),
            A.RandomGamma(p=0.8),
            A.Posterize(p=0.8),
            A.Blur(p=0.8),
        ], p=1.0),
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=0, p=0.2, border_mode=cv2.BORDER_CONSTANT),
        A.Normalize(
            mean=[0.4897, 0.4897, 0.4897],
            std=[0.2330, 0.2330, 0.2330],
            max_pixel_value=255.0,
# Example 10
def posterize(m, minval=0, maxval=8):
    """Map magnitude ``m`` to a fixed-bit Posterize transform (always applied).

    ``M2Level`` converts the magnitude into a bit count within
    [minval, maxval]; the same count is used for both ends of ``num_bits``.
    """
    bits = int(M2Level(m, minval, maxval))
    return albu.Posterize(num_bits=(bits, bits), p=1)
        albumentations.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),

        albumentations.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),

        albumentations.OneOf(
            [
                albumentations.ShiftScaleRotate(
                    rotate_limit=20, p=0.5, border_mode=cv2.BORDER_CONSTANT
                ),
                albumentations.IAAAffine(shear=15, p=0.5, mode="constant"),
            ],
            p=1.0,
        ),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.Blur(p=0.1),
        albumentations.Posterize(p=0.1),
        albumentations.CLAHE(p=0.1),
        albumentations.ToGray(p=0.1),
        albumentations.ChannelShuffle(p=0.05),
        albumentations.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
        ToTensorV2(),
    ],
    bbox_params=albumentations.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
)
test_transforms = albumentations.Compose(
    [
        albumentations.LongestMaxSize(max_size=IMAGE_SIZE),
        albumentations.PadIfNeeded(
            min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
        ),
        albumentations.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
# Example 12
    def get_images(self, imgpath):
        """Sample a (tile, mask) pair from an opened slide and augment it.

        Picks a clone / partial / fufi / random tile according to the
        cumulative fractions in ``self.cpfr_frac``, optionally dilates each
        mask channel, applies the training augmentation pipeline, then
        thresholds and normalizes.

        Args:
            imgpath: key into ``self.all_svs_opened``.

        Returns:
            Tuple of (float32 image, HxWxC float32 mask stack with values
            in {0, 1}).
        """
        # Pick random clone, crypt or fufi
        u01 = np.random.uniform()
        if u01 < self.cpfr_frac[0]:
            img, mask = self.all_svs_opened[imgpath].fetch_clone(
                prop_displ=0.45)
        elif u01 < np.sum(self.cpfr_frac[0:2]):
            img, mask = self.all_svs_opened[imgpath].fetch_partial(
                prop_displ=0.45)
        elif u01 < np.sum(self.cpfr_frac[0:3]):
            img, mask = self.all_svs_opened[imgpath].fetch_fufi(
                prop_displ=0.45)
        else:
            img, mask = self.all_svs_opened[imgpath].fetch_rndmtile()

        # Idiomatic truthiness (was `== True`).
        if self.dilate_masks:
            n_dil = int(5 / self.um_per_pixel)  # if mpp is one or less than 1
            # dilate if desired
            for i in range(mask.shape[2]):
                mask[:, :, i] = cv2.morphologyEx(mask[:, :, i].copy(),
                                                 cv2.MORPH_DILATE,
                                                 st_3,
                                                 iterations=n_dil)

        if self.aug:
            composition = A.Compose([
                A.HorizontalFlip(),
                A.VerticalFlip(),
                A.Rotate(border_mode=cv2.BORDER_CONSTANT),
                A.OneOf([
                    A.ElasticTransform(alpha=1000,
                                       sigma=30,
                                       alpha_affine=30,
                                       border_mode=cv2.BORDER_CONSTANT,
                                       p=1),
                    A.GridDistortion(border_mode=cv2.BORDER_CONSTANT, p=1),
                ],
                        p=0.5),
                A.CLAHE(p=0.2),
                A.HueSaturationValue(hue_shift_limit=12,
                                     sat_shift_limit=12,
                                     val_shift_limit=12,
                                     p=0.3),
                A.RandomBrightnessContrast(p=0.3),
                A.Posterize(p=0.1, num_bits=4),
                A.OneOf([
                    A.JpegCompression(p=1),
                    A.MedianBlur(p=1),
                    A.Blur(p=1),
                    A.GlassBlur(p=1, max_delta=2, sigma=0.4),
                    A.IAASharpen(p=1)
                ],
                        p=0.3)
            ],
                                    p=1)
            transformed = composition(image=img, mask=mask)
            img, mask = transformed['image'], transformed['mask']
        # Split the mask into per-channel 2-D arrays.
        mask_list = [mask[:, :, ii] for ii in range(mask.shape[2])]

        if self.stride_bool:
            # Downsample each channel by 2x to match a strided network output.
            mask_list = [cv2.pyrDown(mask_ii.copy()) for mask_ii in mask_list]

        # Re-binarize: augmentation/downsampling introduce soft edges.
        mask_list = [
            cv2.threshold(mask_ii, 120, 255, cv2.THRESH_BINARY)[1]
            for mask_ii in mask_list
        ]

        ## convert to floating point space, normalize and mask non-used clones
        img = img.astype(np.float32) / 255
        mask_list = [mask_ii.astype(np.float32) / 255 for mask_ii in mask_list]
        if self.normalize:
            img = (img - self.norm_mean) / self.norm_std

        return img, np.stack(mask_list, axis=2)
def get_train_transforms_atopy(input_size,
                               use_crop=False,
                               use_no_color_aug=False):
    """Training pipeline for the atopy dataset.

    Args:
        input_size: target square size in pixels.
        use_crop: when True, oversize by 1.2x and random-size-crop back to
            ``input_size``; otherwise a plain resize.
        use_no_color_aug: accepted for interface compatibility; not used.

    Returns:
        An ``al.Compose`` pipeline ending in Normalize + ToTensorV2.
    """
    if use_crop:
        crop_hi = int(input_size * 1.2)
        crop_lo = int(input_size * 0.6)
        resize = [
            al.Resize(crop_hi, crop_hi),
            al.RandomSizedCrop(min_max_height=(crop_lo, crop_hi),
                               height=input_size,
                               width=input_size),
        ]
    else:
        resize = [al.Resize(input_size, input_size)]

    augs = [
        al.Flip(p=0.5),
        al.OneOf([
            al.RandomRotate90(),
            al.Rotate(limit=180),
        ], p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(),
            al.OpticalDistortion(),
            al.GridDistortion(),
            al.ElasticTransform(),
        ], p=0.3),
        al.RandomGridShuffle(p=0.05),
        al.OneOf([
            al.RandomGamma(),
            al.HueSaturationValue(),
            al.RGBShift(),
            al.CLAHE(),
            al.ChannelShuffle(),
            al.InvertImg(),
        ], p=0.1),
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(p=0.05),
        al.RandomShadow(p=0.05),
        al.RandomBrightnessContrast(p=0.05),
        al.GaussNoise(p=0.2),
        al.ISONoise(p=0.2),
        al.MultiplicativeNoise(p=0.2),
        al.ToGray(p=0.05),
        al.ToSepia(p=0.05),
        al.Solarize(p=0.05),
        al.Equalize(p=0.05),
        al.Posterize(p=0.05),
        al.FancyPCA(p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=3),
            al.Blur(blur_limit=3),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ], p=0.05),
        al.CoarseDropout(p=0.05),
        al.Cutout(p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2(),
    ]
    return al.Compose(resize + augs)
def get_train_transforms_mmdetection(input_size,
                                     use_crop=False,
                                     use_no_color_aug=False,
                                     use_gray=False,
                                     use_center_crop=False,
                                     center_crop_ratio=0.9):
    """Heavy mmdetection-style training pipeline.

    Args:
        input_size: int (square) or (height, width) pair in pixels.
        use_crop, use_no_color_aug, use_center_crop, center_crop_ratio:
            accepted for interface compatibility; not used below.
        use_gray: when True, force grayscale conversion (p=1 inside the
            color OneOf branch).

    Returns:
        An ``al.Compose`` pipeline ending in Normalize + ToTensorV2.
    """
    # BUG FIX: an int input previously hit `input_size[0]` and raised
    # TypeError; expand the scalar into a (height, width) pair instead.
    if isinstance(input_size, int):
        input_size = (input_size, input_size)
    return al.Compose([
        al.RandomResizedCrop(height=input_size[0],
                             width=input_size[1],
                             scale=(0.4, 1.0),
                             interpolation=0,
                             p=0.5),
        al.Resize(input_size[0], input_size[1], p=1.0),
        al.HorizontalFlip(p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(border_mode=0,
                                shift_limit=(-0.2, 0.2),
                                scale_limit=(-0.2, 0.2),
                                rotate_limit=(-20, 20)),
            al.OpticalDistortion(border_mode=0,
                                 distort_limit=[-0.5, 0.5],
                                 shift_limit=[-0.5, 0.5]),
            al.GridDistortion(
                num_steps=5, distort_limit=[-0., 0.3], border_mode=0),
            al.ElasticTransform(border_mode=0),
            al.IAAPerspective(),
            al.RandomGridShuffle()
        ],
                 p=0.1),
        al.Rotate(limit=(-25, 25), border_mode=0, p=0.1),
        al.OneOf([
            al.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                        contrast_limit=(-0.2, 0.2)),
            al.HueSaturationValue(hue_shift_limit=(-20, 20),
                                  sat_shift_limit=(-30, 30),
                                  val_shift_limit=(-20, 20)),
            al.RandomGamma(gamma_limit=(30, 150)),
            al.RGBShift(),
            al.CLAHE(clip_limit=(1, 15)),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(num_flare_circles_lower=1,
                          num_flare_circles_upper=2,
                          src_radius=110,
                          p=0.05),
        al.RandomShadow(p=0.05),
        al.GaussNoise(var_limit=(10, 20), p=0.05),
        al.ISONoise(color_shift=(0, 15), p=0.05),
        al.MultiplicativeNoise(p=0.05),
        al.OneOf([
            al.ToGray(p=1. if use_gray else 0.05),
            al.ToSepia(p=0.05),
            al.Solarize(p=0.05),
            al.Equalize(p=0.05),
            al.Posterize(p=0.05),
            al.FancyPCA(p=0.05),
        ],
                 p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=(3, 7)),
            al.Blur(blur_limit=(3, 7)),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        al.CoarseDropout(p=0.05),
        al.Cutout(num_holes=30,
                  max_h_size=37,
                  max_w_size=37,
                  fill_value=0,
                  p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(scale_min=0.5, scale_max=0.9, p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
# Example 15
def get_transform_imagenet(use_albu_aug):
    """Return (train_transform, test_transform) for ImageNet training.

    Args:
        use_albu_aug: when True, build the heavy albumentations pipeline;
            otherwise a standard torchvision resize/crop/flip pipeline.

    Returns:
        Tuple of (train_transform wrapped in MultiDataTransform[Albu],
        torchvision test transform).
    """
    if use_albu_aug:
        train_transform = al.Compose([
            # al.Flip(p=0.5),
            al.Resize(256, 256, interpolation=2),
            al.RandomResizedCrop(224,
                                 224,
                                 scale=(0.08, 1.0),
                                 ratio=(3. / 4., 4. / 3.),
                                 interpolation=2),
            al.HorizontalFlip(),
            # One augmentation family is picked per image with p=0.5;
            # each inner OneOf/branch fires with p=0.1.
            al.OneOf(
                [
                    al.OneOf(
                        [
                            al.ShiftScaleRotate(
                                border_mode=cv2.BORDER_CONSTANT,
                                rotate_limit=30),  # , p=0.05),
                            al.OpticalDistortion(
                                border_mode=cv2.BORDER_CONSTANT,
                                distort_limit=5.0,
                                shift_limit=0.1),
                            # , p=0.05),
                            al.GridDistortion(border_mode=cv2.BORDER_CONSTANT
                                              ),  # , p=0.05),
                            al.ElasticTransform(
                                border_mode=cv2.BORDER_CONSTANT,
                                alpha_affine=15),  # , p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.RandomGamma(),  # p=0.05),
                            al.HueSaturationValue(),  # p=0.05),
                            al.RGBShift(),  # p=0.05),
                            al.CLAHE(),  # p=0.05),
                            al.ChannelShuffle(),  # p=0.05),
                            al.InvertImg(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.RandomSnow(),  # p=0.05),
                            al.RandomRain(),  # p=0.05),
                            al.RandomFog(),  # p=0.05),
                            al.RandomSunFlare(num_flare_circles_lower=1,
                                              num_flare_circles_upper=2,
                                              src_radius=110),
                            # p=0.05, ),
                            al.RandomShadow(),  # p=0.05),
                        ],
                        p=0.1),
                    al.RandomBrightnessContrast(p=0.1),
                    al.OneOf(
                        [
                            al.GaussNoise(),  # p=0.05),
                            al.ISONoise(),  # p=0.05),
                            al.MultiplicativeNoise(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.ToGray(),  # p=0.05),
                            al.ToSepia(),  # p=0.05),
                            al.Solarize(),  # p=0.05),
                            al.Equalize(),  # p=0.05),
                            al.Posterize(),  # p=0.05),
                            al.FancyPCA(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            # al.MotionBlur(blur_limit=1),
                            al.Blur(blur_limit=[3, 5]),
                            al.MedianBlur(blur_limit=[3, 5]),
                            al.GaussianBlur(blur_limit=[3, 5]),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.CoarseDropout(),  # p=0.05),
                            al.Cutout(),  # p=0.05),
                            al.GridDropout(),  # p=0.05),
                            al.ChannelDropout(),  # p=0.05),
                            al.RandomGridShuffle(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.Downscale(),  # p=0.1),
                            al.ImageCompression(quality_lower=60),  # , p=0.1),
                        ],
                        p=0.1),
                ],
                p=0.5),
            al.Normalize(),
            ToTensorV2()
        ])
    else:
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    # Wrap the training transform so it yields the multi-view samples
    # expected downstream; the wrapper matches the pipeline flavor.
    if use_albu_aug:
        train_transform = MultiDataTransformAlbu(train_transform)
    else:
        train_transform = MultiDataTransform(train_transform)

    return train_transform, test_transform
        ),
        A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
        A.ColorJitter(
            brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),
        A.OneOf(
            [
                A.ShiftScaleRotate(
                    rotate_limit=10, p=0.4, border_mode=cv2.BORDER_CONSTANT),
                A.IAAAffine(shear=10, p=0.4, mode="constant"),
            ],
            p=1.0,
        ),
        A.HorizontalFlip(p=0.5),
        A.Blur(p=0.1),
        A.CLAHE(p=0.1),
        A.Posterize(p=0.1),
        A.ToGray(p=0.1),
        A.ChannelShuffle(p=0.05),
        A.Normalize(
            mean=[0, 0, 0],
            std=[1, 1, 1],
            max_pixel_value=255,
        ),
        ToTensorV2(),
    ],
    bbox_params=A.BboxParams(
        format="yolo",
        min_visibility=0.4,
        label_fields=[],
    ),
)
# Example 17
"""

# "Minimal" training pipeline: pad/crop to IMAGE_SIZE plus light photometric
# augmentation, with YOLO-format bbox handling.
min_transforms = A.Compose([
    A.LongestMaxSize(max_size=int(IMAGE_SIZE)),
    A.PadIfNeeded(min_height=int(IMAGE_SIZE),
                  min_width=int(IMAGE_SIZE),
                  border_mode=cv2.BORDER_CONSTANT),
    A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
    A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6,
                  p=0.6),
    A.ShiftScaleRotate(rotate_limit=10, p=0.4,
                       border_mode=cv2.BORDER_CONSTANT),
    A.HorizontalFlip(p=0.5),
    A.Blur(p=0.2),
    A.CLAHE(p=0.2),
    A.Posterize(p=0.2),
    A.ToGray(p=0.1),
    ToTensorV2()
],
                           bbox_params=A.BboxParams(format="yolo",
                                                    min_visibility=0.4,
                                                    label_fields=[]))

max_transforms = A.Compose([
    A.LongestMaxSize(max_size=int(IMAGE_SIZE)),
    A.PadIfNeeded(min_height=int(IMAGE_SIZE),
                  min_width=int(IMAGE_SIZE),
                  border_mode=cv2.BORDER_CONSTANT),
    ToTensorV2()
],
                           bbox_params=A.BboxParams(format="yolo",