def train_transforms():
    """Training-time augmentation pipeline with pascal_voc bbox support."""
    color_jitter = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.3,
                                 sat_shift_limit=0.3,
                                 val_shift_limit=0.3,
                                 p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0.3,
                                       contrast_limit=0.3,
                                       p=0.5),
        ],
        p=0.9,
    )
    steps = [
        A.RandomSizedCrop(min_max_height=(512, 540), height=600, width=600,
                          p=0.1),
        color_jitter,
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1,
                           rotate_limit=10, p=0.5),
    ]
    bbox_cfg = A.BboxParams(format='pascal_voc',
                            label_fields=['class_labels'])
    return A.Compose(steps, p=1.0, bbox_params=bbox_cfg)
# --- Example 2 ---
def get_train_transforms(config, force_light=False):
    """Build the training augmentation pipeline.

    Args:
        config: object exposing ``img_size`` (final square side in pixels).
        force_light: accepted for API compatibility; not used in this body.
    """
    noise_or_blur = A.OneOf([A.GaussNoise(p=1), A.GaussianBlur(p=1)], p=0.3)
    steps = [
        CropLemon(p=1),
        A.Transpose(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Rotate(p=0.5),
        noise_or_blur,
        A.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                   contrast_limit=(-0.2, 0.2), p=0.3),
        A.HueSaturationValue(hue_shift_limit=5, val_shift_limit=5, p=0.3),
        A.Resize(config.img_size, config.img_size),
        A.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics
                    std=[0.229, 0.224, 0.225],
                    max_pixel_value=255.0,
                    p=1.0),
        ToTensorV2(p=1.0),
    ]
    return A.Compose(steps, p=1.0)
# --- Example 3 ---
 def __init__(self, img, data, img_size):
     """Store the dataset and build the augmentation pipeline.

     Parameters
     ----------
     img : list
         Images in their original size (height, width, 3).
     data : list of dict
         Each dict contains:
             'image' : index of the image; must match an entry in ``img``
             'mask'  : [rr, cc]
             'box'   : [[xmin, ymin], [xmax, ymax]]
             'size'  : size of the image the data was created with,
                       IMPORTANT: (WIDTH, HEIGHT)
     img_size : tuple
         Desired output image size; axes are swapped to match pygame.
         IMPORTANT: (WIDTH, HEIGHT)
     """
     self.img = img
     self.data = data
     self.n = len(data)
     self.output_size = img_size
     # Photometric jitter: at most one of these fires per call.
     photometric = A.OneOf(
         [
             A.RandomGamma((40, 200), p=1),
             A.RandomBrightness(limit=0.5, p=1),
             A.RandomContrast(limit=0.5, p=1),
             A.RGBShift(p=1),
         ],
         p=0.8,
     )
     self.aug = A.Compose(
         [
             photometric,
             A.VerticalFlip(p=0.5),
             A.RandomRotate90(p=1),
             # A.Resize takes (height, width); img_size is (WIDTH, HEIGHT).
             A.Resize(img_size[1], img_size[0]),
         ],
         bbox_params=A.BboxParams(format='albumentations',
                                  label_fields=['bbox_classes']),
         keypoint_params=A.KeypointParams(format='xy'),
     )
# --- Example 4 ---
def stong_aug():
    """Strong augmentations for training.

    NOTE(review): the name typo ("stong") is kept for backward
    compatibility with existing callers.
    """
    geometric = albu.OneOf(
        [
            albu.CenterCrop(p=0.5, height=configs.input_size,
                            width=configs.input_size),
            albu.ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                                  alpha_affine=120 * 0.03),
            albu.GridDistortion(p=0.5),
            albu.OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5),
        ],
        p=0.8,
    )
    pipeline = [
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.RandomRotate90(p=0.5),
        geometric,
        albu.Normalize(mean=(0.485, 0.456, 0.406),  # ImageNet statistics
                       std=(0.229, 0.224, 0.225),
                       p=1),
        AT.ToTensor(),
    ]
    return albu.Compose(pipeline)
# --- Example 5 ---
def main():
    """Load a sample image, apply a random transform, and serialize the
    pipeline definition to JSON/YAML for inspection."""
    image = cv2.cvtColor(cv2.imread("cuiyan.png"), cv2.COLOR_BGR2RGB)

    def visualize(img):
        plt.figure(figsize=(6, 6))
        plt.axis("off")
        plt.imshow(img)
        # plt.show()

    transform = A.Compose([
        A.RandomCrop(111, 222),
        A.OneOf([A.RGBShift(), A.HueSaturationValue()]),
    ])

    random.seed(42)  # deterministic augmentation for reproducible output
    visualize(transform(image=image)["image"])

    # Round-trip the pipeline definition through both supported formats.
    A.save(transform, "./transform.json")
    A.save(transform, "./transform.yml", data_format="yaml")
    pprint.pprint(A.to_dict(transform))
def get_train_transforms():
    """Color-only ("safe") training augmentations followed by resize and
    tensor conversion."""
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2,
                                       p=0.9),
        ],
        p=0.9,
    )
    return A.Compose([
        color,
        A.ToGray(p=0.01),  # rare grayscale conversion
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(p=1.0),
    ])
# --- Example 7 ---
    def __init__(self, size=512, mode='train'):
        """Build the transform for the given split; 'val' and 'test' share
        the same deterministic resize-only pipeline."""
        assert mode in ['train', 'val', 'test']

        if mode == 'train':
            occlusion = A.OneOf(
                [
                    A.Cutout(p=1.0, num_holes=6, max_h_size=32,
                             max_w_size=32),
                    A.GridDropout(p=1.0, ratio=0.5, unit_size_min=64,
                                  unit_size_max=128, random_offset=True),
                ],
                p=0.5,
            )
            self.transform = A.Compose([
                A.HorizontalFlip(p=0.5),
                A.ShiftScaleRotate(p=0.5,
                                   shift_limit=0.0625,
                                   scale_limit=0.1,
                                   rotate_limit=10,
                                   interpolation=1,
                                   border_mode=4),
                occlusion,
                A.Resize(size, size, p=1.0),
            ])
        else:  # 'val' or 'test': identical resize-only pipelines
            self.transform = A.Compose([A.Resize(size, size, p=1.0)])
# --- Example 8 ---
def test_data_augmentation(data_dir, save_dir, filename):
    """Visual check of image augmentation; results are saved to
    `../___check/image/` for manual inspection.

    Draws the augmented bboxes and a small yellow cross at each keypoint.
    """
    base_size = 32 if filename == "cifar.png" else 256
    bboxes = [(0.41, 0.39, 0.70, 0.75)]
    keypoints = [(0.52 * base_size, 0.52 * base_size)]
    aug = A.Compose(
        [
            A.OneOf(
                [
                    tk.image.Standardize(),
                    tk.image.ToGrayScale(p=0.125),
                    tk.image.RandomBinarize(p=0.125),
                ],
                p=0.25,
            ),
            # tk.image.RandomRotate(),  # TODO
            tk.image.RandomTransform(size=(256, 256), base_scale=2.0),
            tk.image.RandomColorAugmentors(noisy=True),
            tk.image.SpeckleNoise(),
            tk.image.GridMask(),
        ],
        bbox_params=A.BboxParams(format="albumentations", label_fields=["classes"]),
        keypoint_params=A.KeypointParams(format="xy"),
    )
    img_path = data_dir / filename
    original_img = tk.ndimage.load(img_path)
    for i in range(32):
        augmented = aug(
            image=original_img, bboxes=bboxes, classes=[0], keypoints=keypoints
        )
        img = augmented["image"]
        img = tk.od.plot_objects(img, None, None, augmented["bboxes"])
        for x, y in augmented["keypoints"]:
            # Clip x by width (shape[1]) and y by height (shape[0]).
            # BUG FIX: y was previously clipped with img.shape[1] (width),
            # which could index out of bounds on non-square images.
            x = np.clip(int(x), 1, img.shape[1] - 2)
            y = np.clip(int(y), 1, img.shape[0] - 2)
            img[range(y - 1, y + 2), x, :] = [[[255, 255, 0]]]
            img[y, range(x - 1, x + 2), :] = [[[255, 255, 0]]]
        tk.ndimage.save(save_dir / f"{img_path.stem}.DA.{i}.png", img)
# --- Example 9 ---
def get_augumentation(phase,
                      width=512,
                      height=512,
                      min_area=0.,
                      min_visibility=0.):
    """Build phase-specific augmentations.

    Phases: 'train' adds flips/color jitter/shift-scale-rotate and a random
    center crop; 'show' returns image-only transforms; 'test' adds tensor
    conversion; any other phase also attaches pascal_voc bbox params.

    NOTE(review): the name typo ("augumentation") is kept for backward
    compatibility with existing callers.
    """
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.HorizontalFlip(p=0.5),
            albu.OneOf([
                albu.RandomGamma(),
                albu.RandomBrightnessContrast(),
                albu.VerticalFlip(p=0.5),
            ],
                       p=0.3),
            albu.ShiftScaleRotate(),
        ])

    list_transforms.extend([
        # BUG FIX: albu.Resize's signature is (height, width); the two
        # arguments were previously passed swapped positionally, which
        # distorted non-square targets. Keyword args make it explicit.
        albu.Resize(height=height, width=width),
    ])

    if phase == 'train':
        list_transforms.extend(
            [albu.CenterCrop(p=0.2, height=height, width=width)])
    if (phase == 'show'):
        return albu.Compose(list_transforms)

    list_transforms.extend([ToTensor()])
    if (phase == 'test'):
        return albu.Compose(list_transforms)

    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(
                            format='pascal_voc',
                            min_area=min_area,
                            min_visibility=min_visibility,
                            label_fields=['category_id']))
# --- Example 10 ---
def create_data_transforms(args, split='train'):
    """Create data transforms.

    Args:
        args: data transform configs (must expose ``image_size``).
        split (str, optional): 'train', 'val' or 'test'. Defaults to 'train'.

    Returns:
        albumentations Compose: pytorch data augmentations
    """
    base_transform = create_base_transforms(args, split=split)

    if split == 'train':
        color = alb.OneOf(
            [
                alb.RandomBrightnessContrast(),
                alb.FancyPCA(),
                alb.HueSaturationValue(),
            ],
            p=0.7,
        )
        aug_transform = alb.Compose([
            alb.Rotate(limit=30),
            alb.Cutout(1, 25, 25, p=0.1),
            alb.RandomResizedCrop(256, 256, scale=(0.5, 1.0), p=0.5),
            alb.Resize(args.image_size, args.image_size),
            alb.HorizontalFlip(),
            alb.ToGray(p=0.1),
            alb.GaussNoise(p=0.1),
            color,
            alb.GaussianBlur(blur_limit=3, p=0.05),
        ])
        # Flatten both pipelines into a single Compose.
        data_transform = alb.Compose([*aug_transform, *base_transform])
    elif split in ('val', 'test'):
        data_transform = base_transform

    return data_transform
# --- Example 11 ---
class Resize(object):
    """Train/validation transform definitions at the configured scale."""

    width = height = opt.scale if opt.scale else 256

    train_transform = A.Compose(
        [
            A.RandomResizedCrop(height=height, width=width,
                                scale=(0.8, 1.0), p=1.0),
            # Hue/saturation vs. brightness/contrast augmentation.
            A.OneOf(
                [
                    A.HueSaturationValue(hue_shift_limit=0.4,
                                         sat_shift_limit=0.4,
                                         val_shift_limit=0.4,
                                         p=0.9),
                    A.RandomBrightnessContrast(brightness_limit=0.3,
                                               contrast_limit=0.3,
                                               p=0.9),
                ],
                p=0.9,
            ),
            A.GaussianBlur(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Transpose(p=0.5),  # together with flips: 8-way TTA symmetry
            A.Normalize(max_pixel_value=1.0, p=1.0),
            A.CoarseDropout(p=0.5, max_width=32, max_height=32),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
    )

    divisor = 8  # pad to a multiple of 8
    val_transform = A.Compose(
        [
            A.Resize(height=height, width=width, p=1.0),
            A.Normalize(max_pixel_value=1.0, p=1.0),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
    )
# --- Example 12 ---
    def __init__(
        self,
        transforms=None,
        mean=(0, 0, 0),
        std=(1, 1, 1),
        width=3,
        depth=-1,
        alpha=1.,
        p=1.,
    ):
        """Store mixing parameters and install the default op pool.

        Args:
            transforms: optional list of candidate augmentations; when None,
                the default pool below is used.
            mean, std: normalization statistics kept for downstream use.
            width: number of augmentation chains.
            depth: chain length (-1 presumably means "random" — confirm).
            alpha: mixing coefficient.
            p: probability of applying the whole operation.
        """
        self.transforms = transforms
        self.mean = mean
        self.std = std
        self.width = width
        self.depth = depth
        self.alpha = alpha
        self.p = p

        if self.transforms is not None:
            return

        # Shared constant-border settings for all geometric ops.
        border = dict(border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1)
        self.transforms = [
            AutoContrast(cutoff=0, p=1),
            albu.Equalize(mode='pil', p=1),
            albu.Posterize(num_bits=(3, 4), p=1),
            albu.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=5, **border),  # rotate
            albu.Solarize(threshold=77, p=1),
            RandomShear(shear_x=0.09, shear_y=0, **border),
            RandomShear(shear_x=0, shear_y=0.09, **border),
            VerticalShift(shift_limit=0.09, **border),
            HorizontalShift(shift_limit=0.09, **border),
            # ImageNet-C
            albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=(-36, 0), val_shift_limit=0, p=1),  # saturation
            albu.RandomContrast(limit=(-0.36, 0), p=1),
            albu.RandomBrightness(limit=(-0.36, 0), p=1),
            albu.OneOf([  # sharpness
                albu.IAASharpen(alpha=(0.1, 0.5), lightness=0, p=1),
                albu.Blur(blur_limit=7, p=1),
            ], p=0.5),
        ]
# --- Example 13 ---
def build_transforms(args, n_classes):
    """Build (train, test) segmentation transforms wrapped for n_classes."""
    hole = int(args.image_size / 5)
    train_transforms = A.Compose([
        A.HorizontalFlip(),
        A.RandomResizedCrop(width=args.image_size, height=args.image_size,
                            scale=(0.5, 2.)),
        A.CoarseDropout(max_height=hole, max_width=hole, max_holes=5),
        A.Rotate(limit=(-30, 30)),
        A.ColorJitter(),
        A.OneOf([
            A.RandomGamma(),
            A.RandomBrightnessContrast(),
            A.Blur(blur_limit=5),
        ]),
        ToTensorV2(),
    ])
    test_transforms = A.Compose([
        A.Resize(width=args.image_size, height=args.image_size),
        ToTensorV2(),
    ])

    def _wrap(transform):
        # Label value 255 is treated as "ignore" in segmentation targets.
        return AlbumentationsSegmentationWrapperTransform(
            transform, class_num=n_classes, ignore_indices=[255, ])

    return _wrap(train_transforms), _wrap(test_transforms)
# --- Example 14 ---
    def __init__(self, data_dir, is_train=True):
        """Index images two levels under ``data_dir`` and select the
        train or validation transform pipeline."""
        self.paths = sorted(glob.glob(data_dir + '/*/*'))
        self.transform_train = A.Compose([
            A.RandomResizedCrop(height=500, width=900),  # change ratio and scale 500 / 900
            A.RandomBrightnessContrast(brightness_limit=0.1,
                                       contrast_limit=0.1, p=0.5),
            A.RandomGamma(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.Rotate(limit=10, p=0.5),
            A.OneOf([A.MotionBlur(blur_limit=3),
                     A.GlassBlur(max_delta=3),
                     A.GaussianBlur(blur_limit=3)], p=0.5),
            A.GaussNoise(p=0.5),
            A.Normalize(mean=(0.446, 0.469, 0.472),
                        std=(0.326, 0.330, 0.338),
                        max_pixel_value=255.0, p=1.0),
            ToTensorV2(p=1.0),
        ])
        self.transform_valid = A.Compose([
            A.Resize(height=500, width=900),
            A.Normalize(mean=(0.446, 0.469, 0.472),
                        std=(0.326, 0.330, 0.338),
                        max_pixel_value=255.0, p=1.0),
            ToTensorV2(p=1.0),
        ])
        self.data_transforms = (self.transform_train if is_train
                                else self.transform_valid)
# --- Example 15 ---
def get_train_transforms_distortion(cfg):
    """Center-crop plus distortion-heavy training augmentations."""
    distortion = A.OneOf(
        [
            A.OpticalDistortion(p=0.4),
            A.GridDistortion(p=0.2),
            A.IAAPiecewiseAffine(p=0.4),
        ],
        p=0.5,
    )
    return A.Compose([
        A.CenterCrop(cfg["img_size"], cfg["img_size"], p=1.0),
        A.HueSaturationValue(),
        distortion,
        A.RandomBrightnessContrast(brightness_limit=(-0.3, 0.3),
                                   contrast_limit=(-0.1, 0.1),
                                   p=0.5),
        # Channel statistics (presumably dataset-specific — confirm).
        A.Normalize(
            mean=[0.56019358, 0.52410121, 0.501457],
            std=[0.23318603, 0.24300033, 0.24567522],
            max_pixel_value=255.0,
            p=1.0,
        ),
        ToTensorV2(p=1.0),
    ])
# --- Example 16 ---
def dataAugumentation2(phase,
                       mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225)):
    """Return normalization, preceded by random augmentation when
    ``phase == "train"``."""
    steps = []
    if phase == "train":
        steps += [
            albu.HorizontalFlip(p=0.5),  # only horizontal flip as of now
            albu.VerticalFlip(p=0.5),
            albu.RandomCrop(256, 256, always_apply=True),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.OneOf(
                [
                    albu.RandomContrast(p=1),
                    albu.HueSaturationValue(p=1),
                ],
                p=0.5,
            ),
        ]
    steps.append(albu.Normalize(mean=mean, std=std, p=1))
    return albu.Compose(steps)
# --- Example 17 ---
def choco_transform(p):
    """Augmentation pipeline applied jointly to up to 100 extra image
    targets named ``image1`` … ``image100``."""
    extra_targets = {'image{}'.format(i): 'image' for i in range(1, 101)}
    return albu.Compose([
        # Flips
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        # Shift, scale, rotate with a white constant border
        albu.ShiftScaleRotate(rotate_limit=0,
                              scale_limit=0.15,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=[255, 255, 255],
                              p=0.5),
        # Noise
        albu.IAAAdditiveGaussianNoise(p=0.2),
        # Color
        albu.Solarize(p=0.2),
        albu.ToGray(p=0.2),
        # Contrast, brightness
        albu.OneOf([
            albu.RandomGamma(p=1),
            albu.RandomContrast(p=1),
            albu.RandomBrightness(p=1),
        ], p=0.5),
    ], p=p, additional_targets=extra_targets)
# --- Example 18 ---
def get_hard_train_transforms(mean, std):
    """Aggressive training transforms: crop-or-rotate, color, flip,
    normalize, tensor conversion."""
    crop_or_rotate = albu.OneOf([
        albu.RandomSizedCrop(min_max_height=(200, 256),
                             height=256,
                             width=1600,
                             w2h_ratio=1600 / 256,
                             p=0.5),
        ShiftScaleRotate(shift_limit=0.25,
                         scale_limit=0.25,
                         rotate_limit=90,
                         border_mode=cv.BORDER_CONSTANT,
                         value=0,
                         mask_value=0),
    ])
    return Compose([
        crop_or_rotate,
        albu.RandomBrightnessContrast(),
        albu.RandomGamma(),
        Flip(),
        Normalize(mean=mean, std=std),
        ToTensor(),
    ])
# --- Example 19 ---
def get_train_transforms(input_size=256):
    """Paired-image training transforms; 'image1' is augmented identically
    to 'image'."""
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.15,
                                       p=0.9),
        ],
        p=0.9,
    )
    return A.Compose(
        [
            A.RandomCrop(input_size, input_size),
            A.HorizontalFlip(),
            A.VerticalFlip(),
            color,
            A.ToFloat(255),  # scale uint8 [0,255] to float [0,1]
            ToTensorV2(),
        ],
        additional_targets={"image1": "image"},
    )
def get_light_augmentations(image_size):
    """Light geometric and brightness augmentations.

    Args:
        image_size: (height, width) tuple of the target image size.
    """
    min_crop_height = int(image_size[0] * 0.85)
    brightness = A.OneOf([
        A.RandomBrightnessContrast(brightness_limit=0.25,
                                   contrast_limit=0.2),
        IndependentRandomBrightnessContrast(brightness_limit=0.1,
                                            contrast_limit=0.1),
        A.RandomGamma(gamma_limit=(75, 125)),
        A.NoOp(),
    ])
    return A.Compose([
        A.ShiftScaleRotate(shift_limit=0.05,
                           scale_limit=0.1,
                           rotate_limit=15,
                           border_mode=cv2.BORDER_CONSTANT,
                           value=0),
        A.RandomSizedCrop(min_max_height=(min_crop_height, image_size[0]),
                          height=image_size[0],
                          width=image_size[1],
                          p=0.3),
        brightness,
        A.HorizontalFlip(p=0.5),
    ])
# --- Example 21 ---
 def get_train_transforms() -> A.Compose:
     """Detection training transforms with pascal_voc bboxes ('labels')."""
     color = A.OneOf(
         [
             A.HueSaturationValue(hue_shift_limit=0.2,
                                  sat_shift_limit=0.2,
                                  val_shift_limit=0.2,
                                  p=0.9),
             A.RandomBrightnessContrast(brightness_limit=0.2,
                                        contrast_limit=0.2,
                                        p=0.9),
         ],
         p=0.9,
     )
     return A.Compose(
         [
             A.RandomSizedCrop(min_max_height=(800, 800), height=1024,
                               width=1024, p=0.5),
             color,
             A.ToGray(p=0.01),  # rare grayscale conversion
             A.HorizontalFlip(p=0.5),
             A.VerticalFlip(p=0.5),
             A.Resize(height=512, width=512, p=1),
             A.Cutout(num_holes=8, max_h_size=64, max_w_size=64,
                      fill_value=0, p=0.5),
             ToTensorV2(p=1.0),
         ],
         p=1.0,
         bbox_params=A.BboxParams(
             format="pascal_voc",
             min_area=0,
             min_visibility=0,
             label_fields=["labels"],
         ),
     )
def make_augmentation(output_size, is_validation):
    """Build the augmentation pipeline.

    Args:
        output_size: (width, height) of the final resized image.
        is_validation: when True, only resize + normalize are applied.
    """
    resize = A.Resize(width=output_size[0],
                      height=output_size[1],
                      always_apply=True)
    if is_validation:
        return get_aug([resize, A.Normalize()], min_visibility=0.1)

    return get_aug([
        A.RGBShift(p=0.1),
        A.OneOf([
            A.RandomBrightnessContrast(p=0.5),
            A.HueSaturationValue(),
            A.RandomGamma(p=0.25),
            A.RandomBrightness(p=0.25),
            A.Blur(blur_limit=2, p=0.25),
        ],
                p=0.01),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.05),
        A.ShiftScaleRotate(shift_limit=0.1,
                           scale_limit=0.05,
                           rotate_limit=15,
                           border_mode=0,
                           p=0.2,
                           # fill color (presumably dataset mean — confirm)
                           value=(144.75479165, 137.70713403, 129.666091),
                           mask_value=0.0),
        resize,
        A.Normalize(),
    ],
                   min_visibility=0.1)
def light_post_image_transform():
    """Pick one of: no-op, a small pad-and-recrop jitter, or a tiny
    shift/scale/rotate."""
    pad_and_recrop = A.Compose(
        [
            A.PadIfNeeded(1024 + 10,
                          1024 + 10,
                          border_mode=cv2.BORDER_CONSTANT,
                          value=0,
                          mask_value=0),
            A.RandomSizedCrop((1024 - 5, 1024 + 5), 1024, 1024),
        ],
        p=0.2,
    )
    jitter = A.ShiftScaleRotate(
        shift_limit=0.02,
        rotate_limit=3,
        scale_limit=0.02,
        border_mode=cv2.BORDER_CONSTANT,
        mask_value=0,
        value=0,
        p=0.2,
    )
    return A.OneOf([A.NoOp(), pad_and_recrop, jitter])
# --- Example 24 ---
def get_transforms(image_size):
    """Build (train, test) transforms.

    BUG FIX: the test-time Normalize previously passed ``normStd_160`` as
    the *mean*; it now uses ``normMean_160``, matching the training
    pipeline.

    NOTE(review): the *_224 statistics are defined but unused — presumably
    intended for image_size == 224; confirm before removing.
    """
    normMean_160 = [0.59610313, 0.45660403, 0.39085752]
    normStd_160 = [0.25930294, 0.23150486, 0.22701606]

    normMean_224 = [0.59610313, 0.45660552, 0.3908535]
    normStd_224 = [0.25930434, 0.23150496, 0.22701606]

    transforms_train = albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightnessContrast(0.2, p=0.2),
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=5),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.GaussianBlur(blur_limit=5),
            albumentations.GaussNoise(var_limit=(5.0, 15.0)),
        ],
                             p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1,
                                        scale_limit=0.1,
                                        rotate_limit=10,
                                        border_mode=0,
                                        p=0.5),
        albumentations.Resize(image_size, image_size),
        albumentations.CoarseDropout(max_height=int(image_size * 0.15),
                                     max_width=int(image_size * 0.15),
                                     max_holes=6,
                                     p=0.7),
        albumentations.Normalize(std=normStd_160, mean=normMean_160)
    ])

    transforms_test = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize(std=normStd_160, mean=normMean_160)
    ])

    return transforms_train, transforms_test
# --- Example 25 ---
def get_transform(train):
    """Detection transforms; normalization/resize/tensor tail and bbox
    params are shared by the train and eval branches."""
    bbox_params = A.BboxParams(format='pascal_voc',
                               min_area=0,
                               min_visibility=0,
                               label_fields=['class_labels'])
    tail = [
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(),
    ]
    if not train:
        return A.Compose(tail, bbox_params=bbox_params)

    head = [
        A.RandomSizedBBoxSafeCrop(width=525, height=600),
        A.HorizontalFlip(p=0.5),
        A.OneOf([
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=30,
                                 val_shift_limit=20,
                                 p=1),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2,
                                       p=1),
        ],
                p=1),
    ]
    return A.Compose(head + tail, bbox_params=bbox_params)
# --- Example 26 ---
def get_train_transforms():
    """Detection training pipeline (pascal_voc bboxes, 'labels' field)."""
    color = A.OneOf([
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.9),
        A.RandomBrightnessContrast(brightness_limit=0.2,
                                   contrast_limit=0.2,
                                   p=0.9),
    ], p=0.9)
    return A.Compose(
        [
            A.RandomSizedCrop(min_max_height=(500, 500), height=1024,
                              width=1024, p=0.35),
            color,
            A.ToGray(p=0.01),  # rare grayscale conversion
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=512, width=512, p=1),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']),
    )
# --- Example 27 ---
 def __random_transform(self, img):
     """Apply random rotate-or-dropout plus a small shift; return the
     augmented image array."""
     rotate_or_dropout = albu.OneOf(
         [
             albu.ShiftScaleRotate(rotate_limit=8,
                                   scale_limit=0.16,
                                   shift_limit=0,
                                   border_mode=0,
                                   value=0,
                                   p=0.5),
             albu.CoarseDropout(max_holes=16,
                                max_height=DIM // 10,
                                max_width=DIM // 10,
                                fill_value=0,
                                p=0.5),
         ],
         p=0.5,
     )
     shift_only = albu.ShiftScaleRotate(rotate_limit=0,
                                        scale_limit=0.,
                                        shift_limit=0.12,
                                        border_mode=0,
                                        value=0,
                                        p=0.5)
     composition = albu.Compose([rotate_or_dropout, shift_only])
     return composition(image=img)['image']
def get_augmentation_transforms(add_non_spatial=True):
    """Spatial flip/rotate/distortion augmentations, optionally extended
    with intensity-only transforms."""
    transforms = [
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.OneOf([
            A.ElasticTransform(alpha=120, sigma=120 * 0.05,
                               alpha_affine=120 * 0.03, p=0.5),
            A.GridDistortion(p=0.5),
            A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1),
        ],
                p=0.8),
    ]

    if add_non_spatial:
        # Intensity-only ops: they do not move pixels.
        transforms += [
            A.CLAHE(p=0.8),
            A.RandomBrightnessContrast(p=0.8),
            A.RandomGamma(p=0.8),
        ]

    return A.Compose(transforms)
# --- Example 29 ---
def resize_transforms(image_size=224):
    """Return a one-element list whose transform squares the image to
    image_size x image_size via one of three equally likely strategies."""
    pre_size = int(image_size * 1.5)

    def _crop():
        return albu.RandomCrop(image_size, image_size, p=1)

    # Shrink the smallest side, then crop.
    random_crop = albu.Compose([albu.SmallestMaxSize(pre_size, p=1), _crop()])
    # Plain rescale (changes aspect ratio).
    rescale = albu.Compose([albu.Resize(image_size, image_size, p=1)])
    # Shrink the largest side, then crop.
    random_crop_big = albu.Compose([albu.LongestMaxSize(pre_size, p=1), _crop()])

    # Converts the image to a square of size image_size x image_size.
    return [albu.OneOf([random_crop, rescale, random_crop_big], p=1)]
    def __init__(self, img_root_path='', ann_file=None, training_flag=True, shuffle=True):
        """Parse the annotation file and set up color/blur training
        augmentations."""
        self.color_augmentor = ColorDistort()
        self.training_flag = training_flag
        self.lst = self.parse_file(img_root_path, ann_file)
        self.shuffle = shuffle

        blur_or_noise = A.OneOf([
            A.MotionBlur(blur_limit=5),
            A.MedianBlur(blur_limit=5),
            A.GaussianBlur(blur_limit=5),
            A.GaussNoise(var_limit=(5.0, 30.0)),
        ], p=0.7)
        self.train_trans = A.Compose([
            A.RandomBrightnessContrast(p=0.75, brightness_limit=0.1,
                                       contrast_limit=0.2),
            A.CLAHE(clip_limit=4.0, p=0.7),
            A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20,
                                 val_shift_limit=10, p=0.5),
            blur_or_noise,
        ])