Example #1
def test_grid_dropout_mask(image):
    mask = np.ones([256, 320], dtype=np.uint8)
    aug = A.GridDropout(p=1, mask_fill_value=0)
    result = aug(image=image, mask=mask)
    # with mask on ones and fill_value = 0 the sum of pixels is smaller
    assert result["image"].sum() < image.sum()
    assert result["image"].shape == image.shape
    assert result["mask"].sum() < mask.sum()
    assert result["mask"].shape == mask.shape

    # with mask of zeros and fill_value = 0 mask should not change
    mask = np.zeros([256, 320], dtype=np.uint8)
    aug = A.GridDropout(p=1, mask_fill_value=0)
    result = aug(image=image, mask=mask)
    assert result["image"].sum() < image.sum()
    assert np.all(result["mask"] == 0)

    # with mask mask_fill_value=100, mask sum is larger
    mask = np.random.randint(0, 10, [256, 320], np.uint8)
    aug = A.GridDropout(p=1, mask_fill_value=100)
    result = aug(image=image, mask=mask)
    assert result["image"].sum() < image.sum()
    assert result["mask"].sum() > mask.sum()

    # with mask mask_fill_value=None, mask is not changed
    mask = np.ones([256, 320], dtype=np.uint8)
    aug = A.GridDropout(p=1, mask_fill_value=None)
    result = aug(image=image, mask=mask)
    assert result["image"].sum() < image.sum()
    assert result["mask"].sum() == mask.sum()
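The behaviour the test above checks can be reproduced directly; below is a minimal sketch (shapes and parameters are arbitrary, assuming the same GridDropout signature as in the test) showing how mask_fill_value controls whether dropped grid cells are also filled in the mask:

import numpy as np
import albumentations as A

image = np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8)
mask = np.ones((256, 320), dtype=np.uint8)

# mask_fill_value=0: dropped grid cells are filled in both image and mask.
drop_both = A.GridDropout(ratio=0.5, fill_value=0, mask_fill_value=0, p=1)
out = drop_both(image=image, mask=mask)
assert out["mask"].sum() < mask.sum()

# mask_fill_value=None: only the image is modified, the mask passes through unchanged.
drop_image_only = A.GridDropout(ratio=0.5, fill_value=0, mask_fill_value=None, p=1)
out = drop_image_only(image=image, mask=mask)
assert out["mask"].sum() == mask.sum()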
Example #2
def get_transforms(*, data_type):
    if data_type == "light_train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(scale_limit=(0, 0), p=0.5),
            ToTensorV2(),
        ])

    if data_type == "train":
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            albumentations.OneOf([
                albumentations.ElasticTransform(
                    alpha=1, sigma=20, alpha_affine=10),
                albumentations.GridDistortion(num_steps=6, distort_limit=0.1),
                albumentations.OpticalDistortion(distort_limit=0.05,
                                                 shift_limit=0.05),
            ],
                                 p=0.2),
            albumentations.core.composition.PerChannel(albumentations.OneOf([
                albumentations.MotionBlur(p=.05),
                albumentations.MedianBlur(blur_limit=3, p=.05),
                albumentations.Blur(blur_limit=3, p=.05),
            ]),
                                                       p=1.0),
            albumentations.OneOf([
                albumentations.CoarseDropout(max_holes=16,
                                             max_height=CFG.size // 16,
                                             max_width=CFG.size // 16,
                                             fill_value=0,
                                             p=0.5),
                albumentations.GridDropout(ratio=0.09, p=0.5),
                albumentations.Cutout(num_holes=8,
                                      max_h_size=CFG.size // 16,
                                      max_w_size=CFG.size // 16,
                                      p=0.2),
            ],
                                 p=0.5),
            albumentations.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5),
            ToTensorV2(),
        ],
                       additional_targets={
                           'r': 'image',
                           'g': 'image',
                           'b': 'image',
                           'y': 'image',
                       })

    elif data_type == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])
Example #3
File: transforms.py  Project: srdsam/A4C3D
    def get_transforms(transcfg):
        img_height, img_width = transcfg['img_size']

        transforms = []

        if transcfg.get("grid_dropout", False):
            chance, apply_to_mask = transcfg.get("grid_dropout")
            if not apply_to_mask:
                apply_to_mask = None  # GridDropout expects None (not False) to leave the mask untouched
            transforms.append(A.GridDropout(ratio=chance,
                                            unit_size_min=10,
                                            unit_size_max=50,
                                            random_offset=True,
                                            fill_value=0,
                                            mask_fill_value=apply_to_mask))

        if transcfg.get("randomresizedcrop", False):
            scale = transcfg.get("randomresizedcrop")
            transforms.append(A.RandomResizedCrop(height=img_height, width=img_width, scale=scale, ratio=(0.8, 1.2), p=1))
        elif img_height != input_height or img_width != input_width:
            transforms.append(A.Resize(height=img_height, width=img_width))

        if transcfg.get("shiftscalerotate", False):
            transforms.append(A.ShiftScaleRotate(rotate_limit=(-10, 10), border_mode=cv2.BORDER_CONSTANT, value=0, p=0.5))

        if transcfg.get("normalize", True):
            mean, std = cfg['data']['3d']['mean'], cfg['data']['3d']['std']
            transforms.append(A.Normalize(mean=mean, std=std))

        return A.Compose(transforms)
Example #4
def get_transform(
        target_size=256,
        transform_list='horizontal_flip', # random_crop | keep_aspect
        # augment_ratio=0.5,
        is_train=True,
        ):
    transform = list()
    transform_list = transform_list.split(', ')
    # augments = list()

    # default resize (appended once, not once per transform name)
    transform.append(albumentations.Resize(height=target_size, width=target_size, p=1))

    for transform_name in transform_list:
        if transform_name == 'random_crop':
            # scale = (0.6, 1.0) if is_train else (0.8, 1.0)
            transform.append(albumentations.RandomResizedCrop(height=target_size, width=target_size, p=1))
        # elif transform_name == 'resize':
        #     transform.append(Resize(target_size))
        elif transform_name == 'horizontal_flip':
            transform.append(albumentations.HorizontalFlip(p=0.5))
        elif transform_name == 'vertical_flip':
            transform.append(albumentations.VerticalFlip(p=0.5))
        elif transform_name == 'griddropout':
            transform.append(albumentations.GridDropout())

    # transform.append(RandomApply(augments, p=augment_ratio))
    # Normalize works on the numpy image, so it must precede ToTensorV2.
    transform.append(albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    transform.append(ToTensorV2())
    return albumentations.Compose(transform)
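A short usage sketch for the helper above (the random image stands in for a real input; note that transform_list is split on ', '):

import numpy as np

pipeline = get_transform(target_size=256,
                         transform_list='horizontal_flip, griddropout')
image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
tensor = pipeline(image=image)['image']  # CHW float tensor after Normalize + ToTensorV2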
Example #5
 def __init__(self):
     self.policy = A.Compose([
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
Example #6
def test_grid_dropout_params(ratio, holes_number_x, holes_number_y,
                             unit_size_min, unit_size_max, shift_x, shift_y):
    img = np.random.randint(0, 256, [256, 320], np.uint8)

    aug = A.GridDropout(
        ratio=ratio,
        unit_size_min=unit_size_min,
        unit_size_max=unit_size_max,
        holes_number_x=holes_number_x,
        holes_number_y=holes_number_y,
        shift_x=shift_x,
        shift_y=shift_y,
        random_offset=False,
        fill_value=0,
        p=1,
    )
    result = aug(image=img)["image"]
    # with fill_value = 0 the sum of pixels is smaller
    assert result.sum() < img.sum()
    assert result.shape == img.shape
    params = aug.get_params_dependent_on_targets({"image": img})
    holes = params["holes"]
    assert len(holes[0]) == 4
    # check grid offsets
    if shift_x:
        assert holes[0][0] == shift_x
    else:
        assert holes[0][0] == 0
    if shift_y:
        assert holes[0][1] == shift_y
    else:
        assert holes[0][1] == 0
    # for grid set with limits
    if unit_size_min and unit_size_max:
        assert max(
            1, unit_size_min * ratio) <= (holes[0][2] - holes[0][0]) <= min(
                max(1, unit_size_max * ratio), 256)
    elif holes_number_x and holes_number_y:
        assert (holes[0][2] - holes[0][0]) == max(
            1, int(ratio * 320 // holes_number_x))
        assert (holes[0][3] - holes[0][1]) == max(
            1, int(ratio * 256 // holes_number_y))
Example #7
def hard_transforms():
    result = [
        #albu.JpegCompression(quality_lower=20, quality_upper=40, p=.1),
        albu.ShiftScaleRotate(p=.5,
                              scale_limit=(-.2, .2),
                              rotate_limit=5,
                              shift_limit=(-.01, .01),
                              border_mode=0),
        albu.GridDistortion(p=.5, border_mode=0, distort_limit=.25),

        #albu.ElasticTransform(p=.1, alpha=70, sigma=9, alpha_affine=7),
        # albu.OneOf([
        #albu.CoarseDropout(max_holes=8, max_height=32, max_width=16, min_holes=4, min_height=2, min_width=2, fill_value=0, p=.1),
        albu.GridDropout(ratio=.5, p=.5),
        #], p=.3),
        # albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.3),
        # #albu.CenterCrop(112, 112, .3),
        albu.Blur(blur_limit=3, p=.3),
    ]

    return result
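hard_transforms returns a plain list, so it still has to be wrapped in a Compose before it can be applied; a minimal sketch with a placeholder input:

import numpy as np
import albumentations as albu

train_aug = albu.Compose(hard_transforms())
image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
augmented = train_aug(image=image)["image"]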
Example #8
    def __init__(self, size=512, mode='train'):
        assert mode in ['train', 'val', 'test']

        if mode == 'train':
            self.transform = A.Compose([
                A.HorizontalFlip(p=0.5),
                A.ShiftScaleRotate(
                    p=0.5,
                    shift_limit=0.0625,
                    scale_limit=0.1,
                    rotate_limit=10,
                    interpolation=1,
                    border_mode=4,
                ),
                A.OneOf([
                    A.Cutout(
                        p=1.0,
                        num_holes=6,
                        max_h_size=32,
                        max_w_size=32,
                    ),
                    A.GridDropout(p=1.0,
                                  ratio=0.5,
                                  unit_size_min=64,
                                  unit_size_max=128,
                                  random_offset=True),
                ],
                        p=0.5),
                A.Resize(size, size, p=1.0),
            ])
        elif mode == 'val':
            self.transform = A.Compose([
                A.Resize(size, size, p=1.0),
            ])
        elif mode == 'test':
            self.transform = A.Compose([
                A.Resize(size, size, p=1.0),
            ])
Example #9
 def __init__(self, p=0.1, drop_ratio=0.4):
     self.p = p
     self.drop_ratio = drop_ratio
     self.aug = A.GridDropout(p=self.p, ratio=self.drop_ratio)
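The snippet above stops at the constructor; the class name and __call__ below are assumptions about how such a wrapper is typically completed, not part of the original code:

import numpy as np
import albumentations as A

class GridDropoutAug:
    """Hypothetical wrapper mirroring the constructor above."""

    def __init__(self, p=0.1, drop_ratio=0.4):
        self.p = p
        self.drop_ratio = drop_ratio
        self.aug = A.GridDropout(p=self.p, ratio=self.drop_ratio)

    def __call__(self, image: np.ndarray) -> np.ndarray:
        # Albumentations transforms take and return dicts keyed by target name.
        return self.aug(image=image)["image"]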
Example #10
    def __init__(self,
                 train_list,
                 test_list,
                 database_root,
                 store_memory=True,
                 data_aug=False,
                 test_aug=False):
        """Initialize the Dataset object
        Args:
        train_list: TXT file or list with the paths of the images to use for training (Images must be between 0 and 255)
        test_list: TXT file or list with the paths of the images to use for testing (Images must be between 0 and 255)
        database_root: Path to the root of the Database
        store_memory: True keeps all the training images in memory, False loads the images at runtime
        data_aug: whether data augmentation is applied to the training images
        test_aug: whether data augmentation is applied to the testing images
        Returns:
        """

        self.test_aug_num = 1
        if not store_memory and data_aug:
            sys.stderr.write(
                'Online data augmentation not supported when the data is not stored in memory!'
            )
            sys.exit()

        # Load training images (path) and labels
        print('Started loading files...')
        if not isinstance(train_list, list) and train_list is not None:
            with open(train_list) as t:
                train_paths = t.readlines()
        elif isinstance(train_list, list):
            train_paths = train_list
        else:
            train_paths = []
        if not isinstance(test_list, list) and test_list is not None:
            with open(test_list) as t:
                test_paths = t.readlines()
        elif isinstance(test_list, list):
            test_paths = test_list
        else:
            test_paths = []
        self.images_train = []
        self.images_train_path = []
        self.labels_train = []
        self.labels_train_path = []
        for idx, line in enumerate(train_paths):
            if store_memory:
                img = imageio.imread(
                    os.path.join(database_root, str(line.split()[0])))
                label = imageio.imread(
                    os.path.join(database_root, str(line.split()[1])))
                if data_aug:
                    if idx == 0:
                        sys.stdout.write('Performing the data augmentation')
                    # fig, ax = plt.subplots(4, 6, figsize=(21, 7))
                    # for i in range(60):
                    for i in range(6):
                        rand = np.random.uniform(0.6, 1.3)  # scalar scale factor
                        aug_pipeline_scale_hflip = A.Compose([
                            A.HorizontalFlip(p=0.5),
                            A.Resize(int(img.shape[0] * rand),
                                     int(img.shape[1] * rand),
                                     p=1),
                            A.RandomBrightnessContrast(brightness_limit=0.2,
                                                       contrast_limit=0.2,
                                                       p=0.8),
                            A.GaussNoise(p=0.2),
                            A.GridDropout(
                                mask_fill_value=0, random_offset=True, p=0.03)
                        ],
                                                             p=1.0)
                        augmented = aug_pipeline_scale_hflip(image=img,
                                                             mask=label)
                        self.images_train.append(
                            np.array(augmented['image'], dtype=np.uint8))
                        self.labels_train.append(
                            np.array(augmented['mask'], dtype=np.uint8))

                    # original image
                    self.images_train.append(np.array(img, dtype=np.uint8))
                    self.labels_train.append(np.array(label, dtype=np.uint8))

                # no data augmentation
                else:
                    if idx == 0:
                        sys.stdout.write('Loading the data')
                    self.images_train.append(np.array(img, dtype=np.uint8))
                    self.labels_train.append(np.array(label, dtype=np.uint8))
                if (idx + 1) % 50 == 0:
                    sys.stdout.write('.')
            self.images_train_path.append(
                os.path.join(database_root, str(line.split()[0])))
            self.labels_train_path.append(
                os.path.join(database_root, str(line.split()[1])))
        sys.stdout.write('\n')
        self.images_train_path = np.array(self.images_train_path)
        self.labels_train_path = np.array(self.labels_train_path)

        # Load testing images (path) and labels
        self.images_test = []
        self.images_test_path = []
        for idx, line in enumerate(test_paths):
            if store_memory:
                img = imageio.imread(
                    os.path.join(database_root, str(line.split()[0])))
                self.images_test.append(np.array(img, dtype=np.uint8))
                self.images_test_path.append(
                    os.path.join(database_root, str(line.split()[0])))
                if test_aug:
                    for _ in range(1):
                        aug_pipeline_bcnoise = A.Compose(
                            [A.HorizontalFlip(p=1.0)], p=1.0)
                        augmented_2 = aug_pipeline_bcnoise(image=img)
                        self.images_test.append(
                            np.array(augmented_2['image'], dtype=np.uint8))
                        self.images_test_path.append(
                            os.path.join(database_root, str(line.split()[0])))
                if idx == 0:
                    self.test_aug_num = len(self.images_test)
                if (idx + 1) // 1000 == 0:
                    # print('Loaded ' + str(idx) + ' test images')
                    pass
        print('Done initializing Dataset')

        self.train_ptr = 0
        self.test_ptr = 0
        self.train_size = max(len(self.images_train_path),
                              len(self.images_train))
        self.test_size = max(len(self.images_test_path), len(self.images_test))
        self.train_idx = np.arange(self.train_size)
        np.random.shuffle(self.train_idx)
        self.store_memory = store_memory
Example #11
    def __init__(self, folds, img_height, img_width, mean, std):
        df = pd.read_csv('../input/dataset/train_folds.csv')
        df = df[['Image', 'label', 'kfold']]

        df = df[df.kfold.isin(folds)].reset_index(drop=True)

        self.image_ids = df.Image.values

        self.labels = df.label.values

        if len(folds) == 1:
            self.aug = albumentations.Compose([
                albumentations.Resize(img_height, img_width),
                albumentations.Normalize(mean, std, always_apply=True)
            ])
        else:
            self.aug = albumentations.Compose([
                albumentations.Resize(img_height, img_width),
                albumentations.OneOf([
                    albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                                    scale_limit=0.1,
                                                    rotate_limit=45),
                    albumentations.Rotate(limit=5),
                    albumentations.RandomGamma(),
                    albumentations.RandomShadow(),
                    albumentations.RandomGridShuffle(),
                    albumentations.ElasticTransform(),
                    albumentations.RGBShift(),
                ]),
                albumentations.OneOf([
                    albumentations.OneOf([
                        albumentations.Blur(),
                        albumentations.MedianBlur(),
                        albumentations.MotionBlur(),
                        albumentations.GaussianBlur(),
                    ]),
                    albumentations.OneOf([
                        albumentations.GaussNoise(),
                        albumentations.IAAAdditiveGaussianNoise(),
                        albumentations.ISONoise()
                    ]),
                ]),
                albumentations.OneOf([
                    albumentations.RandomBrightness(),
                    albumentations.RandomContrast(),
                    albumentations.RandomBrightnessContrast(),
                ]),
                albumentations.OneOf([
                    albumentations.OneOf([
                        albumentations.Cutout(),
                        albumentations.CoarseDropout(),
                        albumentations.GridDistortion(),
                        albumentations.GridDropout(),
                        albumentations.OpticalDistortion()
                    ]),
                    albumentations.OneOf([
                        albumentations.HorizontalFlip(),
                        albumentations.VerticalFlip(),
                        albumentations.RandomRotate90(),
                        albumentations.Transpose()
                    ]),
                ]),

                # albumentations.OneOf([
                #         albumentations.RandomSnow(),
                #         albumentations.RandomRain(),
                #         albumentations.RandomFog(),
                #     ]),
                albumentations.Normalize(mean, std, always_apply=True)
            ])
Example #12
def get_train_transforms_atopy(input_size,
                               use_crop=False,
                               use_no_color_aug=False):
    if use_crop:
        resize = [
            al.Resize(int(input_size * 1.2), int(input_size * 1.2)),
            al.RandomSizedCrop(min_max_height=(int(input_size * 0.6),
                                               int(input_size * 1.2)),
                               height=input_size,
                               width=input_size)
        ]
    else:
        resize = [al.Resize(input_size, input_size)]
    return al.Compose(resize + [
        al.Flip(p=0.5),
        al.OneOf([
            al.RandomRotate90(),
            al.Rotate(limit=180),
        ], p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(),
            al.OpticalDistortion(),
            al.GridDistortion(),
            al.ElasticTransform(),
        ],
                 p=0.3),
        al.RandomGridShuffle(p=0.05),
        al.OneOf([
            al.RandomGamma(),
            al.HueSaturationValue(),
            al.RGBShift(),
            al.CLAHE(),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(p=0.05),
        al.RandomShadow(p=0.05),
        al.RandomBrightnessContrast(p=0.05),
        al.GaussNoise(p=0.2),
        al.ISONoise(p=0.2),
        al.MultiplicativeNoise(p=0.2),
        al.ToGray(p=0.05),
        al.ToSepia(p=0.05),
        al.Solarize(p=0.05),
        al.Equalize(p=0.05),
        al.Posterize(p=0.05),
        al.FancyPCA(p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=3),
            al.Blur(blur_limit=3),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        al.CoarseDropout(p=0.05),
        al.Cutout(p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
Example #13
def get_train_transforms_mmdetection(input_size,
                                     use_crop=False,
                                     use_no_color_aug=False,
                                     use_center_crop=False,
                                     center_crop_ratio=0.9,
                                     use_gray=False):
    if isinstance(input_size, int):
        input_size = (input_size, input_size)
    return al.Compose([
        al.RandomResizedCrop(height=input_size[0],
                             width=input_size[1],
                             scale=(0.4, 1.0),
                             interpolation=0,
                             p=0.5),
        al.Resize(input_size[0], input_size[1], p=1.0),
        al.HorizontalFlip(p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(border_mode=0,
                                shift_limit=(-0.2, 0.2),
                                scale_limit=(-0.2, 0.2),
                                rotate_limit=(-20, 20)),
            al.OpticalDistortion(border_mode=0,
                                 distort_limit=[-0.5, 0.5],
                                 shift_limit=[-0.5, 0.5]),
            al.GridDistortion(
                num_steps=5, distort_limit=[-0., 0.3], border_mode=0),
            al.ElasticTransform(border_mode=0),
            al.IAAPerspective(),
            al.RandomGridShuffle()
        ],
                 p=0.1),
        al.Rotate(limit=(-25, 25), border_mode=0, p=0.1),
        al.OneOf([
            al.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                        contrast_limit=(-0.2, 0.2)),
            al.HueSaturationValue(hue_shift_limit=(-20, 20),
                                  sat_shift_limit=(-30, 30),
                                  val_shift_limit=(-20, 20)),
            al.RandomGamma(gamma_limit=(30, 150)),
            al.RGBShift(),
            al.CLAHE(clip_limit=(1, 15)),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(num_flare_circles_lower=1,
                          num_flare_circles_upper=2,
                          src_radius=110,
                          p=0.05),
        al.RandomShadow(p=0.05),
        al.GaussNoise(var_limit=(10, 20), p=0.05),
        al.ISONoise(color_shift=(0, 15), p=0.05),
        al.MultiplicativeNoise(p=0.05),
        al.OneOf([
            al.ToGray(p=1. if use_gray else 0.05),
            al.ToSepia(p=0.05),
            al.Solarize(p=0.05),
            al.Equalize(p=0.05),
            al.Posterize(p=0.05),
            al.FancyPCA(p=0.05),
        ],
                 p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=(3, 7)),
            al.Blur(blur_limit=(3, 7)),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        al.CoarseDropout(p=0.05),
        al.Cutout(num_holes=30,
                  max_h_size=37,
                  max_w_size=37,
                  fill_value=0,
                  p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(scale_min=0.5, scale_max=0.9, p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
Example #14
def randAugment(N=2, M=4, p=1.0, mode="all", cut_out=False):
    """
    Examples:
        >>> # M from 0 to 20
        >>> transforms = randAugment(N=3, M=8, p=0.8, mode='all', cut_out=False)
    """
    # Magnitude(M) search space
    scale = np.linspace(0, 0.4, 20)
    translate = np.linspace(0, 0.4, 20)
    rot = np.linspace(0, 30, 20)
    shear_x = np.linspace(0, 20, 20)
    shear_y = np.linspace(0, 20, 20)
    contrast = np.linspace(0.0, 0.4, 20)
    bright = np.linspace(0.0, 0.4, 20)
    sat = np.linspace(0.0, 0.2, 20)
    hue = np.linspace(0.0, 0.2, 20)
    shar = np.linspace(0.0, 0.9, 20)
    blur = np.linspace(0, 0.2, 20)
    noise = np.linspace(0, 1, 20)
    cut = np.linspace(0, 0.6, 20)
    # Transformation search space
    Aug = [  # geometrical
        albumentations.Affine(scale=(1.0 - scale[M], 1.0 + scale[M]), p=p),
        albumentations.Affine(translate_percent=(-translate[M], translate[M]),
                              p=p),
        albumentations.Affine(rotate=(-rot[M], rot[M]), p=p),
        albumentations.Affine(shear={'x': (-shear_x[M], shear_x[M])}, p=p),
        albumentations.Affine(shear={'y': (-shear_y[M], shear_y[M])}, p=p),
        # Color Based
        albumentations.RandomContrast(limit=contrast[M], p=p),
        albumentations.RandomBrightness(limit=bright[M], p=p),
        albumentations.ColorJitter(brightness=0.0,
                                   contrast=0.0,
                                   saturation=sat[M],
                                   hue=0.0,
                                   p=p),
        albumentations.ColorJitter(brightness=0.0,
                                   contrast=0.0,
                                   saturation=0.0,
                                   hue=hue[M],
                                   p=p),
        albumentations.Sharpen(alpha=(0.1, shar[M]), lightness=(0.5, 1.0),
                               p=p),
        albumentations.core.composition.PerChannel(albumentations.OneOf([
            albumentations.MotionBlur(p=0.5),
            albumentations.MedianBlur(blur_limit=3, p=1),
            albumentations.Blur(blur_limit=3, p=1),
        ]),
                                                   p=blur[M] * p),
        albumentations.GaussNoise(var_limit=(8.0 * noise[M], 64.0 * noise[M]),
                                  per_channel=True,
                                  p=p)
    ]
    # Sampling from the Transformation search space
    if mode == "geo":
        transforms = albumentations.SomeOf(Aug[0:5], N)
    elif mode == "color":
        transforms = albumentations.SomeOf(Aug[5:], N)
    else:
        transforms = albumentations.SomeOf(Aug, N)

    if cut_out:
        cut_trans = albumentations.OneOf([
            albumentations.CoarseDropout(
                max_holes=8, max_height=16, max_width=16, fill_value=0, p=1),
            albumentations.GridDropout(ratio=cut[M], p=1),
            albumentations.Cutout(
                num_holes=8, max_h_size=16, max_w_size=16, p=1),
        ],
                                         p=cut[M])
        transforms = albumentations.Compose([transforms, cut_trans])

    return transforms
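A brief usage sketch for the returned policy (the input image is a placeholder); the SomeOf/Compose result is called like any other albumentations pipeline:

import numpy as np

transforms = randAugment(N=3, M=8, p=0.8, mode="all", cut_out=True)
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
augmented = transforms(image=image)["image"]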
Example #15
     ],
     p=0.5,
 ),
 albumentations.OneOf(
     [
         albumentations.Blur(p=0.1),
         albumentations.GaussianBlur(p=0.1),
         albumentations.MotionBlur(p=0.1),
     ],
     p=0.1,
 ),
 albumentations.OneOf(
     [
         albumentations.GaussNoise(p=0.1),
         albumentations.ISONoise(p=0.1),
         albumentations.GridDropout(ratio=0.5, p=0.2),
         albumentations.CoarseDropout(
             max_holes=16,
             min_holes=8,
             max_height=16,
             max_width=16,
             min_height=8,
             min_width=8,
             p=0.2,
         ),
     ],
     p=0.2,
 ),
 albumentations.Normalize(
     mean=[0.485, 0.456, 0.406],
     std=[0.229, 0.224, 0.225],
Example #16
def get_transform_imagenet(use_albu_aug):
    if use_albu_aug:
        train_transform = al.Compose([
            # al.Flip(p=0.5),
            al.Resize(256, 256, interpolation=2),
            al.RandomResizedCrop(224,
                                 224,
                                 scale=(0.08, 1.0),
                                 ratio=(3. / 4., 4. / 3.),
                                 interpolation=2),
            al.HorizontalFlip(),
            al.OneOf(
                [
                    al.OneOf(
                        [
                            al.ShiftScaleRotate(
                                border_mode=cv2.BORDER_CONSTANT,
                                rotate_limit=30),  # , p=0.05),
                            al.OpticalDistortion(
                                border_mode=cv2.BORDER_CONSTANT,
                                distort_limit=5.0,
                                shift_limit=0.1),
                            # , p=0.05),
                            al.GridDistortion(border_mode=cv2.BORDER_CONSTANT
                                              ),  # , p=0.05),
                            al.ElasticTransform(
                                border_mode=cv2.BORDER_CONSTANT,
                                alpha_affine=15),  # , p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.RandomGamma(),  # p=0.05),
                            al.HueSaturationValue(),  # p=0.05),
                            al.RGBShift(),  # p=0.05),
                            al.CLAHE(),  # p=0.05),
                            al.ChannelShuffle(),  # p=0.05),
                            al.InvertImg(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.RandomSnow(),  # p=0.05),
                            al.RandomRain(),  # p=0.05),
                            al.RandomFog(),  # p=0.05),
                            al.RandomSunFlare(num_flare_circles_lower=1,
                                              num_flare_circles_upper=2,
                                              src_radius=110),
                            # p=0.05, ),
                            al.RandomShadow(),  # p=0.05),
                        ],
                        p=0.1),
                    al.RandomBrightnessContrast(p=0.1),
                    al.OneOf(
                        [
                            al.GaussNoise(),  # p=0.05),
                            al.ISONoise(),  # p=0.05),
                            al.MultiplicativeNoise(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.ToGray(),  # p=0.05),
                            al.ToSepia(),  # p=0.05),
                            al.Solarize(),  # p=0.05),
                            al.Equalize(),  # p=0.05),
                            al.Posterize(),  # p=0.05),
                            al.FancyPCA(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            # al.MotionBlur(blur_limit=1),
                            al.Blur(blur_limit=[3, 5]),
                            al.MedianBlur(blur_limit=[3, 5]),
                            al.GaussianBlur(blur_limit=[3, 5]),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.CoarseDropout(),  # p=0.05),
                            al.Cutout(),  # p=0.05),
                            al.GridDropout(),  # p=0.05),
                            al.ChannelDropout(),  # p=0.05),
                            al.RandomGridShuffle(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.Downscale(),  # p=0.1),
                            al.ImageCompression(quality_lower=60),  # , p=0.1),
                        ],
                        p=0.1),
                ],
                p=0.5),
            al.Normalize(),
            ToTensorV2()
        ])
    else:
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    if use_albu_aug:
        train_transform = MultiDataTransformAlbu(train_transform)
    else:
        train_transform = MultiDataTransform(train_transform)

    return train_transform, test_transform
Example #17
File: transforms.py  Project: jphdotam/T1T2
            transforms.append(A.Rotate(90, p=p))
        if p := trans_cfg.get('shiftscalerotate', False):
            transforms.append(A.ShiftScaleRotate(p=p))
        if p := trans_cfg.get('elastictransform', False):
            transforms.append(A.ElasticTransform(p=p))
        if p := trans_cfg.get('griddistortion', False):
            transforms.append(A.GridDistortion(p=p))
        if p := trans_cfg.get('hflip', False):
            transforms.append(A.HorizontalFlip(p=p))
        if p := trans_cfg.get('vflip', False):
            transforms.append(A.VerticalFlip(p=p))
        if p := trans_cfg.get('brightnesscontrast', False):
            transforms.append(A.RandomBrightnessContrast(p=p))
        if p := trans_cfg.get('griddropout', False):
            transforms.append(
                A.GridDropout(fill_value=0, mask_fill_value=0, p=p))
        if p := trans_cfg.get('channeldropout', False):
            transforms.append(A.ChannelDropout(channel_drop_range=(1, 1), p=p))
        if p := trans_cfg.get('blur', False):
            transforms.append(
                A.OneOf([
                    A.MedianBlur(blur_limit=5, p=p),
                    A.Blur(blur_limit=5, p=p)
                ]))
        if p := trans_cfg.get('noise', False):
            transforms.append(
                A.OneOf([A.GaussNoise(p=p),
                         A.MultiplicativeNoise(p=p)]))
        if p := trans_cfg.get('hsv', False):
            transforms.append(A.HueSaturationValue(p=p))
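For reference, a hypothetical trans_cfg of the shape the walrus-operator guards above expect: keys are transform names, values are probabilities, and a missing or False entry skips that transform.

trans_cfg = {
    "hflip": 0.5,
    "vflip": 0.5,
    "brightnesscontrast": 0.3,
    "griddropout": 0.2,
    "blur": 0.2,
}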