Example #1
0
def get_training_augmentation():
    """Build the training augmentation pipeline (320x320 random crops).

    NOTE(review): this function is redefined later in the file; at import
    time the later definition shadows this one.
    """
    geometry = [
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        albu.PadIfNeeded(min_height=320,
                         min_width=320,
                         always_apply=True,
                         border_mode=0),
        albu.RandomCrop(height=320, width=320, always_apply=True),
    ]
    distortion = [
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
    ]
    photometric = [
        albu.OneOf(
            [albu.CLAHE(p=1),
             albu.RandomBrightness(p=1),
             albu.RandomGamma(p=1)],
            p=0.9,
        ),
        albu.OneOf(
            [albu.IAASharpen(p=1),
             albu.Blur(blur_limit=3, p=1),
             albu.MotionBlur(blur_limit=3, p=1)],
            p=0.9,
        ),
        albu.OneOf(
            [albu.RandomContrast(p=1),
             albu.HueSaturationValue(p=1)],
            p=0.9,
        ),
    ]
    return albu.Compose(geometry + distortion + photometric)
def get_training_augmentation(dim=512, rot_limit=45):
    """Build the training augmentation pipeline for `dim` x `dim` crops.

    Args:
        dim: output crop size in pixels.
        rot_limit: maximum absolute rotation angle (degrees).
    """
    sizing = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.5,
                           rotate_limit=rot_limit,
                           shift_limit=0.1,
                           p=1,
                           border_mode=0),
        A.PadIfNeeded(min_height=dim,
                      min_width=dim,
                      always_apply=True,
                      border_mode=0),
        A.RandomCrop(height=dim, width=dim, always_apply=True),
    ]
    jitter = [
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [A.CLAHE(p=1), A.RandomBrightness(p=1), A.RandomGamma(p=1)],
            p=0.9,
        ),
        A.OneOf(
            [A.IAASharpen(p=1),
             A.Blur(blur_limit=3, p=1),
             A.MotionBlur(blur_limit=3, p=1)],
            p=0.9,
        ),
        A.OneOf(
            [A.RandomContrast(p=1), A.HueSaturationValue(p=1)],
            p=0.9,
        ),
    ]
    # Keep masks binary after interpolation.
    finalize = [A.Lambda(mask=round_clip_0_1)]
    return A.Compose(sizing + jitter + finalize)
Example #3
0
def test_transform_pipeline_serialization(seed, image, mask):
    """Round-trip a nested pipeline through A.to_dict / A.from_dict and
    verify that, under the same RNG seed, the original and deserialized
    pipelines produce identical image and mask outputs."""
    aug = A.Compose([
        A.OneOrOther(
            A.Compose([
                A.Resize(1024, 1024),
                A.RandomSizedCrop(min_max_height=(256, 1024),
                                  height=512,
                                  width=512,
                                  p=1),
                A.OneOf([
                    A.RandomSizedCrop(min_max_height=(256, 512),
                                      height=384,
                                      width=384,
                                      p=0.5),
                    A.RandomSizedCrop(min_max_height=(256, 512),
                                      height=512,
                                      width=512,
                                      p=0.5),
                ]),
            ]),
            A.Compose([
                A.Resize(1024, 1024),
                A.RandomSizedCrop(min_max_height=(256, 1025),
                                  height=256,
                                  width=256,
                                  p=1),
                A.OneOf([A.HueSaturationValue(p=0.5),
                         A.RGBShift(p=0.7)], p=1),
            ]),
        ),
        A.HorizontalFlip(p=1),
        A.RandomBrightnessContrast(p=0.5),
    ])
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    # Re-seed before each run so both pipelines draw identical random numbers.
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
def hard_augmentations():
    """Aggressive training augmentations: D4 symmetries, elastic warps,
    noise/blur, dropout, color jitter, weather effects, then normalize."""
    spatial = [
        A.RandomRotate90(),
        A.Transpose(),
        A.RandomGridShuffle(),
        A.ShiftScaleRotate(scale_limit=0.1,
                           rotate_limit=45,
                           border_mode=cv2.BORDER_CONSTANT,
                           mask_value=0,
                           value=0),
        A.ElasticTransform(border_mode=cv2.BORDER_CONSTANT,
                           alpha_affine=5,
                           mask_value=0,
                           value=0),
    ]
    occasional_noise = A.OneOf([
        A.GaussianBlur(),
        A.GaussNoise(),
        A.IAAAdditiveGaussianNoise(),
        A.NoOp(),
    ])
    dropout = A.OneOf([
        A.CoarseDropout(),
        A.MaskDropout(max_objects=10),
        A.NoOp(),
    ])
    # Spatial-preserving photometric jitter.
    color = A.OneOf([
        A.RandomBrightnessContrast(brightness_by_max=True),
        A.CLAHE(),
        A.HueSaturationValue(),
        A.RGBShift(),
        A.RandomGamma(),
        A.NoOp(),
    ])
    weather = A.OneOf([
        A.RandomFog(fog_coef_lower=0.01, fog_coef_upper=0.3, p=0.1),
        A.NoOp(),
    ])
    steps = spatial + [occasional_noise, dropout, color, weather, A.Normalize()]
    return A.Compose(steps)
Example #5
0
def init_transforms(color_aug=False):
    """Return a (train_transform, val_transform) pair.

    Validation only normalizes and tensorizes; when `color_aug` is True the
    training pipeline additionally applies photometric jitter, blur, and
    noise before normalization. Otherwise both pipelines are the same.
    """
    mean, std = get_imagenet_mean_std()
    val_trf = A.Compose([A.Normalize(mean, std), ToTensor()], p=1)

    if not color_aug:
        return val_trf, val_trf

    color_jitter = A.OneOf(
        [
            A.RandomBrightnessContrast(brightness_limit=0.3,
                                       contrast_limit=0.2,
                                       p=0.7),
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=30,
                                 val_shift_limit=0,
                                 p=0.7),
            A.CLAHE(p=0.5),
            A.ToGray(p=0.5),
            A.ChannelShuffle(p=0.1),
        ],
        p=0.6,
    )
    blur = A.OneOf(
        [
            A.GaussianBlur(p=0.5),
            A.Blur(p=0.5),
        ],
        p=0.3,
    )
    noise = A.OneOf(
        [A.GaussNoise(p=0.5), A.IAAAdditiveGaussianNoise(p=0.5)],
        p=0.1,
    )
    train_trf = A.Compose(
        [color_jitter, blur, noise, A.Normalize(mean, std), ToTensor()],
        p=1,
    )
    return train_trf, val_trf
Example #6
0
 def __init__(self, crop_size=800):
     """Build the master augmentation pipeline and a tensor converter.

     Args:
         crop_size: side length (pixels) of the random square crop.
     """
     self.master = albumentations.Compose([
         albumentations.RandomCrop(crop_size, crop_size),
         albumentations.RandomRotate90(p=0.5),
         albumentations.Transpose(p=0.5),
         albumentations.Flip(p=0.5),
         albumentations.OneOf([
             albumentations.RandomBrightness(),
             albumentations.RandomContrast(),
             albumentations.HueSaturationValue(),
         ],
                              p=0.5),
         albumentations.ElasticTransform(),
         albumentations.ShiftScaleRotate(shift_limit=0.1,
                                         scale_limit=0.02,
                                         rotate_limit=15,
                                         p=0.5),
         # NOTE(review): dataset-specific normalization stats — confirm they
         # match the training corpus.
         albumentations.Normalize(mean=[0.798, 0.621, 0.841],
                                  std=[0.125, 0.228, 0.089]),
     ])
     self.to_tensor = ToTensor()
def hard_transforms():
    """Return a list of strong photometric/geometric augmentations."""
    return [
        # Small shift/stretch/rotate with reflective borders, 50% chance.
        albu.ShiftScaleRotate(shift_limit=0.1,
                              scale_limit=0.1,
                              rotate_limit=15,
                              border_mode=BORDER_REFLECT,
                              p=0.5),
        # Slight four-point perspective warp.
        albu.IAAPerspective(scale=(0.02, 0.05), p=0.3),
        # Brightness/contrast jitter, 30% chance.
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2,
                                      p=0.3),
        # Mild gamma jitter, 30% chance.
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        # Hue/saturation/value jitter, 30% chance.
        albu.HueSaturationValue(p=0.3),
        # JPEG artifacts (quality >= 80), default application probability.
        albu.JpegCompression(quality_lower=80),
    ]
Example #8
0
def create_dg_data_transforms(args, split='train'):
    '''
        Define the domain generalization transforms according to different parameters.
        Args:
            args ([type]): contains the specific parameters
            split (str, optional):
                'train': generate the domain generalization transforms for training
                'val': generate the domain generalization transforms for validation
                'test': generate the domain generalization transforms for testing
    '''
    base_transform = create_base_transforms(args, split=split)
    if split == 'train':
        dg_aug_transform = alb.Compose([
            alb.HueSaturationValue(p=0.1),
        ])
        # Star-unpacking a Compose iterates its child transforms, so the two
        # pipelines are flattened into one.
        data_transform = alb.Compose([*dg_aug_transform, *base_transform])

    else:
        data_transform = base_transform

    return data_transform
def get_riadd_test_transforms(args):
    """Test-time transforms for RIADD: resize plus light random jitter
    (acts as test-time augmentation), ImageNet normalization, tensorize."""
    side = args['img_size']
    steps = [
        albumentations.Resize(side, side),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.HueSaturationValue(hue_shift_limit=10,
                                          sat_shift_limit=10,
                                          val_shift_limit=10,
                                          p=0.5),
        albumentations.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                                contrast_limit=(-0.2, 0.2),
                                                p=0.5),
        albumentations.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ]
    return albumentations.Compose(steps)


# if __name__ == '__main__':
#     img = cv2.imread('/media/ExtDiskB/Hanson/datasets/wheat/RIADD/valid/1.png')
#     img1 = preprocessing(img)
#     # result=color_seperate(hsv_img, thresh_image)
#     cv2.imwrite('1222.png',img1)
def generate_ds(size):
    """Create a HubDataset whose tiles are augmented and resized to `size`."""
    warp = A.OneOf([
        A.OpticalDistortion(p=0.4),
        A.GridDistortion(p=.1),
        A.IAAPiecewiseAffine(p=0.4),
    ], p=0.3)
    color = A.OneOf([
        A.HueSaturationValue(10, 15, 10),
        A.CLAHE(clip_limit=3),
        A.RandomBrightnessContrast(),
    ], p=0.5)
    pipeline = A.Compose([
        A.Resize(size, size, p=1.0),
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.RandomRotate90(),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                           rotate_limit=20, p=0.9,
                           border_mode=cv2.BORDER_REFLECT),
        warp,
        color,
    ], p=1.0)
    return HubDataset(DATA_PATH, window=WINDOW, overlap=MIN_OVERLAP,
                      transform=pipeline)
def even_more_transform(height, width, mappings, p=2 / 3):
    """Heavy degradation pipeline: compression artifacts / downscaling,
    flips, noise, and color jitter, applied with overall probability 0.9.

    NOTE(review): `scale` is drawn ONCE when the pipeline is built, not per
    sample — every call of the returned Compose uses the same downscale
    factor. Confirm this is intended.
    NOTE(review): the `p` parameter is unused; the Compose probability is
    hard-coded to 0.9 below.
    """
    scale = random.randint(2, 4)
    return Compose([
        OneOf([
            JpegCompression(quality_lower=20, quality_upper=70, p=0.5),
            Downscale(scale_min=0.25, scale_max=0.50, interpolation=1, p=0.5),
            Resize(height // scale, width // scale, interpolation=1, p=1.0)
        ],
              p=0.6),
        HorizontalFlip(p=0.5),
        A.augmentations.transforms.GaussNoise(p=0.2),
        A.RandomBrightnessContrast(p=0.2),
        A.RandomGamma(p=0.2),
        A.CLAHE(p=0.2),
        A.ChannelShuffle(p=0.2),
        A.MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True, p=0.1),
        A.HueSaturationValue(
            hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.2),
    ],
                   p=0.9,
                   # Extra named targets (e.g. second image/mask keys).
                   additional_targets=mappings)
Example #12
0
    def compose_transform_tr(self):
        """Training transform: letterbox to `self.base_size`, apply
        photometric jitter, normalize, and convert to a tensor."""
        letterbox = [
            A.LongestMaxSize(self.base_size, always_apply=True),
            A.PadIfNeeded(self.base_size,
                          self.base_size,
                          always_apply=True,
                          border_mode=cv2.BORDER_CONSTANT),
        ]
        photometric = [
            A.RandomBrightnessContrast(),
            A.HueSaturationValue(),
            A.FancyPCA(),
            A.RandomGamma(),
            A.GaussianBlur(),
        ]
        finalize = [A.Normalize(), ToTensorV2()]
        return A.Compose(letterbox + photometric + finalize)
Example #13
0
def get_train_transforms():
    """Training augmentations for 64x64 detection inputs (COCO-format boxes)."""
    color_jitter = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2,
                                       p=0.9),
        ],
        p=0.9,
    )
    return A.Compose(
        [
            color_jitter,
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=64, width=64, p=1),
            A.Cutout(num_holes=8, max_h_size=4, max_w_size=4,
                     fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='coco',
                                 min_area=10,
                                 min_visibility=0,
                                 label_fields=['labels']),
    )
Example #14
0
def medium_augmentations(image_size: Tuple[int, int], rot_angle=15):
    """Medium-strength augmentations for (height, width) = `image_size`.

    Args:
        image_size: target (height, width) of the output.
        rot_angle: maximum absolute rotation in degrees for ShiftScaleRotate.

    Fix: `rot_angle` was accepted but ignored (rotate_limit was hard-coded
    to 15); it is now wired through. The default preserves old behavior.
    """
    return A.Compose([
        # Random zoom-crop at increasing zoom factors, else plain resize.
        A.OneOf([
            A.RandomSizedCrop((image_size[0], int(image_size[0] * 1.25)), image_size[0], image_size[1], p=0.05),
            A.RandomSizedCrop((image_size[0], int(image_size[0] * 1.5)), image_size[0], image_size[1], p=0.10),
            A.RandomSizedCrop((image_size[0], int(image_size[0] * 2)), image_size[0], image_size[1], p=0.15),
            A.Resize(image_size[0], image_size[1], p=0.7),
        ], p=1.0),

        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.05,
                           rotate_limit=rot_angle,
                           border_mode=cv2.BORDER_REFLECT, p=0.3),

        # Photometric jitter.
        A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.2),
        A.RandomGamma(gamma_limit=(85, 115), p=0.2),
        A.HueSaturationValue(p=0.2),
        A.CLAHE(p=0.2),
        A.JpegCompression(quality_lower=50, p=0.2),

        A.Normalize(),
        ToTensor()
    ])
def get_training_augmentation():
    """Random per-epoch training augmentations (geometry + photometry);
    masks are re-binarized at the end via round_clip_0_1."""
    steps = [
        # Mirror across the vertical axis.
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.2, rotate_limit=10,
                           shift_limit=0.05, p=0.5, border_mode=0),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf([A.CLAHE(p=1), A.RandomBrightness(p=1), A.RandomGamma(p=1)],
                p=0.5),
        A.OneOf([A.RandomContrast(p=1), A.HueSaturationValue(p=1)],
                p=0.5),
        # Keep masks binary after interpolation.
        A.Lambda(mask=round_clip_0_1),
    ]
    return A.Compose(steps)
def ImgAug(i0):
    """Randomly augment one image and rescale it to [-1, 1].

    Args:
        i0: image as a numpy array, or a tensor-like object exposing
            `.numpy()` (e.g. a TensorFlow tensor).

    Returns:
        float32 numpy array scaled to [-1, 1].
    """
    Aug = albumentations.Compose(
        [
            albumentations.HorizontalFlip(p=0.25),
            albumentations.RandomGamma((80, 120), p=0.25),
            albumentations.HueSaturationValue(hue_shift_limit=20, p=0.25),
            albumentations.Blur(p=0.25),
            albumentations.ShiftScaleRotate(
                shift_limit=0, rotate_limit=20, p=0.25),
            albumentations.augmentations.transforms.ToGray(p=0.25)
        ],
        p=0.50)

    # Use isinstance rather than a direct type comparison so ndarray
    # subclasses are accepted too.
    if not isinstance(i0, np.ndarray):
        i0 = i0.numpy()
    i0 = Aug(image=i0.astype('uint8'))['image']
    # Map uint8 [0, 255] -> float32 [-1, 1].
    i0 = i0.astype('float32') / 127.5 - 1

    return i0
Example #17
0
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    """Build an image-augmentation closure.

    Args:
        size: side length (pixels) used by the crop and pad stages.
        scope: strength preset — one of 'strong', 'weak', 'geometric',
            'empty'.
        crop: cropping strategy — 'random' or 'center'.

    Returns:
        A function mapping an image array to its augmented version.

    Raises:
        KeyError: if `scope` or `crop` is not one of the listed options.
    """
    augs = {'strong': albu.Compose([albu.HorizontalFlip(),
                                    albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=20, p=.4),
                                    albu.ElasticTransform(),
                                    albu.OpticalDistortion(),
                                    albu.OneOf([
                                        albu.CLAHE(clip_limit=2),
                                        albu.IAASharpen(),
                                        albu.IAAEmboss(),
                                        albu.RandomBrightnessContrast(),
                                        albu.RandomGamma()
                                    ], p=0.5),
                                    albu.OneOf([
                                        albu.RGBShift(),
                                        albu.HueSaturationValue(),
                                    ], p=0.5),
                                    ]),
            'weak': albu.Compose([albu.HorizontalFlip(),
                                  ]),
            # Exactly one geometric transform per call.
            'geometric': albu.OneOf([albu.HorizontalFlip(always_apply=True),
                                     albu.ShiftScaleRotate(always_apply=True, scale_limit=.5, rotate_limit=30),
                                     albu.Transpose(always_apply=True),
                                     albu.OpticalDistortion(always_apply=True, distort_limit=0.1, shift_limit=0.1),
                                     albu.ElasticTransform(always_apply=True),
                                     ]),
            'empty': NoOp(),
            }

    aug_fn = augs[scope]
    crop_fn = {'random': albu.RandomCrop(size, size, always_apply=True),
               'center': albu.CenterCrop(size, size, always_apply=True)}[crop]
    # Pad last so outputs are always at least size x size.
    pad = albu.PadIfNeeded(size, size)

    pipeline = albu.Compose([aug_fn, crop_fn, pad])

    def process(a):
        # Apply the composed pipeline to a single image array.
        r = pipeline(image=a)
        return r['image']

    return process
    def setup(self, stage: str):
        """Split image files into train/val folds and build the datasets.

        Args:
            stage: framework-supplied stage name; unused here.
        """
        img_files = get_img_files()

        # Deterministic K-fold split; self.hp.fold selects the split used.
        folds = KFold(
            n_splits=self.hp.n_splits,
            random_state=self.hp.seed,
            shuffle=True,
        )
        train_idx, val_idx = list(folds.split(img_files))[self.hp.fold]

        # Training set gets geometric + photometric augmentation and dropout.
        self.train_dataset = MaskDataset(
            img_files[train_idx],
            transform=A.Compose([
                A.RandomResizedCrop(
                    self.hp.img_size,
                    self.hp.img_size,
                ),
                A.Rotate(13),
                A.HorizontalFlip(),
                A.RandomBrightnessContrast(),
                A.HueSaturationValue(),
                A.RGBShift(),
                A.RandomGamma(),
                MyCoarseDropout(
                    min_holes=1,
                    max_holes=8,
                    max_height=32,
                    max_width=32,
                ),
            ]),
        )
        # Validation set is deterministic: resize only.
        self.val_dataset = MaskDataset(
            img_files[val_idx],
            transform=A.Compose([
                A.Resize(
                    self.hp.img_size,
                    self.hp.img_size,
                ),
            ]),
        )
Example #19
0
    def _setup_transform(self, cfg):
        """Build the augmentation/preprocessing pipelines from `cfg`.

        Creates two albumentations pipelines — one applied jointly to
        image + mask (geometric), one to image pixels only — plus the
        torchvision resize/tensor/normalize steps.
        """
        # Albumentation example: https://albumentations.readthedocs.io/en/latest/examples.html
        # Geometric transforms must be applied to image and mask together.
        self.img_mask_transform = A.Compose([
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=175, p=0.8, border_mode=cv2.BORDER_CONSTANT),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.ElasticTransform(),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.IAAPiecewiseAffine(),
            ]),
            A.OneOf([
                    A.RandomCrop(height=self.size_crop,width=self.size_crop,p=0.5),  
                    A.CenterCrop(height=self.size_crop,width=self.size_crop,p=0.5)
            ]),            
            A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0,p=0.5),
            ],p=0.8)

        # Pixel-level transforms touch the image only, never the mask.
        self.img_pixel_transform = A.Compose([
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.OneOf([
                A.IAASharpen(),
                A.IAAEmboss(),
                # A.RandomBrightnessContrast(),            
            ], p=0.3),
            A.HueSaturationValue(hue_shift_limit=3,sat_shift_limit=20,val_shift_limit=3 ,p=0.2),
        ],p=0.5)
        # Torch transform
        self.resize_transform = transforms.Resize(cfg.MODEL.IMAGE_SIZE, Image.NEAREST)
        self.to_tensor_transform = transforms.ToTensor()
        self.normalize_transform = transforms.Normalize(mean=cfg.TRAIN.NORMALIZE_MEAN, std=cfg.TRAIN.NORMALIZE_STD)
Example #20
0
def aug_heavy(prob=0.9):
    """Heavy augmentation pipeline applied with overall probability `prob`."""
    sharpen = aug.OneOf([
        aug.CLAHE(clip_limit=2, p=.5),
        aug.IAASharpen(p=.25),
        aug.IAAEmboss(p=.25),
    ], p=.35)
    noise = aug.OneOf([
        aug.IAAAdditiveGaussianNoise(p=.3),
        aug.GaussNoise(p=.7),
        SaltPepperNoise(level_limit=0.0002, p=.7),
        aug.ISONoise(p=.3),
    ], p=.5)
    blur = aug.OneOf([
        aug.MotionBlur(p=.2),
        aug.MedianBlur(blur_limit=3, p=.3),
        aug.Blur(blur_limit=3, p=.5),
    ], p=.4)
    tone = aug.OneOf([
        aug.RandomContrast(p=.5),
        aug.RandomBrightness(p=.5),
        aug.RandomGamma(p=.5),
    ], p=.4)
    warp = aug.OneOf([
        aug.GridDistortion(p=.2),
        aug.ElasticTransform(alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03, p=.2),
        aug.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=.2),
    ], p=.6)
    return aug.Compose([
        aug.Flip(),
        sharpen,
        noise,
        blur,
        tone,
        aug.ShiftScaleRotate(shift_limit=.0625, scale_limit=0.1,
                             rotate_limit=12, p=.7),
        warp,
        aug.HueSaturationValue(p=.5),
    ], p=prob)
def get_medium_augmentations(image_size):
    """Medium-strength augmentation pipeline for (height, width) images.

    Mixes geometric transforms, custom project transforms (ZeroTopAndBottom,
    IndependentRandomBrightnessContrast, FancyPCA, ChannelIndependentCLAHE)
    and standard photometric jitter; no normalization or tensor conversion.
    """
    return A.Compose([
        # Either distort geometry one way or leave the image untouched.
        A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.05,
                               scale_limit=0.1,
                               rotate_limit=15,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.OpticalDistortion(distort_limit=0.11,
                                shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.NoOp()
        ]),
        ZeroTopAndBottom(p=0.3),
        # Zoom-crop between 75% and 100% of the original height.
        A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75),
                                          image_size[0]),
                          height=image_size[0],
                          width=image_size[1],
                          p=0.3),
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),
        A.OneOf([
            FancyPCA(alpha_std=4),
            A.RGBShift(r_shift_limit=20, b_shift_limit=15, g_shift_limit=15),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
            A.NoOp()
        ]),
        A.OneOf([ChannelIndependentCLAHE(p=0.5),
                 A.CLAHE(),
                 A.NoOp()]),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5)
    ])
Example #22
0
def get_training_augmentation():
    """Training augmentation pipeline for 224x224 crops."""
    train_transform = [

        albu.HorizontalFlip(p=0.5),  # horizontal flip

        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),  # shift / scale / rotate

        albu.PadIfNeeded(min_height=224, min_width=224, always_apply=True, border_mode=0),  # pad up to 224x224
        albu.RandomCrop(height=224, width=224, always_apply=True),  # random crop

        albu.IAAAdditiveGaussianNoise(p=0.2),  # Add gaussian noise to the input image.
        albu.IAAPerspective(p=0.5),  # Perform a random four point perspective transform of the input

        albu.OneOf(
            [
                albu.CLAHE(p=1),  # contrast-limited adaptive histogram equalization
                albu.RandomBrightnessContrast(p=1),  # Randomly change brightness and contrast
                albu.RandomGamma(p=1),  # gamma transform
            ],
            p=0.9,
        ),

        albu.OneOf(
            [
                albu.IAASharpen(p=1),  # Sharpen the input image and overlays the result with the original image.
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),

        albu.OneOf(
            [
                albu.RandomBrightnessContrast(p=1),
                albu.HueSaturationValue(p=1),  # Randomly change hue, saturation and value of the input image.
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
def get_train_transforms(height=1024, width=1024):
    """Data augmentation and resize for the training dataset (detection,
    pascal_voc boxes)."""
    # Cutout at one of three granularities: few large holes, some medium,
    # or many small ones.
    cutout = A.OneOf([
        A.Cutout(num_holes=8, max_h_size=64, max_w_size=64,
                 fill_value=0, p=0.6),
        A.Cutout(num_holes=16, max_h_size=32, max_w_size=32,
                 fill_value=0, p=0.3),
        A.Cutout(num_holes=64, max_h_size=16, max_w_size=16,
                 fill_value=0, p=0.1),
    ], p=0.5)

    color_jitter = A.OneOf([
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.9),
        A.RandomBrightnessContrast(brightness_limit=0.2,
                                   contrast_limit=0.2,
                                   p=0.9),
    ], p=0.9)

    steps = [
        A.RandomSizedCrop(min_max_height=(740, 840),
                          height=1024, width=1024, p=0.5),
        color_jitter,
        A.ToGray(p=0.01),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Blur(blur_limit=5, p=0.05),
        A.Resize(height=height, width=width, p=1),
        cutout,
        ToTensorV2(p=1.0),
    ]
    return A.Compose(steps,
                     p=1.0,
                     bbox_params=A.BboxParams(format='pascal_voc',
                                              min_area=0,
                                              min_visibility=0.3,
                                              label_fields=['labels']))
def get_train_transform():
    """Light training augmentation for 512x512 detection (pascal_voc boxes)."""
    jitter = A.OneOf([
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.9),
        A.RandomBrightnessContrast(brightness_limit=0.2,
                                   contrast_limit=0.2,
                                   p=0.9),
    ], p=0.9)
    return A.Compose(
        [
            jitter,
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=512, width=512, p=1),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']),
    )
Example #25
0
 def __init__(self, opt):
     """Initialize the 3D dataset and its augmentation pipeline.

     Args:
         opt: options object; `input_h`/`input_w` define the letterbox size.
     """
     super(Dataset3D, self).__init__()
     self.opt = opt
     # Letterbox to the larger input dimension, then pad to the exact size;
     # keypoints are tracked in xy format and kept even when off-image.
     self.augs = A.Compose([
         A.LongestMaxSize(max(self.opt.input_h, self.opt.input_w), always_apply=True),
         A.PadIfNeeded(self.opt.input_h, self.opt.input_w, border_mode=cv2.BORDER_CONSTANT, value=[0, 0, 0]),
         A.Blur(blur_limit=(4, 8), p=0.1),
         # A.ShiftScaleRotate(shift_limit=0.2, scale_limit=(-0.4, 0.2), rotate_limit=0,
         #                    border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=0.8),
         A.OneOf([
             A.RandomBrightnessContrast(always_apply=True),
             A.RandomGamma(gamma_limit=(60, 140), always_apply=True),
             # A.CLAHE(always_apply=True)
         ], p=0.5),
         A.OneOf([
             A.RGBShift(),
             A.HueSaturationValue(),
             A.ToGray()
         ], p=0.1)
     ],
         keypoint_params=A.KeypointParams(format='xy', remove_invisible=False)
     )
Example #26
0
def get_training_augmentation():
    """Build the random transformations applied to the dataset each epoch.

    Returns:
        An albumentations Compose applying additive noise, perspective
        warps, and photometric jitter; masks are re-binarized at the end
        via round_clip_0_1.
    """
    train_transform = [
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        # Keep masks binary after interpolation.
        A.Lambda(mask=round_clip_0_1),
    ]
    return A.Compose(train_transform)
Example #27
0
def get_train_transforms(input_res):
    """Training augmentations: rotate, zoom-crop to 85% of `input_res`,
    flips, color jitter, cutout, and tensor conversion. Several stages
    are left commented out below, presumably from experimentation."""
    return A.Compose([
#             A.JpegCompression(p=0.5),
            A.Rotate(limit=45, p=1.0),
#             A.OneOf([
#                 A.OpticalDistortion(),
#                 A.GridDistortion(),
# #                 A.IAAPiecewiseAffine(),
#             ]),
            A.RandomSizedCrop(min_max_height=(int(input_res*0.7), input_res),
                              height=int(input_res*0.85), width=int(input_res*0.85), p=1.0),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
#             A.GaussianBlur(p=0.3),
            A.OneOf([
                A.RandomBrightnessContrast(),   
                A.HueSaturationValue(),
            ]),
            A.Cutout(num_holes=8, max_h_size=int(input_res*0.85)//8, max_w_size=int(input_res*0.85)//8, fill_value=0, p=0.3),
#             A.Normalize(),
            ToTensorV2(),
        ], p=1.0)
Example #28
0
def get_tta(image_size=224):
    """Test-time augmentation: random crop/flips/jitter plus ImageNet
    normalization and tensor conversion."""
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225],
    }
    steps = [
        A.RandomResizedCrop(image_size, image_size),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.5),
        A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                   contrast_limit=(-0.1, 0.1),
                                   p=0.5),
        ToTensor(normalize=imagenet_stats),
    ]
    return A.Compose(steps, p=1.0)
def get_train_transforms():
    """Conservative training augmentations: color jitter, rare grayscale,
    and resize to 512x512. Stronger (crop/flip/bbox) stages are left
    commented out below, presumably disabled during experimentation."""
    return A.Compose(
        [
            # A.RandomSizedCrop(min_max_height=(512, 512), height=1024, width=1024, p=0.5),

            #### THESE ARE THE SAFE ONES ####
            A.OneOf([
                A.HueSaturationValue(hue_shift_limit=0.2,
                                     sat_shift_limit=0.2,
                                     val_shift_limit=0.2,
                                     p=0.9),
                A.RandomBrightnessContrast(
                    brightness_limit=0.2, contrast_limit=0.2, p=0.9),
            ],
                    p=0.9),
            A.ToGray(p=0.01),
            # A.HorizontalFlip(p=0.5),
            # A.VerticalFlip(p=0.5),
            # #################################

            # A.RandomScale (scale_limit=(0.1, 2.0), interpolation=1, p=0.5),
            # A.OneOf([
            #     A.RandomSizedBBoxSafeCrop (256, 256, p=1.0),
            #     A.RandomSizedBBoxSafeCrop (1024, 1024, p=1.0),
            # ], p=0.9),
            # A.IAAPerspective (scale=(0.05, 0.1), keep_size=True, always_apply=False, p=0.5),
            A.Resize(height=512, width=512, p=1.0),

            # A.Cutout(num_holes=8, max_h_size=64, max_w_size=64, fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        # p=1.0,
        # bbox_params=A.BboxParams(
        #     format='pascal_voc',
        #     min_area=0,
        #     min_visibility=0,
        #     label_fields=['labels'],
        # )
    )
Example #30
0
def strong_aug(img_height, img_width, mean, std, p=.5):
    """Strong augmentation pipeline: resize, noise, blur, geometric
    distortion, sharpening/contrast, HSV jitter, GridMask, normalize.

    NOTE(review): the `p` parameter is unused — every stage below carries
    its own probability. Confirm whether it was meant to gate the Compose.
    NOTE(review): the third GridMask omits an explicit `p`, unlike its
    siblings (p=1); it runs at GridMask's default probability — confirm
    this asymmetry is intentional.
    """
    return A.Compose([
        A.Resize(img_height, img_width, always_apply=True),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ],
                p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=45,
                           p=0.3),
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ],
                p=0.2),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),
        ],
                p=0.3),
        A.HueSaturationValue(p=0.3),
        A.OneOf([
            GridMask(num_grid=3, rotate=15, p=1),
            GridMask(num_grid=3, mode=2, p=1),
            GridMask(num_grid=3, mode=0)
        ],
                p=0.2),
        A.Normalize(mean, std, always_apply=True)
    ])