# Example 1
def get_transform(train: bool, im_size: int = 400):
    """Build an albumentations detection pipeline.

    Args:
        train: when True, insert random augmentations between the resize
            and the normalization steps; otherwise resize/normalize only.
        im_size: square output size in pixels.

    Returns:
        An ``A.Compose`` expecting ``image``, ``bboxes`` (pascal_voc
        format) and ``category_ids`` keyword arguments.
    """
    # Both modes share the resize head and normalize/tensor tail; the
    # original duplicated the whole pipeline across the two branches.
    steps = [
        A.Resize(height=im_size, width=im_size,
                 interpolation=cv2.INTER_CUBIC),
    ]
    if train:
        steps += [
            A.ChannelShuffle(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.ColorJitter(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Blur(p=0.5),
        ]
    steps += [A.Normalize(), ToTensorV2()]
    return A.Compose(
        steps,
        bbox_params=A.BboxParams(format='pascal_voc',
                                 label_fields=['category_ids']))
def get_next_augmentation():
    """Compose channel/noise/contrast/blur augmentations (albumentations)."""
    pipeline = albu.Compose([
        albu.ChannelShuffle(p=0.1),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        # Exactly one lightness/contrast tweak, 90% of the time.
        albu.OneOf([albu.CLAHE(p=1),
                    albu.RandomBrightness(p=1),
                    albu.RandomGamma(p=1)], p=0.9),
        # Exactly one sharpen/blur op, 90% of the time.
        albu.OneOf([albu.IAASharpen(p=1),
                    albu.Blur(blur_limit=3, p=1),
                    albu.MotionBlur(blur_limit=3, p=1)], p=0.9),
        # Exactly one color-space jitter, 90% of the time.
        albu.OneOf([albu.RandomContrast(p=1),
                    albu.HueSaturationValue(p=1)], p=0.9),
    ])
    return pipeline
# Example 3
def get_training_augmentation():
    """Training augmentation: crop, flip, color, blur and weather effects."""
    steps = [
        A.RandomSizedCrop(min_max_height=(300, 360), height=320, width=320,
                          always_apply=True),
        A.HorizontalFlip(p=0.5),
        # One color/contrast adjustment — or nothing, via NoOp.
        A.OneOf([A.CLAHE(), A.RandomBrightnessContrast(), A.RandomGamma(),
                 A.HueSaturationValue(), A.NoOp()]),
        # One sharpen/blur op — or nothing.
        A.OneOf([A.IAASharpen(), A.Blur(blur_limit=3),
                 A.MotionBlur(blur_limit=3), A.NoOp()]),
        # One synthetic weather effect — or nothing.
        A.OneOf([A.RandomFog(), A.RandomSunFlare(src_radius=100),
                 A.RandomRain(), A.RandomSnow(), A.NoOp()]),
        A.Cutout(),
        A.Normalize(),
    ]
    return A.Compose(steps)
# Example 4
def gentle_transform(p):
    """Mild augmentation applied jointly to up to 65 aligned frames.

    ``additional_targets`` maps image1..image64 onto the 'image' target so
    every frame in a call receives the same random parameters.
    """
    gentle_steps = [
        # 50% flips and light filtering
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.OneOf([albu.IAASharpen(p=1),
                    albu.Blur(blur_limit=3, p=1),
                    albu.MotionBlur(blur_limit=3, p=1)], p=0.5),
        albu.OneOf([albu.RandomBrightness(p=1),
                    albu.RandomGamma(p=1)], p=0.5),
        # 20% geometric/noise perturbations
        albu.ShiftScaleRotate(rotate_limit=30,
                              scale_limit=0.15,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=[0, 0, 0],
                              p=0.2),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.2),
    ]
    extra_targets = {f'image{idx}': 'image' for idx in range(1, 65)}
    return albu.Compose(gentle_steps, p=p, additional_targets=extra_targets)
# Example 5
def get_augmentations_transform(crop_size=128, p=0.5, phase="train"):
    """Return an albumentations pipeline for the given phase.

    Args:
        crop_size: output side of the random resized crop (train/test).
        p: probability used for most stochastic transforms.
        phase: "train" or "test" (augmented) or "valid" (tensor-only).

    Returns:
        An ``A.Compose`` callable.

    Raises:
        TypeError: if ``phase`` is not one of the accepted values.
    """
    imagenet_stats = {'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225]}
    # BUG FIX: the original tested `phase == "train" or "test"`, which is
    # always truthy ("test" is a non-empty string), so every phase —
    # including "valid" — received the augmented pipeline.
    if phase in ("train", "test"):
        aug_factor_list = [
            A.RandomResizedCrop(height=crop_size, width=crop_size, scale=(0.8, 1.0)),
            A.Cutout(num_holes=8, p=p),
            A.RandomRotate90(p=p),
            A.HorizontalFlip(p=p),
            A.VerticalFlip(p=p),
            A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=p),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=p),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=p),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ], p=p),
            ToTensor(normalize=imagenet_stats)
        ]
        return A.Compose(aug_factor_list)
    elif phase == "valid":
        return A.Compose([ToTensor(normalize=imagenet_stats)])
    else:
        # BUG FIX: the original instantiated TypeError but never raised it,
        # silently returning None for unknown phases.
        raise TypeError("Invalid phase type.")
def augment(image):
    """Apply a randomized photometric/geometric augmentation to ``image``."""
    steps = [
        A.HorizontalFlip(p=0.5),
        A.OneOf([A.IAAAdditiveGaussianNoise(), A.GaussNoise()], p=0.2),
        A.OneOf([A.MotionBlur(p=.2),
                 A.MedianBlur(blur_limit=3, p=0.1),
                 A.Blur(blur_limit=3, p=0.1)], p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                           rotate_limit=15, p=0.2),
        A.OneOf([A.CLAHE(clip_limit=2), A.IAASharpen(), A.IAAEmboss(),
                 A.RandomBrightnessContrast(), A.RandomGamma()], p=0.5),
        A.HueSaturationValue(p=0.3),
    ]
    pipeline = A.Compose(steps)
    return pipeline(image=image)['image']
# Example 7
def transform_train(image, mask):
    """Jointly augment an image and its mask.

    Each geometric transform fires with probability 0.5; image and mask go
    through a single transform call so they receive identical random
    parameters.

    BUG FIX: the original applied each transform to image and mask in two
    separate calls. ``RandomRotate90`` samples its rotation factor per
    call, so the image and the mask could end up rotated by different
    multiples of 90 degrees, silently corrupting the training pair.

    Returns:
        (image, mask) tuple of augmented arrays.
    """
    geometric = (
        albumentations.RandomRotate90(p=1),
        albumentations.Transpose(p=1),
        albumentations.VerticalFlip(p=1),
        albumentations.HorizontalFlip(p=1),
    )
    for aug in geometric:
        if random.random() < 0.5:
            out = aug(image=image, mask=mask)
            image, mask = out['image'], out['mask']

    # Photometric changes apply to the image only; the mask is unaffected.
    if random.random() < 0.5:
        image = albumentations.RandomBrightness(0.1)(image=image)['image']
        image = albumentations.RandomContrast(0.1)(image=image)['image']
        image = albumentations.Blur(blur_limit=3)(image=image)['image']

    return image, mask
# Example 8
def predefined_transform() -> "A.Compose":
    """Build the augmentation pipeline from the albumentations example notebook.

    https://github.com/albumentations-team/albumentations_examples/blob/master/notebooks/example.ipynb

    :return: the composed transform. (The original ``-> None`` annotation
        was wrong — the function clearly returns an ``A.Compose``.)
    """

    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        # additive noise, 20% of the time
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.2),
        # one blur variant, 20% of the time
        A.OneOf([
            A.MotionBlur(p=.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        # one geometric distortion, 20% of the time
        A.OneOf([
            A.OpticalDistortion(p=0.3),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        # one contrast/sharpness enhancement, 30% of the time
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
            A.RandomBrightnessContrast(),
        ], p=0.3),
        A.HueSaturationValue(p=0.3),
    ])
# Example 9
    def __init__(self, folds, img_height, img_width, mean, std):
        """Dataset init for a grapheme-classification task.

        Args:
            folds: fold ids to keep; a single fold selects the validation
                pipeline (resize + normalize only).
            img_height: target resize height.
            img_width: target resize width.
            mean: normalization mean.
            std: normalization std.
        """
        # NOTE(review): hard-coded path relative to the working directory —
        # confirm it matches the runtime layout.
        df = pd.read_csv("../input/train_startified_fold5.csv")

        df = df[df.kfold.isin(folds)]
        self.image_ids = df.image_id.values
        self.grapheme_root = df.grapheme_root.values
        self.vowel_diacritic = df.vowel_diacritic.values
        self.consonant_diacritic = df.consonant_diacritic.values
        self.kfold = df.kfold.values

        self.img_height = img_height
        self.img_width = img_width

        # Validation: a single fold means no augmentation beyond resize/normalize.
        if len(folds) == 1:
            self.augmentations = albumentations.Compose([
                albumentations.Resize(img_height, img_width, always_apply=True),
                albumentations.Normalize(mean, std, always_apply = True)
        ])
        else:
            # Training: add blur, shift/scale/rotate and elastic distortion.
            self.augmentations = albumentations.Compose([
                albumentations.Resize(img_height, img_width, always_apply=True),
                albumentations.Blur(blur_limit=(7, 7), p=0.5),
                albumentations.ShiftScaleRotate(shift_limit = 0.7, scale_limit = 0.5, rotate_limit = 10),
                albumentations.ElasticTransform(),
                albumentations.Normalize(mean, std, always_apply = True)        
                ])
def build_transforms(args, class_index_dict: Dict[str,
                                                  int]) -> Tuple[any, any]:
    """Build (train, test) detection transforms for `args.image_size`."""
    train_steps = [
        A.HorizontalFlip(),
        A.RandomResizedCrop(width=args.image_size,
                            height=args.image_size,
                            scale=(0.8, 1.),
                            p=1.),
        # One photometric tweak, half the time.
        A.OneOf([A.RandomGamma(),
                 A.RandomBrightnessContrast(),
                 A.Blur(blur_limit=5)], p=0.5),
        ToTensorV2(),
    ]
    test_steps = [
        A.Resize(width=args.image_size, height=args.image_size),
        ToTensorV2(),
    ]
    train_transforms = AlbumentationsDetectionWrapperTransform(
        train_steps,
        annotation_transform=VOCAnnotationTransform(class_index_dict))
    test_transforms = AlbumentationsDetectionWrapperTransform(
        test_steps,
        annotation_transform=VOCAnnotationTransform(class_index_dict))
    return train_transforms, test_transforms
# Example 11
def get_transform(img, boxes, labels, masks):
    """Randomly augment an image together with its boxes, labels and masks.

    Returns:
        (image tensor scaled to [0, 1] in CHW order, bboxes, labels, masks).

    NOTE(review): `masks` is passed through the singular ``mask=`` target,
    so albumentations treats it as one (possibly multi-channel) mask array.
    If callers pass a list of per-instance masks, the plural ``masks=``
    target would be the correct choice — confirm the expected layout.
    """
    trans = A.Compose(
        [
            A.Blur(),
            A.VerticalFlip(),
            A.HorizontalFlip(p=0.5),
            # A.Rotate(limit=30, interpolation=1, border_mode=4, value=None, mask_value=None, always_apply=False, p=0.5),
            #A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, always_apply=False, p=0.5),
            #A.GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5),
            A.CLAHE(clip_limit=4.0,
                    tile_grid_size=(8, 8),
                    always_apply=False,
                    p=0.5)
        ],
        # A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0, always_apply=False, p=0.5)],
        bbox_params=A.BboxParams(format='pascal_voc',
                                 label_fields=['class_label']))
    img = np.array(img)

    masks = np.array(masks)
    transformed = trans(image=img,
                        bboxes=boxes,
                        mask=masks,
                        class_label=labels)
    transformed_img = transformed['image']
    transformed_bboxes = transformed['bboxes']
    transformed_masks = transformed['mask'].copy()
    transformed_labels = transformed['class_label']

    # HWC -> CHW, then scale pixel values into [0, 1] for PyTorch.
    transformed_img = np.transpose(transformed_img, (2, 0, 1))
    transformed_img = torch.as_tensor(transformed_img).div(255)
    return transformed_img, transformed_bboxes, transformed_labels, transformed_masks
# Example 12
 def get_transform(train: bool, im_size: int = 400):
     """Return an albumentations pipeline for training or evaluation.

     Training mode first applies exactly one of crop-and-resize or plain
     resize, then color/flip/blur jitter; evaluation mode only resizes
     and normalizes. Both end with ToTensorV2.
     """
     if train:
         aug = A.Compose([
             # One of: random crop to im_size, or cubic resize (50/50).
             A.OneOf([
                 A.RandomSizedCrop(min_max_height=(224, 720),
                                   height=im_size,
                                   width=im_size,
                                   p=0.5),
                 A.Resize(height=im_size,
                          width=im_size,
                          interpolation=cv2.INTER_CUBIC,
                          p=0.5)
             ],
                     p=1),
             A.ChannelShuffle(p=0.5),
             A.HorizontalFlip(p=0.5),
             A.ColorJitter(p=0.5),
             # A.OneOf([
             #     A.ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03, p=0.5),
             #     A.GridDistortion(p=0.5),
             #     A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
             # ], p=0.8),
             A.Blur(p=0.5),
             A.Normalize(),
             ToTensorV2(),
         ])
     else:
         aug = A.Compose([
             A.Resize(height=im_size,
                      width=im_size,
                      interpolation=cv2.INTER_CUBIC),
             A.Normalize(),
             ToTensorV2(),
         ])
     return aug
# Example 13
def albumtwo(image_file, path_to_train):
    """Save an augmented copy of ``image_file`` (suffix '_atwo') and clone its label file."""
    pipeline = A.Compose([
        A.RandomBrightnessContrast(p=0.6),
        A.OneOf([A.IAAAdditiveGaussianNoise(), A.GaussNoise()], p=0.4),
        A.OneOf([A.MotionBlur(p=.2),
                 A.MedianBlur(blur_limit=3, p=0.1),
                 A.Blur(blur_limit=3, p=0.1)], p=0.2),
        A.HueSaturationValue(),
        A.RGBShift(),
    ])
    # Read as RGB for the augmentation, then write next to the source file.
    img = cv2.cvtColor(cv2.imread(image_file), cv2.COLOR_BGR2RGB)
    augmented = pipeline(image=img)["image"]
    cv2.imwrite(image_file[:-4] + '_atwo.jpg', augmented)
    # Duplicate the matching YOLO label file under the new name.
    label_name = os.path.basename(image_file)[:-4] + '.txt'
    shutil.copy2(path_to_train + "/labels/" + label_name,
                 path_to_train + "/labels/" + label_name[:-4] + '_atwo.txt')
# Example 14
def strong_aug(p=.5):
    """Heavy augmentation pipeline; ``p`` gates the whole composition."""
    noise = A.OneOf([A.IAAAdditiveGaussianNoise(), A.GaussNoise()], p=0.2)
    blur = A.OneOf([A.MotionBlur(p=.2),
                    A.MedianBlur(blur_limit=3, p=0.1),
                    A.Blur(blur_limit=3, p=0.1)], p=0.2)
    distort = A.OneOf([A.OpticalDistortion(p=0.3),
                       A.GridDistortion(p=.1),
                       A.IAAPiecewiseAffine(p=0.3)], p=0.2)
    enhance = A.OneOf([A.CLAHE(clip_limit=2),
                       A.IAASharpen(),
                       A.IAAEmboss(),
                       A.RandomBrightnessContrast()], p=0.3)
    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        noise,
        blur,
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                           rotate_limit=45, p=0.2),
        distort,
        enhance,
        A.HueSaturationValue(p=0.3),
    ], p=p)
# Example 15
def augmentations(image_size: int):
    """Detection training augmentations (uses module-level BBOX_PARAMS).

    Args:
        image_size: currently only consumed by the commented-out
            ``pre_transform`` step; kept for interface compatibility.
    """
    channel_augs = [
        A.HueSaturationValue(p=0.5),
        A.ChannelShuffle(p=0.5),
    ]

    result = [
        # *pre_transform(image_size),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.5),
        # NOTE(review): blur appears both here and in the Median/Motion
        # OneOf further below, and RandomGamma is applied twice — possibly
        # deliberate stacking, but worth confirming with the author.
        A.OneOf([
            A.MotionBlur(blur_limit=3, p=0.7),
            A.MedianBlur(blur_limit=3, p=1.0),
            A.Blur(blur_limit=3, p=0.7),
        ],
                p=0.5),
        A.OneOf(channel_augs),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
        ],
                p=0.5),
        A.RandomBrightnessContrast(brightness_limit=0.5,
                                   contrast_limit=0.5,
                                   p=0.5),
        A.RandomGamma(p=0.5),
        A.OneOf([A.MedianBlur(p=0.5), A.MotionBlur(p=0.5)]),
        A.RandomGamma(gamma_limit=(85, 115), p=0.5),
    ]
    return A.Compose(result, bbox_params=BBOX_PARAMS)
# Example 16
    def __init__(self, csv_path, data_path, train=True):
        """Dataset init: load the annotation CSV and build augmentations.

        Args:
            csv_path: path to the annotation CSV.
            data_path: root directory of the images.
            train: stored on the instance for later use.
        """
        # BUG FIX: the original branched on `train` but both branches read
        # the same CSV identically — collapsed to a single read.
        self.data = pd.read_csv(csv_path)

        self.transforms = transforms.ToTensor()
        self.data_path = data_path
        self.train = train

        # NOTE(review): this pipeline is constructed but never used below —
        # presumably meant to be appended to `self.aug`; confirm intent.
        rotate_crop = albu.Compose([
            albu.Rotate(limit=10, p=1.),
            albu.RandomSizedCrop((185, 202), height=224, width=224, p=1.)
        ], p=0.5)

        # One color-level perturbation, 30% of the time.
        color = albu.OneOf([
            albu.RandomBrightnessContrast(),
            albu.Blur(blur_limit=3),
            albu.GaussNoise()
        ], p=0.3)

        # Bbox-aware augmentation applied in __getitem__ (pascal_voc boxes).
        self.aug = albu.Compose(
            [
                albu.HorizontalFlip(),
                albu.RandomSizedBBoxSafeCrop(
                    height=224, width=224, erosion_rate=0.6, p=0.45),
                color,
            ],
            bbox_params={
                'format': 'pascal_voc',
                'label_fields': ['category_id']
            },
            p=1.)
# Example 17
def get_training_augmentation(type='basic'):
    """Return a training augmentation pipeline.

    Args:
        type: 'basic', 'intermediate' or 'advance'. (The name shadows the
            builtin but is kept so keyword callers keep working.)

    Raises:
        ValueError: for any other value of ``type``.
    """
    # Crop + flips are shared by every variant.
    common = [
        album.RandomCrop(height=1024, width=1024, always_apply=True),
        album.HorizontalFlip(p=0.5),
        album.VerticalFlip(p=0.5),
    ]
    if type == 'basic':
        extra = []
    elif type == 'advance':
        extra = [
            album.RandomBrightnessContrast(brightness_limit=0.25, contrast_limit=0.25),
            album.HueSaturationValue(p=0.5),
            album.Blur(blur_limit=7, p=0.5),
            album.ToGray(p=0.5),
            album.IAASharpen(p=0.5),
        ]
    elif type == 'intermediate':
        extra = [
            album.Rotate(limit=90, p=0.5),
            album.RandomBrightnessContrast(brightness_limit=0.25, contrast_limit=0.25),
            album.HueSaturationValue(p=0.5),
        ]
    else:
        raise ValueError(f'type {type} is not supported')

    return album.Compose(common + extra)
# Example 18
def augmentations(prob=0.95):
    """Aggressive augmentation stack; every stage fires with probability ``prob``."""
    stages = [
        A.OneOf([A.HorizontalFlip(p=prob),
                 A.VerticalFlip(p=prob)], p=prob),
        A.ShiftScaleRotate(p=prob, shift_limit=0.2, scale_limit=.2,
                           rotate_limit=45),
        A.RandomRotate90(p=prob),
        A.Transpose(p=prob),
        # one contrast/gamma/brightness tweak
        A.OneOf([A.RandomContrast(limit=0.2, p=prob),
                 A.RandomGamma(gamma_limit=(70, 130), p=prob),
                 A.RandomBrightness(limit=0.2, p=prob)], p=prob),
        A.HueSaturationValue(p=prob),
        # one blur variant
        A.OneOf([A.MotionBlur(p=prob),
                 A.MedianBlur(blur_limit=3, p=prob),
                 A.Blur(blur_limit=3, p=prob)], p=prob),
        A.OpticalDistortion(p=prob),
        A.GridDistortion(p=prob),
        # one noise variant
        A.OneOf([A.IAAAdditiveGaussianNoise(p=prob),
                 A.GaussNoise(p=prob)], p=prob),
    ]
    return A.Compose(stages, p=prob)
 def build_transforms(cfg, split='train'):
     """Build transforms from a config exposing ``cfg.INPUT.SIZE``.

     Train split: resize + rotation/noise/contrast/blur, with the whole
     composition gated at p=0.6. Any other split: resize only.
     """
     if split == 'train':
         train_transform = [
             albu.Resize(cfg.INPUT.SIZE, cfg.INPUT.SIZE),
             # albu.HorizontalFlip(p=0.5),
             albu.OneOf(
                 [
                     # albu.RandomRotate90(p=1),
                     albu.Rotate(p=1, limit=(-15, 15)),
                 ],
                 p=0.5),
             albu.GaussNoise(p=0.5),
             albu.OneOf(
                 [
                     # albu.CLAHE(p=1),
                     albu.RandomBrightnessContrast(p=1),
                 ],
                 p=0.9,
             ),
             albu.OneOf(
                 [
                     albu.IAASharpen(p=1),
                     albu.Blur(p=1),
                     albu.MedianBlur(p=1),
                 ],
                 p=0.9,
             ),
         ]
         # NOTE(review): the composition-level p=0.6 gates *everything*,
         # including the Resize — ~40% of samples skip the pipeline
         # entirely; confirm that is intentional.
         return albu.Compose(train_transform, p=0.6)
     else:
         test_transform = [albu.Resize(cfg.INPUT.SIZE, cfg.INPUT.SIZE)]
         return albu.Compose(test_transform)
# Example 20
 def get_transforms(stage: str = None, mode: str = None):
     """Return transforms for 'train' or 'valid' mode.

     Args:
         stage: unused; kept for interface compatibility.
         mode: 'train' or 'valid'; any other value raises ValueError.
     """
     if mode == 'train':
         return albumentations.Compose([
             # blur
             albumentations.OneOf([
                 albumentations.Blur((1, 4), p=1.0),
                 albumentations.GaussianBlur(3, p=1.0),
                 albumentations.MedianBlur(blur_limit=5, p=1.0),
             ], p=3/4),
             # transformations
             albumentations.ShiftScaleRotate(scale_limit=0.2, rotate_limit=25, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.0),
             # cut and drop
             # distortion
             albumentations.OneOf([
                 albumentations.OpticalDistortion(0.6, p=1.0),
                 albumentations.GridDistortion(8, 0.06, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.0),
                 albumentations.ElasticTransform(sigma=10, alpha=1, alpha_affine=10, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.0),
             ], p=3/4),
             # add noise
             albumentations.OneOf([
                 albumentations.GaussNoise((0, 250), p=1.0),
                 albumentations.MultiplicativeNoise(p=1.0),
             ], p=2/3),
             # common
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             GridMask((3, 7), rotate=15, p=0.75),
             ToTensorV2(),
         ])
     elif mode == 'valid':
         return albumentations.Compose([
             albumentations.Normalize(TRAIN_MEAN, TRAIN_STD),
             ToTensorV2(),
         ])
     else:
         raise ValueError('mode is %s' % mode)
# Example 21
def hard_transforms():
    """Return the list of 'hard' photometric/geometric augmentations."""
    blur_choice = albu.OneOf([
        albu.MotionBlur(p=0.2),
        albu.MedianBlur(blur_limit=3, p=0.1),
        albu.Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    distortion_choice = albu.OneOf([
        albu.OpticalDistortion(p=0.3),
        albu.GridDistortion(p=0.1),
        albu.IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    return [
        albu.RandomRotate90(),
        # random shift/stretch/rotation, half the time
        albu.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1,
                              rotate_limit=15, border_mode=0, p=0.5),
        # brightness/contrast jitter, 30%
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2, p=0.3),
        # gamma jitter, 30%
        albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        # hue/saturation/value jitter, 30%
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(quality_lower=80),
        blur_choice,
        distortion_choice,
    ]
# Example 22
def augment_image(image):
    """Augment a single image array and return the transformed result."""
    pipeline = A.Compose([
        A.HorizontalFlip(),
        A.OneOf([A.IAAAdditiveGaussianNoise(), A.GaussNoise()], p=0.2),
        A.OneOf([A.MotionBlur(),
                 A.MedianBlur(blur_limit=3),
                 A.Blur(blur_limit=3)], p=0.2),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                           rotate_limit=45, p=0.2),
        A.OneOf([A.OpticalDistortion(p=0.3)], p=0.2),
        A.OneOf([A.IAASharpen(p=1.),
                 A.IAAEmboss(p=1.),
                 A.RandomBrightnessContrast(p=1.)], p=0.3),
        # gentle color jitter
        A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5,
                             val_shift_limit=5, p=0.3),
    ])
    return pipeline(image=image)['image']
# Example 23
    def add_transforms(self):
        """Extend ``self.transforms`` for the current mode.

        Train: resize 10% larger than ``self.img_size``, random crop back,
        flips/rotation, then one pixel-level and one affine-level
        augmentation. Eval: plain resize to ``self.img_size``.
        """
        if self.train:
            self.transforms += [
                A.Resize(int(self.img_size[0] * 1.1), int(self.img_size[1] * 1.1)),
                A.RandomCrop(self.img_size[0], self.img_size[1]),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.Rotate(p=0.5, border_mode=BORDER_REFLECT, value=0),

                # Pixels
                A.OneOf([
                    A.IAAEmboss(p=1.0),
                    A.IAASharpen(p=1.0),
                    A.Blur(p=1.0),
                ], p=0.5),

                # Affine
                A.OneOf([
                    A.ElasticTransform(p=1.0),
                    A.IAAPiecewiseAffine(p=1.0)
                ], p=0.5),
            ]
        else:
            self.transforms += [
                A.Resize(self.img_size[0], self.img_size[1]),
            ]
# Example 24
def get_strong_train_transform():
    """Strong detection training pipeline with bbox-safe cropping."""
    blur_choice = A.OneOf([
        A.Blur(blur_limit=(1, 3), p=0.33),
        A.MedianBlur(blur_limit=3, p=0.33),
        A.ImageCompression(quality_lower=50, p=0.33),
    ], p=0.5)
    color_choice = A.OneOf([
        A.RandomGamma(gamma_limit=(85, 115), p=0.33),
        A.RandomBrightnessContrast(brightness_limit=0.2, p=0.33),
        A.HueSaturationValue(hue_shift_limit=25, sat_shift_limit=25,
                             val_shift_limit=30, p=0.5),
    ], p=0.34)
    steps = [
        A.RandomSizedBBoxSafeCrop(IMG_SIZE, IMG_SIZE, interpolation=1,
                                  p=0.33),
        A.HorizontalFlip(),
        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1,
                           rotate_limit=10, interpolation=1, p=0.5),
        blur_choice,
        color_choice,
        A.CLAHE(clip_limit=2.5, p=0.5),
        A.Normalize(always_apply=True, p=1.0),
        ToTensorV2(p=1.0),
    ]
    return A.Compose(
        steps,
        bbox_params=A.BboxParams(format='pascal_voc', min_area=5,
                                 min_visibility=0.1,
                                 label_fields=['labels']))
# Example 25
    def __init__(self):
        """Optionally build an albumentations pipeline (YOLO-format bboxes).

        Leaves ``self.transform`` as None when albumentations is not
        installed; logs (but swallows) any other setup error.
        """
        self.transform = None
        try:
            import albumentations as A
            check_version(A.__version__, '1.0.3',
                          hard=True)  # version requirement

            # Most probabilities are 0.0/0.01 — the pipeline is nearly a
            # no-op by default and acts as a hook for tuning.
            self.transform = A.Compose([
                A.Blur(p=0.01),
                A.MedianBlur(p=0.01),
                A.ToGray(p=0.01),
                A.CLAHE(p=0.01),
                A.RandomBrightnessContrast(p=0.0),
                A.RandomGamma(p=0.0),
                A.ImageCompression(quality_lower=75, p=0.0)
            ],
                                       bbox_params=A.BboxParams(
                                           format='yolo',
                                           label_fields=['class_labels']))

            # Log only the transforms that can actually fire (p > 0).
            LOGGER.info(
                colorstr('albumentations: ') +
                ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            LOGGER.info(colorstr('albumentations: ') + f'{e}')
# Example 26
    def __getitem__(self, idx):
        """Return (image tensor, segmentation tensor) for sample ``idx``.

        NOTE(review): ``aug`` is constructed on every call but never
        applied — the line that used it (``preprocessing.one_augment``)
        is commented out below; confirm whether augmentation should be
        active, otherwise this allocation is dead work per item.
        """
        aug = albu.Compose([
            albu.Flip(p=0.3),
            albu.Rotate(p=0.9),
            albu.Blur(p=0.4),
            albu.ToFloat(p=1.)
        ])
        # image = self.crop_im[idx]
        # segment = self.crop_mask[idx]
        image = self.images[idx]

        # mask = self.masks[idx]
        # Decode the RLE-encoded segmentation into a (4, 256, 1600) array.
        segment = self.labels_dict[self.files_names[idx]]['segment']
        segment = preprocessing.rleToMask(segment).reshape((4, 256, 1600))

        # mask = np.transpose(mask, (2, 1, 0))
        # if image.shape != (200, 256, 4):
        #     image = cv2.resize(image, dsize=(200, 256))
        #     mask = cv2.resize(mask, dsize=(200, 256))

        # image = preprocessing.one_augment(aug, image)

        # CHW -> HWC so torchvision's ToTensor converts it back to CHW.
        segment = np.transpose(segment, (1, 2, 0))
        image = ToTensor()(image).float()

        segment = ToTensor()(segment).float()

        # mask = ToTensor()(mask).float()
        return (image, segment)
# Example 27
def tr_da_fn(height, width):
    """Training augmentation pipeline producing a ``height`` x ``width`` output."""
    contrast_choice = A.OneOf([
        A.CLAHE(p=1),
        A.RandomBrightness(p=1),
        A.RandomGamma(p=1),
        A.RandomContrast(limit=0.2, p=1.0),
    ], p=0.5)
    texture_choice = A.OneOf([
        A.Sharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), p=1.0),
        A.Blur(blur_limit=[2, 3], p=1.0),
        A.GaussNoise(var_limit=(5, 25), p=1.0),
        # A.MotionBlur(blur_limit=3, p=1.0),
    ], p=0.5)
    steps = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.10, rotate_limit=7,
                           shift_limit=0.10,
                           border_mode=cv2.BORDER_CONSTANT, p=1.0),
        A.Perspective(scale=(0.025, 0.04), p=0.3),
        A.RandomResizedCrop(height=height, width=width, scale=(0.9, 1.0),
                            p=0.3),
        contrast_choice,
        texture_choice,
        # occasional polarity inversion
        A.Lambda(image=_da_negative, p=0.2),
        # letterbox to the requested output size
        A.LongestMaxSize(max_size=max(height, width), always_apply=True),
        A.PadIfNeeded(min_height=height, min_width=width,
                      border_mode=cv2.BORDER_CONSTANT, always_apply=True),
    ]
    return A.Compose(steps)
 def weak_aug(self, p=0.5):
     '''Create a weakly augmented image framework.

     Returns an albumentations Compose gated at probability ``p``; the
     instance state (``self``) is not read.
     '''
     return A.Compose([
         A.HorizontalFlip(),
         # additive noise, 20%
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
         ], p=0.2),
         # one blur variant, 20%
         A.OneOf([
             A.MotionBlur(p=0.2),
             A.MedianBlur(blur_limit=3, p=0.1),
             A.Blur(blur_limit=3, p=0.1),
         ],
                 p=0.2),
         A.ShiftScaleRotate(
             shift_limit=0.0625, scale_limit=0.2, rotate_limit=10, p=0.2),
         A.OpticalDistortion(p=0.2),
         # one contrast/sharpness enhancement, 30%
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ],
                 p=0.3),
     ],
                      p=p)
# Example 29
def get_pseudo_transforms():
    """Augmentations for pseudo-labelled detection data (pascal_voc boxes)."""
    color_choice = A.OneOf([
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.9),
        A.RandomBrightnessContrast(brightness_limit=0.2,
                                   contrast_limit=0.2, p=0.9),
    ], p=0.9)
    blur_choice = A.OneOf([
        A.Blur(blur_limit=3, p=1.0),
        A.MedianBlur(blur_limit=3, p=1.0),
    ], p=0.1)
    steps = [
        A.RandomSizedCrop(min_max_height=(800, 1024), height=1024,
                          width=1024, p=0.5),
        color_choice,
        A.ToGray(p=0.01),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.Transpose(p=0.5),
        A.JpegCompression(quality_lower=85, quality_upper=95, p=0.2),
        blur_choice,
        A.Resize(height=SIZE, width=SIZE, p=1),
        A.Cutout(num_holes=8, max_h_size=64, max_w_size=64, fill_value=0,
                 p=0.5),
        ToTensorV2(p=1.0),
    ]
    return A.Compose(
        steps,
        p=1.0,
        bbox_params=A.BboxParams(format='pascal_voc', min_area=0,
                                 min_visibility=0,
                                 label_fields=['labels']))
# Example 30
def get_training_augmentation():
    """Build the random training augmentation pipeline.

    Returns:
        An ``A.Compose`` of noise/perspective/contrast/blur transforms,
        ending with a mask-rounding lambda.
    """
    steps = [
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        # one lightness/contrast tweak, 90%
        A.OneOf([A.CLAHE(p=1),
                 A.RandomBrightness(p=1),
                 A.RandomGamma(p=1)], p=0.9),
        # one sharpen/blur op, 90%
        A.OneOf([A.IAASharpen(p=1),
                 A.Blur(blur_limit=3, p=1),
                 A.MotionBlur(blur_limit=3, p=1)], p=0.9),
        # one color-space jitter, 90%
        A.OneOf([A.RandomContrast(p=1),
                 A.HueSaturationValue(p=1)], p=0.9),
        # snap interpolated mask values back to {0, 1}
        A.Lambda(mask=round_clip_0_1),
    ]
    return A.Compose(steps)