Example #1
0
def transform_train(image, mask):
    """Randomly augment an image/mask pair for training.

    Geometric transforms are applied to image and mask in a *single* call so
    both targets receive the same randomly sampled parameters; photometric
    transforms (brightness/contrast/blur) touch the image only.

    Returns the (possibly augmented) ``(image, mask)`` tuple.
    """
    if random.random() < 0.5:
        # BUG FIX: the original applied RandomRotate90 to image and mask in
        # two separate calls; each call samples its own rotation factor, so
        # the pair could be rotated differently. One joint call keeps them
        # aligned. The same joint-call form is used for the deterministic
        # transforms below for consistency.
        augmented = albumentations.RandomRotate90(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    if random.random() < 0.5:
        augmented = albumentations.Transpose(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    if random.random() < 0.5:
        augmented = albumentations.VerticalFlip(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    if random.random() < 0.5:
        augmented = albumentations.HorizontalFlip(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    # if random.random() < 0.5:
    #     augmented = albumentations.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.15, rotate_limit=45, p=1)(image=image, mask=mask)
    #     image, mask = augmented['image'], augmented['mask']

    if random.random() < 0.5:
        # Photometric transforms: image only — the mask must stay untouched.
        image = albumentations.RandomBrightness(0.1)(image=image)['image']
        image = albumentations.RandomContrast(0.1)(image=image)['image']
        image = albumentations.Blur(blur_limit=3)(image=image)['image']

    # if random.random() < 0.5:
    #     image = albumentations.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=1)(image)
    #     mask = albumentations.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=1)(mask)

    return image, mask
Example #2
0
 def tta_transforms(self):
     """Build the eight D4 (flip/transpose) test-time-augmentation pipelines.

     Every pipeline normalizes, pads to ``hparams.aug_pad_size``, applies one
     of the eight dihedral symmetries, and converts to a tensor.
     """
     symmetry_groups = [
         [],
         [A.VerticalFlip(p=1.)],
         [A.HorizontalFlip(p=1.)],
         [A.VerticalFlip(p=1.), A.HorizontalFlip(p=1.)],
         [A.Transpose(p=1.)],
         [A.Transpose(p=1.), A.VerticalFlip(p=1.)],
         [A.Transpose(p=1.), A.HorizontalFlip(p=1.)],
         [A.Transpose(p=1.), A.VerticalFlip(p=1.), A.HorizontalFlip(p=1.)],
     ]
     pipelines = []
     for group in symmetry_groups:
         head = [
             self.normalizer(),
             A.PadIfNeeded(
                 self.hparams.aug_pad_size,
                 self.hparams.aug_pad_size,
                 border_mode=self.hparams.aug_border_mode,
                 p=1.,
             ),
         ]
         pipelines.append(A.Compose(head + group + [ToTensorV2()]))
     return pipelines
Example #3
0
def get_aug(atype, size):
    """Return the augmentation pipeline named ``atype`` for ``size``x``size`` images.

    atype '0': resize + D4 flips + shift/scale/rotate.
    atype '1': random resized crop + flips + color jitter + dropout/cutout.
    atype '2': gentle random resized crop + low-probability jitter/flips.
    atype '3': random resized crop + horizontal flip only.
    Raises Exception for any other value.
    """
    print('using aug', atype)
    if atype == '0':
        return alb.Compose([
            alb.Resize(size, size, p=1),
            alb.Transpose(p=0.5),
            alb.HorizontalFlip(p=0.5),
            alb.VerticalFlip(p=0.5),
            alb.ShiftScaleRotate(p=0.5),
            alb.Normalize(p=1),
            ToTensorV2(p=1),
        ])
    elif atype == '1':
        return alb.Compose([
            alb.RandomResizedCrop(size, size),
            alb.Transpose(p=0.5),
            alb.HorizontalFlip(p=0.5),
            alb.VerticalFlip(p=0.5),
            alb.ShiftScaleRotate(p=0.5),
            alb.HueSaturationValue(hue_shift_limit=0.2,
                                   sat_shift_limit=0.2,
                                   val_shift_limit=0.2,
                                   p=0.5),
            alb.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                         contrast_limit=(-0.1, 0.1),
                                         p=0.5),
            alb.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225],
                          max_pixel_value=255.0,
                          p=1.0),
            alb.CoarseDropout(p=0.5),
            alb.Cutout(p=0.5),
            ToTensorV2(p=1.0),
        ])
    elif atype == '2':
        return alb.Compose([
            alb.RandomResizedCrop(size, size, p=1, scale=(0.9, 1)),
            alb.HueSaturationValue(hue_shift_limit=0.2,
                                   sat_shift_limit=0.2,
                                   val_shift_limit=0.2,
                                   p=0.2),
            alb.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                         contrast_limit=(-0.1, 0.1),
                                         p=0.2),
            alb.Transpose(p=0.2),
            alb.HorizontalFlip(p=0.2),
            alb.VerticalFlip(p=0.2),
            alb.ShiftScaleRotate(p=0.2),
            alb.Normalize(p=1),
            ToTensorV2(p=1),
        ])
    elif atype == '3':
        return alb.Compose([
            alb.RandomResizedCrop(size, size, p=1),
            alb.HorizontalFlip(p=0.2),
            alb.Normalize(p=1),
            ToTensorV2(p=1),
        ])
    else:
        # BUG FIX: corrected the typo "supoprted" in the error message.
        raise Exception('atype |{}| not supported'.format(atype))
Example #4
0
def get_tta_transform(tta_idx):
    """Return the TTA pipeline for ``tta_idx``; any unknown index falls back
    to the (stochastic) train-style transform.

    All pipelines start with ColorConstancy and end with Normalize+ToTensor.
    """
    deterministic = {
        0: [],                                  # original
        1: [A.HorizontalFlip(p=1)],             # horizontal flip
        2: [A.VerticalFlip(p=1)],               # vertical flip
        3: [A.RandomRotate90(p=1)],             # random rotate 90 degree
        4: [A.Flip(p=0.5), A.Transpose(p=1)],   # flip and transpose
    }
    if tta_idx in deterministic:
        return A.Compose(
            [ColorConstancy(p=1)]
            + deterministic[tta_idx]
            + [A.Normalize(), ToTensor()]
        )

    # Train transform: random flips/transposes, then a rotation family,
    # then a light blur.
    return A.Compose([
        ColorConstancy(p=1),
        A.OneOf([
            A.Flip(p=0.5),
            A.IAAFliplr(p=0.5),
            A.Transpose(p=0.5),
            A.IAAFlipud(p=0.5),
        ],
                p=0.5),
        A.OneOf([
            A.Rotate(limit=365, p=0.75),
            A.ShiftScaleRotate(p=0.75),
        ],
                p=0.75),
        A.Blur(blur_limit=3, p=0.5),
        A.Normalize(),
        ToTensor(),
    ])
def transform_train(image, mask, infor):
    """Augment an (image, mask) pair for training and ImageNet-normalize the image.

    ``infor`` is passed through untouched. Geometric transforms are applied
    to image and mask jointly so both receive the same random parameters.
    Returns ``(image, mask, infor)``.
    """
    if random.random() < 0.5:
        augmented = albumentations.VerticalFlip(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    if random.random() < 0.5:
        augmented = albumentations.HorizontalFlip(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    # if random.random() < 0.5:

    #     image = albumentations.OneOf([
    #         albumentations.RandomGamma(gamma_limit=(60, 120), p=0.1),
    #         albumentations.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.1),
    #         albumentations.CLAHE(clip_limit=4.0, tile_grid_size=(4, 4), p=0.1),
    #     ])(image=image)['image']

    # if random.random() < 0.25:
    #     image = albumentations.OneOf([
    #         albumentations.Blur(blur_limit=4, p=1),
    #         albumentations.MotionBlur(blur_limit=4, p=1),
    #         albumentations.MedianBlur(blur_limit=4, p=1)
    #     ], p=0.5)(image=image)['image']

    if random.random() < 0.25:
        # NOTE(review): Cutout samples its hole positions per call, so image
        # and mask get holes in different places here; albumentations' Cutout
        # is image-only, so a joint call cannot synchronize them — confirm
        # this desynchronization is intended.
        image = albumentations.Cutout(num_holes=2,
                                      max_h_size=8,
                                      max_w_size=8,
                                      p=1)(image=image)['image']
        mask = albumentations.Cutout(num_holes=2,
                                     max_h_size=8,
                                     max_w_size=8,
                                     p=1)(image=mask)['image']

    if random.random() < 0.5:
        # BUG FIX: the original rotated image and mask in two separate calls;
        # RandomRotate90 samples its factor per call, so the pair could end
        # up rotated differently. A single joint call keeps them aligned.
        augmented = albumentations.RandomRotate90(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    if random.random() < 0.5:
        augmented = albumentations.Transpose(p=1)(image=image, mask=mask)
        image, mask = augmented['image'], augmented['mask']

    # if random.random() < 0.5:
    #     augmented = albumentations.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.15, rotate_limit=45, p=1)(image=image, mask=mask)
    #     image, mask = augmented['image'], augmented['mask']

    # ImageNet normalization — image only; the mask keeps its raw values.
    image = albumentations.Normalize(mean=(0.485, 0.456, 0.406),
                                     std=(0.229, 0.224, 0.225),
                                     max_pixel_value=255.0,
                                     p=1.0)(image=image)['image']
    # image = albumentations.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), max_pixel_value=255.0, p=1.0)(image=image)['image']

    return image, mask, infor
Example #6
0
def train_get_transforms():
    """Training augmentation: noise/blur, photometric jitter, flips/rotations,
    then tensor conversion (no normalization)."""
    noise_or_blur = A.OneOf([
        A.MotionBlur(blur_limit=5),
        A.MedianBlur(blur_limit=5),
        A.GaussianBlur(blur_limit=5),
        A.GaussNoise(var_limit=(5.0, 30.0))
    ],
            p=0.8)

    return A.Compose([
        noise_or_blur,
        A.RandomBrightness(limit=0.1, p=0.5),
        A.RandomContrast(limit=[0.9, 1.1], p=0.5),
        A.Transpose(p=0.5),
        A.Rotate(limit=90,
                 interpolation=1,
                 border_mode=4,
                 always_apply=False,
                 p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(p=0.5),
        ToTensorV2()
    ])
Example #7
0
 def __init__(self, image_size):
     """Build train/test albumentations pipelines for ``image_size``-square inputs.

     Exposes ``self.data_transform`` with 'train_transform' (heavy spatial +
     photometric augmentation) and 'test_transform' (resize + normalize).
     """
     self.data_transform = {
         'train_transform':A.Compose([
           A.Transpose(p=0.5),
           A.VerticalFlip(p=0.5),
           A.HorizontalFlip(p=0.5),
           A.RandomBrightness(limit=0.2, p=0.75),
           A.RandomContrast(limit=0.2, p=0.75),
           A.OneOf([
               A.MotionBlur(blur_limit=5),
               A.MedianBlur(blur_limit=5),
               A.GaussianBlur(blur_limit=5),
               A.GaussNoise(var_limit=(5.0, 30.0)),], p=0.7),
           A.OneOf([
               A.OpticalDistortion(distort_limit=1.0),
               A.GridDistortion(num_steps=5, distort_limit=1.),
               A.ElasticTransform(alpha=3),], p=0.7),
           A.CLAHE(clip_limit=4.0, p=0.7),
           A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
           A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
           A.Resize(image_size, image_size),
           A.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
           A.Normalize()
           ]),
         # BUG FIX: removed a redundant second A.Resize that ran after
         # Normalize — the image is already image_size x image_size at that
         # point, so it was a pure no-op.
         'test_transform': A.Compose([
           A.Resize(image_size, image_size),
           A.Normalize()
           ])}
Example #8
0
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    """Build a paired (image, target) augmentation function.

    Returns ``process(a, b) -> (aug_a, aug_b)`` that applies the same random
    spatial parameters to both arrays, then pads and crops to ``size``.
    Unknown ``scope``/``crop`` values raise KeyError.
    """
    scoped_augs = {
        'weak':
        albu.Compose([
            albu.HorizontalFlip(),
        ]),
        'geometric':
        albu.OneOf([
            albu.HorizontalFlip(always_apply=True),
            albu.ShiftScaleRotate(always_apply=True),
            albu.Transpose(always_apply=True),
            albu.OpticalDistortion(always_apply=True),
            albu.ElasticTransform(always_apply=True),
        ])
    }
    croppers = {
        'random': albu.RandomCrop(size, size, always_apply=True),
        'center': albu.CenterCrop(size, size, always_apply=True)
    }

    pipeline = albu.Compose(
        [scoped_augs[scope], albu.PadIfNeeded(size, size), croppers[crop]],
        additional_targets={'target': 'image'})

    def process(a, b):
        result = pipeline(image=a, target=b)
        return result['image'], result['target']

    return process
Example #9
0
def get_transforms(image_size):
    """Return ``(transforms_train, transforms_val)`` for square ``image_size`` inputs.

    Train: D4 flips, brightness/contrast, blur-or-noise, warps, CLAHE,
    HSV jitter, shift/scale/rotate, resize, cutout, normalize.
    Val: resize + normalize only.
    """
    blur_or_noise = albumentations.OneOf([
        albumentations.MotionBlur(blur_limit=5),
        albumentations.MedianBlur(blur_limit=5),
        albumentations.GaussianBlur(blur_limit=5),
        albumentations.GaussNoise(var_limit=(5.0, 30.0)),
    ], p=0.7)

    warp = albumentations.OneOf([
        albumentations.OpticalDistortion(distort_limit=1.0),
        albumentations.GridDistortion(num_steps=5, distort_limit=1.),
        albumentations.ElasticTransform(alpha=3),
    ], p=0.7)

    cutout_size = int(image_size * 0.375)

    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        blur_or_noise,
        warp,
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=cutout_size, max_w_size=cutout_size, num_holes=1, p=0.7),
        albumentations.Normalize()
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize()
    ])

    return transforms_train, transforms_val
def hard_spatial_augmentations(image_size: Tuple[int, int], rot_angle=45):
    """Aggressive spatial augmentation: grid shuffle, mask dropout, affine,
    elastic/grid warps, and a D4 symmetry group.

    ``image_size`` is accepted for interface compatibility but unused — the
    pipeline itself is size-agnostic.
    """
    maybe_shuffle = A.OneOf([
        A.NoOp(),
        A.RandomGridShuffle(grid=(4, 4)),
        A.RandomGridShuffle(grid=(3, 3)),
        A.RandomGridShuffle(grid=(2, 2)),
    ])

    maybe_affine = A.OneOf([
        A.ShiftScaleRotate(scale_limit=0.1,
                           rotate_limit=rot_angle,
                           border_mode=cv2.BORDER_CONSTANT,
                           value=0,
                           mask_value=0),
        A.NoOp(),
    ])

    maybe_warp = A.OneOf([
        A.ElasticTransform(border_mode=cv2.BORDER_CONSTANT,
                           value=0,
                           mask_value=0),
        A.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                         value=0,
                         mask_value=0),
        A.NoOp(),
    ])

    return A.Compose([
        maybe_shuffle,
        A.MaskDropout(max_objects=10),
        maybe_affine,
        maybe_warp,
        # D4: transpose + random rotate-90 together reach all eight poses.
        A.Compose([A.Transpose(), A.RandomRotate90()]),
    ])
 def __init__(self,
              img_size=224,
              mean=(0.485, 0.456, 0.406),
              std=(0.229, 0.224, 0.225)):
     """Build train/val pipelines keyed under ``self.transform``.

     img_size: side length for the random crop (train) / resize (val).
     mean, std: normalization statistics (ImageNet defaults).
     """
     super(ImageTransform_3, self).__init__()
     # 'train': random resized crop + color jitter + D4 flips + motion blur,
     # then normalize and convert to tensor.
     # 'val': deterministic resize, then normalize and convert to tensor.
     self.transform = {
         'train':
         albu.Compose([
             albu.RandomResizedCrop(img_size, img_size),
             albu.ColorJitter(p=0.5),
             albu.HorizontalFlip(p=0.5),
             albu.VerticalFlip(p=0.5),
             albu.Transpose(p=0.5),
             albu.MotionBlur(p=0.5),
             albu.Normalize(mean, std),
             ToTensorV2(),
         ],
                      p=1.0),
         'val':
         albu.Compose([
             albu.Resize(img_size, img_size),
             albu.Normalize(mean, std),
             ToTensorV2(),
         ],
                      p=1.0)
     }
Example #12
0
def get_train_transform(img_size):
    """Training pipeline for ``img_size``-square inputs.

    Resize, D4 flips, shift/scale/rotate, HSV + brightness/contrast jitter,
    ImageNet normalization, dropout/cutout, tensor conversion.
    """
    geometry = [
        A.Resize(img_size, img_size),
        A.Transpose(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(p=0.5),
    ]

    color_jitter = [
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.5),
        A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                   contrast_limit=(-0.1, 0.1),
                                   p=0.5),
    ]

    tail = [
        A.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                    max_pixel_value=255.0,
                    p=1.0),
        A.CoarseDropout(p=0.5),
        A.Cutout(p=0.5),
        ToTensor()
    ]

    return A.Compose(geometry + color_jitter + tail)
Example #13
0
    def __init__(self, outputs=5):
        """EfficientNet-b0 classifier with an ``outputs``-logit head plus data setup.

        Side effects: downloads/loads pretrained weights, reads train.csv from
        the Kaggle input directory, and splits it 80/20 into train/valid.
        """
        super().__init__()
        self.net = EfficientNet.from_pretrained('efficientnet-b0')
        self.linear = Sequential(ReLU(), Dropout(),  Linear(1000, outputs))

        df = pd.read_csv("../input/prostate-cancer-grade-assessment/train.csv")
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        self.data_dir = "../input/prostate-cancer-grade-assessment/train_images"

        # Train: invert + random crop + D4 flips + rotation + light color
        # jitter + normalize. Valid: invert + normalize only.
        self.train_transforms = A.Compose(
            [
                A.InvertImg(p=1),
                A.RandomSizedCrop([int(IMAGE_SIZE * 0.9), IMAGE_SIZE], IMAGE_SIZE, IMAGE_SIZE),
                A.Transpose(),
                A.Flip(),
                A.Rotate(90, border_mode=cv.BORDER_CONSTANT, value=(0, 0, 0)),
                A.RandomBrightnessContrast(0.02, 0.02),
                A.HueSaturationValue(0, 10, 10),
                A.Normalize(mean, std, 1),
            ]
        )
        self.valid_transforms = A.Compose([A.InvertImg(p=1), A.Normalize(mean, std, 1),])

        self.criterion = BCEWithLogitsLoss()

        # NOTE(review): the ``if False and ...`` guard permanently disables
        # this BatchNorm-freezing block — confirm whether it should be
        # re-enabled or deleted.
        if False and BATCH_SIZE == 1:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False
Example #14
0
def get_train_transform(cfg):
    """Build the training pipeline from ``cfg.DATASET`` sizes and probabilities.

    Resize, random resized crop, dropout/cutout, flips, HSV and
    brightness/contrast jitter, transpose, shift/scale/rotate, ImageNet
    normalization, tensor conversion.
    """
    return A.Compose([
        A.Resize(height=cfg.DATASET.IMG_HEIGHT, width=cfg.DATASET.IMG_WIDTH),
        A.RandomResizedCrop(cfg.DATASET.IMG_HEIGHT,
                            cfg.DATASET.IMG_HEIGHT,
                            p=cfg.DATASET.P_RANDOMRESCROP),
        A.CoarseDropout(p=cfg.DATASET.P_COARSEDROP),
        A.Cutout(num_holes=cfg.DATASET.NUM_HOLES, p=cfg.DATASET.P_CUTOUT),
        # BUG FIX: these probabilities were passed positionally, which bound
        # them to ``always_apply`` (the first parameter of these transforms)
        # instead of ``p`` — any truthy value made the transform fire every
        # time. Pass them explicitly as ``p=``.
        A.HorizontalFlip(p=cfg.DATASET.P_HORIZONATL_FLIP),
        A.VerticalFlip(p=cfg.DATASET.P_VERTICAL_FLIP),
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.5),
        A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                   contrast_limit=(-0.1, 0.1),
                                   p=0.5),
        A.Transpose(p=cfg.DATASET.P_TRASPOSE),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0.2,
                           rotate_limit=15,
                           p=cfg.DATASET.P_SHIFT_SCALE,
                           border_mode=cv2.BORDER_REFLECT),
        A.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2()
    ])
Example #15
0
    def __init__(self, outputs=5):
        """EfficientNet-b0 classifier with an ``outputs``-logit head plus data setup.

        Side effects: downloads/loads pretrained weights, reads train.csv from
        the Kaggle input directory, and splits it 80/20 into train/valid.
        Images are loaded from a local 128px tile dataset.
        """
        super().__init__()
        self.net = EfficientNet.from_pretrained('efficientnet-b0')
        self.linear = Sequential(ReLU(), Dropout(), Linear(1000, outputs))

        df = pd.read_csv("../input/prostate-cancer-grade-assessment/train.csv")
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        self.data_dir = "/datasets/panda/train_128_100"

        # Train: invert + random 100-128px crop to 128 + D4 flips + rotation,
        # then normalize. Valid: invert + normalize only.
        self.train_transforms = A.Compose([
            A.InvertImg(p=1),
            A.RandomSizedCrop([100, 128], 128, 128),
            A.Transpose(),
            A.Flip(),
            A.Rotate(90),
            # A.RandomBrightnessContrast(0.02, 0.02),
            # A.HueSaturationValue(0, 10, 10),
            A.Normalize(mean, std, 1),
        ])
        self.valid_transforms = A.Compose([
            A.InvertImg(p=1),
            A.Normalize(mean, std, 1),
        ])

        self.criterion = BCEWithLogitsLoss()
def flip_transpose(image, mask):
    """Generate all eight D4 variants of an (image, mask) pair.

    Output order: original, h-flip, v-flip, h+v-flip, then the same four
    variants of the transposed pair. Returns a list of [image, mask] pairs.
    """
    def _four_variants(img, msk):
        # [x, hflip(x), vflip(x), vflip(hflip(x))] for one base pair.
        variants = [[img, msk]]

        flipped_h = A.HorizontalFlip(p=1)(image=img, mask=msk)
        variants.append([flipped_h['image'], flipped_h['mask']])

        flipped_v = A.VerticalFlip(p=1)(image=img, mask=msk)
        variants.append([flipped_v['image'], flipped_v['mask']])

        flipped_hv = A.VerticalFlip(p=1)(image=flipped_h['image'],
                                         mask=flipped_h['mask'])
        variants.append([flipped_hv['image'], flipped_hv['mask']])

        return variants

    transposed = A.Transpose(p=1)(image=image, mask=mask)

    return (_four_variants(image, mask)
            + _four_variants(transposed['image'], transposed['mask']))
def get_transforms(type="albumentations"):
    """Return ``(train_transforms, test_transforms)``.

    type: "albumentations" selects an albumentations pipeline; any other
    value selects a torchvision pipeline with hair/microscope-style extras.

    NOTE(review): ``type`` shadows the builtin; renaming it would break
    callers that pass it by keyword, so it is left as-is.
    """
    if type == "albumentations":
        # Train: transpose + one flip + one brightness/contrast jitter +
        # blur-or-noise + warp, then resize. Normalization is commented out.
        train_transforms = albumentations.Compose([
            albumentations.Transpose(p=0.5),
            albumentations.OneOf([
                albumentations.VerticalFlip(p=0.5),
                albumentations.HorizontalFlip(p=0.5),
            ]),
            albumentations.OneOf([
                albumentations.RandomBrightness(limit=0.2, p=0.75),
                albumentations.RandomContrast(limit=0.2, p=0.75),
            ]),
            albumentations.OneOf([
                albumentations.MotionBlur(blur_limit=5),
                albumentations.MedianBlur(blur_limit=5),
                albumentations.GaussianBlur(blur_limit=5),
                albumentations.GaussNoise(var_limit=(5.0, 30.0)),
            ],
                                 p=0.7),
            albumentations.OneOf([
                albumentations.OpticalDistortion(distort_limit=1.0),
                albumentations.GridDistortion(num_steps=5, distort_limit=1.),
                albumentations.ElasticTransform(alpha=3),
            ],
                                 p=0.7),

            # albumentations.OneOf([
            #     albumentations.CLAHE(clip_limit=4.0, p=0.7),
            #     albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
            #     albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0,
            #                                     p=0.85),
            # ]),
            albumentations.Resize(256, 256),
            # albumentations.Cutout(max_h_size=int(256 * 0.375), max_w_size=int(256 * 0.375), num_holes=1, p=0.7),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        test_transforms = albumentations.Compose([
            albumentations.Resize(256, 256),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    else:
        # torchvision branch: crop/flip augmentation plus ImageNet
        # normalization and tensor conversion.
        train_transforms = transforms.Compose([
            # AdvancedHairAugmentation(hairs_folder='/kaggle/input/melanoma-hairs'),
            transforms.RandomResizedCrop(size=256, scale=(0.9, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            Microscope(p=0.5),
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        test_transforms = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return train_transforms, test_transforms
Example #18
0
def strong_aug(p=.5):
    """Heavy augmentation (D4, noise, blur, affine, warps, color) applied
    as a whole with probability ``p``."""
    noise = A.OneOf([
        A.IAAAdditiveGaussianNoise(),
        A.GaussNoise(),
    ], p=0.2)

    blur = A.OneOf([
        A.MotionBlur(p=.2),
        A.MedianBlur(blur_limit=3, p=.1),
        A.Blur(blur_limit=3, p=.1),
    ],
            p=0.2)

    warp = A.OneOf([
        A.OpticalDistortion(p=0.3),
        A.GridDistortion(p=.1),
        A.IAAPiecewiseAffine(p=0.3),
    ],
            p=0.2)

    color = A.OneOf([
        A.CLAHE(clip_limit=2),
        A.IAASharpen(),
        A.IAAEmboss(),
        A.RandomContrast(),
        A.RandomBrightness(),
    ],
            p=0.3)

    return A.Compose([
        A.RandomRotate90(),
        A.Flip(),
        A.Transpose(),
        noise,
        blur,
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
        warp,
        color,
        A.HueSaturationValue(p=0.3),
    ],
                     p=p)
Example #19
0
class Crop256(object):
    """Transform bundle for 256x256 training crops with a paired 'gt' image.

    train_transform: random 256-crop + D4 flips/transpose (matches 8-way TTA),
    applied identically to 'image' and 'gt'.
    val_transform: pad both sides up to a multiple of 8, no augmentation.
    """
    width = height = 256

    train_transform = A.Compose(
        [
            A.RandomCrop(height=height, width=width, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Transpose(p=0.5),  # TTA×8
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        additional_targets={'gt': 'image'})

    divisor = 8  # pad to a multiple of 8
    val_transform = A.Compose([
        A.PadIfNeeded(min_height=None,
                      min_width=None,
                      pad_height_divisor=divisor,
                      pad_width_divisor=divisor,
                      p=1.0),
        ToTensorV2(p=1.0),
    ],
                              p=1.0,
                              additional_targets={'gt': 'image'})
Example #20
0
 def affine_transform(
         self):  # Affine Transforms: Scale & Translation & Rotation
     """Compose transpose + one rotation variant + one flip variant.

     Each individual transform fires with probability ``self.p``; the two
     OneOf groups themselves always run (p=1).
     """
     return A.Compose(
         [
             # Transpose the input by swapping rows and columns.
             A.Transpose(p=self.p),
             A.OneOf(
                 [
                     # Randomly rotate the input by 90 degrees zero or more times.
                     A.RandomRotate90(p=self.p),
                     # Rotate the input by an angle selected randomly from the uniform distribution.
                     A.Rotate(limit=90, p=self.p),
                     # Randomly apply affine transforms: translate, scale and rotate the input.
                     A.ShiftScaleRotate(shift_limit=0.0625,
                                        scale_limit=0.1,
                                        rotate_limit=45,
                                        p=self.p)
                 ],
                 p=1),
             A.OneOf(
                 [
                     A.HorizontalFlip(
                         p=self.p
                     ),  # Flip the input horizontally around the y-axis.
                     A.VerticalFlip(
                         p=self.p
                     ),  # Flip the input vertically around the x-axis.
                     A.Flip(
                         p=self.p
                     )  # Flip the input either horizontally, vertically or both horizontally and vertically.
                 ],
                 p=1)
         ],
         p=1)
Example #21
0
def get_lung_transforms(*, augment, args):
    """Return the train pipeline when ``augment == 'augment'``, otherwise the
    deterministic eval pipeline (resize + ImageNet normalize + tensor)."""
    if augment == 'augment':
        return A.Compose([
            A.SmallestMaxSize(max_size=args.img_size),
            A.Transpose(p=0.5),
            A.VerticalFlip(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2,
                                       p=0.75),
            A.Resize(args.img_size, args.img_size),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2()
        ])

    return A.Compose([
        A.Resize(args.img_size, args.img_size),
        A.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2()
    ])
Example #22
0
    def transformer(self):
        """Build the image transform: resize plus heavy augmentation when
        training (``self.is_train``), resize only otherwise."""
        if self.is_train:
            transform = A.Compose([
                A.CLAHE(),
                A.RandomRotate90(),
                # BUG FIX: removed ``A.DualTransform()`` — DualTransform is
                # the abstract base class for image+mask transforms, not an
                # augmentation; instantiating it in a pipeline raises
                # NotImplementedError as soon as the pipeline is applied.
                A.Transpose(),
                A.Resize(height=self.img_size,
                         width=self.img_size,
                         interpolation=cv2.INTER_AREA),  # RESIZE
                A.ShiftScaleRotate(shift_limit=0.0625,
                                   scale_limit=0.50,
                                   rotate_limit=45,
                                   p=.75),
                A.Blur(blur_limit=3),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.HueSaturationValue(),
            ])
        else:
            transform = A.Compose([
                A.Resize(height=self.img_size,
                         width=self.img_size,
                         interpolation=cv2.INTER_AREA),  # RESIZE
            ])

        return transform
    def __init__(self, batch_size, data_dir, img_size=(256, 256)):
        """Data module setup.

        batch_size: loader batch size.
        data_dir: dataset root directory.
        img_size: (height, width) of the training crop.
        """
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        # BUG FIX: ``img_size`` is an (H, W) tuple, but RandomResizedCrop
        # takes separate integer height/width arguments — passing the tuple
        # as the height would fail. Unpack it first.
        height, width = img_size
        self.train_transform = A.Compose(
            [
                GrayToRGB(),
                A.RandomResizedCrop(height, width, p=1.0),
                A.Transpose(p=0.5),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.ShiftScaleRotate(p=0.5),
                A.HueSaturationValue(hue_shift_limit=0.2,
                                     sat_shift_limit=0.2,
                                     val_shift_limit=0.2,
                                     p=0.5),
                A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                           contrast_limit=(-0.1, 0.1),
                                           p=0.5),
                A.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                    max_pixel_value=255.0,
                    p=1.0,
                ),
                A.CoarseDropout(p=0.5),
                A.Cutout(p=0.5),
                RGBToGray(),
                ToTensorV2(p=1.0),
            ],
            p=1.0,
        )
Example #24
0
def train_image_augmentation(image, img_size):
    """Apply the training augmentation pipeline to a single image.

    ``image`` is converted to a numpy array, resized/cropped to
    ``img_size``-square, randomly augmented, and ImageNet-normalized.
    Returns the augmented numpy array.
    """
    pipeline = A.Compose(
        [
            A.Resize(img_size, img_size),
            A.CenterCrop(img_size, img_size, p=1.0),
            A.Transpose(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ShiftScaleRotate(p=0.5),
            A.Blur(blur_limit=3),
            A.OpticalDistortion(p=0.5),
            A.GridDistortion(p=0.5),
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.5),
            A.CoarseDropout(p=0.5),
            A.Cutout(p=0.5),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
                p=1.0,
            )
        ],
        p=1.0,
    )

    return pipeline(image=np.array(image))['image']
Example #25
0
def test_transpose_both_image_and_mask():
    """Transpose must swap H and W on both the image and its mask."""
    h, w = 8, 6
    img = np.ones((h, w, 3))
    msk = np.ones((h, w))
    out = A.Transpose(p=1)(image=img, mask=msk)
    assert out['image'].shape == (w, h, 3)
    assert out['mask'].shape == (w, h)
def Albumentations(params):
    """Build the detection training augmentation pipeline (COCO bboxes).

    Args:
        params: config object providing ``mean`` and ``std`` used by the
            final normalization step.

    Returns:
        An ``A.Compose`` pipeline with COCO-format bbox handling
        (``label_fields=['category_ids']``, ``min_visibility=0.2``).
    """
    augmentation = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Transpose(p=0.5),
        # Color jitter: exactly one of these fires half the time.
        A.OneOf([
            A.CLAHE(p=1.),
            A.RandomBrightnessContrast(0.15, 0.15, p=1.),
            A.RandomGamma(p=1.),
            A.HueSaturationValue(p=1.)],
            p=0.5),
        # Blur/sharpen: exactly one of these fires half the time.
        A.OneOf([
            A.Blur(3, p=1.),
            A.MotionBlur(3, p=1.),
            A.Sharpen(p=1.)],
            p=0.5),
        A.ShiftScaleRotate(
            shift_limit=0.05, scale_limit=0.05, rotate_limit=15,
            border_mode=cv2.BORDER_CONSTANT, p=0.75),
        # BUG FIX: the comma after ShiftScaleRotate(...) was missing, which
        # made the list literal a SyntaxError (two adjacent expressions).
        A.Normalize(mean=params.mean, std=params.std, max_pixel_value=255., always_apply=True)
    ], bbox_params=A.BboxParams(format='coco', label_fields=['category_ids'], min_visibility=0.2))

    return augmentation
def get_individual_transforms():
    """Compose three independent selection stages: geometry, warping, noise.

    Every stage always fires (p=1.0) but each includes ``A.NoOp()``, so the
    net effect of any stage may be "do nothing".
    """
    geometric = [
        A.Transpose(p=1.0),
        A.VerticalFlip(p=1.0),
        A.HorizontalFlip(p=1.0),
        A.RandomRotate90(p=1.0),
        A.NoOp(),
    ]
    warping = [
        A.ElasticTransform(p=1.0),
        A.GridDistortion(p=1.0),
        A.OpticalDistortion(p=1.0),
        A.NoOp(),
    ]
    noise = [
        A.GaussNoise(p=1.0),
        A.GaussianBlur(p=1.0),
        A.ISONoise(p=1.0),
        A.CoarseDropout(
            p=1.0, max_holes=16, max_height=16, max_width=16),
        A.NoOp(),
    ]

    return A.Compose([
        A.OneOf(geometric, p=1.0),
        A.OneOf(warping, p=1.0),
        A.OneOf(noise, p=1.0),
    ])
def get_train_aug(size):
    """Build the training augmentation pipeline.

    Args:
        size: output side length of the random resized crop.

    Returns:
        An ``A.Compose`` of crop/scale, D4 flips, affine jitter, color
        jitter, and region-dropout augmentations.
    """
    ops = [
        A.RandomResizedCrop(size, size),   # random crop + random scale in one op
        A.Transpose(p=0.5),                # swap rows and columns
        A.HorizontalFlip(p=0.5),           # mirror around the y-axis
        A.VerticalFlip(p=0.5),             # mirror around the x-axis
        A.ShiftScaleRotate(p=0.5),         # random translate / scale / rotate
        # Random hue/saturation/value jitter.
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.5),
        # Random brightness/contrast jitter.
        A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                   contrast_limit=(-0.1, 0.1),
                                   p=0.5),
        A.CoarseDropout(p=0.5),            # drop rectangular regions
        A.Cutout(p=0.5),                   # drop square regions
    ]
    return A.Compose(ops)
Example #29
0
def get_train_transforms():
    """Detection training pipeline with pascal_voc bboxes, 1024x1024 output."""
    color_jitter = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.9),
            A.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.9),
        ],
        p=0.9,
    )
    blur = A.OneOf(
        [A.Blur(blur_limit=3, p=1.0),
         A.MedianBlur(blur_limit=3, p=1.0)],
        p=0.1,
    )
    bbox_cfg = A.BboxParams(format='pascal_voc',
                            min_area=0,
                            min_visibility=0,
                            label_fields=['labels'])

    return A.Compose(
        [
            A.RandomSizedCrop(
                min_max_height=(800, 1024), height=1024, width=1024, p=0.5),
            color_jitter,
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Transpose(p=0.5),
            A.JpegCompression(quality_lower=85, quality_upper=95, p=0.2),
            blur,
            A.Resize(height=1024, width=1024, p=1),
            A.Cutout(
                num_holes=8, max_h_size=64, max_w_size=64, fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=bbox_cfg,
    )
def get_transform(image_size, normalize=True, train=True):
    """Build a transform pipeline: train-only augmentations, optional
    normalization, then tensor conversion.

    Args:
        image_size: accepted for interface compatibility; not used by the
            visible body — presumably consumed by a caller or a future
            resize step (TODO confirm).
        normalize: if True, append ``A.Normalize()`` with its defaults.
        train: if True, prepend the stochastic augmentation steps.

    Returns:
        An ``A.Compose`` ending in ``ToTensorV2``.
    """
    steps = []
    if train:
        steps += [
            # Either HSV jitter or brightness/contrast jitter, 90% of the time.
            A.OneOf([
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=30,
                                     p=0.6),
                A.RandomBrightnessContrast(
                    brightness_limit=0.2, contrast_limit=0.2, p=0.6),
            ],
                    p=0.9),
            A.Transpose(p=0.5),
            A.VerticalFlip(p=0.5),
            A.HorizontalFlip(p=0.5),
        ]

    if normalize:
        steps.append(A.Normalize())

    steps.append(ToTensorV2())
    return A.Compose(steps)