# Example #1 (score: 0)
def get_training_augmentation(dim=512, rot_limit=45):
    """Build the training augmentation pipeline.

    Reads the module-level ``args.aug`` flag: when truthy, a heavy pipeline
    (rotation, noise, perspective, color jitter) is used; otherwise only
    flip / pad / crop. Both variants end by rounding masks back to {0, 1}.

    Args:
        dim: side length used as the pad target and the random-crop size.
        rot_limit: rotation limit in degrees for ShiftScaleRotate.

    Returns:
        ``A.Compose`` wrapping the selected transform list.
    """
    heavy = args.aug

    transforms = [A.HorizontalFlip(p=0.5)]
    if heavy:
        transforms.append(
            A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=rot_limit,
                               shift_limit=0.1, p=1, border_mode=0))
    transforms += [
        A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),
        A.RandomCrop(height=dim, width=dim, always_apply=True),
    ]
    if heavy:
        transforms += [
            A.IAAAdditiveGaussianNoise(p=0.2),
            A.IAAPerspective(p=0.5),
            A.OneOf([A.CLAHE(p=1),
                     A.RandomBrightness(p=1),
                     A.RandomGamma(p=1)], p=0.9),
            A.OneOf([A.IAASharpen(p=1),
                     A.Blur(blur_limit=3, p=1),
                     A.MotionBlur(blur_limit=3, p=1)], p=0.9),
            A.OneOf([A.RandomContrast(p=1),
                     A.HueSaturationValue(p=1)], p=0.9),
        ]
    # Masks are clipped/rounded back to binary after interpolation.
    transforms.append(A.Lambda(mask=round_clip_0_1))
    return A.Compose(transforms)
def color_augment_pool():
    """Return the pool of color-space augmentations to sample from."""
    return [
        A.RGBShift(),
        A.ToGray(),
        A.ChannelShuffle(),
        A.CLAHE(),
        A.HueSaturationValue(),
        A.RandomContrast(),
        A.RandomGamma(),
        A.Blur(),
        A.MedianBlur(),
        A.JpegCompression(),
    ]
# Example #3 (score: 0)
def get_training_augmentation():
    """Flip / rotate / distort / contrast pipeline, resized to 352x544."""
    return albu.Compose([
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(
            scale_limit=(0.1, 0.1), rotate_limit=45, p=0.5, border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.RandomContrast(limit=0.3, p=0.5),
        albu.Resize(352, 544),
        #albu.PadIfNeeded(352, 544, border_mode=0)
    ])
# Example #4 (score: 0)
	def __init__(self, size, mean, std, bbox_format='albumentations'):
		"""Build the detection training pipeline.

		Args:
			size: (width, height) pair; transforms receive (size[1], size[0]).
			mean: normalization mean — NOTE(review): accepted but not
				forwarded to A.Normalize here; confirm intended.
			std: normalization std — same note as ``mean``.
			bbox_format: incoming bbox format; converted to 'albumentations'
				before ToTensor and also passed to the superclass.
		"""
		super(TrainAugmentation, self).__init__([
			A.RandomContrast(),
			A.RandomGamma(),
			A.HueSaturationValue(sat_shift_limit=50, p=0.6),
			A.CLAHE(),
			A.HorizontalFlip(),
			A.Cutout(p=0.5),
			# Crop that keeps all bboxes inside the frame, then resize.
			A.RandomSizedBBoxSafeCrop(size[1], size[0], p=0.8),
			A.Resize(size[1], size[0]),
			A.Normalize(),
			BboxFormatConvert(bbox_format, 'albumentations'),
			ToTensor()
		], bbox_format)
# Example #5 (score: 0)
def transform_test(image):
    """Return (untouched copy, possibly-degraded copy) of *image*.

    With probability 0.5 the "hard" copy receives mild brightness and
    contrast jitter followed by a small blur; the "simple" copy is never
    modified.
    """
    simple = image.copy()
    hard = image.copy()

    if random.random() < 0.5:
        for degrade in (albumentations.RandomBrightness(0.1),
                        albumentations.RandomContrast(0.1),
                        albumentations.Blur(blur_limit=3)):
            hard = degrade(image=hard)['image']

    return simple, hard
def aug(file):
    """Return the pipeline for *file* in {'train', 'validation', 'test'}.

    Unknown values yield None (matching the original fall-through).
    """
    normalize_tail = [
        al.Normalize(mean=(0.485, 0.456, 0.406),
                     std=(0.229, 0.224, 0.225),
                     p=1),
        ToTensorV2(p=1),
    ]

    if file == 'train':
        return al.Compose([
            al.VerticalFlip(p=0.5),
            al.HorizontalFlip(p=0.5),
            al.RandomRotate90(p=0.5),
            al.OneOf([
                al.GaussNoise(0.002, p=0.5),
                al.IAAAffine(p=0.5),
            ], p=0.2),
            al.OneOf([
                al.Blur(blur_limit=(3, 10), p=0.4),
                al.MedianBlur(blur_limit=3, p=0.3),
                al.MotionBlur(p=0.3),
            ], p=0.3),
            al.OneOf([
                al.RandomBrightness(p=0.3),
                al.RandomContrast(p=0.4),
                al.RandomGamma(p=0.3),
            ], p=0.5),
            al.Cutout(num_holes=20, max_h_size=20, max_w_size=20, p=0.5),
            al.ShiftScaleRotate(shift_limit=0.0625,
                                scale_limit=(0.9, 1),
                                rotate_limit=45,
                                p=0.3),
        ] + normalize_tail)

    if file == 'validation':
        return al.Compose(normalize_tail)

    if file == 'test':
        return al.Compose(normalize_tail, p=1)

    return None
# Example #7 (score: 0)
def get_transform(name='default', resize=512):
    """Look up a named augmentation pipeline.

    Known names: 'default', 'train1', 'val', 'test'; any other name
    returns None.
    """
    if name == 'default':
        return A.Compose([
            A.Resize(resize, resize),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.OneOf([
                A.RandomContrast(),
                A.RandomGamma(),
                A.RandomBrightness(),
                A.ColorJitter(brightness=0.07, contrast=0.07, saturation=0.1,
                              hue=0.1, always_apply=False, p=0.3),
            ], p=0.3),
            # Distortion group kept but disabled (p=0.0), as in the original.
            A.OneOf([
                A.ElasticTransform(alpha=120, sigma=120 * 0.05,
                                   alpha_affine=120 * 0.03),
                A.GridDistortion(),
                A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ], p=0.0),
            A.ShiftScaleRotate(),
        ])

    if name == 'train1':
        return A.Compose([
            A.RandomCrop(resize, resize, True),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ColorJitter(brightness=0.07, contrast=0.07, saturation=0.1,
                          hue=0.1, always_apply=False, p=0.3),
            A.ElasticTransform(alpha=120, sigma=120 * 0.05,
                               alpha_affine=120 * 0.03),
            A.ChannelShuffle(p=0.6),
        ])

    if name in ('val', 'test'):
        return A.Compose([A.Resize(resize, resize)])

    return None
# Example #8 (score: 0)
 def __init__(self, X, Y):
     """Store the dataset arrays and build the augmentation pipeline.

     Args:
         X: images array; ``X.shape[0]`` is used as the dataset length.
         Y: targets aligned with ``X``.
     """
     self.X = X
     self.Y = Y
     # Number of samples.
     self.n = self.X.shape[0]
     # One photometric jitter (80% of the time) plus flip/rotation.
     self.aug = A.Compose([
         A.OneOf([
             A.RandomGamma((40,200),p=1),
             A.RandomBrightness(p=1),
             A.RandomContrast(p=1),
             A.RGBShift(p=1),
         ], p=0.8),
         A.VerticalFlip(p=0.5),
         A.RandomRotate90(p=1)
     ])
     # Cursor used for sequential iteration (presumably by a __next__/
     # generator method elsewhere in the class — not visible here).
     self.idx = 0
def get_aug(name='default', input_shape=[48, 48, 3]):
    """Look up a named augmentation pipeline.

    Args:
        name: 'default', 'plates' or 'plates2'; anything else yields None.
        input_shape: (W, H, C) list used to derive crop/resize sizes.

    Returns:
        An ``A.Compose`` pipeline, or None for an unknown name.
    """
    if name == 'default':
        augmentations = A.Compose([
            A.RandomBrightnessContrast(p=0.4),
            A.RandomGamma(p=0.4),
            A.HueSaturationValue(hue_shift_limit=20,
                                 sat_shift_limit=30, val_shift_limit=30, p=0.4),
            A.CLAHE(p=0.4),
            A.Blur(blur_limit=1, p=0.3),
            A.GaussNoise(var_limit=(50, 80), p=0.3)
        ], p=1)
    elif name == 'plates':
        augmentations = A.Compose([
            A.RandomBrightnessContrast(p=0.4),
            A.RandomGamma(p=0.4),
            A.HueSaturationValue(hue_shift_limit=20,
                                 sat_shift_limit=30, 
                                 val_shift_limit=30, 
                                 p=0.4),
            A.CLAHE(p=0.4),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Blur(blur_limit=1, p=0.3),
            A.GaussNoise(var_limit=(50, 80), p=0.3),
            # BUG FIX: use integer division — RandomCrop requires int
            # height/width; '/' produced floats ('plates2' already uses '//').
            A.RandomCrop(p=0.8, height=2*input_shape[1]//3, width=2*input_shape[0]//3)
        ], p=1)
    elif name == 'plates2':
        augmentations = A.Compose([
            A.CLAHE(clip_limit=(1,4),p=0.3),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomBrightness(limit=0.2, p=0.3),
            A.RandomContrast(limit=0.2, p=0.3),
            # A.Rotate(limit=360, p=0.9),
            A.RandomRotate90(p=0.3),
            A.HueSaturationValue(hue_shift_limit=(-50,50), 
                                 sat_shift_limit=(-15,15), 
                                 val_shift_limit=(-15,15), 
                                 p=0.5),
#             A.Blur(blur_limit=(5,7), p=0.3),
            A.GaussNoise(var_limit=(10, 50), p=0.3),
            A.CenterCrop(p=1, height=2*input_shape[1]//3, width=2*input_shape[0]//3),
            A.Resize(p=1, height=input_shape[1], width=input_shape[0])
        ], p=1)
    else:
        augmentations = None

    return augmentations
    def setup_pipeline(self, dict_transform):
        """Assemble ``self.transform`` from the keys present in *dict_transform*.

        Each recognized key enables one augmentation; numeric values are
        parsed with ``float``. Unrecognized keys are ignored. The append
        order below fixes the pipeline order.
        """
        transforms = []
        if 'shadow' in dict_transform:
            transforms.append(A.RandomShadow(
                shadow_roi=(0, 0.5, 1, 1), num_shadows_upper=1, p=0.2))
        if 'scale' in dict_transform:
            transforms.append(A.RandomScale(
                scale_limit=float(dict_transform['scale'])))
        if 'rotate' in dict_transform:
            transforms.append(A.Rotate(
                limit=float(dict_transform['rotate']), p=0.8))
        if 'shift' in dict_transform:
            transforms.append(A.ShiftScaleRotate(
                shift_limit=float(dict_transform['shift']),
                scale_limit=0.0,
                rotate_limit=0,
                interpolation=1,
                border_mode=4,
                p=0.8))
        if 'brightness' in dict_transform:
            transforms.append(A.RandomBrightness(
                limit=float(dict_transform['brightness']), p=0.8))
        if 'contrast' in dict_transform:
            transforms.append(A.RandomContrast(
                limit=float(dict_transform['contrast']), p=0.8))
        if 'motion_blur' in dict_transform:
            transforms.append(A.MotionBlur(p=0.5, blur_limit=7))
        if 'fog' in dict_transform:
            transforms.append(A.RandomFog(
                fog_coef_lower=0.0,
                fog_coef_upper=float(dict_transform['fog']),
                alpha_coef=0.05,
                p=0.7))
        if 'rain' in dict_transform:
            transforms.append(A.RandomRain(
                brightness_coefficient=0.95, drop_width=1, blur_value=1, p=0.7))
        if 'occlusion' in dict_transform:
            transforms.append(A.CoarseDropout(
                max_holes=5, max_height=8, max_width=8, p=0.5))
        self.transform = A.Compose(transforms)
# Example #11 (score: 0)
def augmentation(images, is_train, info, aug_num):
    """Write resized originals plus augmented copies per class label to disk.

    Args:
        images: list of image paths shaped .../<label>/<name>.
        is_train: select the 'train' or 'valid' output directory; for
            validation only 20% of ``aug_num`` images are produced.
        info: mapping label -> number of source images for that label.
        aug_num: target number of images per label.

    Returns:
        The output directory path that was written to.
    """
    if is_train:
        output_path = f'{OUTPUT_PATH}/train'
    else:
        output_path = f'{OUTPUT_PATH}/valid'
        aug_num = int(aug_num * 0.2)

    for label in label_list:
        if not os.path.isdir(f'{output_path}/{label}'):
            os.makedirs(f'{output_path}/{label}')

    # Both pipelines are loop-invariant; build them once instead of
    # re-creating them for every image / every copy (randomness in
    # albumentations is drawn per call, so behavior is unchanged).
    resize = A.Resize(224, 224)
    augment = A.Compose([
        A.Resize(224, 224, p=1),
        A.RandomRotate90(p=1),
        A.OneOf([
            A.HorizontalFlip(p=0.7),
            A.VerticalFlip(p=0.7),
        ],
                p=0.7),
        A.OneOf([
            A.RandomContrast(p=0.7, limit=(-0.5, 0.3)),
            A.RandomBrightness(p=0.7, limit=(-0.2, 0.3))
        ],
                p=0.7),

        # A.Cutout(p=0.5)
    ])

    for i in tqdm(range(len(images))):
        image = images[i]
        image_name = image.split('/')[-1]
        label = image.split('/')[-2]

        # Copies this source image must contribute to reach aug_num.
        cnt = int(math.ceil(aug_num / info[label]))
        total_images = len(os.listdir(f'{output_path}/{label}'))
        if total_images <= aug_num:
            image = cv2.imread(image)
            cv2.imwrite(f'{output_path}/{label}/orig_{image_name}',
                        resize(image=image)['image'])

            for c in range(cnt):
                cv2.imwrite(f'{output_path}/{label}/aug{c}_{image_name}',
                            augment(image=image)['image'])

    return output_path
# Example #12 (score: 0)
def get_training_augmentation():
    """Blur/flip/brightness/contrast/distortion pipeline, resized to INPUT_SIZE."""
    return albu.Compose([
        albu.Blur(p=0.5),
        albu.Flip(p=0.5),
        albu.RandomBrightness(p=0.5),
        albu.RandomContrast(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0,
                              shift_limit=0.1, p=0.5, border_mode=0),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
        albu.Resize(*INPUT_SIZE),
    ])
# Example #13 (score: 0)
 def __init__(self, image_dir):
     """Load the images under *image_dir* and build the augmentation pipeline."""
     self.load_images(image_dir)
     # Fixed-canvas resize, crop to the model input (IMG_SIZE), then
     # photometric jitter and orientation changes.
     self.aug = A.Compose([
         A.Resize(1100, 1300),
         A.RandomCrop(IMG_SIZE[0], IMG_SIZE[1], p=1),
         A.OneOf([
             A.RandomGamma((40, 200), p=1),
             A.RandomBrightness(p=1),
             A.RandomContrast(p=1),
             A.RGBShift(p=1),
         ],
                 p=0.8),
         A.VerticalFlip(p=0.5),
         A.RandomRotate90(p=1),
     ])
     # Presumably used to bring targets/predictions to MASK_SIZE —
     # verify against the caller.
     self.resize_to_mask = A.Resize(MASK_SIZE[0], MASK_SIZE[1])
# Example #14 (score: 0)
def build_transforms(cfg, is_train, debug=False):
    """Build the resize / augment / normalize pipeline from the config.

    Augmentations are appended only for training runs with
    ``cfg.augmentation.enable`` set. With ``debug=True`` the pipeline
    stops before normalization and tensor conversion.
    """
    steps = [albu.Resize(*cfg.inputs.size)]

    if is_train and cfg.augmentation.enable:
        steps += [
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.RandomRotate90(p=0.5),
            albu.Transpose(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.2, rotate_limit=0,
                                  shift_limit=0.2, p=0.2, border_mode=0),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            albu.IAAPerspective(p=0.5),
            albu.OneOf([albu.CLAHE(p=1),
                        albu.RandomBrightness(p=1),
                        albu.RandomGamma(p=1)], p=0.9),
            albu.OneOf([albu.IAASharpen(p=1),
                        albu.Blur(blur_limit=3, p=1),
                        albu.MotionBlur(blur_limit=3, p=1)], p=0.9),
            albu.OneOf([albu.RandomContrast(p=1),
                        albu.HueSaturationValue(p=1)], p=0.9),
            albu.Compose([albu.VerticalFlip(p=0.5),
                          albu.RandomRotate90(p=0.5)]),
        ]

    if debug:
        return albu.Compose(steps)

    steps.append(albu.Normalize(**cfg.inputs.normalize))
    steps.append(ToTensorV2(transpose_mask=True))
    return albu.Compose(steps)
def bboxes_augmentation(cfg):
    """Build a bbox-aware augmentation pipeline from a config dict.

    Uses pascal_voc bbox parameters when ``cfg['bbox']`` is truthy.
    """
    transforms = []

    if cfg['smallest'] > 0:
        transforms.append(
            albumentations.SmallestMaxSize(max_size=cfg['smallest'], p=1.0))

    if cfg.get('random_crop', 0):
        if cfg.get('safe_crop', 0):
            # Keeps every bbox fully inside the crop.
            transforms.append(
                albumentations.RandomSizedBBoxSafeCrop(height=cfg['height'],
                                                       width=cfg['width'],
                                                       p=1.))
        else:
            transforms.append(
                albumentations.RandomSizedCrop(cfg['min_max_height'],
                                               height=cfg['height'],
                                               width=cfg['width'],
                                               p=1.0))

    if cfg.get('flip', 0):
        transforms.append(albumentations.HorizontalFlip(p=0.5))

    transforms.extend([
        albumentations.RandomBrightness(limit=0.2, p=0.3),
        albumentations.RandomContrast(limit=0.2, p=0.3),
        albumentations.Blur(blur_limit=5, p=0.2),
        albumentations.GaussNoise(var_limit=(5, 20), p=0.2),
        albumentations.ChannelShuffle(p=0.2),
    ])

    if cfg['bbox']:
        bbox_params = {
            'format': 'pascal_voc',
            'min_visibility': cfg['min_visibility'],
            'label_fields': ['labels'],
            'min_area': cfg['min_area'],
        }
    else:
        bbox_params = {}

    return Compose(transforms, bbox_params=bbox_params, p=1.)
def get_training_augmentation():
    """Heavy segmentation training pipeline.

    Relies on module-level globals ``padheight``, ``padwidth``,
    ``inputheight`` and ``inputwidth`` for the pad / resize / crop sizes.
    """
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.2),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=50,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        # Candidate sizes: 320 384 448 512 640
        #        albu.GridDistortion(num_steps=2, distort_limit=0.2, interpolation=1, border_mode=0, value=None, always_apply=False, p=0.5),
        albu.PadIfNeeded(min_height=padheight,
                         min_width=padwidth,
                         always_apply=True,
                         border_mode=0),
        albu.Resize(height=padheight, width=padwidth),
        albu.RandomCrop(height=inputheight,
                        width=inputwidth,
                        always_apply=True),  # the final network input size
        albu.ChannelShuffle(p=0.1),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightness(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
# Example #17 (score: 0)
def get_transforms(imsize, train=True, local=True, n_patches=None, is_stack=False):
    """Build patch-level ('local') or image-level ('global') transforms.

    Args:
        imsize: side length for the final square Resize.
        train: eval pipeline (resize/invert/normalize) when False.
        local: for training, per-patch geometric + noise transforms when
            True; otherwise the photometric + normalize pipeline.
        n_patches: number of stacked patches; only read when ``is_stack``
            is True (registers image1..image{n_patches-1} as additional
            targets so the same transform hits the whole stack).
        is_stack: when True, the per-patch flips/transpose are dropped and
            the transform is applied consistently across the patch stack.
    """
    if train:
        if local: # local transforms are applied randomly for each patch
            return albu.Compose([
                albu.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=90, p=0.7, border_mode=cv2.BORDER_CONSTANT, value=(255,255,255)),
                albu.HorizontalFlip(p=0.5),
                albu.VerticalFlip(p=0.5),
                albu.Transpose(),
                albu.GaussNoise(p=0.2), # Super Slow!(?)
                albu.CoarseDropout(6, 20, 20, 2, p=0.3, fill_value=255),
                # albu.RandomGridShuffle((3,3), p=0.5),
                # albu.JpegCompression(quality_lower=70, p=0.3),

                # albu.Resize(imsize, imsize),
            ])
        else: #Global transforms are applied consistently across all patches
            return albu.Compose(([
                albu.HorizontalFlip(p=0.5),
                albu.VerticalFlip(p=0.5),
                albu.Transpose(p=0.25),
            ] if not is_stack else []) + [
                albu.RandomGamma((90, 110), p=0.25),
                albu.RandomContrast(p=0.25, limit=0.1),
                albu.RandomBrightness(p=0.25, limit=0.1),
                albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=15, val_shift_limit=5, p=0.25),
                albu.Resize(imsize, imsize),
                albu.InvertImg(always_apply=True),
                # albu.Lambda(to_hsv, always_apply=True),
                # albu.Normalize(always_apply=True, mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5]),
                # albu.Normalize(always_apply=True, mean=[0.5, 0.2, 0.85], std=[0.3, 0.3, 0.15]), # HSV normalization
                # albu.Normalize(always_apply=True, mean=[0.81, 0.6, 0.73], std=[0.4, 0.51, 0.41]), # Basic normalization
                albu.Normalize(always_apply=True, mean=[1-0.85, 1-0.71, 1-0.80], std=[0.16, 0.27, 0.18]), # Invert Normalization
                ToTensor()
            ],
            additional_targets={f'image{i}':'image' for i in range(n_patches-1)} if is_stack else None)
    else:
        return albu.Compose([
            albu.Resize(imsize, imsize),
            albu.InvertImg(always_apply=True),
            # albu.Lambda(to_hsv, always_apply=True),
            # albu.Normalize(always_apply=True, mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5]),
            # albu.Normalize(always_apply=True, mean=[0.5, 0.2, 0.85], std=[0.3, 0.3, 0.15]),
            # albu.Normalize(always_apply=True, mean=[0.81, 0.6, 0.73], std=[0.4, 0.51, 0.41]),
            albu.Normalize(always_apply=True, mean=[1-0.85, 1-0.71, 1-0.80], std=[0.16, 0.27, 0.18]),
            ToTensor()
        ],
        additional_targets={f'image{i}':'image' for i in range(n_patches-1)} if is_stack else None)
# Example #18 (score: 0)
def get_albumentations_transforms(mode):
    """
    Composes albumentations transforms.

    Returns the full list of transforms when mode is "train" and an
    empty pass-through pipeline when mode is "val"; both carry the same
    pascal_voc bbox parameters.

    Args:
        mode: one of "train", "val".

    Raises:
        ValueError: for any other mode (previously the function fell
            through and raised UnboundLocalError on return).
    """
    bbox_params = BboxParams(
        format="pascal_voc",
        min_area=0.0,
        min_visibility=0.0,
        label_fields=["category_id"],
    )
    # compose validation transforms
    if mode == "val":
        return compo([], bbox_params=bbox_params)
    # compose train transforms
    # TODO: make transformation parameters configurable from yml
    if mode == "train":
        return compo(
            [
                A.RandomContrast(p=0.5),
                A.ShiftScaleRotate(p=0.8),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.RandomBrightnessContrast(p=0.5),
                A.Sharpen(p=0.5),
            ],
            bbox_params=bbox_params,
        )
    raise ValueError(f"mode must be 'train' or 'val', got {mode!r}")
# Example #19 (score: 0)
    def __init__(self, cfg):
        """Prepare the dataset and build plain / augmented transform pipelines.

        Args:
            cfg: config object; ``cfg.mean`` and ``cfg.std`` feed the
                Normalize steps (other fields are read by ``self.prepare``,
                not visible here).
        """
        self.cfg = cfg
        self.data = self.prepare()
        self.mean = self.cfg.mean
        self.std = self.cfg.std

        # Lightweight pipeline: resize + flip + normalize only.
        self.normal_transform = A.Compose([
            A.Resize(384, 288, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.Normalize(p=1.0, mean=self.mean, std=self.std)
        ])

        # Heavy pipeline: noise, geometric jitter, contrast/color groups,
        # fog, and a perspective/distortion group, then normalize.
        self.augment_transform = A.Compose([
            A.Resize(384, 288, p=1.0),
            A.HorizontalFlip(p=0.7),
            A.GaussNoise(p=0.5),
            A.ShiftScaleRotate(shift_limit=0.1,
                               scale_limit=0.25,
                               rotate_limit=20,
                               p=0.6,
                               border_mode=0),
            A.OneOf([
                A.CLAHE(p=0.5),
                A.Compose([
                    A.RandomBrightness(limit=0.5, p=0.6),
                    A.RandomContrast(limit=0.4, p=0.6),
                    A.RandomGamma(p=0.6),
                ])
            ],
                    p=0.65),
            A.OneOf([
                A.HueSaturationValue(10, 20, 10, p=1.0),
                A.RGBShift(p=1.0),
                A.Emboss(p=1.0),
            ],
                    p=0.5),
            A.RandomFog(fog_coef_lower=0.3, fog_coef_upper=0.3, p=0.3),
            A.OneOf([
                A.Perspective(p=1.0, scale=(0.05, 0.1)),
                A.GridDistortion(p=1.0, distort_limit=0.25, border_mode=0),
                A.OpticalDistortion(
                    p=1.0, shift_limit=0.1, distort_limit=0.1, border_mode=0)
            ],
                    p=0.65),
            A.Normalize(p=1.0, mean=self.mean, std=self.std),
        ])
# Example #20 (score: 0)
def aug_train(p=1):
    """Training pipeline: flip, one geometric jitter, one photometric jitter.

    NOTE(review): ``np.random.uniform(0, 0.1)`` is evaluated once when the
    pipeline is built, so GridDistortion's distort_limit stays fixed for
    the lifetime of the returned Compose — confirm this is intended.
    """
    geometric = aug.OneOf([
        aug.Compose([
            aug.ShiftScaleRotate(rotate_limit=0, p=1),
            aug.RandomSizedCrop((88, 128), 128, 128),
        ]),
        aug.GridDistortion(num_steps=10,
                           distort_limit=np.random.uniform(0, 0.1), p=1),
        aug.ShiftScaleRotate(scale_limit=0, rotate_limit=10, p=1),
    ], p=0.5)

    photometric = aug.OneOf([
        aug.RandomBrightness(limit=0.08, p=1),
        aug.RandomContrast(limit=0.08, p=1),
        aug.RandomGamma(gamma_limit=(92, 108), p=1),
    ], p=0.5)

    return aug.Compose([aug.HorizontalFlip(p=0.5), geometric, photometric], p=p)
    def __init__(self, mode=None, resize=224):
        """Build ``self.transform`` for the requested *mode*.

        Modes:
            'train_tfms_mask'  -- perspective/rotate + photometric jitter.
            'train_age_gender' -- rotate + grid-shuffle/perspective + noise.
            'valid_tfms'       -- resize + normalize only.

        NOTE(review): any other mode (including the default None) leaves
        ``self.transform`` unset -- confirm callers always pass a known mode.

        Args:
            mode: pipeline selector, see above.
            resize: output square side length.
        """
        # assert type(resize) == list, f'resize type is not list '
        if mode == 'train_tfms_mask':
            self.transform = A.Compose([
                A.OneOf([
                    A.Perspective(p=1.0),
                    A.Rotate(limit=20, p=1.0, border_mode=1),
                ],
                        p=0.5),
                A.OneOf([
                    A.RandomBrightness(p=1.0),
                    A.HueSaturationValue(p=1.0),
                    A.RandomContrast(p=1.0),
                ],
                        p=0.5),
                A.Compose([
                    A.Resize(resize, resize),
                    A.Normalize(),
                ])
            ])
        elif mode == 'train_age_gender':

            self.transform = A.Compose([
                A.Rotate(limit=20, p=0.5, border_mode=1),
                A.OneOf(
                    [
                        A.RandomGridShuffle(grid=(2, 2),
                                            p=1.0),  # not using for gender
                        # A.RandomGridShuffle(grid=(4, 2), p=1.0),
                        A.Perspective(p=1.0)
                    ],
                    p=0.5),
                A.GaussNoise(p=0.5),
                A.Compose([
                    A.Resize(resize, resize),
                    A.Normalize(),
                ])
            ])
        elif mode == 'valid_tfms':
            self.transform = A.Compose([
                A.Resize(resize, resize),
                A.Normalize(),
            ])
 def __init__(self, data, phase_coeff):
     """Index the data directory and build the augmentation pipeline.

     Args:
         data: root folder; its sorted listing becomes ``self.folder``.
         phase_coeff: stored as-is for later use (not read here).
     """
     self.root = data
     self.folder = os.listdir(self.root)
     self.folder.sort()
     # Geometric jitter, 220x220 crop, flips, elastic warp, photometric
     # jitter / blur / noise, then a final 256x256 resize.
     self.aug = A.Compose([
         A.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=45, interpolation=1, border_mode=4, always_apply=False, p=0.3),
         A.RandomCrop(220, 220, always_apply=False, p=1.0),
         A.HorizontalFlip(always_apply=False, p=0.2),
         A.VerticalFlip(always_apply=False, p=0.2),
         A.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, interpolation=1, border_mode=4, always_apply=False, p=0.5),
         A.RandomBrightness(limit=0.2, always_apply=False, p=0.2),
         A.RandomContrast(limit=0.2, always_apply=False, p=0.2),
         A.MedianBlur(blur_limit=5, always_apply=False, p=0.2),
         A.GaussNoise(var_limit=(10, 50), always_apply=False, p=0.2),
         A.Resize(256, 256),
     ])
     self.phase_coeff = phase_coeff
def ISIC2020_get_transforms(image_size, is_training):
    """Return the train or eval transform pipeline for ISIC2020.

    Recipe from the 1st-place SIIM-ISIC melanoma solution:
    https://github.com/haqishen/SIIM-ISIC-Melanoma-Classification-1st-Place-Solution/blob/master/dataset.py
    """
    if not is_training:
        return albumentations.Compose([
            albumentations.Resize(image_size, image_size),
            albumentations.Normalize()
        ])

    hole = int(image_size * 0.375)
    return albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=5),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.GaussianBlur(blur_limit=5),
            albumentations.GaussNoise(var_limit=(5.0, 30.0)),
        ], p=0.7),
        albumentations.OneOf([
            albumentations.OpticalDistortion(distort_limit=1.0),
            albumentations.GridDistortion(num_steps=5, distort_limit=1.),
            albumentations.ElasticTransform(alpha=3),
        ], p=0.7),
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(
            hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(
            shift_limit=0.1, scale_limit=0.1, rotate_limit=15,
            border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        # One large cutout hole: 37.5% of the image side.
        albumentations.Cutout(max_h_size=hole, max_w_size=hole,
                              num_holes=1, p=0.7),
        albumentations.Normalize()
    ])
def build_transform(size, mode):
    """Return the albumentations pipeline for one of 'train', 'val', 'test'.

    Args:
        size: output square side length.
        mode: 'train' (resize + flip/rotate + photometric + normalize),
            'val' (resize + normalize) or 'test' (resize + flip/rotate +
            brightness + normalize).

    Raises:
        ValueError: for any other mode (previously the function fell
            through and raised UnboundLocalError on the final return).
    """
    border_mode = cv2.BORDER_CONSTANT
    # Dataset statistics for the commented-out Normalize alternative below.
    norm_mean = (0.68865128, 0.63029681, 0.59308879)
    norm_std = (0.16007041, 0.13788241, 0.10374681)
    if mode == 'train':
        transform = albumentations.Compose([
            albumentations.Resize(size, size),
            albumentations.Flip(),
            albumentations.Rotate(border_mode=border_mode),
            albumentations.ShiftScaleRotate(rotate_limit=15,
                                            scale_limit=0.10,
                                            border_mode=border_mode),
            albumentations.RandomBrightness(limit=0.5),
            albumentations.RandomContrast(limit=0.3),
            albumentations.RandomGamma(),
            albumentations.Normalize(),
            #albumentations.Normalize(mean=norm_mean, std=norm_std)
        ])
    elif mode == 'val':
        transform = albumentations.Compose([
            albumentations.Resize(size, size),
            albumentations.Normalize(),
            #albumentations.Normalize(mean=norm_mean, std=norm_std),
        ])
    elif mode == 'test':
        transform = albumentations.Compose([
            albumentations.Resize(size, size),
            albumentations.Flip(),
            albumentations.Rotate(border_mode=border_mode),
            albumentations.RandomBrightness(limit=0.2),
            albumentations.Normalize(),
            #albumentations.Normalize(mean=norm_mean, std=norm_std),
        ])
    else:
        raise ValueError(f"unknown mode: {mode!r}")
    return transform
# Example #25 (score: 0)
def get_transforms(image_size):
    """Return ``(train, val)`` albumentations pipelines for square images
    of side ``image_size``.

    The training pipeline applies geometric flips/transposes, photometric
    jitter, one randomly-chosen blur/noise op, one randomly-chosen
    distortion op, CLAHE, HSV jitter, shift/scale/rotate, resize, a single
    large Cutout hole and normalization; validation only resizes and
    normalizes.
    """
    # Cutout removes one square hole covering 37.5% of each side.
    hole_size = int(image_size * 0.375)

    blur_or_noise = albumentations.OneOf([
        albumentations.MotionBlur(blur_limit=5),
        albumentations.MedianBlur(blur_limit=5),
        albumentations.GaussianBlur(blur_limit=5),
        albumentations.GaussNoise(var_limit=(5.0, 30.0)),
    ], p=0.7)

    distortion = albumentations.OneOf([
        albumentations.OpticalDistortion(distort_limit=1.0),
        albumentations.GridDistortion(num_steps=5, distort_limit=1.),
        albumentations.ElasticTransform(alpha=3),
    ], p=0.7)

    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        blur_or_noise,
        distortion,
        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10,
                                          sat_shift_limit=20,
                                          val_shift_limit=10,
                                          p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1,
                                        scale_limit=0.1,
                                        rotate_limit=15,
                                        border_mode=0,
                                        p=0.85),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=hole_size,
                              max_w_size=hole_size,
                              num_holes=1,
                              p=0.7),
        albumentations.Normalize(),
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize(),
    ])

    return transforms_train, transforms_val
Exemple #26
0
def make_transforms(phase,
                    mean=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225)):
    """Compose the augmentation pipeline for one phase.

    For ``phase == "train"`` the pipeline prepends a horizontal flip, a
    ±15° rotation and brightness/contrast jitter; every phase ends with a
    512x512 resize, normalization with ``mean``/``std`` (ImageNet defaults)
    and conversion to a tensor.
    """
    steps = []
    if phase == "train":
        steps += [
            albu.HorizontalFlip(p=0.5),
            albu.Rotate(limit=15),
            albu.RandomContrast(limit=0.2),
            albu.RandomBrightness(limit=0.2),
        ]
    steps += [
        albu.Resize(512, 512),
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
Exemple #27
0
def complex_preprocess():
    """Build a flip + gamma/noise/blur/contrast augmentation pipeline that
    also transforms COCO-format bounding boxes (labels in 'class_labels')."""
    steps = [
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.RandomGamma((40, 120)),
        A.GaussNoise(p=0.3),
        A.Blur(blur_limit=8, p=0.3),
        A.RandomContrast((-0.4, 0.4)),
    ]
    bbox_cfg = A.BboxParams(format='coco', label_fields=['class_labels'])
    return A.Compose(steps, bbox_params=bbox_cfg)
Exemple #28
0
def aug_train(p=1, shift_limit=0.0625, scale_limit=0.1):
    """Training augmentation: flips, mild shift/scale/rotate, at most one
    photometric jitter (brightness, contrast or gamma) and CLAHE.

    ``p`` gates the whole composed pipeline; ``shift_limit`` and
    ``scale_limit`` are forwarded to ShiftScaleRotate.
    """
    photometric = aug.OneOf([
        aug.RandomBrightness(limit=0.1, p=1),
        aug.RandomContrast(limit=0.1, p=1),
        aug.RandomGamma(gamma_limit=(90, 110), p=1),
    ], p=0.5)

    pipeline = [
        aug.HorizontalFlip(p=0.5),
        aug.VerticalFlip(p=0.5),
        ShiftScaleRotate(shift_limit=shift_limit,
                         scale_limit=scale_limit,
                         rotate_limit=8,
                         p=0.7),
        photometric,
        aug.CLAHE(clip_limit=2.0, tile_grid_size=(8, 8), p=0.5),
    ]
    return aug.Compose(pipeline, p=p)
    def paired_aug(self, raw, lbl):
        """Apply one shared spatial/photometric augmentation to an image and
        its mask so the pair stays aligned; returns ``(image, mask)``.

        The whole pipeline fires with probability 0.8; otherwise the inputs
        pass through unchanged.
        """
        transform = A.Compose([
            A.HorizontalFlip(),
            A.RandomRotate90(p=0.5),
            A.VerticalFlip(),
            A.Transpose(),
            A.ElasticTransform(
                p=0.75, alpha=100, sigma=240 * 0.05, alpha_affine=100 * 0.03),
            A.RandomGamma(p=1, gamma_limit=(30, 236)),
            A.RandomContrast(p=0.8),
            A.GaussNoise(p=0.5),
            A.Blur(p=0.5),
        ], p=0.8)

        augmented = transform(image=raw, mask=lbl)
        return augmented['image'], augmented['mask']
Exemple #30
0
 def __initialize_image_augmentation(image_shape):
     """Build the bbox-aware image augmentation pipeline.

     Composes flips, brightness/gamma jitter, elastic deformation, JPEG
     compression artifacts and contrast jitter, then resizes to
     (image_shape[0], image_shape[1]). Bounding boxes are passed through in
     COCO format with labels read from the "category_id" field.

     NOTE(review): RandomBrightness(limit=1.2) and RandomContrast(limit=1)
     are far stronger than the conventional ~0.2, and gamma_limit=37 is a
     scalar where albumentations conventionally takes a (low, high) pair
     such as (80, 120) — confirm these values are intentional and behave
     sensibly in the installed library version.
     """
     pipeline = augmentator.Compose([
         augmentator.VerticalFlip(p=0.5),
         augmentator.HorizontalFlip(p=0.5),
         augmentator.RandomBrightness(limit=1.2, p=0.5),
         augmentator.RandomGamma(gamma_limit=37, p=0.5),
         augmentator.ElasticTransform(
             alpha=203, sigma=166, alpha_affine=106, p=0.5),
         augmentator.JpegCompression(
             quality_lower=25, quality_upper=100, p=0.5),
         augmentator.RandomContrast(limit=1, p=0.5),
         augmentator.Resize(image_shape[0], image_shape[1], p=1)
     ],
                                    bbox_params={
                                        'format': 'coco',
                                        'label_fields': ["category_id"]
                                    })
     return pipeline