import albumentations as A
import numpy as np


def test_channel_dropout():
    img = np.ones((10, 10, 3), dtype=np.float32)

    aug = A.ChannelDropout(channel_drop_range=(1, 1), always_apply=True)  # Drop one channel

    transformed = aug(image=img)["image"]

    # With one of three channels zeroed, the per-channel maxima sum to 2.
    assert sum(transformed[:, :, c].max() for c in range(img.shape[2])) == 2

    aug = A.ChannelDropout(channel_drop_range=(2, 2), always_apply=True)  # Drop two channels
    transformed = aug(image=img)["image"]

    # With two channels zeroed, only one channel keeps its maximum of 1.
    assert sum(transformed[:, :, c].max() for c in range(img.shape[2])) == 1
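
Both assertions rely on ChannelDropout writing fill_value (0 by default) into each dropped channel. A minimal standalone sketch of that behavior, assuming only numpy and albumentations:

import albumentations as A
import numpy as np

img = np.ones((4, 4, 3), dtype=np.float32)
out = A.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=1.0)(image=img)["image"]
# Exactly one of the three channels is now all zeros; the others are untouched.
assert sorted(out[:, :, c].max() for c in range(3)) == [0.0, 1.0, 1.0]
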
def get_tfms_albu(p=0.6):
    return A.Compose([
        A.HorizontalFlip(),
        A.ShiftScaleRotate(rotate_limit=15),
        A.ChannelDropout(p=0.1),
        A.RandomRain(p=0.1),
        A.GridDistortion(p=0.2)
    ], p=p)  # the outer p gates the whole pipeline: applied with probability p (0.6 by default)
def get_hard_augmentations(image_size):
    return A.Compose([
        A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1,
                               rotate_limit=45,
                               border_mode=cv2.BORDER_CONSTANT, value=0),
            A.ElasticTransform(alpha_affine=0,
                               alpha=35,
                               sigma=5,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.OpticalDistortion(distort_limit=0.11, shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                             value=0),
            A.NoOp()
        ]),

        A.OneOf([

            A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75), image_size[0]),
                              height=image_size[0],
                              width=image_size[1], p=0.3),
            A.NoOp()
        ]),

        A.ISONoise(p=0.5),

        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),

        A.OneOf([
            A.RGBShift(r_shift_limit=40, b_shift_limit=30, g_shift_limit=30),
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=10),
            A.ToGray(p=0.2),
            A.NoOp()
        ]),

        A.ChannelDropout(),
        A.RandomGridShuffle(p=0.3),

        # D4: RandomRotate90 + Transpose jointly cover the 8 dihedral symmetries of the square
        A.Compose([
            A.RandomRotate90(),
            A.Transpose()
        ])
    ])
def get_middle_man_data_aug(mean, standard_deviation, height, width):
    return [
        A.Normalize(mean=mean, std=standard_deviation, always_apply=True, p=1.0),
        A.PadIfNeeded(min_height=height+8, min_width=width+8),
        A.RandomCrop(height, width, always_apply=True, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.GridDistortion(num_steps=5, distort_limit=0.3, interpolation=1,
                         border_mode=4, value=None, mask_value=None,
                         always_apply=False, p=0.5),
        A.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, always_apply=False, p=0.5),
        A.Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=mean, always_apply=False, p=0.5),
        ToTensor()
    ]
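
A minimal usage sketch for the helper above, wrapping the returned list in A.Compose (the argument values here are illustrative, not from the original source):

transform = A.Compose(get_middle_man_data_aug(mean=(0.5, 0.5, 0.5),
                                              standard_deviation=(0.25, 0.25, 0.25),
                                              height=32, width=32))
augmented = transform(image=image)["image"]  # image: HxWx3 numpy array
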
    def get_train_transform(self):
        if self.fullsizeimage:
            resize_height = 640
            resize_width = 1280
        else:
            resize_height = 512
            resize_width = 512

        return A.Compose(
            [
                A.OneOf(
                    [
                        A.RandomBrightnessContrast(p=0.5),
                        A.RGBShift(p=0.5),
                        A.HueSaturationValue(p=0.5),
                        A.ToGray(p=0.5),
                        A.ChannelDropout(p=0.5),
                        A.ChannelShuffle(p=0.5),
                    ],
                    p=0.5,
                ),
                A.OneOf(
                    [
                        A.Blur(p=0.5),
                        A.GaussNoise(p=0.5),
                        A.IAASharpen(p=0.5),
                    ],
                    p=0.5,
                ),
                A.OneOf(
                    [
                        A.Rotate(limit=20, p=0.5),
                        A.HorizontalFlip(p=0.5),
                        A.VerticalFlip(p=0.5),
                    ],
                    p=0.5,
                ),
                A.Resize(height=resize_height, width=resize_width, p=1.0),
                ToTensorV2(p=1.0),
            ],
            bbox_params=A.BboxParams(
                format="pascal_voc",
                min_area=0,
                min_visibility=0,
                label_fields=["labels"],
            ),
        )
Example 6
    def __init__(self):
        self.policy = A.Compose([
            A.OneOf([
                A.Rotate(180),
                A.Flip(),
            ], p=0.3),
            A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
            A.OneOf([
                A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
                A.GridDropout(ratio=0.3, p=0.3),
            ]),
            A.OneOf([
                A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
                A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
            ], p=0.2),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
                A.ISONoise()
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.3),
                A.MedianBlur(blur_limit=5, p=0.3),
                A.Blur(blur_limit=5, p=0.3),
                A.GaussianBlur(p=0.3)
            ], p=0.2),
            A.OneOf([
                A.ChannelShuffle(p=0.3),
                A.HueSaturationValue(p=0.3),
                A.ToGray(p=0.3),
                A.ChannelDropout(p=0.3),
                A.InvertImg(p=0.1)
            ], p=0.2),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.2),
                A.IAAPiecewiseAffine(p=0.3),
            ], p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
            ], p=0.2),
            A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
            A.Solarize(p=0.2),
        ])
Example 7
def trainset_albumentations(train_dataset_x, train_y):

  ch_means = (0.48043839, 0.44820218, 0.39760034)
  ch_std = (0.27698959, 0.26908774, 0.28216029)

  train_albumentations_transform = A.Compose([
      A.Rotate((-20.0, 20.0)),
      # A.CoarseDropout(0.2),
      A.HorizontalFlip(),
      A.ChannelDropout(channel_drop_range=(1, 1)),
      A.RandomBrightness(0.2),
      A.Normalize(
          mean=[0.49139968, 0.48215841, 0.44653091],
          std=[0.24703223, 0.24348513, 0.26158784],
      ),
      A.Cutout(num_holes=1, max_h_size=10, max_w_size=10, always_apply=True),
      ToTensor()
  ])
  return AlbumentationsDataset(
      file_paths=train_dataset_x,
      labels=train_y,
      transform=train_albumentations_transform,
  )
Example 8
def get_training_augmentation(size):
    train_transform = [
        A.LongestMaxSize(max_size=size, always_apply=True),
        A.PadIfNeeded(min_height=size,
                      min_width=size,
                      always_apply=True,
                      border_mode=0),
        # A.RandomCrop(height=size, width=size, always_apply=True),

        # A.VerticalFlip(p=0.5),
        # A.HorizontalFlip(p=0.5),
        # A.RandomRotate90(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.2,
                           rotate_limit=0,
                           shift_limit=0.2,
                           p=0.1,
                           border_mode=0),
        A.IAAPerspective(p=0.1),
        A.CoarseDropout(p=0.1),
        A.ChannelDropout(p=0.1),
        A.RGBShift(p=0.1),
        A.OneOf(
            [A.OpticalDistortion(p=0.5),
             A.GridDistortion(p=0.5)],
            p=0.1,
        ),
        A.OneOf(
            [
                A.CLAHE(p=0.5),
                A.RandomBrightness(p=0.5),
                A.RandomGamma(p=0.5),
            ],
            p=0.5,
        ),
        A.OneOf(
            [
                A.GaussianBlur(p=0.1),
                A.IAASharpen(p=0.5),
                A.Blur(blur_limit=5, p=0.5),
                A.MotionBlur(blur_limit=5, p=0.5),
            ],
            p=0.5,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=0.5),
                A.HueSaturationValue(p=0.5),
            ],
            p=0.1,
        ),
        A.Lambda(mask=round_clip_0_1),
        A.Cutout(num_holes=8,
                 max_h_size=20,
                 max_w_size=20,
                 fill_value=0,
                 always_apply=False,
                 p=0.2),
        A.CoarseDropout(max_holes=8,
                        max_height=20,
                        max_width=20,
                        min_holes=None,
                        min_height=None,
                        min_width=None,
                        fill_value=0,
                        always_apply=False,
                        p=0.2),
        # A.GlassBlur(sigma=0.7, max_delta=4, iterations=2, always_apply=False, mode='fast', p=0.2)
    ]
    return A.Compose(train_transform)
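
Because the pipeline above includes a mask Lambda (round_clip_0_1), it is presumably applied to image/mask pairs; a hedged usage sketch (the size value and array names are illustrative):

aug = get_training_augmentation(size=320)
sample = aug(image=image, mask=mask)  # image, mask: HxW(xC) numpy arrays
image_aug, mask_aug = sample["image"], sample["mask"]
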
Example 9
def get_transform_imagenet(use_albu_aug):
    if use_albu_aug:
        train_transform = al.Compose([
            # al.Flip(p=0.5),
            al.Resize(256, 256, interpolation=2),
            al.RandomResizedCrop(224,
                                 224,
                                 scale=(0.08, 1.0),
                                 ratio=(3. / 4., 4. / 3.),
                                 interpolation=2),
            al.HorizontalFlip(),
            al.OneOf(
                [
                    al.OneOf(
                        [
                            al.ShiftScaleRotate(
                                border_mode=cv2.BORDER_CONSTANT,
                                rotate_limit=30),  # , p=0.05),
                            al.OpticalDistortion(
                                border_mode=cv2.BORDER_CONSTANT,
                                distort_limit=5.0,
                                shift_limit=0.1),
                            # , p=0.05),
                            al.GridDistortion(border_mode=cv2.BORDER_CONSTANT
                                              ),  # , p=0.05),
                            al.ElasticTransform(
                                border_mode=cv2.BORDER_CONSTANT,
                                alpha_affine=15),  # , p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.RandomGamma(),  # p=0.05),
                            al.HueSaturationValue(),  # p=0.05),
                            al.RGBShift(),  # p=0.05),
                            al.CLAHE(),  # p=0.05),
                            al.ChannelShuffle(),  # p=0.05),
                            al.InvertImg(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.RandomSnow(),  # p=0.05),
                            al.RandomRain(),  # p=0.05),
                            al.RandomFog(),  # p=0.05),
                            al.RandomSunFlare(num_flare_circles_lower=1,
                                              num_flare_circles_upper=2,
                                              src_radius=110),
                            # p=0.05, ),
                            al.RandomShadow(),  # p=0.05),
                        ],
                        p=0.1),
                    al.RandomBrightnessContrast(p=0.1),
                    al.OneOf(
                        [
                            al.GaussNoise(),  # p=0.05),
                            al.ISONoise(),  # p=0.05),
                            al.MultiplicativeNoise(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.ToGray(),  # p=0.05),
                            al.ToSepia(),  # p=0.05),
                            al.Solarize(),  # p=0.05),
                            al.Equalize(),  # p=0.05),
                            al.Posterize(),  # p=0.05),
                            al.FancyPCA(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            # al.MotionBlur(blur_limit=1),
                            al.Blur(blur_limit=[3, 5]),
                            al.MedianBlur(blur_limit=[3, 5]),
                            al.GaussianBlur(blur_limit=[3, 5]),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.CoarseDropout(),  # p=0.05),
                            al.Cutout(),  # p=0.05),
                            al.GridDropout(),  # p=0.05),
                            al.ChannelDropout(),  # p=0.05),
                            al.RandomGridShuffle(),  # p=0.05),
                        ],
                        p=0.1),
                    al.OneOf(
                        [
                            al.Downscale(),  # p=0.1),
                            al.ImageCompression(quality_lower=60),  # , p=0.1),
                        ],
                        p=0.1),
                ],
                p=0.5),
            al.Normalize(),
            ToTensorV2()
        ])
    else:
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    if use_albu_aug:
        train_transform = MultiDataTransformAlbu(train_transform)
    else:
        train_transform = MultiDataTransform(train_transform)

    return train_transform, test_transform
Example 10
    def __call__(self, data):
        rgb, thermal, depth, audio, label, id = data

        rgb = rgb.astype(np.float32)
        height, width, _ = rgb.shape
        albumentations_transform_pixel = {
            'Blur': albumentations.Blur(),
            #'CLAHE':albumentations.CLAHE(),
            'ChannelDropout': albumentations.ChannelDropout(),
            'ChannelShuffle': albumentations.ChannelShuffle(),
            'CoarseDropout': albumentations.CoarseDropout(),
            #'Equalize':albumentations.Equalize(),
            #'FancyPCA':albumentations.FancyPCA(),
            'GaussNoise': albumentations.GaussNoise(),
            'GaussianBlur': albumentations.GaussianBlur(),
            #'GlassBlur':albumentations.GlassBlur(),
            'HueSaturationValue': albumentations.HueSaturationValue(),
            'IAAAdditiveGaussianNoise':
            albumentations.IAAAdditiveGaussianNoise(),
            #'ISONoise':albumentations.ISONoise(),
            'RGBShift': albumentations.RGBShift(),
            'RandomBrightnessContrast':
            albumentations.RandomBrightnessContrast(),
            'RandomFog': albumentations.RandomFog(),
            #'RandomGamma':albumentations.RandomGamma(),
            'RandomRain': albumentations.RandomRain(),
            'RandomShadow': albumentations.RandomShadow(),
            'RandomSnow': albumentations.RandomSnow(),
            'RandomSunFlare': albumentations.RandomSunFlare(),
            'Solarize': albumentations.Solarize(),
        }
        albumentations_transform_bbox = {
            #'HorizontalFlip':albumentations.HorizontalFlip(),
            #'VerticalFlip':albumentations.VerticalFlip(),
            #'CenterCrop':albumentations.CenterCrop(height=height-10, width=width-10, p=0.5),
            #'RandomCropNearBBox':albumentations.RandomCropNearBBox(p=0.5),
            #'Crop':albumentations.Crop(x_min=10, y_min =10, y_max=height-10, x_max=width-10, p=0.5),
            #'ElasticTransform':albumentations.ElasticTransform(),
            #'ShiftScaleRotate':albumentations.ShiftScaleRotate(),
        }
        transform = np.random.choice(
            ['None'] + list(albumentations_transform_pixel.keys()) +
            list(albumentations_transform_bbox.keys()))

        if transform in albumentations_transform_pixel:
            aug = albumentations.Compose(
                [albumentations_transform_pixel[transform]],
                bbox_params={
                    'format': 'pascal_voc',
                    'label_fields': ['labels']
                })
            try:
                # Assumption: `label` holds [x1, y1, x2, y2, class] rows; the
                # original referenced `annots` before it was ever defined.
                annots = np.array(label, dtype=np.float32)
                aug_result = aug(image=rgb,
                                 bboxes=annots[:, :4],
                                 labels=annots[:, 4])
                rgb = aug_result['image']
                # Assign back to `label` so the augmented boxes are returned.
                label = np.hstack([
                    aug_result['bboxes'],
                    np.array(aug_result['labels']).reshape(-1, 1)
                ])
            except Exception:
                print(f"transform={transform} failed for annots={annots}")
                raise

        elif transform in albumentations_transform_bbox:
            aug = albumentations.Compose(
                [albumentations_transform_bbox[transform]],
                bbox_params={
                    'format': 'pascal_voc',
                    'label_fields': ['labels']
                })
            try:
                annots = np.array(label, dtype=np.float32)  # same assumption as above
                aug_result = aug(image=rgb,
                                 bboxes=annots[:, :4],
                                 labels=annots[:, 4])
                rgb = aug_result['image']
                label = np.hstack([
                    aug_result['bboxes'],
                    np.array(aug_result['labels']).reshape(-1, 1)
                ])
            except Exception:
                print(f"transform={transform} failed for annots={annots}")
                raise

        return rgb, thermal, depth, audio, label, id
        A.ShiftScaleRotate(
            rotate_limit=18, p=1, border_mode=cv.BORDER_CONSTANT),
        A.IAAAffine(shear=10, p=1, mode="constant"),
        # A.Perspective(scale=(0.05, 0.15), keep_size=True, pad_mode=0, pad_val=0,
        #               mask_pad_val=0, fit_output=False, interpolation=1, always_apply=False, p=1),
    ],
    p=1.0,
),
A.OneOf(
    [
        A.FancyPCA(alpha=0.1, always_apply=False, p=1),
        A.Blur(p=1),
        A.ToGray(p=0.8),
        A.ColorJitter(
            brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1, p=1),
        A.ChannelDropout((1, 1), fill_value=0, always_apply=False, p=1),
    ],
    p=0.3,
),
A.OneOf(
    [
        # A.GaussNoise(var_limit=(10.0, 50.0), mean=0, per_channel=True, always_apply=False, p=0.5),
        A.Equalize(mode='cv',
                   by_channels=True,
                   mask=None,
                   mask_params=(),
                   always_apply=False,
                   p=0.8),
        A.MotionBlur(blur_limit=4, p=1),
    ],
    p=0.1,
)
Example 12
        # return augmented["image"], augmented["mask"]
        return input_tensor, label_tensor

    def checkdataLoader(self):
        print("dummy")


if __name__=="__main__":

    transform=aldu.Compose([
        ###pixel_lvl_transform
        
        aldu.CLAHE(p=1),
        #aldu.RandomBrightnessContrast(),
        aldu.ChannelDropout(),
        #aldu.ISONoise() ,
        #aldu.Downscale(),
        #aldu.MultiplicativeNoise(),
        
        ### spatial-level transforms
        aldu.RandomCrop(256, 256),
        aldu.HorizontalFlip(),
        aldu.Rotate(),
        aldu.MaskDropout(),
        aldu.ElasticTransform(),
        aldu.GridDistortion(),
  
        ### normalize
        #aldu.Normalize()
        ])
    AB.OneOf([
        AB.CLAHE(clip_limit=2),
        AB.IAASharpen(),
        AB.IAAEmboss(),
        AB.RandomBrightnessContrast(),
        ], p=0.2
    ),

    # Change Gamma and Saturation
    AB.HueSaturationValue(p=0.2),
    AB.RandomGamma(p=0.2),

    # Random Color Channel manipulation
    AB.OneOf([
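        # Repeated entries below are presumably intentional: OneOf picks uniformly
        # among equally weighted children, so five copies bias the draw toward
        # dropping one channel (5/9) over two channels (3/9) or a shuffle (1/9).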
        AB.ChannelShuffle(p=0.5),
        AB.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(1, 1), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(2, 2), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(2, 2), fill_value=0, p=0.5),
        AB.ChannelDropout(channel_drop_range=(2, 2), fill_value=0, p=0.5),
        ], p=0.2
    ),

    # Normalize image using values from ImageNet
    AB.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
    )
Example 14
        albu.RandomRotate90(p=0.5),
        albu.Flip(p=0.5),
        albu.Transpose(p=0.5),
        albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=30, p=0.4),
        albu.core.composition.PerChannel(
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.25),
                # albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.25),
                albu.Blur(blur_limit=3, p=.1)
            ]), p=1.0),
        albu.Cutout(max_h_size=70),
        # albu.ChannelDropout(p=0.05)

    ]),
    "w_dropchannel": albu.Compose([
        albu.ChannelDropout(channel_drop_range=(1, 1), p=0.7),
        albu.RandomResizedCrop(1024, 1024, (0.7, 1)),
        albu.MaskDropout(max_objects=14, p=0.9),
        albu.RandomRotate90(p=0.5),
        albu.Flip(p=0.5),
        albu.Transpose(p=0.5),
        albu.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=30, p=0.4),
        albu.core.composition.PerChannel(
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.25),
                albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.25),
                albu.Blur(blur_limit=3, p=.1)
            ]), p=1.0),
        albu.Cutout(max_h_size=100),
        # albu.ChannelDropout(p=0.05)
def get_hard_augmentations_v2(image_size):
    return A.Compose([
        A.OneOf([
            A.ShiftScaleRotate(shift_limit=0.05,
                               scale_limit=0.1,
                               rotate_limit=45,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.ElasticTransform(alpha_affine=0,
                               alpha=35,
                               sigma=5,
                               border_mode=cv2.BORDER_CONSTANT,
                               value=0),
            A.OpticalDistortion(distort_limit=0.11,
                                shift_limit=0.15,
                                border_mode=cv2.BORDER_CONSTANT,
                                value=0),
            A.GridDistortion(border_mode=cv2.BORDER_CONSTANT, value=0),
            A.NoOp()
        ]),
        A.OneOf([
            ZeroTopAndBottom(p=0.3),
            A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75),
                                              image_size[0]),
                              height=image_size[0],
                              width=image_size[1],
                              p=0.3),
            A.NoOp()
        ]),
        A.ISONoise(p=0.5),
        A.JpegCompression(p=0.3, quality_lower=75),

        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),
        A.OneOf([
            FancyPCA(alpha_std=6),
            A.RGBShift(r_shift_limit=40, b_shift_limit=30, g_shift_limit=30),
            A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10),
            A.ToGray(p=0.2),
            A.NoOp()
        ]),

        # Intentionally destroy image quality and assign 0 class in this case
        # A.Compose([
        #     BrightnessContrastDestroy(p=0.1),
        #     A.OneOf([
        #         MakeTooBlurry(),
        #         MakeTooBlurryMedian(),
        #         A.NoOp()
        #     ], p=0.1),
        # ]),

        # Add preprocessing method as an augmentation
        ChannelIndependentCLAHE(p=0.5),
        A.OneOf([
            A.ChannelDropout(p=0.2),
            A.CoarseDropout(p=0.1,
                            max_holes=2,
                            max_width=256,
                            max_height=256,
                            min_height=16,
                            min_width=16),
            A.NoOp()
        ]),
        A.RandomGridShuffle(p=0.3),
        DiagnosisNoise(p=0.2),

        # D4: RandomRotate90 + Transpose jointly cover the 8 dihedral symmetries of the square
        A.Compose([A.RandomRotate90(), A.Transpose()])
    ])
def get_train_transforms_mmdetection(input_size,
                                     use_crop=False,
                                     use_no_color_aug=False,
                                     use_center_crop=False,
                                     center_crop_ratio=0.9,
                                     use_gray=False):
    if isinstance(input_size, int):
        # Indexing an int would raise TypeError; build the tuple from the scalar.
        input_size = (input_size, input_size)
    return al.Compose([
        al.RandomResizedCrop(height=input_size[0],
                             width=input_size[1],
                             scale=(0.4, 1.0),
                             interpolation=0,
                             p=0.5),
        al.Resize(input_size[0], input_size[1], p=1.0),
        al.HorizontalFlip(p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(border_mode=0,
                                shift_limit=(-0.2, 0.2),
                                scale_limit=(-0.2, 0.2),
                                rotate_limit=(-20, 20)),
            al.OpticalDistortion(border_mode=0,
                                 distort_limit=[-0.5, 0.5],
                                 shift_limit=[-0.5, 0.5]),
            al.GridDistortion(
                num_steps=5, distort_limit=[-0., 0.3], border_mode=0),
            al.ElasticTransform(border_mode=0),
            al.IAAPerspective(),
            al.RandomGridShuffle()
        ],
                 p=0.1),
        al.Rotate(limit=(-25, 25), border_mode=0, p=0.1),
        al.OneOf([
            al.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                        contrast_limit=(-0.2, 0.2)),
            al.HueSaturationValue(hue_shift_limit=(-20, 20),
                                  sat_shift_limit=(-30, 30),
                                  val_shift_limit=(-20, 20)),
            al.RandomGamma(gamma_limit=(30, 150)),
            al.RGBShift(),
            al.CLAHE(clip_limit=(1, 15)),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(num_flare_circles_lower=1,
                          num_flare_circles_upper=2,
                          src_radius=110,
                          p=0.05),
        al.RandomShadow(p=0.05),
        al.GaussNoise(var_limit=(10, 20), p=0.05),
        al.ISONoise(color_shift=(0, 15), p=0.05),
        al.MultiplicativeNoise(p=0.05),
        al.OneOf([
            al.ToGray(p=1. if use_gray else 0.05),
            al.ToSepia(p=0.05),
            al.Solarize(p=0.05),
            al.Equalize(p=0.05),
            al.Posterize(p=0.05),
            al.FancyPCA(p=0.05),
        ],
                 p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=(3, 7)),
            al.Blur(blur_limit=(3, 7)),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        al.CoarseDropout(p=0.05),
        al.Cutout(num_holes=30,
                  max_h_size=37,
                  max_w_size=37,
                  fill_value=0,
                  p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(scale_min=0.5, scale_max=0.9, p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
def train_function(gpu, world_size, node_rank, gpus, fold_number, group_name):
    import torch.multiprocessing
    torch.multiprocessing.set_sharing_strategy('file_system')

    torch.manual_seed(25)
    np.random.seed(25)

    rank = node_rank * gpus + gpu
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
        world_size=world_size,
        rank=rank
    )

    device = torch.device("cuda:{}".format(gpu) if torch.cuda.is_available() else "cpu")

    batch_size = 64
    width_size = 416
    init_lr = 1e-4
    end_lr = 1e-6
    n_epochs = 20
    emb_size = 512
    margin = 0.5
    dropout = 0.0
    iters_to_accumulate = 1

    if rank == 0:
        wandb.init(project='shopee_effnet0', group=group_name, job_type=str(fold_number))

        checkpoints_dir_name = 'effnet0_{}_{}_{}'.format(width_size, dropout, group_name)
        os.makedirs(checkpoints_dir_name, exist_ok=True)

        wandb.config.model_name = checkpoints_dir_name
        wandb.config.batch_size = batch_size
        wandb.config.width_size = width_size
        wandb.config.init_lr = init_lr
        wandb.config.n_epochs = n_epochs
        wandb.config.emb_size = emb_size
        wandb.config.dropout = dropout
        wandb.config.iters_to_accumulate = iters_to_accumulate
        wandb.config.optimizer = 'adam'
        wandb.config.scheduler = 'ShopeeScheduler'

    df = pd.read_csv('../../dataset/reliable_validation_tm.csv')
    train_df = df[df['fold_group'] != fold_number]
    train_transforms = alb.Compose([
        alb.RandomResizedCrop(width_size, width_size),
        alb.ShiftScaleRotate(shift_limit=0.1, rotate_limit=30),
        alb.HorizontalFlip(),
        alb.OneOf([
            alb.Sequential([
                alb.HueSaturationValue(hue_shift_limit=50),
                alb.RandomBrightnessContrast(),
            ]),
            alb.FancyPCA(),
            alb.ChannelDropout(),
            alb.ChannelShuffle(),
            alb.RGBShift()
        ]),
        alb.CoarseDropout(max_height=int(width_size*0.1), max_width=int(width_size*0.1)),
        alb.OneOf([
            alb.ElasticTransform(),
            alb.OpticalDistortion(),
            alb.GridDistortion()
        ]),
        alb.Resize(width_size, width_size),
        alb.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])
    train_set = ImageDataset(train_df, train_df, '../../dataset/train_images', train_transforms)
    sampler = DistributedSampler(train_set, num_replicas=world_size, rank=rank, shuffle=True)
    train_dataloader = DataLoader(train_set, batch_size=batch_size // world_size, shuffle=False, num_workers=4,
                                  sampler=sampler)

    # valid_df = df[df['fold_strat'] == fold_number]
    valid_transforms = alb.Compose([
        alb.Resize(width_size, width_size),
        alb.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])
    # valid_set = ImageDataset(train_df, valid_df, '../../dataset/train_images', valid_transforms)
    # valid_dataloader = DataLoader(valid_set, batch_size=batch_size // world_size, shuffle=False, num_workers=4)

    test_df = df[df['fold_group'] == fold_number]
    test_set = ImageDataset(test_df, test_df, '../../dataset/train_images', valid_transforms)
    test_dataloader = DataLoader(test_set, batch_size=batch_size // world_size, shuffle=False, num_workers=4)

    model = EfficientNetArcFace(emb_size, train_df['label_group'].nunique(), device, dropout=dropout,
                                backbone='tf_efficientnet_b0_ns', pretrained=True, margin=margin, is_amp=True)
    model = SyncBatchNorm.convert_sync_batchnorm(model)
    model.to(device)
    model = DistributedDataParallel(model, device_ids=[gpu])

    scaler = GradScaler()
    criterion = CrossEntropyLoss()
    # criterion = LabelSmoothLoss(smoothing=0.1)
    optimizer = optim.Adam(model.parameters(), lr=init_lr)
    # scheduler = CosineAnnealingLR(optimizer, T_max=n_epochs, eta_min=end_lr,
    #                               last_epoch=-1)
    # scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=2000, T_mult=1,
    #                                         eta_min=end_lr, last_epoch=-1)
    scheduler = ShopeeScheduler(optimizer, lr_start=init_lr,
                                lr_max=init_lr*batch_size, lr_min=end_lr)

    for epoch in range(n_epochs):
        train_loss, train_duration, train_f1 = train_one_epoch(
            model, train_dataloader, optimizer, criterion, device, scaler,
            scheduler=None, iters_to_accumulate=iters_to_accumulate)
        scheduler.step()

        if rank == 0:
            # valid_loss, valid_duration, valid_f1 = evaluate(model, valid_dataloader, criterion, device)
            embeddings = get_embeddings(model, test_dataloader, device)
            embeddings_f1 = validate_embeddings_f1(embeddings, test_df)

            wandb.log({'train_loss': train_loss, 'train_f1': train_f1,
                       'embeddings_f1': embeddings_f1, 'epoch': epoch})

            filename = '{}_foldnum{}_epoch{}_train_loss{}_f1{}'.format(
                checkpoints_dir_name, fold_number+1, epoch+1,
                round(train_loss, 3), round(embeddings_f1, 3))
            torch.save(model.module.state_dict(), os.path.join(checkpoints_dir_name, '{}.pth'.format(filename)))
            # np.savez_compressed(os.path.join(checkpoints_dir_name, '{}.npz'.format(filename)), embeddings=embeddings)

            print('FOLD NUMBER %d\tEPOCH %d:\t'
                  'TRAIN [duration %.3f sec, loss: %.3f, avg f1: %.3f]\t'
                  'VALID EMBEDDINGS [avg f1: %.3f]\tCurrent time %s' %
                  (fold_number + 1, epoch + 1, train_duration,
                   train_loss, train_f1, embeddings_f1,
                   str(datetime.now(timezone('Europe/Moscow')))))

    if rank == 0:
        wandb.finish()
def get_train_transforms_atopy(input_size,
                               use_crop=False,
                               use_no_color_aug=False):
    if use_crop:
        resize = [
            al.Resize(int(input_size * 1.2), int(input_size * 1.2)),
            al.RandomSizedCrop(min_max_height=(int(input_size * 0.6),
                                               int(input_size * 1.2)),
                               height=input_size,
                               width=input_size)
        ]
    else:
        resize = [al.Resize(input_size, input_size)]
    return al.Compose(resize + [
        al.Flip(p=0.5),
        al.OneOf([
            al.RandomRotate90(),
            al.Rotate(limit=180),
        ], p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(),
            al.OpticalDistortion(),
            al.GridDistortion(),
            al.ElasticTransform(),
        ],
                 p=0.3),
        al.RandomGridShuffle(p=0.05),
        al.OneOf([
            al.RandomGamma(),
            al.HueSaturationValue(),
            al.RGBShift(),
            al.CLAHE(),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(p=0.05),
        al.RandomShadow(p=0.05),
        al.RandomBrightnessContrast(p=0.05),
        al.GaussNoise(p=0.2),
        al.ISONoise(p=0.2),
        al.MultiplicativeNoise(p=0.2),
        al.ToGray(p=0.05),
        al.ToSepia(p=0.05),
        al.Solarize(p=0.05),
        al.Equalize(p=0.05),
        al.Posterize(p=0.05),
        al.FancyPCA(p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=3),
            al.Blur(blur_limit=3),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        al.CoarseDropout(p=0.05),
        al.Cutout(p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
Example 19
def train_process(data_path, config):
    def _worker_init_fn_(worker_id):
        import random
        import numpy as np
        import torch

        random_seed = config.random_seed
        torch.manual_seed(random_seed)
        np.random.seed(random_seed)
        random.seed(random_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(random_seed)

    input_size = (config.img_height, config.img_width)

    PAD_VALUE = (0, 0, 0)
    IGNORE_INDEX = 255
    transforms = [
        abm.RandomResizedCrop(
            scale=(0.7, 1),
            ratio=(1.5, 2),
            height=config.img_height,
            width=config.img_width,
            interpolation=cv2.INTER_NEAREST,
            always_apply=True,
        ),
        abm.OneOf([abm.IAAAdditiveGaussianNoise(),
                   abm.GaussNoise()], p=0.5),
        abm.OneOf(
            [
                abm.MedianBlur(blur_limit=3),
                abm.GaussianBlur(blur_limit=3),
                abm.MotionBlur(blur_limit=3),
            ],
            p=0.5,
        ),
        abm.OneOf([
            abm.ShiftScaleRotate(
                rotate_limit=7,
                interpolation=cv2.INTER_NEAREST,
                border_mode=cv2.BORDER_CONSTANT,
                value=PAD_VALUE,
                mask_value=IGNORE_INDEX,
                p=1.0,
            ),
            abm.ElasticTransform(
                interpolation=cv2.INTER_NEAREST,
                border_mode=cv2.BORDER_CONSTANT,
                alpha_affine=30,
                value=PAD_VALUE,
                mask_value=IGNORE_INDEX,
                p=1.0,
            ),
            abm.Perspective(
                scale=0.05,
                interpolation=cv2.INTER_NEAREST,
                pad_mode=cv2.BORDER_CONSTANT,
                pad_val=PAD_VALUE,
                mask_pad_val=IGNORE_INDEX,
                keep_size=True,
                fit_output=True,
                p=1.0,
            ),
        ]),
        abm.RandomGamma(gamma_limit=(80, 120), p=0.5),
        abm.RandomBrightnessContrast(brightness_limit=(-0.5, 0.5),
                                     contrast_limit=(-0.5, 0.5),
                                     p=0.5),
        abm.HueSaturationValue(hue_shift_limit=20,
                               sat_shift_limit=30,
                               val_shift_limit=20,
                               p=0.5),
        abm.RandomShadow(p=0.5),
        abm.ChannelShuffle(p=0.5),
        abm.ChannelDropout(p=0.5),
        abm.HorizontalFlip(p=0.5),
        abm.ImageCompression(quality_lower=50, p=0.5),
        abm.Cutout(num_holes=100, max_w_size=8, max_h_size=8, p=0.5),
    ]

    data_transform = DataTransformBase(transforms=transforms,
                                       input_size=input_size,
                                       normalize=True)
    train_dataset = EgoRailDataset(data_path=data_path,
                                   phase="train",
                                   transform=data_transform)
    val_dataset = EgoRailDataset(data_path=data_path,
                                 phase="val",
                                 transform=data_transform)

    # train_dataset.weighted_class()
    weighted_values = [8.90560578, 1.53155476]

    train_data_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
        drop_last=True,
        worker_init_fn=_worker_init_fn_,  # pass the function itself; DataLoader calls it with each worker's id
    )
    val_data_loader = DataLoader(
        val_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        drop_last=True,
    )
    data_loaders_dict = {"train": train_data_loader, "val": val_data_loader}
    model = BiSeNetV2(n_classes=config.num_classes)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    criterion = OHEMCELoss(thresh=config.ohem_ce_loss_thresh,
                           weighted_values=weighted_values)

    base_lr_rate = config.lr_rate / (config.batch_size *
                                     config.batch_multiplier)
    base_weight_decay = config.weight_decay * (config.batch_size *
                                               config.batch_multiplier)

    def _lambda_epoch(epoch):
        import math

        max_epoch = config.num_epochs
        return math.pow((1 - epoch * 1.0 / max_epoch), 0.9)

    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=base_lr_rate,
        momentum=config.momentum,
        weight_decay=base_weight_decay,
    )
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=_lambda_epoch)
    trainer = BiSeNetV2Trainer(
        model=model,
        criterion=criterion,
        metric_func=None,
        optimizer=optimizer,
        data_loaders_dict=data_loaders_dict,
        config=config,
        scheduler=scheduler,
        device=device,
    )

    if config.snapshot and os.path.isfile(config.snapshot):
        trainer.resume_checkpoint(config.snapshot)

    with torch.autograd.set_detect_anomaly(True):
        trainer.train()
Example 20
        if p := trans_cfg.get('shiftscalerotate', False):  # key name inferred from the pattern below
            transforms.append(A.ShiftScaleRotate(p=p))
        if p := trans_cfg.get('elastictransform', False):
            transforms.append(A.ElasticTransform(p=p))
        if p := trans_cfg.get('griddistortion', False):
            transforms.append(A.GridDistortion(p=p))
        if p := trans_cfg.get('hflip', False):
            transforms.append(A.HorizontalFlip(p=p))
        if p := trans_cfg.get('vflip', False):
            transforms.append(A.VerticalFlip(p=p))
        if p := trans_cfg.get('brightnesscontrast', False):
            transforms.append(A.RandomBrightnessContrast(p=p))
        if p := trans_cfg.get('griddropout', False):
            transforms.append(
                A.GridDropout(fill_value=0, mask_fill_value=0, p=p))
        if p := trans_cfg.get('channeldropout', False):
            transforms.append(A.ChannelDropout(channel_drop_range=(1, 1), p=p))
        if p := trans_cfg.get('blur', False):
            transforms.append(
                A.OneOf([
                    A.MedianBlur(blur_limit=5, p=p),
                    A.Blur(blur_limit=5, p=p)
                ]))
        if p := trans_cfg.get('noise', False):
            transforms.append(
                A.OneOf([A.GaussNoise(p=p),
                         A.MultiplicativeNoise(p=p)]))
        if p := trans_cfg.get('hsv', False):
            transforms.append(A.HueSaturationValue(p=p))

        # Do these last so they are less likely to need reflection etc. during skew/rotation
        if trans_cfg.get('centrecrop', False):