Пример #1
0
    def __init__(self, outputs=6):
        """Build a ResNet-34 classifier, PANDA data split and augmentations.

        Args:
            outputs: number of logits produced by the final Linear layer.
        """
        super().__init__()
        # ImageNet-pretrained backbone; its 1000-dim output feeds the head.
        self.net = models.resnet34(True)
        self.linear = Sequential(ReLU(), Dropout(), Linear(1000, outputs))

        # NOTE(review): hard-coded absolute path — valid only on the original
        # machine; consider making it configurable.
        df = pd.read_csv(
            "/home/dipet/kaggle/prostate/input/prostate-cancer-grade-assessment/train.csv"
        )
        # 80/20 split; no random_state, so the split differs between runs.
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        self.data_dir = "/datasets/panda/train_128_100"

        # Train-time augmentations. `mean`/`std` are presumably module-level
        # constants defined elsewhere in the file — TODO confirm.
        self.train_transforms = A.Compose([
            A.InvertImg(p=1),
            A.RandomGridShuffle(grid=(10, 10)),
            A.RandomScale(0.1),
            A.PadIfNeeded(1280, 1280),
            A.RandomSizedCrop([1000, 1280], 1280, 1280),
            A.Flip(),
            A.Rotate(90),
            A.RandomBrightnessContrast(0.02, 0.02),
            A.HueSaturationValue(0, 10, 10),
            A.Normalize(mean, std, 1),
        ])
        # Validation: deterministic inversion + normalization only.
        self.valid_transforms = A.Compose([
            A.InvertImg(p=1),
            A.Normalize(mean, std, 1),
        ])
Пример #2
0
    def __init__(self, outputs=5):
        """Build an EfficientNet-B0 classifier, data split and augmentations.

        Args:
            outputs: number of logits produced by the final Linear layer.
        """
        super().__init__()
        self.net = EfficientNet.from_pretrained('efficientnet-b0')
        self.linear = Sequential(ReLU(), Dropout(),  Linear(1000, outputs))

        df = pd.read_csv("../input/prostate-cancer-grade-assessment/train.csv")
        # 80/20 split; no random_state, so the split differs between runs.
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        self.data_dir = "../input/prostate-cancer-grade-assessment/train_images"

        # `mean`/`std` and IMAGE_SIZE are presumably module-level constants
        # defined elsewhere in the file — TODO confirm.
        self.train_transforms = A.Compose(
            [
                A.InvertImg(p=1),
                A.RandomSizedCrop([int(IMAGE_SIZE * 0.9), IMAGE_SIZE], IMAGE_SIZE, IMAGE_SIZE),
                A.Transpose(),
                A.Flip(),
                A.Rotate(90, border_mode=cv.BORDER_CONSTANT, value=(0, 0, 0)),
                A.RandomBrightnessContrast(0.02, 0.02),
                A.HueSaturationValue(0, 10, 10),
                A.Normalize(mean, std, 1),
            ]
        )
        self.valid_transforms = A.Compose([A.InvertImg(p=1), A.Normalize(mean, std, 1),])

        self.criterion = BCEWithLogitsLoss()

        # NOTE(review): `False and ...` makes this branch dead code — the
        # BatchNorm freeze never runs; presumably disabled on purpose.
        if False and BATCH_SIZE == 1:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False
Пример #3
0
    def __init__(self, outputs=5):
        """Build an EfficientNet-B0 classifier, data split and augmentations.

        Args:
            outputs: number of logits produced by the final Linear layer.
        """
        super().__init__()
        self.net = EfficientNet.from_pretrained('efficientnet-b0')
        self.linear = Sequential(ReLU(), Dropout(), Linear(1000, outputs))

        df = pd.read_csv("../input/prostate-cancer-grade-assessment/train.csv")
        # 80/20 split; no random_state, so the split differs between runs.
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        # NOTE(review): CSV comes from the Kaggle input dir but images from a
        # local /datasets path — verify both exist in the target environment.
        self.data_dir = "/datasets/panda/train_128_100"

        # 128x128 tiles; color jitter deliberately disabled (left commented).
        self.train_transforms = A.Compose([
            A.InvertImg(p=1),
            A.RandomSizedCrop([100, 128], 128, 128),
            A.Transpose(),
            A.Flip(),
            A.Rotate(90),
            # A.RandomBrightnessContrast(0.02, 0.02),
            # A.HueSaturationValue(0, 10, 10),
            A.Normalize(mean, std, 1),
        ])
        self.valid_transforms = A.Compose([
            A.InvertImg(p=1),
            A.Normalize(mean, std, 1),
        ])

        self.criterion = BCEWithLogitsLoss()
Пример #4
0
def albumone(image_file, path_to_train):
    """Augment one image and duplicate its label file alongside it.

    Writes `<stem>_aone.jpg` next to the input image and copies the matching
    label file in `<path_to_train>/labels/` to `<stem>_aone.txt`.

    Args:
        image_file: path to the source image (any cv2-readable format).
        path_to_train: dataset root containing a `labels/` subdirectory.
    """
    transform = A.Compose([
        A.RandomBrightnessContrast(p=0.4),
        A.OneOf([
            A.IAAAdditiveGaussianNoise(),
            A.GaussNoise(),
        ], p=0.3),
        A.OneOf(
            [
                A.MotionBlur(p=.5),
                A.MedianBlur(blur_limit=3, p=0.6),  #mrandom
                A.Blur(blur_limit=3, p=0.6),
            ],
            p=0.2),
        A.HueSaturationValue(p=0.7),
        A.RGBShift(p=0.6),
        A.InvertImg(p=0.4),
    ])
    image = cv2.imread(image_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    transformed = transform(image=image)
    transformed_image = transformed["image"]
    # BUG FIX: the image was augmented in RGB but cv2.imwrite expects BGR —
    # without converting back, the saved file had red/blue channels swapped.
    transformed_image = cv2.cvtColor(transformed_image, cv2.COLOR_RGB2BGR)
    # Use splitext instead of the fragile `[:-4]` slice (handles any
    # extension length) and os.path.join instead of string concatenation.
    stem, _ = os.path.splitext(image_file)
    cv2.imwrite(stem + '_aone.jpg', transformed_image)
    base_stem, _ = os.path.splitext(os.path.basename(image_file))
    labels_dir = os.path.join(path_to_train, "labels")
    shutil.copy2(os.path.join(labels_dir, base_stem + '.txt'),
                 os.path.join(labels_dir, base_stem + '_aone.txt'))
Пример #5
0
def albu_train():
    """Training augmentation pipeline for detection (pascal_voc bboxes).

    Produces 512x512 bbox-safe crops with flips, color jitter, blur,
    coarse dropout and inversion, ending in tensor conversion.
    """
    color_jitter = A.OneOf(
        [
            A.RandomGamma(),
            A.RandomBrightnessContrast(brightness_limit=0.2,
                                       contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2),
        ],
        p=0.9,
    )
    steps = [
        A.RandomSizedBBoxSafeCrop(512, 512, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        color_jitter,
        #A.CLAHE(p=1.0),  # CLAHE only supports uint8
        A.MedianBlur(blur_limit=7, p=0.5),
        A.CoarseDropout(max_height=64,
                        max_width=64,
                        fill_value=0,
                        min_holes=2,
                        min_height=8,
                        min_width=8,
                        p=0.5),
        A.InvertImg(p=0.5),
        ToTensor(),
    ]
    bbox_cfg = {
        'format': 'pascal_voc',
        'min_area': 0,
        'min_visibility': 0,
        'label_fields': ['labels'],
    }
    return A.Compose(steps, p=1.0, bbox_params=bbox_cfg)
Пример #6
0
    def __init__(self, n, m):
        """RandAugment-style augmenter: sample `n` ops at magnitude `m`.

        Args:
            n: number of transforms applied per image; must not exceed the
               number of candidate transforms below.
            m: magnitude on a 0-30 scale; each transform's limits are scaled
               by m / 30.

        Raises:
            ValueError: if `n` exceeds the number of candidate transforms.
        """
        self.n = n
        self.m = m

        # Normalize magnitude to [0, 1] and scale each transform's range.
        m_ratio = self.m / 30.0
        self.augment_list = (
            A.CLAHE(always_apply=True),
            A.Equalize(always_apply=True),
            A.InvertImg(always_apply=True),
            A.Rotate(limit=30 * m_ratio, always_apply=True),
            A.Posterize(num_bits=int(4 * m_ratio), always_apply=True),
            A.Solarize(threshold=m_ratio, always_apply=True),
            A.RGBShift(r_shift_limit=110 * m_ratio,
                       g_shift_limit=110 * m_ratio,
                       b_shift_limit=110 * m_ratio,
                       always_apply=True),
            A.HueSaturationValue(hue_shift_limit=20 * m_ratio,
                                 sat_shift_limit=30 * m_ratio,
                                 val_shift_limit=20 * m_ratio,
                                 always_apply=True),
            A.RandomContrast(limit=m_ratio, always_apply=True),
            A.RandomBrightness(limit=m_ratio, always_apply=True),
            #  A.Sharpen(always_apply=True), 0.1, 1.9),
            # Horizontal-only then vertical-only translation variants.
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_y=0,
                               rotate_limit=0,
                               always_apply=True),
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_x=0,
                               rotate_limit=0,
                               always_apply=True),
            A.Cutout(num_holes=int(8 * m_ratio), always_apply=True),
            A.IAAAffine(shear=0.3 * m_ratio, always_apply=True))

        # Explicit validation instead of `assert`, which is stripped under -O.
        if self.n > len(self.augment_list):
            raise ValueError(
                f"n={self.n} exceeds the {len(self.augment_list)} "
                "available transforms")
Пример #7
0
def get_transforms(imsize, train=True, local=True, n_patches=None, is_stack=False):
    """Build per-patch (local) or cross-patch (global) augmentation pipelines.

    Args:
        imsize: target square size for Resize.
        train: if False, returns the deterministic eval pipeline.
        local: train-only switch — local transforms differ per patch, global
            transforms are shared across a stack of patches.
        n_patches: number of patches; used to declare additional image
            targets when is_stack is True.
            NOTE(review): when is_stack is True and n_patches is None,
            `range(n_patches - 1)` raises TypeError — confirm callers always
            pass n_patches in that case.
        is_stack: whether the input is a stack of patches handled jointly.
    """
    if train:
        if local: # local transforms are applied randomly for each patch
            return albu.Compose([
                albu.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=90, p=0.7, border_mode=cv2.BORDER_CONSTANT, value=(255,255,255)),
                albu.HorizontalFlip(p=0.5),
                albu.VerticalFlip(p=0.5),
                albu.Transpose(),
                albu.GaussNoise(p=0.2), # Super Slow!(?)
                albu.CoarseDropout(6, 20, 20, 2, p=0.3, fill_value=255),
                # albu.RandomGridShuffle((3,3), p=0.5),
                # albu.JpegCompression(quality_lower=70, p=0.3),

                # albu.Resize(imsize, imsize),
            ])
        else: #Global transforms are applied consistently across all patches
            # Geometric flips are skipped for stacks (they would have to be
            # identical across patches); color jitter is always applied.
            return albu.Compose(([
                albu.HorizontalFlip(p=0.5),
                albu.VerticalFlip(p=0.5),
                albu.Transpose(p=0.25),
            ] if not is_stack else []) + [
                albu.RandomGamma((90, 110), p=0.25),
                albu.RandomContrast(p=0.25, limit=0.1),
                albu.RandomBrightness(p=0.25, limit=0.1),
                albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=15, val_shift_limit=5, p=0.25),
                albu.Resize(imsize, imsize),
                albu.InvertImg(always_apply=True),
                # albu.Lambda(to_hsv, always_apply=True),
                # albu.Normalize(always_apply=True, mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5]),
                # albu.Normalize(always_apply=True, mean=[0.5, 0.2, 0.85], std=[0.3, 0.3, 0.15]), # HSV normalization
                # albu.Normalize(always_apply=True, mean=[0.81, 0.6, 0.73], std=[0.4, 0.51, 0.41]), # Basic normalization
                albu.Normalize(always_apply=True, mean=[1-0.85, 1-0.71, 1-0.80], std=[0.16, 0.27, 0.18]), # Invert Normalization
                ToTensor()
            ],
            additional_targets={f'image{i}':'image' for i in range(n_patches-1)} if is_stack else None)
    else:
        # Eval: deterministic resize + inversion + inverted-stat normalization.
        return albu.Compose([
            albu.Resize(imsize, imsize),
            albu.InvertImg(always_apply=True),
            # albu.Lambda(to_hsv, always_apply=True),
            # albu.Normalize(always_apply=True, mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5]),
            # albu.Normalize(always_apply=True, mean=[0.5, 0.2, 0.85], std=[0.3, 0.3, 0.15]),
            # albu.Normalize(always_apply=True, mean=[0.81, 0.6, 0.73], std=[0.4, 0.51, 0.41]),
            albu.Normalize(always_apply=True, mean=[1-0.85, 1-0.71, 1-0.80], std=[0.16, 0.27, 0.18]),
            ToTensor()
        ],
        additional_targets={f'image{i}':'image' for i in range(n_patches-1)} if is_stack else None)
Пример #8
0
    def blur_and_distortion(self, kernel_size=(3, 3)):  # Blur & Distortion
        """Compose blur, color/geometry and noise augmentations.

        One blur op and one color/geometry op are each always selected
        (OneOf with p=1); each individual op fires with probability self.p.

        Args:
            kernel_size: blur kernel-size limit shared by all blur ops.
        """
        blur_ops = [
            # Blur the input image using a random-sized kernel.
            A.Blur(blur_limit=kernel_size, p=self.p),
            # Apply motion blur using a random-sized kernel.
            A.MotionBlur(blur_limit=kernel_size, p=self.p),
            # Median filter with a random aperture linear size.
            A.MedianBlur(blur_limit=kernel_size, p=self.p),
            # Gaussian filter with a random kernel size.
            A.GaussianBlur(blur_limit=kernel_size, p=self.p),
        ]
        color_geometry_ops = [
            A.RandomGamma(gamma_limit=(80, 120), p=self.p),
            A.OpticalDistortion(distort_limit=0.05, shift_limit=0.05, p=self.p),
            A.ElasticTransform(p=self.p),
            # Randomly change hue, saturation and value.
            A.HueSaturationValue(p=self.p),
            # Randomly shift values for each RGB channel.
            A.RGBShift(p=self.p),
            # Randomly rearrange channels.
            A.ChannelShuffle(p=self.p),
            # Contrast Limited Adaptive Histogram Equalization.
            A.CLAHE(p=self.p),
            # Invert by subtracting pixel values from 255.
            A.InvertImg(p=self.p),
        ]
        return A.Compose(
            [
                A.OneOf(blur_ops, p=1),
                A.OneOf(color_geometry_ops, p=1),
                # Gaussian noise over the whole image.
                A.GaussNoise(var_limit=(10.0, 50.0), mean=0, p=self.p),
                # Simulated shadows.
                A.RandomShadow(p=self.p),
            ],
            p=1)
Пример #9
0
def get_augmentation(save_path=None, load_path=None):
        """Build the augmentation pipeline, or load a serialized one.

        Args:
            save_path: if given, the built pipeline is serialized via A.save.
            load_path: if given, short-circuits and returns A.load(load_path).

        Returns:
            An albumentations Compose (color shift + noise + photometric OneOf
            groups), or the deserialized pipeline when load_path is set.
        """
        if load_path:
            return A.load(load_path)
        else:
            # NOTE(review): aug_seq1 is built but excluded from the Compose
            # below (its entry is commented out) — dead construction kept,
            # presumably for quick re-enabling.
            aug_seq1 = A.OneOf([
                A.Rotate(limit=(-90, 90), p=1.0),
                A.Flip(p=1.0),
                A.OpticalDistortion(always_apply=False, p=1.0, distort_limit=(-0.3, 0.3), 
                                    shift_limit=(-0.05, 0.05), interpolation=3, 
                                    border_mode=3, value=(0, 0, 0), mask_value=None),
            ], p=1.0)
            # Color shift / brightness-contrast group.
            aug_seq2 = A.OneOf([
                # A.ChannelDropout(always_apply=False, p=1.0, channel_drop_range=(1, 1), fill_value=0),
                A.RGBShift(r_shift_limit=15, g_shift_limit=15,
                           b_shift_limit=15, p=1.0),
                A.RandomBrightnessContrast(always_apply=False, p=1.0, brightness_limit=(
                    -0.2, 0.2), contrast_limit=(-0.2, 0.2), brightness_by_max=True)
            ], p=1.0)
            # Noise group.
            aug_seq3 = A.OneOf([
                A.GaussNoise(always_apply=False, p=1.0, var_limit=(10, 50)),
                A.ISONoise(always_apply=False, p=1.0, intensity=(
                    0.1, 1.0), color_shift=(0.01, 0.3)),
                A.MultiplicativeNoise(always_apply=False, p=1.0, multiplier=(
                    0.8, 1.6), per_channel=True, elementwise=True),
            ], p=1.0)
            # Photometric / blur / fog group.
            aug_seq4 = A.OneOf([
                A.Equalize(always_apply=False, p=1.0,
                           mode='pil', by_channels=True),
                A.InvertImg(always_apply=False, p=1.0),
                A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
                A.RandomFog(always_apply=False, p=1.0, 
                            fog_coef_lower=0.01, fog_coef_upper=0.2, alpha_coef=0.2)
            ], p=1.0)
            aug_seq = A.Compose([
                # A.Resize(self.img_size, self.img_size),
                # aug_seq1,
                aug_seq2,
                aug_seq3,
                aug_seq4,
                # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                # A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ])
            # aug_path = '/home/jitesh/prj/classification/test/bolt/aug/aug_seq.json'
            if save_path:
                A.save(aug_seq, save_path)
            # loaded_transform = A.load(aug_path)
            return aug_seq
Пример #10
0
 def __init__(self):
     """Build the random augmentation policy: grouped OneOf stages for
     rotation/flip, dropout, distortion, noise, blur, color, geometry and
     sharpening, each gated by its own probability."""
     self.policy = A.Compose([
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         # Occlusion-style dropout.
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         # Noise.
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         # Blur.
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         # Channel / color manipulation.
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         # Contrast / sharpening.
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
class TrainOneCycleConfigs:
    """Training hyper-parameters for the OneCycleLR schedule."""

    num_workers: int = 8
    batch_size: int = 14  # efficientnet-b2
    # batch_size: int = 11  # efficientnet-b3
    # batch_size: int = 8  # efficientnet-b4
    # batch_size: int = 6  # efficientnet-b5
    # batch_size: int = 4  # efficientnet-b6

    # -------------------
    verbose: bool = True
    verbose_step: int = 1

    # -------------------
    n_epochs: int = 40
    lr: float = 0.001

    # --------------------
    # Label-smoothed loss; LabelSmoothing is defined elsewhere in the project.
    loss: nn.Module = LabelSmoothing(smoothing=.05)

    # --------------------
    # OneCycle steps per batch (after optimizer.step), not after validation.
    step_after_optimizer: bool = True  # do scheduler.step after optimizer.step
    step_after_validation: bool = False
    lr_scheduler = torch.optim.lr_scheduler.OneCycleLR
    scheduler_params = dict(
        max_lr=0.001,
        epochs=n_epochs,
        # NOTE(review): hard-coded; should track int(len(train_dataset) / batch_size).
        steps_per_epoch=30000,  # int(len(train_dataset) / batch_size),
        pct_start=0.1,
        anneal_strategy="cos",
        final_div_factor=10**5)

    # Augmentations
    # --------------------
    transforms = A.Compose(
        [
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(always_apply=False, p=0.5),
            # A.RandomGridShuffle(grid=(3, 3), always_apply=False, p=0.5),
            A.InvertImg(always_apply=False, p=0.5),
            A.Resize(height=512, width=512, p=1.0),
            A.Normalize(always_apply=True),
            ToTensorV2(p=1.0),
        ],
        p=1.0)
class TrainReduceOnPlateauConfigs:
    """Training hyper-parameters for the ReduceLROnPlateau schedule."""

    num_workers: int = 8
    batch_size: int = 12  # 16

    # -------------------
    verbose: bool = True
    verbose_step: int = 1

    # -------------------
    n_epochs: int = 5
    lr: float = 0.001

    # --------------------
    # Label-smoothed loss; LabelSmoothing is defined elsewhere in the project.
    loss: nn.Module = LabelSmoothing(smoothing=.05)

    # --------------------
    # Plateau scheduler steps on validation loss, not per optimizer step.
    step_after_optimizer: bool = False  # do scheduler.step after optimizer.step
    step_after_validation = True  # do scheduler.step after validation stage loss
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau
    scheduler_params = dict(mode="min",
                            factor=0.5,
                            patience=1,
                            verbose=True,
                            threshold=0.0001,
                            threshold_mode="abs",
                            cooldown=0,
                            min_lr=1e-8,
                            eps=1e-08)

    # Augmentations
    # --------------------
    transforms = A.Compose(
        [
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(always_apply=False, p=0.5),
            # A.RandomGridShuffle(grid=(3, 3), always_apply=False, p=0.5),
            A.InvertImg(always_apply=False, p=0.5),
            A.Resize(height=512, width=512, p=1.0),
            A.Normalize(always_apply=True),
            ToTensorV2(p=1.0),
        ],
        p=1.0)
 def __init__(self, k: int = 5, always_apply: bool = True, p: float = 1.0):
     """Build the RandAugment candidate transform list.

     Args:
         k: number of ops to sample — presumably consumed by the apply
            method elsewhere in this class; TODO confirm.
         always_apply: forwarded to the albumentations base transform.
         p: overall application probability, forwarded to the base class.
     """
     super(RandAugmentAlb, self).__init__(always_apply, p)
     self.k = k
     # NOTE(review): AutoContrast, Rotate, ShearX/ShearY and Translate are
     # custom transforms defined elsewhere in this file, not albumentations'.
     self.candidates = [
         AutoContrast(p=1.0),
         A.Equalize(p=1.0),
         A.InvertImg(p=1.0),
         Rotate(30., p=1.0),
         A.Posterize([4, 8], p=1.0),
         A.Solarize([0, 256], p=1.0),
         # Contrast-only and brightness-only jitter variants.
         A.RandomBrightnessContrast(brightness_limit=0.,
                                    contrast_limit=(0.05, 0.95),
                                    p=1.0),
         A.RandomBrightnessContrast(brightness_limit=(0.05, 0.95),
                                    contrast_limit=0.,
                                    p=1.0),
         ShearX(0.3),
         ShearY(0.3),
         Translate(0.45),
     ]
Пример #14
0
 def __init__(self, img, data, img_size):
     """ 
     arguments
     ---------
     img : list
         list of images, in the original size (height, width, 3)
     data : list of dict
         Each dict has :
             'image' : index of the image. The index should match with img
             'mask' : [xx, yy]
                     IMPORTANT : (WIDTH, HEIGHT)
             'box' : [[xmin, ymin], [xmax,ymax]]
             'size' : the size of the image that the data was created with
                     IMPORTANT : (WIDTH, HEIGHT)
     img_size : tuple
         Desired output image size
         The axes will be swapped to match pygame.
         IMPORTANT : (WIDTH, HEIGHT)
     """
     self.image = img
     self.data = data
     # Number of annotated objects.
     self.n = len(data)
     self.output_size = img_size
     # Photometric jitter (one-of, p=0.8), inversion, flips/rotation, resize.
     # NOTE(review): A.Resize takes (height, width) while img_size is
     # documented above as (WIDTH, HEIGHT) — the docstring says axes are
     # deliberately swapped for pygame; confirm this is intentional.
     self.aug = A.Compose([
         A.OneOf([
             A.RandomGamma((40, 200), p=1),
             A.RandomBrightness(limit=0.5, p=1),
             A.RandomContrast(limit=0.5, p=1),
             A.RGBShift(40, 40, 40, p=1),
             A.Downscale(scale_min=0.25, scale_max=0.5, p=1),
             A.ChannelShuffle(p=1),
         ],
                 p=0.8),
         A.InvertImg(p=0.5),
         A.VerticalFlip(p=0.5),
         A.RandomRotate90(p=1),
         A.Resize(img_size[0], img_size[1]),
     ], )
     # Cache per-object bounding extents of the polygon mask.
     for datum in data:
         datum['mask_min'] = np.min(datum['mask'], axis=1)
         datum['mask_max'] = np.max(datum['mask'], axis=1) + 1
Пример #15
0
def get_training_augmentation():
    """Training augmentation pipeline (the first-prize winner's recipe):
    flips, shift/scale/rotate, grid distortion, random crop, resize to
    320x640, then channel shuffle, inversion, grayscale and normalization."""
    flips = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
    ]
    geometric = [
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10,
                              shift_limit=0.1, p=0.5,
                              border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.RandomCrop(height=800, width=1200, p=0.5),
        albu.Resize(320, 640),
    ]
    photometric = [
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ]
    return albu.Compose(flips + geometric + photometric)
Пример #16
0
def get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576)):
    """Training augmentation pipeline: flips, shift/scale/rotate, grid
    distortion, resize then random crop, plus channel shuffle, inversion,
    grayscale and normalization.

    Args:
        resize_to: (height, width) the image is resized to first.
        crop_size: (height, width) of the random crop taken after resizing.
    """
    print('[get_training_augmentation] crop_size:', crop_size, ', resize_to:',
          resize_to)

    return albu.Compose([
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10,
                              shift_limit=0.1, p=0.5,
                              border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.Resize(*resize_to),
        albu.RandomCrop(*crop_size),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ])
Пример #17
0
def hard_transforms():
    """Aggressive augmentation pipeline producing 64x64 bbox-safe crops
    (pascal_voc bounding boxes)."""
    noise = albu.OneOf(
        [
            albu.IAAAdditiveGaussianNoise(),
            albu.GaussNoise(),
            albu.MultiplicativeNoise(multiplier=[0.5, 1.5],
                                     per_channel=True, p=1),
        ],
        p=0.3,
    )
    blur = albu.OneOf(
        [
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ],
        p=0.2,
    )
    contrast = albu.OneOf(
        [
            albu.CLAHE(clip_limit=2),
            albu.IAASharpen(),
            albu.IAAEmboss(),
            albu.RandomBrightnessContrast(brightness_limit=0.2,
                                          contrast_limit=0.2, p=0.3),
            albu.RandomGamma(gamma_limit=(85, 115), p=0.3),
        ],
        p=0.3,
    )
    return albu.Compose(
        [
            albu.Rotate(limit=30,
                        interpolation=cv2.INTER_LINEAR,
                        border_mode=cv2.BORDER_CONSTANT,
                        value=(0, 0, 0)),
            albu.RandomSizedBBoxSafeCrop(width=64, height=64,
                                         erosion_rate=0.2),
            albu.InvertImg(p=0.3),
            albu.HueSaturationValue(p=0.3),
            noise,
            blur,
            contrast,
            albu.JpegCompression(quality_lower=30, quality_upper=100, p=0.5),
            albu.Cutout(num_holes=10, max_h_size=5, max_w_size=5,
                        fill_value=0, p=0.5),
        ],
        p=1,
        bbox_params=albu.BboxParams(format='pascal_voc'),
    )
Пример #18
0
             'Flip': A.Flip(p=0.5),
             'Random Rotate': A.RandomRotate90(p=0.5),
             'Rotate': A.Rotate(limit=286, p=0.5),
             'Transpose': A.Transpose(p=0.5),
             'Shift Scale Rotate': A.ShiftScaleRotate(shift_limit=0.8, scale_limit=1.4, rotate_limit=360, p=0.5),
             'Center Crop': A.CenterCrop(height=134, width=94, p=0.5),
             'Random Brightness': A.RandomBrightness(limit=1.3, p=0.5),
             'Random Brightness Contrast': A.RandomBrightnessContrast(p=0.5),
             'Random Gamma': A.RandomGamma(p=0.5),
             'Clahe': A.CLAHE(p=0.5),
             'Hue Saturation Value': A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.5),
             'RGB Shift': A.RGBShift(r_shift_limit=105, g_shift_limit=45, b_shift_limit=40, p=0.5),
             'Channel Shuffle': A.ChannelShuffle(p=0.5),
             'Jpeg Compression': A.JpegCompression(quality_lower=7, quality_upper=100, p=0.5),
             'Random Contrast': A.RandomContrast(limit=0.9, p=0.5),
             'Blur': A.Blur(blur_limit=17, p=0.5),
             'Gauss Noise': A.GaussNoise(var_limit=(10.0, 80.0), p=0.5),
             'Invert Image': A.InvertImg(),
}

def WAIT_imgaug_albu(img, aug_name, p=0.5, **kwargs):
  """Apply the named augmentation from the module-level `albu_augs` dict.

  NOTE(review): `aug.p = p` mutates the shared module-level transform, so
  the new probability persists for every later caller using aug_name.
  """
  aug = albu_augs[aug_name]
  aug.p = p
  augmented = aug(image=img)
  return augmented['image']

def imgaug_albu(img, aug_name, mask=None, p=0.5, **kwargs):
  """Apply the named augmentation from `albu_augs` to an image and mask.

  Returns (augmented_image, augmented_mask).
  NOTE(review): `aug.p = p` mutates the shared module-level transform (the
  probability persists across calls), and mask=None is forwarded as-is —
  verify the transforms accept a None mask.
  """
  aug = albu_augs[aug_name]
  aug.p = p
  augmented = aug(image=img, mask=mask)
  return augmented['image'], augmented['mask']
Пример #19
0
                p=0.2),
        #A.OneOf([
        #         A.OpticalDistortion(interpolation=3, p=0.1),
        #         A.GridDistortion(interpolation=3, p=0.1),
        #         A.IAAPiecewiseAffine(p=0.5),
        #        ], p=0.0),
        A.OneOf([
            A.CLAHE(clip_limit=2),
            A.IAASharpen(),
            A.IAAEmboss(),
        ],
                p=0.2),
        A.RandomBrightnessContrast(p=0.5),
        A.RandomGamma(p=0.5),
        A.ToGray(p=1),
        A.InvertImg(p=0.2)
    ],
    p=1)

mean = [0.485, 0.456, 0.406]  # ImageNet channel means
std = [0.229, 0.224, 0.225]  # ImageNet channel stds
# Training: tensor conversion + ImageNet normalization only.
train_transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(mean, std)])
# Validation: resize (interpolation=3, bicubic) before tensor/normalize.
# NOTE(review): `config` is presumably a module-level dict defined elsewhere
# — verify it provides 'image_size'.
val_transform = transforms.Compose([
    transforms.Resize(config['image_size'], 3),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])
tta_transform = transforms.Compose([
    transforms.Resize(config['image_size'], 3),
def get_train_transforms_atopy(input_size,
                               use_crop=False,
                               use_no_color_aug=False):
    """Training transforms for the atopy dataset: resize/crop followed by a
    large bank of low-probability augmentations, normalization and tensor
    conversion.

    Args:
        input_size: target square image size.
        use_crop: if True, upsizes by 1.2x then takes a random sized crop
            back to input_size; otherwise a plain resize.
        use_no_color_aug: NOTE(review) — accepted but never used in this
            body; color augmentations are always applied. Confirm intent.
    """
    if use_crop:
        resize = [
            al.Resize(int(input_size * 1.2), int(input_size * 1.2)),
            al.RandomSizedCrop(min_max_height=(int(input_size * 0.6),
                                               int(input_size * 1.2)),
                               height=input_size,
                               width=input_size)
        ]
    else:
        resize = [al.Resize(input_size, input_size)]
    return al.Compose(resize + [
        al.Flip(p=0.5),
        al.OneOf([
            al.RandomRotate90(),
            al.Rotate(limit=180),
        ], p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(),
            al.OpticalDistortion(),
            al.GridDistortion(),
            al.ElasticTransform(),
        ],
                 p=0.3),
        al.RandomGridShuffle(p=0.05),
        # Color-space jitter group.
        al.OneOf([
            al.RandomGamma(),
            al.HueSaturationValue(),
            al.RGBShift(),
            al.CLAHE(),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        # Weather / lighting effects, each rare.
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(p=0.05),
        al.RandomShadow(p=0.05),
        al.RandomBrightnessContrast(p=0.05),
        # Noise.
        al.GaussNoise(p=0.2),
        al.ISONoise(p=0.2),
        al.MultiplicativeNoise(p=0.2),
        # Tone / palette changes.
        al.ToGray(p=0.05),
        al.ToSepia(p=0.05),
        al.Solarize(p=0.05),
        al.Equalize(p=0.05),
        al.Posterize(p=0.05),
        al.FancyPCA(p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=3),
            al.Blur(blur_limit=3),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        # Occlusion-style dropout.
        al.CoarseDropout(p=0.05),
        al.Cutout(p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
def get_train_transforms_mmdetection(input_size,
                                     use_crop=False,
                                     use_no_color_aug=False,
                                     use_center_crop=False,
                                     center_crop_ratio=0.9,
                                     use_gray=False):
    """mmdetection-style training transforms: random resized crop, flips,
    geometric / color / weather / noise / blur groups, dropout, compression,
    normalization and tensor conversion.

    Args:
        input_size: int (square) or (height, width) target size.
        use_crop, use_no_color_aug, use_center_crop, center_crop_ratio:
            NOTE(review) — accepted but never used in this body; confirm
            whether they were meant to alter the pipeline.
        use_gray: if True, ToGray is always applied when its OneOf group
            fires (its inner p becomes 1.0).
    """
    if isinstance(input_size, int):
        # BUG FIX: the original did `(input_size[0], input_size[1])`, which
        # raises TypeError — an int is not subscriptable. Expand the scalar
        # into a square (height, width) pair instead.
        input_size = (input_size, input_size)
    return al.Compose([
        al.RandomResizedCrop(height=input_size[0],
                             width=input_size[1],
                             scale=(0.4, 1.0),
                             interpolation=0,
                             p=0.5),
        al.Resize(input_size[0], input_size[1], p=1.0),
        al.HorizontalFlip(p=0.5),
        al.OneOf([
            al.ShiftScaleRotate(border_mode=0,
                                shift_limit=(-0.2, 0.2),
                                scale_limit=(-0.2, 0.2),
                                rotate_limit=(-20, 20)),
            al.OpticalDistortion(border_mode=0,
                                 distort_limit=[-0.5, 0.5],
                                 shift_limit=[-0.5, 0.5]),
            al.GridDistortion(
                num_steps=5, distort_limit=[-0., 0.3], border_mode=0),
            al.ElasticTransform(border_mode=0),
            al.IAAPerspective(),
            al.RandomGridShuffle()
        ],
                 p=0.1),
        al.Rotate(limit=(-25, 25), border_mode=0, p=0.1),
        # Color-space jitter group.
        al.OneOf([
            al.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                        contrast_limit=(-0.2, 0.2)),
            al.HueSaturationValue(hue_shift_limit=(-20, 20),
                                  sat_shift_limit=(-30, 30),
                                  val_shift_limit=(-20, 20)),
            al.RandomGamma(gamma_limit=(30, 150)),
            al.RGBShift(),
            al.CLAHE(clip_limit=(1, 15)),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        # Weather / lighting effects, each rare.
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(num_flare_circles_lower=1,
                          num_flare_circles_upper=2,
                          src_radius=110,
                          p=0.05),
        al.RandomShadow(p=0.05),
        # Noise.
        al.GaussNoise(var_limit=(10, 20), p=0.05),
        al.ISONoise(color_shift=(0, 15), p=0.05),
        al.MultiplicativeNoise(p=0.05),
        # Tone / palette changes; ToGray is forced when use_gray is set.
        al.OneOf([
            al.ToGray(p=1. if use_gray else 0.05),
            al.ToSepia(p=0.05),
            al.Solarize(p=0.05),
            al.Equalize(p=0.05),
            al.Posterize(p=0.05),
            al.FancyPCA(p=0.05),
        ],
                 p=0.05),
        al.OneOf([
            al.MotionBlur(blur_limit=(3, 7)),
            al.Blur(blur_limit=(3, 7)),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        # Occlusion-style dropout.
        al.CoarseDropout(p=0.05),
        al.Cutout(num_holes=30,
                  max_h_size=37,
                  max_w_size=37,
                  fill_value=0,
                  p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        al.Downscale(scale_min=0.5, scale_max=0.9, p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
Пример #22
0
from albumentations.pytorch import ToTensorV2

# TorchScript-compatible preprocessing: resize the shorter edge to 256,
# center-crop to 224, then normalize with ImageNet statistics.
# NOTE(review): nn.Sequential transforms operate on tensors — inputs must be
# converted with ToTensor() (see `ten` below) before this pipeline is applied.
c_transform = nn.Sequential(transforms.Resize([256,]), 
                            transforms.CenterCrop(224),
                            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
# PIL image / ndarray -> float tensor conversion, kept outside the scripted module.
ten = torchvision.transforms.ToTensor()

# JIT-script the pipeline so it can be serialized and run without Python.
scripted_transforms = torch.jit.script(c_transform)
# %%
# Training pipeline: resize to 256x256, random horizontal flip, one light
# geometric/photometric jitter, one 224 crop variant (applied half the time),
# then a final resize that guarantees 224x224 before normalization.
transform = A.Compose([
    A.Resize(width=256, height=256, always_apply=True),
    A.HorizontalFlip(p=0.5),
    A.OneOf([
        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05,
                           rotate_limit=15, p=0.25),
        A.RandomBrightnessContrast(p=0.1, contrast_limit=0.005,
                                   brightness_limit=0.005),
        A.InvertImg(p=0.02),
    ]),
    A.OneOf([
        A.RandomCrop(width=224, height=224, p=0.5),
        A.CenterCrop(width=224, height=224, p=0.5),
    ]),
    A.Resize(width=224, height=224, always_apply=True),
    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ToTensorV2(),
])

W_o_ten_transform = A.Compose(
    [A.Resize(width=256,height=256, always_apply=True),
                       A.HorizontalFlip(p=0.5),
                       A.OneOf([
Пример #23
0
    A.Rotate(limit=(-90, 90), p=1.0),
    A.Flip(p=1.0),
    ], p=1.0)
# Photometric jitter: either shift each RGB channel or adjust
# brightness/contrast; exactly one member fires per call.
aug_seq2 = A.OneOf(
    [
        A.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=1.0),
        A.RandomBrightnessContrast(
            brightness_limit=(-0.2, 0.2),
            contrast_limit=(-0.2, 0.2),
            brightness_by_max=True,
            p=1.0,
        ),
    ],
    p=1.0,
)
# Noise group: Gaussian, sensor (ISO) or multiplicative noise.
aug_seq3 = A.OneOf(
    [
        A.GaussNoise(var_limit=(10, 100), p=1.0),
        A.ISONoise(intensity=(0.1, 1.0), color_shift=(0.01, 0.3), p=1.0),
        A.MultiplicativeNoise(
            multiplier=(0.8, 1.6), per_channel=True, elementwise=True, p=1.0
        ),
    ],
    p=1.0,
)
# Miscellaneous group: equalization, inversion, motion blur, optical
# distortion or synthetic fog.
aug_seq4 = A.OneOf(
    [
        A.Equalize(mode='pil', by_channels=True, p=1.0),
        A.InvertImg(p=1.0),
        A.MotionBlur(blur_limit=(3, 7), p=1.0),
        A.OpticalDistortion(
            distort_limit=(-0.3, 0.3),
            shift_limit=(-0.05, 0.05),
            interpolation=0,
            border_mode=0,
            value=(0, 0, 0),
            mask_value=None,
            p=1.0,
        ),
        A.RandomFog(fog_coef_lower=0.1, fog_coef_upper=0.45,
                    alpha_coef=0.5, p=1.0),
    ],
    p=1.0,
)
# Full pipeline: resize, one pick from each group, then scale into [-1, 1].
aug_seq = A.Compose([
    A.Resize(IMG_SIZE, IMG_SIZE),
    aug_seq1,
    aug_seq2,
    aug_seq3,
    aug_seq4,
    A.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Round-trip the pipeline through JSON to check that it serializes cleanly.
aug_path = '/home/jitesh/prj/classification/test/bolt/aug/aug_seq.json'
A.save(aug_seq, aug_path)
loaded_transform = A.load(aug_path)
Пример #24
0
       A.OneOf([
           A.OpticalDistortion(interpolation=3, p=0.1),
           A.GridDistortion(interpolation=3, p=0.1),
           A.IAAPiecewiseAffine(p=0.5),
       ],
               p=0.2),
       A.OneOf([
           A.CLAHE(clip_limit=2),
           A.IAASharpen(),
           A.IAAEmboss(),
       ],
               p=0.2),
       A.RandomBrightnessContrast(p=0.5),
       A.RandomGamma(p=0.5),
       #A.ToGray(p=1),
       A.InvertImg(p=0.1),
       A.CoarseDropout(max_holes=16,
                       max_height=int(0.1 * config['image_sz']),
                       max_width=int(0.1 * config['image_sz']),
                       fill_value=0,
                       p=0.5),
   ],
   p=1)
ta_transform = transforms.Compose([
   transforms.Resize((config['image_sz'], config['image_sz']), 3),
   transforms.Lambda(lambda image: torch.stack([
       transforms.ToTensor()(image),
       transforms.ToTensor()(image.rotate(90, resample=0)),
       transforms.ToTensor()(image.rotate(180, resample=0)),
       transforms.ToTensor()(image.rotate(270, resample=0)),
       transforms.ToTensor()(image.transpose(method=Image.FLIP_TOP_BOTTOM)),
Пример #25
0
# Training schedule hyper-parameters.
epochs = 1000
weight_decay = 1e-3

# Validation: deterministic resize + normalization only (no augmentation).
transforms_val = alb.Compose([
    alb.Resize(height=H, width=W, p=1),
    alb.Normalize(),
])
# Training: same resize plus randomized geometric and color augmentation,
# each transform gated at p=0.2.
transforms_train = alb.Compose([
    alb.Resize(height=H, width=W, p=1),
    alb.ShiftScaleRotate(rotate_limit=5,
                         shift_limit=0.0625,
                         scale_limit=0.1,
                         p=0.2,
                         border_mode=cv2.BORDER_REPLICATE),
    alb.HueSaturationValue(hue_shift_limit=10,
                           sat_shift_limit=10,
                           val_shift_limit=10,
                           p=0.2),
    alb.RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=0.2),
    alb.ChannelShuffle(p=0.2),
    alb.InvertImg(p=0.2),
    alb.OneOf(
        [
            # Distortion-related operations.
            alb.OpticalDistortion(p=0.3),
            alb.GridDistortion(p=.1),
        ],
        p=0.2),
    alb.Normalize(),
])
Пример #26
0
def augmentation_train():
    """Build the training-time augmentation pipeline.

    Composes random flips with four ``OneOf`` groups — color/tone,
    blur/weather, noise and geometric distortion — each gated by its own
    probability, and returns the resulting ``albu.Compose``.
    """
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        # Color / tone group.
        albu.OneOf([
            albu.InvertImg(p=0.5),
            albu.RandomBrightnessContrast(brightness_limit=(-0.5, 0.3),
                                          contrast_limit=(-0.5, 0.3),
                                          brightness_by_max=False,
                                          p=0.5),
            albu.RandomGamma(gamma_limit=(50, 120), p=.5),
            albu.RandomToneCurve(scale=0.4, p=.5),
            albu.HueSaturationValue(hue_shift_limit=20,
                                    sat_shift_limit=20,
                                    val_shift_limit=20,
                                    p=.5),
            albu.ChannelShuffle(p=.5),
            albu.RGBShift(
                r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, p=.5),
        ],
                   p=0.5),
        # Blur / weather group.
        albu.OneOf([
            albu.RandomFog(
                fog_coef_lower=0.1, fog_coef_upper=.4, alpha_coef=0.06, p=0.5),
            albu.MotionBlur(blur_limit=7, p=0.5),
            albu.MedianBlur(blur_limit=7, p=0.5),
            albu.GlassBlur(sigma=0.5, max_delta=2, p=0.5),
            albu.Sharpen(alpha=(0.1, 0.3), lightness=(0.7, 1.1), p=0.5)
        ],
                   p=0.5),
        # Noise group.
        albu.OneOf([
            albu.GaussNoise(var_limit=0.03, mean=0, p=0.5),
            albu.MultiplicativeNoise(multiplier=(0.98, 1.02), p=0.5),
            albu.ISONoise(
                color_shift=(0.01, 0.02), intensity=(0.1, 0.3), p=0.5),
        ],
                   p=0.3),
        # Geometric distortion group.
        albu.OneOf([
            albu.ElasticTransform(border_mode=cv2.BORDER_CONSTANT,
                                  interpolation=cv2.INTER_CUBIC,
                                  alpha=1,
                                  sigma=50,
                                  alpha_affine=50,
                                  p=0.5),
            albu.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                                interpolation=cv2.INTER_CUBIC,
                                distort_limit=(-0.3, 0.3),
                                num_steps=5,
                                p=0.5),
            albu.OpticalDistortion(border_mode=cv2.BORDER_CONSTANT,
                                   interpolation=cv2.INTER_CUBIC,
                                   distort_limit=(-.05, .05),
                                   shift_limit=(-0.05, 0.05),
                                   p=0.5),
            albu.ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT,
                                  interpolation=cv2.INTER_CUBIC,
                                  # BUG FIX: was (0.05, 0.02); albumentations
                                  # limits are (min, max), so the bounds were
                                  # reversed.
                                  shift_limit=(0.02, 0.05),
                                  scale_limit=(-.1, 0),
                                  rotate_limit=2,
                                  p=0.5),
        ],
                   p=0.5),
    ]
    return albu.Compose(train_transform)
Пример #27
0
def get_transform_imagenet(use_albu_aug):
    """Return ``(train_transform, test_transform)`` for ImageNet-style input.

    Args:
        use_albu_aug: If True, the training transform is an albumentations
            pipeline (resize / random-resized-crop / flip plus several
            low-probability ``OneOf`` augmentation groups) wrapped in
            ``MultiDataTransformAlbu``; otherwise a plain torchvision
            pipeline wrapped in ``MultiDataTransform``.

    The test transform is always torchvision Resize(256) + CenterCrop(224)
    + ``ToTensor``.
    """
    if use_albu_aug:
        train_transform = al.Compose([
            # al.Flip(p=0.5),
            al.Resize(256, 256, interpolation=2),
            al.RandomResizedCrop(224,
                                 224,
                                 scale=(0.08, 1.0),
                                 ratio=(3. / 4., 4. / 3.),
                                 interpolation=2),
            al.HorizontalFlip(),
            # One optional augmentation group, applied half the time overall;
            # each inner OneOf fires with p=0.1.
            al.OneOf(
                [
                    # Geometric distortions.
                    al.OneOf(
                        [
                            al.ShiftScaleRotate(
                                border_mode=cv2.BORDER_CONSTANT,
                                rotate_limit=30),  # , p=0.05),
                            al.OpticalDistortion(
                                border_mode=cv2.BORDER_CONSTANT,
                                distort_limit=5.0,
                                shift_limit=0.1),
                            # , p=0.05),
                            al.GridDistortion(border_mode=cv2.BORDER_CONSTANT
                                              ),  # , p=0.05),
                            al.ElasticTransform(
                                border_mode=cv2.BORDER_CONSTANT,
                                alpha_affine=15),  # , p=0.05),
                        ],
                        p=0.1),
                    # Color transforms.
                    al.OneOf(
                        [
                            al.RandomGamma(),  # p=0.05),
                            al.HueSaturationValue(),  # p=0.05),
                            al.RGBShift(),  # p=0.05),
                            al.CLAHE(),  # p=0.05),
                            al.ChannelShuffle(),  # p=0.05),
                            al.InvertImg(),  # p=0.05),
                        ],
                        p=0.1),
                    # Weather effects.
                    al.OneOf(
                        [
                            al.RandomSnow(),  # p=0.05),
                            al.RandomRain(),  # p=0.05),
                            al.RandomFog(),  # p=0.05),
                            al.RandomSunFlare(num_flare_circles_lower=1,
                                              num_flare_circles_upper=2,
                                              src_radius=110),
                            # p=0.05, ),
                            al.RandomShadow(),  # p=0.05),
                        ],
                        p=0.1),
                    al.RandomBrightnessContrast(p=0.1),
                    # Noise.
                    al.OneOf(
                        [
                            al.GaussNoise(),  # p=0.05),
                            al.ISONoise(),  # p=0.05),
                            al.MultiplicativeNoise(),  # p=0.05),
                        ],
                        p=0.1),
                    # Global tone / palette changes.
                    al.OneOf(
                        [
                            al.ToGray(),  # p=0.05),
                            al.ToSepia(),  # p=0.05),
                            al.Solarize(),  # p=0.05),
                            al.Equalize(),  # p=0.05),
                            al.Posterize(),  # p=0.05),
                            al.FancyPCA(),  # p=0.05),
                        ],
                        p=0.1),
                    # Blurs.
                    al.OneOf(
                        [
                            # al.MotionBlur(blur_limit=1),
                            al.Blur(blur_limit=[3, 5]),
                            al.MedianBlur(blur_limit=[3, 5]),
                            al.GaussianBlur(blur_limit=[3, 5]),
                        ],
                        p=0.1),
                    # Occlusion / dropout.
                    al.OneOf(
                        [
                            al.CoarseDropout(),  # p=0.05),
                            al.Cutout(),  # p=0.05),
                            al.GridDropout(),  # p=0.05),
                            al.ChannelDropout(),  # p=0.05),
                            al.RandomGridShuffle(),  # p=0.05),
                        ],
                        p=0.1),
                    # Quality degradation.
                    al.OneOf(
                        [
                            al.Downscale(),  # p=0.1),
                            al.ImageCompression(quality_lower=60),  # , p=0.1),
                        ],
                        p=0.1),
                ],
                p=0.5),
            al.Normalize(),
            ToTensorV2()
        ])
    else:
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    # Wrap the training transform so each sample yields multiple augmented
    # views (contrastive-style); the wrapper matches the pipeline type.
    if use_albu_aug:
        train_transform = MultiDataTransformAlbu(train_transform)
    else:
        train_transform = MultiDataTransform(train_transform)

    return train_transform, test_transform
Пример #28
0
    alb.Compose([
        alb.ShiftScaleRotate(shift_limit=0,
                             scale_limit=(-.15, 0),
                             rotate_limit=1,
                             border_mode=0,
                             interpolation=3,
                             value=[255, 255, 255],
                             p=1),
        alb.GridDistortion(distort_limit=0.1,
                           border_mode=0,
                           interpolation=3,
                           value=[255, 255, 255],
                           p=.5)
    ],
                p=.15),
    alb.InvertImg(p=.15),
    alb.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=0.3),
    alb.GaussNoise(10, p=.2),
    alb.RandomBrightnessContrast(.05, (-.2, 0), True, p=0.2),
    alb.JpegCompression(95, p=.5),
    alb.ToGray(always_apply=True),
    alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
    # alb.Sharpen()
    ToTensorV2(),
])
# Test-time pipeline: force grayscale, normalize with the dataset's
# precomputed channel statistics, and convert to a tensor — no random
# augmentation at evaluation time.
test_transform = alb.Compose([
    alb.ToGray(always_apply=True),
    alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
    # alb.Sharpen()
    ToTensorV2(),
])
Пример #29
0
    def __init__(self,
                 ann_file,
                 img_prefix,
                 img_scale,
                 img_norm_cfg,
                 size_divisor=None,
                 proposal_file=None,
                 num_max_proposals=1000,
                 flip_ratio=0,
                 with_mask=True,
                 with_crowd=True,
                 with_label=True,
                 test_mode=False):
        """Initialize the detection dataset.

        Args:
            ann_file: Path to the annotation file.
            img_prefix: Root directory prepended to image paths.
            img_scale: A ``(long_edge, short_edge)`` tuple, or a list of such
                tuples for multi-scale training/testing.
            img_norm_cfg: Normalization config forwarded to ``ImageTransform``.
            size_divisor (int, optional): Pad images so both sides are
                divisible by this value (used for FPN).
            proposal_file (str, optional): Path to precomputed proposals.
            num_max_proposals (int): Maximum proposals kept per image.
            flip_ratio (float): Flip probability, must be in [0, 1].
            with_mask (bool): Load mask annotations (reserved field).
            with_crowd (bool): Return ignore/crowd bbox annotations.
            with_label (bool): Return labels (False for RPN).
            test_mode (bool): If True, skip annotation filtering and the
                aspect-ratio group flag setup.
        """
        # prefix of images path
        self.img_prefix = img_prefix
        # load annotations (and proposals)
        self.img_infos = self.load_annotations(ann_file)
        if proposal_file is not None:
            self.proposals = self.load_proposals(proposal_file)
        else:
            self.proposals = None
        # filter images with no annotation during training
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.img_infos = [self.img_infos[i] for i in valid_inds]
            # keep proposals aligned with the filtered image list
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]

        # (long_edge, short_edge) or [(long1, short1), (long2, short2), ...]
        self.img_scales = img_scale if isinstance(img_scale,
                                                  list) else [img_scale]
        assert mmcv.is_list_of(self.img_scales, tuple)
        # normalization configs
        self.img_norm_cfg = img_norm_cfg

        # max proposals per image
        self.num_max_proposals = num_max_proposals
        # flip ratio
        self.flip_ratio = flip_ratio
        assert flip_ratio >= 0 and flip_ratio <= 1
        # padding border to ensure the image size can be divided by
        # size_divisor (used for FPN)
        self.size_divisor = size_divisor

        # with mask or not (reserved field, takes no effect)
        self.with_mask = with_mask
        # some datasets provide bbox annotations as ignore/crowd/difficult,
        # if `with_crowd` is True, then these info is returned.
        self.with_crowd = with_crowd
        # with label is False for RPN
        self.with_label = with_label
        # in test mode or not
        self.test_mode = test_mode

        # set group flag for the sampler
        if not self.test_mode:
            self._set_group_flag()
        # transforms
        self.img_transform = ImageTransform(size_divisor=self.size_divisor,
                                            **self.img_norm_cfg)
        self.bbox_transform = BboxTransform()
        self.mask_transform = MaskTransform()
        self.numpy2tensor = Numpy2Tensor()
        # bbox-aware albumentations pipeline: boxes are in 'pascal_voc'
        # format and are dropped when less than 40% remains visible.
        self.light = A.Compose(
            [
                A.RGBShift(),
                A.InvertImg(),
                A.Blur(),
                A.GaussNoise(),
                A.Flip(),
                A.RandomRotate90()
            ],
            bbox_params={
                'format': 'pascal_voc',
                'min_visibility': 0.4,
                'label_fields': ['category_id']
            },
            p=1)
Пример #30
0
if __name__ == "__main__":
    # Smoke-test the dataset pipeline on the PANDA training CSV.
    import json
    import matplotlib.pyplot as plt
    import albumentations as A
    from tqdm import tqdm

    df = pd.read_csv("../input/prostate-cancer-grade-assessment/train.csv")
    # with open("../input/compact_representation.json", "r") as file:
    #     compact_representation = json.load(file)

    # Precomputed per-channel mean/std; the Normalize step that would use
    # them is currently commented out below.
    mean = [127.66098, 127.66102, 127.66085]
    std = [10.5911, 10.5911045, 10.591107]

    # Invert colors, then apply random crop/flip/rotate and mild color jitter.
    transforms = A.Compose([
        A.InvertImg(p=1),
        A.RandomSizedCrop([100, 128], 128, 128),
        A.Transpose(),
        A.Flip(),
        A.Rotate(90, border_mode=cv.BORDER_CONSTANT, value=(0, 0, 0)),
        A.RandomBrightnessContrast(0.02, 0.02),
        A.HueSaturationValue(0, 10, 10),
        # A.Normalize(mean, std, 1),
    ])

    # dataset = TrainDataset(df, "/datasets/panda/train_64_100", transforms)
    dataset = TrainDatasetBinning(
        pd.read_csv("../input/prostate-cancer-grade-assessment/train.csv"),
        "../input/prostate-cancer-grade-assessment/train_images", transforms,
        1, 256, 36)