示例#1
0
def get_training_augmentation(grayscale=False,
                              height=320,
                              width=640,
                              crop_mode=0):
    """Build the training augmentation pipeline.

    Args:
        grayscale: if True, collapse the module-level per-channel
            ``mean``/``std`` into single averaged values.
        height, width: target output size.
        crop_mode: 0 selects the pad+random-crop pipeline, anything else
            the resize-based pipeline.

    Returns:
        An ``albu.Compose`` ending with normalization and tensor conversion.
    """
    # Average the three channel statistics when training on grayscale input.
    if grayscale:
        norm_mean = (mean[0] + mean[1] + mean[2]) / 3
        norm_std = (std[0] + std[1] + std[2]) / 3
    else:
        norm_mean, norm_std = mean, std

    if crop_mode == 0:
        # Work on a 1.5x-oversized canvas first, then crop down to target.
        big_h, big_w = height * 3 // 2, width * 3 // 2
        pipeline = [
            albu.PadIfNeeded(big_h, big_w),
            albu.RandomCrop(big_h, big_w),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.CoarseDropout(p=0.1),
            albu.ShiftScaleRotate(scale_limit=0.4, rotate_limit=45,
                                  shift_limit=0.1, p=0.5, border_mode=0),
            albu.GridDistortion(p=0.3),
            albu.OpticalDistortion(p=0.3, distort_limit=2, shift_limit=0.5),
            albu.RGBShift(p=0.3),
            albu.Blur(p=0.3),
            albu.MotionBlur(p=0.3),
            albu.PadIfNeeded(height, width),
            albu.RandomCrop(height, width),
        ]
    else:
        pipeline = [
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.4, rotate_limit=45,
                                  shift_limit=0.1, p=0.5, border_mode=0),
            albu.GridDistortion(p=0.5),
            albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),
            albu.RGBShift(p=0.5),
            albu.ToGray(p=0.5),
            albu.Resize(height, width),
        ]

    pipeline += [
        albu.Normalize(mean=norm_mean, std=norm_std, p=1),
        ToTensor(),
    ]
    return albu.Compose(pipeline)
示例#2
0
def acase2_augs(name, **kwargs):
    """Return two fixed pipelines: posterize- and downscale-led variants.

    ``name`` and ``kwargs`` are accepted for interface compatibility but
    are not used by this policy.
    """
    def _pipeline(head):
        # Each pipeline gets its own transform instances.
        return A.Compose(
            [head, A.GridDistortion(num_steps=4), A.Normalize()],
            p=1.0)

    return [_pipeline(A.Posterize()), _pipeline(A.Downscale())]
示例#3
0
File: model.py  Project: eugenn/bd_torch
def get_train_transforms():
    """Training augmentation pipeline.

    Uses module-level ``resolution`` and ``input_res`` for the
    random-sized-crop bounds — assumes both are defined at import time;
    TODO confirm in the original module. Ends with normalization and
    tensor conversion.
    """
    return A.Compose([
        A.JpegCompression(p=0.5),
        A.Rotate(limit=80, p=1.0),
        # Exactly one randomly chosen geometric warp.
        A.OneOf([
            A.OpticalDistortion(),
            A.GridDistortion(),
            A.IAAPiecewiseAffine(),
        ]),
        # Crop between 70% of `resolution` and `input_res` pixels tall,
        # then resize to a square of side `resolution`.
        A.RandomSizedCrop(min_max_height=(int(resolution * 0.7), input_res),
                          height=resolution,
                          width=resolution,
                          p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.GaussianBlur(p=0.3),
        # Exactly one randomly chosen photometric jitter.
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.HueSaturationValue(),
        ]),
        A.Cutout(num_holes=8,
                 max_h_size=resolution // 8,
                 max_w_size=resolution // 8,
                 fill_value=0,
                 p=0.3),
        A.Normalize(),
        ToTensorV2(),
    ],
                     p=1.0)
def hard_spatial_augmentations(image_size: Tuple[int, int], rot_angle=45):
    """Aggressive spatial-only augmentations.

    Grid shuffling, mask dropout, affine/elastic warps and D4 dihedral
    symmetries. ``image_size`` is accepted for interface compatibility
    but not used in this pipeline.
    """
    # Shared zero-filled constant-border settings for the warp transforms.
    zero_border = dict(border_mode=cv2.BORDER_CONSTANT, value=0, mask_value=0)

    return A.Compose([
        # Coarse jigsaw shuffle of the image grid (or leave untouched).
        A.OneOf([
            A.NoOp(),
            A.RandomGridShuffle(grid=(4, 4)),
            A.RandomGridShuffle(grid=(3, 3)),
            A.RandomGridShuffle(grid=(2, 2)),
        ]),
        A.MaskDropout(max_objects=10),
        A.OneOf([
            A.ShiftScaleRotate(scale_limit=0.1,
                               rotate_limit=rot_angle,
                               **zero_border),
            A.NoOp(),
        ]),
        A.OneOf([
            A.ElasticTransform(**zero_border),
            A.GridDistortion(**zero_border),
            A.NoOp(),
        ]),
        # D4 symmetries via transpose + right-angle rotations.
        A.Compose([A.Transpose(), A.RandomRotate90()]),
    ])
示例#5
0
def test_grid_distortion_interpolation(interpolation):
    """GridDistortion with fixed steps must match the functional
    implementation: requested interpolation for the image, nearest for
    the mask."""
    src_image = np.random.randint(low=0,
                                  high=256,
                                  size=(100, 100, 3),
                                  dtype=np.uint8)
    src_mask = np.random.randint(low=0, high=2, size=(100, 100),
                                 dtype=np.uint8)

    transform = A.GridDistortion(num_steps=1,
                                 distort_limit=(0.3, 0.3),
                                 interpolation=interpolation,
                                 p=1)
    result = transform(image=src_image, mask=src_mask)

    # Parameters shared by both functional reference calls.
    common = dict(num_steps=1,
                  xsteps=[1.3],
                  ysteps=[1.3],
                  border_mode=cv2.BORDER_REFLECT_101)
    expected_image = F.grid_distortion(src_image,
                                       interpolation=interpolation,
                                       **common)
    expected_mask = F.grid_distortion(src_mask,
                                      interpolation=cv2.INTER_NEAREST,
                                      **common)

    assert np.array_equal(result['image'], expected_image)
    assert np.array_equal(result['mask'], expected_mask)
示例#6
0
def compose_albumentations(gamma_limit_lower=0,
                           gamma_limit_upper=0,
                           CLAHE_clip_limit=0.,
                           brightness_limit=0,
                           contrast_limit=0.,
                           distort_limit=0.):
    """Compose albumentations augmentations.

    Each augmentation is included only when its controlling limit(s) sum
    to a positive value; returns the (possibly empty) list of transforms.
    """
    augs = []
    # NOTE: the `> 0` sum check is kept deliberately — limits that cancel
    # out (or are all zero) disable the transform.
    if gamma_limit_lower + gamma_limit_upper > 0:
        augs.append(
            A.RandomGamma(gamma_limit=(gamma_limit_lower, gamma_limit_upper),
                          p=0.5))
    if CLAHE_clip_limit > 0:
        augs.append(A.CLAHE(clip_limit=CLAHE_clip_limit))
    if brightness_limit + contrast_limit > 0:
        augs.append(
            A.RandomBrightnessContrast(brightness_limit=brightness_limit,
                                       contrast_limit=contrast_limit))
    if distort_limit > 0:
        augs.append(
            A.GridDistortion(num_steps=5,
                             distort_limit=distort_limit,
                             interpolation=1,
                             border_mode=4,
                             p=0.5))
    return augs
    def augment_image(self, image, annotations):
        """Apply a random augmentation pipeline to `image` and its boxes.

        When `annotations['bboxes']` is empty the inputs are returned
        unchanged. Otherwise the box coordinates and labels inside
        `annotations` are replaced with their augmented counterparts.
        """
        if annotations['bboxes'].any():
            pipeline = self.get_aug(
                [
                    A.VerticalFlip(),
                    A.HorizontalFlip(),
                    A.RGBShift(),
                    A.Blur(blur_limit=7),
                    A.GaussNoise(),
                    A.OpticalDistortion(distort_limit=0.2),
                    A.GridDistortion(),
                    A.ShiftScaleRotate(p=0.75,
                                       shift_limit=0.1,
                                       rotate_limit=45,
                                       scale_limit=0.2),
                ],
                # Drop boxes smaller than 5% of a 1024px side, squared.
                min_area=(1024 * 0.05) ** 2)

            augmented = pipeline(image=image,
                                 bboxes=annotations['bboxes'],
                                 category_id=annotations['labels'])

            image = augmented['image']
            annotations['bboxes'] = np.array(augmented['bboxes'])
            annotations['labels'] = np.array(augmented['category_id'])

        return image, annotations
示例#8
0
def da_policy_combination_old(img_size):
    """Legacy "combinations" data-augmentation policy.

    Returns ``(train_aug, train_aug_img, val_aug)``: the common test-time
    transforms followed by the augmentation policy, an always-empty
    image-only list, and the common test-time transforms alone.
    """
    print("Using Data Augmentation Combinations Old")

    policy = [
        albumentations.ElasticTransform(p=0.72,
                                        alpha=177,
                                        sigma=177 * 0.05,
                                        alpha_affine=176 * 0.03),
        albumentations.GridDistortion(p=0.675, distort_limit=0.3),
        albumentations.OpticalDistortion(p=0.2,
                                         distort_limit=0.2,
                                         shift_limit=0.2),
        # Pure shift (no scaling / rotation).
        albumentations.ShiftScaleRotate(p=0.56,
                                        shift_limit=0.2,
                                        scale_limit=0.0,
                                        rotate_limit=0),
        # Pure scale (no shift / rotation).
        albumentations.ShiftScaleRotate(p=0.25,
                                        shift_limit=0.0,
                                        scale_limit=0.2,
                                        rotate_limit=0),
        albumentations.VerticalFlip(p=0.325),
        albumentations.HorizontalFlip(p=0.3),
        albumentations.Rotate(p=0.625,
                              limit=45,
                              interpolation=1,
                              border_mode=0),
    ]

    # Separate common_test_augmentation calls keep train and val pipelines
    # from sharing transform instances.
    train_aug = common_test_augmentation(img_size) + policy
    val_aug = common_test_augmentation(img_size)
    return train_aug, [], val_aug
示例#9
0
    def __data_generation(self, training_temp, label_temp):
        """Generate one batch of data.

        Args:
            training_temp: iterable of input images for this batch.
            label_temp: iterable of integer class labels, parallel to
                ``training_temp``.

        Returns:
            Tuple ``(X, y_onehot)`` where ``X`` has shape
            ``(batch_size, *dim, n_channels)`` and ``y_onehot`` is the
            categorical encoding of the labels.
        """
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size), dtype=int)

        # Fix: build the augmentation pipeline once instead of once per
        # sample. The transforms draw fresh randomness on every call, so
        # hoisting the Compose out of the loop is behavior-preserving and
        # avoids reconstructing the pipeline for each image.
        transform = None
        if self.augmentation:
            transform = A.Compose([
                A.GaussianBlur(p=0.2),              # gaussian blur
                A.RandomBrightnessContrast(p=0.5),  # brightness/contrast change
                A.GaussNoise(p=0.5),                # inject gaussian noise
                A.GridDistortion(p=0.2),            # grid distortion
                A.ShiftScaleRotate(shift_limit=0.1, rotate_limit=5, p=0.5),
            ])

        for i, (training_img, training_label) in enumerate(
                zip(training_temp, label_temp)):
            if transform is not None:
                training_img = transform(image=training_img)['image']
                self.add_line(training_img, p=0.5)
            # Scale to [0, 1] and add a trailing channel axis.
            training_img = training_img.astype("float32") / 255.0
            training_img = np.expand_dims(training_img, axis=2)
            X[i,] = training_img
            y[i] = training_label
        return X, tf.keras.utils.to_categorical(y, num_classes=self.n_classes)
示例#10
0
def aug_medium(prob=1):
    """Medium-strength augmentation pipeline (flips, contrast, geometric
    warps, noise and cutout), applied with overall probability `prob`."""
    contrast_group = aug.OneOf([
        aug.RandomContrast(),
        aug.RandomGamma(),
        aug.RandomBrightness(),
    ], p=0.3)
    warp_group = aug.OneOf([
        aug.ElasticTransform(alpha=120,
                             sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
        aug.GridDistortion(),
        aug.OpticalDistortion(distort_limit=2, shift_limit=0.5),
    ], p=0.3)
    noise_group = aug.OneOf([
        aug.GaussNoise(p=.35),
        SaltPepperNoise(level_limit=0.0002, p=.7),
        aug.ISONoise(p=.7),
    ], p=.5)

    return aug.Compose([
        aug.Flip(),
        aug.OneOf([
            aug.CLAHE(clip_limit=2, p=.5),
            aug.IAASharpen(p=.25),
        ], p=0.35),
        contrast_group,
        warp_group,
        aug.ShiftScaleRotate(rotate_limit=12),
        noise_group,
        aug.Cutout(num_holes=3, p=.25),
    ], p=prob)
    def __init__(self, imgs_dir, masks_dir, labels_dir=None):
        """Dataset over `imgs_dir`/`masks_dir`, optionally with a labels CSV.

        When `labels_dir` is given, labels are binarized: class 2 maps to
        0 (normal), everything else to 1 (benign/malignant), and
        `negative_samples_idx` marks the normal samples.
        """
        self.imgs_dir = imgs_dir
        self.masks_dir = masks_dir
        # NOTE(review): attribute name ("filesnames") kept as-is for
        # compatibility with existing callers.
        self.images_filesnames = sorted(os.listdir(imgs_dir))
        self.labels_dir = labels_dir
        self.labels = None
        self.negative_samples_idx = []

        if labels_dir is not None:
            self.labels = pd.read_csv(labels_dir)['labels'].to_numpy()
            # Binarize: 1 for benign/malignant, 0 for normal (class 2).
            self.labels[self.labels != 2] = 1
            self.labels[self.labels == 2] = 0
            self.negative_samples_idx = (self.labels == 0)

        # Training-time augmentations: resize, then exactly one randomly
        # chosen geometric/photometric transform.
        self.transformations = A.Compose([
            A.Resize(256, 256),
            A.OneOf([
                A.RandomRotate90(p=0.5),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.ShiftScaleRotate(scale_limit=0.5,
                                   rotate_limit=0,
                                   shift_limit=0.1,
                                   p=0.5,
                                   border_mode=0),
                A.RandomBrightnessContrast(p=0.2),
                A.GridDistortion(p=0.2),
                A.ElasticTransform(p=0.2),
            ]),
        ])
def get_training_augmentation():
    """Training augmentations: resize to 320x640, random flips,
    shift/scale, grid distortion and a mild rotation."""
    return albu.Compose([
        albu.Resize(320, 640, always_apply=True),
        albu.HorizontalFlip(),
        albu.VerticalFlip(),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=0.5,
                              border_mode=0),
        albu.GridDistortion(),
        albu.Rotate(limit=20),
    ])
示例#13
0
def get_transforms(phase, size, mean, std):
    """Build the transform pipeline for `phase`.

    Training adds flips, photometric jitter and geometric warps; every
    phase ends with normalization, a square resize to `size` and tensor
    conversion.
    """
    steps = []
    if phase == "train":
        steps += [
            albu.HorizontalFlip(),
            albu.OneOf([
                albu.RandomContrast(),
                albu.RandomGamma(),
                albu.RandomBrightness(),
            ], p=0.3),
            albu.OneOf([
                albu.ElasticTransform(alpha=120,
                                      sigma=120 * 0.05,
                                      alpha_affine=120 * 0.03),
                albu.GridDistortion(),
                albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ], p=0.3),
            albu.ShiftScaleRotate(),
        ]
    steps += [
        albu.Normalize(mean=mean, std=std, p=1),
        albu.Resize(size, size),
        ToTensorV2(),
    ]
    return albu.Compose(steps)
def get_transforms(type="albumentations"):
    """Return ``(train_transforms, test_transforms)`` for the chosen backend.

    Args:
        type: "albumentations" selects the albumentations pipeline; any
            other value selects the torchvision pipeline (which also
            normalizes and converts to tensor, unlike the albumentations
            branch). NOTE(review): the parameter name shadows the builtin
            ``type``; kept unchanged for interface compatibility.
    """
    if type == "albumentations":
        train_transforms = albumentations.Compose([
            albumentations.Transpose(p=0.5),
            # Exactly one flip direction.
            albumentations.OneOf([
                albumentations.VerticalFlip(p=0.5),
                albumentations.HorizontalFlip(p=0.5),
            ]),
            albumentations.OneOf([
                albumentations.RandomBrightness(limit=0.2, p=0.75),
                albumentations.RandomContrast(limit=0.2, p=0.75),
            ]),
            # One blur/noise variant.
            albumentations.OneOf([
                albumentations.MotionBlur(blur_limit=5),
                albumentations.MedianBlur(blur_limit=5),
                albumentations.GaussianBlur(blur_limit=5),
                albumentations.GaussNoise(var_limit=(5.0, 30.0)),
            ],
                                 p=0.7),
            # One geometric warp.
            albumentations.OneOf([
                albumentations.OpticalDistortion(distort_limit=1.0),
                albumentations.GridDistortion(num_steps=5, distort_limit=1.),
                albumentations.ElasticTransform(alpha=3),
            ],
                                 p=0.7),

            # albumentations.OneOf([
            #     albumentations.CLAHE(clip_limit=4.0, p=0.7),
            #     albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
            #     albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0,
            #                                     p=0.85),
            # ]),
            albumentations.Resize(256, 256),
            # albumentations.Cutout(max_h_size=int(256 * 0.375), max_w_size=int(256 * 0.375), num_holes=1, p=0.7),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        test_transforms = albumentations.Compose([
            albumentations.Resize(256, 256),
            # albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    else:
        # torchvision branch — normalizes with ImageNet statistics.
        train_transforms = transforms.Compose([
            # AdvancedHairAugmentation(hairs_folder='/kaggle/input/melanoma-hairs'),
            transforms.RandomResizedCrop(size=256, scale=(0.9, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            Microscope(p=0.5),
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        test_transforms = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return train_transforms, test_transforms
示例#15
0
def get_augmentations(p=0.5, image_size=224):
    """Return ``(train_tfms, valid_tfms)`` pipelines.

    Args:
        p: probability applied to most of the optional training
            augmentation groups.
        image_size: side of the square crop/resize.

    Both pipelines end with ImageNet-normalized tensor conversion; the
    validation pipeline only center-crops.
    """
    # ImageNet channel statistics used by ToTensor's normalization.
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15,
                           scale_limit=0.4,
                           rotate_limit=45,
                           p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        # One color-jitter variant.
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        # One noise variant.
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        # One blur variant.
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        # One geometric warp.
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])

    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])

    return train_tfms, valid_tfms
示例#16
0
def cifar10_albumentations(mean, std):
    """CIFAR-10 train/test pipelines wrapped in Albumentation_Transforms.

    Normalization precedes tensor conversion in both pipelines — the
    ordering of Normalize and ToTensor matters.
    """
    train_transforms = A.Compose([
        A.GridDistortion(distort_limit=(-0.3, 0.3), p=0.5),
        A.Rotate(limit=(-10, 10), p=0.5),
        A.HorizontalFlip(p=0.25),
        A.Cutout(num_holes=1, max_h_size=12, max_w_size=12),
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ])

    test_transforms = A.Compose([
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ])

    return (Albumentation_Transforms(train_transforms),
            Albumentation_Transforms(test_transforms))
示例#17
0
def transform(train=True, mean=None, std=None):
    """Return the normalization pipeline or, for training, the full
    augmentation pipeline ending in it.

    ``mean``/``std`` fall back to the module-level ``_mean``/``_std``
    when not provided.
    """
    normalize = alb.Compose([
        alb.Normalize(mean=mean or _mean, std=std or _std),
        ToTensorV2(),
    ])

    if not train:
        return normalize

    geometric = [
        alb.HorizontalFlip(),
        alb.VerticalFlip(),
        alb.RandomRotate90(),
        alb.ShiftScaleRotate(shift_limit=0.0625,
                             scale_limit=0.2,
                             rotate_limit=15,
                             p=0.9,
                             border_mode=cv2.BORDER_REFLECT),
    ]
    distortion = alb.OneOf([
        alb.OpticalDistortion(p=0.3),
        alb.GridDistortion(p=.1),
        alb.IAAPiecewiseAffine(p=0.3),
    ], p=0.3)
    photometric = alb.OneOf([
        alb.HueSaturationValue(10, 15, 10),
        alb.CLAHE(clip_limit=2),
        alb.RandomBrightnessContrast(),
    ], p=0.3)

    return alb.Compose(geometric + [distortion, photometric, normalize])
示例#18
0
 def __init__(self, image_size):
     """Build train/test augmentation pipelines for square images.

     Args:
         image_size: side length used by every resize/cutout transform.

     Stores the pipelines in ``self.data_transform`` under the keys
     ``'train_transform'`` and ``'test_transform'``.
     """
     self.data_transform = {
         'train_transform': A.Compose([
             A.Transpose(p=0.5),
             A.VerticalFlip(p=0.5),
             A.HorizontalFlip(p=0.5),
             A.RandomBrightness(limit=0.2, p=0.75),
             A.RandomContrast(limit=0.2, p=0.75),
             # One blur/noise variant.
             A.OneOf([
                 A.MotionBlur(blur_limit=5),
                 A.MedianBlur(blur_limit=5),
                 A.GaussianBlur(blur_limit=5),
                 A.GaussNoise(var_limit=(5.0, 30.0)),
             ], p=0.7),
             # One geometric warp.
             A.OneOf([
                 A.OpticalDistortion(distort_limit=1.0),
                 A.GridDistortion(num_steps=5, distort_limit=1.),
                 A.ElasticTransform(alpha=3),
             ], p=0.7),
             A.CLAHE(clip_limit=4.0, p=0.7),
             A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20,
                                  val_shift_limit=10, p=0.5),
             A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1,
                                rotate_limit=15, border_mode=0, p=0.85),
             A.Resize(image_size, image_size),
             A.Cutout(max_h_size=int(image_size * 0.375),
                      max_w_size=int(image_size * 0.375),
                      num_holes=1, p=0.7),
             A.Normalize(),
         ]),
         # Fix: the original test pipeline resized a second time *after*
         # normalization; the image is already image_size x image_size at
         # that point, so the extra Resize was redundant work and has been
         # removed.
         'test_transform': A.Compose([
             A.Resize(image_size, image_size),
             A.Normalize(),
         ]),
     }
示例#19
0
def get_transforms(image_size):
    """Return ``(transforms_train, transforms_val)`` for square images.

    Args:
        image_size: side length used by the resize and cutout transforms.

    Both pipelines end with a resize to ``image_size`` and normalization;
    training additionally applies flips, photometric jitter, blur/noise,
    geometric warps and a large cutout.
    """
    transforms_train = albumentations.Compose([
        albumentations.Transpose(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightness(limit=0.2, p=0.75),
        albumentations.RandomContrast(limit=0.2, p=0.75),
        # One blur/noise variant.
        albumentations.OneOf([
            albumentations.MotionBlur(blur_limit=5),
            albumentations.MedianBlur(blur_limit=5),
            albumentations.GaussianBlur(blur_limit=5),
            albumentations.GaussNoise(var_limit=(5.0, 30.0)),
        ], p=0.7),

        # One geometric warp.
        albumentations.OneOf([
            albumentations.OpticalDistortion(distort_limit=1.0),
            albumentations.GridDistortion(num_steps=5, distort_limit=1.),
            albumentations.ElasticTransform(alpha=3),
        ], p=0.7),

        albumentations.CLAHE(clip_limit=4.0, p=0.7),
        albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
        albumentations.Resize(image_size, image_size),
        # Cutout removes a single hole covering up to 37.5% of each side.
        albumentations.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
        albumentations.Normalize()
    ])

    transforms_val = albumentations.Compose([
        albumentations.Resize(image_size, image_size),
        albumentations.Normalize()
    ])

    return transforms_train, transforms_val
示例#20
0
    def transformer(self):
        """Build the augmentation pipeline used to reduce dataset bias.

        Returns an augmentation-heavy pipeline when ``self.is_train`` is
        true; otherwise only a resize to ``(img_size, img_size)``.
        """
        if self.is_train:
            transform = A.Compose([
                A.CLAHE(),
                A.RandomRotate90(),
                # Fix: removed A.DualTransform() — DualTransform is the
                # abstract base class of albumentations transforms, not a
                # usable augmentation; applying it raises
                # NotImplementedError at call time.
                A.Transpose(),
                A.Resize(height=self.img_size,
                         width=self.img_size,
                         interpolation=cv2.INTER_AREA),
                A.ShiftScaleRotate(shift_limit=0.0625,
                                   scale_limit=0.50,
                                   rotate_limit=45,
                                   p=.75),
                A.Blur(blur_limit=3),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.HueSaturationValue(),
            ])
        else:
            transform = A.Compose([
                A.Resize(height=self.img_size,
                         width=self.img_size,
                         interpolation=cv2.INTER_AREA),
            ])

        return transform
示例#21
0
def get_transforms(phase, width=1600, height=256):
    """Build transforms for `phase` ("train" adds augmentations).

    All phases resize to ``(height, width)``, normalize with ImageNet
    statistics and convert to tensor.
    """
    list_transforms = []
    if phase == "train":
        list_transforms.extend([
            albu.HorizontalFlip(),
            albu.OneOf([
                albu.RandomContrast(),
                albu.RandomGamma(),
                albu.RandomBrightness(),
            ], p=0.3),
            albu.OneOf([
                albu.ElasticTransform(alpha=120,
                                      sigma=120 * 0.05,
                                      alpha_affine=120 * 0.03),
                albu.GridDistortion(),
                albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ], p=0.3),
            albu.ShiftScaleRotate(),
        ])
    list_transforms.extend([
        # Fix: albu.Resize takes (height, width); the original passed
        # (width, height), which with the defaults would produce a
        # 1600x256 (HxW) image instead of the intended 256x1600.
        albu.Resize(height, width, always_apply=True),
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        ToTensor(),
    ])
    return albu.Compose(list_transforms)
示例#22
0
def _strong_aug(p=0.5):
    """Strong augmentation pipeline (flip, affine, warps, sharpening,
    contrast and hue jitter), applied with overall probability `p`."""
    import albumentations

    warp = albumentations.OneOf([
        albumentations.OpticalDistortion(p=0.5,
                                         border_mode=cv2.BORDER_CONSTANT),
        albumentations.GridDistortion(p=0.5,
                                      border_mode=cv2.BORDER_CONSTANT),
        albumentations.IAAPiecewiseAffine(p=0.5),
        albumentations.ElasticTransform(p=0.5,
                                        border_mode=cv2.BORDER_CONSTANT),
    ], p=0.5)
    sharpen = albumentations.OneOf([
        albumentations.CLAHE(clip_limit=2),
        albumentations.IAASharpen(),
        albumentations.IAAEmboss(),
    ], p=0.5)
    contrast = albumentations.OneOf([
        albumentations.RandomBrightnessContrast(p=0.5),
    ], p=0.4)

    return albumentations.Compose([
        albumentations.HorizontalFlip(p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                        scale_limit=0.2,
                                        rotate_limit=0,
                                        p=0.5,
                                        border_mode=cv2.BORDER_CONSTANT),
        warp,
        sharpen,
        contrast,
        albumentations.HueSaturationValue(p=0.5),
    ], p=p)
示例#23
0
def get_train_transform():
    """Training pipeline for 256x256 crops.

    Pads if needed, takes a random-sized crop, then applies noise, blur,
    affine, warp, sharpening/contrast, color, compression and elastic
    augmentations before normalizing.
    """
    crop_height = 256
    crop_width = 256

    return albu.Compose([
        albu.PadIfNeeded(min_height=crop_height, min_width=crop_width, p=1),
        # Crop between 30% of crop_height and 288 pixels tall, then
        # resize to (crop_height, crop_width).
        albu.RandomSizedCrop((int(0.3 * crop_height), 288), crop_height, crop_width, p=1),
        albu.HorizontalFlip(p=0.5),
        # One noise variant.
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=0.5),
            albu.GaussNoise(p=0.5),
        ], p=0.2),
        # One blur variant.
        albu.OneOf([
            albu.MotionBlur(p=0.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0, rotate_limit=20, p=0.1),
        # One geometric warp.
        albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=0.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        # One sharpening / contrast variant.
        albu.OneOf([
            albu.CLAHE(clip_limit=2, p=0.5),
            albu.IAASharpen(p=0.5),
            albu.IAAEmboss(p=0.5),
            albu.RandomBrightnessContrast(p=0.5),
        ], p=0.3),
        albu.HueSaturationValue(p=0.3),
        albu.JpegCompression(p=0.2, quality_lower=20, quality_upper=99),
        albu.ElasticTransform(p=0.1),
        albu.Normalize(p=1)
    ], p=1)
示例#24
0
def train_image_augmentation(image, img_size):
    """Apply the training augmentation pipeline to a single image.

    `image` may be any array-like (e.g. a PIL image); it is converted to
    a numpy array first. Returns the augmented image array normalized
    with ImageNet statistics.
    """
    pipeline = A.Compose(
        [
            A.Resize(img_size, img_size),
            A.CenterCrop(img_size, img_size, p=1.0),
            A.Transpose(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ShiftScaleRotate(p=0.5),
            A.Blur(blur_limit=3),
            A.OpticalDistortion(p=0.5),
            A.GridDistortion(p=0.5),
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.5),
            A.CoarseDropout(p=0.5),
            A.Cutout(p=0.5),
            A.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225],
                        max_pixel_value=255.0,
                        p=1.0),
        ],
        p=1.0,
    )

    return pipeline(image=np.array(image))['image']
示例#25
0
    def _get_static_augmentations(self, config):
        """Return a fixed-length list of optional augmentations.

        Each slot corresponds to one config flag and holds either the
        configured transform or None when the flag is disabled, keeping
        the slot positions stable regardless of configuration.
        """
        factories = (
            ("horizontal_flip",
             lambda: albu.HorizontalFlip(p=0.5)),
            ("grid_distortion",
             lambda: albu.GridDistortion(distort_limit=0.5,
                                         border_mode=cv2.BORDER_REPLICATE,
                                         p=1.0)),
            ("hsv_color_shift",
             lambda: albu.HueSaturationValue(p=1.0)),
            ("random_brightness_contrast",
             lambda: albu.RandomBrightnessContrast(p=1)),
            ("random_shadow",
             lambda: albu.RandomShadow(shadow_roi=(0, 0, 1, 1), p=0.2)),
        )
        return [make() if config[key] else None for key, make in factories]
示例#26
0
def get_transforms(*, data_type):
    """Return the albumentations pipeline for the given *data_type*.

    Parameters
    ----------
    data_type : str
        One of ``"light_train"``, ``"train"`` or ``"valid"``.

    Returns
    -------
    albumentations Compose pipeline.

    Raises
    ------
    ValueError
        If *data_type* is not a supported value. (The original implicitly
        returned ``None`` here, deferring the failure to the call site.)
    """
    if data_type == "light_train":
        # Cheap geometric-only augmentation set.
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(scale_limit=(0, 0), p=0.5),
            ToTensorV2(),
        ])

    if data_type == "train":
        # Full augmentation set; additional_targets lets the same spatial
        # transform be applied to the extra r/g/b/y image planes.
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            albumentations.OneOf([
                albumentations.ElasticTransform(
                    alpha=1, sigma=20, alpha_affine=10),
                albumentations.GridDistortion(num_steps=6, distort_limit=0.1),
                albumentations.OpticalDistortion(distort_limit=0.05,
                                                 shift_limit=0.05),
            ],
                                 p=0.2),
            # One blur variant chosen independently per channel.
            albumentations.core.composition.PerChannel(albumentations.OneOf([
                albumentations.MotionBlur(p=.05),
                albumentations.MedianBlur(blur_limit=3, p=.05),
                albumentations.Blur(blur_limit=3, p=.05),
            ]),
                                                       p=1.0),
            albumentations.OneOf([
                albumentations.CoarseDropout(max_holes=16,
                                             max_height=CFG.size // 16,
                                             max_width=CFG.size // 16,
                                             fill_value=0,
                                             p=0.5),
                albumentations.GridDropout(ratio=0.09, p=0.5),
                albumentations.Cutout(num_holes=8,
                                      max_h_size=CFG.size // 16,
                                      max_w_size=CFG.size // 16,
                                      p=0.2),
            ],
                                 p=0.5),
            albumentations.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.5),
            ToTensorV2(),
        ],
                       additional_targets={
                           'r': 'image',
                           'g': 'image',
                           'b': 'image',
                           'y': 'image',
                       })

    if data_type == 'valid':
        # Validation: resize only, no stochastic augmentation.
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])

    raise ValueError(f"Unknown data_type: {data_type!r}")
def get_individual_transforms():
    """Compose per-image augmentations: one flip/transpose choice, one
    optional spatial distortion, and one optional noise/blur/dropout op.

    Each group always fires (p=1.0) but includes ``NoOp`` so the identity
    outcome remains possible.
    """
    flip_options = [
        A.Transpose(p=1.0),
        A.VerticalFlip(p=1.0),
        A.HorizontalFlip(p=1.0),
        A.RandomRotate90(p=1.0),
        A.NoOp(),
    ]
    warp_options = [
        A.ElasticTransform(p=1.0),
        A.GridDistortion(p=1.0),
        A.OpticalDistortion(p=1.0),
        A.NoOp(),
    ]
    noise_options = [
        A.GaussNoise(p=1.0),
        A.GaussianBlur(p=1.0),
        A.ISONoise(p=1.0),
        A.CoarseDropout(p=1.0, max_holes=16, max_height=16, max_width=16),
        A.NoOp(),
    ]
    return A.Compose([
        A.OneOf(flip_options, p=1.0),
        A.OneOf(warp_options, p=1.0),
        A.OneOf(noise_options, p=1.0),
    ])
示例#28
0
    def __init__(self, outputs=6):
        """Set up backbone, data split, and train/valid transform pipelines.

        Parameters
        ----------
        outputs : int
            Number of units of the final linear head (default 6).
        """
        super().__init__()
        # Pretrained ResNet-34 backbone plus a task-specific linear head.
        self.net = models.resnet34(True)
        self.linear = Linear(1000, outputs)

        # NOTE(review): hard-coded machine-local paths — confirm they exist
        # in the training environment.
        labels = pd.read_csv("/home/dipet/kaggle/prostate/input/prostate-cancer-grade-assessment/train.csv")
        self.train_df, self.valid_df = train_test_split(labels, test_size=0.2)
        self.data_dir = "/datasets/panda/train_64_100"

        # Half the time apply a randomized noise/color/geometry group,
        # then always convert to float.
        randomized_group = A.Compose(
            [
                A.OneOf([A.GaussNoise(), A.MultiplicativeNoise(elementwise=True)]),
                A.RandomBrightnessContrast(0.02, 0.02),
                A.HueSaturationValue(0, 10, 10),
                A.Flip(),
                A.RandomGridShuffle(grid=(10, 10)),
                A.GridDistortion(),
                A.Rotate(),
            ],
            p=0.5,
        )
        self.train_transforms = A.Compose([randomized_group, A.ToFloat()])
        self.valid_transforms = A.Compose([A.ToFloat()])
def albumentation():
    """Build the training augmentation pipeline: one option drawn from each
    of the noise, blur, color, contrast, and distortion groups, followed by
    ImageNet normalization and tensor conversion."""
    noise_group = albumentations.OneOf([
        albumentations.GaussNoise(),
        albumentations.IAAAdditiveGaussianNoise(),
    ])
    blur_group = albumentations.OneOf([
        albumentations.MotionBlur(blur_limit=3, p=0.2),
        albumentations.MedianBlur(blur_limit=3, p=0.1),
        albumentations.Blur(blur_limit=2, p=0.1),
    ])
    color_group = albumentations.OneOf([
        albumentations.RandomBrightness(limit=(0.1, 0.4)),
        albumentations.HueSaturationValue(hue_shift_limit=(0, 128), sat_shift_limit=(0, 60), val_shift_limit=(0, 20)),
        albumentations.RGBShift(r_shift_limit=30, g_shift_limit=30, b_shift_limit=30),
    ])
    contrast_group = albumentations.OneOf([
        albumentations.CLAHE(),
        albumentations.ChannelShuffle(),
        albumentations.IAASharpen(),
        albumentations.IAAEmboss(),
        albumentations.RandomBrightnessContrast(),
    ])
    distortion_group = albumentations.OneOf([
        albumentations.RandomGamma(gamma_limit=(35, 255)),
        albumentations.OpticalDistortion(),
        albumentations.GridDistortion(),
        albumentations.IAAPiecewiseAffine(),
    ])
    to_tensor = A_torch.ToTensor(normalize={
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]})
    return albumentations.Compose([
        noise_group,
        blur_group,
        color_group,
        contrast_group,
        distortion_group,
        to_tensor,
    ])
示例#30
0
File: alpha_base.py  Project: dreyk/emt
 def _strong_aug(p=0.5):
     """Compose the 'strong' augmentation set, applied with probability *p*."""
     flips = [
         albumentations.HorizontalFlip(),
         albumentations.VerticalFlip(),
         albumentations.ShiftScaleRotate(
             shift_limit=0, scale_limit=0, rotate_limit=15, p=0.3),
     ]
     distortion = albumentations.OneOf([
         albumentations.OpticalDistortion(p=0.3),
         albumentations.GridDistortion(p=0.1),
         albumentations.IAAPiecewiseAffine(p=0.3),
     ], p=0.2)
     sharpening = albumentations.OneOf([
         albumentations.CLAHE(clip_limit=2),
         albumentations.IAASharpen(),
         albumentations.IAAEmboss(),
     ], p=0.3)
     # NOTE(review): a single-member OneOf — presumably equivalent to
     # applying RandomBrightnessContrast directly with the outer p=0.4;
     # confirm against albumentations' OneOf probability normalization.
     brightness = albumentations.OneOf([
         albumentations.RandomBrightnessContrast(p=0.3),
     ], p=0.4)
     return albumentations.Compose(
         flips + [
             distortion,
             sharpening,
             brightness,
             albumentations.HueSaturationValue(p=0.3),
         ],
         p=p)