コード例 #1
0
def light_aug(p=1):
    """Build a mild albumentations pipeline that operates in float32 space.

    The input is converted to float32 in [0.0, 1.0], lightly augmented, and
    converted back to uint16. ``image1`` is registered as an additional image
    target so paired images receive identical transforms.

    :param p: probability of applying the whole pipeline.
    """
    transforms = [
        # albumentations accepts uint8 or float32 in [0, 1]; ToFloat() infers
        # the divisor from the input dtype (pass max_value=65535.0 to force
        # a specific range). FromFloat at the end reverses the scaling.
        ToFloat(),
        RandomBrightness(limit=(0, 0.2), p=0.2),
        # Geometric augmentations.
        HorizontalFlip(p=0.1),
        ShiftScaleRotate(shift_limit=1 / 14, scale_limit=0.1,
                         rotate_limit=15, p=0.9),
        OpticalDistortion(distort_limit=0.1, shift_limit=0.05, p=0.5),
        # Multiply back into the 16-bit range.
        FromFloat(dtype='uint16', max_value=65535.0),
    ]
    return Compose(transforms, p=p, additional_targets={"image1": "image"})
コード例 #2
0
def strong_aug(p=0.5):
    """Aggressive pipeline: dropout, noise, affine jitter, blur, warps and
    sharpen/emboss/contrast, each stage gated by its own probability."""
    dropout_stage = OneOf([CoarseDropout(p=0.5), Cutout(p=0.5)], p=0.3)
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    affine_stage = ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                                    rotate_limit=45, p=0.2)
    blur_stage = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    warp_stage = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    contrast_stage = OneOf([
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.2)
    return Compose([dropout_stage, noise_stage, affine_stage, blur_stage,
                    warp_stage, contrast_stage], p=p)
コード例 #3
0
def get_augmentations(p=1.0):
    """224x224 random-crop pipeline with noise, blur, distortions and
    color jitter; each OneOf stage picks a single member transform."""
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=1)
    blur_stage = OneOf([
        MotionBlur(p=.6),
        MedianBlur(blur_limit=3, p=0.6),
        Blur(blur_limit=3, p=0.6),
    ], p=1)
    warp_stage = OneOf([
        OpticalDistortion(p=0.5),
        GridDistortion(p=.4),
        IAAPiecewiseAffine(p=0.5),
    ], p=0.8)
    contrast_stage = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.9)
    return Compose([
        RandomSizedCrop((250, 600), 224, 224),
        noise_stage,
        blur_stage,
        ShiftScaleRotate(shift_limit=0.0825, scale_limit=0.3,
                         rotate_limit=30, p=1),
        warp_stage,
        contrast_stage,
        HueSaturationValue(p=0.3),
    ], p=p)
コード例 #4
0
def get_augmentations():
    """Compose crop, blur, distortion and flip stages using the project's
    wrapper classes (AlbuAug / PickOne / ApplyAll)."""
    blur_stage = PickOne([
        GaussianBlurAug(p=0.4, kernel_sizes=range(7, 16, 2)),
        MotionBlurAug(p=0.4, kernel_sizes=(3, 5)),
    ])
    distort_stage = PickOne([
        AlbuAug(OpticalDistortion(p=0.2, distort_limit=1, shift_limit=0.5)),
        AlbuAug(GridDistortion(p=0.2)),
        AlbuAug(ElasticTransform(p=0.4, alpha=40, sigma=90 * 0.05,
                                 alpha_affine=90 * 0.05)),
        AlbuAug(ElasticTransform(p=0.2)),
    ])
    # All four stages run in order; PickOne selects one member per stage.
    return ApplyAll([
        RandomSizedCropAlbuAug(0.4),
        blur_stage,
        distort_stage,
        FlipAug(p=0.4),
    ])
コード例 #5
0
ファイル: datasets.py プロジェクト: lzj1769/Melanoma
def get_transforms():
    """Return the training augmentation pipeline (applied with p=0.8).

    Bug fix: the original constructed the Compose object but never
    returned it, so every caller received ``None``.
    """
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=90, p=0.5),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=0.8)
コード例 #6
0
 def augment(self, image, mask):
     """Apply the heavy crop/flip/warp/contrast pipeline to an image and
     its mask; return the augmented (image, mask) pair."""
     pipeline = Compose([
         OneOf([
             RandomSizedCrop(min_max_height=(50, 101),
                             height=self.out_size,
                             width=self.out_size,
                             p=0.5),
             PadIfNeeded(min_height=self.out_size,
                         min_width=self.out_size, p=0.5),
         ], p=1),
         VerticalFlip(p=0.5),
         RandomRotate90(p=0.5),
         OneOf([
             ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                              alpha_affine=120 * 0.03),
             GridDistortion(p=0.5),
             OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
         ], p=0.8),
         CLAHE(p=0.8),
         RandomBrightnessContrast(p=0.8),
         RandomGamma(p=0.8),
     ])
     result = pipeline(image=image, mask=mask)
     return result['image'], result['mask']
コード例 #7
0
ファイル: datagen.py プロジェクト: kshitij86/Deblur-Network
def flow(data_dir, Timage, Tmask, batch, size1, size2, augument=False):
    """Endless generator yielding (image, mask) batches scaled to [-1, 1].

    Each image is rescaled, forced to even dimensions and converted to RGB;
    the mask is resized to twice the image size (the network presumably
    outputs 2x — TODO confirm with the model definition). ``size1`` and
    ``size2`` are accepted for interface compatibility but unused here.

    Fixes: the two bare ``except:`` clauses also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``Exception``. The duplicated
    append branches were merged.

    :param data_dir: root directory containing ``Timage`` and ``Tmask``.
    :param batch: number of samples per yielded batch.
    :param augument: when True, apply light albumentations transforms.
    """
    images_ = os.listdir(data_dir + Timage)
    shuffle(images_)
    ids_int = list(range(len(images_)))
    NORMALIZE = 127.5  # maps uint8 [0, 255] onto [-1, 1] after the -1 shift
    while True:
        try:
            for start in range(0, len(ids_int), batch):
                x_batch = []
                y_batch = []
                end = min(start + batch, len(images_))
                batch_create = ids_int[start:end]
                jbs = {}
                for loads in batch_create:
                    try:
                        img = cv2.imread(
                            os.path.join(data_dir, Timage, images_[loads]))
                        img = image_resize(img, width=param_maps["scale"])
                        height_o_image, width_o_image = img.shape[0], img.shape[1]
                        # Force even dimensions so the doubled mask size is
                        # an exact multiple.
                        if height_o_image % 2 != 0:
                            height_o_image -= 1
                        if width_o_image % 2 != 0:
                            width_o_image -= 1
                        jbs["width"] = width_o_image * 2
                        jbs["height"] = height_o_image * 2
                        img = cv2.resize(img, (width_o_image, height_o_image))
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                        masks = cv2.imread(
                            os.path.join(data_dir, Tmask, images_[loads]))
                        masks = cv2.resize(masks,
                                           (jbs["width"], jbs["height"]))
                    except Exception:
                        # Skip unreadable/corrupt files rather than killing
                        # the generator.
                        continue
                    if augument:
                        aug = Compose([
                            VerticalFlip(p=0.1),
                            Transpose(p=0.01),
                            RandomGamma(p=0.06),
                            OpticalDistortion(p=0.00,
                                              distort_limit=0.7,
                                              shift_limit=0.3)
                        ])
                        augmented = aug(image=img, mask=masks)
                        img = augmented['image']
                        masks = augmented['mask']
                    x_batch.append(img)
                    y_batch.append(masks)
                x_batch = np.array(x_batch) / NORMALIZE - 1
                y_batch = np.array(y_batch) / NORMALIZE - 1
                yield x_batch, y_batch

        except Exception:
            # Best-effort: restart the epoch loop on unexpected failures.
            continue
コード例 #8
0
 def strong_aug(p=0.7):
     """Moderate blur/affine/distortion pipeline.

     Fix: ``ShiftScaleRotate`` appeared twice in the local import list;
     the duplicate has been removed.

     :param p: probability of applying the composed pipeline.
     """
     # Import locally so the module does not hard-depend on albumentations.
     from albumentations import (
         HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE,
         RandomRotate90, Transpose, Blur,
         OpticalDistortion, GridDistortion, HueSaturationValue,
         IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur,
         IAAPiecewiseAffine, IAASharpen, IAAEmboss,
         RandomBrightnessContrast, Flip, OneOf, Compose)
     augs = [
         OneOf(
             [
                 MotionBlur(p=0.3),
                 #MedianBlur(blur_limit=3, p=0.1),
                 Blur(blur_limit=10, p=0.3),
             ],
             p=0.3),
         ShiftScaleRotate(shift_limit=0.0625,
                          scale_limit=0.2,
                          rotate_limit=45,
                          p=0.2),
         OneOf(
             [
                 OpticalDistortion(p=0.3),
                 # GridDistortion(p=0.1) caused a segmentation fault due to
                 # an OpenCV version mismatch; keep disabled until resolved.
                 IAAPiecewiseAffine(p=0.2),
             ],
             p=0.3),
     ]
     return Compose(augs, p=p)
コード例 #9
0
ファイル: data.py プロジェクト: wjzzzyx/workspace
    def train_aug(self, image, label):
        """Augment an (image, label-mask) pair for training: photometric
        jitter plus flips/rotations and warps, then grayscale conversion
        and a final 256x256 random crop."""
        pipeline = Compose([
            OneOf([CLAHE(), IAASharpen(), IAAEmboss()], p=0.5),
            RandomContrast(),
            RandomBrightness(),
            RandomRotate90(),
            Flip(),
            OneOf([
                ElasticTransform(),
                OpticalDistortion(),
                GridDistortion(),
                IAAPiecewiseAffine(),
            ], p=0.5),
        ], p=0.9)
        out = pipeline(image=image, mask=label)
        # Grayscale conversion and cropping always run, outside the p=0.9
        # gate above.
        out = ToGray(p=1)(image=out['image'], mask=out['mask'])
        out = RandomCrop(256, 256)(image=out['image'], mask=out['mask'])
        return out['image'], out['mask']
コード例 #10
0
def strong_aug(p=1):
    """Float-space strong augmentation: convert to float32 [0, 1], apply
    flips/rotations, noise, blur, affine jitter and warps, then convert
    back to uint16."""
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur_stage = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    warp_stage = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    return Compose([
        ToFloat(),
        RandomRotate90(),
        Flip(),
        Transpose(),
        noise_stage,
        blur_stage,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        warp_stage,
        FromFloat(dtype='uint16', max_value=65535.0),
    ], p=p)
コード例 #11
0
 def box_segmentation_aug():
     """Heavy corruption pipeline for box segmentation: five sequential
     stages (color, degradation, occlusion, warp, orientation), each
     picking one of its member transforms."""
     color_stage = OneOf([
         RandomBrightnessContrast(brightness_limit=0.2, p=0.5),
         RandomGamma(gamma_limit=50, p=0.5),
         ChannelShuffle(p=0.5),
     ])
     degrade_stage = OneOf([
         ImageCompression(quality_lower=0, quality_upper=20, p=0.5),
         MultiplicativeNoise(multiplier=(0.3, 0.8),
                             elementwise=True,
                             per_channel=True,
                             p=0.5),
         Blur(blur_limit=(15, 15), p=0.5),
     ])
     occlude_stage = OneOf([
         CenterCrop(height=1000, width=1000, p=0.1),
         RandomGridShuffle(grid=(3, 3), p=0.2),
         CoarseDropout(max_holes=20,
                       max_height=100,
                       max_width=100,
                       fill_value=53,
                       p=0.2),
     ])
     warp_stage = OneOf([
         GridDistortion(p=0.5, num_steps=2, distort_limit=0.2),
         ElasticTransform(alpha=157, sigma=80, alpha_affine=196, p=0.5),
         OpticalDistortion(distort_limit=0.5, shift_limit=0.5, p=0.5),
     ])
     orient_stage = OneOf([
         VerticalFlip(p=0.5),
         HorizontalFlip(p=0.5),
         Rotate(limit=44, p=0.5),
     ])
     return Compose([color_stage, degrade_stage, occlude_stage,
                     warp_stage, orient_stage])
コード例 #12
0
def strong_aug(p=0.5):
    """Medium-strength pipeline: noise, blur, a small-angle affine, warps
    and photometric jitter. Flips/rotations are intentionally omitted."""
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur_stage = OneOf([
        MotionBlur(p=0.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.2)
    warp_stage = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    photometric_stage = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    return Compose([
        noise_stage,
        blur_stage,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=3, p=0.2),
        warp_stage,
        photometric_stage,
        HueSaturationValue(p=0.3),
    ], p=p)
コード例 #13
0
 def augment(self):  # a probability parameter existed once; kept removed
     """Return a strong augmentation pipeline that ends with a 256x256
     random crop followed by blur/contrast jitter."""
     warp_stage = OneOf([
         OpticalDistortion(p=0.3),
         GridDistortion(p=.1),
         IAAPiecewiseAffine(p=0.3),
     ], p=0.2)
     noise_stage = OneOf([
         IAAAdditiveGaussianNoise(),
         GaussNoise(),
     ], p=0.2)
     blur_stage = OneOf([
         MedianBlur(blur_limit=3, p=.1),
         Blur(blur_limit=3, p=.1),
     ], p=0.2)
     contrast_stage = OneOf([
         CLAHE(clip_limit=2),
         IAASharpen(),
         IAAEmboss(),
         RandomContrast(),
         RandomBrightness(),
     ], p=0.3)
     return Compose([
         RandomRotate90(),
         Flip(),
         Transpose(),
         ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                          rotate_limit=45, p=.2),
         warp_stage,
         noise_stage,
         RandomCrop(256, 256, p=1),
         blur_stage,
         contrast_stage,
         HueSaturationValue(p=0.7),
     ], p=1)
コード例 #14
0
def test_optical_distortion_interpolation(interpolation):
    """OpticalDistortion must use the requested interpolation for the image
    and nearest-neighbour for the mask, matching the functional API."""
    rand_image = np.random.randint(low=0, high=256, size=(100, 100, 3),
                                   dtype=np.uint8)
    rand_mask = np.random.randint(low=0, high=2, size=(100, 100),
                                  dtype=np.uint8)
    transform = OpticalDistortion(distort_limit=(0.05, 0.05),
                                  shift_limit=(0, 0),
                                  interpolation=interpolation,
                                  p=1)
    result = transform(image=rand_image, mask=rand_mask)
    # Fixed limits force k=0.05, dx=dy=0, so the functional call is exact.
    expected_image = F.optical_distortion(
        rand_image, k=0.05, dx=0, dy=0, interpolation=interpolation,
        border_mode=cv2.BORDER_REFLECT_101)
    expected_mask = F.optical_distortion(
        rand_mask, k=0.05, dx=0, dy=0, interpolation=cv2.INTER_NEAREST,
        border_mode=cv2.BORDER_REFLECT_101)
    assert np.array_equal(result['image'], expected_image)
    assert np.array_equal(result['mask'], expected_mask)
コード例 #15
0
def aug_train(resolution, p=1):
    """Resize to ``resolution`` x ``resolution``, apply strong
    augmentation, then Normalize."""
    flip_stage = OneOf([
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
    ], p=0.5)
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.5)
    blur_stage = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=0.1),
        Blur(blur_limit=3, p=0.1),
    ], p=0.5)
    warp_stage = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.5)
    contrast_stage = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.5)
    return Compose([
        Resize(resolution, resolution),
        flip_stage,
        noise_stage,
        blur_stage,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        warp_stage,
        contrast_stage,
        HueSaturationValue(p=0.3),
        Normalize(),
    ], p=p)
コード例 #16
0
    def gettraintransforms(self, mean, std, p=1):
        """Training-phase transform: flips, noise, blur, affine and warp
        stages, HSV jitter, an always-applied Cutout, then Normalize and
        tensor conversion.

        NOTE(review): ``mean`` is reused as the GaussNoise mean —
        presumably intentional, but confirm against the dataset stats.
        """
        pipeline = Compose([
            RandomRotate90(),
            Flip(),
            GaussNoise(p=0.6, mean=mean),
            OneOf([
                MotionBlur(p=0.2),
                MedianBlur(blur_limit=3, p=0.1),
                Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=45, p=0.3),
            OneOf([
                OpticalDistortion(p=0.4),
                GridDistortion(p=0.2),
            ], p=0.3),
            HueSaturationValue(hue_shift_limit=20,
                               sat_shift_limit=0.1,
                               val_shift_limit=0.1,
                               p=0.3),
            Cutout(always_apply=True,
                   num_holes=2,
                   max_h_size=10,
                   max_w_size=10,
                   fill_value=(255 * .6)),
            Normalize(mean=mean, std=std, always_apply=True),
            pytorch.ToTensorV2(always_apply=True),
        ], p=p)
        return pipeline
コード例 #17
0
def aug_with_crop(width=640, height=480, crop_prob=1):
    """Flips/rotations, light photometric jitter and elastic-style warps.

    NOTE(review): the RandomCrop stage is commented out, so ``width``,
    ``height`` and ``crop_prob`` are currently unused; kept for interface
    compatibility.
    """
    warp_stage = OneOf([
        ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
    ], p=0.8)
    return Compose([
        # RandomCrop(width=480, height=640, p=crop_prob),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        ShiftScaleRotate(shift_limit=0.01, scale_limit=0.04,
                         rotate_limit=0, p=0.25),
        RandomBrightnessContrast(p=0.5),
        RandomGamma(p=0.25),
        IAAEmboss(p=0.25),
        Blur(p=0.01, blur_limit=3),
        warp_stage,
    ], p=1)
コード例 #18
0
def strong_aug(p=.5):
    """Horizontal flip plus noise/blur/warp/contrast stages, channel
    shuffle and a 20-hole cutout."""
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.4)
    blur_stage = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=.1),
        Blur(blur_limit=3, p=.1),
    ], p=0.3)
    warp_stage = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    contrast_stage = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    return Compose([
        HorizontalFlip(),
        noise_stage,
        blur_stage,
        warp_stage,
        contrast_stage,
        HueSaturationValue(p=0.3),
        ChannelShuffle(),
        Cutout(num_holes=20, max_h_size=16, max_w_size=16),
    ], p=p)
コード例 #19
0
def medium_aug(original_height=128, original_width=128, k=4):
    """Crop-or-pad to the original size, flips/rotations, then one of
    three warps. ``k`` sets the minimum crop height as
    ``original_height // k``."""
    crop_or_pad = OneOf([
        RandomSizedCrop(
            min_max_height=(original_height // k, original_height),
            height=original_height,
            width=original_width,
            p=0.5),
        PadIfNeeded(min_height=original_height,
                    min_width=original_width, p=0.5),
    ], p=1)
    warp_stage = OneOf([
        ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5),
    ], p=0.8)
    return Compose([
        crop_or_pad,
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        warp_stage,
    ])
コード例 #20
0
def get_train_transforms():
    """Return a callable mapping a PIL-style image to the augmented output.

    Resizes to 236x236, augments, center-crops to 224x224, normalizes with
    ImageNet statistics and converts to a tensor.
    """
    pipeline = Compose([
        Resize(236, 236),
        Flip(),
        OneOf([
            IAAAdditiveGaussianNoise(p=.5),
            GaussNoise(p=.4),
        ], p=0.4),
        OneOf([
            MotionBlur(p=0.6),
            Blur(blur_limit=3, p=0.2),
        ], p=0.4),
        ShiftScaleRotate(shift_limit=0.0725, scale_limit=0.2,
                         rotate_limit=45, p=0.6),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.4),
            IAAPiecewiseAffine(p=0.2),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.25),
        HueSaturationValue(p=0.3),
        CenterCrop(224, 224),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensor(),
    ])
    # albumentations expects ndarray input and keyword-style calls.
    return lambda img: pipeline(image=np.array(img))
コード例 #21
0
 def get_corrupter(self):
     """Compose four always-on corruption stages; each picks exactly one
     member transform (distortion, effect, misc jitter, blur/noise)."""
     distortion_stage = OneOf([
         OpticalDistortion(p=1),
         GridDistortion(p=1),
     ], p=1)
     effect_stage = OneOf([
         IAASharpen(p=1),
         IAAEmboss(p=1),
         IAAPiecewiseAffine(p=1),
         IAAPerspective(p=1),
         CLAHE(p=1),
     ], p=1)
     misc_stage = OneOf([
         ShiftScaleRotate(p=1),
         HueSaturationValue(p=1),
         RandomBrightnessContrast(p=1),
     ], p=1)
     blur_stage = OneOf([
         Blur(p=1),
         MotionBlur(p=1),
         MedianBlur(p=1),
         GaussNoise(p=1),
     ], p=1)
     return Compose([distortion_stage, effect_stage, misc_stage,
                     blur_stage])
コード例 #22
0
def strong_aug(p=0.5, crop_size=(512, 512)):
    """Very heavy pipeline: random resized crop, flips, noise, blur, large
    affine jitter, warps, contrast, weather effects and color shifts."""
    weather_stage = OneOf([
        GaussNoise(),
        RandomRain(p=0.2, brightness_coefficient=0.9, drop_width=1,
                   blur_value=5),
        RandomSnow(p=0.4, brightness_coeff=0.5, snow_point_lower=0.1,
                   snow_point_upper=0.3),
        RandomShadow(p=0.2, num_shadows_lower=1, num_shadows_upper=1,
                     shadow_dimension=5, shadow_roi=(0, 0.5, 1, 1)),
        RandomFog(p=0.5, fog_coef_lower=0.3, fog_coef_upper=0.5,
                  alpha_coef=0.1),
    ], p=0.3)
    blur_stage = OneOf([
        MotionBlur(p=0.5),
        MedianBlur(blur_limit=3, p=0.5),
        Blur(blur_limit=3, p=0.5),
    ], p=0.3)
    warp_stage = OneOf([
        OpticalDistortion(p=0.5),
        GridDistortion(p=0.5),
        IAAPiecewiseAffine(p=0.5),
        ElasticTransform(p=0.5),
    ], p=0.3)
    contrast_stage = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        RandomResizedCrop(crop_size[0], crop_size[1], scale=(0.3, 1.0),
                          ratio=(0.75, 1.3), interpolation=4, p=1.0),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.8),
        blur_stage,
        ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5,
                         rotate_limit=180, p=0.8),
        warp_stage,
        contrast_stage,
        weather_stage,
        RGBShift(),
        HueSaturationValue(p=0.9),
    ], p=p)
コード例 #23
0
def augment_data(save_dir):
    """Augment every sample of ``save_dir/data_file.h5`` and write the
    result to ``save_dir/data_aug.h5``.

    The augmentation pipeline (flip + one of elastic/grid/optical
    distortion) is loaded from ``aug_pipeline_1.json`` when present,
    otherwise generated and serialized there for reproducibility.

    :param save_dir: directory containing ``data_file.h5``; the augmented
        dataset and serialized pipeline are written alongside it.
    :return: None
    """

    # Fixed seed so the randomly sampled augmentations are reproducible.
    seed = 1337
    random.seed(seed)
    start_time = time.time()
    print(f"====== Augmenting data. Seed set at {seed} ======")

    data_file = h5py.File(os.path.join(save_dir, 'data_file.h5'), 'r')
    data_shape = data_file['data/data'].shape

    # Output buffer with the same shape as the source dataset.
    data_aug = np.zeros(shape=data_shape, dtype=np.float32)

    n_samples = data_shape[0]
    # Assumes layout (samples, channels, height, width, depth, ...) —
    # TODO confirm against the writer of data_file.h5.
    img_channels, img_height, img_width, img_depth = data_shape[1:5]

    try:
        # Reuse a previously serialized pipeline when available.
        aug = alb.load(os.path.join(save_dir, 'aug_pipeline_1.json'))
    except FileNotFoundError:
        print("Pipeline not found. Generating One ...")
        aug = Compose([
            OneOf([VerticalFlip(p=1), HorizontalFlip(p=1)], p=1),
            OneOf([
                ElasticTransform(p=1, sigma=6, alpha_affine=4, alpha=75),
                GridDistortion(p=1),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            ],
                  p=0.8)
        ])

        # Persist the generated pipeline so later runs use identical augs.
        alb.save(aug, os.path.join(save_dir, 'aug_pipeline_1.json'))

    for data_idx in np.arange(n_samples):
        img = data_file['data/data'][data_idx, ...]
        img = img.reshape(img_channels, img_height, img_width, -1)
        # Only channel 0 is passed through the pipeline; the result is
        # reshaped back into the full per-sample layout.
        img_aug = aug(image=img[0,
                                ...])['image'].reshape(img_channels,
                                                       img_height, img_width,
                                                       img_depth, -1)

        data_aug[data_idx, ...] = img_aug

        # Release large intermediates before the next iteration.
        del img_aug
        del img

    data_file.close()

    with h5py.File(os.path.join(save_dir, 'data_aug.h5'), 'w') as file:
        file.create_dataset('data/data', data=data_aug, dtype=np.float32)

    print(
        f"====== Finished augmentation. Time taken: {time.time() - start_time}s ======"
    )
コード例 #24
0
def opticalDistortion(image, mask):
    """Apply a fixed, always-on optical distortion to an image/mask pair
    and return the transformed (image, mask)."""
    transform = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
    result = transform(image=image, mask=mask)
    return result['image'], result['mask']
コード例 #25
0
def transform(p=1):
    """Build a training transform ending in a 512x512 resize.

    NOTE(review): some members carry p=0 (the hue shift and the median
    blur) and therefore never fire — presumably disabled on purpose;
    confirm before cleaning up.
    """
    hue_stage = HueSaturationValue(hue_shift_limit=(-25, 0),
                                   sat_shift_limit=0,
                                   val_shift_limit=0,
                                   p=0)
    noise_stage = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.1)
    blur_stage = OneOf([
        MedianBlur(blur_limit=3, p=0),
        Blur(blur_limit=3, p=0.1),
    ], p=0.1)
    warp_stage = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=0.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    contrast_stage = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                 contrast_limit=(-0.2, 0.2),
                                 p=0.3),
    ], p=0.3)
    elastic_stage = OneOf([
        ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(p=0.5),
        OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5),
    ], p=0.2)
    return Compose([
        RandomRotate90(0.5),
        hue_stage,
        noise_stage,
        blur_stage,
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        warp_stage,
        contrast_stage,
        elastic_stage,
        Resize(512, 512),
    ], p=p)
コード例 #26
0
ファイル: imageutil.py プロジェクト: bnsh/pytorch_lib
    def _transform_generic_np(self, npimages, prob):
        """Augment a batched uint8 RGB array and resize each image to
        (self.outsz, self.outsz).

        :param npimages: uint8 batch of shape (N, H, W, 3), values 0..255,
            square images only.
        :param prob: probability of applying the augmentation sub-pipeline
            (the final resize always runs).
        :return: uint8 array of shape (N, outsz, outsz, 3).
        """
        batchsz, height, width, channels = npimages.shape
        assert height == width, "We assume squares as inputs."
        assert channels == 3, "We assume RGB images."
        assert npimages.dtype == np.uint8
        assert 0 <= npimages.min() and npimages.max() <= 255

        # Pre-allocate the output batch in albumentations' HWC layout.
        outputs = np.zeros((batchsz, self.outsz, self.outsz, channels),
                           dtype=np.uint8)

        # The augmentation sub-pipeline is gated by `prob`; the resize is
        # unconditional. (The original used line-continuation backslashes
        # throughout, which are redundant inside parentheses.)
        augmentations = Compose([
            OneOf([
                IAAAdditiveGaussianNoise(p=1.0),
                GaussNoise(p=1.0),
            ], p=0.5),
            OneOf([
                MotionBlur(p=1.0),
                MedianBlur(blur_limit=3, p=1.0),
                Blur(blur_limit=3, p=1.0),
            ], p=0.5),
            RandomGamma(p=0.5),
            Rotate(limit=45, interpolation=cv2.INTER_CUBIC, p=0.5),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=45,
                             interpolation=cv2.INTER_CUBIC, p=0.5),
            OneOf([
                OpticalDistortion(interpolation=cv2.INTER_CUBIC, p=1.0),
                GridDistortion(interpolation=cv2.INTER_CUBIC, p=1.0),
                IAAPiecewiseAffine(p=1.0),
            ], p=0.5),
            OneOf([
                CLAHE(clip_limit=2, p=1.0),
                IAASharpen(p=1.0),
                IAAEmboss(p=1.0),
                RandomContrast(p=1.0),
                RandomBrightness(p=1.0),
            ], p=0.5),
            HueSaturationValue(p=0.5),
        ], p=prob)
        ops = Compose([
            augmentations,
            Resize(self.outsz, self.outsz, interpolation=cv2.INTER_CUBIC),
        ], p=1.0)

        # Compose returns a dict; take the transformed image per sample.
        for idx in range(0, batchsz):
            outputs[idx] = ops(image=npimages[idx])["image"]

        return outputs
コード例 #27
0
ファイル: generator.py プロジェクト: Nils19/rsna-competition
 def augmentTest(self):
     """Mild test-time augmentation: subtle distortions, small affine
     jitter, occasional sharpen/emboss, frequent horizontal flip, then
     tensor conversion."""
     transforms = [
         OpticalDistortion(distort_limit=0.02, shift_limit=0.02,
                           border_mode=0, value=0, p=0.1),
         GridDistortion(num_steps=9, distort_limit=0.1, border_mode=0,
                        value=0, p=0.1),
         ShiftScaleRotate(shift_limit=0.03125, scale_limit=0.05,
                          rotate_limit=4, p=.15),
         OneOf([IAASharpen(), IAAEmboss()], p=0.05),
         HorizontalFlip(p=0.6),
         ToTensor(),
     ]
     return Compose(transforms, p=0.9)
コード例 #28
0
def empty_aug3():
    """Return an effectively empty augmentation list.

    The distortion group is kept in place but disabled (outer ``p=0.0``),
    so no transform is ever applied.
    """
    disabled_distortions = OneOf(
        [
            OpticalDistortion(p=0.1),
            GridDistortion(p=0.1),
        ],
        p=0.0,  # never triggers — pipeline is intentionally a no-op
    )
    return [disabled_distortions]
コード例 #29
0
ファイル: experiment.py プロジェクト: catalyst-team/video
    def get_transforms(stage: str = None,
                       mode: str = None,
                       input_size: int = 224):
        """Build per-frame transform pipelines for a video batch.

        Args:
            stage: unused here; kept for interface compatibility with the
                framework's transform-factory signature.
            mode: ``"train"`` returns the augmenting pipeline; any other
                value returns the inference-only pipeline.
            input_size: side length (pixels) frames are resized to.

        Returns:
            An ``Augmentor`` operating on the ``"features"`` dict key that
            applies the image transforms to every frame of a clip and
            stacks the result into a tensor.
        """
        # Stochastic photometric/geometric augmentations, train only.
        train_image_transforms = [
            OpticalDistortion(distort_limit=0.3, p=0.3),
            JpegCompression(quality_lower=50, p=0.8),
            HorizontalFlip(p=0.5),
            MotionBlur(p=0.5),
            ShiftScaleRotate(shift_limit=0.1,
                             scale_limit=0.2,
                             rotate_limit=20,
                             p=0.5),
            RandomBrightnessContrast(brightness_limit=0.3,
                                     contrast_limit=0.2,
                                     p=0.4),
            HueSaturationValue(hue_shift_limit=3,
                               sat_shift_limit=20,
                               val_shift_limit=30,
                               p=0.4),
            CLAHE(clip_limit=2, p=0.3)
        ]
        # Deterministic transforms applied in both modes.
        infer_image_transforms = [
            Resize(input_size, input_size),
            Normalize(),
            ToTorchTensor(p=1.0)
        ]
        stack = TorchStack()

        # Train pipeline augments first, then applies the inference steps.
        train_images_fn = GroupTransform(transforms=train_image_transforms +
                                         infer_image_transforms)
        valid_images_fn = GroupTransform(transforms=infer_image_transforms)

        def train_aug_fn(images):
            images = train_images_fn(images)
            images = stack(images)
            return images

        def valid_aug_fn(images):
            images = valid_images_fn(images)
            images = stack(images)
            return images

        # Pass the functions directly — the original wrapped them in
        # redundant `lambda x: fn(x)` indirections.
        train_transforms = Augmentor(dict_key="features",
                                     augment_fn=train_aug_fn)
        valid_transforms = Augmentor(dict_key="features",
                                     augment_fn=valid_aug_fn)

        if mode == "train":
            return train_transforms
        return valid_transforms
コード例 #30
0
def TTA(img, model, model_name, seed=88, niter=4):
    """Test-time augmentation: average predictions over augmented copies.

    Builds three base views of ``img`` (original, bitwise-inverted,
    horizontally flipped), adds ``niter`` random augmentations of each,
    preprocesses the whole stack and returns the mean model prediction.

    Args:
        img: input image as an HxWxC ndarray.
        model: Keras-style model exposing ``get_input_at`` and ``predict``.
        model_name: passed through to ``preprocess_input`` to pick the
            backbone-specific preprocessing.
        seed: RNG seed for reproducible augmentations.
        niter: number of augmented copies per base view.

    Returns:
        1-D ndarray: prediction averaged over all ``3 * (niter + 1)`` views.
    """
    import random  # local import: only needed for seeding albumentations

    input_size = int(model.get_input_at(0).get_shape()[1])

    AUGMENTATIONS = Compose([
        HorizontalFlip(p=0.25),
        RandomSizedCrop(min_max_height=(int(input_size * 0.75), input_size),
                        height=input_size,
                        width=input_size,
                        p=0.5),
        OneOf([
            ShiftScaleRotate(rotate_limit=25),
            ElasticTransform(
                alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            GridDistortion(),
            OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ],
              p=0.5),
        OneOf([RandomContrast(),
               RandomGamma(),
               RandomBrightness()], p=0.5),
        OneOf(
            [Blur(), MedianBlur(),
             GaussNoise(), GaussianBlur()], p=0.5)
    ],
                            p=0.5)

    # Fix: albumentations draws from Python's `random` module, so seeding
    # numpy alone did not make the augmentations reproducible. Seed both.
    random.seed(seed)
    np.random.seed(seed)

    original_img = img.copy()
    inverted_img = np.invert(img.copy())
    hflipped_img = np.fliplr(img.copy())

    # Slot 0 of each array holds the unaugmented base view; slots 1..niter
    # hold random augmentations of it.
    original_img_array = np.empty(
        (niter + 1, img.shape[0], img.shape[1], img.shape[2]))
    inverted_img_array = original_img_array.copy()
    hflipped_img_array = original_img_array.copy()
    original_img_array[0] = original_img
    inverted_img_array[0] = inverted_img
    hflipped_img_array[0] = hflipped_img
    for each_iter in range(niter):
        original_img_array[each_iter +
                           1] = AUGMENTATIONS(image=original_img)['image']
        inverted_img_array[each_iter +
                           1] = AUGMENTATIONS(image=inverted_img)['image']
        hflipped_img_array[each_iter +
                           1] = AUGMENTATIONS(image=hflipped_img)['image']
    tmp_array = np.vstack(
        (original_img_array, inverted_img_array, hflipped_img_array))
    tmp_array = preprocess_input(tmp_array, model_name)

    prediction = np.mean(model.predict(tmp_array), axis=0)

    return prediction