Code Example #1
def get_train_transform(image_size=our_image_size):
    return A.Compose(
        [
            A.RandomResizedCrop(height=image_size,
                                width=image_size,
                                scale=(0.5, 1.5),
                                ratio=(0.75, 1.25),
                                p=1),
            A.Resize(height=image_size, width=image_size, p=1.0),
            # Add occasional blur
            A.OneOf([A.GaussianBlur(), A.MotionBlur()], p=0.5),
            A.Rotate(limit=90, border_mode=cv2.BORDER_CONSTANT, value=0,
                     p=0.5),
            # noise
            A.OneOf([
                A.GaussNoise(p=0.5),
                A.RandomGamma(p=0.4),
            ], p=0.5),
            # D4 Augmentations
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Transpose(p=0.2),
            # Spatial-preserving augmentations
            A.HueSaturationValue(hue_shift_limit=0.2,
                                 sat_shift_limit=0.2,
                                 val_shift_limit=0.2,
                                 p=0.7),
            A.RandomBrightnessContrast(
                brightness_limit=0.2, contrast_limit=0.2, p=0.8),
            A.ToGray(p=0.01),
            # cutout
            A.Cutout(num_holes=32,
                     max_h_size=image_size // 16,
                     max_w_size=image_size // 16,
                     fill_value=0,
                     p=0.5),
        ],
        bbox_params={
            'format': 'pascal_voc',
            'label_fields': ['labels']
        })
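Since this pipeline is built with bbox_params in 'pascal_voc' format and a 'labels' label field, it has to be called with matching keyword arguments. A minimal usage sketch with made-up placeholder data (and assuming an albumentations version that still provides A.Cutout):

import albumentations as A
import numpy as np

image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder image
bboxes = [[50, 60, 200, 220]]  # pascal_voc: [x_min, y_min, x_max, y_max]
labels = [1]  # one class label per box

transform = get_train_transform(image_size=512)
out = transform(image=image, bboxes=bboxes, labels=labels)
aug_image, aug_boxes, aug_labels = out['image'], out['bboxes'], out['labels']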
Code Example #2
def mix_transform(resize):
    return A.Compose([
        pre_transform(resize=resize),
        A.GridDistortion(),
        A.RandomRotate90(always_apply=True),
        A.Rotate(limit=10, border_mode=0, p=0.5),
        #         A.OneOf([
        A.Flip(p=0.5),
        A.Transpose(p=0.5),
        #         ], p=0.5),
        A.RandomBrightnessContrast(0.2, 0.2, p=0.3),
        A.OneOf([
            A.Blur(blur_limit=3, p=0.2),
            A.MotionBlur(blur_limit=5, p=0.4),
            A.MedianBlur(blur_limit=5, p=0.4)
        ],
                p=0.3),
        A.GaussNoise((10, 50), p=0.1),
        post_transform()
    ])
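pre_transform and post_transform come from the surrounding project and are not shown here. Hypothetical stand-ins that would make the snippet self-contained might look like this (the names appear in the snippet, but these bodies are assumptions, not the project's actual helpers):

import albumentations as A
from albumentations.pytorch import ToTensorV2

def pre_transform(resize):
    # assumed stand-in: scale the longest side down to `resize` pixels
    return A.LongestMaxSize(max_size=resize)

def post_transform():
    # assumed stand-in: ImageNet normalization plus tensor conversion
    return A.Compose([A.Normalize(), ToTensorV2()])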
Code Example #3
File: transformer.py Project: yida2311/HistoDOI
 def __init__(self, crop_size=1024):
     self.master = albumentations.Compose([
         albumentations.RandomCrop(crop_size, crop_size),
         albumentations.RandomRotate90(p=0.5),
         albumentations.Transpose(p=0.5),
         albumentations.Flip(p=0.5),
         albumentations.OneOf([
             albumentations.RandomBrightness(),
             albumentations.RandomContrast(),
             albumentations.HueSaturationValue(),
         ],
                              p=0.5),
         albumentations.ElasticTransform(),
         albumentations.ShiftScaleRotate(shift_limit=0.1,
                                         scale_limit=0.1,
                                         rotate_limit=15,
                                         p=0.5),
         albumentations.Normalize(mean=[0.798, 0.621, 0.841],
                                  std=[0.125, 0.228, 0.089]),
     ])
Code Example #4
    def _image_augment(self, img):  #, bboxes):
        '''
        image_augment
            ARGS: img - numpy array of image
                bboxes - array of bounding boxes in COCO format
        '''
        transform = A.Compose([
            A.HorizontalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Rotate(p=0.5),
            A.RandomBrightnessContrast(p=0.3),
        ])  #, bbox_params=A.BboxParams(format='coco'))

        transformed = transform(image=img,
                                # bboxes=bboxes
                                )
        transformed_image = transformed['image']
        # transformed_bboxes = transformed['bboxes']
        return transformed_image  #, transformed_bboxes
Code Example #5
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, keypoints=keypoints, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
Code Example #6
 def create_train_transforms(self, crop_size):
     transforms = [
         A.OneOf([
             RandomCrop(crop_size, crop_size, p=0.3),
             RandomSizedCropAroundBbox(min_max_height=(int(
                 crop_size * 0.65), int(crop_size * 1.4)),
                                       height=crop_size,
                                       width=crop_size,
                                       p=0.7)
         ],
                 p=1),
         A.Rotate(20, p=0.2, border_mode=cv2.BORDER_CONSTANT),
         A.HorizontalFlip(),
         A.VerticalFlip(),
         A.RandomRotate90(),
         A.RandomBrightnessContrast(),
         A.RandomGamma(),
         A.FancyPCA(p=0.2)
     ]
     return A.Compose(transforms, additional_targets={'labels': 'mask'})
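The additional_targets={'labels': 'mask'} argument tells Compose to treat an extra call argument named 'labels' as a second mask, so a label map receives exactly the same spatial transforms as the image. A rough usage sketch inside the owning class (variable names are illustrative):

transforms = self.create_train_transforms(crop_size=512)
out = transforms(image=image, labels=label_map)  # label_map is augmented like a mask
aug_image, aug_labels = out['image'], out['labels']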
Code Example #7
def hard_augmentations():
    return A.Compose([
        A.RandomRotate90(),
        A.Transpose(),
        A.RandomGridShuffle(),
        A.ShiftScaleRotate(scale_limit=0.1,
                           rotate_limit=45,
                           border_mode=cv2.BORDER_CONSTANT,
                           mask_value=0,
                           value=0),
        A.ElasticTransform(border_mode=cv2.BORDER_CONSTANT,
                           alpha_affine=5,
                           mask_value=0,
                           value=0),
        # Add occasional blur
        A.OneOf([
            A.GaussianBlur(),
            A.GaussNoise(),
            A.IAAAdditiveGaussianNoise(),
            A.NoOp()
        ]),
        # Dropout augmentations
        A.OneOf([A.CoarseDropout(),
                 A.MaskDropout(max_objects=10),
                 A.NoOp()]),
        # Spatial-preserving augmentations:
        A.OneOf([
            A.RandomBrightnessContrast(brightness_by_max=True),
            A.CLAHE(),
            A.HueSaturationValue(),
            A.RGBShift(),
            A.RandomGamma(),
            A.NoOp(),
        ]),
        # Weather effects
        A.OneOf([
            A.RandomFog(fog_coef_lower=0.01, fog_coef_upper=0.3, p=0.1),
            A.NoOp()
        ]),
        A.Normalize(),
    ])
Code Example #8
File: dataset.py Project: JayChanHoi/EfficientDet
def train_transform(width=512, height=512, min_area=0.0, min_visibility=0.0, lamda_norm=False):
        list_transforms = []
        augment = albu.Compose([
            albu.OneOf(
                [
                    albu.RandomSizedBBoxSafeCrop(p=1.0, height=height, width=width),
                    albu.HorizontalFlip(p=1.0),
                    albu.VerticalFlip(p=1.0),
                    albu.RandomRotate90(p=1.0),
                    albu.NoOp(p=1.0)
                ]
            ),
            albu.OneOf(
                [
                    albu.RandomBrightnessContrast(p=1.0),
                    albu.RandomGamma(p=1.0),
                    albu.NoOp(p=1.0)
                ]
            ),
            albu.OneOf(
                [
                    albu.MotionBlur(p=1.0),
                    albu.RandomFog(p=1.0),
                    albu.RandomRain(p=1.0),
                    albu.CLAHE(p=1.0),
                    albu.ToGray(p=1.0),
                    albu.NoOp(p=1.0)
                ]
            )
        ])
        list_transforms.extend([augment])

        if lamda_norm:
            list_transforms.extend([albu.Lambda(image=lamda_norm_tran)])
        else:
            list_transforms.extend([albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255., p=1.0)])
        list_transforms.extend([albu.Resize(height=height, width=width, p=1.0)])

        return albu.Compose(list_transforms, bbox_params=albu.BboxParams(format='pascal_voc', min_area=min_area,
                                                                         min_visibility=min_visibility,
                                                                         label_fields=['label']))
Code Example #9
 def __init__(self, img, data, img_size):
     """ 
     arguments
     ---------
     img : list
         list of images, in the original size (height, width, 3)
     data : list of dict
         Each dict has :
             'image' : index of the image. The index should match with img
             'mask' : [xx, yy]
                     IMPORTANT : (WIDTH, HEIGHT)
             'box' : [[xmin, ymin], [xmax,ymax]]
             'size' : the size of the image that the data was created with
                     IMPORTANT : (WIDTH, HEIGHT)
     img_size : tuple
         Desired output image size
         The axes will be swapped to match pygame.
         IMPORTANT : (WIDTH, HEIGHT)
     """
     self.image = img
     self.data = data
     self.n = len(data)
     self.output_size = img_size
     self.aug = A.Compose([
         A.OneOf([
             A.RandomGamma((40, 200), p=1),
             A.RandomBrightness(limit=0.5, p=1),
             A.RandomContrast(limit=0.5, p=1),
             A.RGBShift(40, 40, 40, p=1),
             A.Downscale(scale_min=0.25, scale_max=0.5, p=1),
             A.ChannelShuffle(p=1),
         ],
                 p=0.8),
         A.InvertImg(p=0.5),
         A.VerticalFlip(p=0.5),
         A.RandomRotate90(p=1),
         A.Resize(img_size[0], img_size[1]),
     ], )
     for datum in data:
         datum['mask_min'] = np.min(datum['mask'], axis=1)
         datum['mask_max'] = np.max(datum['mask'], axis=1) + 1
Code Example #10
def get_train_transform():
    train_pipeline = [
        PhotoMetricDistortion(
            brightness_delta=32,
            contrast_range=(0.5, 1.5),
            saturation_range=(0.5, 1.5),
            hue_delta=18),
        MixUp(p=0.5, mode=1),
        # Mosaic(p=0.2),
        GaussNoise(p=0.2),
        A.Compose([
            A.Flip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.ToGray(p=0.01),
            A.Cutout(num_holes=8, max_h_size=64, max_w_size=64, fill_value=0, p=0.5),
            A.RandomCrop(height=1000, width=1000, p=0.5),
            ToTensorV2(p=1.0)
        ], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
    ]

    return train_pipeline
Code Example #11
File: RocksAug.py Project: oradzhabov/bigimage
 def get_validation_augmentation(self, conf, is_stub=False):
     # Since batch-size in validation is 1, validation could be performed by whole crop-size.
     # To provide pos
     test_transform = [
         alb.HorizontalFlip(p=0.5),
         alb.VerticalFlip(p=0.5),
         alb.RandomRotate90(always_apply=False, p=0.5),
         alb.PadIfNeeded(conf.img_wh_crop,
                         conf.img_wh_crop,
                         always_apply=True,
                         border_mode=0),
         # alb.RandomCrop(height=conf.img_wh_crop, width=conf.img_wh_crop, always_apply=True),
     ]
     if is_stub:
         return alb.Compose([
             alb.PadIfNeeded(conf.img_wh_crop,
                             conf.img_wh_crop,
                             always_apply=True,
                             border_mode=0)
         ])
     return alb.Compose(test_transform)
Code Example #12
def generate_ds(size):
    trfm = A.Compose([
        A.Resize(size, size, p=1.0),
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.RandomRotate90(),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=20, p=0.9, 
                         border_mode=cv2.BORDER_REFLECT),
        A.OneOf([
            A.OpticalDistortion(p=0.4),
            A.GridDistortion(p=.1),
            A.IAAPiecewiseAffine(p=0.4),
        ], p=0.3),
        A.OneOf([
            A.HueSaturationValue(10,15,10),
            A.CLAHE(clip_limit=3),
            A.RandomBrightnessContrast(),            
        ], p=0.5)
    ], p=1.0)

    return HubDataset(DATA_PATH, window=WINDOW, overlap=MIN_OVERLAP, transform=trfm)
Code Example #13
def get_training_augmentation():
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=1.),
        # A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1., border_mode=0),
        #
        # A.PadIfNeeded(min_height=128, min_width=128, always_apply=True, border_mode=0),
        # A.RandomCrop(height=128, width=128, always_apply=True),
        #
        # A.IAAAdditiveGaussianNoise(p=0.2),
        # A.IAAPerspective(p=0.5),
        #
        # A.OneOf(
        #     [
        #         A.CLAHE(p=1.),
        #         A.RandomBrightness(p=1.),
        #         A.RandomGamma(p=1.),
        #     ],
        #     p=0.9,
        # ),
        #
        # A.OneOf(
        #     [
        #         A.IAASharpen(p=1.),
        #         A.Blur(blur_limit=3., p=1.),
        #         A.MotionBlur(blur_limit=3., p=1.),
        #     ],
        #     p=0.9,
        # ),
        #
        # A.OneOf(
        #     [
        #         A.RandomContrast(p=1.),
        #         A.HueSaturationValue(p=1.),
        #     ],
        #     p=0.9,
        # ),
    ]
    return A.Compose(train_transform)
Code Example #14
def data_augmentation(image_path, aug_num):
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    augmentation = A.Compose(
        [
            A.RandomRotate90(),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
                    p=0.2),
            A.ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            A.OneOf([
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
                    p=0.2),
            A.OneOf([
                A.CLAHE(clip_limit=2),
                A.IAASharpen(),
                A.IAAEmboss(),
                A.RandomBrightnessContrast(),
            ],
                    p=0.3),
            # A.HueSaturationValue(p=0.3),
        ],
        p=0.5)
    patches = []
    for _ in range(aug_num):
        patches.append(augmentation(image=image)['image'])
    return patches
Code Example #15
    def affine_transform(
            self):  # Affine Transforms: Scale & Translation & Rotation
        aug = A.Compose(
            [
                A.Transpose(
                    p=self.p
                ),  # Transpose the input by swapping rows and columns.
                A.OneOf(
                    [
                        A.RandomRotate90(
                            p=self.p
                        ),  # Randomly rotate the input by 90 degrees zero or more times.
                        A.Rotate(
                            limit=90, p=self.p
                        ),  # Rotate the input by an angle selected randomly from the uniform distribution.
                        A.ShiftScaleRotate(
                            shift_limit=0.0625,
                            scale_limit=0.1,
                            rotate_limit=45,
                            p=self.p
                        )  # Randomly apply affine transforms: translate, scale and rotate the input.
                    ],
                    p=1),
                A.OneOf(
                    [
                        A.HorizontalFlip(
                            p=self.p
                        ),  # Flip the input horizontally around the y-axis.
                        A.VerticalFlip(
                            p=self.p
                        ),  # Flip the input vertically around the x-axis.
                        A.Flip(
                            p=self.p
                        )  # Flip the input either horizontally, vertically or both horizontally and vertically.
                    ],
                    p=1)
            ],
            p=1)

        return aug
Code Example #16
def train(args):
    images, y = data.load(args.data_dir)

    X = np.array(
        [model.crop_nail(image, height=320, width=320) for image in images])

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=args.test_size, random_state=0)
    net = model.create()

    X_test = np.array(
        [model.resize(image, net.input_shape[1:3]) for image in X_test])
    X_test = model.preprocess_image(X_test)

    augmentator = albumentations.Compose([
        albumentations.ShiftScaleRotate(shift_limit=0.1,
                                        scale_limit=0.1,
                                        rotate_limit=45,
                                        p=.8),
        albumentations.OneOf([
            albumentations.RandomRotate90(),
            albumentations.Flip(),
        ],
                             p=.6),
        albumentations.Resize(width=net.input_shape[2],
                              height=net.input_shape[1]),
        albumentations.GaussNoise(var_limit=(1, 3)),
    ])

    net.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

    net.fit_generator(
        AugmentedGenerator(X_train, y_train, 40, augmentator),
        epochs=args.epochs,
        validation_data=(X_test, y_test),
    )

    net.save_weights(args.target, overwrite=True)
Code Example #17
def strong_aug():
    # strong augmentations for training
    train_transform = [
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.RandomRotate90(p=0.5),
        albu.OneOf([
            albu.CenterCrop(
                p=0.5, height=configs.input_size, width=configs.input_size),
            albu.ElasticTransform(
                p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            albu.GridDistortion(p=0.5),
            albu.OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5),
        ],
                   p=0.8),
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        AT.ToTensor(),
    ]
    return albu.Compose(train_transform)
Code Example #18
def get_valid_transforms():
    return A.Compose([
            # A.ShiftScaleRotate(p=0.5),
            A.RandomRotate90(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            # A.RandomBrightness(p=0.5),
            # A.RandomContrast(p=0.5),
            # A.RandomGamma(p=0.5),
            # A.RGBShift(),
            # A.CLAHE(p=1),
            # A.ToGray(),
            # A.OneOf([
            #     A.RandomContrast(),
            #     A.RandomGamma(),
            #     A.RandomBrightness(),
            # ], p=0.5),
            # A.HueSaturationValue(p=0.5),
            # A.ChannelShuffle(p=0.5),
            # A.Resize(height=384, width=384, interpolation=cv2.INTER_AREA, p=1),
            ToTensorV2(p=1.0),
        ], p=1.0)
Code Example #19
def get_transform(image, mask, original_h, original_w):
    '''Build and return the augmentation pipeline. Note that the image, mask,
    original_h and original_w arguments are not used here; only the composed
    transform is returned.
    '''

    transform = A.Compose([
        A.OneOf([
            A.VerticalFlip(p=0.2),
            A.RandomRotate90(p=0.2),
            A.HorizontalFlip(p=0.2)
        ],
                p=0.2),
        A.OneOf([
            A.MotionBlur(p=0.2),
            A.MedianBlur(blur_limit=15, p=0.2),
            A.Blur(blur_limit=15, p=0.2),
            A.GaussNoise(p=0.2),
        ],
                p=0.2),
    ])
    return transform
Code Example #20
def resize_transforms(input_size=256):
    result = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=1),
        A.OneOf([
            A.ShiftScaleRotate(scale_limit=0,
                               rotate_limit=(-45, 45),
                               shift_limit=(-0.1, 0.1),
                               interpolation=0,
                               border_mode=2,
                               p=0.5),
            A.ElasticTransform(alpha_affine=20, sigma=30, border_mode=2, p=0.5)
        ]),
        A.PadIfNeeded(min_height=input_size,
                      min_width=input_size,
                      always_apply=True,
                      border_mode=2),
        A.RandomCrop(input_size, input_size, always_apply=True),
    ])

    return result
Code Example #21
File: rotate.py Project: rhololkeolke/catalyst-rl
 def __init__(self,
              input_key: str = "image",
              output_key: str = "rotation_factor",
              targets_key: str = None,
              rotate_probability: float = 1.,
              hflip_probability: float = 0.5,
              one_hot_classes: int = None):
     """
     Args:
         input_key (str): input key to use from annotation dict
         output_key (str): output key to use to store the result
     """
     self.input_key = input_key
     self.output_key = output_key
     self.targets_key = targets_key
     self.rotate_probability = rotate_probability
     self.hflip_probability = hflip_probability
     self.rotate = A.RandomRotate90()
     self.hflip = A.HorizontalFlip()
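      # RandomRotate90 has 4 possible positions and HorizontalFlip 2 states,
      # giving 8 distinct orientations per source class, hence the factor of 8: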
     self.one_hot_classes = one_hot_classes * 8 \
         if one_hot_classes is not None \
         else None
Code Example #22
 def __init__(self, img, data, img_size):
     """ 
     arguments
     ---------
     img : list
         list of images, in the original size (height, width, 3)
     data : list of dict
         Each dict has :
             'image' : index of the image. The index should match with img
             'mask' : [rr, cc]
             'box' : [[xmin, ymin], [xmax,ymax]]
             'size' : the size of the image that the data was created with
                     IMPORTANT : (WIDTH, HEIGHT)
     img_size : tuple
         Desired output image size
         The axes will be swapped to match pygame.
         IMPORTANT : (WIDTH, HEIGHT)
     """
     self.img = img
     self.data = data
     self.n = len(data)
     self.output_size = img_size
     self.aug = A.Compose(
         [
             A.OneOf([
                 A.RandomGamma((40, 200), p=1),
                 A.RandomBrightness(limit=0.5, p=1),
                 A.RandomContrast(limit=0.5, p=1),
                 A.RGBShift(p=1),
             ],
                     p=0.8),
             A.VerticalFlip(p=0.5),
             A.RandomRotate90(p=1),
             A.Resize(img_size[1], img_size[0]),
         ],
         bbox_params=A.BboxParams(format='albumentations',
                                  label_fields=['bbox_classes']),
         keypoint_params=A.KeypointParams(format='xy'),
     )
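Because this Compose declares both bbox_params (normalized 'albumentations' format with a 'bbox_classes' label field) and keypoint_params ('xy'), every call must pass bboxes, bbox_classes and keypoints together. A minimal hypothetical call:

out = self.aug(
    image=image,
    bboxes=[[0.1, 0.2, 0.5, 0.6]],  # normalized [x_min, y_min, x_max, y_max]
    bbox_classes=['object'],
    keypoints=[(40, 80)],  # pixel coordinates in 'xy' format
)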
Code Example #23
def random_rotate_90(image: np.ndarray,
                     annotations: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    image_height, image_width = image.shape[:2]

    boxes = annotations[:, :4]
    keypoints = annotations[:, 4:-1].reshape(-1, 2)
    labels = annotations[:, -1:]

    invalid_index = keypoints.sum(axis=1) == -2

    keypoints[:, 0] = np.clip(keypoints[:, 0], 0, image_width - 1)
    keypoints[:, 1] = np.clip(keypoints[:, 1], 0, image_height - 1)

    keypoints[invalid_index] = 0

    category_ids = list(range(boxes.shape[0]))

    transform = albu.Compose(
        [albu.RandomRotate90(p=1)],
        keypoint_params=albu.KeypointParams(format="xy"),
        bbox_params=albu.BboxParams(format="pascal_voc",
                                    label_fields=["category_ids"]),
    )
    transformed = transform(image=image,
                            keypoints=keypoints.tolist(),
                            bboxes=boxes.tolist(),
                            category_ids=category_ids)

    keypoints = np.array(transformed["keypoints"])
    keypoints[invalid_index] = -1

    keypoints = keypoints.reshape(-1, 10)
    boxes = np.array(transformed["bboxes"])
    image = transformed["image"]

    annotations = np.hstack([boxes, keypoints, labels])

    return image, annotations
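Each annotations row packs a pascal_voc box (4 values), five keypoints flattened to 10 values (keypoints at (-1, -1) are treated as missing), and a class label, for 15 columns in total. A small made-up example:

import numpy as np

# one object: box (10, 20)-(110, 140), five visible keypoints, class label 1
annotations = np.array([[10, 20, 110, 140,
                         30, 50, 90, 50, 60, 80, 40, 110, 80, 110,
                         1]], dtype=float)
image = np.zeros((256, 256, 3), dtype=np.uint8)
image, annotations = random_rotate_90(image, annotations)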
Code Example #24
def clf_train_augs(height: int, width: int) -> albu.Compose:
    """Training augmentations for classification. We prefer for this model to be really
    robust. Feel free to tweak these paramters or ad other augmentations."""

    return albu.Compose([
        albu.Resize(height=height, width=width),
        albu.OneOf([
            albu.IAAAffine(shear=6, rotate=5, always_apply=True),
            albu.ShiftScaleRotate(shift_limit=0.025,
                                  scale_limit=0.1,
                                  rotate_limit=10),
        ]),
        albu.ShiftScaleRotate(shift_limit=0.025,
                              scale_limit=0.1,
                              rotate_limit=10),
        albu.Flip(),
        albu.RandomRotate90(),
        albu.OneOf(
            [
                albu.HueSaturationValue(p=1.0),
                albu.IAAAdditiveGaussianNoise(p=1.0),
                albu.IAASharpen(p=1.0),
                albu.RandomBrightnessContrast(
                    brightness_limit=0.1, contrast_limit=0.1, p=1.0),
                albu.RandomGamma(p=1.0),
            ],
            p=1.0,
        ),
        albu.OneOf(
            [
                albu.Blur(blur_limit=3, p=1.0),
                albu.MedianBlur(blur_limit=3, p=1.0),
                albu.MotionBlur(blur_limit=3, p=1.0),
            ],
            p=1.0,
        ),
        albu.Normalize(),
    ])
Code Example #25
def get_albumentations_train_transforms():
    return A.Compose(
        [
            A.RandomSizedCrop(
                min_max_height=(600, 800), height=1024, width=1024, p=0.5),
            A.OneOf([
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     p=1.0),
                A.RandomBrightnessContrast(
                    brightness_limit=0.2, contrast_limit=0.2, p=1.0),
            ],
                    p=0.8),
            A.OneOf(
                [
                    A.RGBShift(p=1.0),
                    A.CLAHE(p=1.0),  # internal logic is rgb order
                    A.RandomGamma(p=1.0),
                ],
                p=0.4),
            # A.CLAHE(p=0.3),
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomRotate90(p=0.5),
            A.Cutout(num_holes=8,
                     max_h_size=64,
                     max_w_size=64,
                     fill_value=(124, 117, 104),
                     p=0.5),  # rgb order
        ],
        p=1.0,
        bbox_params=A.BboxParams(format='coco',
                                 min_area=4,
                                 min_visibility=0.01,
                                 label_fields=['category_id']))
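Unlike the pascal_voc pipelines above, this one declares BboxParams in 'coco' format ([x_min, y_min, width, height]) with a 'category_id' label field, so a call looks like this (values are placeholders):

transform = get_albumentations_train_transforms()
out = transform(
    image=image,
    bboxes=[[10, 20, 100, 80]],  # coco: [x_min, y_min, width, height]
    category_id=[3],
)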
Code Example #26
    def __init__(self):

        self.val_transform = A.Compose([
            A.Resize(224, 224),
        ])

        self.test_transform = A.Compose([
            A.Resize(224, 224),
        ])

        self.train_transform = A.Compose([
            A.Resize(224, 224),
            A.HueSaturationValue(),
            A.RandomRotate90(),
            A.ShiftScaleRotate(rotate_limit=15),
            A.Blur(),
            A.OpticalDistortion(),
            # A.GridDistortion(),
            A.Cutout(num_holes=16, max_h_size=16, max_w_size=16),
        ])

        # split ratio between train and val dataset
        ratio = 0.8

        # Data file list
        self.train_list = pd.read_csv(f"{DATASET}/train_list.txt",
                                      index_col=False,
                                      sep="\t").to_numpy()
        size = self.train_list.shape[0]
        self.offset = int(size * ratio)
        np.random.shuffle(self.train_list)
        logger.info("after shuffle: {}".format(self.train_list[:, 1]))

        test_list_temp = glob(f"{DATASET}/test/*.jpg")
        self.test_list = []
        for path in test_list_temp:
            self.test_list.append(path[len(DATASET) + 1:])
Code Example #27
def hard_augmentations(mask_dropout=True) -> List[A.DualTransform]:
    return [
        # D4 Augmentations
        A.RandomRotate90(p=1),
        A.Transpose(p=0.5),
        # Spatial augmentations
        A.OneOf(
            [
                A.ShiftScaleRotate(scale_limit=0.2, rotate_limit=45, border_mode=cv2.BORDER_REFLECT101),
                A.ElasticTransform(border_mode=cv2.BORDER_REFLECT101, alpha_affine=5),
            ]
        ),
        # Color augmentations
        A.OneOf(
            [
                A.RandomBrightnessContrast(brightness_by_max=True),
                A.CLAHE(),
                A.FancyPCA(),
                A.HueSaturationValue(),
                A.RGBShift(),
                A.RandomGamma(),
            ]
        ),
        # Dropout & Shuffle
        A.OneOf(
            [
                A.RandomGridShuffle(),
                A.CoarseDropout(),
                A.MaskDropout(max_objects=2, mask_fill_value=0) if mask_dropout else A.NoOp(),
            ]
        ),
        # Add occasional blur
        A.OneOf([A.GaussianBlur(), A.GaussNoise(), A.IAAAdditiveGaussianNoise()]),
        # Weather effects
        A.RandomFog(fog_coef_lower=0.01, fog_coef_upper=0.3, p=0.1),
    ]
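Note that hard_augmentations() returns a plain list of transforms rather than a Compose, so the caller is expected to wrap it, typically together with normalization. A sketch reusing the snippet's A alias (and assuming an albumentations version that still ships the IAA transforms):

train_aug = A.Compose(hard_augmentations(mask_dropout=True) + [A.Normalize()])
out = train_aug(image=image, mask=mask)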
Code Example #28
def heavy_training_transforms():
    return A.Compose([
        A.OneOf([
            A.Transpose(),
            A.VerticalFlip(),
            A.HorizontalFlip(),
            A.RandomRotate90(),
            A.NoOp()
        ],
                p=1.0),
        A.OneOf([
            A.CLAHE(),
            A.RGBShift(),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
            A.HueSaturationValue(),
            A.NoOp()
        ],
                p=0.15),
        A.OneOf([
            A.ElasticTransform(),
            A.GridDistortion(),
            A.OpticalDistortion(),
            A.NoOp(),
            A.ShiftScaleRotate(),
        ],
                p=1.0),
        A.OneOf([A.GaussNoise(), A.GaussianBlur(),
                 A.NoOp()], p=0.15),
        A.OneOf([
            A.CoarseDropout(max_holes=16, max_height=16, max_width=16),
            A.NoOp()
        ],
                p=1.0),
        A.Normalize()
    ])
Code Example #29
def get_augmentation_transforms(add_non_spatial=True):
    transforms = [
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.OneOf([
            A.ElasticTransform(
                alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03, p=0.5),
            A.GridDistortion(p=0.5),
            A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
        ],
                p=0.8),
    ]

    if add_non_spatial:
        transforms.extend([
            A.CLAHE(p=0.8),
            A.RandomBrightnessContrast(p=0.8),
            A.RandomGamma(p=0.8)
        ])

    aug_transform = A.Compose(transforms)

    return aug_transform
Code Example #30
File: dataset2.py Project: Interesting6/SegySegUNet
    def __init__(self, data_dir, train=True, ptsize=128):
        super(F3DS, self).__init__()
        self.train = train
        self.ptsize = ptsize
        self.data_dir = data_dir
        
        self.get_data()
        self.mapDataTo1()   # normalize to [0, 1] first, then standardize with the mean
        self.hw = self.data_cube.shape[1:] # image size  [463+20, 951+20,]

        self.train_seg = [0, 1, 3]
        self.test_seg = [2]
        if train:  # training data vs. test data
            self.data_cube = self.data_cube[self.train_seg]
            self.label_cube = self.label_cube[self.train_seg]
        else: # test 
            self.data_cube = self.data_cube[self.test_seg]
            self.label_cube = self.label_cube[self.test_seg]

        c = 20
        if self.train:
            self.hwarange = [torch.arange(0, x-ptsize+c, 10) for x in self.hw]
        else:
            self.hwarange = [torch.arange(0, x-ptsize, ptsize) for x in self.hw]
        self.chwn = [self.data_cube.shape[0]] + [len(x) for x in self.hwarange] # number of sections, sliding steps along h, sliding steps along w
        
        self.augment = A.Compose([
            A.PadIfNeeded(min_height=self.ptsize+c, min_width=self.ptsize+c, ),
            A.RandomRotate90(),
            A.HorizontalFlip(),
            A.VerticalFlip(),
        ])
        self.tsf = transforms.Compose([
            transforms.ToTensor(),
        ])