Example #1
def get_training_augmentation():
    """Builds random transformations we want to apply to our dataset.

    Arguments:
        
    Returns:
        A albumentation functions to pass our images to.
    Raises:

    """
    train_transform = [
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf([A.CLAHE(p=1), A.RandomBrightness(p=1), A.RandomGamma(p=1),], p=0.9,),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf([A.RandomContrast(p=1), A.HueSaturationValue(p=1),], p=0.9,),
        A.Lambda(mask=round_clip_0_1),
    ]
    return A.Compose(train_transform)
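The round_clip_0_1 helper used above (and again in Examples #28 and #30) is not shown in these snippets. Judging by its name and its use as a mask transform, a plausible sketch rounds interpolated mask values and clips them back to [0, 1]:

import numpy as np

def round_clip_0_1(x, **kwargs):
    # plausible definition: re-binarize soft mask values produced by
    # interpolating transforms, keeping them in [0, 1]
    return x.round().clip(0, 1)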
Example #2
    def _get_transforms(self):
        resize_transform: list = [
            A.PadIfNeeded(
                min_height=self.h, min_width=self.w, border_mode=cv2.BORDER_CONSTANT
            ),
            A.RandomCrop(self.h, self.w),
        ]

        augmentations: list = [
            A.HorizontalFlip(p=0.5),
            A.RandomBrightnessContrast(p=0.2),
        ]

        def _normalizer(image, **_):
            return image / 255.0

        compatible: list = [
            ToTensorV2(always_apply=True),
            A.Lambda(image=_normalizer),
        ]

        # train/val pipeline includes the augmentations; the test pipeline does not
        transforms_train_val_ = A.Compose(resize_transform + augmentations + compatible)
        transforms_test_ = A.Compose(resize_transform + compatible)

        def transforms_train_val(image):
            return transforms_train_val_(image=np.array(image))["image"]

        def transforms_test(image):
            return transforms_test_(image=np.array(image))["image"]

        return transforms_train_val, transforms_test
Example #3
def get_train_transforms_simple_bright_pad(input_size,
                                           use_crop=False,
                                           use_no_color_aug=False,
                                           use_center_crop=False,
                                           center_crop_ratio=0.9,
                                           use_gray=False):
    def longest_max_size(img, interpolation=cv2.INTER_LINEAR, **params):
        img = F.longest_max_size(img,
                                 max_size=input_size,
                                 interpolation=interpolation)

        return img

    return al.Compose([
        al.Lambda(longest_max_size),
        al.PadIfNeeded(min_height=input_size,
                       min_width=input_size,
                       always_apply=True,
                       border_mode=0),
        al.HorizontalFlip(p=0.5),
        al.RandomBrightness(p=0.2, limit=0.2),
        al.RandomContrast(p=0.1, limit=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
Example #4
def get_preprocessing(preprocessing_fn):
    """构造预处理转换

    Args:
        preprocessing_fn (callbale): 标准化函数
            (可以针对每个预先训练的神经网络)
    Return:
        transform: albumentations.Compose

    """

    _transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(_transform)
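The to_tensor helper referenced here is not defined in this snippet; Example #15 below defines the usual version inline. A minimal sketch:

def to_tensor(x, **kwargs):
    # HWC -> CHW, float32, the layout PyTorch models expect
    return x.transpose(2, 0, 1).astype('float32')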
Example #5
def test_lambda_serialization(image, mask, albumentations_bboxes, keypoints, seed, p):
    def vflip_image(image, **kwargs):
        return F.vflip(image)

    def vflip_mask(mask, **kwargs):
        return F.vflip(mask)

    def vflip_bbox(bbox, **kwargs):
        return F.bbox_vflip(bbox, **kwargs)

    def vflip_keypoint(keypoint, **kwargs):
        return F.keypoint_vflip(keypoint, **kwargs)

    aug = A.Lambda(name="vflip", image=vflip_image, mask=vflip_mask, bbox=vflip_bbox, keypoint=vflip_keypoint, p=p)

    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug, lambda_transforms={"vflip": aug})
    set_seed(seed)
    aug_data = aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
Example #6
    def __init__(self,
                 images,
                 labels,
                 batch_size=16,
                 image_shape=(256, 512, 1),
                 do_shuffle_at_epoch_end=True,
                 length=None,
                 do_augment=True):
        super().__init__(images, labels, batch_size, image_shape,
                         do_shuffle_at_epoch_end, length, do_augment)
        self.augmenting_pipeline = A.Compose([
            A.HorizontalFlip(),
            A.IAAAffine(translate_percent={"x": (-1, 1)}, mode="reflect", p=1),
            A.PadIfNeeded(min_width=int(self.input_shape[1] * 2),
                          min_height=self.input_shape[0]),
            A.GridDistortion(p=0.8, distort_limit=0.5),
            A.ElasticTransform(p=0.5,
                               alpha=10,
                               sigma=100 * 0.03,
                               alpha_affine=0),
            A.CenterCrop(width=self.input_shape[1],
                         height=self.input_shape[0]),
            A.IAAPerspective(scale=(0, 0.10), p=1),
            A.ShiftScaleRotate(shift_limit=0,
                               scale_limit=(.0, 0.4),
                               rotate_limit=0,
                               p=0.5),
            A.CLAHE(clip_limit=2.0, p=0.5),
            A.Lambda(
                image=self.convert_image,
                mask=self.convert_segmentations,
            ),
        ])
Example #7
    def _get_augmentations(h, w, augment):
        def normalize(x, **_):
            return x / 255.0

        resizing: list = [
            # A.LongestMaxSize(max_size=WIDTH, always_apply=True),
            A.PadIfNeeded(min_height=h,
                          min_width=w,
                          border_mode=cv2.BORDER_CONSTANT),
            A.RandomCrop(h, w),
            # A.Resize(height=HEIGHT, width=WIDTH, always_apply=True),
        ]
        compatibility: list = [
            ToTensorV2(always_apply=True),
            A.Lambda(image=normalize),
        ]

        augmentations: list = []
        if augment:
            augmentations = [
                A.HorizontalFlip(p=0.5),
                A.RandomBrightnessContrast(p=0.2),
            ]

        return A.Compose(
            resizing + augmentations + compatibility,
            bbox_params=A.BboxParams(format="pascal_voc",
                                     min_visibility=0.05,
                                     label_fields=["class_labels"]),
        )
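Because this pipeline declares bbox_params with a label_fields entry, every call must pass bboxes together with class_labels. A hypothetical call, assuming _get_augmentations is reachable as a plain function (the sizes and box values here are made up):

transform = _get_augmentations(h=512, w=512, augment=True)
out = transform(
    image=np.zeros((600, 800, 3), dtype=np.uint8),
    bboxes=[(50, 60, 200, 220)],  # pascal_voc: x_min, y_min, x_max, y_max
    class_labels=["car"],
)
image, boxes = out["image"], out["bboxes"]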
Example #8
def get_inference_augmentation():
    """Add paddings to make image shape divisible by 32"""
    test_transform = [
        albu.Normalize(),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(test_transform)
Example #9
def test_lambda_serialization(image, mask, bboxes, keypoints, seed, p):
    def vflip_image(image, **kwargs):
        return F.vflip(image)

    def vflip_mask(mask, **kwargs):
        return F.vflip(mask)

    def vflip_bbox(bbox, **kwargs):
        return F.bbox_vflip(bbox, **kwargs)

    def vflip_keypoint(keypoint, **kwargs):
        return F.keypoint_vflip(keypoint, **kwargs)

    aug = A.Lambda(name='vflip',
                   image=vflip_image,
                   mask=vflip_mask,
                   bbox=vflip_bbox,
                   keypoint=vflip_keypoint,
                   p=p)

    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug,
                                   lambda_transforms={'vflip': aug})
    random.seed(seed)
    aug_data = aug(image=image, mask=mask, bboxes=bboxes, keypoints=keypoints)
    random.seed(seed)
    deserialized_aug_data = deserialized_aug(image=image,
                                             mask=mask,
                                             bboxes=bboxes,
                                             keypoints=keypoints)
    assert np.array_equal(aug_data['image'], deserialized_aug_data['image'])
    assert np.array_equal(aug_data['mask'], deserialized_aug_data['mask'])
    assert np.array_equal(aug_data['bboxes'], deserialized_aug_data['bboxes'])
    assert np.array_equal(aug_data['keypoints'],
                          deserialized_aug_data['keypoints'])
Example #10
    def get_preprocessing(preprocessing_fn):
        """Construct preprocessing transform

        Args:
            preprocessing_fn (callable): data normalization function
                (can be specific for each pretrained neural network)
        Return:
            transform: albumentations.Compose

        """

        _transform = [
            albu.Lambda(image=preprocessing_fn),
            albu.Lambda(image=Transforms.to_tensor, mask=Transforms.to_tensor),
        ]
        return albu.Compose(_transform)
Example #11
def test_lambda_transform():
    def negate_image(image, **kwargs):
        return -image

    def one_hot_mask(mask, num_channels, **kwargs):
        new_mask = np.eye(num_channels, dtype=np.uint8)[mask]
        return new_mask

    def vflip_bbox(bbox, **kwargs):
        return F.bbox_vflip(bbox, **kwargs)

    def vflip_keypoint(keypoint, **kwargs):
        return F.keypoint_vflip(keypoint, **kwargs)

    aug = A.Lambda(
        image=negate_image, mask=partial(one_hot_mask, num_channels=16), bbox=vflip_bbox, keypoint=vflip_keypoint, p=1
    )

    output = aug(
        image=np.ones((10, 10, 3), dtype=np.float32),
        mask=np.tile(np.arange(0, 10), (10, 1)),
        bboxes=[(10, 15, 25, 35)],
        keypoints=[(20, 30, 40, 50)],
    )
    assert (output["image"] < 0).all()
    assert output["mask"].shape[2] == 16  # num_channels
    assert output["bboxes"] == [F.bbox_vflip((10, 15, 25, 35), 10, 10)]
    assert output["keypoints"] == [F.keypoint_vflip((20, 30, 40, 50), 10, 10)]
Example #12
    def get_preprocessing(self, pre_processing_fn):
        """
        Construct preprocessing transform
        from https://github.com/qubvel/segmentation_models.pytorch/blob/master/examples/cars%20segmentation%20(camvid).ipynb
        [Cell 11]
        Args:
            pre_processing_fn (callable): data normalization function
                (can be specific for each pretrained neural network)
        Return:
            transform: albumentations.Compose
        """
        _transform = [
            A.Lambda(name='pre_process', image=pre_processing_fn),
            A.Lambda(name='to_tensor', image=self.to_tensor, mask=self.to_tensor),
        ]
        return A.Compose(_transform)
Example #13
def get_preprocessing(preprocessing_fn):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose

    """
    # TODO: UserWarning: Using lambda is incompatible with multiprocessing. Consider using regular functions or partial().
    #   warnings.warn('Using lambda is incompatible with multiprocessing.
    _transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(_transform)
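The TODO above refers to the warning albumentations emits when an anonymous lambda is passed to a Lambda transform: lambdas cannot be pickled, which breaks DataLoader workers under multiprocessing. Named module-level functions and functools.partial objects pickle fine; a sketch of the suggested fix (scale_image and its factor are illustrative):

from functools import partial

def scale_image(x, factor, **kwargs):
    return x * factor

# picklable, unlike `lambda x, **kwargs: x * 0.5`
aug = albu.Lambda(image=partial(scale_image, factor=0.5))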
Example #14
def get_preprocessing(preprocessing_fn, downsample_factor=1):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose

    """
    downsample_fn = partial(downsample, factor=downsample_factor)
    _transform = [
        albu.Lambda(image=downsample_fn, mask=downsample_fn),
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(_transform)
Example #15
def get_preprocessing(preprocessing_fn):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose
    """
    def to_tensor(x, **kwargs):
        return x.transpose(2, 0, 1).astype('float32')
    _transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(_transform)
Example #16
def build_test_transform(size=244):
    """Build argmentation process for testing data
    Arg:
        size (int): the output size for images (3, size, size).
    """
    _transform = [albu.Resize(size, size), albu.Lambda(image=to_tensor)]

    return albu.Compose(_transform)
Example #17
def get_preprocessing():
    '''
    Preprocess the image and the mask
    '''
    _transform = [
        A.Lambda(image=to_tensor, mask=to_tensor_mask),
    ]
    return A.Compose(_transform)
Example #18
def get_preprocessing():
    import torch

    def norm(img, **params):
        return F.normalize(img,
                           mean=(0.485, 0.456, 0.406),
                           std=(0.229, 0.224, 0.225),
                           max_pixel_value=255.0)

    def to_tensor(x, **kwargs):
        return torch.from_numpy(x.transpose(2, 0, 1).astype('float32'))

    _transform = [
        al.Lambda(image=norm),
        al.Lambda(image=to_tensor, mask=to_tensor)
    ]
    return al.Compose(_transform)
Example #19
    def build_test(self):
        test_transforms = A.Compose([
            A.Normalize(mean=self.mean, std=self.std),
            AT.ToTensor(),
            A.Lambda(image=lambda x, **kwargs: torch.cat([x, x, x], 0), always_apply=True)
        ])

        return test_transforms
Example #20
File: dataset.py Project: kuruparan/PVEN
def get_validation_augmentation():
    test_transform = [
        albu.Lambda(image=pad_image_to_multiplys_of(32), mask=pad_image_to_multiplys_of(32))
        # albu.LongestMaxSize(224),
        # albu.Lambda(image=pad_image_to_multiplys_of(32), mask=pad_image_to_multiplys_of(32))

        # albu.RandomCrop(height=320, width=320, always_apply=True)
    ]
    return albu.Compose(test_transform)
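pad_image_to_multiplys_of is a project-specific factory that is not shown here. A plausible sketch, judging by the name: pad each spatial dimension up to the next multiple of the divisor so that downsampling stages divide evenly.

import numpy as np

def pad_image_to_multiplys_of(divisor):
    def _pad(img, **kwargs):
        h, w = img.shape[:2]
        pad_h = (divisor - h % divisor) % divisor
        pad_w = (divisor - w % divisor) % divisor
        padding = [(0, pad_h), (0, pad_w)] + [(0, 0)] * (img.ndim - 2)
        return np.pad(img, padding, mode="constant")
    return _pad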
Example #21
def get_validation_augmentation(height, width, downsample_factor=1):
    """Add paddings to make image shape divisible by 32"""
    downsample_fn = partial(downsample, factor=downsample_factor)
    test_transform = [
        albu.Lambda(image=downsample_fn, mask=downsample_fn),
        albu.PadIfNeeded(min_height=height, min_width=width),
        albu.CenterCrop(height=height, width=width)
    ]
    return albu.Compose(test_transform)
Example #22
    def __init__(self, config, **kwargs):
        super(DataManger_Epoch, self).__init__(config)
        transform = dict()
        transform['train'] = A.Compose([
            A.Resize(config['image_size'][0], config['image_size'][1]),
            A.PadIfNeeded(min_height=config['image_size'][0] + 10,
                          min_width=config['image_size'][1] + 10),
            A.RandomCrop(config['image_size'][0], config['image_size'][1]),
            A.HorizontalFlip(p=0.5),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ToTensorV2(),
            A.Lambda(image=RandomErasing(probability=0.5,
                                         mean=[0.485, 0.456, 0.406])),
        ])

        transform['val'] = A.Compose([
            A.Resize(config['image_size'][0], config['image_size'][1]),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2()
        ])

        transform['test'] = A.Compose([
            A.Resize(config['image_size'][0], config['image_size'][1]),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ToTensorV2()
        ])

        dataset = dict()
        for _phase in self.datasource.get_phase():
            dataset[_phase] = ImageDataset(self.datasource.get_data(_phase),
                                           transform=transform[_phase])

        self.dataloader['train'] = DataLoader(
            dataset=dataset['train'],
            batch_size=config['batch_size'],
            shuffle=config['shuffle'],
            num_workers=config['num_workers'],
            pin_memory=config['pin_memory'],
            drop_last=config['drop_last'])

        self.dataloader['val'] = DataLoader(dataset=dataset['val'],
                                            batch_size=config['batch_size'],
                                            shuffle=False,
                                            num_workers=config['num_workers'],
                                            pin_memory=config['pin_memory'],
                                            drop_last=config['drop_last'])

        self.dataloader['test'] = DataLoader(dataset['test'],
                                             batch_size=config['batch_size'],
                                             shuffle=False,
                                             drop_last=False)
Example #23
def get_preprocessing(preprocessing_fn, apply_augmentation=False):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function (can be specific for each pretrained neural network)
        apply_augmentation (boolean): apply data augmentation or not
    Return:
        transform: albumentations.Compose
    """

    _transform = [A.Resize(384, 480)]

    if apply_augmentation:
        _transform += [
            A.HorizontalFlip(p=0.5),
            A.ShiftScaleRotate(scale_limit=0.5,
                               rotate_limit=0,
                               shift_limit=0.1,
                               p=1,
                               border_mode=0),
            A.IAAAdditiveGaussianNoise(p=0.2),
            A.IAAPerspective(p=0.5),
            A.OneOf(
                [A.CLAHE(p=1),
                 A.RandomBrightness(p=1),
                 A.RandomGamma(p=1)],
                p=0.9),
            A.OneOf([
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1)
            ],
                    p=0.9),
            A.OneOf([A.RandomContrast(p=1),
                     A.HueSaturationValue(p=1)], p=0.9)
        ]

    _transform += [
        A.Lambda(image=preprocessing_fn),
        A.Lambda(image=to_CHW, mask=to_CHW)
    ]

    return A.Compose(_transform)
Example #24
def numpy_to_torch():
    conversion_transformation = albu.Compose(
        [albu.Lambda(image=g.to_tensor)],
        additional_targets={
            'quaternion': 'image',
            'scales': 'image',
            'xy': 'image',
        },
    )
    return conversion_transformation
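With additional_targets, the extra arrays are processed by the same image pipeline under their own keys. A hypothetical call (the array shapes are made up):

convert = numpy_to_torch()
out = convert(
    image=np.zeros((8, 8, 3), dtype=np.float32),
    quaternion=np.zeros((1, 1, 4), dtype=np.float32),
    scales=np.zeros((1, 1, 3), dtype=np.float32),
    xy=np.zeros((1, 1, 2), dtype=np.float32),
)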
Example #25
def get_preprocessing(size, mean=(0., 0., 0.), std=(1., 1., 1.)):
    preprocessing_fn = partial(
        normalize_img, mean=mean, std=std,
    )

    transform = [
        albu.Lambda(image=preprocessing_fn),
        albu.Resize(size, size),
        ToTensorV2(),
    ]
    return albu.Compose(transform)
Example #26
    def sequence_preprocessing(self):
        transform = A.Compose(
            [
                A.Lambda(image=self.aug_morph_close,
                         keypoint=self.aug_keypoints,
                         p=1.0),
            ],
            additional_targets={
                'image1': 'image',
                'image2': 'image',
            },
        )
        return transform
Example #27
    def default_train_transform(self) -> Callable:
        """
        Default mnist training transforms. Performs

        * Random Rotation between -5 and 5 degrees

        * Normalization using mean: (0.1307,) and std: (0.3081,)

        Returns:
            Callable: The transform, an ``albumentations.Compose`` object.
        """
        return A.Compose([
            A.Lambda(image=ToNumpy),
            A.Rotate(
                limit=5
            ),  # Randomly rotating the image in the range -5,5 degrees
            A.Normalize(mean=mnist.mean, std=mnist.std,
                        max_pixel_value=1.0),  # Normalizing
            A.Lambda(image=ToTensor),
        ])
Example #28
def get_training_augmentation(augment):
    if augment > 0:
        train_transform = [
            A.GridDistortion(distort_limit=augment,
                             interpolation=cv2.INTER_AREA,
                             border_mode=cv2.BORDER_CONSTANT,
                             value=(240, 240, 240),
                             mask_value=0)
        ]
    else:
        train_transform = []

    train_transform += [

        #A.ShiftScaleRotate(rotate_limit=10, scale_limit=0, interpolation=cv2.INTER_AREA, border_mode=cv2.BORDER_CONSTANT, value=1),

        #A.ElasticTransform(
        #    alpha=0.4, alpha_affine=0.3, interpolation=cv2.INTER_AREA, border_mode=cv2.BORDER_CONSTANT, value=(240, 240, 240), mask_value=0),
        A.Rotate(limit=10,
                 interpolation=cv2.INTER_AREA,
                 border_mode=cv2.BORDER_CONSTANT,
                 value=(240, 240, 240),
                 mask_value=0),

        #A.IAAAdditiveGaussianNoise(p=0.1),

        #A.OneOf(
        #    [
        #        A.CLAHE(p=1),
        #        A.RandomBrightness(p=1),
        #        A.RandomGamma(p=1),
        #    ],
        #    p=0.9,
        #),

        #A.OneOf(
        #    [
        #        A.IAASharpen(p=1),
        #        A.Blur(blur_limit=3, p=1),
        #    ],
        #    p=0.9,
        #),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1)
    ]

    return A.Compose(train_transform)
Example #29
def predict(model: nn.Module,
            image: np.ndarray,
            image_size,
            tta=None,
            normalize=A.Normalize(),
            batch_size=1,
            activation='sigmoid') -> np.ndarray:
    model.eval()
    tile_step = (image_size[0] // 2, image_size[1] // 2)

    tile_slicer = ImageSlicer(image.shape,
                              image_size,
                              tile_step,
                              weight='pyramid')
    tile_merger = CudaTileMerger(tile_slicer.target_shape, 1,
                                 tile_slicer.weight)
    patches = tile_slicer.split(image)

    transform = A.Compose([normalize, A.Lambda(image=_tensor_from_rgb_image)])

    if tta == 'fliplr':
        model = TTAWrapperFlipLR(model)
        print('Using FlipLR TTA')

    if tta == 'd4':
        model = TTAWrapperD4(model)
        print('Using D4 TTA')

    with torch.no_grad():
        data = list({
            'image': patch,
            'coords': np.array(coords, dtype=int)
        } for (patch, coords) in zip(patches, tile_slicer.crops))
        for batch in DataLoader(InMemoryDataset(data, transform),
                                pin_memory=True,
                                batch_size=batch_size):
            image = batch['image'].cuda(non_blocking=True)
            coords = batch['coords']
            mask_batch = model(image)
            tile_merger.integrate_batch(mask_batch, coords)

    mask = tile_merger.merge()
    if activation == 'sigmoid':
        mask = mask.sigmoid()

    if isinstance(activation, float):
        mask = F.relu(mask - activation, inplace=True)

    mask = np.moveaxis(to_numpy(mask), 0, -1)
    mask = tile_slicer.crop_to_orignal_size(mask)

    return mask
Example #30
def get_training_augmentation(dim = 512, rot_limit = 45):
    train_transform = [

        A.HorizontalFlip(p=0.5),

        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=rot_limit, shift_limit=0.1, p=1, border_mode=0),

        A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),
        A.RandomCrop(height=dim, width=dim, always_apply=True),

        A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
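As with the other mask pipelines above, the returned Compose is called with named targets. A minimal usage sketch (the input shapes are illustrative):

aug = get_training_augmentation(dim=512, rot_limit=45)
sample = aug(
    image=np.zeros((600, 800, 3), dtype=np.uint8),
    mask=np.zeros((600, 800, 1), dtype=np.float32),
)
image, mask = sample["image"], sample["mask"]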