Example #1
    def test_forward(self, shape, padding, patchwise_apply, same_on_batch,
                     keepdim, device, dtype):
        seq = K.PatchSequential(
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                K.RandomPerspective(0.2, p=0.5),
                K.RandomSolarize(0.1, 0.1, p=0.5),
            ),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1),
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                K.RandomPerspective(0.2, p=0.5),
                K.RandomSolarize(0.1, 0.1, p=0.5),
            ),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1),
            grid_size=(2, 2),
            padding=padding,
            patchwise_apply=patchwise_apply,
            same_on_batch=same_on_batch,
            keepdim=keepdim,
        )
        input = torch.randn(*shape, device=device, dtype=dtype)
        trans = torch.randn(shape[0], 3, 3, device=device, dtype=dtype)
        out = seq(input)
        assert out.shape[-3:] == input.shape[-3:]

        out = seq((input, trans))
        assert out[0].shape[-3:] == input.shape[-3:]
        assert out[1].shape == trans.shape
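For context, K.PatchSequential splits each image into a grid_size grid of patches and augments the patches independently. A minimal sketch of that pattern (assuming import torch and import kornia.augmentation as K, as in the tests above):

import torch
import kornia.augmentation as K

# 2x2 patch grid; one shared transform applied patch by patch
seq = K.PatchSequential(
    K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
    grid_size=(2, 2),
    patchwise_apply=False,
)
img = torch.rand(2, 3, 224, 224)
out = seq(img)
assert out.shape == img.shape  # spatial shape is preserved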
Example #2
    def test_forward(self, shape, padding, patchwise_apply, same_on_batch,
                     keepdim, random_apply, device, dtype):
        torch.manual_seed(11)
        try:  # skip wrong param settings.
            seq = K.PatchSequential(
                K.color.RgbToBgr(),
                K.ColorJitter(0.1, 0.1, 0.1, 0.1),
                K.ImageSequential(
                    K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                    K.RandomPerspective(0.2, p=0.5),
                    K.RandomSolarize(0.1, 0.1, p=0.5),
                ),
                K.RandomMixUp(p=1.0),
                grid_size=(2, 2),
                padding=padding,
                patchwise_apply=patchwise_apply,
                same_on_batch=same_on_batch,
                keepdim=keepdim,
                random_apply=random_apply,
            )
        # TODO: improve me and remove the exception.
        except Exception:
            return

        input = torch.randn(*shape, device=device, dtype=dtype)
        out = seq(input)
        if seq.return_label:
            out, _ = out
        assert out.shape[-3:] == input.shape[-3:]

        reproducibility_test(input, seq)
Example #3
    def test_inverse_and_forward_return_transform(self, random_apply, device,
                                                  dtype):
        inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)
        bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]],
                            device=device,
                            dtype=dtype)
        keypoints = torch.tensor([[[465, 115], [545, 116]]],
                                 device=device,
                                 dtype=dtype)
        mask = bbox_to_mask(
            torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]],
                         device=device,
                         dtype=dtype), 1000, 500)[:, None].float()
        aug = K.AugmentationSequential(
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
                K.RandomAffine(360, p=1.0, return_transform=True)),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),
            K.RandomAffine(360, p=1.0, return_transform=True),
            data_keys=["input", "mask", "bbox", "keypoints"],
            random_apply=random_apply,
        )
        with pytest.raises(
                Exception):  # No parameters available for inversion.
            aug.inverse(inp, mask, bbox, keypoints)

        out = aug(inp, mask, bbox, keypoints)
        assert out[0][0].shape == inp.shape
        assert out[1].shape == mask.shape
        assert out[2].shape == bbox.shape
        assert out[3].shape == keypoints.shape

        reproducibility_test((inp, mask, bbox, keypoints), aug)
Example #4
    def test_forward_and_inverse(self, random_apply, return_transform, device, dtype):
        inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)
        bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)
        keypoints = torch.tensor([[[465, 115], [545, 116]]], device=device, dtype=dtype)
        mask = bbox_to_mask(
            torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]], device=device, dtype=dtype), 1000, 500
        )[:, None].float()
        aug = K.AugmentationSequential(
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
                K.RandomAffine(360, p=1.0, return_transform=True),
            ),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
            K.RandomAffine(360, p=1.0),
            data_keys=["input", "mask", "bbox", "keypoints"],
            random_apply=random_apply,
            return_transform=return_transform,
        )
        out = aug(inp, mask, bbox, keypoints)
        if return_transform and isinstance(out, (tuple, list)):
            assert out[0][0].shape == inp.shape
        else:
            assert out[0].shape == inp.shape
        assert out[1].shape == mask.shape
        assert out[2].shape == bbox.shape
        assert out[3].shape == keypoints.shape
        reproducibility_test((inp, mask, bbox, keypoints), aug)

        out_inv = aug.inverse(*out)
        assert out_inv[0].shape == inp.shape
        assert out_inv[1].shape == mask.shape
        assert out_inv[2].shape == bbox.shape
        assert out_inv[3].shape == keypoints.shape
Example #5
 def test_forward(self, return_transform, random_apply, device, dtype):
     inp = torch.randn(1, 3, 30, 30, device=device, dtype=dtype)
     aug = K.ImageSequential(
         K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
         kornia.filters.MedianBlur((3, 3)),
         K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),
         K.ImageSequential(
             K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0)
         ),
         K.ImageSequential(
             K.RandomAffine(360, p=1.0)
         ),
         K.RandomAffine(360, p=1.0),
         K.RandomMixUp(p=1.0),
         return_transform=return_transform,
         random_apply=random_apply,
     )
     out = aug(inp)
     if aug.return_label:
         out, _ = out
     if isinstance(out, (tuple,)):
         assert out[0].shape == inp.shape
     else:
         assert out.shape == inp.shape
     aug.inverse(inp)
     reproducibility_test(inp, aug)
Example #6
def kornia_list(MAGN: int = 4):
    """
    Returns standard list of kornia transforms, each with magnitude `MAGN`.
    
    Args:
        MAGN (int): Magnitude of each transform in the returned list.
    """
    transform_list = [
        # SPATIAL
        K.RandomHorizontalFlip(p=1),
        K.RandomRotation(degrees=90., p=1),
        K.RandomAffine(degrees=MAGN * 5.,
                       shear=MAGN / 5,
                       translate=MAGN / 20,
                       p=1),
        K.RandomPerspective(distortion_scale=MAGN / 25, p=1),

        # PIXEL-LEVEL
        K.ColorJitter(brightness=MAGN / 30, p=1),  # brightness
        K.ColorJitter(saturation=MAGN / 30, p=1),  # saturation
        K.ColorJitter(contrast=MAGN / 30, p=1),  # contrast
        K.ColorJitter(hue=MAGN / 30, p=1),  # hue
        K.ColorJitter(p=0),  # identity
        K.RandomMotionBlur(kernel_size=2 * (MAGN // 3) + 1,
                           angle=MAGN,
                           direction=1.,
                           p=1),
        K.RandomErasing(scale=(MAGN / 100, MAGN / 50),
                        ratio=(MAGN / 20, MAGN),
                        p=1),
    ]
    return transform_list
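One possible way to consume this list (a sketch, assuming torch and torch.nn as nn are imported alongside the K alias, and the kornia version used by the function above): chain every transform into a single module.

transforms = kornia_list(MAGN=4)
pipeline = nn.Sequential(*transforms)   # apply all transforms in order
batch = torch.rand(8, 3, 64, 64)        # B x C x H x W in [0, 1]
augmented = pipeline(batch)             # same shape as the input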
Example #7
 def test_forward(self, return_transform, device, dtype):
     inp = torch.randn(1, 3, 30, 30, device=device, dtype=dtype)
     aug = K.ImageSequential(
         K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
         kornia.filters.MedianBlur((3, 3)),
         K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),
         K.RandomAffine(360, p=1.0),
         return_transform=return_transform,
     )
     out = aug(inp)
     if isinstance(out, (tuple, )):
         assert out[0].shape == inp.shape
     else:
         assert out.shape == inp.shape
Example #8
    def test_intensity_only(self):
        seq = K.PatchSequential(
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                K.RandomPerspective(0.2, p=0.5),
                K.RandomSolarize(0.1, 0.1, p=0.5),
            ),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1),
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                K.RandomPerspective(0.2, p=0.5),
                K.RandomSolarize(0.1, 0.1, p=0.5),
            ),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1),
            grid_size=(2, 2),
        )
        assert not seq.is_intensity_only()

        seq = K.PatchSequential(
            K.ImageSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5)),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
            K.ColorJitter(0.1, 0.1, 0.1, 0.1),
            grid_size=(2, 2),
        )
        assert seq.is_intensity_only()
Example #9
    def __init__(self,
                 s_color=0.1,
                 p_color=0.8,
                 p_flip=0.5,
                 p_gray=0.2,
                 p_blur=0.5,
                 kernel_min=0.1,
                 kernel_max=2.) -> None:
        super(KorniaAugmentationPipeline, self).__init__()
        
        T_hflip = K.RandomHorizontalFlip(p=p_flip) 
        T_gray = K.RandomGrayscale(p=p_gray)
        T_color = K.ColorJitter(0.8 * s_color, 0.8 * s_color, 0.8 * s_color, 0.2 * s_color, p=p_color)

        radius = kernel_max*2  # https://dsp.stackexchange.com/questions/10057/gaussian-blur-standard-deviation-radius-and-kernel-size
        kernel_size = int(radius*2 + 1) # needs to be odd.
        kernel_size = (kernel_size, kernel_size)
        T_blur = K.GaussianBlur(kernel_size=kernel_size, sigma=(kernel_min, kernel_max), p=p_blur)
        #T_blur = KorniaRandomGaussianBlur(kernel_size=kernel_size, sigma=(kernel_min, kernel_max), p=p_blur)

        self.transform = nn.Sequential(
            T_hflip,
            T_color,
            T_gray,
            T_blur
        )
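A hypothetical usage sketch for this pipeline (shapes are illustrative; assumes torch, torch.nn as nn, kornia.augmentation as K, and the kornia version used by the class above):

pipe = KorniaAugmentationPipeline()
images = torch.rand(4, 3, 96, 96)   # B x C x H x W in [0, 1]
views = pipe.transform(images)      # augmented batch, same shape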
Example #10
def get_frame_aug(frame_aug, patch_size):
    tt = []

    if 'cj' in frame_aug:
        _cj = 0.1
        tt += [
            #K.RandomGrayscale(p=0.2),
            K.ColorJitter(_cj, _cj, _cj, 0),
        ]

    if 'flip' in frame_aug:
        tt += [
            K.RandomHorizontalFlip(same_on_batch=True),
        ]

    tt += [kornia.color.Normalize(mean=IMG_MEAN, std=IMG_STD)]
    transform = nn.Sequential(*tt)
    print('Frame augs:', transform, frame_aug)

    if 'grid' in frame_aug:
        aug = lambda x: patch_grid(x,
                                   transform=transform,
                                   shape=patch_size,
                                   stride=patch_size // 2)
    else:
        aug = transform

    return aug
Example #11
    def test_inverse_and_forward_return_transform(self, device, dtype):
        inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)
        bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]],
                            device=device,
                            dtype=dtype)
        keypoints = torch.tensor([[[465, 115], [545, 116]]],
                                 device=device,
                                 dtype=dtype)
        mask = bbox_to_mask(
            torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]],
                         device=device,
                         dtype=dtype), 1000, 500)[:, None].float()
        aug = K.AugmentationSequential(
            K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),
            K.RandomAffine(360, p=1.0, return_transform=True),
            data_keys=["input", "mask", "bbox", "keypoints"],
        )

        out_inv = aug.inverse(inp, mask, bbox, keypoints)
        assert out_inv[0].shape == inp.shape
        assert out_inv[1].shape == mask.shape
        assert out_inv[2].shape == bbox.shape
        assert out_inv[3].shape == keypoints.shape

        out = aug(inp, mask, bbox, keypoints)
        assert out[0][0].shape == inp.shape
        assert out[1].shape == mask.shape
        assert out[2].shape == bbox.shape
        assert out[3].shape == keypoints.shape
Example #12
    def test_individual_forward_and_inverse(self, device, dtype):
        inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)
        bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)
        keypoints = torch.tensor([[[465, 115], [545, 116]]], device=device, dtype=dtype)
        mask = bbox_to_mask(
            torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]], device=device, dtype=dtype), 1000, 500
        )[:, None].float()

        aug = K.AugmentationSequential(
            K.ImageSequential(
                K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
                K.RandomAffine(360, p=1.0, return_transform=True),
            ),
            K.RandomAffine(360, p=1.0, return_transform=False),
            data_keys=['input', 'mask', 'bbox', 'keypoints']
        )
        reproducibility_test((inp, mask, bbox, keypoints), aug)

        aug = K.AugmentationSequential(K.RandomAffine(360, p=1.0, return_transform=True))
        assert aug(inp, data_keys=['input'])[0].shape == inp.shape
        aug = K.AugmentationSequential(K.RandomAffine(360, p=1.0, return_transform=False))
        assert aug(inp, data_keys=['input']).shape == inp.shape
        assert aug(mask, data_keys=['mask'], params=aug._params).shape == mask.shape

        assert aug.inverse(inp, data_keys=['input']).shape == inp.shape
        assert aug.inverse(bbox, data_keys=['bbox']).shape == bbox.shape
        assert aug.inverse(keypoints, data_keys=['keypoints']).shape == keypoints.shape
        assert aug.inverse(mask, data_keys=['mask']).shape == mask.shape
Example #13
    def __init__(self, net, image_size, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, moving_average_decay=0.99):
        super().__init__()

        # default SimCLR augmentation

        DEFAULT_AUG = nn.Sequential(
            RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
            augs.RandomGrayscale(p=0.2),
            augs.RandomHorizontalFlip(),
            RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
            augs.RandomResizedCrop((image_size, image_size)),
            color.Normalize(mean=torch.tensor(
                [0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))
        )

        self.augment = default(augment_fn, DEFAULT_AUG)

        self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer=hidden_layer)
        self.target_encoder = None
        self.target_ema_updater = EMA(moving_average_decay)

        self.online_predictor = MultiLayerPerceptron(projection_size, projection_size, projection_hidden_size)

        # send a mock image tensor to instantiate singleton parameters
        self.forward(torch.randn(2, 3, image_size, image_size))
Example #14
 def test_jit(self, device, dtype):
     B, C, D, H, W = 2, 3, 5, 4, 4
     img = torch.ones(B, C, D, H, W, device=device, dtype=dtype)
     op = K.VideoSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1),
                            same_on_frame=True)
     op_jit = torch.jit.script(op)
     assert_close(op(img), op_jit(img))
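For reference, a minimal eager-mode sketch of the same K.VideoSequential pattern (assuming the imports used in the test above): with same_on_frame=True, one set of ColorJitter parameters is shared across all frames of a clip.

video = torch.rand(1, 3, 8, 32, 32)   # B x C x T x H x W
aug = K.VideoSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1), same_on_frame=True)
out = aug(video)                      # every frame gets the same jitter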
Example #15
def generate_kornia_transforms(image_size=224, resize=256, mean=[], std=[], include_jitter=False):
    mean = torch.tensor(mean) if mean else torch.tensor([0.5, 0.5, 0.5])
    std = torch.tensor(std) if std else torch.tensor([0.1, 0.1, 0.1])
    if torch.cuda.is_available():
        mean = mean.cuda()
        std = std.cuda()
    train_transforms = [G.Resize((resize, resize))]
    if include_jitter:
        train_transforms.append(
            K.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1))
    train_transforms.extend([
        K.RandomHorizontalFlip(p=0.5),
        K.RandomVerticalFlip(p=0.5),
        K.RandomRotation(90),
        K.RandomResizedCrop((image_size, image_size)),
        K.Normalize(mean, std),
    ])
    val_transforms = [
        G.Resize((resize, resize)),
        K.CenterCrop((image_size, image_size)),
        K.Normalize(mean, std),
    ]
    transforms = dict(train=nn.Sequential(*train_transforms),
                      val=nn.Sequential(*val_transforms))
    if torch.cuda.is_available():
        for k in transforms:
            transforms[k] = transforms[k].cuda()
    return transforms
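A usage sketch for this factory (assuming K and G alias kornia.augmentation and kornia.geometry.transform, as the calls suggest):

transforms = generate_kornia_transforms(image_size=224, resize=256)
batch = torch.rand(4, 3, 300, 300)
if torch.cuda.is_available():
    batch = batch.cuda()              # the pipelines were moved to CUDA above
out = transforms['train'](batch)      # 4 x 3 x 224 x 224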
Example #16
    def __init__(self, opt):
        super().__init__()
        self.wrapped_dataset = create_dataset(opt['dataset'])
        self.cropped_img_size = opt['crop_size']
        self.key1 = opt_get(opt, ['key1'], 'hq')
        self.key2 = opt_get(opt, ['key2'], 'lq')
        for_sr = opt_get(
            opt, ['for_sr'],
            False)  # When set, color alterations and blurs are disabled.

        augmentations = [
            augs.RandomHorizontalFlip(),
            augs.RandomResizedCrop((self.cropped_img_size, self.cropped_img_size))]
        if not for_sr:
            augmentations.extend([
                RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
                augs.RandomGrayscale(p=0.2),
                RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1)
            ])
        if opt['normalize']:
            # The paper calls for normalization. Most datasets/models in this repo don't use this.
            # Recommend setting true if you want to train exactly like the paper.
            augmentations.append(
                augs.Normalize(mean=torch.tensor([0.485, 0.456, 0.406]),
                               std=torch.tensor([0.229, 0.224, 0.225])))
        self.aug = nn.Sequential(*augmentations)
Example #17
 def test_exception(self, shape, data_format, device, dtype):
     aug_list = K.VideoSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1),
                                  data_format=data_format,
                                  same_on_frame=True)
     with pytest.raises(AssertionError):
         img = torch.randn(*shape, device=device, dtype=dtype)
         aug_list(img)
Example #18
    def __init__(self, net, image_size=32,
                layer_name_list = [-2],
                 projection_size = 256,
                 projection_hidden_size = 4096,
                 augment_fn = None,
                 moving_average_decay = 0.99,
                 device_ = 'cuda',
                 number_of_classes = 10,
                 mean_data = torch.tensor([0.485, 0.456, 0.406]),
                 std_data = torch.tensor([0.229, 0.224, 0.225])):
        super().__init__()


        DEFAULT_AUG = nn.Sequential(
            RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
            augs.RandomGrayscale(p=0.2),
            augs.RandomHorizontalFlip(),
            RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
            augs.RandomResizedCrop((image_size, image_size)),
            augs.Normalize(mean=mean_data, std=std_data)
        )

        self.augment = default(augment_fn, DEFAULT_AUG)
        self.device = device_

        self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer_name_list=layer_name_list).to(self.device)
        self.target_encoder = None
        self.target_ema_updater = EMA(moving_average_decay)

        self.online_predictor = MLP(projection_size, projection_size, projection_hidden_size).to(self.device)
        self.online_predictor1 = MLP(projection_size, projection_size, 512).to(self.device)
        self.online_predictor2 = MLP(projection_size, projection_size, 512).to(self.device)

        # send a mock image tensor to instantiate singleton parameters
        self.forward(torch.randn(2, 3, image_size, image_size).to(self.device))
Example #19
def get_train_augmentation_transforms(
    p_flip_vertical=0.5,
    p_flip_horizontal=0.5,
    max_rotation=10.0,
    max_zoom=1.1,
    max_warp=0.2,
    p_affine=0.75,
    max_lighting=0.2,
    p_lighting=0.75,
):
    """
    Build a set of pytorch image Transforms to use during training:

        p_flip_vertical: probability of a vertical flip
        p_flip_horizontal: probability of a horizontal flip
        max_rotation: maximum rotation angle in degrees
        max_zoom: maximum zoom level
        max_warp: perspective warping scale (from 0.0 to 1.0)
        p_affine: probability of rotation, zoom and perspective warping
        max_lighting: maximum scaling of brightness and contrast
        p_lighting: probability of the brightness/contrast jitter
    """
    return [
        kaug.RandomVerticalFlip(p=p_flip_vertical),
        kaug.RandomHorizontalFlip(p=p_flip_horizontal),
        kaug.RandomAffine(p=p_affine, degrees=max_rotation, scale=(1.0, max_zoom)),
        kaug.RandomPerspective(p=p_affine, distortion_scale=max_warp),
        kaug.ColorJitter(p=p_lighting, brightness=max_lighting, contrast=max_lighting),
        # TODO: the kornia transforms work on batches and so by default they
        # add a batch dimension. Until I work out how to apply transform by
        # batches (and only while training) I will just keep this here to
        # remove the batch dimension again
        GetItemTransform(),
    ]
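A sketch of how the returned list might be consumed (GetItemTransform is specific to the surrounding repo, so it is dropped here; assumes torch and torch.nn as nn are imported alongside the kaug alias):

tfms = get_train_augmentation_transforms()[:-1]  # drop GetItemTransform
pipeline = nn.Sequential(*tfms)
out = pipeline(torch.rand(1, 3, 128, 128))       # 1 x 3 x 128 x 128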
Example #20
 def test_construction(self, same_on_batch, return_transform, keepdim):
     K.ImageSequential(
         K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
         K.RandomAffine(360, p=1.0),
         same_on_batch=same_on_batch,
         return_transform=return_transform,
         keepdim=keepdim,
     )
Example #21
def augm(t, arg, device):
    t = K.ColorJitter(arg.brightness, arg.contrast, arg.saturation, arg.hue)(t)
    random_tensor = 1. + torch.rand(size=[1], dtype=t.dtype, device=device)
    binary_tensor = torch.floor(random_tensor)

    augmented = binary_tensor * t + (1 - binary_tensor) * (1 - t)
    return augmented
Example #22
 def test_jit(self, device, dtype):
     B, C, H, W = 2, 3, 4, 4
     img = torch.ones(B, C, H, W, device=device, dtype=dtype)
     op = K.AugmentationSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
                                   K.RandomAffine(360, p=1.0),
                                   same_on_batch=True)
     op_jit = torch.jit.script(op)
     assert_close(op(img), op_jit(img))
Example #23
 def test_exception(self, error_param):
     with pytest.raises(Exception):  # AssertionError or NotImplementedError
         K.PatchSequential(
             K.ImageSequential(
                 K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                 K.RandomPerspective(0.2, p=0.5),
                 K.RandomSolarize(0.1, 0.1, p=0.5),
             ),
             K.ColorJitter(0.1, 0.1, 0.1, 0.1),
             K.ImageSequential(
                 K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
                 K.RandomPerspective(0.2, p=0.5),
                 K.RandomSolarize(0.1, 0.1, p=0.5),
             ),
             K.ColorJitter(0.1, 0.1, 0.1, 0.1),
             **error_param,
         )
Example #24
def augm(t, arg):
    t = K.ColorJitter(arg.brightness_var, arg.contrast_var, arg.saturation_var, arg.hue_var)(t)
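    # floor(1 - p_flip + U[0,1)) is 0 with probability p_flip and 1 otherwise,
    # so the color inversion (1 - t) below is applied with probability p_flip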
    random_tensor = 1. - arg.p_flip + torch.rand(size=[1], dtype=t.dtype)
    binary_tensor = torch.floor(random_tensor)
    random_tensor, binary_tensor = random_tensor.to(arg.device), binary_tensor.to(arg.device)

    augmented = binary_tensor * t + (1 - binary_tensor) * (1 - t)
    return augmented
Example #25
    def __init__(
        self,
        net,
        image_size,
        hidden_layer=-2,
        project_hidden=True,
        project_dim=128,
        augment_both=True,
        use_nt_xent_loss=False,
        augment_fn=None,
        use_bilinear=False,
        use_momentum=False,
        momentum_value=0.999,
        key_encoder=None,
        temperature=0.1,
        fp16=False,
    ):
        super().__init__()
        self.net = OutputHiddenLayer(net, layer=hidden_layer)

        DEFAULT_AUG = nn.Sequential(
            RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
            augs.RandomGrayscale(p=0.2),
            augs.RandomHorizontalFlip(),
            RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
            augs.RandomResizedCrop((image_size, image_size)),
        )

        self.augment = default(augment_fn, DEFAULT_AUG)

        self.augment_both = augment_both

        self.temperature = temperature
        self.use_nt_xent_loss = use_nt_xent_loss

        self.project_hidden = project_hidden
        self.projection = None
        self.project_dim = project_dim

        self.use_bilinear = use_bilinear
        self.bilinear_w = None

        self.use_momentum = use_momentum
        self.ema_updater = EMA(momentum_value)
        self.key_encoder = key_encoder

        # for accumulating queries and keys across calls
        self.queries = None
        self.keys = None

        self.fp16 = fp16

        # send a mock image tensor to instantiate parameters
        init = torch.randn(1, 3, image_size, image_size, device="cuda")
        if self.fp16:
            init = init.half()
        self.forward(init)
Example #26
 def __init__(self, im_size=224, device=torch.device('cuda:0')):
     super().__init__()
     self.mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
     self.std = torch.tensor([0.229, 0.224, 0.225]).to(device)
     self.aug = torch.nn.Sequential(
         kornia.geometry.transform.Resize(int(im_size * 1.2)),
         Kaug.RandomCrop((im_size, im_size), padding=8),
         Kaug.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
         Kaug.RandomHorizontalFlip(),
         Kaug.Normalize(mean=self.mean, std=self.std))
Example #27
 def __init__(self, cutn):
     super().__init__()
     self.cutn = cutn
     self.augs = nn.Sequential(
         K.RandomHorizontalFlip(p=0.5),
         K.ColorJitter(hue=0.01, saturation=0.01, p=0.7),
         #K.RandomSolarize(0.01, 0.01, p=0.7),
         K.RandomSharpness(0.3, p=0.4),
         K.RandomAffine(degrees=30, translate=0.1, p=0.8, padding_mode='border'),
         K.RandomPerspective(0.2, p=0.4), )
     self.noise_fac = 0.1
Example #28
 def __init__(self, opt):
     super().__init__()
     self.wrapped_dataset = create_dataset(opt['dataset'])
     augmentations = [
         RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
         augs.RandomGrayscale(p=0.2),
         RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1)
     ]
     self.aug = nn.Sequential(*augmentations)
     self.rrc = RandomSharedRegionCrop(opt['latent_multiple'],
                                       opt_get(opt, ['jitter_range'], 0))
Example #29
    def __init__(
        self,
        net,
        image_size,
        hidden_layer = -2,
        projection_size = 256,
        projection_hidden_size = 2048,
        augment_fn = None,
        augment_fn2 = None,
        moving_average_decay = 0.99,
        ppm_num_layers = 1,
        ppm_gamma = 2,
        distance_thres = 0.1, # the paper uses 0.7, but that leads to nearly all positive hits. need clarification on how the coordinates are normalized before distance calculation.
        similarity_temperature = 0.3,
        alpha = 1.
    ):
        super().__init__()

        # default SimCLR augmentation

        DEFAULT_AUG = nn.Sequential(
            RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
            augs.RandomGrayscale(p=0.2),
            augs.RandomHorizontalFlip(),
            RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
            augs.RandomResizedCrop((image_size, image_size)),
            augs.Normalize(mean=torch.tensor([0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))
        )

        self.augment1 = default(augment_fn, DEFAULT_AUG)
        self.augment2 = default(augment_fn2, self.augment1)

        self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer=hidden_layer)

        self.target_encoder = None
        self.target_ema_updater = EMA(moving_average_decay)

        self.distance_thres = distance_thres
        self.similarity_temperature = similarity_temperature
        self.alpha = alpha

        self.propagate_pixels = PPM(
            chan = projection_size,
            num_layers = ppm_num_layers,
            gamma = ppm_gamma
        )

        # get device of network and make wrapper same device
        device = get_module_device(net)
        self.to(device)

        # send a mock image tensor to instantiate singleton parameters
        self.forward(torch.randn(2, 3, image_size, image_size, device=device))
Example #30
def default_aug(image_size: Tuple[int, int] = (360, 360)) -> nn.Module:
    return nn.Sequential(
        aug.ColorJitter(contrast=0.1, brightness=0.1, saturation=0.1, p=0.8),
        aug.RandomVerticalFlip(),
        aug.RandomHorizontalFlip(),
        RandomApply(filters.GaussianBlur2d((3, 3), (0.5, 0.5)), p=0.1),
        aug.RandomResizedCrop(size=image_size, scale=(0.5, 1)),
        aug.Normalize(
            mean=torch.tensor([0.485, 0.456, 0.406]),
            std=torch.tensor([0.229, 0.224, 0.225]),
        ),
    )
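A possible usage of this default pipeline (assuming aug and filters alias kornia.augmentation and kornia.filters, as elsewhere on this page):

pipeline = default_aug(image_size=(360, 360))
imgs = torch.rand(2, 3, 360, 360)
out = pipeline(imgs)   # 2 x 3 x 360 x 360: jittered, flipped, cropped, normalized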