Example 1
0
    def test_same(self, device, dtype):
        """RGB and pre-converted grayscale inputs must yield identical descriptors."""
        batch, channels, height, width = 1, 3, 64, 64
        patch_size = 16
        image = torch.rand(batch, channels, height, width, device=device, dtype=dtype)
        image_gray = kornia.color.rgb_to_grayscale(image)
        # Two keypoints placed well inside the image bounds.
        kp_centers = torch.tensor(
            [[height / 3.0, width / 3.0], [2.0 * height / 3.0, width / 2.0]],
            device=device,
            dtype=dtype,
        ).view(1, 2, 2)
        kp_scales = torch.tensor(
            [(height + width) / 4.0, (height + width) / 8.0],
            device=device,
            dtype=dtype,
        ).view(1, 2, 1, 1)
        kp_ori = torch.tensor([0.0, 30.0], device=device, dtype=dtype).view(1, 2, 1)
        lafs = kornia.feature.laf_from_center_scale_ori(kp_centers, kp_scales, kp_ori)
        sift = SIFTDescriptor(patch_size).to(device, dtype)
        descs_from_rgb = get_laf_descriptors(image, lafs, sift, patch_size, True)
        descs_from_gray = get_laf_descriptors(image_gray, lafs, sift, patch_size, True)

        patches = extract_patches_from_pyramid(image_gray, lafs, patch_size)
        b, n, ch, ph, pw = patches.size()
        # The descriptor consumes plain [B, CH, H, W] tensors while the patches
        # are [B, N, CH, H, W]; fold the keypoint axis in, then restore it.
        descs_reference = sift(patches.view(b * n, ch, ph, pw)).view(b, n, -1)
        assert_close(descs_from_rgb, descs_reference)
        assert_close(descs_from_gray, descs_reference)
Example 2
0
 def test_gradcheck(self, device):
     """Analytic gradients of the full pipeline must match finite differences."""
     batch, channels, height, width = 1, 1, 32, 32
     patch_size = 16
     image = torch.rand(batch, channels, height, width, device=device)
     image = utils.tensor_to_gradcheck_var(image)  # to var
     pipeline = LocalFeature(
         ScaleSpaceDetector(2),
         LAFDescriptor(SIFTDescriptor(patch_size), patch_size),
     ).to(device, image.dtype)
     # Loose tolerances keep the numerical check stable for this deep pipeline.
     assert gradcheck(pipeline, image, eps=1e-4, atol=1e-4, raise_exception=True)
Example 3
0
    def test_jit(self, device, dtype):
        """TorchScript-compiled matcher must agree with eager execution."""
        batch, channels, height, width = 1, 1, 32, 32
        img0 = torch.rand(batch, channels, height, width, device=device, dtype=dtype)
        img1 = resize(img0, (48, 48))
        data = {"image0": img0, "image1": img1}
        matcher = LocalFeatureMatcher(
            SIFTDescriptor(32), DescriptorMatcher('snn', 0.8)
        ).to(device).eval()
        matcher_jit = torch.jit.script(matcher)

        expected = matcher(data)
        actual = matcher_jit(data)
        # Every output entry from the scripted module must match eager mode.
        for key, value in expected.items():
            assert_close(value, actual[key])
Example 4
0
 def test_same(self, device, dtype):
     """LocalFeature output must equal running detector and descriptor manually."""
     batch, channels, height, width = 1, 1, 64, 64
     patch_size = 16
     image = torch.rand(batch, channels, height, width, device=device, dtype=dtype)
     detector = ScaleSpaceDetector(10)
     descriptor = SIFTDescriptor(patch_size)
     pipeline = LocalFeature(
         detector, LAFDescriptor(descriptor, patch_size)
     ).to(device, dtype)
     lafs, responses, descs = pipeline(image)
     lafs_manual, responses_manual = detector(image)
     assert_close(lafs, lafs_manual)
     assert_close(responses, responses_manual)
     patches = extract_patches_from_pyramid(image, lafs_manual, patch_size)
     b, n, ch, ph, pw = patches.size()
     # The descriptor consumes plain [B, CH, H, W] tensors while the patches
     # are [B, N, CH, H, W]; fold the keypoint axis in, then restore it.
     descs_manual = descriptor(patches.view(b * n, ch, ph, pw)).view(b, n, -1)
     assert_close(descs, descs_manual)
Example 5
0
 def test_smoke(self, device, dtype):
     """Constructing a LocalFeature from a detector and descriptor must succeed."""
     detector = ScaleSpaceDetector(10)
     descriptor = SIFTDescriptor(32)
     pipeline = LocalFeature(detector, descriptor).to(device, dtype)
     assert pipeline is not None