Example #1
 def test_inverse(self):
     x = torch.tensor([0.1])
     y = torch.tensor([0.2, 2.0])
     z = torch.tensor([0.3, 3.0])
     t = Scale(x, y, z)
     im = t.inverse()._matrix
     im_2 = t._matrix.inverse()
     im_comp = t.get_matrix().inverse()
     self.assertTrue(torch.allclose(im, im_comp))
     self.assertTrue(torch.allclose(im, im_2))
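
All of the examples on this page are methods of a single unittest test case and rely on the same imports. Below is a minimal sketch of the assumed scaffolding; the module paths, the class name, and the assertClose stand-in are assumptions based on PyTorch3D's public API and test utilities, not copied from the source.

 import unittest

 import torch
 from pytorch3d.transforms import (
     Rotate,
     Scale,
     Transform3d,
     Translate,
     random_rotations,
     so3_exponential_map,
 )


 class TestScaleExamples(unittest.TestCase):
     # stand-in for the project's TestCaseMixin.assertClose used in Example #9
     def assertClose(self, actual, expected, atol=1e-5):
         self.assertTrue(torch.allclose(actual, expected, atol=atol))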
Example #2
 def test_broadcast_compose(self):
     t1 = Scale(0.1, 0.1, 0.1)
     N = 10
     scale_n = torch.tensor([0.3] * N)
     tN = Scale(scale_n)
     t1N = t1.compose(tN)
     self.assertTrue(t1._matrix.shape == (1, 4, 4))
     self.assertTrue(tN._matrix.shape == (N, 4, 4))
     self.assertTrue(t1N.get_matrix().shape == (N, 4, 4))
     t11 = t1.compose(t1)
     self.assertTrue(t11.get_matrix().shape == (1, 4, 4))
Example #3
 def test_broadcast_compose_fail(self):
     # Two transforms with different batch dimensions N and M can only be
     # composed if either N or M is 1
     N = 10
     M = 20
     scale_n = torch.tensor([0.3] * N)
     tN = Scale(scale_n)
     x = torch.tensor([0.2] * M)
     y = torch.tensor([0.3] * M)
     z = torch.tensor([0.4] * M)
     tM = Translate(x, y, z)
     with self.assertRaises(ValueError):
         t = tN.compose(tM)
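         # the N=10 vs M=20 mismatch is raised when get_matrix() materializes the composition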
         t.get_matrix()
Example #4
 def test_three_mixed_broadcast_grad(self):
     x = 0.1
     y = torch.tensor(0.2, requires_grad=True)
     z = torch.tensor([0.3, 3.0], requires_grad=True)
     t = Scale(x, y, z)
     matrix = torch.tensor(
         [
             [
                 [0.1, 0.0, 0.0, 0.0],
                 [0.0, 0.2, 0.0, 0.0],
                 [0.0, 0.0, 0.3, 0.0],
                 [0.0, 0.0, 0.0, 1.0],
             ],
             [
                 [0.1, 0.0, 0.0, 0.0],
                 [0.0, 0.2, 0.0, 0.0],
                 [0.0, 0.0, 3.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0],
             ],
         ],
         dtype=torch.float32,
     )
     self.assertTrue(torch.allclose(t._matrix, matrix))
     t._matrix.sum().backward()
     self.assertTrue(hasattr(y, "grad"))
     self.assertTrue(hasattr(z, "grad"))
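     # y scales the (1, 1) entry of both broadcast matrices, so d(sum)/dy = 2;
     # each element of z appears in exactly one matrix, so its gradient is 1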
     y_grad = torch.tensor(2.0)
     z_grad = torch.tensor([1.0, 1.0])
     self.assertTrue(torch.allclose(y.grad, y_grad))
     self.assertTrue(torch.allclose(z.grad, z_grad))
Example #5
 def test_inverse(self, batch_size=5):
     device = torch.device("cuda:0")

     # generate a random chain of transforms
     for _ in range(10):  # 10 different tries

         # list of transform matrices
         ts = []

         for i in range(10):
             choice = float(torch.rand(1))
             if choice <= 1.0 / 3.0:
                 t_ = Translate(
                     torch.randn((batch_size, 3), dtype=torch.float32, device=device),
                     device=device,
                 )
             elif choice <= 2.0 / 3.0:
                 t_ = Rotate(
                     so3_exponential_map(
                         torch.randn((batch_size, 3), dtype=torch.float32, device=device)
                     ),
                     device=device,
                 )
             else:
                 rand_t = torch.randn((batch_size, 3), dtype=torch.float32, device=device)
                 rand_t = rand_t.sign() * torch.clamp(rand_t.abs(), 0.2)
                 t_ = Scale(rand_t, device=device)
             ts.append(t_._matrix.clone())

             if i == 0:
                 t = t_
             else:
                 t = t.compose(t_)

         # generate the inverse transformation in several possible ways
         m1 = t.inverse(invert_composed=True).get_matrix()
         m2 = t.inverse(invert_composed=True)._matrix
         m3 = t.inverse(invert_composed=False).get_matrix()
         m4 = t.get_matrix().inverse()

         # compute the inverse explicitly ...
         m5 = torch.eye(4, dtype=torch.float32, device=device)
         m5 = m5[None].repeat(batch_size, 1, 1)
         for t_ in ts:
             m5 = torch.bmm(torch.inverse(t_), m5)

         # assert all same
         for m in (m1, m2, m3, m4):
             self.assertTrue(torch.allclose(m, m5, atol=1e-3))
Example #6
 def test_broadcast_transform_normals(self):
     t1 = Scale(0.1, 0.1, 0.1)
     N = 10
     P = 7
     M = 20
     x = torch.tensor([0.2] * N)
     y = torch.tensor([0.3] * N)
     z = torch.tensor([0.4] * N)
     tN = Translate(x, y, z)
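     # a single-element transform broadcasts over any batch of normals, while an
     # N-element transform lifts unbatched or single-batch normals to N batches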
     p1 = t1.transform_normals(torch.randn(P, 3))
     self.assertTrue(p1.shape == (P, 3))
     p2 = t1.transform_normals(torch.randn(1, P, 3))
     self.assertTrue(p2.shape == (1, P, 3))
     p3 = t1.transform_normals(torch.randn(M, P, 3))
     self.assertTrue(p3.shape == (M, P, 3))
     p4 = tN.transform_normals(torch.randn(P, 3))
     self.assertTrue(p4.shape == (N, P, 3))
     p5 = tN.transform_normals(torch.randn(1, P, 3))
     self.assertTrue(p5.shape == (N, P, 3))
Example #7
 def test_three_mixed_scalar(self):
     t = Scale(torch.tensor(0.1), 0.2, torch.tensor(0.3))
     matrix = torch.tensor(
         [[
             [0.1, 0.0, 0.0, 0.0],
             [0.0, 0.2, 0.0, 0.0],
             [0.0, 0.0, 0.3, 0.0],
             [0.0, 0.0, 0.0, 1.0],
         ]],
         dtype=torch.float32,
     )
     self.assertTrue(torch.allclose(t._matrix, matrix))
Example #8
 def test_single_torch_scalar(self):
     t = Scale(torch.tensor(0.1))
     matrix = torch.tensor(
         [[
             [0.1, 0.0, 0.0, 0.0],
             [0.0, 0.1, 0.0, 0.0],
             [0.0, 0.0, 0.1, 0.0],
             [0.0, 0.0, 0.0, 1.0],
         ]],
         dtype=torch.float32,
     )
     self.assertTrue(torch.allclose(t._matrix, matrix))
Example #9
 def test_stack(self):
     rotations = random_rotations(3)
     transform3 = Transform3d().rotate(rotations).translate(
         torch.full((3, 3), 0.3))
     transform1 = Scale(37)
     transform4 = transform1.stack(transform3)
     self.assertEqual(len(transform1), 1)
     self.assertEqual(len(transform3), 3)
     self.assertEqual(len(transform4), 4)
     self.assertClose(
         transform4.get_matrix(),
         torch.cat([transform1.get_matrix(),
                    transform3.get_matrix()]),
     )
     points = torch.rand(4, 5, 3)
     new_points_expect = torch.cat([
         transform1.transform_points(points[:1]),
         transform3.transform_points(points[1:]),
     ])
     new_points = transform4.transform_points(points)
     self.assertClose(new_points, new_points_expect)
Example #10
 def test_multiple_broadcast_compose(self):
     t1 = Scale(0.1, 0.1, 0.1)
     t2 = Scale(0.2, 0.2, 0.2)
     N = 10
     scale_n = torch.tensor([0.3] * N)
     tN = Scale(scale_n)
     t1N2 = t1.compose(tN.compose(t2))
     composed_mat = t1N2.get_matrix()
     self.assertTrue(composed_mat.shape == (N, 4, 4))
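     # composed scales multiply along the diagonal: 0.1 * 0.3 * 0.2 = 0.006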
     expected_mat = torch.eye(3, dtype=torch.float32) * 0.3 * 0.2 * 0.1
     self.assertTrue(torch.allclose(composed_mat[0, :3, :3], expected_mat))
Example #11
 def test_single_matrix(self):
     xyz = torch.tensor([[0.1, 0.2, 0.3], [1.0, 2.0, 3.0]])
     t = Scale(xyz)
     matrix = torch.tensor(
         [
             [
                 [0.1, 0.0, 0.0, 0.0],
                 [0.0, 0.2, 0.0, 0.0],
                 [0.0, 0.0, 0.3, 0.0],
                 [0.0, 0.0, 0.0, 1.0],
             ],
             [
                 [1.0, 0.0, 0.0, 0.0],
                 [0.0, 2.0, 0.0, 0.0],
                 [0.0, 0.0, 3.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0],
             ],
         ],
         dtype=torch.float32,
     )
     self.assertTrue(torch.allclose(t._matrix, matrix))
Example #12
 def test_three_vector_broadcast(self):
     x = torch.tensor([0.1])
     y = torch.tensor([0.2, 2.0])
     z = torch.tensor([0.3, 3.0])
     t = Scale(x, y, z)
     matrix = torch.tensor(
         [
             [
                 [0.1, 0.0, 0.0, 0.0],
                 [0.0, 0.2, 0.0, 0.0],
                 [0.0, 0.0, 0.3, 0.0],
                 [0.0, 0.0, 0.0, 1.0],
             ],
             [
                 [0.1, 0.0, 0.0, 0.0],
                 [0.0, 2.0, 0.0, 0.0],
                 [0.0, 0.0, 3.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0],
             ],
         ],
         dtype=torch.float32,
     )
     self.assertTrue(torch.allclose(t._matrix, matrix))
Example #13
 def test_compose_fail(self):
     # compose() only accepts Transform3d objects
     t1 = Scale(0.1, 0.1, 0.1)
     with self.assertRaises(ValueError):
         t1.compose(torch.randn(100))
Example #14
 def test_transform_points_fail(self):
     t1 = Scale(0.1, 0.1, 0.1)
     P = 7
     with self.assertRaises(ValueError):
         t1.transform_points(torch.randn(P))
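
For contrast with Example #14, here is a short sketch of point shapes that transform_points does accept; the output shapes follow the broadcasting behavior exercised in Example #6, and the snippet is illustrative rather than taken from the test suite.

 t1 = Scale(0.1, 0.1, 0.1)
 P = 7
 points = t1.transform_points(torch.randn(P, 3))      # unbatched cloud -> (P, 3)
 batched = t1.transform_points(torch.randn(4, P, 3))  # batch of clouds -> (4, P, 3)
 assert points.shape == (P, 3) and batched.shape == (4, P, 3)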