Example 1
0
 def test_mat34_transform(self):
   """Checks mat34_transform on flat, gridded, and batched vector inputs."""
   eye = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
   mat = [[1.0, 2.0, 3.0, 4.0], [2.0, 3.0, 4.0, 5.0], [3.0, 4.0, 5.0, 6.0]]
   # Sample vectors, each paired with its image under mat.
   origin, m_origin = [0.0, 0.0, 0.0], [4.0, 5.0, 6.0]
   ex, m_ex = [1.0, 0.0, 0.0], [5.0, 7.0, 9.0]
   ey, m_ey = [0.0, 1.0, 0.0], [6.0, 8.0, 10.0]
   ez, m_ez = [0.0, 0.0, 1.0], [7.0, 9.0, 11.0]
   v, m_v = [-10.0, 2.0, -3.0], [-11.0, -21.0, -31.0]
   # A flat list of five vectors.
   vectors = tf.constant([origin, ex, ey, ez, v])
   self.assertAllEqual([origin, ex, ey, ez, v],
                       geometry.mat34_transform(tf.constant(eye), vectors))
   self.assertAllEqual([m_origin, m_ex, m_ey, m_ez, m_v],
                       geometry.mat34_transform(tf.constant(mat), vectors))
   # A 2x2 grid of vectors.
   grid = tf.constant([[ex, ey], [ez, v]])
   self.assertAllEqual([[ex, ey], [ez, v]],
                       geometry.mat34_transform(tf.constant(eye), grid))
   self.assertAllEqual([[m_ex, m_ey], [m_ez, m_v]],
                       geometry.mat34_transform(tf.constant(mat), grid))
   # Batched transforms: two batches of three vectors, one matrix per batch.
   batched = tf.constant([[origin, ex, ey], [ez, v, v]])
   self.assertAllEqual(
       [[origin, ex, ey], [m_ez, m_v, m_v]],
       geometry.mat34_transform(tf.constant([eye, mat]), batched))
Example 2
0
 def test_random_homography(self):
     """Checks inverse_homography end-to-end on a batch of random planes.

     NOTE(review): statement order matters below — every tf.random draw
     (including the two _random_camera calls) advances the seeded RNG
     state, so reordering statements would change the sampled data.
     """
     # Test homographies as follows:
     # 1. generate three random points
     # 2. generate two random camera poses (source, target) and intrinsics
     # 3. compute the plane containing the three points
     # 4. project each point using each camera, to get texture coordinates
     # 5. generate the homography for this plane between the two cameras
     # 6. apply the homography to the target texture coords and we should get
     #    the source texture coords.
     # We do this for (a batch of) 1000 different random homographies.
     batch = 1000
     tf.random.set_seed(2345)
     points = tf.random.uniform([batch, 3, 3], minval=-100, maxval=100)
     source_pose, source_intrinsics = _random_camera(batch)
     target_pose, target_intrinsics = _random_camera(batch)
     # Bring the world-space points into each camera's coordinate frame.
     source_points = geometry.mat34_transform(source_pose, points)
     target_points = geometry.mat34_transform(target_pose, points)
     # Compute the plane equation in source camera space.
     p0, p1, p2 = tf.unstack(source_points, axis=-2)
     normal = tf.linalg.cross(p1 - p0, p2 - p0)
     offset = -tf.math.reduce_sum(normal * p0, axis=-1, keepdims=True)
     plane = tf.concat([normal, offset], axis=-1)
     # Now we're ready for the homography.
     homography = geometry.inverse_homography(source_pose,
                                              source_intrinsics,
                                              target_pose,
                                              target_intrinsics, plane)
     # Project each camera-space point to texture coordinates; the extra
     # axis broadcasts one set of intrinsics across the 3 points per batch.
     source_coords = geometry.camera_to_texture_coordinates(
         source_points, source_intrinsics[Ellipsis, tf.newaxis, :])
     target_coords = geometry.camera_to_texture_coordinates(
         target_points, target_intrinsics[Ellipsis, tf.newaxis, :])
     # Apply-homography expects a 2D grid of coords, so add a dimension:
     source_coords = source_coords[Ellipsis, tf.newaxis, :]
     target_coords = target_coords[Ellipsis, tf.newaxis, :]
     result = geometry.apply_homography(homography, target_coords)
     # Every now and then we get a point very close to a camera plane, which
     # means accuracy will be lower and the test can fail. So we'll zero-out
     # all those points.
     # (The last camera-space component is presumably depth — points with
     # |depth| < 0.1 in either camera are masked out of the comparison.)
     source_bad = tf.abs(source_points[Ellipsis, -1]) < 1e-1
     target_bad = tf.abs(target_points[Ellipsis, -1]) < 1e-1
     valid = 1.0 - tf.cast(tf.math.logical_or(source_bad, target_bad),
                           tf.float32)
     valid = valid[Ellipsis, tf.newaxis, tf.newaxis]
     self.assertAllClose((valid * source_coords), (valid * result),
                         atol=1e-03,
                         rtol=1e-03)
Example 3
0
 def test_mat34_transform_planes(self):
     """Checks mat34_transform_planes preserves incidence under a rigid map."""
     # Four planes and three points are enough to span all possibilities,
     # since everything involved is linear.
     plane_set = tf.convert_to_tensor([[1.0, 2.0, 3.0, 4.0],
                                       [0.5, 0.3, 0.1, 0.0],
                                       [0.0, 1.0, 0.0, 100.0],
                                       [-1.0, 0.0, -1.0, 0.25]])
     point_set = tf.constant(
         [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
     eye = tf.constant(
         [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]])
     rigid = tf.constant(
         [[1.0, 0.0, 0.0, 4.0], [0.0, 0.0, -1.0, 5.0], [0.0, 1.0, 0.0, 6.0]])
     # The identity transform leaves every plane unchanged.
     self.assertAllEqual(
         geometry.mat34_transform_planes(eye, plane_set), plane_set)
     # A transform followed by its inverse is a no-op on planes.
     round_trip = geometry.mat34_transform_planes(
         rigid,
         geometry.mat34_transform_planes(
             geometry.mat34_pose_inverse(rigid), plane_set))
     self.assertAllEqual(round_trip, plane_set)
     # Plane-point dot products are preserved, since rigid doesn't change
     # scale.
     dots_before = tf.matmul(
         geometry.homogenize(point_set), plane_set, transpose_b=True)
     dots_after = tf.matmul(
         geometry.homogenize(geometry.mat34_transform(rigid, point_set)),
         geometry.mat34_transform_planes(rigid, plane_set),
         transpose_b=True)
     self.assertAllClose(dots_before, dots_after)