Code Example #1
File: car_lib_test.py  Project: galv/lingvo-copy
    def testFarthestPointSamplerPadding(self):
        points = tf.constant([
            [[0, 1, 1], [1, 1, 1], [2, 1, 1], [3, 1, 1], [4, 1, 1], [5, 1, 1]],
            [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
            [[0, 2, 3], [1, 2, 3], [2, 2, 3], [3, 2, 3], [4, 2, 3], [5, 2, 3]],
            [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
        ],
                             dtype=tf.float32)

        padding = tf.constant([[0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0],
                               [1, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1]],
                              dtype=tf.float32)

        np_expected_selected_idx = np.array(
            [[0, 1, 2, 3], [0, 1, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4]],
            dtype=np.int32)

        num_points = 4
        selected_idx, _ = car_lib.FarthestPointSampler(points, padding,
                                                       num_points)

        with self.session():
            np_selected_idx = self.evaluate(selected_idx)
            np_selected_idx.sort(axis=1)
            self.assertAllEqual(np_selected_idx, np_expected_selected_idx)
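
Note: the tests in this listing exercise car_lib.FarthestPointSampler(points, padding, num_points), which returns per-batch indices of the sampled points (selected_idx, shape [N, num_points]) and, for every input point, the index of its nearest sampled point (closest_idx). As a rough illustration of what the padding test above checks, here is a minimal NumPy sketch of farthest point sampling with padding; it is not the lingvo implementation, and the helper name farthest_point_sample_np is made up for this note.

import numpy as np

def farthest_point_sample_np(points, padding, num_points):
  """Illustrative farthest point sampling with padding (not the lingvo op).

  points: [N, P, 3] float array of coordinates.
  padding: [N, P] array where 1 marks a padded (invalid) point.
  num_points: number of points to sample per batch element.
  Returns selected indices with shape [N, num_points].
  """
  n, p, _ = points.shape
  selected = np.zeros((n, num_points), dtype=np.int32)
  for b in range(n):
    valid = padding[b] == 0
    # Distance from every point to its nearest already-selected point.
    # Padded points get -inf so they are never picked.
    dist = np.where(valid, np.inf, -np.inf)
    # Start from the first valid point; the real sampler may start randomly.
    current = int(np.argmax(valid))
    for i in range(num_points):
      selected[b, i] = current
      d = np.linalg.norm(points[b] - points[b, current], axis=1)
      dist = np.minimum(dist, np.where(valid, d, -np.inf))
      current = int(np.argmax(dist))
  return selected

On the inputs of testFarthestPointSamplerPadding, this sketch also ends up selecting exactly the un-padded indices of each row, which is what the expected values in the test encode.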
Code Example #2
File: car_lib_test.py  Project: shadowkun/lingvo
 def testFarthestPointSamplerOnePoint(self):
   points = tf.constant([
       [[1, 1, 1, 1]],
       [[2, 2, 2, 2]],
   ], dtype=tf.float32)
   padding = tf.zeros((2, 1), dtype=tf.float32)
   selected_idx, _ = car_lib.FarthestPointSampler(points, padding, 1)
   with self.session() as sess:
     selected_idx = sess.run(selected_idx)
     self.assertAllEqual(selected_idx, [[0], [0]])
Code Example #3
File: car_lib_test.py  Project: shadowkun/lingvo
 def testFarthestPointSamplerInsufficientPoints(self):
   points = tf.constant([
       [[0, 1, 1]],
       [[2, 2, 2]],
   ], dtype=tf.float32)
   padding = tf.zeros((2, 1), dtype=tf.float32)
   with self.session() as sess:
     with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                 r'.*Condition x >= y did not hold.*'):
       sampled_idx, closest_idx = car_lib.FarthestPointSampler(
           points, padding, 2)
       sess.run((sampled_idx, closest_idx))
Code Example #4
File: car_lib_test.py  Project: galv/lingvo-copy
    def _testPooling3D(self, pooling_fn):
        num_points_in = 100
        num_points_out = 10
        batch_size = 8
        num_features = 32
        points = tf.random.uniform(shape=(batch_size, num_points_in, 3),
                                   minval=-1,
                                   maxval=1,
                                   dtype=tf.float32)

        # Note: This max pooling impl is incorrect if the feature range is negative.
        features = tf.random.uniform(shape=(batch_size, num_points_in,
                                            num_features),
                                     minval=0,
                                     maxval=1,
                                     dtype=tf.float32)
        padding = tf.zeros((batch_size, num_points_in), dtype=tf.float32)

        pooling_idx, closest_idx = car_lib.FarthestPointSampler(
            points, padding, num_points_out)

        pooled_points, pooled_features = pooling_fn(points=points,
                                                    point_features=features,
                                                    pooling_idx=pooling_idx,
                                                    closest_idx=closest_idx)

        with self.session():
            [
                np_points, np_features, np_pooling_idx, np_closest_idx,
                np_pooled_points, np_pooled_features
            ] = self.evaluate([
                points, features, pooling_idx, closest_idx, pooled_points,
                pooled_features
            ])

            for batch_n in range(batch_size):
                # Grab the selected pooling points from our sampler to compare to
                # the output of our pooling.
                expected_pooled_pts = np_points[batch_n,
                                                np_pooling_idx[batch_n, :], :]
                self.assertAllClose(expected_pooled_pts,
                                    np_pooled_points[batch_n, ...])

                np_batch_features = np_features[batch_n, ...]
                for idx in range(num_points_out):
                    in_group = np_closest_idx[batch_n, :] == idx
                    expected_max = np.max(np_batch_features[in_group], axis=0)
                    self.assertAllClose(expected_max,
                                        np_pooled_features[batch_n, idx])
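
The feature check in the inner loop above amounts to a group-wise max pool: each input point belongs to the sampled point given by closest_idx, and the pooled feature of each sampled point is the element-wise max over its group. A small NumPy sketch of that reference computation follows (illustration of the check only, not the pooling implementation under test; the helper name is made up).

import numpy as np

def group_max_pool_np(features, closest_idx, num_points_out):
  """Element-wise max of features over groups defined by closest_idx.

  features: [P, F] features for one batch element.
  closest_idx: [P] index in [0, num_points_out) of each point's nearest sample.
  Returns pooled features with shape [num_points_out, F]. Assumes every group
  is non-empty (each sampled point is at least its own nearest sample).
  """
  pooled = np.zeros((num_points_out, features.shape[1]), dtype=features.dtype)
  for idx in range(num_points_out):
    in_group = closest_idx == idx
    # Mirrors the assertion in the test: max over the group's features.
    pooled[idx] = np.max(features[in_group], axis=0)
  return pooled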
Code Example #5
File: car_lib_test.py  Project: shadowkun/lingvo
 def testFarthestPointSamplerGatherPoints(self):
   points = tf.constant([
       [[0, 1, 1], [1, 1, 1], [2, 1, 1], [3, 1, 1], [4, 1, 1], [5, 1, 1]],
       [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
       [[0, 2, 3], [1, 2, 3], [2, 2, 3], [3, 2, 3], [4, 2, 3], [5, 2, 3]],
       [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
   ], dtype=tf.float32)  # pyformat: disable
   padding = tf.zeros((4, 6), dtype=tf.float32)
   n = 4
   num_points = 3
   selected_idx, _ = car_lib.FarthestPointSampler(points, padding, num_points)
   gather_indices = tf.stack([
       tf.tile(tf.expand_dims(tf.range(n), 1), [1, num_points]), selected_idx
   ],
                             axis=2)
   sampled_points = tf.gather_nd(points, gather_indices)
   with self.session() as sess:
     sampled_points = sess.run(sampled_points)
     self.assertEqual(sampled_points.shape, (n, num_points, 3))
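
The tf.stack / tf.gather_nd construction above builds explicit (batch, point) index pairs. Assuming a TensorFlow version where tf.gather supports batch_dims (1.14 and later), an equivalent and shorter gather is:

# selected_idx: [n, num_points] per-batch point indices, points: [n, 6, 3].
sampled_points = tf.gather(points, selected_idx, batch_dims=1)
# sampled_points: [n, num_points, 3], the same result as the gather_nd above.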
Code Example #6
File: car_lib_test.py  Project: shadowkun/lingvo
 def testFarthestPointSamplerSeeded(self):
   points = tf.constant([
       [[0, 1, 1], [1, 1, 1], [2, 1, 1], [3, 1, 1], [4, 1, 1], [5, 1, 1]],
       [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
       [[0, 2, 3], [1, 2, 3], [2, 2, 3], [3, 2, 3], [4, 2, 3], [5, 2, 3]],
       [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
   ], dtype=tf.float32)  # pyformat: disable
   padding = tf.zeros((4, 6), dtype=tf.float32)
   selected_idx, closest_idx = car_lib.FarthestPointSampler(
       points, padding, 3, num_seeded_points=2)
   with self.session() as sess:
     selected_idx, closest_idx = sess.run([selected_idx, closest_idx])
     # First two selected points are seeded.
     self.assertTrue(np.all(selected_idx[:, 0] == 0))
     self.assertTrue(np.all(selected_idx[:, 1] == 1))
     # Third point is the last point since it's farthest.
     self.assertTrue(np.all(selected_idx[:, 2] == 5))
     # Closest indices should either be 0, 1 or 2 since we picked 3 points.
     self.assertTrue(
         np.all((closest_idx >= 0) & (closest_idx < 3)),
         msg='Closest index must be among selected indices.')
Code Example #7
File: car_lib_test.py  Project: shadowkun/lingvo
  def testFarthestPointSamplerSelectMinMax(self):
    points = tf.constant([
        [[0, 1, 1], [1, 1, 1], [2, 1, 1], [3, 1, 1], [4, 1, 1], [5, 1, 1]],
        [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
        [[0, 2, 3], [1, 2, 3], [2, 2, 3], [3, 2, 3], [4, 2, 3], [5, 2, 3]],
        [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
    ], dtype=tf.float32)  # pyformat: disable
    padding = tf.zeros((4, 6), dtype=tf.float32)
    selected_idx, closest_idx = car_lib.FarthestPointSampler(points, padding, 2)
    with self.session() as sess:
      selected_idx, closest_idx = sess.run([selected_idx, closest_idx])
      for batch_idx in range(4):
        self.assertIn(
            selected_idx[batch_idx, 1], [0, 5],
            msg=('The second selected point must be one of the end '
                 'points corresponding to index 0 or 5.'))

      # Closest indices should either be 0 or 1 since we picked 2 points.
      self.assertTrue(
          np.all((closest_idx >= 0) & (closest_idx < 2)),
          msg='Closest index must be among selected indices.')
Code Example #8
File: car_lib_test.py  Project: galv/lingvo-copy
    def testFarthestPointSamplerAllPoints(self):
        points = tf.constant([
            [[0, 1, 1], [1, 1, 1], [2, 1, 1], [3, 1, 1], [4, 1, 1], [5, 1, 1]],
            [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
            [[0, 2, 3], [1, 2, 3], [2, 2, 3], [3, 2, 3], [4, 2, 3], [5, 2, 3]],
            [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [5, 2, 1]],
        ],
                             dtype=tf.float32)  # pyformat: disable
        padding = tf.zeros((4, 6), dtype=tf.float32)
        sampled_idx, closest_idx = car_lib.FarthestPointSampler(
            points, padding, 6)
        with self.session():
            sampled_idx, closest_idx = self.evaluate(
                [sampled_idx, closest_idx])
            for batch_n in range(4):
                self.assertSetEqual(set(sampled_idx[batch_n, :]),
                                    set(np.arange(6)),
                                    msg='All points should be selected.')
                for point_idx in range(6):
                    # For each selected point in sampled_idx, we verify that the
                    # closest_idx assigned to that point matches itself. This is done by
                    # finding the location of the point in sampled_idx (which is shuffled
                    # during sampling). The value of the point in closest_idx should match
                    # the index assigned to the point in sampled_idx.
                    expected_closest_idx = None
                    # location refers to the index where the point appears in sampled_idx.
                    # This should be what closest_idx refers to.
                    for location, sample_idx in enumerate(
                            sampled_idx[batch_n, :]):
                        if point_idx == sample_idx:
                            expected_closest_idx = location
                            break

                    self.assertIsNotNone(
                        expected_closest_idx,
                        'Point not found in sampled_idx result.')
                    self.assertEqual(
                        closest_idx[batch_n][point_idx],
                        expected_closest_idx,
                        msg='Closest index should be the point itself.')
Code Example #9
    def FProp(self, theta, input_data):
        """Apply projection to inputs.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      input_data: A NestedMap object containing 'points', 'features', 'padding'
        Tensors, all of type tf.float32.
        'points': Shape [N, P1, 3]
        'features': Shape [N, P1, F]
        'padding': Shape [N, P1] where 0 indicates real, 1 indicates padded.

    Returns:
      A NestedMap consisting of the following two NestedMaps,
        grouped_points: consists of the grouped points, features and padding.
        query_points: consists of the sampled points and padding.
    """

        p = self.params
        features = input_data.features
        n, p1, c = py_utils.GetShape(features)
        points = py_utils.HasShape(input_data.points, [n, p1, 3])
        padding = py_utils.HasShape(input_data.padding, [n, p1])

        # Sampling
        sampled_idx, _ = car_lib.FarthestPointSampler(
            points, padding, num_sampled_points=p.num_samples)
        query_points = car_lib.MatmulGather(points,
                                            tf.expand_dims(sampled_idx, -1))
        query_points = tf.squeeze(query_points, -2)

        # Grouping
        grouped_idx, grouped_padding = car_lib.NeighborhoodIndices(
            points,
            query_points,
            p.group_size,
            points_padding=padding,
            max_distance=p.ball_radius,
            sample_neighbors_uniformly=p.sample_neighbors_uniformly)
        grouped_points = car_lib.MatmulGather(points, grouped_idx)
        # Normalize the grouped points based on the location of the query point.
        grouped_points -= tf.expand_dims(query_points, -2)
        grouped_features = car_lib.MatmulGather(features, grouped_idx)

        # Get the padding for the query points.
        query_padding = tf.batch_gather(padding, sampled_idx)

        # Verify the shapes of output tensors.
        query_points = py_utils.HasShape(query_points, [n, p.num_samples, 3])
        query_padding = py_utils.HasShape(query_padding, [n, p.num_samples])
        grouped_features = py_utils.HasShape(
            grouped_features, [n, p.num_samples, p.group_size, c])
        grouped_padding = py_utils.HasShape(grouped_padding,
                                            [n, p.num_samples, p.group_size])

        output_grouped_points = py_utils.NestedMap(points=grouped_points,
                                                   features=grouped_features,
                                                   padding=grouped_padding)
        output_query = py_utils.NestedMap(points=query_points,
                                          padding=query_padding)
        output_map = py_utils.NestedMap({
            'grouped_points': output_grouped_points,
            'query_points': output_query
        })
        return output_map
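
Putting the docstring and the HasShape checks together: this FProp consumes a NestedMap with points [N, P1, 3], features [N, P1, F], and padding [N, P1], and returns query_points (the sampled centers) plus grouped_points (their ball neighborhoods). Below is a hedged usage sketch; `layer` stands in for an already-built instance of whatever lingvo layer defines this FProp (its class is not shown in the snippet), the shapes are copied or inferred from the checks above, and the numbers are arbitrary.

# Illustrative shapes only; `layer` is assumed to be an already-built instance.
n, p1, f = 2, 128, 16
input_data = py_utils.NestedMap(
    points=tf.random.uniform((n, p1, 3)),
    features=tf.random.uniform((n, p1, f)),
    padding=tf.zeros((n, p1), dtype=tf.float32))
out = layer.FProp(layer.theta, input_data)
# out.query_points.points:     [n, p.num_samples, 3]
# out.query_points.padding:    [n, p.num_samples]
# out.grouped_points.points:   [n, p.num_samples, p.group_size, 3]
# out.grouped_points.features: [n, p.num_samples, p.group_size, f]
# out.grouped_points.padding:  [n, p.num_samples, p.group_size]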