def test_conv_jacobian_points(self, num_points, num_samples, num_features,
                                  batch_size, radius, num_kernel_points,
                                  dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        point_cloud = PointCloud(points, batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        neighborhood.compute_pdf()

        conv_layer = KPConv(num_features[0], num_features[1],
                            num_kernel_points)

        def conv_points(points_in):
            point_cloud._points = points_in
            neighborhood._grid._sorted_points = \
                tf.gather(points_in, grid._sorted_indices)

            conv_result = conv_layer(features, point_cloud,
                                     point_cloud_samples, radius, neighborhood)

            return conv_result

        self.assert_jacobian_is_correct_fn(conv_points, [np.float32(points)],
                                           atol=1e-3,
                                           delta=1e-3)
  def test_neighbors_on_3D_meshgrid_without_gridDS(self,
                                                   num_points_cbrt,
                                                   num_points_samples_cbrt,
                                                   radius,
                                                   expected_num_neighbors):
    num_points = num_points_cbrt**3
    num_samples = num_points_samples_cbrt**3

    points = utils._create_uniform_distributed_point_cloud_3D(
        num_points_cbrt, flat=True)
    batch_ids = np.zeros(num_points)
    points_samples = utils._create_uniform_distributed_point_cloud_3D(
        num_points_samples_cbrt, bb_min=1 / (num_points_samples_cbrt + 1),
        flat=True)
    batch_ids_samples = np.zeros(num_samples)
    point_cloud = PointCloud(points, batch_ids)
    point_cloud_samples = PointCloud(points_samples, batch_ids_samples)

    # without grid
    neigh_ranges, _ = find_neighbors_no_grid(
        point_cloud, point_cloud_samples, radius)
    num_neighbors = np.zeros(num_samples)
    num_neighbors[0] = neigh_ranges[0]
    num_neighbors[1:] = neigh_ranges[1:] - neigh_ranges[:-1]
    expected_num_neighbors = \
        np.ones_like(num_neighbors) * expected_num_neighbors
    self.assertAllEqual(num_neighbors, expected_num_neighbors)
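
# The meshgrid tests above turn cumulative neighbor ranges into per-sample
# counts by differencing. A minimal numpy sketch of that conversion (the
# helper name is illustrative, not part of the library):
import numpy as np

def _counts_from_ranges(neigh_ranges):
    # neigh_ranges[i] is the exclusive end of sample i's neighbor block, so
    # the count for sample i is neigh_ranges[i] - neigh_ranges[i - 1].
    return np.diff(neigh_ranges, prepend=0)

assert np.array_equal(_counts_from_ranges(np.array([2, 5, 5, 9])),
                      np.array([2, 3, 0, 4]))
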
    def test_compute_keys_tf(self, num_points, batch_size, scale, radius,
                             dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=False)
        point_cloud = PointCloud(points, batch_ids)

        #Compute the number of cells in the grid.
        aabb = point_cloud.get_AABB()
        aabb_sizes = aabb._aabb_max - aabb._aabb_min
        batch_num_cells = tf.cast(tf.math.ceil(aabb_sizes / radius), tf.int32)
        total_num_cells = tf.maximum(tf.reduce_max(batch_num_cells, axis=0), 1)

        keys_tf = compute_keys_tf(point_cloud, total_num_cells, radius)
        aabb_min = aabb._aabb_min.numpy()

        aabb_min_per_point = aabb_min[batch_ids, :]
        cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int)
        cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension),
                              total_num_cells)
        cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells)))
        cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0)
        keys = batch_ids * cell_multiplier[0] + \
            np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1)
        # check unsorted keys
        self.assertAllEqual(keys_tf, keys)
    def test_compute_keys_with_sort(self, num_points, batch_size, scale,
                                    radius, dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=False)
        point_cloud = PointCloud(points, batch_ids)
        aabb = point_cloud.get_AABB()
        grid = Grid(point_cloud, radius)

        total_num_cells = grid._num_cells.numpy()
        aabb_min = aabb._aabb_min.numpy()

        aabb_min_per_point = aabb_min[batch_ids, :]
        cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int)
        cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension),
                              total_num_cells)
        cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells)))
        cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0)
        keys = batch_ids * cell_multiplier[0] + \
            np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1)
        # check unsorted keys
        self.assertAllEqual(grid._cur_keys, keys)

        # sort descending
        sorted_keys = np.flip(np.sort(keys))
        # check if the cell keys per point are equal
        self.assertAllEqual(grid._sorted_keys, sorted_keys)
    def test_neighbors_on_3D_meshgrid(self, num_points_cbrt,
                                      num_points_samples_cbrt, radius,
                                      expected_num_neighbors):
        num_points = num_points_cbrt**3
        num_samples = num_points_samples_cbrt**3

        points = utils._create_uniform_distributed_point_cloud_3D(
            num_points_cbrt, flat=True)
        batch_ids = np.zeros(num_points)
        points_samples = utils._create_uniform_distributed_point_cloud_3D(
            num_points_samples_cbrt,
            bb_min=1 / (num_points_samples_cbrt + 1),
            flat=True)
        batch_ids_samples = np.zeros(num_samples)
        point_cloud = PointCloud(points, batch_ids)
        point_cloud_samples = PointCloud(points_samples, batch_ids_samples)
        radius = np.float32(np.repeat([radius], 3))
        grid = Grid(point_cloud, radius)
        neighborhood = Neighborhood(grid, radius, point_cloud_samples)

        neigh_ranges = neighborhood._samples_neigh_ranges
        num_neighbors = np.zeros(num_samples)
        num_neighbors[0] = neigh_ranges[0]
        num_neighbors[1:] = neigh_ranges[1:] - neigh_ranges[:-1]
        expected_num_neighbors = \
            np.ones_like(num_neighbors) * expected_num_neighbors
        self.assertAllEqual(num_neighbors, expected_num_neighbors)
    def test_convolution(self, num_points, num_samples, num_features,
                         batch_size, radius, num_kernel_points, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        # tf
        conv_layer = KPConv(num_features[0], num_features[1],
                            num_kernel_points)
        conv_result_tf = conv_layer(features, point_cloud, point_cloud_samples,
                                    radius, neighborhood)

        # numpy
        neighbor_ids = neighborhood._original_neigh_ids.numpy()
        nb_ranges = neighborhood._samples_neigh_ranges.numpy()
        nb_ranges = np.concatenate(([0], nb_ranges), axis=0)
        kernel_points = conv_layer._kernel_points.numpy()
        sigma = conv_layer._sigma.numpy()

        # extract variables
        weights = conv_layer._weights.numpy()

        features_on_neighbors = features[neighbor_ids[:, 0]]
        # compute distances to kernel points
        point_diff = (points[neighbor_ids[:, 0]] -\
                      point_samples[neighbor_ids[:, 1]])\
            / np.expand_dims(cell_sizes, 0)
        kernel_point_diff = np.expand_dims(point_diff, axis=1) -\
            np.expand_dims(kernel_points, axis=0)
        distances = np.linalg.norm(kernel_point_diff, axis=2)
        # compute linear interpolation weights for features based on distances
        kernel_weights = np.maximum(1 - (distances / sigma), 0)
        weighted_features = np.expand_dims(features_on_neighbors, axis=2) *\
            np.expand_dims(kernel_weights, axis=1)
        # sum over neighbors (integration)
        weighted_features_per_sample = \
            np.zeros([num_samples, num_features[0], num_kernel_points])
        for i in range(num_samples):
            weighted_features_per_sample[i] = \
                np.sum(weighted_features[nb_ranges[i]:nb_ranges[i + 1]],
                       axis=0)
        # convolution with summation over kernel dimension
        conv_result_np = \
            np.matmul(
                weighted_features_per_sample.reshape(
                    -1,
                    num_features[0] * num_kernel_points),
                weights)

        self.assertAllClose(conv_result_tf, conv_result_np, atol=1e-5)
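
# The kernel weighting in the numpy reference above is the linear correlation
# h(d) = max(1 - d / sigma, 0) used by KPConv: a kernel point's influence
# decays linearly with distance and vanishes beyond sigma. A scalar check:
import numpy as np
sigma = 0.5
assert np.isclose(max(1 - 0.25 / sigma, 0), 0.5)  # halfway inside the kernel
assert max(1 - 1.0 / sigma, 0) == 0               # beyond sigma: no influence
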
    def test_basis_proj_jacobian(self, num_points, num_samples, num_features,
                                 batch_size, radius, hidden_size, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        nb_ids = neighborhood._original_neigh_ids
        # tf
        conv_layer = MCConv(num_features[0], num_features[1], dimension, 1,
                            [hidden_size])

        neigh_point_coords = points[nb_ids[:, 0].numpy()]
        center_point_coords = point_samples[nb_ids[:, 1].numpy()]
        kernel_input = (neigh_point_coords - center_point_coords) / radius

        basis_weights_tf = tf.reshape(conv_layer._weights_tf[0],
                                      [dimension, hidden_size])
        basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size])

        basis_neighs = \
            tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) +\
            basis_biases_tf
        basis_neighs = tf.nn.leaky_relu(basis_neighs)

        _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])
        max_num_nb = tf.reduce_max(counts).numpy()

        with self.subTest(name='features'):

            def basis_proj_features(features_in):
                return basis_proj_tf(basis_neighs, features_in,
                                     neighborhood) / (max_num_nb)

            self.assert_jacobian_is_correct_fn(basis_proj_features,
                                               [np.float32(features)],
                                               atol=1e-4,
                                               delta=1e-3)

        with self.subTest(name='neigh_basis'):

            def basis_proj_basis_neighs(basis_neighs_in):
                return basis_proj_tf(basis_neighs_in, features,
                                     neighborhood) / (max_num_nb)

            self.assert_jacobian_is_correct_fn(basis_proj_basis_neighs,
                                               [np.float32(basis_neighs)],
                                               atol=1e-4,
                                               delta=1e-3)
    def test_exceptions_raised_at_construction(self, num_points, msgs):
        points = np.random.rand(num_points)
        batch_ids = np.zeros(num_points)
        with self.assertRaisesRegex(ValueError, msgs[0]):
            _ = PointCloud(points, batch_ids)
        points = np.random.rand(num_points, 3)
        with self.assertRaisesRegex(ValueError, msgs[1]):
            _ = PointCloud(points)
        with self.assertRaisesRegex(AssertionError, msgs[2]):
            _ = PointCloud(points, batch_ids[1:])
    def compute_pdf(points_in):
      point_cloud = PointCloud(points_in, batch_ids, batch_size)
      grid = Grid(point_cloud, cell_sizes)

      point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size)
      neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
      neighborhood.compute_pdf(bandwidths, KDEMode.constant, normalize=True)
      # account for influence of neighborhood size
      _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])
      max_num_nb = tf.cast(tf.reduce_max(counts), tf.float32)
      return neighborhood._pdf / max_num_nb
    def test_grid_datastructure(self, num_points, batch_size, scale, radius,
                                dimension):
        radius = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=True)
        point_cloud = PointCloud(points, batch_ids)
        #Compute the number of cells in the grid.
        aabb = point_cloud.get_AABB()
        aabb_sizes = aabb._aabb_max - aabb._aabb_min
        batch_num_cells = tf.cast(tf.math.ceil(aabb_sizes / radius), tf.int32)
        total_num_cells = tf.maximum(tf.reduce_max(batch_num_cells, axis=0), 1)
        keys = compute_keys(point_cloud, total_num_cells, radius)
        keys = tf.sort(keys, direction='DESCENDING')
        ds_tf = build_grid_ds_tf(keys, total_num_cells, batch_size)

        keys = keys.numpy()
        ds_numpy = np.full(
            (batch_size, total_num_cells[0], total_num_cells[1], 2), 0)
        if dimension == 2:
            cells_per_2D_cell = 1
        elif dimension > 2:
            cells_per_2D_cell = np.prod(total_num_cells[2:])
        for key_iter, key in enumerate(keys):
            curDSIndex = key // cells_per_2D_cell
            yIndex = curDSIndex % total_num_cells[1]
            auxInt = curDSIndex // total_num_cells[1]
            xIndex = auxInt % total_num_cells[0]
            curbatch_ids = auxInt // total_num_cells[0]

            if key_iter == 0:
                ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter
            else:
                prevKey = keys[key_iter - 1]
                prevDSIndex = prevKey // cells_per_2D_cell
                if prevDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter

            nextIter = key_iter + 1
            if nextIter >= len(keys):
                ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys)
            else:
                nextKey = keys[key_iter + 1]
                nextDSIndex = nextKey // cells_per_2D_cell
                if nextDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1

        # check if the data structure is equal
        self.assertAllEqual(ds_tf, ds_numpy)
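
# The loop above decodes each sorted key into (batch, x, y) indices of the 2D
# data structure. An equivalent standalone sketch of that decoding:
import numpy as np

def _decode_key_2d(key, total_num_cells):
    # Cells of trailing dimensions (> 2) collapse into one 2D cell.
    if len(total_num_cells) > 2:
        cells_per_2D_cell = int(np.prod(total_num_cells[2:]))
    else:
        cells_per_2D_cell = 1
    ds_index = key // cells_per_2D_cell
    y_index = ds_index % total_num_cells[1]
    aux = ds_index // total_num_cells[1]
    x_index = aux % total_num_cells[0]
    batch_id = aux // total_num_cells[0]
    return batch_id, x_index, y_index

# With num_cells = [4, 4], key 27 encodes batch 1, cell [2, 3].
assert _decode_key_2d(27, np.array([4, 4])) == (1, 2, 3)
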
    def test_local_pooling(self, num_points, num_samples, batch_size, radius,
                           dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, dimension)
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)

        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        neighbor_ids = neighborhood._original_neigh_ids.numpy()
        features_on_neighbors = features[neighbor_ids[:, 0]]

        #max pooling
        with self.subTest(name='max_pooling_to_sampled'):
            PoolLayer = MaxPooling()
            pool_tf = PoolLayer(features, point_cloud, point_cloud_samples,
                                cell_sizes)

            pool_numpy = np.empty([num_samples, dimension])
            for i in range(num_samples):
                pool_numpy[i] = np.max(
                    features_on_neighbors[neighbor_ids[:, 1] == i], axis=0)

            self.assertAllClose(pool_tf, pool_numpy)
            point_cloud.set_batch_shape([batch_size // 2, 2])
            padded = PoolLayer(features,
                               point_cloud,
                               point_cloud_samples,
                               cell_sizes,
                               return_padded=True)
            self.assertTrue(padded.shape.rank > 2)

        #max pooling
        with self.subTest(name='average_pooling_to_sampled'):
            PoolLayer = AveragePooling()
            pool_tf = PoolLayer(features, point_cloud, point_cloud_samples,
                                cell_sizes)

            pool_numpy = np.empty([num_samples, dimension])
            for i in range(num_samples):
                pool_numpy[i] = np.mean(
                    features_on_neighbors[neighbor_ids[:, 1] == i], axis=0)

            self.assertAllClose(pool_tf, pool_numpy)
def sample(neighborhood, sample_mode='poisson', name=None):
  """ Sampling for a neighborhood.

  Args:
    neighborhood: A `Neighborhood` instance.
    sample_mode: A `string`, either `'poisson'` or `'cell average'`.

  Returns:
    A `PointCloud` instance, the sampled points.
    An `int` `Tensor` of shape `[S]`, the indices of the sampled points,
      `None` for cell average sampling.

  """
  sample_mode_value = sample_modes[sample_mode.lower()]
  #Compute the sampling.
  sampled_points, sampled_batch_ids, sampled_indices = \
      sampling(neighborhood, sample_mode_value)

  #Save the sampled point cloud.
  if sample_mode_value == 0:
    sampled_indices = tf.gather(
        neighborhood._grid._sorted_indices, sampled_indices)
  else:
    sampled_indices = None
  sampled_point_cloud = PointCloud(
      points=sampled_points, batch_ids=sampled_batch_ids,
      batch_size=neighborhood._point_cloud_sampled._batch_size_numpy)
  return sampled_point_cloud, sampled_indices
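
# Usage sketch for `sample`, mirroring the Poisson disk test snippets above
# (utils helper and constructors as used there; toy values):
import numpy as np
cell_sizes = np.float32(np.repeat(0.1, 3))
points, batch_ids = utils._create_random_point_cloud_segmented(
    2, 2000, dimension=3)
point_cloud = PointCloud(points, batch_ids)
grid = Grid(point_cloud, cell_sizes)
neighborhood = Neighborhood(grid, cell_sizes)     # self-neighborhood
sampled_pc, sampled_ids = sample(neighborhood, 'poisson')
# For 'poisson', sampled_ids indexes the original points; for 'cell average'
# the second output is None.
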
    def test_sampling_poisson_disk_on_random(self, num_points, batch_size,
                                             cell_size, dimension):
        cell_sizes = np.float32(np.repeat(cell_size, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points)
        point_cloud = PointCloud(points, batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes)
        sampled_point_cloud, _ = sample(neighborhood, 'poisson')

        sampled_points = sampled_point_cloud._points.numpy()
        sampled_batch_ids = sampled_point_cloud._batch_ids.numpy()

        min_dist = 1.0
        for i in range(batch_size):
            indices = np.where(sampled_batch_ids == i)
            diff = np.expand_dims(sampled_points[indices], 1) - \
                np.expand_dims(sampled_points[indices], 0)
            dists = np.linalg.norm(diff, axis=2)
            dists = np.sort(dists, axis=1)
            min_dist = min(min_dist, np.amin(dists[:, 1]))

        self.assertLess(min_dist, cell_size + 1e-3)
def compute_keys_tf(point_cloud: PointCloud, num_cells, cell_size, name=None):
    """ Computes the regular grid cell keys of a point cloud.

    For a point in cell `c` the key is computed as
        \\(key = batch_id * prod_{d=0}^{D} num_cells_{d} + \\)
        \\(sum_{d=0}^{D}( c_{d} prod_{d'=d+1}^{D} num_cells_{d'} ) \\).
    Args:
      point_cloud: A `PointCloud` instance.
      num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells
        per dimension.
      cell_size: An `int` `Tensor` of shape `[D]`, the cell sizes per
        dimension.

    Returns:
      An `int` `Tensor` of shape `[N]`, the keys per point.

  """
    aabb = point_cloud.get_AABB()
    abb_min_per_batch = aabb._aabb_min
    aabb_min_per_point = tf.gather(abb_min_per_batch, point_cloud._batch_ids)
    cell_ind = tf.math.floor(
        (point_cloud._points - aabb_min_per_point) / cell_size)
    cell_ind = tf.cast(cell_ind, tf.int32)
    cell_ind = tf.minimum(tf.maximum(cell_ind, tf.zeros_like(cell_ind)),
                          num_cells)
    cell_multiplier = tf.math.cumprod(num_cells, reverse=True)
    cell_multiplier = tf.concat((cell_multiplier, [1]), axis=0)
    keys = point_cloud._batch_ids * cell_multiplier[0] + \
        tf.math.reduce_sum(cell_ind * tf.reshape(cell_multiplier[1:], [1, -1]),
                           axis=1)
    return tf.cast(keys, tf.int64)
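
# Worked example of the key layout described in the docstring above: with
# num_cells = [4, 4] the multipliers are [16, 4, 1], so a point in cell
# c = [2, 3] of batch 1 gets key = 1 * 16 + 2 * 4 + 3 * 1 = 27. A pure-numpy
# check, independent of the TF implementation:
import numpy as np
num_cells = np.array([4, 4])
cell_multiplier = np.concatenate(
    (np.flip(np.cumprod(np.flip(num_cells))), [1]))   # [16, 4, 1]
key = 1 * cell_multiplier[0] + np.sum(np.array([2, 3]) * cell_multiplier[1:])
assert key == 27
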
 def test_flatten_unflatten_padded(self, batch_shape, num_points,
                                   dimension):
     batch_size = np.prod(batch_shape)
     points, sizes = utils._create_random_point_cloud_padded(
         num_points, batch_shape, dimension=dimension)
     point_cloud = PointCloud(points, sizes=sizes)
     retrieved_points = point_cloud.get_points().numpy()
     self.assertAllEqual(points.shape, retrieved_points.shape)
     points = points.reshape([batch_size, num_points, dimension])
     retrieved_points = retrieved_points.reshape(
         [batch_size, num_points, dimension])
     sizes = sizes.reshape([batch_size])
     for i in range(batch_size):
         self.assertAllClose(points[i, :sizes[i]],
                             retrieved_points[i, :sizes[i]])
         self.assertTrue(np.all(retrieved_points[i, sizes[i]:] == 0))
    def test_grid_datastructure(self, num_points, batch_size, scale, radius,
                                dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=True)
        point_cloud = PointCloud(points, batch_ids)
        aabb = point_cloud.get_AABB()
        grid = Grid(point_cloud, radius, aabb)

        total_num_cells = grid._num_cells.numpy()
        keys = grid._sorted_keys.numpy()
        ds_numpy = np.full(
            (batch_size, total_num_cells[0], total_num_cells[1], 2), 0)
        if dimension == 2:
            cells_per_2D_cell = 1
        elif dimension > 2:
            cells_per_2D_cell = np.prod(total_num_cells[2:])
        for key_iter, key in enumerate(keys):
            curDSIndex = key // cells_per_2D_cell
            yIndex = curDSIndex % total_num_cells[1]
            auxInt = curDSIndex // total_num_cells[1]
            xIndex = auxInt % total_num_cells[0]
            curbatch_ids = auxInt // total_num_cells[0]

            if key_iter == 0:
                ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter
            else:
                prevKey = keys[key_iter - 1]
                prevDSIndex = prevKey // cells_per_2D_cell
                if prevDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter

            nextIter = key_iter + 1
            if nextIter >= len(keys):
                ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys)
            else:
                nextKey = keys[key_iter + 1]
                nextDSIndex = nextKey // cells_per_2D_cell
                if nextDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1

        # check if the data structure is equal
        self.assertAllEqual(grid.get_DS(), ds_numpy)
    def __init__(self,
                 point_cloud: PointCloud,
                 cell_sizes,
                 sample_mode='poisson',
                 name=None):
        #Initialize the attributes.
        self._aabb = point_cloud.get_AABB()
        self._point_clouds = [point_cloud]
        self._cell_sizes = []
        self._neighborhoods = []

        self._dimension = point_cloud._dimension
        self._batch_shape = point_cloud._batch_shape

        #Create the different sampling operations.
        cur_point_cloud = point_cloud
        for sample_iter, cur_cell_sizes in enumerate(cell_sizes):
            cur_cell_sizes = tf.convert_to_tensor(value=cur_cell_sizes,
                                                  dtype=tf.float32)

            # Check if the cell size is defined for all the dimensions.
            # If not, the last cell size value is tiled until all the dimensions
            # have a value.
            cur_num_dims = tf.gather(cur_cell_sizes.shape, 0)
            cur_cell_sizes = tf.cond(
                cur_num_dims < self._dimension, lambda: tf.concat(
                    (cur_cell_sizes,
                     tf.tile(
                         tf.gather(cur_cell_sizes, [
                             tf.rank(cur_cell_sizes) - 1
                         ]), [self._dimension - cur_num_dims])),
                    axis=0), lambda: cur_cell_sizes)
            tf.assert_greater(
                self._dimension + 1,
                cur_num_dims,
                f'Too many dimensions in cell sizes {cur_num_dims} ' + \
                f'instead of max. {self._dimension}')
            # old version, does not run in graph mode
            # if cur_num_dims < self._dimension:
            #   cur_cell_sizes = tf.concat((cur_cell_sizes,
            #                  tf.tile(tf.gather(cur_cell_sizes,
            #                                    [tf.rank(cur_cell_sizes) - 1]),
            #                    [self._dimension - cur_num_dims])),
            #                       axis=0)
            # if cur_num_dims > self._dimension:
            #   raise ValueError(
            #       f'Too many dimensions in cell sizes {cur_num_dims} ' + \
            #       f'instead of max. {self._dimension}')

            self._cell_sizes.append(cur_cell_sizes)

            #Create the sampling operation.
            cur_grid = Grid(cur_point_cloud, cur_cell_sizes, self._aabb)
            cur_neighborhood = Neighborhood(cur_grid, cur_cell_sizes)
            cur_point_cloud, _ = sample(cur_neighborhood, sample_mode)

            self._neighborhoods.append(cur_neighborhood)
            cur_point_cloud.set_batch_shape(self._batch_shape)
            self._point_clouds.append(cur_point_cloud)
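
# The cell-size completion in the constructor above tiles the last value until
# every dimension has a cell size; an equivalent eager-mode numpy sketch
# (helper name illustrative):
import numpy as np

def _complete_cell_sizes(cell_sizes, dimension):
    cell_sizes = np.asarray(cell_sizes, dtype=np.float32)
    if cell_sizes.shape[0] < dimension:
        # Tile the last cell size until all dimensions are covered.
        pad = np.tile(cell_sizes[-1:], dimension - cell_sizes.shape[0])
        cell_sizes = np.concatenate((cell_sizes, pad))
    return cell_sizes

assert np.array_equal(_complete_cell_sizes([0.1], 3),
                      np.float32([0.1, 0.1, 0.1]))
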
    def test_aabb_diameter(self, batch_shape, max_num_points, dimension):
        points, sizes = utils._create_random_point_cloud_padded(
            max_num_points, batch_shape, dimension)
        batch_size = np.prod(batch_shape)
        diameter_numpy = np.empty(batch_size)
        points_flat = np.reshape(points,
                                 [batch_size, max_num_points, dimension])
        sizes_flat = np.reshape(sizes, [batch_size])
        for i in range(batch_size):
            curr_pts = points_flat[i][:sizes_flat[i]]
            diag = np.amax(curr_pts, axis=0) - np.amin(curr_pts, axis=0)
            diameter_numpy[i] = np.linalg.norm(diag)
        diameter_numpy = np.reshape(diameter_numpy, batch_shape)

        aabb_tf = PointCloud(points, sizes=sizes).get_AABB()
        diameter_tf = aabb_tf.get_diameter()
        self.assertAllClose(diameter_numpy, diameter_tf)
    def test_conv_rigid_jacobian_params(self, num_points, num_samples,
                                        num_features, batch_size, radius,
                                        num_kernel_points, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        point_cloud = PointCloud(points, batch_ids)
        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        conv_layer = KPConv(num_features[0], num_features[1],
                            num_kernel_points)

        features = np.random.rand(num_points, num_features[0])

        with self.subTest(name='features'):

            def conv_features(features_in):
                conv_result = conv_layer(features_in, point_cloud,
                                         point_cloud_samples, radius,
                                         neighborhood)
                return conv_result

            self.assert_jacobian_is_correct_fn(conv_features, [features],
                                               atol=1e-3,
                                               delta=1e-3)

        with self.subTest(name='weights'):

            def conv_weights(weights_in):
                conv_layer._weights = weights_in
                conv_result = conv_layer(features, point_cloud,
                                         point_cloud_samples, radius,
                                         neighborhood)
                return conv_result

            weights = conv_layer._weights
            self.assert_jacobian_is_correct_fn(conv_weights, [weights],
                                               atol=1e-3,
                                               delta=1e-3)
    def test_neighbors_are_from_same_batch(self, batch_size, num_points,
                                           num_samples, radius, dimension):
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        samples, batch_ids_samples = utils._create_random_point_cloud_segmented(
            batch_size, num_samples, dimension=dimension)
        radius = np.float32(np.repeat([radius], dimension))

        point_cloud = PointCloud(points, batch_ids)
        point_cloud_samples = PointCloud(samples, batch_ids_samples)
        grid = Grid(point_cloud, radius)
        neighborhood = Neighborhood(grid, radius, point_cloud_samples)

        batch_ids_in = tf.gather(point_cloud._batch_ids,
                                 neighborhood._original_neigh_ids[:, 0])
        batch_ids_out = tf.gather(point_cloud_samples._batch_ids,
                                  neighborhood._original_neigh_ids[:, 1])
        batch_check = batch_ids_in == batch_ids_out
        self.assertTrue(np.all(batch_check))
def compute_neighborhoods(grid,
                          radius,
                          point_cloud_centers=None,
                          max_neighbors=0,
                          return_ranges=False,
                          return_sorted_ids=False,
                          name=None):
    """ Neighborhood of a point cloud.

  Args:
    grid: A 'Grid' instance, the regular grid data structure.
    radius: A `float` `Tensor` of shape `[D]`, the radius used to select the
      neighbors. Should be smaller than the cell size of the grid.
    point_cloud_centers: A 'PointCloud' instance. Samples point cloud.
      If None, the sorted points from the grid will be used.
    max_neighbors: An `int`, maximum number of neighbors per sample,
      if `0` all neighbors are selected. (optional)
    return_ranges: A `bool`, if 'True` returns the neighborhood ranges as a
      second output, default is `False`. (optional)
    return_sorted_ids: A 'bool', if 'True' the neighbor ids are with respect
      to the sorted points in the grid, default is `False`. (optional)

  Returns:
    neighbors: An `int` `Tensor` of shape `[M, 2]`, the indices to neighbor
      pairs, where element `i` is `[neighbor_id, center_id]`.
    ranges: If `return_ranges` is `True` returns a second 'int` Tensor` of
      shape `[N2]`, such that the neighbor indices of center point `i` are
      `neighbors[ranges[i]]:neigbors[ranges[i+1]]` for `i>0`.
  """

    radii = cast_to_num_dims(radius, grid._point_cloud._dimension)
    #Save the attributes.
    if point_cloud_centers is None:
        point_cloud_centers = PointCloud(grid._sorted_points,
                                         grid._sorted_batch_ids,
                                         grid._batch_size)

    #Find the neighbors, with indices with respect to sorted points in the grid
    nb_ranges, neighbors = find_neighbors(grid, point_cloud_centers, radii,
                                          max_neighbors)

    #Convert the neighbor ids back to the original point order.
    if not return_sorted_ids:
        original_neigh_ids = tf.gather(grid._sorted_indices, neighbors[:, 0])
        neighbors = tf.concat(
            [tf.reshape(original_neigh_ids, [-1, 1]),
             tf.reshape(neighbors[:, 1], [-1, 1])],
            axis=-1)
    if return_ranges:
        return neighbors, nb_ranges
    else:
        return neighbors
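
# Usage sketch for `compute_neighborhoods`, reusing the construction pattern
# from the test snippets above (point_cloud and point_cloud_samples built as
# shown there):
import numpy as np
cell_sizes = np.float32(np.repeat(0.1, 3))
grid = Grid(point_cloud, cell_sizes)
neighbors, nb_ranges = compute_neighborhoods(
    grid, cell_sizes, point_cloud_centers=point_cloud_samples,
    return_ranges=True)
# neighbors[nb_ranges[i - 1]:nb_ranges[i]] are the neighbor pairs of center i.
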
    def test_find_neighbors(self, num_points, num_samples, batch_size, radius,
                            dimension):
        cell_sizes = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points)
        point_cloud = PointCloud(points, batch_ids)
        samples_points, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples * batch_size, dimension=dimension,
                sizes=np.ones(batch_size, dtype=int) * num_samples)
        point_cloud_sampled = PointCloud(samples_points, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_sampled)
        sorted_points = grid._sorted_points

        neighbors_tf = neighborhood._neighbors

        neighbors_numpy = [[] for i in range(num_samples * batch_size)]

        for k in range(batch_size):
            for i in range(num_samples):
                for j in range(num_points):
                    diffArray = (samples_points[i + k * num_samples] - \
                                 sorted_points[(batch_size - k - 1) * num_points + j])\
                                 / cell_sizes
                    if np.linalg.norm(diffArray) < 1.0:
                        neighbors_numpy[k * num_samples + i].append((batch_size - k - 1)\
                                                                    * num_points + j)

        allFound = True
        for neigh in neighbors_tf:
            found = False
            for ref_neigh in neighbors_numpy[neigh[1]]:
                if ref_neigh == neigh[0]:
                    found = True
            allFound = allFound and found
        self.assertTrue(allFound)
    def test_global_pooling(self, num_points, batch_size, dimension):
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            equal_sized_batches=True)
        features = np.random.rand(batch_size, num_points, dimension)
        point_cloud = PointCloud(points, batch_ids)

        # max pooling
        with self.subTest(name='max_pooling'):
            PoolLayer = GlobalMaxPooling()
            pool_tf = PoolLayer(features, point_cloud)
            pool_numpy = np.empty([batch_size, dimension])
            features = features.reshape([-1, dimension])
            for i in range(batch_size):
                pool_numpy[i] = np.max(features[batch_ids == i], axis=0)
            self.assertAllClose(pool_numpy, pool_tf)
            point_cloud.set_batch_shape([batch_size // 2, 2])
            padded = PoolLayer(features, point_cloud, return_padded=True)
            self.assertTrue(padded.shape.rank > 2)

        # average pooling
        with self.subTest(name='average_pooling'):
            PoolLayer = GlobalAveragePooling()
            pool_tf = PoolLayer(features, point_cloud)
            pool_numpy = np.empty([batch_size, dimension])
            for i in range(batch_size):
                pool_numpy[i] = np.mean(features[batch_ids == i], axis=0)
            self.assertAllClose(pool_numpy, pool_tf)
            point_cloud.set_batch_shape([batch_size // 2, 2])
            padded = PoolLayer(features, point_cloud, return_padded=True)
            self.assertTrue(padded.shape.rank > 2)
    def test_sampling_poisson_disk_on_uniform(self, num_points_sqrt, scale):
        points = utils._create_uniform_distributed_point_cloud_2D(
            num_points_sqrt, scale=scale)
        cell_sizes = scale * np.array([2, 2], dtype=np.float32) \
            / num_points_sqrt
        batch_ids = np.zeros([len(points)])
        point_cloud = PointCloud(points, batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes)
        sample_point_cloud, _ = sample(neighborhood, 'poisson')

        sampled_points = sample_point_cloud._points.numpy()
        expected_num_pts = num_points_sqrt**2 // 2
        self.assertTrue(len(sampled_points) == expected_num_pts)
    def test_aabb_min_max(self, batch_size, num_points, dimension):
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension)
        aabb_max_numpy = np.empty([batch_size, dimension])
        aabb_min_numpy = np.empty([batch_size, dimension])
        for i in range(batch_size):
            aabb_max_numpy[i] = np.amax(points[batch_ids == i], axis=0)
            aabb_min_numpy[i] = np.amin(points[batch_ids == i], axis=0)

        aabb_tf = PointCloud(points,
                             batch_ids=batch_ids,
                             batch_size=batch_size).get_AABB()

        self.assertAllClose(aabb_max_numpy, aabb_tf._aabb_max)
        self.assertAllClose(aabb_min_numpy, aabb_tf._aabb_min)
def poisson_disk_sampling(point_cloud,
                          radius=None,
                          neighborhood=None,
                          return_ids=False,
                          name=None):
  """ Poisson disk sampling of a point cloud.

  Note: Either `radius` or `neighborhood` must be provided.

  Args:
    point_cloud: A `PointCloud` instance.
    radius: A `float` or a `float` `Tensor` of shape `[D]`, the radius for the
      Poisson disk sampling.
    neighborhood: A `Neighborhood` instance.
    return_ids: A `bool`, if `True` returns the indices of the sampled points.
      (optional)

  Returns:
    A `PointCloud` instance.
    An `int` `Tensor` of shape `[S]`, if `return_ids` is `True`.

  Raises:
    ValueError: If neither `radius` nor `neighborhood` is given.

  """
  if radius is None and neighborhood is None:
    raise ValueError(
        "Missing Argument! Either radius or neighborhood must be given!")
  if neighborhood is None:
    # compute neighborhood
    radii = cast_to_num_dims(radius, point_cloud)
    grid = Grid(point_cloud, radii)
    neighborhood = Neighborhood(grid, radii)

  #Compute the sampling.
  sampled_points, sampled_batch_ids, sampled_indices = \
      sampling(neighborhood, 1)

  sampled_point_cloud = PointCloud(
      points=sampled_points, batch_ids=sampled_batch_ids,
      batch_size=neighborhood._point_cloud_sampled._batch_size)

  if return_ids:
    sampled_indices = tf.gather(neighborhood._grid._sorted_indices,
                                sampled_indices)
    return sampled_point_cloud, sampled_indices
  else:
    return sampled_point_cloud
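
# Usage sketch for `poisson_disk_sampling`, radius-only variant (the
# neighborhood is then built internally as shown above):
sampled_pc, sampled_ids = poisson_disk_sampling(
    point_cloud, radius=0.1, return_ids=True)
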
    def test_sampling_average_on_random(self, num_points, batch_size,
                                        cell_size, dimension):
        cell_sizes = np.repeat(cell_size, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points)
        point_cloud = PointCloud(points=points, batch_ids=batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes)
        sample_point_cloud, _ = sample(neighborhood, 'average')

        sampled_points_tf = sample_point_cloud._points.numpy()
        sorted_keys = neighborhood._grid._sorted_keys.numpy()
        sorted_points = neighborhood._grid._sorted_points.numpy()

        sampled_points_numpy = []
        cur_point = np.repeat(0.0, dimension)
        cur_key = -1
        cur_num_points = 0.0
        for pt_id, cur_key_point in enumerate(sorted_keys):
            if cur_key_point != cur_key:
                if cur_key != -1:
                    cur_point /= cur_num_points
                    sampled_points_numpy.append(cur_point)
                cur_key = cur_key_point
                cur_point = np.repeat(0.0, dimension)
                cur_num_points = 0.0
            cur_point += sorted_points[pt_id]
            cur_num_points += 1.0
        cur_point /= cur_num_points
        sampled_points_numpy.append(cur_point)

        equal = True
        for point_numpy in sampled_points_numpy:
            found = False
            for point_tf in sampled_points_tf:
                if np.all(np.abs(point_numpy - point_tf) < 0.0001):
                    found = True
            equal = equal and found
        self.assertTrue(equal)
def cell_average_sampling(point_cloud,
                          cell_sizes=None,
                          grid=None,
                          name=None):
  """ Cell average sampling of a point cloud.

  Note: Either `cell_sizes` or `grid` must be provided.

  Args:
    point_cloud: A `PointCloud` instance.
    cell_sizes: A `float` or a `float` `Tensor` of shape `[D]`, the cell sizes
      for the sampling.
    grid: A `Grid` instance.

  Returns:
    A `PointCloud` instance.

  Raises:
    ValueError: If neither `cell_sizes` nor `grid` is given.

  """
  if cell_sizes is None and grid is None:
    raise ValueError(
        "Missing Argument! Either cell_sizes or grid must be given!")
  if grid is None:
    # compute grid
    cell_sizes = cast_to_num_dims(cell_sizes, point_cloud)
    grid = Grid(point_cloud, cell_sizes)

  neighborhood = Neighborhood(grid, cell_sizes)

  #Compute the sampling.
  sampled_points, sampled_batch_ids, sampled_indices = \
      sampling(neighborhood, 0)

  sampled_point_cloud = PointCloud(
      points=sampled_points, batch_ids=sampled_batch_ids,
      batch_size=neighborhood._point_cloud_sampled._batch_size)

  return sampled_point_cloud
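
# Usage sketch for `cell_average_sampling`, cell_sizes-only variant (the grid
# is then built internally as shown above):
sampled_pc = cell_average_sampling(point_cloud, cell_sizes=0.1)
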
    def __init__(self,
                 grid: Grid,
                 radius,
                 point_cloud_sample=None,
                 max_neighbors=0,
                 name=None):
        radii = tf.reshape(
            tf.cast(tf.convert_to_tensor(value=radius), tf.float32), [-1])
        if radii.shape[0] == 1:
            radii = tf.repeat(radius, grid._point_cloud._dimension)
        #Save the attributes.
        if point_cloud_sample is None:
            self._equal_samples = True
            self._point_cloud_sampled = PointCloud(grid._sorted_points,
                                                   grid._sorted_batch_ids,
                                                   grid._batch_size)
        else:
            self._equal_samples = False
            self._point_cloud_sampled = point_cloud_sample
        self._grid = grid
        self._radii = radii
        self.max_neighbors = max_neighbors

        #Find the neighbors.
        self._samples_neigh_ranges, self._neighbors = find_neighbors(
            self._grid, self._point_cloud_sampled, self._radii, max_neighbors)

        #Neighbor ids with respect to the original (unsorted) points.
        aux_original_neigh_ids = tf.gather(self._grid._sorted_indices,
                                           self._neighbors[:, 0])
        self._original_neigh_ids = tf.concat([
            tf.reshape(aux_original_neigh_ids, [-1, 1]),
            tf.reshape(self._neighbors[:, 1], [-1, 1])
        ],
                                             axis=-1)

        #Initialize the pdf
        self._pdf = None

        self._transposed = None
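
# The `_original_neigh_ids` construction above maps neighbor ids from the
# grid-sorted order back to the original point order. An equivalent numpy
# sketch with a toy permutation (sorted_indices[j] is the original index of
# sorted point j):
import numpy as np
sorted_indices = np.array([2, 0, 1])
neighbors = np.array([[0, 0], [2, 1]])  # rows: [sorted_neighbor_id, sample_id]
original_neigh_ids = np.stack(
    (sorted_indices[neighbors[:, 0]], neighbors[:, 1]), axis=-1)
assert np.array_equal(original_neigh_ids, np.array([[2, 0], [1, 1]]))
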
def _flatten_features(features, point_cloud: PointCloud):
    """ Converts features of shape `[A1, ..., An, C]` to shape `[N, C]`.

  Args:
    features: A `Tensor` of shape `[A1, ..., An, C]` or `[N, C]`.
    point_cloud: A `PointCloud` instance.

  Returns:
    A `Tensor` of shape `[N, C]`.

  """
    if features.shape.ndims > 2:
        sizes = point_cloud.get_sizes()
        features, _ = flatten_batch_to_2d(features, sizes)
        sorting = tf.math.invert_permutation(point_cloud._sorted_indices_batch)
        features = tf.gather(features, sorting)
    else:
        tf.assert_equal(
            tf.shape(features)[0],
            tf.shape(point_cloud._points)[0])
    tf.assert_equal(tf.rank(features), 2)
    return features
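
# Usage sketch for `_flatten_features` with a padded batch, following the
# padded point cloud test above (utils helper and PointCloud as used there):
import numpy as np
points, sizes = utils._create_random_point_cloud_padded(
    100, [4], dimension=3)                        # points: [4, 100, 3]
features = np.float32(np.random.rand(4, 100, 8))  # padded features [A1, n, C]
point_cloud = PointCloud(points, sizes=sizes)
flat = _flatten_features(features, point_cloud)   # flat: [N, 8], N = sum(sizes)
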