Example #1
    def test_neighbors_on_3D_meshgrid(self, num_points_cbrt,
                                      num_points_samples_cbrt, radius,
                                      expected_num_neighbors):
        num_points = num_points_cbrt**3
        num_samples = num_points_samples_cbrt**3

        points = utils._create_uniform_distributed_point_cloud_3D(
            num_points_cbrt, flat=True)
        batch_ids = np.zeros(num_points)
        points_samples = utils._create_uniform_distributed_point_cloud_3D(
            num_points_samples_cbrt,
            bb_min=1 / (num_points_samples_cbrt + 1),
            flat=True)
        batch_ids_samples = np.zeros(num_samples)
        point_cloud = PointCloud(points, batch_ids)
        point_cloud_samples = PointCloud(points_samples, batch_ids_samples)
        radius = np.float32(np.repeat([radius], 3))
        grid = Grid(point_cloud, radius)
        neighborhood = Neighborhood(grid, radius, point_cloud_samples)

        neigh_ranges = neighborhood._samples_neigh_ranges
        num_neighbors = np.zeros(num_samples)
        num_neighbors[0] = neigh_ranges[0]
        num_neighbors[1:] = neigh_ranges[1:] - neigh_ranges[:-1]
        expected_num_neighbors = \
            np.ones_like(num_neighbors) * expected_num_neighbors
        self.assertAllEqual(num_neighbors, expected_num_neighbors)
    def __init__(self, neighborhood):
        if neighborhood._equal_samples:
            self = neighborhood
        else:
            self._equal_samples = False
            self._radii = neighborhood._radii
            self._point_cloud_sampled = neighborhood._grid._point_cloud
            self._grid = Grid(neighborhood._point_cloud_sampled,
                              neighborhood._radii)

            self._original_neigh_ids = tf.reverse(
                neighborhood._original_neigh_ids, axis=[1])
            sort_centers = tf.argsort(self._original_neigh_ids[:, 1])
            self._original_neigh_ids = tf.gather(self._original_neigh_ids,
                                                 sort_centers)

            unsorted_indices = tf.math.invert_permutation(
                self._grid._sorted_indices)
            aux_neigh_ids = tf.gather(unsorted_indices,
                                      self._original_neigh_ids[:, 0])
            self._neighbors = tf.concat([
                tf.reshape(aux_neigh_ids, [-1, 1]),
                tf.reshape(self._original_neigh_ids[:, 1], [-1, 1])
            ],
                                        axis=-1)

            num_neighbors = neighborhood._original_neigh_ids.shape[0]
            num_center_points = self._point_cloud_sampled._points.shape[0]
            self._samples_neigh_ranges = tf.math.unsorted_segment_max(
                tf.range(1, num_neighbors + 1), self._neighbors[:, 1],
                num_center_points)

            self._pdf = None
Example #3
    def test_sampling_poisson_disk_on_random(self, num_points, batch_size,
                                             cell_size, dimension):
        cell_sizes = np.float32(np.repeat(cell_size, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points)
        point_cloud = PointCloud(points, batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes)
        sampled_point_cloud, _ = sample(neighborhood, 'poisson')

        sampled_points = sampled_point_cloud._points.numpy()
        sampled_batch_ids = sampled_point_cloud._batch_ids.numpy()

        min_dist = 1.0
        for i in range(batch_size):
            indices = np.where(sampled_batch_ids == i)
            diff = np.expand_dims(sampled_points[indices], 1) - \
                np.expand_dims(sampled_points[indices], 0)
            dists = np.linalg.norm(diff, axis=2)
            dists = np.sort(dists, axis=1)
            min_dist = min(min_dist, np.amin(dists[:, 1]))

        # Poisson disk sampling should leave points at least `cell_size` apart.
        self.assertLess(cell_size, min_dist + 1e-3)
    def test_conv_jacobian_points(self, num_points, num_samples, num_features,
                                  batch_size, radius, num_kernel_points,
                                  dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        point_cloud = PointCloud(points, batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        neighborhood.compute_pdf()

        conv_layer = KPConv(num_features[0], num_features[1],
                            num_kernel_points)

        def conv_points(points_in):
            point_cloud._points = points_in
            neighborhood._grid._sorted_points = \
                tf.gather(points_in, grid._sorted_indices)

            conv_result = conv_layer(features, point_cloud,
                                     point_cloud_samples, radius, neighborhood)

            return conv_result

        self.assert_jacobian_is_correct_fn(conv_points, [np.float32(points)],
                                           atol=1e-3,
                                           delta=1e-3)
Example #5
    def test_compute_keys_with_sort(self, num_points, batch_size, scale,
                                    radius, dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=False)
        point_cloud = PointCloud(points, batch_ids)
        aabb = point_cloud.get_AABB()
        grid = Grid(point_cloud, radius)

        total_num_cells = grid._num_cells.numpy()
        aabb_min = aabb._aabb_min.numpy()

        aabb_min_per_point = aabb_min[batch_ids, :]
        cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int)
        cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension),
                              total_num_cells)
        cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells)))
        cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0)
        keys = batch_ids * cell_multiplier[0] + \
            np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1)
        # check unsorted keys
        self.assertAllEqual(grid._cur_keys, keys)

        # sort descending
        sorted_keys = np.flip(np.sort(keys))
        # check if the cell keys per point are equal
        self.assertAllEqual(grid._sorted_keys, sorted_keys)
Example #6
    def test_grid_datastructure(self, num_points, batch_size, scale, radius,
                                dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=True)
        point_cloud = PointCloud(points, batch_ids)
        aabb = point_cloud.get_AABB()
        grid = Grid(point_cloud, radius, aabb)

        total_num_cells = grid._num_cells.numpy()
        keys = grid._sorted_keys.numpy()
        ds_numpy = np.full(
            (batch_size, total_num_cells[0], total_num_cells[1], 2), 0)
        if dimension == 2:
            cells_per_2D_cell = 1
        elif dimension > 2:
            cells_per_2D_cell = np.prod(total_num_cells[2:])
        for key_iter, key in enumerate(keys):
            curDSIndex = key // cells_per_2D_cell
            yIndex = curDSIndex % total_num_cells[1]
            auxInt = curDSIndex // total_num_cells[1]
            xIndex = auxInt % total_num_cells[0]
            curbatch_ids = auxInt // total_num_cells[0]

            if key_iter == 0:
                ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter
            else:
                prevKey = keys[key_iter - 1]
                prevDSIndex = prevKey // cells_per_2D_cell
                if prevDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter

            nextIter = key_iter + 1
            if nextIter >= len(keys):
                ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys)
            else:
                nextKey = keys[key_iter + 1]
                nextDSIndex = nextKey // cells_per_2D_cell
                if nextDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1

        # check if the data structure is equal
        self.assertAllEqual(grid.get_DS(), ds_numpy)
Example #7
    def __call__(self,
                 pool_op,
                 features,
                 point_cloud_in: PointCloud,
                 point_cloud_out: PointCloud,
                 pooling_radius,
                 return_sorted=False,
                 return_padded=False,
                 name=None,
                 default_name="custom pooling"):
        """ Computes a local pooling between two point clouds specified by `pool_op`.

    Note:
      In the following, `A1` to `An` are optional batch dimensions.

    Args:
      pool_op: A function of type `tf.math.unsorted_segment_*`.
      features: A `float` `Tensor` of shape `[N_in, C]` or
        `[A1, ..., An, V_in, C]`.
      point_cloud_in: A `PointCloud` instance on which the features are
        defined.
      point_cloud_out: A `PointCloud` instance, on which the output features
        are defined.
      pooling_radius: A `float` or a `float` `Tensor` of shape `[D]`.
      return_sorted: A `bool`, if 'True' the output tensor is sorted
        according to the sorted batch ids of `point_cloud_out`.
      return_padded: A `bool`, if 'True' the output tensor is sorted and
        zero padded.

    Returns:
      A `float` `Tensor` of shape
        `[N_out, C]`, if `return_padded` is `False`
      or
        `[A1, ..., An, V_out, C]`, if `return_padded` is `True`.

    """
        features = tf.convert_to_tensor(value=features)
        features = _flatten_features(features, point_cloud_in)
        pooling_radius = tf.convert_to_tensor(value=pooling_radius,
                                              dtype=tf.float32)
        if pooling_radius.shape[0] == 1:
            pooling_radius = tf.repeat(pooling_radius,
                                       point_cloud_in._dimension)

        # Compute the grid.
        grid_in = Grid(point_cloud_in, pooling_radius)

        # Compute the neighborhood keys.
        neigh = Neighborhood(grid_in, pooling_radius, point_cloud_out)
        features_on_neighbors = tf.gather(features,
                                          neigh._original_neigh_ids[:, 0])

        # Pool the features in the neighborhoods
        features_out = pool_op(data=features_on_neighbors,
                               segment_ids=neigh._original_neigh_ids[:, 1],
                               num_segments=tf.shape(
                                   point_cloud_out._points)[0])
        return _format_output(features_out, point_cloud_out, return_sorted,
                              return_padded)
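
The MaxPooling and AveragePooling layers used in the tests further down take the same point-cloud arguments as this `__call__` and presumably bind `pool_op` internally (e.g. to `tf.math.unsorted_segment_max` / `tf.math.unsorted_segment_mean`). A minimal usage sketch with illustrative shapes, assuming the `PointCloud` and `MaxPooling` imports used throughout this page:

import numpy as np

# Illustrative data: 100 input points with 8 features each, pooled onto 10
# output points; everything lives in batch 0.
points = np.random.rand(100, 3).astype(np.float32)
batch_ids = np.zeros(100)
samples = np.random.rand(10, 3).astype(np.float32)
batch_ids_samples = np.zeros(10)
features = np.random.rand(100, 8).astype(np.float32)

point_cloud_in = PointCloud(points, batch_ids)
point_cloud_out = PointCloud(samples, batch_ids_samples)

# Pooling radius per dimension, as in test_local_pooling further down.
pooling_radius = np.float32(np.repeat(0.2, 3))
pooled = MaxPooling()(features, point_cloud_in, point_cloud_out, pooling_radius)
# `pooled` has shape [10, 8]: one pooled feature vector per output point.
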
    def test_convolution(self, num_points, num_samples, num_features,
                         batch_size, radius, num_kernel_points, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        # tf
        conv_layer = KPConv(num_features[0], num_features[1],
                            num_kernel_points)
        conv_result_tf = conv_layer(features, point_cloud, point_cloud_samples,
                                    radius, neighborhood)

        # numpy
        neighbor_ids = neighborhood._original_neigh_ids.numpy()
        nb_ranges = neighborhood._samples_neigh_ranges.numpy()
        nb_ranges = np.concatenate(([0], nb_ranges), axis=0)
        kernel_points = conv_layer._kernel_points.numpy()
        sigma = conv_layer._sigma.numpy()

        # extract variables
        weights = conv_layer._weights.numpy()

        features_on_neighbors = features[neighbor_ids[:, 0]]
        # compute distances to kernel points
        point_diff = (points[neighbor_ids[:, 0]] -\
                      point_samples[neighbor_ids[:, 1]])\
            / np.expand_dims(cell_sizes, 0)
        kernel_point_diff = np.expand_dims(point_diff, axis=1) -\
            np.expand_dims(kernel_points, axis=0)
        distances = np.linalg.norm(kernel_point_diff, axis=2)
        # compute linear interpolation weights for features based on distances
        kernel_weights = np.maximum(1 - (distances / sigma), 0)
        weighted_features = np.expand_dims(features_on_neighbors, axis=2) *\
            np.expand_dims(kernel_weights, axis=1)
        # sum over neighbors (integration)
        weighted_features_per_sample = \
            np.zeros([num_samples, num_features[0], num_kernel_points])
        for i in range(num_samples):
            weighted_features_per_sample[i] = \
                np.sum(weighted_features[nb_ranges[i]:nb_ranges[i + 1]],
                       axis=0)
        # convolution with summation over kernel dimension
        conv_result_np = \
            np.matmul(
                weighted_features_per_sample.reshape(
                    -1,
                    num_features[0] * num_kernel_points),
                weights)

        self.assertAllClose(conv_result_tf, conv_result_np, atol=1e-5)
    def __init__(self,
                 point_cloud: PointCloud,
                 cell_sizes,
                 sample_mode='poisson',
                 name=None):
        #Initialize the attributes.
        self._aabb = point_cloud.get_AABB()
        self._point_clouds = [point_cloud]
        self._cell_sizes = []
        self._neighborhoods = []

        self._dimension = point_cloud._dimension
        self._batch_shape = point_cloud._batch_shape

        #Create the different sampling operations.
        cur_point_cloud = point_cloud
        for sample_iter, cur_cell_sizes in enumerate(cell_sizes):
            cur_cell_sizes = tf.convert_to_tensor(value=cur_cell_sizes,
                                                  dtype=tf.float32)

            # Check if the cell size is defined for all the dimensions.
            # If not, the last cell size value is tiled until all the dimensions
            # have a value.
            cur_num_dims = tf.gather(cur_cell_sizes.shape, 0)
            cur_cell_sizes = tf.cond(
                cur_num_dims < self._dimension, lambda: tf.concat(
                    (cur_cell_sizes,
                     tf.tile(
                         tf.gather(cur_cell_sizes, [
                             tf.rank(cur_cell_sizes) - 1
                         ]), [self._dimension - cur_num_dims])),
                    axis=0), lambda: cur_cell_sizes)
            tf.assert_greater(
                self._dimension + 1,
                cur_num_dims,
                f'Too many dimensions in cell sizes {cur_num_dims} ' + \
                f'instead of max. {self._dimension}')
            # old version, does not run in graph mode
            # if cur_num_dims < self._dimension:
            #   cur_cell_sizes = tf.concat((cur_cell_sizes,
            #                  tf.tile(tf.gather(cur_cell_sizes,
            #                                    [tf.rank(cur_cell_sizes) - 1]),
            #                    [self._dimension - cur_num_dims])),
            #                       axis=0)
            # if cur_num_dims > self._dimension:
            #   raise ValueError(
            #       f'Too many dimensions in cell sizes {cur_num_dims} ' + \
            #       f'instead of max. {self._dimension}')

            self._cell_sizes.append(cur_cell_sizes)

            #Create the sampling operation.
            cur_grid = Grid(cur_point_cloud, cur_cell_sizes, self._aabb)
            cur_neighborhood = Neighborhood(cur_grid, cur_cell_sizes)
            cur_point_cloud, _ = sample(cur_neighborhood, sample_mode)

            self._neighborhoods.append(cur_neighborhood)
            cur_point_cloud.set_batch_shape(self._batch_shape)
            self._point_clouds.append(cur_point_cloud)
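
The `tf.cond` above broadcasts a cell-size vector that is shorter than the point dimensionality by tiling its last value, in a way that also works in graph mode. A standalone sketch of just that broadcasting step, with an assumed one-element input:

import tensorflow as tf

dimension = 3
cell_sizes = tf.constant([0.25], dtype=tf.float32)  # fewer values than dimensions
num_given = tf.gather(cell_sizes.shape, 0)
cell_sizes = tf.cond(
    num_given < dimension,
    lambda: tf.concat(
        (cell_sizes,
         # Tile the last given value until every dimension has a cell size.
         tf.tile(tf.gather(cell_sizes, [tf.rank(cell_sizes) - 1]),
                 [dimension - num_given])),
        axis=0),
    lambda: cell_sizes)
# cell_sizes is now [0.25, 0.25, 0.25].
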
    def test_basis_proj_jacobian(self, num_points, num_samples, num_features,
                                 batch_size, radius, hidden_size, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        nb_ids = neighborhood._original_neigh_ids
        # tf
        conv_layer = MCConv(num_features[0], num_features[1], dimension, 1,
                            [hidden_size])

        neigh_point_coords = points[nb_ids[:, 0].numpy()]
        center_point_coords = point_samples[nb_ids[:, 1].numpy()]
        kernel_input = (neigh_point_coords - center_point_coords) / radius

        basis_weights_tf = tf.reshape(conv_layer._weights_tf[0],
                                      [dimension, hidden_size])
        basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size])

        basis_neighs = \
            tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) +\
            basis_biases_tf
        basis_neighs = tf.nn.leaky_relu(basis_neighs)

        _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])
        max_num_nb = tf.reduce_max(counts).numpy()

        with self.subTest(name='features'):

            def basis_proj_features(features_in):
                return basis_proj_tf(basis_neighs, features_in,
                                     neighborhood) / (max_num_nb)

            self.assert_jacobian_is_correct_fn(basis_proj_features,
                                               [np.float32(features)],
                                               atol=1e-4,
                                               delta=1e-3)

        with self.subTest(name='neigh_basis'):

            def basis_proj_basis_neighs(basis_neighs_in):
                return basis_proj_tf(basis_neighs_in, features,
                                     neighborhood) / (max_num_nb)

            self.assert_jacobian_is_correct_fn(basis_proj_basis_neighs,
                                               [np.float32(basis_neighs)],
                                               atol=1e-4,
                                               delta=1e-3)
    def compute_pdf(points_in):
      point_cloud = PointCloud(points_in, batch_ids, batch_size)
      grid = Grid(point_cloud, cell_sizes)

      point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size)
      neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
      neighborhood.compute_pdf(bandwidths, KDEMode.constant, normalize=True)
      # account for influence of neighborhood size
      _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])
      max_num_nb = tf.cast(tf.reduce_max(counts), tf.float32)
      return neighborhood._pdf / max_num_nb
    def transpose(self):
        """ Returns the transposed neighborhood where center and neighbor
        points are switched. (faster than recomputing)
        """
        if self._transposed is None:
            if self._equal_samples:
                self._transposed = self
            else:
                grid = Grid(self._point_cloud_sampled, self._radii)
                self._transposed = Neighborhood(grid, self._radii,
                                                self._grid._point_cloud)
        return self._transposed
    def test_local_pooling(self, num_points, num_samples, batch_size, radius,
                           dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, dimension)
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)

        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        neighbor_ids = neighborhood._original_neigh_ids.numpy()
        features_on_neighbors = features[neighbor_ids[:, 0]]

        #max pooling
        with self.subTest(name='max_pooling_to_sampled'):
            PoolLayer = MaxPooling()
            pool_tf = PoolLayer(features, point_cloud, point_cloud_samples,
                                cell_sizes)

            pool_numpy = np.empty([num_samples, dimension])
            for i in range(num_samples):
                pool_numpy[i] = np.max(
                    features_on_neighbors[neighbor_ids[:, 1] == i], axis=0)

            self.assertAllClose(pool_tf, pool_numpy)
            point_cloud.set_batch_shape([batch_size // 2, 2])
            padded = PoolLayer(features,
                               point_cloud,
                               point_cloud_samples,
                               cell_sizes,
                               return_padded=True)
            self.assertTrue(padded.shape.rank > 2)

        #average pooling
        with self.subTest(name='average_pooling_to_sampled'):
            PoolLayer = AveragePooling()
            pool_tf = PoolLayer(features, point_cloud, point_cloud_samples,
                                cell_sizes)

            pool_numpy = np.empty([num_samples, dimension])
            for i in range(num_samples):
                pool_numpy[i] = np.mean(
                    features_on_neighbors[neighbor_ids[:, 1] == i], axis=0)

            self.assertAllClose(pool_tf, pool_numpy)
Example #14
    def test_sampling_poisson_disk_on_uniform(self, num_points_sqrt, scale):
        points = utils._create_uniform_distributed_point_cloud_2D(
            num_points_sqrt, scale=scale)
        cell_sizes = scale * np.array([2, 2], dtype=np.float32) \
            / num_points_sqrt
        batch_ids = np.zeros([len(points)])
        point_cloud = PointCloud(points, batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes)
        sample_point_cloud, _ = sample(neighborhood, 'poisson')

        sampled_points = sample_point_cloud._points.numpy()
        expected_num_pts = num_points_sqrt**2 // 2
        self.assertTrue(len(sampled_points) == expected_num_pts)
Example #15
def poisson_disk_sampling(point_cloud,
                          radius=None,
                          neighborhood=None,
                          return_ids=False,
                          name=None):
  """ Poisson disk sampling of a point cloud.

  Note: Either `radius` or `neighborhood` must be provided.

  Args:
    point_cloud: A `PointCloud` instance.
    radius: A `float` or a `float` `Tensor` of shape `[D]`, the radius for the
      Poisson disk sampling.
    neighborhood: A `Neighborhood` instance.
    return_ids: A `bool`, if `True` returns the indices of the sampled points.
      (optional)

  Returns:
    A `PointCloud` instance.
    An `int` `Tensor` of shape `[S]`, if `return_ids` is `True`.

  Raises:
    ValueError: If no radius or neighborhood is given.

  """
  if radius is None and neighborhood is None:
    raise ValueError(
        "Missing Argument! Either radius or neighborhood must be given!")
  if neighborhood is None:
    # compute neighborhood
    radii = cast_to_num_dims(radius, point_cloud)
    grid = Grid(point_cloud, radii)
    neighborhood = Neighborhood(grid, radii)

  #Compute the sampling.
  sampled_points, sampled_batch_ids, sampled_indices = \
      sampling(neighborhood, 1)

  sampled_point_cloud = PointCloud(
      points=sampled_points, batch_ids=sampled_batch_ids,
      batch_size=neighborhood._point_cloud_sampled._batch_size)

  if return_ids:
    sampled_indices = tf.gather(neighborhood._grid._sorted_indices,
                                sampled_indices)
    return sampled_point_cloud, sampled_indices
  else:
    return sampled_point_cloud
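
A minimal usage sketch for `poisson_disk_sampling`, with illustrative data and the `PointCloud` import assumed from the rest of this page:

import numpy as np

points = np.random.rand(1000, 3).astype(np.float32)
batch_ids = np.zeros(1000)
point_cloud = PointCloud(points, batch_ids)

# Keep a subset of points that are at least 0.1 apart and also return the
# indices of the selected points in the original point cloud.
sampled_pc, sampled_ids = poisson_disk_sampling(point_cloud,
                                                radius=0.1,
                                                return_ids=True)
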
    def test_conv_rigid_jacobian_params(self, num_points, num_samples,
                                        num_features, batch_size, radius,
                                        num_kernel_points, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        point_cloud = PointCloud(points, batch_ids)
        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        conv_layer = KPConv(num_features[0], num_features[1],
                            num_kernel_points)

        features = np.random.rand(num_points, num_features[0])

        with self.subTest(name='features'):

            def conv_features(features_in):
                conv_result = conv_layer(features_in, point_cloud,
                                         point_cloud_samples, radius,
                                         neighborhood)
                return conv_result

            self.assert_jacobian_is_correct_fn(conv_features, [features],
                                               atol=1e-3,
                                               delta=1e-3)

        with self.subTest(name='weights'):

            def conv_weights(weights_in):
                conv_layer._weights = weights_in
                conv_result = conv_layer(features, point_cloud,
                                         point_cloud_samples, radius,
                                         neighborhood)
                return conv_result

            weights = conv_layer._weights
            self.assert_jacobian_is_correct_fn(conv_weights, [weights],
                                               atol=1e-3,
                                               delta=1e-3)
Example #17
    def test_neighbors_are_from_same_batch(self, batch_size, num_points,
                                           num_samples, radius, dimension):
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        samples, batch_ids_samples = utils._create_random_point_cloud_segmented(
            batch_size, num_samples, dimension=dimension)
        radius = np.float32(np.repeat([radius], dimension))

        point_cloud = PointCloud(points, batch_ids)
        point_cloud_samples = PointCloud(samples, batch_ids_samples)
        grid = Grid(point_cloud, radius)
        neighborhood = Neighborhood(grid, radius, point_cloud_samples)

        batch_ids_in = tf.gather(point_cloud._batch_ids,
                                 neighborhood._original_neigh_ids[:, 0])
        batch_ids_out = tf.gather(point_cloud_samples._batch_ids,
                                  neighborhood._original_neigh_ids[:, 1])
        batch_check = batch_ids_in == batch_ids_out
        self.assertTrue(np.all(batch_check))
Example #18
    def test_sampling_average_on_random(self, num_points, batch_size,
                                        cell_size, dimension):
        cell_sizes = np.repeat(cell_size, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points)
        #print(points.shape, batch_ids.shape)
        point_cloud = PointCloud(points=points, batch_ids=batch_ids)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes)
        sample_point_cloud, _ = sample(neighborhood, 'average')

        sampled_points_tf = sample_point_cloud._points.numpy()
        sorted_keys = neighborhood._grid._sorted_keys.numpy()
        sorted_points = neighborhood._grid._sorted_points.numpy()

        sampled_points_numpy = []
        cur_point = np.repeat(0.0, dimension)
        cur_key = -1
        cur_num_points = 0.0
        for pt_id, cur_key_point in enumerate(sorted_keys):
            if cur_key_point != cur_key:
                if cur_key != -1:
                    cur_point /= cur_num_points
                    sampled_points_numpy.append(cur_point)
                cur_key = cur_key_point
                cur_point = np.repeat(0.0, dimension)
                cur_num_points = 0.0
            cur_point += sorted_points[pt_id]
            cur_num_points += 1.0
        cur_point /= cur_num_points
        sampled_points_numpy.append(cur_point)

        equal = True
        for point_numpy in sampled_points_numpy:
            found = False
            for point_tf in sampled_points_tf:
                if np.all(np.abs(point_numpy - point_tf) < 0.0001):
                    found = True
            equal = equal and found
        self.assertTrue(equal)
def density_estimation(point_cloud,
                       bandwidth=0.2,
                       scaling=1.0,
                       mode=KDEMode.constant,
                       normalize=False,
                       name=None):
    """Method to compute the density distribution of a point cloud.

  Note: By default the returned density is not normalized.

  Args:
    point_cloud: A `PointCloud` instance.
    bandwidth: A `float` or a `float` `Tensor` of shape `[D]`, bandwidth
      used to compute the pdf. (optional)
    scaling: A `float` or a `float` `Tensor` of shape `[D]`, the points are
      divided by this value prior to the KDE.
    mode: `KDEMode`, mode used to determine the bandwidth. (optional)
    normalize: A `bool`, if `True` each value is divided by the size of the
      respective neighborhood. (optional)

  Returns:
    A `float` `Tensor` of shape `[N]`, the estimated densities.
  """

    bandwidth = cast_to_num_dims(bandwidth, point_cloud._dimension)
    scaling = cast_to_num_dims(scaling, point_cloud._dimension)
    if mode == KDEMode.no_pdf:
        pdf = tf.ones_like(point_cloud._points[:, 0], dtype=tf.float32)
    else:
        grid = Grid(point_cloud, scaling)
        pdf_neighbors, nb_ranges = \
            compute_neighborhoods(grid,
                                  scaling,
                                  return_ranges=True,
                                  return_sorted_ids=True)
        pdf_sorted = compute_pdf(grid, pdf_neighbors, nb_ranges, bandwidth,
                                 scaling, mode.value)
        unsorted_indices = tf.math.invert_permutation(grid._sorted_indices)
        pdf = tf.gather(pdf_sorted, unsorted_indices)
    if normalize:
        pdf = pdf / point_cloud._points.shape[0]
    return pdf
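
A minimal usage sketch for `density_estimation`, with illustrative data and the `PointCloud` import assumed from the rest of this page:

import numpy as np

points = np.random.rand(1000, 3).astype(np.float32)
batch_ids = np.zeros(1000)
point_cloud = PointCloud(points, batch_ids)

# One kernel density estimate per point (shape [1000]), using a grid of
# scale 0.2 to find neighbors and a KDE bandwidth of 0.2.
densities = density_estimation(point_cloud, bandwidth=0.2, scaling=0.2)
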
Example #20
    def test_find_neighbors(self, num_points, num_samples, batch_size, radius,
                            dimension):
        cell_sizes = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points)
        point_cloud = PointCloud(points, batch_ids)
        samples_points, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples * batch_size, dimension=dimension,
                sizes=np.ones(batch_size, dtype=int) * num_samples)
        point_cloud_sampled = PointCloud(samples_points, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_sampled)
        sorted_points = grid._sorted_points

        neighbors_tf = neighborhood._neighbors

        neighbors_numpy = [[] for i in range(num_samples * batch_size)]

        for k in range(batch_size):
            for i in range(num_samples):
                for j in range(num_points):
                    diffArray = (samples_points[i + k * num_samples] - \
                                 sorted_points[(batch_size - k - 1) * num_points + j])\
                                 / cell_sizes
                    if np.linalg.norm(diffArray) < 1.0:
                        neighbors_numpy[k * num_samples + i].append((batch_size - k - 1)\
                                                                    * num_points + j)

        allFound = True
        for neigh in neighbors_tf:
            found = False
            for ref_neigh in neighbors_numpy[neigh[1]]:
                if ref_neigh == neigh[0]:
                    found = True
            allFound = allFound and found
        self.assertTrue(allFound)
Example #21
def cell_average_sampling(point_cloud,
                          cell_sizes=None,
                          grid=None,
                          name=None):
  """ Cell average sampling of a point cloud.

  Note: Either `cell_sizes` or `grid` must be provided.

  Args:
    point_cloud: A `PointCloud` instance.
    cell_sizes: A `float` or a `float` `Tensor` of shape `[D]`, the cell sizes
      for the sampling.
    grid: A `Grid` instance.

  Returns:
    A `PointCloud` instance.

  Raises:
    ValueError: If no `cell_sizes` or grid is given.

  """
  if cell_sizes is None and grid is None:
    raise ValueError(
        "Missing Argument! Either cell_sizes or grid must be given!")
  if grid is None:
    # compute grid
    cell_sizes = cast_to_num_dims(cell_sizes, point_cloud)
    grid = Grid(point_cloud, cell_sizes)

  neighborhood = Neighborhood(grid, cell_sizes)

  #Compute the sampling.
  sampled_points, sampled_batch_ids, sampled_indices = \
      sampling(neighborhood, 0)

  sampled_point_cloud = PointCloud(
      points=sampled_points, batch_ids=sampled_batch_ids,
      batch_size=neighborhood._point_cloud_sampled._batch_size)

  return sampled_point_cloud
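
A minimal usage sketch for `cell_average_sampling`, with illustrative data and the `PointCloud` import assumed from the rest of this page:

import numpy as np

points = np.random.rand(1000, 3).astype(np.float32)
batch_ids = np.zeros(1000)
point_cloud = PointCloud(points, batch_ids)

# One averaged point per non-empty grid cell of edge length 0.1.
averaged_pc = cell_average_sampling(point_cloud, cell_sizes=0.1)
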
Example #22
  def __call__(self,
               features,
               point_cloud_in: PointCloud,
               point_cloud_out: PointCloud,
               conv_radius,
               neighborhood=None,
               kernel_influence_dist=None,
               return_sorted=False,
               return_padded=False,
               name=None):
    """ Computes the Kernel Point Convolution between two point clouds.

    Note:
      In the following, `A1` to `An` are optional batch dimensions.
      `C_in` is the number of input features.
      `C_out` is the number of output features.

    Args:
      features: A `float` `Tensor` of shape `[N1, C_in]` or
        `[A1, ..., An,V, C_in]`.
      point_cloud_in: A 'PointCloud' instance, on which the features are
        defined.
      point_cloud_out: A `PointCloud` instance, on which the output
        features are defined.
      conv_radius: A `float`, the convolution radius.
      neighborhood: A `Neighborhood` instance, defining the neighborhood
        with centers from `point_cloud_out` and neighbors in `point_cloud_in`.
        If `None` it is computed internally. (optional)
      kernel_influence_dist: A `float`, the influence distance of the kernel
        points. If `None` uses `conv_radius / 2.5`, as suggested in Section 3.3
        of the paper. (optional)
      return_sorted: A `boolean`, if `True` the output tensor is sorted
        according to the batch_ids. (optional)
      return_padded: A `bool`, if 'True' the output tensor is sorted and
        zero padded. (optional)

    Returns:
      A `float` `Tensor` of shape
        `[N2, C_out]`, if `return_padded` is `False`
      or
        `[A1, ..., An, V_out, C_out]`, if `return_padded` is `True`.

    """

    features = tf.cast(tf.convert_to_tensor(value=features),
                       dtype=tf.float32)
    features = _flatten_features(features, point_cloud_in)
    self._num_output_points = point_cloud_out._points.shape[0]

    if kernel_influence_dist is None:
      # normalized
      self._sigma = tf.constant(1.0)
    else:
      self._sigma = tf.convert_to_tensor(
        value=kernel_influence_dist / conv_radius, dtype=tf.float32)
    #Create the radii tensor.
    conv_radius = tf.reshape(tf.convert_to_tensor(value=conv_radius,
                                                  dtype=tf.float32),
                             [1, 1])
    radii_tensor = tf.repeat(conv_radius, self._num_dims)

    if neighborhood is None:
      #Compute the grid
      grid = Grid(point_cloud_in, radii_tensor)
      #Compute the neighborhoods
      neigh = Neighborhood(grid, radii_tensor, point_cloud_out)
    else:
      neigh = neighborhood

    #Compute kernel inputs.
    neigh_point_coords = tf.gather(
        point_cloud_in._points, neigh._original_neigh_ids[:, 0])
    center_point_coords = tf.gather(
        point_cloud_out._points, neigh._original_neigh_ids[:, 1])
    points_diff = (neigh_point_coords - center_point_coords) / \
        tf.reshape(radii_tensor, [1, self._num_dims])
    #Compute Monte-Carlo convolution
    convolution_result = self._kp_conv(points_diff, neigh, features)
    return _format_output(convolution_result,
                          point_cloud_out,
                          return_sorted,
                          return_padded)
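
A minimal usage sketch of this KPConv `__call__`, mirroring test_convolution further up; shapes and hyper-parameters are illustrative and the imports are assumed to match the rest of this page:

import numpy as np

points = np.random.rand(200, 3).astype(np.float32)
batch_ids = np.zeros(200)
samples = np.random.rand(40, 3).astype(np.float32)
batch_ids_samples = np.zeros(40)
features = np.random.rand(200, 8).astype(np.float32)

point_cloud = PointCloud(points, batch_ids)
point_cloud_samples = PointCloud(samples, batch_ids_samples)

# 8 input features, 16 output features, 15 kernel points.
conv_layer = KPConv(8, 16, 15)
# No Neighborhood instance is passed, so the grid and neighborhood are built
# internally from the convolution radius.
conv_result = conv_layer(features, point_cloud, point_cloud_samples, 0.1)
# conv_result has shape [40, 16].
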
Example #23
    def __call__(self,
                 features,
                 point_cloud_in: PointCloud,
                 point_cloud_out: PointCloud,
                 radius,
                 neighborhood=None,
                 bandwidth=0.2,
                 return_sorted=False,
                 return_padded=False,
                 name=None):
        """ Computes the Monte-Carlo Convolution between two point clouds.

    Note:
      In the following, `A1` to `An` are optional batch dimensions.
      `C_in` is the number of input features.
      `C_out` is the number of output features.

    Args:
      features: A `float` `Tensor` of shape `[N_in, C_in]` or
        `[A1, ..., An,V, C_in]`.
      point_cloud_in: A 'PointCloud' instance, on which the features are
        defined.
      point_cloud_out: A `PointCloud` instance, on which the output
        features are defined.
      radius: A `float`, the convolution radius.
      neighborhood: A `Neighborhood` instance, defining the neighborhood
        with centers from `point_cloud_out` and neighbors in `point_cloud_in`.
        If `None` it is computed internally. (optional)
      bandwidth: A `float`, the bandwidth used in the kernel density
        estimation on the input point cloud. (optional)
      return_sorted: A `boolean`, if `True` the output tensor is sorted
        according to the batch_ids. (optional)
      return_padded: A `bool`, if 'True' the output tensor is sorted and
        zero padded. (optional)

    Returns:
      A `float` `Tensor` of shape
        `[N_out, C_out]`, if `return_padded` is `False`
      or
        `[A1, ..., An, V_out, C_out]`, if `return_padded` is `True`.

    """

        features = tf.cast(tf.convert_to_tensor(value=features),
                           dtype=tf.float32)

        tf.assert_equal(tf.shape(features)[-1], self._num_features_in)

        features = _flatten_features(features, point_cloud_in)

        #Create the radii tensor.
        radius = tf.reshape(
            tf.convert_to_tensor(value=radius, dtype=tf.float32), [1, 1])
        radii_tensor = tf.repeat(radius, self._num_dims)
        #Create the bandwidth tensor.
        bwTensor = tf.repeat(bandwidth, self._num_dims)

        if neighborhood is None:
            #Compute the grid
            grid = Grid(point_cloud_in, radii_tensor)
            #Compute the neighborhoods
            neigh = Neighborhood(grid, radii_tensor, point_cloud_out)
        else:
            neigh = neighborhood
        pdf = neigh.get_pdf(bandwidth=bwTensor, mode=KDEMode.constant)

        #Compute kernel inputs.
        neigh_point_coords = tf.gather(point_cloud_in._points,
                                       neigh._original_neigh_ids[:, 0])
        center_point_coords = tf.gather(point_cloud_out._points,
                                        neigh._original_neigh_ids[:, 1])
        points_diff = (neigh_point_coords - center_point_coords) / \
            tf.reshape(radii_tensor, [1, self._num_dims])

        #Compute Monte-Carlo convolution
        convolution_result = self._monte_carlo_conv(points_diff, neigh, pdf,
                                                    features,
                                                    self._non_linearity_type)

        return _format_output(convolution_result, point_cloud_out,
                              return_sorted, return_padded)
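
A minimal usage sketch of this MCConv `__call__`, following the constructor arguments used in the tests on this page (input features, output features, dimension, number of kernels, hidden MLP sizes); shapes and values are illustrative:

import numpy as np

points = np.random.rand(200, 3).astype(np.float32)
batch_ids = np.zeros(200)
samples = np.random.rand(40, 3).astype(np.float32)
batch_ids_samples = np.zeros(40)
features = np.random.rand(200, 8).astype(np.float32)

point_cloud = PointCloud(points, batch_ids)
point_cloud_samples = PointCloud(samples, batch_ids_samples)

# 8 input features, 16 output features, 3D points, one kernel MLP with 8 hidden units.
conv_layer = MCConv(8, 16, 3, 1, [8])
# The neighborhood and per-point density (pdf) are computed internally because
# no Neighborhood instance is passed.
conv_result = conv_layer(features, point_cloud, point_cloud_samples, 0.1)
# conv_result has shape [40, 16].
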
Example #24
    def test_compute_pdf_tf(self, batch_size, num_points,
                            num_samples_per_batch, cell_size, bandwidth,
                            dimension):
        cell_sizes = np.float32(np.repeat(cell_size, dimension))
        bandwidths = np.float32(np.repeat(bandwidth, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            batch_size * num_points,
            dimension,
            equal_sized_batches=True)
        samples = np.full((batch_size * num_samples_per_batch, dimension),
                          0.0,
                          dtype=float)
        for i in range(batch_size):
            cur_choice = np.random.choice(num_points,
                                          num_samples_per_batch,
                                          replace=True)
            samples[num_samples_per_batch * i:num_samples_per_batch * (i + 1), :] = \
                points[cur_choice + i * num_points]
        samples_batch_ids = np.repeat(np.arange(0, batch_size),
                                      num_samples_per_batch)

        point_cloud = PointCloud(points, batch_ids, batch_size)
        grid = Grid(point_cloud, cell_sizes)

        point_cloud_samples = PointCloud(samples, samples_batch_ids,
                                         batch_size)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        neighbor_ids = neighborhood._neighbors
        pdf_neighbors = Neighborhood(grid, cell_sizes)
        pdf_tf = compute_pdf_tf(pdf_neighbors, bandwidths, KDEMode.constant)
        pdf_tf = tf.gather(pdf_tf, neighbor_ids[:, 0])

        sorted_points = grid._sorted_points.numpy()
        sorted_batch_ids = grid._sorted_batch_ids.numpy()
        neighbor_ids = neighborhood._neighbors

        pdf_real = []
        accum_points = []
        prev_batch_i = -1
        for pt_i, batch_i in enumerate(sorted_batch_ids):
            if batch_i != prev_batch_i:
                if len(accum_points) > 0:
                    test_points = np.array(accum_points)
                    kde_skl = KernelDensity(bandwidth=bandwidth)
                    kde_skl.fit(test_points)
                    log_pdf = kde_skl.score_samples(test_points)
                    pdf = np.exp(log_pdf)
                    if len(pdf_real) > 0:
                        pdf_real = np.concatenate((pdf_real, pdf), axis=0)
                    else:
                        pdf_real = pdf
                accum_points = [sorted_points[pt_i] / cell_size]
                prev_batch_i = batch_i
            else:
                accum_points.append(sorted_points[pt_i] / cell_size)

        test_points = np.array(accum_points)
        kde_skl = KernelDensity(bandwidth=bandwidth)
        kde_skl.fit(test_points)
        log_pdf = kde_skl.score_samples(test_points)
        pdf = np.exp(log_pdf)
        if len(pdf_real) > 0:
            pdf_real = np.concatenate((pdf_real, pdf), axis=0)
        else:
            pdf_real = pdf

        pdf_tf = np.asarray(pdf_tf / float(len(accum_points)))
        pdf_skl = np.asarray(pdf_real)[neighbor_ids[:, 0]]
        self.assertAllClose(pdf_tf, pdf_skl)
    def test_basis_proj(self, num_points, num_samples, num_features,
                        batch_size, radius, hidden_size, dimension):
        cell_sizes = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size, num_points, dimension=dimension)
        features = np.random.rand(num_points, num_features[0])
        point_cloud = PointCloud(points, batch_ids)

        point_samples, batch_ids_samples = \
            utils._create_random_point_cloud_segmented(
                batch_size, num_samples, dimension=dimension)

        point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
        grid = Grid(point_cloud, cell_sizes)
        neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
        nb_ids = neighborhood._original_neigh_ids
        # tf
        conv_layer = MCConv(num_features[0], num_features[1], dimension, 1,
                            [hidden_size])

        basis_weights_tf = tf.reshape(conv_layer._weights_tf[0],
                                      [dimension, hidden_size])
        basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size])

        neigh_point_coords = points[nb_ids[:, 0]]
        center_point_coords = point_samples[nb_ids[:, 1]]
        kernel_input = (neigh_point_coords - center_point_coords) / radius
        basis_neighs = \
            tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) + \
            basis_biases_tf
        basis_neighs = tf.nn.relu(basis_neighs)

        weighted_latent_per_sample_tf = basis_proj_tf(basis_neighs, features,
                                                      neighborhood)

        # numpy
        neighbor_ids = neighborhood._original_neigh_ids.numpy()
        nb_ranges = neighborhood._samples_neigh_ranges.numpy()
        # extract variables
        hidden_weights = basis_weights_tf.numpy()
        hidden_biases = basis_biases_tf.numpy()

        features_on_neighbors = features[neighbor_ids[:, 0]]
        # compute first layer of kernel MLP
        point_diff = (points[neighbor_ids[:, 0]] -\
                      point_samples[neighbor_ids[:, 1]])\
            / np.expand_dims(cell_sizes, 0)

        latent_per_nb = np.dot(point_diff, hidden_weights) + hidden_biases

        latent_relu_per_nb = np.maximum(latent_per_nb, 0)
        # Monte-Carlo integration after first layer
        # weighting with pdf
        weighted_features_per_nb = np.expand_dims(features_on_neighbors, 2) * \
            np.expand_dims(latent_relu_per_nb, 1)
        nb_ranges = np.concatenate(([0], nb_ranges), axis=0)
        # sum (integration)
        weighted_latent_per_sample = \
            np.zeros([num_samples, num_features[0], hidden_size])
        for i in range(num_samples):
            weighted_latent_per_sample[i] = \
                np.sum(weighted_features_per_nb[nb_ranges[i]:nb_ranges[i + 1]],
                       axis=0)

        self.assertAllClose(weighted_latent_per_sample_tf,
                            weighted_latent_per_sample,
                            atol=1e-3)