Example #1
    def test_compute_keys_with_sort(self, num_points, batch_size, scale,
                                    radius, dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=False)
        point_cloud = PointCloud(points, batch_ids)
        aabb = point_cloud.get_AABB()
        grid = Grid(point_cloud, radius)

        total_num_cells = grid._num_cells.numpy()
        aabb_min = aabb._aabb_min.numpy()

        aabb_min_per_point = aabb_min[batch_ids, :]
        cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int)
        cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension),
                              total_num_cells)
        cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells)))
        cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0)
        keys = batch_ids * cell_multiplier[0] + \
            np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1)
        # check unsorted keys
        self.assertAllEqual(grid._cur_keys, keys)

        # sort descending
        sorted_keys = np.flip(np.sort(keys))
        # check if the cell keys per point are equal
        self.assertAllEqual(grid._sorted_keys, sorted_keys)
Example #2
    def test_compute_keys_tf(self, num_points, batch_size, scale, radius,
                             dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=False)
        point_cloud = PointCloud(points, batch_ids)

        #Compute the number of cells in the grid.
        aabb = point_cloud.get_AABB()
        aabb_sizes = aabb._aabb_max - aabb._aabb_min
        batch_num_cells = tf.cast(tf.math.ceil(aabb_sizes / radius), tf.int32)
        total_num_cells = tf.maximum(tf.reduce_max(batch_num_cells, axis=0), 1)

        keys_tf = compute_keys_tf(point_cloud, total_num_cells, radius)
        aabb_min = aabb._aabb_min.numpy()

        aabb_min_per_point = aabb_min[batch_ids, :]
        cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int)
        cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension),
                              total_num_cells)
        cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells)))
        cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0)
        keys = batch_ids * cell_multiplier[0] + \
            np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1)
        # check unsorted keys
        self.assertAllEqual(keys_tf, keys)
Example #3
def compute_keys_tf(point_cloud: PointCloud, num_cells, cell_size, name=None):
    """ Computes the regular grid cell keys of a point cloud.

    For a point in cell `c` the key is computed as
        \\(key = batch\\_id \\cdot \\prod_{d=0}^{D} num\\_cells_{d} + \\)
        \\(\\sum_{d=0}^{D} c_{d} \\prod_{d'=d+1}^{D} num\\_cells_{d'} \\).

    Args:
      point_cloud: A `PointCloud` instance.
      num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells
        per dimension.
      cell_size: A `float` `Tensor` of shape `[D]`, the cell sizes per
        dimension.
      name: An optional name for the operation.

    Returns:
      An `int64` `Tensor` of shape `[N]`, the keys per point.
    """
    aabb = point_cloud.get_AABB()
    aabb_min_per_batch = aabb._aabb_min
    aabb_min_per_point = tf.gather(aabb_min_per_batch, point_cloud._batch_ids)
    cell_ind = tf.math.floor(
        (point_cloud._points - aabb_min_per_point) / cell_size)
    cell_ind = tf.cast(cell_ind, tf.int32)
    cell_ind = tf.minimum(tf.maximum(cell_ind, tf.zeros_like(cell_ind)),
                          num_cells)
    cell_multiplier = tf.math.cumprod(num_cells, reverse=True)
    cell_multiplier = tf.concat((cell_multiplier, [1]), axis=0)
    keys = point_cloud._batch_ids * cell_multiplier[0] + \
        tf.math.reduce_sum(cell_ind * tf.reshape(cell_multiplier[1:], [1, -1]),
                           axis=1)
    return tf.cast(keys, tf.int64)
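
A small worked example makes the key formula concrete. The snippet below is
a sketch, not part of the library: it assumes eager execution, the
`compute_keys_tf` above, and the `PointCloud` class used in the tests. The
point at the origin anchors the AABB minimum of the batch, so the second
point lands in cell c = (2, 3) of a 4 x 5 grid:

import numpy as np
import tensorflow as tf

points = np.array([[0.0, 0.0], [2.5, 3.5]], dtype=np.float32)
batch_ids = np.array([0, 0])
point_cloud = PointCloud(points, batch_ids)
num_cells = tf.constant([4, 5], tf.int32)        # 4 x 5 cell grid
cell_size = tf.constant([1.0, 1.0], tf.float32)  # unit cells
keys = compute_keys_tf(point_cloud, num_cells, cell_size)
# Second point: key = batch_id * (4 * 5) + 2 * 5 + 3 = 13.
print(keys.numpy())  # keys 0 and 13 (int64)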
Example #4
    def __init__(self,
                 point_cloud: PointCloud,
                 cell_sizes,
                 sample_mode='poisson',
                 name=None):
        #Initialize the attributes.
        self._aabb = point_cloud.get_AABB()
        self._point_clouds = [point_cloud]
        self._cell_sizes = []
        self._neighborhoods = []

        self._dimension = point_cloud._dimension
        self._batch_shape = point_cloud._batch_shape

        #Create the different sampling operations.
        cur_point_cloud = point_cloud
        for sample_iter, cur_cell_sizes in enumerate(cell_sizes):
            cur_cell_sizes = tf.convert_to_tensor(value=cur_cell_sizes,
                                                  dtype=tf.float32)

            # Check if the cell size is defined for all the dimensions.
            # If not, the last cell size value is tiled until all the dimensions
            # have a value.
            cur_num_dims = tf.gather(cur_cell_sizes.shape, 0)
            cur_cell_sizes = tf.cond(
                cur_num_dims < self._dimension, lambda: tf.concat(
                    (cur_cell_sizes,
                     tf.tile(
                         tf.gather(cur_cell_sizes, [
                             cur_num_dims - 1
                         ]), [self._dimension - cur_num_dims])),
                    axis=0), lambda: cur_cell_sizes)
            tf.assert_greater(
                self._dimension + 1,
                cur_num_dims,
                f'Too many dimensions in cell sizes {cur_num_dims} ' + \
                f'instead of max. {self._dimension}')
            # old version, does not run in graph mode
            # if cur_num_dims < self._dimension:
            #   cur_cell_sizes = tf.concat((cur_cell_sizes,
            #                  tf.tile(tf.gather(cur_cell_sizes,
            #                                    [tf.rank(cur_cell_sizes) - 1]),
            #                    [self._dimension - cur_num_dims])),
            #                       axis=0)
            # if cur_num_dims > self._dimension:
            #   raise ValueError(
            #       f'Too many dimensions in cell sizes {cur_num_dims} ' + \
            #       f'instead of max. {self._dimension}')

            self._cell_sizes.append(cur_cell_sizes)

            #Create the sampling operation.
            cur_grid = Grid(cur_point_cloud, cur_cell_sizes, self._aabb)
            cur_neighborhood = Neighborhood(cur_grid, cur_cell_sizes)
            cur_point_cloud, _ = sample(cur_neighborhood, sample_mode)

            self._neighborhoods.append(cur_neighborhood)
            cur_point_cloud.set_batch_shape(self._batch_shape)
            self._point_clouds.append(cur_point_cloud)
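
This `__init__` appears to belong to a multi-resolution point hierarchy
class; the class name is not shown in this excerpt, so `PointHierarchy`
below is an assumption. A minimal usage sketch under that assumption:

import numpy as np

points = np.random.rand(2000, 3).astype(np.float32)  # hypothetical data
batch_ids = np.repeat([0, 1], 1000)
point_cloud = PointCloud(points, batch_ids)

# Two pooling levels; a single cell size per level is tiled to all three
# dimensions by the constructor above.
hierarchy = PointHierarchy(point_cloud, [[0.1], [0.2]], 'poisson')
# hierarchy._point_clouds holds the input cloud plus one cloud per level.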
Example #5
    def test_grid_datastructure(self, num_points, batch_size, scale, radius,
                                dimension):
        radius = np.float32(np.repeat(radius, dimension))
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=True)
        point_cloud = PointCloud(points, batch_ids)
        #Compute the number of cells in the grid.
        aabb = point_cloud.get_AABB()
        aabb_sizes = aabb._aabb_max - aabb._aabb_min
        batch_num_cells = tf.cast(tf.math.ceil(aabb_sizes / radius), tf.int32)
        total_num_cells = tf.maximum(tf.reduce_max(batch_num_cells, axis=0), 1)
        keys = compute_keys(point_cloud, total_num_cells, radius)
        keys = tf.sort(keys, direction='DESCENDING')
        ds_tf = build_grid_ds_tf(keys, total_num_cells, batch_size)

        keys = keys.numpy()
        ds_numpy = np.full(
            (batch_size, total_num_cells[0], total_num_cells[1], 2), 0)
        if dimension == 2:
            cells_per_2D_cell = 1
        elif dimension > 2:
            cells_per_2D_cell = np.prod(total_num_cells[2:])
        for key_iter, key in enumerate(keys):
            curDSIndex = key // cells_per_2D_cell
            yIndex = curDSIndex % total_num_cells[1]
            auxInt = curDSIndex // total_num_cells[1]
            xIndex = auxInt % total_num_cells[0]
            curbatch_ids = auxInt // total_num_cells[0]

            if key_iter == 0:
                ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter
            else:
                prevKey = keys[key_iter - 1]
                prevDSIndex = prevKey // cells_per_2D_cell
                if prevDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter

            nextIter = key_iter + 1
            if nextIter >= len(keys):
                ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys)
            else:
                nextKey = keys[key_iter + 1]
                nextDSIndex = nextKey // cells_per_2D_cell
                if nextDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1

        # check if the data structure is equal
        self.assertAllEqual(ds_tf, ds_numpy)
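
For reference, each entry `ds[b, x, y]` built above holds the half-open
range `[start, end)` into the descending-sorted key array for the 2D cell
`(x, y)` of batch `b` (cells without points keep `[0, 0)`). A minimal
read-back sketch using the variables of this test, eager execution assumed:

start, end = ds_numpy[0, 0, 0]
keys_in_cell = keys[start:end]  # all sorted keys that fall in that cell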
Example #6
    def test_grid_datastructure(self, num_points, batch_size, scale, radius,
                                dimension):
        radius = np.repeat(radius, dimension)
        points, batch_ids = utils._create_random_point_cloud_segmented(
            batch_size,
            num_points * batch_size,
            dimension=dimension,
            sizes=np.ones(batch_size, dtype=int) * num_points,
            clean_aabb=True)
        point_cloud = PointCloud(points, batch_ids)
        aabb = point_cloud.get_AABB()
        grid = Grid(point_cloud, radius, aabb)

        total_num_cells = grid._num_cells.numpy()
        keys = grid._sorted_keys.numpy()
        ds_numpy = np.full(
            (batch_size, total_num_cells[0], total_num_cells[1], 2), 0)
        if dimension == 2:
            cells_per_2D_cell = 1
        elif dimension > 2:
            cells_per_2D_cell = np.prod(total_num_cells[2:])
        for key_iter, key in enumerate(keys):
            curDSIndex = key // cells_per_2D_cell
            yIndex = curDSIndex % total_num_cells[1]
            auxInt = curDSIndex // total_num_cells[1]
            xIndex = auxInt % total_num_cells[0]
            curbatch_ids = auxInt // total_num_cells[0]

            if key_iter == 0:
                ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter
            else:
                prevKey = keys[key_iter - 1]
                prevDSIndex = prevKey // cells_per_2D_cell
                if prevDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter

            nextIter = key_iter + 1
            if nextIter >= len(keys):
                ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys)
            else:
                nextKey = keys[key_iter + 1]
                nextDSIndex = nextKey // cells_per_2D_cell
                if nextDSIndex != curDSIndex:
                    ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1

        # check if the data structure is equal
        self.assertAllEqual(grid.get_DS(), ds_numpy)
Example #7
    def __init__(self,
                 point_cloud: PointCloud,
                 cell_sizes,
                 aabb=None,
                 name=None):
        cell_sizes = tf.cast(tf.convert_to_tensor(value=cell_sizes),
                             tf.float32)
        if cell_sizes.shape == [] or cell_sizes.shape[0] == 1:
            cell_sizes = tf.repeat(cell_sizes, point_cloud._dimension)
        #Save the attributes.
        self._batch_size = point_cloud._batch_size_numpy
        self._cell_sizes = cell_sizes
        self._point_cloud = point_cloud
        self._aabb = point_cloud.get_AABB()
        #Compute the number of cells in the grid.
        aabb_sizes = self._aabb._aabb_max - self._aabb._aabb_min
        batch_num_cells = tf.cast(tf.math.ceil(aabb_sizes / self._cell_sizes),
                                  tf.int32)
        self._num_cells = tf.maximum(tf.reduce_max(batch_num_cells, axis=0), 1)

        #Compute the key for each point.
        self._cur_keys = compute_keys(self._point_cloud, self._num_cells,
                                      self._cell_sizes)

        #Sort the keys.
        self._sorted_indices = tf.argsort(self._cur_keys,
                                          direction='DESCENDING')
        self._sorted_keys = tf.gather(self._cur_keys, self._sorted_indices)

        #Get the sorted points and batch ids.
        self._sorted_points = tf.gather(self._point_cloud._points,
                                        self._sorted_indices)
        self._sorted_batch_ids = tf.gather(self._point_cloud._batch_ids,
                                           self._sorted_indices)

        self._fast_DS = None
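
A minimal usage sketch for this constructor (hypothetical values, eager
execution assumed), mirroring how the tests above build a grid:

import numpy as np

points = np.random.rand(100, 3).astype(np.float32)  # hypothetical data
batch_ids = np.repeat([0, 1], 50)
point_cloud = PointCloud(points, batch_ids)
grid = Grid(point_cloud, 0.25)  # a scalar cell size is tiled to all dims
print(grid._num_cells.numpy())    # number of cells per dimension
print(grid._sorted_keys.numpy())  # per-point keys, sorted descending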