コード例 #1
0
def farthest_pick_knn(points: torch.Tensor, nqueries: int, K: int):
    """Sample query points by farthest point sampling (FPS), then find their KNN.

    Args:
        points: (bs, dim, nx) batch of point clouds.
        nqueries: number of query points to sample per batch element.
        K: number of nearest neighbors to find for each query point.

    Returns:
        indices_queries: (bs, nqueries) indices of the sampled points
            (indices into the flattened (bs * nx) point list, as produced
            by `fps`).
        indices_knn: KNN indices as returned by `knn`.
        points_queries: (bs, dim, nqueries) coordinates of the sampled points.
    """
    # Delegate to the compiled C extension when it is available.
    if knn_c_func_spec is not None:
        return knn_c_func.farthest_pick_knn(points, nqueries, K)

    bs, dim, nx = points.shape

    # `fps` takes a sampling ratio rather than an absolute count.
    ratio = nqueries / nx

    # Batch-assignment vector required by fps: element i of the flattened
    # point list belongs to batch batch_x[i].
    batch_x = torch.arange(0, bs, dtype=torch.long,
                           device=points.device).unsqueeze(1).expand(bs, nx)

    # Flatten to a (bs * nx, dim) point list.
    x = points.transpose(1, 2).reshape(-1, dim)
    batch_x = batch_x.view(-1)

    indices_queries = fps(x, batch_x, ratio)

    points_queries = x[indices_queries]

    indices_queries = indices_queries.view(bs, -1)
    # Bug fix: use `dim` instead of the hard-coded 3 so that point clouds
    # with a feature dimension other than 3 reshape correctly.
    points_queries = points_queries.view(bs, -1, dim)
    points_queries = points_queries.transpose(1, 2)

    assert (indices_queries.shape[1] == nqueries)

    indices_knn = knn(points, points_queries, K)

    return indices_queries, indices_knn, points_queries
コード例 #2
0
def random_pick_knn(points: torch.Tensor, nqueries: int, K: int):
    """Sample query points uniformly at random, then find their KNN.

    Args:
        points: (bs, dim, nx) batch of point clouds.
        nqueries: number of query points to sample per batch element
            (must be <= nx).
        K: number of nearest neighbors to find for each query point.

    Returns:
        indices_queries: (bs, nqueries) per-batch indices of the sampled points.
        indices_knn: KNN indices as returned by `knn`.
        points_queries: (bs, dim, nqueries) coordinates of the sampled points.
    """
    # Delegate to the compiled C extension when it is available.
    if knn_c_func_spec is not None:
        return knn_c_func.random_pick_knn(points, nqueries, K)

    bs, dim, nx = points.shape

    indices_queries = []
    points_queries = []

    for b_id in range(bs):
        # Fix: build the permutation on the same device as `points` so the
        # returned indices live on the input device, consistent with
        # farthest_pick_knn (the original left them on the CPU).
        indices_queries_ = torch.randperm(nx, device=points.device)[:nqueries]
        indices_queries.append(indices_queries_)

        # (dim, nx) -> (nx, dim) so we can gather rows by point index.
        x = points[b_id].transpose(0, 1)
        points_queries.append(x[indices_queries_])

    indices_queries = torch.stack(indices_queries, dim=0)
    points_queries = torch.stack(points_queries, dim=0)
    # (bs, nqueries, dim) -> (bs, dim, nqueries)
    points_queries = points_queries.transpose(1, 2)

    indices_knn = knn(points, points_queries, K)

    return indices_queries, indices_knn, points_queries
コード例 #3
0
    def __call__(self, points, support_points=None):
        """Compute support points (if not given) and KNN indices for `points`.

        Args:
            points: (bs, dim, n) tensor of point coordinates.
            support_points: optional precomputed support points; when given,
                only the KNN indices are computed.

        Returns:
            (indices, support_points): KNN indices (moved to CUDA when
            `points` is on CUDA) and the support point coordinates.
        """

        # Identity case: stride 1 with no fixed point count means the input
        # points are their own support points (KNN is still computed below).
        if support_points is None and self.stride == 1 and (self.npoints is None):
            support_points = points

        if support_points is None:
            # no support points have been given
            points = points.contiguous()
            if self.stride > 1 or self.stride == 1 and (self.npoints is None):
                # Derive the support point count from the stride; keep at
                # least one point.
                support_point_number = max(1, int(points.shape[2]) // self.stride)
            else:
                # Fixed target number of support points.
                support_point_number = self.npoints
            # Support point selection and KNN are done on a detached CPU copy
            # (presumably the extension is CPU-only — confirm).
            support_points_ids, indices, _ = nearest_neighbors.convpoint_pick_knn(
                points.cpu().detach(), support_point_number, self.K
            )

            support_points_ids = support_points_ids.contiguous().long()
            indices = indices.contiguous().long()

            # Move the results back to the input device.
            if points.is_cuda:
                indices = indices.cuda()
                support_points_ids = support_points_ids.cuda()
            # Gather support point coordinates: (bs, dim, n) -> (bs, n, dim)
            # for index_select along the point axis, then back to
            # (bs, dim, n_support).
            support_points = batched_index_select(
                points.transpose(1, 2), dim=1, index=support_points_ids
            ).transpose(1, 2)

            return indices, support_points
        else:
            # support points are known, only compute the knn
            indices = nearest_neighbors.knn(
                points.cpu().detach(), support_points.cpu().detach(), self.K
            )
            if points.is_cuda:
                indices = indices.cuda()
            return indices, support_points
コード例 #4
0
ファイル: test.py プロジェクト: valeoai/FKAConv
def nearest_correspondance(pts_src, pts_dest, data_src, K=1):
    """Propagate per-point data from source points to destination points.

    Each destination point takes the data of its nearest source point
    (K == 1) or the mean of its K nearest source points' data.

    Args:
        pts_src: (dim, n_src) source point coordinates.
        pts_dest: (dim, n_dest) destination point coordinates.
        data_src: (C, n_src) per-source-point data — assumed channel-first,
            matching the transpose in the K == 1 branch.
        K: number of nearest neighbors to use.

    Returns:
        (C, n_dest) data transferred to the destination points.
    """
    pts_src = pts_src.unsqueeze(0).cpu().clone()
    pts_dest = pts_dest.unsqueeze(0).cpu().clone()
    # Keep the full (1, n_dest, K) index tensor; the original collapsed it to
    # 1-D up front, which broke the K > 1 branch (the old TODO).
    indices = knn(pts_src, pts_dest, K)
    if K == 1:
        indices = indices[0, :, 0]
        data_dest = data_src.transpose(0, 1)[indices].transpose(0, 1)
    else:
        # Fix for the old "TODO fix that": gather all K neighbors along the
        # point dimension of data_src and average them.
        # data_src[:, (n_dest, K)] -> (C, n_dest, K) -> mean over K.
        data_dest = data_src[:, indices[0]].mean(2)
    return data_dest
コード例 #5
0
def nearest_correspondance(pts_src, pts_dest, data_src, K=1):
    """Transfer per-point data from source points to destination points.

    Every destination point receives the data of its nearest source point
    (K == 1), or the average over its K nearest source points. The KNN
    search runs on detached CPU copies of the coordinates.
    """
    src = pts_src.unsqueeze(0).cpu().clone()
    dest = pts_dest.unsqueeze(0).cpu().clone()
    neighbors = knn(src, dest, K)

    if K == 1:
        # Single nearest neighbor: plain row gather on the point axis.
        nearest = neighbors[0, :, 0]
        return data_src.transpose(0, 1)[nearest].transpose(0, 1)

    # K > 1: gather all neighbors per destination point, then average.
    gathered = batched_index_select(data_src.unsqueeze(0).cpu(), 2, neighbors)
    return gathered.mean(3)[0]
コード例 #6
0
    def forward(self, input, points, support_points, indices=None):
        """Forward function of the layer.

        Gathers, for every support point, the features of its single nearest
        neighbor among `points`.

        Args:
            input: feature tensor to gather from (indexed along dim 2),
                or None to skip feature computation.
            points: input point coordinates.
            support_points: support point coordinates.
            indices: optional precomputed 1-NN indices; computed here if None.

        Returns:
            (features, support_points, indices); `features` is None when
            `input` is None.
        """

        # No indices given: compute the 1-NN of each support point on a
        # detached CPU copy (presumably the extension is CPU-only — confirm).
        if indices is None:
            indices = nearest_neighbors.knn(points.cpu().detach(),
                                            support_points.cpu().detach(), 1)
            if points.is_cuda:
                indices = indices.cuda()

        if input is None:
            # input is None: do not compute features
            return None, support_points, indices
        else:
            # compute the features
            indices = indices.clone()

            # get the features and point coordinates associated with the indices
            features = self.batched_index_select(input, dim=2,
                                                 index=indices).contiguous()

            # K == 1 here, so drop the trailing neighbor dimension.
            return features.squeeze(3), support_points, indices
コード例 #7
0
def quantized_pick_knn(points: torch.Tensor, nqueries: int, K: int):
    """Select query points by progressive voxel-grid subsampling, then KNN.

    For each batch element, one point per occupied voxel is kept; the voxel
    size is then halved and the process repeated on the remaining points
    until `nqueries` points have been selected.

    Args:
        points: (bs, dim, nx) batch of point clouds.
        nqueries: number of query points to select per batch element.
        K: number of nearest neighbors per query point.

    Returns:
        indices_queries: (bs, nqueries) per-batch indices of selected points.
        indices_knn: KNN indices as returned by `knn`.
        points_queries: (bs, dim, nqueries) coordinates of selected points.
    """

    # Delegate to the compiled C extension when it is available.
    if knn_c_func_spec is not None:
        return knn_c_func.quantized_pick_knn(points, nqueries, K)

    bs, dim, nx = points.shape

    # Per-batch bounding box corners, shape (bs, dim).
    mini = points.min(dim=2)[0]
    maxi = points.max(dim=2)[0]

    # Heuristic starting voxel size: bounding-box diagonal / sqrt(nqueries).
    initial_voxel_size = (maxi - mini).norm(2, dim=1) / math.sqrt(nqueries)

    indices_queries = []
    points_queries = []

    for b_id in range(bs):
        voxel_size = initial_voxel_size[b_id]
        x = points[b_id].transpose(0, 1)

        b_selected_points = []
        count = 0

        # x_ids maps rows of the shrinking `x` back to original point indices.
        # NOTE(review): created on the default device — presumably fine
        # because indexing below carries it along; confirm for CUDA inputs.
        x_ids = torch.arange(x.shape[0])

        while (True):
            # Single-batch assignment vector required by voxel_grid.
            batch_x = torch.zeros(x.shape[0],
                                  device=points.device,
                                  dtype=torch.long)

            # One representative index per occupied voxel.
            voxel_ids = voxel_grid(x, batch_x, voxel_size)
            _, unique_indices = unique(voxel_ids)

            if count + unique_indices.shape[0] >= nqueries:
                # Enough points this round: shuffle, then truncate so the
                # total is exactly nqueries.
                unique_indices = unique_indices[torch.randperm(
                    unique_indices.shape[0])]
                b_selected_points.append(x_ids[unique_indices[:nqueries -
                                                              count]])
                count += unique_indices.shape[0]
                break

            b_selected_points.append(x_ids[unique_indices])
            count += unique_indices.shape[0]

            # Remove the selected points and refine: halve the voxel size so
            # the next pass yields more (finer) candidates.
            select = torch.ones(x.shape[0], dtype=torch.bool, device=x.device)
            select[unique_indices] = False
            x = x[select]
            x_ids = x_ids[select]
            voxel_size /= 2

        b_selected_points = torch.cat(b_selected_points, dim=0)
        indices_queries.append(b_selected_points)

        points_queries.append(points[b_id].transpose(0, 1)[b_selected_points])

    indices_queries = torch.stack(indices_queries, dim=0)
    points_queries = torch.stack(points_queries, dim=0)
    # (bs, nqueries, dim) -> (bs, dim, nqueries)
    points_queries = points_queries.transpose(1, 2)

    indices_knn = knn(points, points_queries, K)

    return indices_queries, indices_knn, points_queries