Example #1
def FPS_cuda(points, npoint):
    """Run furthest point sampling on the GPU; points is an (N, 3) numpy array."""
    points_cuda = torch.from_numpy(points).float().cuda()
    points_cuda = points_cuda.unsqueeze(0)  # add batch dimension: (1, N, 3)
    with torch.no_grad():
        index_cuda = pointnet2_utils.furthest_point_sample(
            points_cuda, npoint)
    return index_cuda.squeeze(0).cpu().numpy()  # (npoint,) sampled point indices
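
A minimal usage sketch (my addition, not part of the source): it assumes a CUDA device and a compiled pointnet2 extension, and the `pointnet2_utils` import path differs between forks.

import numpy as np
import torch
from pointnet2 import pointnet2_utils  # assumed import path; adjust to your fork

points = np.random.rand(10000, 3).astype(np.float32)  # (N, 3) point cloud
idx = FPS_cuda(points, npoint=1024)  # (1024,) int32 indices into points
sampled = points[idx]  # (1024, 3) furthest-point subset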
Example #2
    def forward(self, xyz, points):
        """
        PointConv with downsampling.
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sampled points feature data, [B, D', S]
        """
        B = xyz.shape[0]
        N = xyz.shape[2]
        xyz = xyz.permute(0, 2, 1)
        points = points.permute(0, 2, 1)

        fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        new_xyz = index_points_gather(xyz, fps_idx)

        new_points, grouped_xyz_norm = group_query(self.nsample, xyz, new_xyz, points)

        grouped_xyz = grouped_xyz_norm.permute(0, 3, 2, 1)
        weights = self.weightnet(grouped_xyz)
        new_points = torch.matmul(
            new_points.permute(0, 1, 3, 2),
            weights.permute(0, 3, 2, 1)).view(B, self.npoint, -1)
        new_points = self.linear(new_points)
        if self.bn:
            new_points = self.bn_linear(new_points.permute(0, 2, 1))
        else:
            new_points = new_points.permute(0, 2, 1)

        new_points = self.relu(new_points)

        return new_xyz.permute(0, 2, 1), new_points, fps_idx
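
The matmul above is PointConv's weighted aggregation over each center's neighborhood. A standalone shape trace with dummy tensors (all dimension sizes are illustrative assumptions) shows how the two permutes line up:

import torch

B, S, K, C, W = 2, 128, 32, 64, 16  # batch, centers, neighbors, channels, weightnet width
new_points = torch.randn(B, S, K, C)  # grouped point features per center
weights = torch.randn(B, W, K, S)     # WeightNet output over grouped xyz

# (B, S, C, K) @ (B, S, K, W) -> (B, S, C, W): weighted sum over each neighborhood
out = torch.matmul(new_points.permute(0, 1, 3, 2),
                   weights.permute(0, 3, 2, 1)).view(B, S, -1)
print(out.shape)  # torch.Size([2, 128, 1024]), i.e. (B, S, C * W)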
Example #3
    def forward(self, xyz, feats, new_xyz=None):
        '''
        :param xyz: (B, N, 3) point coordinates
        :param feats: (B, C_in, N) point features
        :param new_xyz: optional (B, npoint, 3) pre-sampled centers
        :return:
            new_feats: (B, C_out, npoint) aggregated features
        '''
        if new_xyz is None:
            assert self.npoint is not None
            xyz_flipped = xyz.transpose(1, 2).contiguous()  # B,3,N
            idx = pointnet2_utils.furthest_point_sample(
                xyz, self.npoint)  # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(
                xyz_flipped, idx)  # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous()  # B,npoint,3

        idx = pointnet2_utils.ball_query(self.radius, self.nsample, xyz,
                                         new_xyz)
        gped_feats = pointnet2_utils.grouping_operation(feats,
                                                        idx)  # B,C,np,ns
        gped_feats = F.max_pool2d(gped_feats,
                                  kernel_size=[1, self.nsample])  # B,C,np,1
        gped_feats = gped_feats.squeeze(-1)  # B,C,np

        return self.conv(gped_feats)
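
`max_pool2d` with kernel `[1, self.nsample]` is simply a max over the neighbor axis; a quick equivalence check on dummy shapes (sizes assumed for illustration):

import torch
import torch.nn.functional as F

gped_feats = torch.randn(2, 64, 512, 16)  # (B, C, npoint, nsample)
pooled = F.max_pool2d(gped_feats, kernel_size=[1, 16]).squeeze(-1)  # (B, C, npoint)
assert torch.equal(pooled, gped_feats.max(dim=-1).values)  # same reduction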
Example #4
    def sample_keypoints(self, xyz, point_features):
        """Use furthest point sampling to select keypoints from the raw point cloud."""
        xyz = xyz.unsqueeze(0).contiguous()  # (1, N, 3)
        indices = furthest_point_sample(
            xyz, self.cfg.n_keypoints).squeeze(0).long()  # (n_keypoints,)
        keypoint_xyz = xyz[:, indices].squeeze(0)  # (n_keypoints, 3)
        keypoint_features = point_features[indices]  # (n_keypoints, C)
        return keypoint_xyz, keypoint_features
Example #5
    def sample_keypoints(self, points):
        """
        Sample keypoints from the raw point cloud. Assumes unit batch size.
        :param points: FloatTensor of shape (N, 4).
        :return: FloatTensor of shape (1, n_keypoints, 3).
        """
        points = points.unsqueeze(0).contiguous()  # (1, N, 4)
        xyz = points[..., :3].contiguous()  # FPS kernel expects exactly 3 coordinates
        indices = furthest_point_sample(xyz, self.cfg.n_keypoints)
        keypoints = points[:, indices.squeeze(0).long(), :3].contiguous()
        return keypoints
Example #6
    def sample_keypoints(self, points):
        """
        fps expects points shape (B, N, 3)
        fps returns indices shape (B, K)
        gather expects features shape (B, C, N)
        """
        points = points[..., :3].contiguous()
        indices = furthest_point_sample(points, self.cfg.NUM_KEYPOINTS)
        keypoints = gather_operation(points.transpose(1, 2).contiguous(), indices)
        keypoints = keypoints.transpose(1, 2).contiguous()  # back to (B, K, 3)
        return keypoints
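
For intuition, the CUDA `gather_operation` used here behaves like `torch.gather` with the index expanded over the channel axis; a pure-PyTorch stand-in (hypothetical helper, CPU-only, for shape intuition rather than speed):

import torch

def gather_points(features, indices):
    # features: (B, C, N) float, indices: (B, K) -> (B, C, K)
    B, C, _ = features.shape
    idx = indices.long().unsqueeze(1).expand(B, C, indices.shape[1])
    return features.gather(2, idx)

features = torch.randn(2, 3, 4096)
indices = torch.randint(0, 4096, (2, 256))
print(gather_points(features, indices).shape)  # torch.Size([2, 3, 256])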
Example #7
    def forward(self, xyz, features, end_points):
        """
        Args:
            xyz: (B,K,3)
            features: (B,C,K)
        Returns:
            scores: (B,num_proposal,2+3+NH*2+NS*4)
        """
        if self.sampling == 'vote_fps':
            # Farthest point sampling (FPS) on votes
            xyz, features, fps_inds = self.vote_aggregation(xyz, features)
            sample_inds = fps_inds
        elif self.sampling == 'seed_fps':
            # FPS on seed and choose the votes corresponding to the seeds
            # This gets us a slightly better coverage of *object* votes than vote_fps (which tends to get more cluster votes)
            sample_inds = pointnet2_utils.furthest_point_sample(
                end_points['seed_xyz'], self.num_proposal)
            xyz, features, _ = self.vote_aggregation(xyz, features,
                                                     sample_inds)
        elif self.sampling == 'random':
            # Random sampling from the votes
            batch_size, num_seed = end_points['seed_xyz'].shape[:2]
            sample_inds = torch.randint(0,
                                        num_seed,
                                        (batch_size, self.num_proposal),
                                        dtype=torch.int).cuda()
            xyz, features, _ = self.vote_aggregation(xyz, features,
                                                     sample_inds)
        else:
            raise ValueError('Unknown sampling strategy: %s' % self.sampling)
        end_points['aggregated_vote_xyz'] = xyz  # (batch_size, num_proposal, 3)
        end_points['aggregated_vote_inds'] = sample_inds  # (batch_size, num_proposal)
        # --------- PROPOSAL GENERATION ---------
        net = F.relu(self.bn1(self.conv1(features)))
        net = F.relu(self.bn2(self.conv2(net)))
        net = self.conv3(
            net
        )  # (batch_size, 2+3+num_heading_bin*2+num_size_cluster*4, num_proposal)

        end_points = decode_scores(net, end_points, self.num_class,
                                   self.num_heading_bin, self.num_size_cluster,
                                   self.mean_size_arr)

        return end_points
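
In the `random` branch, note the explicit `dtype=torch.int`: the pointnet2 gather/grouping kernels consume `int32` indices rather than PyTorch's default `int64`. A standalone sketch of that draw (sizes assumed for illustration):

import torch

batch_size, num_seed, num_proposal = 8, 1024, 256
sample_inds = torch.randint(0, num_seed, (batch_size, num_proposal),
                            dtype=torch.int)  # int32, as the CUDA ops expect
print(sample_inds.dtype, sample_inds.shape)  # torch.int32 torch.Size([8, 256])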
Example #8
    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None,
                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor that stores index to the xyz points (values in [0, N-1])

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        inds: torch.Tensor
            (B, npoint) tensor of the inds
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if inds is None:
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        new_xyz = pointnet2_utils.gather_operation(
            xyz_flipped, inds).transpose(
                1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1), inds
Example #9
    def forward(self, xyz, features=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = (pointnet2_utils.gather_operation(
            xyz_flipped, pointnet2_utils.furthest_point_sample(
                xyz, self.npoint)).transpose(1, 2).contiguous()
                   if self.npoint is not None else None)

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
Example #10
    def get_uniform_loss(self,
                         pcd,
                         percentage=[0.004, 0.006, 0.008, 0.010, 0.012],
                         radius=1.0):
        B, N, C = pcd.shape  # pcd: (B, N, 3)
        npoint = int(N * 0.05)
        loss = 0
        furthest_point_idx = pn2_utils.furthest_point_sample(
            pcd.contiguous(), npoint)  # FPS expects (B, N, 3), not (B, 3, N)
        new_xyz = pn2_utils.gather_operation(
            pcd.permute(0, 2, 1).contiguous(), furthest_point_idx)  # B,C,npoint
        for p in percentage:
            nsample = int(N * p)
            r = math.sqrt(p * radius)
            disk_area = math.pi * (radius**2) / N

            idx = pn2_utils.ball_query(
                r, nsample, pcd.contiguous(),
                new_xyz.permute(0, 2, 1).contiguous())  # B,npoint,nsample

            expect_len = math.sqrt(disk_area)

            grouped_pcd = pn2_utils.grouping_operation(
                pcd.permute(0, 2, 1).contiguous(), idx)  # B,C,npoint,nsample
            grouped_pcd = grouped_pcd.permute(0, 2, 3, 1)  # B,npoint,nsample,C

            grouped_pcd = torch.cat(torch.unbind(grouped_pcd, dim=1),
                                    dim=0)  # B*npoint,nsample,C

            dist, _ = self.knn_uniform(grouped_pcd, grouped_pcd)
            uniform_dist = dist[:, :, 1:]  # drop distance to self
            uniform_dist = torch.abs(uniform_dist + 1e-8)
            uniform_dist = torch.mean(uniform_dist, dim=1)
            uniform_dist = (uniform_dist - expect_len)**2 / (expect_len + 1e-8)
            mean_loss = torch.mean(uniform_dist)
            mean_loss = mean_loss * math.pow(p * 100, 2)
            loss += mean_loss
        return loss / len(percentage)
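
The loss penalizes the squared gap between measured nearest-neighbor distances and the expected spacing `expect_len = sqrt(disk_area)`. Re-running just the arithmetic for the default percentages (N and radius are assumed values for illustration):

import math

N, radius = 4096, 1.0  # assumed values for illustration
for p in [0.004, 0.006, 0.008, 0.010, 0.012]:
    nsample = int(N * p)                     # neighbors queried per disk
    r = math.sqrt(p * radius)                # ball-query radius for this percentage
    disk_area = math.pi * (radius ** 2) / N  # area budget per point
    expect_len = math.sqrt(disk_area)        # expected nearest-neighbor spacing
    print(f"p={p}: nsample={nsample}, r={r:.4f}, expect_len={expect_len:.4f}")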
Example #11
    def forward(self, points):
        """
        TODO: Document intermediate tensor shapes.
        TODO: Use all feature volume strides of 3D CNN output.
        """
        points, features, coordinates, voxel_population = self.voxelize(points)
        out = self.cnn(features, coordinates, batch_size=1)

        xyz, point_features = torch.split(points, [3, 1], dim=-1)
        out = [(point_features, xyz)] + out

        xyz = xyz.unsqueeze(0).contiguous()  # (1, N, 3)
        indices = furthest_point_sample(xyz, cfg.n_keypoints).squeeze(0).long()
        keypoints = points[indices]  # (n_keypoints, 4)
        keypoints_xyz, keypoints_features = torch.split(keypoints, [3, 1], dim=-1)
        voxel_features_i, voxel_coords_i = out[2]

        voxel_coords_i = voxel_coords_i.unsqueeze(0).contiguous()
        voxel_features_i = voxel_features_i.unsqueeze(0).permute(0, 2, 1).contiguous()
        keypoints_xyz = keypoints_xyz.unsqueeze(0).contiguous()

        _, out = self.pnet(voxel_coords_i, voxel_features_i, keypoints_xyz)
        return out
Example #12
    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, C, N) tensor of the descriptors of the features
        :param new_xyz:
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            ).transpose(1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
Example #13
    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None,
                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor that stores index to the xyz points (values in [0, N-1])

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        inds: torch.Tensor
            (B, npoint) tensor of the inds
        """

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if inds is None:
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        else:
            assert inds.shape[1] == self.npoint
        new_xyz = pointnet2_utils.gather_operation(
            xyz_flipped, inds).transpose(
                1, 2).contiguous() if self.npoint is not None else None

        if not self.ret_unique_cnt:
            grouped_features, grouped_xyz = self.grouper(
                xyz, new_xyz, features)  # (B, C, npoint, nsample)
        else:
            grouped_features, grouped_xyz, unique_cnt = self.grouper(
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)

        new_features = self.mlp_module(
            grouped_features)  # (B, mlp[-1], npoint, nsample)
        if self.pooling == 'max':
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
        elif self.pooling == 'avg':
            new_features = F.avg_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
        elif self.pooling == 'rbf':
            # Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
            # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
            rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1, keepdim=False) /
                            (self.sigma**2) / 2)  # (B, npoint, nsample)
            new_features = torch.sum(
                new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(
                    self.nsample)  # (B, mlp[-1], npoint, 1)
        new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

        if not self.ret_unique_cnt:
            return new_xyz, new_features, inds
        else:
            return new_xyz, new_features, inds, unique_cnt
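
The `rbf` branch replaces pooling with a Gaussian-weighted sum: each neighbor is weighted by `exp(-||dx||^2 / (2 * sigma^2))` and the sum is normalized by `nsample`. A pure-PyTorch sketch of that reduction with assumed shapes and an assumed sigma:

import torch

B, C, S, K, sigma = 2, 64, 256, 16, 0.4  # assumed sizes
new_features = torch.randn(B, C, S, K)   # MLP output per neighbor
grouped_xyz = torch.randn(B, 3, S, K)    # neighbor offsets from each center

rbf = torch.exp(-grouped_xyz.pow(2).sum(1) / (sigma ** 2) / 2)  # (B, S, K)
pooled = (new_features * rbf.unsqueeze(1)).sum(-1, keepdim=True) / float(K)
print(pooled.shape)  # torch.Size([2, 64, 256, 1])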