Example #1
    def forward(self, xyz, feats, new_xyz=None):
        '''
        :param xyz: (B, N, 3) xyz coordinates of the points
        :param feats: (B, C_in, N) per-point features
        :param new_xyz: optional (B, npoint, 3) query centers; computed via FPS when None
        :return:
            new_feats: (B, C_out, npoint) aggregated features
        '''
        if new_xyz is None:
            assert self.npoint is not None
            xyz_flipped = xyz.transpose(1, 2).contiguous()  # B,3,N
            idx = pointnet2_utils.furthest_point_sample(
                xyz, self.npoint)  # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(
                xyz_flipped, idx)  # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1,
                                                2).contiguous()  # B,npoint,3

        idx = pointnet2_utils.ball_query(self.radius, self.nsample, xyz,
                                         new_xyz)
        gped_feats = pointnet2_utils.grouping_operation(feats,
                                                        idx)  # B,C,np,ns
        gped_feats = F.max_pool2d(gped_feats,
                                  kernel_size=[1, self.nsample])  # B,C,np,1
        gped_feats = gped_feats.squeeze(-1)  # B,C,np

        return self.conv(gped_feats)
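A minimal usage sketch for the module above (the class name `SAModule` and its constructor arguments are assumptions inferred from the attributes the snippet reads; the compiled pointnet2 ops require contiguous CUDA tensors):

import torch

# Hypothetical construction: the forward above implies self.npoint,
# self.radius, self.nsample, and self.conv exist on the module.
sa = SAModule(npoint=512, radius=0.2, nsample=32)  # assumed signature

xyz = torch.rand(8, 1024, 3).cuda()     # (B, N, 3) point coordinates
feats = torch.rand(8, 64, 1024).cuda()  # (B, C_in, N) per-point features

new_feats = sa(xyz, feats)              # (B, C_out, npoint)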
Example #2
    def forward(self, xyz, points, global_features, select_idx, use_sample):
        """
        Input:
            xyz: input points position data, [B, N, C]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """

        B, N, C = xyz.shape

        if use_sample == 1:
            num_points = select_idx.shape[1]
            new_xyz = torch.from_numpy(select_idx).transpose(1,
                                                             2).float().cuda()

        else:
            new_xyz = pointnet2_utils.gather_operation(
                xyz.permute(0, 2, 1).contiguous(), select_idx.int())
            num_points = select_idx.shape[-1]

        new_radius_list = []

        for i, radius in enumerate(self.radius_list):

            K = self.nsample_list[i]
            query_and_group = pointnet2_utils.QueryAndGroup(radius,
                                                            K,
                                                            use_xyz=True)
            grouped_points = query_and_group(
                xyz.contiguous(),
                new_xyz.permute(0, 2, 1).contiguous(),
                points.permute(0, 2, 1).contiguous())

            for j in range(len(self.conv_blocks[i])):
                conv = self.conv_blocks[i][j]
                bn = self.bn_blocks[i][j]
                grouped_points = F.leaky_relu(bn(conv(grouped_points)),
                                              negative_slope=0.2)
            new_radius = torch.max(grouped_points, 3)[0]  # [B, D', S]
            new_radius_list.append(new_radius)

        new_xyz = new_xyz.permute(0, 2, 1)
        new_radius_concat = torch.cat(new_radius_list, dim=1)
        new_radius_concat = F.leaky_relu(self.conv1(new_radius_concat),
                                         negative_slope=0.2)
        num_point = new_radius_concat.shape[2]
        new_radius_concat = F.leaky_relu(self.conv2(new_radius_concat),
                                         negative_slope=0.2)
        new_radius_concat = self.conv3(new_radius_concat).transpose(
            1, 2).contiguous().view(-1, num_point, 6)

        return new_radius_concat
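The two calling conventions implied by `use_sample` (a sketch with assumed shapes; `module` is a hypothetical instance, and note that `select_idx` is a NumPy array of xyz coordinates in one branch but a tensor of point indices in the other):

import numpy as np
import torch

B, N, S = 2, 1024, 256
xyz = torch.rand(B, N, 3).cuda()      # (B, N, 3)
points = torch.rand(B, N, 64).cuda()  # (B, N, D) per-point features

# use_sample == 1: pre-selected center coordinates as a NumPy array
coords = np.random.rand(B, S, 3).astype(np.float32)        # (B, S, 3)
out = module(xyz, points, None, coords, use_sample=1)      # (B, S, 6)

# use_sample != 1: integer indices into xyz, e.g. from FPS
idx = pointnet2_utils.furthest_point_sample(xyz, S)        # (B, S) int32
out = module(xyz, points, None, idx, use_sample=0)         # (B, S, 6)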
Example #3
    def get_emd_loss(self, pred, gt, pcd_radius):
        idx, _ = auction_match(pred, gt)
        matched_out = pn2_utils.gather_operation(
            gt.transpose(1, 2).contiguous(), idx)
        matched_out = matched_out.transpose(1, 2).contiguous()
        dist2 = (pred - matched_out)**2
        dist2 = dist2.view(dist2.shape[0], -1)  # flatten squared errors to (B, N*3)
        dist2 = torch.mean(dist2, dim=1, keepdim=True)  # (B, 1)
        dist2 /= pcd_radius
        return torch.mean(dist2)
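A sketch of calling this EMD loss (assuming `auction_match` and `pn2_utils` come from a compiled PU-Net-style CUDA extension, that pred and gt hold the same number of points, and that `model` is a hypothetical instance exposing the method):

import torch

pred = torch.rand(4, 1024, 3).cuda()   # (B, N, 3) predicted cloud
gt = torch.rand(4, 1024, 3).cuda()     # (B, N, 3) ground truth, same N
pcd_radius = torch.ones(4, 1).cuda()   # (B, 1) per-cloud radius normalizer

loss = model.get_emd_loss(pred, gt, pcd_radius)  # scalar tensor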
Example #4
    def sample_keypoints(self, points):
        """
        fps expects points shape (B, N, 3)
        fps returns indices shape (B, K)
        gather expects features shape (B, C, N)
        """
        points = points[..., :3].contiguous()
        indices = furthest_point_sample(points, self.cfg.NUM_KEYPOINTS)
        keypoints = gather_operation(points.transpose(1, 2).contiguous(), indices)
        keypoints = keypoints.transpose(1, 2).contiguous()
        return keypoints
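The same pipeline traced outside the class, step by step (a sketch; `furthest_point_sample` and `gather_operation` are assumed to be the compiled CUDA ops, so inputs must be contiguous CUDA tensors, and 2048 stands in for cfg.NUM_KEYPOINTS):

import torch

points = torch.rand(2, 16384, 4).cuda()        # (B, N, 3 + extra channels)
xyz = points[..., :3].contiguous()             # (B, N, 3)
idx = furthest_point_sample(xyz, 2048)         # (B, K) int32 indices
kp = gather_operation(xyz.transpose(1, 2).contiguous(), idx)  # (B, 3, K)
kp = kp.transpose(1, 2).contiguous()           # (B, K, 3)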
Example #5
def index_points_gather(points, fps_idx):
    """
    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S]
    Return:
        new_points:, indexed points data, [B, S, C]
    """

    points_flipped = points.permute(0, 2, 1).contiguous()
    new_points = pointnet2_utils.gather_operation(points_flipped, fps_idx)
    return new_points.permute(0, 2, 1).contiguous()
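Because `gather_operation` works on any channel count, this wrapper gathers arbitrary per-point features, not just coordinates (a sketch with assumed shapes; `pointnet2_utils` is the compiled extension):

import torch

xyz = torch.rand(2, 4096, 3).cuda()                          # (B, N, 3)
feats = torch.rand(2, 4096, 64).cuda()                       # (B, N, C)
fps_idx = pointnet2_utils.furthest_point_sample(xyz, 1024)   # (B, S) int32
sampled_xyz = index_points_gather(xyz, fps_idx)              # (B, S, 3)
sampled_feats = index_points_gather(feats, fps_idx)          # (B, S, C)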
Example #6
    def get_emd_loss(self, pred, gt, radius=1.0):
        '''
        pred and gt are (B, N, 3)
        '''
        idx, _ = auction_match(pred.contiguous(), gt.contiguous())
        # gather_operation expects (B, 3, N)
        matched_out = pn2_utils.gather_operation(
            gt.transpose(1, 2).contiguous(), idx)
        matched_out = matched_out.transpose(1, 2).contiguous()
        dist2 = (pred - matched_out)**2
        dist2 = dist2.view(dist2.shape[0], -1)  # flatten squared errors to (B, N*3)
        dist2 = torch.mean(dist2, dim=1, keepdim=True)  # (B, 1)
        dist2 /= radius
        return torch.mean(dist2)
Example #7
    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None,
                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor, torch.Tensor):
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor of indices into the xyz points (values in [0, N-1])

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        inds: torch.Tensor
            (B, npoint) tensor of the sampled indices
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if inds is None:
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        new_xyz = pointnet2_utils.gather_operation(
            xyz_flipped, inds).transpose(
                1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1), inds
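Calling convention for this multi-scale-grouping forward (a sketch; `sa_module` is a hypothetical instance, and the shapes follow the docstring):

import torch

xyz = torch.rand(4, 2048, 3).cuda()        # (B, N, 3)
features = torch.rand(4, 64, 2048).cuda()  # (B, C, N)

new_xyz, new_features, inds = sa_module(xyz, features)
# new_xyz:      (B, npoint, 3)
# new_features: (B, sum_k mlps[k][-1], npoint)
# inds:         (B, npoint) FPS indices, reusable by a later stage via the
#               `inds` argument to skip resampling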
Example #8
    def forward(self, xyz, features=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = (pointnet2_utils.gather_operation(
            xyz_flipped, pointnet2_utils.furthest_point_sample(
                xyz, self.npoint)).transpose(1, 2).contiguous()
                   if self.npoint is not None else None)

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
Example #9
    def get_uniform_loss(self,
                         pcd,
                         percentage=[0.004, 0.006, 0.008, 0.010, 0.012],
                         radius=1.0):
        B, N, C = pcd.shape
        npoint = int(N * 0.05)
        loss = 0
        further_point_idx = pn2_utils.furthest_point_sample(
            pcd.contiguous(), npoint)  # (B, npoint); FPS expects (B, N, 3)
        new_xyz = pn2_utils.gather_operation(
            pcd.permute(0, 2, 1).contiguous(), further_point_idx)  # (B, 3, npoint)
        for p in percentage:
            nsample = int(N * p)
            r = math.sqrt(p * radius)
            disk_area = math.pi * (radius**2) / N

            idx = pn2_utils.ball_query(r, nsample, pcd.contiguous(),
                                       new_xyz.permute(
                                           0, 2, 1).contiguous())  # (B, npoint, nsample)

            expect_len = math.sqrt(disk_area)

            grouped_pcd = pn2_utils.grouping_operation(
                pcd.permute(0, 2, 1).contiguous(), idx)  # (B, 3, npoint, nsample)
            grouped_pcd = grouped_pcd.permute(0, 2, 3, 1)  # (B, npoint, nsample, 3)

            grouped_pcd = torch.cat(torch.unbind(grouped_pcd, dim=1),
                                    dim=0)  # (B*npoint, nsample, 3)

            dist, _ = self.knn_uniform(grouped_pcd, grouped_pcd)
            uniform_dist = dist[:, :, 1:]  # drop the self-distance column
            uniform_dist = torch.abs(uniform_dist + 1e-8)
            uniform_dist = torch.mean(uniform_dist, dim=1)
            uniform_dist = (uniform_dist - expect_len)**2 / (expect_len + 1e-8)
            mean_loss = torch.mean(uniform_dist)
            mean_loss = mean_loss * math.pow(p * 100, 2)
            loss += mean_loss
        return loss / len(percentage)
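The target spacing comes from assigning each of the N points an equal disk of area pi*radius^2/N on the patch; a quick check of that constant needs no GPU:

import math

N, radius = 1024, 1.0
disk_area = math.pi * radius ** 2 / N
expect_len = math.sqrt(disk_area)  # ~0.0554: expected nearest-neighbor spacing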
Example #10
    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the the features
        :param new_xyz:
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            ).transpose(1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
Example #11
    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None,
                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor of indices into the xyz points (values in [0, N-1])

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        inds: torch.Tensor
            (B, npoint) tensor of the sampled indices
        """

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if inds is None:
            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
        else:
            assert (inds.shape[1] == self.npoint)
        new_xyz = pointnet2_utils.gather_operation(
            xyz_flipped, inds).transpose(
                1, 2).contiguous() if self.npoint is not None else None

        if not self.ret_unique_cnt:
            grouped_features, grouped_xyz = self.grouper(
                xyz, new_xyz, features)  # (B, C, npoint, nsample)
        else:
            grouped_features, grouped_xyz, unique_cnt = self.grouper(
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)

        new_features = self.mlp_module(
            grouped_features)  # (B, mlp[-1], npoint, nsample)
        if self.pooling == 'max':
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
        elif self.pooling == 'avg':
            new_features = F.avg_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
        elif self.pooling == 'rbf':
            # Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
            # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
            rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1, keepdim=False) /
                            (self.sigma**2) / 2)  # (B, npoint, nsample)
            new_features = torch.sum(
                new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(
                    self.nsample)  # (B, mlp[-1], npoint, 1)
        new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

        if not self.ret_unique_cnt:
            return new_xyz, new_features, inds
        else:
            return new_xyz, new_features, inds, unique_cnt
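The 'rbf' pooling branch weights each neighbor by exp(-||dxyz||^2 / (2*sigma^2)) before averaging; the step in isolation, with assumed shapes and plain PyTorch (no CUDA ops needed):

import torch

B, C, P, S = 2, 32, 64, 16            # batch, channels, npoint, nsample
sigma = 0.1
grouped_xyz = torch.rand(B, 3, P, S)  # neighbor offsets from each center
mlp_out = torch.rand(B, C, P, S)      # per-neighbor features after the MLP

rbf = torch.exp(-grouped_xyz.pow(2).sum(1) / (2 * sigma ** 2))          # (B, P, S)
pooled = (mlp_out * rbf.unsqueeze(1)).sum(-1, keepdim=True) / float(S)  # (B, C, P, 1)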