Example #1
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sampled points feature data, [B, D', S]
        """
        device = xyz.device
        B, C, N = xyz.shape
        xyz_t = xyz.permute(0, 2, 1).contiguous()  # [B, N, C]

        if not self.group_all:
            fps_idx = pointutils.furthest_point_sample(
                xyz_t, self.npoint)  # [B, npoint]
            new_xyz = pointutils.gather_operation(xyz,
                                                  fps_idx)  # [B, C, npoint]
        else:
            new_xyz = xyz
        new_points = self.queryandgroup(xyz_t,
                                        new_xyz.transpose(2, 1).contiguous(),
                                        points)  # [B, D+C, npoint, nsample]

        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(
                conv(new_points)))  # [B, channel, npoint, nsample]

        new_points = torch.max(new_points, -1)[0]  # [B, channel, npoint]

        return new_xyz, new_points
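
A minimal shape-flow sketch of the FPS-plus-gather step used above; the import path is an assumption (any PointNet++ CUDA build exposing `furthest_point_sample` / `gather_operation` with these signatures works), and the ops require CUDA tensors.

import torch
from pointnet2_ops import pointnet2_utils as pointutils  # assumed import path

B, N, npoint = 2, 1024, 256
xyz = torch.rand(B, 3, N).cuda()                    # positions, [B, 3, N]
xyz_t = xyz.permute(0, 2, 1).contiguous()           # [B, N, 3]

fps_idx = pointutils.furthest_point_sample(xyz_t, npoint)  # int32 indices, [B, npoint]
new_xyz = pointutils.gather_operation(xyz, fps_idx)        # sampled centroids, [B, 3, npoint]
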
Example #2
    def forward(self, xyz, features, npoint):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
            B = Batch, N = Number of Points
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features
            (C is the feature dimension)

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        self.npoint = npoint  # number of centroids after aggregation
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        # contiguous: view() can only be used on tensors whose memory layout is
        # contiguous, so after transpose, permute, etc. a call to contiguous()
        # is needed to obtain a contiguous copy
        # (B, 3, N)

        new_xyz = (
            pointnet2_utils.gather_operation(
                #xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
                xyz_flipped,
                torch.arange(self.npoint).repeat(xyz.size(0),
                                                 1).int().cuda()).transpose(
                                                     1, 2).contiguous()
        )  # centroids obtained after aggregation, B * npoint * 3

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)
            # (batch, channel, feature_num, in_group_num)
            new_features = self.mlps[i](new_features)
            # (B, mlp[-1], npoint, nsample)
            # (batch, output dim of the last MLP layer, feature_num, in_group_num)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            # (batch, output dim of the last MLP layer, feature_num, 1)
            # global max_pool
            new_features = new_features.squeeze(-1)
            # (B, mlp[-1], npoint)
            # (batch, output dim of the last MLP layer, feature_num)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
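
A short sketch of the two index choices visible above (the active `torch.arange` line and the commented-out FPS line); the import path of the CUDA extension is an assumption.

import torch
from pointnet2_ops import pointnet2_utils  # assumed import path

B, N, npoint = 2, 1024, 128
xyz = torch.rand(B, N, 3).cuda()
xyz_flipped = xyz.transpose(1, 2).contiguous()                  # (B, 3, N)

# a) deterministic: take the first npoint points, as the active line above does
idx = torch.arange(npoint).repeat(B, 1).int().cuda()            # (B, npoint)
# b) furthest point sampling, as the commented-out line above does
# idx = pointnet2_utils.furthest_point_sample(xyz, npoint)      # (B, npoint)

new_xyz = pointnet2_utils.gather_operation(xyz_flipped, idx)    # (B, 3, npoint)
new_xyz = new_xyz.transpose(1, 2).contiguous()                  # (B, npoint, 3)
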
Example #3
    def forward(self, xyz, features, npoint, score):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        self.npoint = npoint
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()

        new_xyz = (
            pointnet2_utils.gather_operation(
                #xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
                xyz_flipped,
                torch.arange(self.npoint).repeat(xyz.size(0),
                                                 1).int().cuda()).transpose(
                                                     1, 2).contiguous())

        for i in range(len(self.groupers)):
            new_features, score_id = self.groupers[i](
                xyz, new_xyz, score, features)  # (B, C, npoint, nsample)
            #score_id = new_features[:,3,:,:].sum(dim = 2).argmax(dim = 1)

            #B
            #new_features_cpu = new_features.squeeze(0).detach().cpu().numpy()
            #np.savetxt('vote4.txt',new_features_cpu[0:4,i,:])
            idx = torch.arange(new_features.size(0))
            new_features = new_features[idx, :, score_id, :]
            #B*C*nsample
            new_features = new_features.unsqueeze(2)
            #B*C*1*nsample
            new_xyz = new_xyz[idx, score_id, :]
            #B*3

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1).squeeze(-1)  # (B, mlp[-1])

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
Example #4
def get_emd_loss(pred, gt, pcd_radius):
    idx, _ = auction_match(pred, gt)
    matched_out = pn2_utils.gather_operation(
        gt.transpose(1, 2).contiguous(), idx)
    matched_out = matched_out.transpose(1, 2).contiguous()
    dist2 = (pred - matched_out)**2
    dist2 = dist2.view(dist2.shape[0], -1)  # flatten per-point squared errors to (B, N*3)
    dist2 = torch.mean(dist2, dim=1, keepdim=True)  # (B, 1)
    dist2 /= pcd_radius
    return torch.mean(dist2)
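
A hypothetical call sketch; `auction_match` comes from a CUDA EMD/auction extension and `pn2_utils` from a PointNet++ build, both assumed to be importable as in the function above.

import torch

B, N = 4, 1024
pred = torch.rand(B, N, 3).cuda()        # predicted point cloud, (B, N, 3)
gt = torch.rand(B, N, 3).cuda()          # ground-truth point cloud, (B, N, 3)
pcd_radius = torch.ones(B, 1).cuda()     # per-cloud normalization radius, (B, 1)

loss = get_emd_loss(pred, gt, pcd_radius)  # scalar tensor
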
Example #5
    def forward(self, xyz, feature=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        feature : torch.Tensor
            (B, C, N) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, feature
            )  # (B, C, npoint, nsample)

            B, _, npoint, nsample = new_features.shape

            new_features = new_features.permute(0, 2, 3, 1)  # B npoint nsample C
            new_features = self.mlps[i](new_features)  # B, npoint, nsample, C
            new_features = new_features.permute(0, 3, 1, 2)  # B, C, npoint, nsample
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features_list.append(new_features.reshape(B, 4, -1, npoint))
        
        if self.npoint is not None:
            out_npoint = self.npoint
        else:
            out_npoint = 1

        new_features = torch.cat(new_features_list, dim=2).reshape(B, -1, out_npoint)

        return new_xyz, new_features
Example #6
def gather(p: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    """Point cloud gathering by indices.

    Args:
        p: Reference point cloud of shape [batch_size, dim, num_point].
        idx: Indices tensor of shape [batch_size, num_query].

    Returns:
        Point cloud tensor of shape [batch_size, dim, num_query].
    """

    p = p.contiguous()
    return _PU.gather_operation(p, idx)
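
Usage sketch, assuming `_PU` is a PointNet++ CUDA utils module as in the snippet above; note that `gather_operation` expects int32 indices on the GPU.

import torch

p = torch.rand(2, 3, 1024).cuda()                     # [batch_size, dim, num_point]
idx = torch.randint(0, 1024, (2, 64)).int().cuda()    # [batch_size, num_query], int32
q = gather(p, idx)                                    # [batch_size, dim, num_query] == (2, 3, 64)
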
Example #7
    def forward(self, xyz, features=None, return_critical_index=False):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []
        new_indices_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            new_features, new_indices = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)], return_indices=True
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_indices = new_indices.squeeze(-1)

            new_features_list.append(new_features)
            new_indices_list.append(new_indices)

        if not return_critical_index:
            return new_xyz, torch.cat(new_features_list, dim=1)
        else:
            return new_xyz, torch.cat(new_features_list, dim=1), torch.cat(new_indices_list, dim=1)
Example #8
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sampled points feature data, [B, D', S]
        """
        device = xyz.device
        B, C, N = xyz.shape
        xyz_t = xyz.permute(0, 2, 1).contiguous()  # [B, N, C]

        fps_idx = pointutils.furthest_point_sample(xyz_t,
                                                   self.npoint)  # [B, npoint]
        new_xyz = pointutils.gather_operation(xyz, fps_idx)  # [B, 3, npoint]
        new_xyz_t = new_xyz.permute(0, 2, 1).contiguous()

        _, idx = pointutils.knn(self.nsample, new_xyz_t,
                                xyz_t)  # [B, npoint, nsample]
        neighbors = pointutils.grouping_operation(
            xyz, idx)  # [B, 3, npoint, nsample]
        centers = new_xyz.view(B, -1, self.npoint, 1).repeat(
            1, 1, 1, self.nsample)  # [B, 3, npoint, nsample]
        pos_diff = centers - neighbors  # [B, 3, npoint, nsample]
        distances = torch.norm(pos_diff, p=2, dim=1,
                               keepdim=True)  # [B, 1, npoint, nsample]
        h_xi_xj = torch.cat([distances, pos_diff, centers, neighbors],
                            dim=1)  # [B, 1+3+3+3, npoint, nsample]

        x = pointutils.grouping_operation(points,
                                          idx)  # [B, D, npoint, nsample]
        x = torch.cat([neighbors, x], dim=1)  # [B, D+3, npoint, nsample]

        h_xi_xj = self.mapping_func2(
            F.relu(self.bn_mapping(
                self.mapping_func1(h_xi_xj))))  # [B, c_in, npoint, nsample]
        if self.first_layer:
            x = F.relu(self.bn_xyz_raising(
                self.xyz_raising(x)))  # [B, c_in, npoint, nsample]
        x = F.relu(self.bn_rsconv(torch.mul(h_xi_xj,
                                            x)))  # (B, c_in, npoint, nsample)

        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            x = F.relu(bn(conv(x)))  # [B, c_out, npoint, nsample]

        x = torch.max(x, -1)[0]  # [B, c_out, npoint]
        # x = F.relu(self.bn_channel_raising(self.cr_mapping(x)))   # [B, c_out, npoint]

        return new_xyz, x
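
A plain-PyTorch sketch of how the relation feature `h_xi_xj` above is assembled (shapes only; random stand-ins replace the CUDA knn/grouping outputs).

import torch

B, npoint, nsample = 2, 128, 16
centers = torch.rand(B, 3, npoint, 1).repeat(1, 1, 1, nsample)    # x_i repeated per neighbour
neighbors = torch.rand(B, 3, npoint, nsample)                     # x_j
pos_diff = centers - neighbors                                    # x_i - x_j
distances = torch.norm(pos_diff, p=2, dim=1, keepdim=True)        # ||x_i - x_j||, [B, 1, npoint, nsample]
h_xi_xj = torch.cat([distances, pos_diff, centers, neighbors], dim=1)  # [B, 10, npoint, nsample]
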
Example #9
    def get_emd_loss(self, pred, gt, radius=1.0):
        '''
        pred and gt are (B, N, 3)
        '''
        idx, _ = auction_match(pred.contiguous(), gt.contiguous())
        # gather_operation expects tensors of shape (B, 3, N)
        # print(gt.transpose(1, 2).shape)
        matched_out = pn2_utils.gather_operation(
            gt.transpose(1, 2).contiguous(), idx)
        matched_out = matched_out.transpose(1, 2).contiguous()
        dist2 = (pred - matched_out)**2
        dist2 = dist2.view(dist2.shape[0], -1)  # flatten per-point squared errors to (B, N*3)
        dist2 = torch.mean(dist2, dim=1, keepdim=True)  # (B, 1)
        dist2 /= radius
        return torch.mean(dist2)
Example #10
    def forward(self, xyz, features, npoint):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        self.npoint = npoint
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()

        new_xyz = (
            pointnet2_utils.gather_operation(
                #xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
                xyz_flipped,
                torch.arange(self.npoint).repeat(xyz.size(0),
                                                 1).int().cuda()).transpose(
                                                     1, 2).contiguous())

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
Example #11
    def forward(self, AA, features=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        AA : torch.Tensor
            (B, N, 2) tensor of the azimuth and distance coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_AA : torch.Tensor
            (B, npoint, 2) tensor of the new features' azimuth distance
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        AA_flipped = AA.transpose(1, 2).contiguous()
        new_AA = (
            pointnet2_utils.gather_operation(
                AA_flipped, pointnet2_utils.furthest_point_sample(AA, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                AA, new_AA, features
            )  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_AA, torch.cat(new_features_list, dim=1)
Example #12
    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = pointnet2_utils.gather_operation(
            xyz_flipped, pointnet2_utils.furthest_point_sample(
                xyz, self.npoint)).transpose(
                    1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
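
A hypothetical end-to-end call of a set-abstraction module with a forward() like the one above. The class name, constructor keywords, and import path are assumptions (they follow the common Pointnet2_PyTorch layout), and features are passed channels-first, matching the (B, C, N) convention the groupers expect.

import torch
from pointnet2_ops.pointnet2_modules import PointnetSAModule  # assumed import path

sa = PointnetSAModule(npoint=512, radius=0.2, nsample=64, mlp=[6, 64, 64, 128]).cuda()
xyz = torch.rand(8, 1024, 3).cuda()        # (B, N, 3) point coordinates
features = torch.rand(8, 6, 1024).cuda()   # (B, C, N) per-point descriptors
new_xyz, new_features = sa(xyz, features)  # (8, 512, 3), (8, 128, 512) under these assumptions
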
Example #13
    def get_uniform_loss(self,
                         pcd,
                         percentage=[0.004, 0.006, 0.008, 0.010, 0.012],
                         radius=1.0):
        B, N, C = pcd.shape
        npoint = int(N * 0.05)
        loss = 0
        further_point_idx = pn2_utils.furthest_point_sample(
            pcd.permute(0, 2, 1).contiguous(), npoint)
        new_xyz = pn2_utils.gather_operation(
            pcd.permute(0, 2, 1).contiguous(), further_point_idx)  # B,C,N
        for p in percentage:
            nsample = int(N * p)
            r = math.sqrt(p * radius)
            disk_area = math.pi * (radius**2) / N

            idx = pn2_utils.ball_query(r, nsample, pcd.contiguous(),
                                       new_xyz.permute(
                                           0, 2, 1).contiguous())  #b N nsample

            expect_len = math.sqrt(disk_area)

            grouped_pcd = pn2_utils.grouping_operation(
                pcd.permute(0, 2, 1).contiguous(), idx)  #B C N nsample
            grouped_pcd = grouped_pcd.permute(0, 2, 3, 1)  #B N nsample C

            grouped_pcd = torch.cat(torch.unbind(grouped_pcd, dim=1),
                                    dim=0)  #B*N nsample C

            dist, _ = self.knn_uniform(grouped_pcd, grouped_pcd)
            #print(dist.shape)
            uniform_dist = dist[:, :, 1:]  #B*N nsample 1
            uniform_dist = torch.abs(uniform_dist + 1e-8)
            uniform_dist = torch.mean(uniform_dist, dim=1)
            uniform_dist = (uniform_dist - expect_len)**2 / (expect_len + 1e-8)
            mean_loss = torch.mean(uniform_dist)
            mean_loss = mean_loss * math.pow(p * 100, 2)
            loss += mean_loss
        return loss / len(percentage)
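
The per-percentage constants used above reduce to a few lines of arithmetic; a worked sketch for one value of p:

import math

N, radius, p = 4096, 1.0, 0.004
nsample = int(N * p)                      # points expected inside each query ball -> 16
r = math.sqrt(p * radius)                 # ball-query radius for this percentage
disk_area = math.pi * (radius ** 2) / N   # area budget per point on a disk of the given radius
expect_len = math.sqrt(disk_area)         # expected nearest-neighbour spacing inside a ball
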
Example #14
    def forward(self, xyz, features=None, curvature=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        new_curvature : torch.Tensor
            (B, npoint) tensor of the curvatures gathered at the sampled points
        """

        new_features_list = []

        if self.npoint is not None:
            # =============================================================================
            #             xyz_pairdistance = torch.topk(torch.norm(xyz,dim=2,keepdim=True),int(xyz.shape[1]*0.4),dim=1,largest=False)[1]
            # #            xyz_norm = torch.norm(xyz[:,:,0:3],dim=2,keepdim=True)
            # #            xyz_pairdistance = xyz_norm.lt(0.60).nonzero()[:,1].unsqueeze(0).unsqueeze(-1)
            #             xyz_inner = torch.gather(xyz,1,xyz_pairdistance.repeat(1,1,3))
            #             xyz_flipped = xyz_inner.transpose(1, 2).contiguous()
            #
            #             curvature_inner = torch.gather(curvature.unsqueeze(-1),1,xyz_pairdistance)
            #             curvature_inner = curvature_inner.squeeze(-1)
            #             xyz_c = torch.stack((xyz_inner[:,:,0],xyz_inner[:,:,1],xyz_inner[:,:,2],curvature_inner.squeeze(-1)),dim=2)
            # =============================================================================
            xyz_norm = torch.norm(xyz[:, :, 0:3], dim=2, keepdim=True)
            #            print(xyz_norm.shape)
            xyz_pairdistance = xyz_norm.squeeze(-1).gt(0.7).nonzero()
            #            print(xyz_pairdistance[0:10])
            #            print(xyz_pairdistance[0][1])
            #            print(curvature.shape)
            #            print(curvature[0:2,:])
            #            print('******')
            #            print(curvature[xyz_pairdistance[:,0],xyz_pairdistance[:,1]])
            #            quit()
            curvature[xyz_pairdistance[:, 0], xyz_pairdistance[:, 1]] = 0
            xyz_flipped = xyz[:, :, 0:3].transpose(1, 2).contiguous()
            xyz_c = torch.stack(
                (xyz[:, :, 0], xyz[:, :, 1], xyz[:, :,
                                                 2], curvature.squeeze(-1)),
                dim=2)

            idx = pointnet2_utils.furthest_point_sample(
                xyz_c.contiguous(), self.npoint)
            #            idx = idx.detach()
            new_xyz = (pointnet2_utils.gather_operation(
                xyz_flipped, idx).transpose(1, 2).contiguous())
            new_curvature = torch.gather(curvature, dim=1, index=idx.long())
        else:
            new_xyz = None
            new_curvature = None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1), new_curvature