Example 1
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        device = xyz.device
        B, C, N = xyz.shape
        xyz_t = xyz.permute(0, 2, 1).contiguous()  # [B, N, C]

        if not self.group_all:
            fps_idx = pointutils.furthest_point_sample(
                xyz_t, self.npoint)  # [B, npoint]
            new_xyz = pointutils.gather_operation(xyz,
                                                  fps_idx)  # [B, C, npoint]
        else:
            new_xyz = xyz
        new_points = self.queryandgroup(xyz_t,
                                        new_xyz.transpose(2, 1).contiguous(),
                                        points)  # [B, D+C, npoint, nsample]

        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(
                conv(new_points)))  # [B, channel, npoint, nsample]

        new_points = torch.max(new_points, -1)[0]  # [B, channel, npoint]

        return new_xyz, new_points
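Note on Example 1: pointutils.gather_operation(xyz, fps_idx) picks the sampled centroids out of a channel-first tensor. The snippet below is a minimal plain-PyTorch sketch of that gather step (an assumption about the op's semantics, not the CUDA kernel the example actually calls), handy for checking shapes on CPU.

import torch

def gather_points(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    # Select columns of a [B, C, N] tensor with per-batch indices [B, S].
    # Plain-PyTorch stand-in for pointutils.gather_operation (assumed semantics,
    # without the custom CUDA backward).
    B, C, _ = points.shape
    idx_expanded = idx.long().unsqueeze(1).expand(B, C, idx.shape[1])
    return torch.gather(points, 2, idx_expanded)

# usage: xyz [B, 3, N], fps_idx [B, npoint] -> new_xyz [B, 3, npoint]
xyz = torch.rand(2, 3, 1024)
fps_idx = torch.randint(0, 1024, (2, 512))
assert gather_points(xyz, fps_idx).shape == (2, 3, 512)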
Example 2
    def forward(self, xyz, feature=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        feature : torch.Tensor
            (B, C, N) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, feature
            )  # (B, C, npoint, nsample)

            B, _, npoint, nsample = new_features.shape

            new_features = new_features.permute(0, 2, 3, 1)  # B npoint nsample C
            new_features = self.mlps[i](new_features)  # B, npoint, nsample, C
            new_features = new_features.permute(0, 3, 1, 2)  # B, C, npoint, nsample
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features_list.append(new_features.reshape(B, 4, -1, npoint))
        
        if self.npoint is not None:
            out_npoint = self.npoint
        else:
            out_npoint = 1

        new_features = torch.cat(new_features_list, dim=2).reshape(B, -1, out_npoint)

        return new_xyz, new_features
Example 3
def fps(p: torch.Tensor, k: int) -> torch.Tensor:
    """Point cloud FPS sampling.

    Args:
        p: Reference point cloud of shape [batch_size, 3, num_point].
        k (int): Number of sampled points.

    Returns:
        Indices tensor of shape [batch_size, k].
    """

    p_t = p.transpose(1, 2).contiguous()
    return _PU.furthest_point_sample(p_t, k)
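The wrapped _PU.furthest_point_sample is a CUDA kernel. As a point of reference, a pure-PyTorch sketch of the same greedy farthest point sampling loop (assuming the usual semantics: input [B, N, 3], output [B, k] integer indices) looks like this; it is slow but useful as a CPU fallback or a correctness check.

import torch

def fps_reference(p_t: torch.Tensor, k: int) -> torch.Tensor:
    # Greedy farthest point sampling on [B, N, 3] points; returns [B, k] indices.
    B, N, _ = p_t.shape
    idx = torch.zeros(B, k, dtype=torch.long, device=p_t.device)
    dist = torch.full((B, N), float("inf"), device=p_t.device)  # distance to the sample set
    farthest = torch.zeros(B, dtype=torch.long, device=p_t.device)  # start from point 0
    batch = torch.arange(B, device=p_t.device)
    for i in range(k):
        idx[:, i] = farthest
        centroid = p_t[batch, farthest, :].unsqueeze(1)              # [B, 1, 3]
        dist = torch.minimum(dist, ((p_t - centroid) ** 2).sum(-1))  # update min distances
        farthest = dist.argmax(dim=1)                                # pick the farthest point next
    return idx

# e.g. sample 128 of 1024 points per cloud
assert fps_reference(torch.rand(4, 1024, 3), 128).shape == (4, 128)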
Example 4
    def forward(self, xyz, features=None, return_critical_index=False):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []
        new_indices_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            new_features, new_indices = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)], return_indices=True
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_indices = new_indices.squeeze(-1)

            new_features_list.append(new_features)
            new_indices_list.append(new_indices)

        if not return_critical_index:
            return new_xyz, torch.cat(new_features_list, dim=1)
        else:
            return new_xyz, torch.cat(new_features_list, dim=1), torch.cat(new_indices_list, dim=1)
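A note on the critical indices returned by Example 4: F.max_pool2d(..., return_indices=True) reports positions in the flattened (npoint, nsample) input grid, so the winning neighbour slot per output feature is the index modulo nsample. The short self-contained check below (made-up shapes) verifies that reading of the semantics against a plain torch.max.

import torch
import torch.nn.functional as F

B, C, npoint, nsample = 2, 8, 16, 32
feats = torch.rand(B, C, npoint, nsample)

pooled, idx = F.max_pool2d(feats, kernel_size=[1, nsample], return_indices=True)
pooled, idx = pooled.squeeze(-1), idx.squeeze(-1)   # [B, C, npoint]

ref_vals, ref_arg = feats.max(dim=-1)               # max over the neighbour dimension
assert torch.equal(pooled, ref_vals)                # same pooled values ...
assert torch.equal(idx % nsample, ref_arg)          # ... and the same argmax neighbour slot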
Example 5
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        device = xyz.device
        B, C, N = xyz.shape
        xyz_t = xyz.permute(0, 2, 1).contiguous()  # [B, N, C]

        fps_idx = pointutils.furthest_point_sample(xyz_t,
                                                   self.npoint)  # [B, npoint]
        new_xyz = pointutils.gather_operation(xyz, fps_idx)  # [B, 3, npoint]
        new_xyz_t = new_xyz.permute(0, 2, 1).contiguous()

        _, idx = pointutils.knn(self.nsample, new_xyz_t,
                                xyz_t)  # [B, npoint, nsample]
        neighbors = pointutils.grouping_operation(
            xyz, idx)  # [B, 3, npoint, nsample]
        centers = new_xyz.view(B, -1, self.npoint, 1).repeat(
            1, 1, 1, self.nsample)  # [B, 3, npoint, nsample]
        pos_diff = centers - neighbors  # [B, 3, npoint, nsample]
        distances = torch.norm(pos_diff, p=2, dim=1,
                               keepdim=True)  # [B, 1, npoint, nsample]
        h_xi_xj = torch.cat([distances, pos_diff, centers, neighbors],
                            dim=1)  # [B, 1+3+3+3, npoint, nsample]

        x = pointutils.grouping_operation(points,
                                          idx)  # [B, D, npoint, nsample]
        x = torch.cat([neighbors, x], dim=1)  # [B, D+3, npoint, nsample]

        h_xi_xj = self.mapping_func2(
            F.relu(self.bn_mapping(
                self.mapping_func1(h_xi_xj))))  # [B, c_in, npoint, nsample]
        if self.first_layer:
            x = F.relu(self.bn_xyz_raising(
                self.xyz_raising(x)))  # [B, c_in, npoint, nsample]
        x = F.relu(self.bn_rsconv(torch.mul(h_xi_xj,
                                            x)))  # (B, c_in, npoint, nsample)

        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            x = F.relu(bn(conv(x)))  # [B, c_out, npoint, nsample]

        x = torch.max(x, -1)[0]  # [B, c_out, npoint]
        # x = F.relu(self.bn_channel_raising(self.cr_mapping(x)))   # [B, c_out, npoint]

        return new_xyz, x
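The core of Example 5 is the 10-channel relation tensor h_xi_xj = [distance, center - neighbor, center, neighbor] fed to the mapping MLP. The sketch below rebuilds just that tensor in plain PyTorch from already-grouped coordinates (the kNN grouping itself is assumed done, e.g. by pointutils.grouping_operation).

import torch

def relation_features(new_xyz: torch.Tensor, grouped_xyz: torch.Tensor) -> torch.Tensor:
    # new_xyz:     [B, 3, npoint]           sampled centroids
    # grouped_xyz: [B, 3, npoint, nsample]  the nsample neighbours of each centroid
    # returns:     [B, 10, npoint, nsample] = [dist, center - neighbor, center, neighbor]
    centers = new_xyz.unsqueeze(-1).expand_as(grouped_xyz)
    pos_diff = centers - grouped_xyz
    dist = pos_diff.norm(p=2, dim=1, keepdim=True)
    return torch.cat([dist, pos_diff, centers, grouped_xyz], dim=1)

# shape check only; real neighbours would come from a kNN grouping op
assert relation_features(torch.rand(2, 3, 64), torch.rand(2, 3, 64, 16)).shape == (2, 10, 64, 16)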
Example 6
    def forward(self, AA, features=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        AA : torch.Tensor
            (B, N, 2) tensor of the azimuth and distance coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_AA : torch.Tensor
            (B, npoint, 2) tensor of the new features' azimuth and distance
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """

        new_features_list = []

        AA_flipped = AA.transpose(1, 2).contiguous()
        new_AA = (
            pointnet2_utils.gather_operation(
                AA_flipped, pointnet2_utils.furthest_point_sample(AA, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                AA, new_AA, features
            )  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_AA, torch.cat(new_features_list, dim=1)
Example 7
    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """

        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        new_xyz = pointnet2_utils.gather_operation(
            xyz_flipped, pointnet2_utils.furthest_point_sample(
                xyz, self.npoint)).transpose(
                    1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
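In Examples 2, 4, 6 and 7 the actual neighbourhood construction is hidden inside self.groupers[i], which is typically a ball-query-plus-grouping module. The following is a rough pure-PyTorch sketch of the assumed ball query semantics (up to nsample neighbours within a radius, short neighbourhoods padded with the first hit); the real CUDA op may differ in tie-breaking and padding details.

import torch

def ball_query_reference(radius: float, nsample: int,
                         xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
    # xyz: [B, N, 3] all points, new_xyz: [B, S, 3] query centroids
    # returns [B, S, nsample] indices into xyz (assumes every ball is non-empty,
    # which holds when the centroids are themselves drawn from xyz)
    N = xyz.shape[1]
    dists = torch.cdist(new_xyz, xyz)                              # [B, S, N]
    idx = torch.arange(N, device=xyz.device).expand_as(dists).clone()
    idx[dists > radius] = N                                        # sentinel for out-of-range points
    idx = idx.sort(dim=-1).values[:, :, :nsample]                  # first nsample in-range indices
    pad = idx[:, :, :1].expand_as(idx)                             # pad short balls with the first hit
    return torch.where(idx == N, pad, idx)

pts = torch.rand(2, 1024, 3)
idx = ball_query_reference(0.2, 16, pts, pts[:, :128])
assert idx.shape == (2, 128, 16)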
Example 8
    def get_uniform_loss(self,
                         pcd,
                         percentage=[0.004, 0.006, 0.008, 0.010, 0.012],
                         radius=1.0):
        B, N, C = pcd.shape
        npoint = int(N * 0.05)
        loss = 0
        further_point_idx = pn2_utils.furthest_point_sample(
            pcd.permute(0, 2, 1).contiguous(), npoint)
        new_xyz = pn2_utils.gather_operation(
            pcd.permute(0, 2, 1).contiguous(), further_point_idx)  # B,C,N
        for p in percentage:
            nsample = int(N * p)
            r = math.sqrt(p * radius)
            disk_area = math.pi * (radius**2) / N

            idx = pn2_utils.ball_query(r, nsample, pcd.contiguous(),
                                       new_xyz.permute(
                                           0, 2, 1).contiguous())  # B, npoint, nsample

            expect_len = math.sqrt(disk_area)

            grouped_pcd = pn2_utils.grouping_operation(
                pcd.permute(0, 2, 1).contiguous(), idx)  # B, C, npoint, nsample
            grouped_pcd = grouped_pcd.permute(0, 2, 3, 1)  # B, npoint, nsample, C

            grouped_pcd = torch.cat(torch.unbind(grouped_pcd, dim=1),
                                    dim=0)  # B*npoint, nsample, C

            dist, _ = self.knn_uniform(grouped_pcd, grouped_pcd)
            uniform_dist = dist[:, :, 1:]  # B*npoint, nsample, 1
            uniform_dist = torch.abs(uniform_dist + 1e-8)
            uniform_dist = torch.mean(uniform_dist, dim=1)
            uniform_dist = (uniform_dist - expect_len)**2 / (expect_len + 1e-8)
            mean_loss = torch.mean(uniform_dist)
            mean_loss = mean_loss * math.pow(p * 100, 2)
            loss += mean_loss
        return loss / len(percentage)
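In Example 8, self.knn_uniform is not shown; the usage suggests a k-nearest-neighbour module with k = 2, so that dist[:, :, 1:] is each point's distance to its closest neighbour other than itself. A small torch.cdist-based sketch of that step (an assumption about the helper, not its actual implementation):

import torch

def nearest_neighbor_distance(pts: torch.Tensor) -> torch.Tensor:
    # pts: [G, nsample, C] groups of points (G = B * npoint after the
    # torch.unbind/torch.cat above); returns [G, nsample, 1] distances
    d = torch.cdist(pts, pts)                          # [G, nsample, nsample]
    knn = d.topk(k=2, dim=-1, largest=False).values    # two smallest: self (0) and nearest neighbour
    return knn[:, :, 1:]                               # drop the self-distance

assert nearest_neighbor_distance(torch.rand(6, 32, 3)).shape == (6, 32, 1)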
Example 9
    def forward(self, xyz, features=None, curvature=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features
        curvature : torch.Tensor
            (B, N) tensor of per-point curvature used to bias the sampling

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B,  \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        new_curvature : torch.Tensor
            (B, npoint) tensor of the curvature of the sampled points
        """

        new_features_list = []

        if self.npoint is not None:
            xyz_norm = torch.norm(xyz[:, :, 0:3], dim=2, keepdim=True)
            xyz_pairdistance = xyz_norm.squeeze(-1).gt(0.7).nonzero()
            # zero out the curvature of points whose xyz norm exceeds 0.7
            curvature[xyz_pairdistance[:, 0], xyz_pairdistance[:, 1]] = 0
            xyz_flipped = xyz[:, :, 0:3].transpose(1, 2).contiguous()
            xyz_c = torch.stack(
                (xyz[:, :, 0], xyz[:, :, 1], xyz[:, :, 2], curvature.squeeze(-1)),
                dim=2)

            idx = pointnet2_utils.furthest_point_sample(
                xyz_c.contiguous(), self.npoint)
            new_xyz = (pointnet2_utils.gather_operation(
                xyz_flipped, idx).transpose(1, 2).contiguous())
            new_curvature = torch.gather(curvature, dim=1, index=idx.long())
        else:
            new_xyz = None
            new_curvature = None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](
                new_features)  # (B, mlp[-1], npoint, nsample)
            new_features = F.max_pool2d(new_features,
                                        kernel_size=[
                                            1, new_features.size(3)
                                        ])  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)

            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1), new_curvature
Example 10
    def forward(self, pointcloud: torch.cuda.FloatTensor, cls):
        # x: B,3,N

        xyz, features = self._break_up_pc(pointcloud)
        num_pts = xyz.size(1)
        batch_size = xyz.size(0)
        # FPS to find different point subsets and their relations
        subset1_idx = pointnet2_utils.furthest_point_sample(xyz, num_pts // 4).long()  # B,N/4
        subset1_xyz = torch.unsqueeze(subset1_idx, -1).repeat(1, 1, 3)  # B,N/4,3
        subset1_xyz = torch.take(xyz, subset1_xyz)  # B,N/4,3

        dist, idx1 = pointnet2_utils.three_nn(xyz, subset1_xyz)
        dist_recip = 1.0 / (dist + 1e-8)
        norm = torch.sum(dist_recip, dim=2, keepdim=True)
        weight1 = dist_recip / norm

        subset12_idx = pointnet2_utils.furthest_point_sample(
            subset1_xyz, num_pts // 16).long()  # B,N/16
        subset12_xyz = torch.unsqueeze(subset12_idx, -1).repeat(1, 1, 3)  # B,N/16,3
        subset12_xyz = torch.take(subset1_xyz, subset12_xyz)  # B,N/16,3

        dist, idx12 = pointnet2_utils.three_nn(subset1_xyz, subset12_xyz)
        dist_recip = 1.0 / (dist + 1e-8)
        norm = torch.sum(dist_recip, dim=2, keepdim=True)
        weight12 = dist_recip / norm

        device = torch.device('cuda')
        centroid = torch.zeros([batch_size, 1, 3], device=device)
        dist, idx0 = pointnet2_utils.three_nn(subset12_xyz, centroid)
        dist_recip = 1.0 / (dist + 1e-8)
        norm = torch.sum(dist_recip, dim=2, keepdim=True)
        weight0 = dist_recip / norm
        #######################################
        # Error-minimizing module 1:
        # Encoding
        x = xyz.transpose(2, 1)  # x: B,3,N
        x1_1 = x
        x = get_adptive_dilated_graph_feature(x,
                                              self.conv_op1,
                                              self.conv_op11,
                                              self.conv_op12,
                                              d=5,
                                              k=20)
        x = self.conv1(x)  # B,64,N,k
        x = self.conv14(x)  # B,64,N,k
        x1_2 = x
        # Back-projection
        x = self.conv11(x)  # B,3,N,1
        x = torch.squeeze(x, -1)  # B,3,N
        x1_3 = x
        # Calculating Error
        delta_1 = x1_3 - x1_1  # B,3,N
        # Output
        x = x1_2  # B,64,N,k
        x1 = x.max(dim=-1, keepdim=False)[0]  # B,64,N
        #######################################

        #######################################
        # Multi-resolution (MR) Branch
        # Down-scaling 1
        subset1_feat = torch.unsqueeze(subset1_idx, -1).repeat(1, 1, 64)  # B,N/4,64
        x1_subset1 = torch.take(x1.transpose(1, 2).contiguous(),
                                subset1_feat).transpose(1, 2).contiguous()  # B,64,N/4

        x2_1 = x1_subset1  # B,64,N/4
        x = get_graph_feature(x1_subset1, k=self.k // 2)
        x = self.conv2(x)  # B,64,N/4,k
        x = self.conv24(x)  # B,128,N/4,k
        x2 = x.max(dim=-1, keepdim=False)[0]  # B,128,N/4

        # Dense-connection
        x12 = pointnet2_utils.three_interpolate(x2, idx1, weight1)  # B,128,N
        x12 = torch.cat((x12, x1), dim=1)  # B,192,N
        x12 = self.conv23(x12)  # B,128,N

        # Down-scaling 2
        subset12_feat = torch.unsqueeze(subset12_idx, -1).repeat(1, 1, 128)  # B,N/16,128
        x2_subset12 = torch.take(
            x2.transpose(1, 2).contiguous(),
            subset12_feat).transpose(1, 2).contiguous()  # B,128,N/16

        x3_1 = x2_subset12  # B,128,N/16
        x = get_graph_feature(x2_subset12, k=self.k // 4)
        x = self.conv3(x)  # B,256,N/16,k
        x3 = x.max(dim=-1, keepdim=False)[0]  # B,256,N/16

        # Dense-connection
        x23 = pointnet2_utils.three_interpolate(x3, idx12,
                                                weight12)  # B,256,N/4
        x23 = torch.cat((x23, x2), dim=1)  # B,384,N/4
        x23 = self.conv34(x23)  # B,128,N/4
        x123 = pointnet2_utils.three_interpolate(x23, idx1, weight1)  # B,128,N
        x123 = torch.cat((x123, x12, x1), dim=1)  # B,320,N
        x123 = self.conv35(x123)  # B,128,N

        # Down-scaling 3
        x_bot = self.conv53(x3)
        x_bot = self.conv54(x_bot)  # B,1024,N/128
        x_bot = F.adaptive_max_pool1d(x_bot, 1)  # B,1024,1

        # Upsampling 3:
        interpolated_feats1 = pointnet2_utils.three_interpolate(
            x_bot, idx0, weight0)  # B,1024,N/16
        interpolated_feats2 = x3  # B,256,N/16
        x3_up = torch.cat((interpolated_feats1, interpolated_feats2),
                          dim=1)  # B,1280,N/16
        x3_up = self.conv32(x3_up)  # B,256,N/16
        x3_up = self.conv33(x3_up)  # B,256,N/16

        # Upsampling 2:
        interpolated_feats1 = pointnet2_utils.three_interpolate(
            x3_up, idx12, weight12)  # B,256,N/4
        interpolated_feats2 = x2  # B,128,N/4
        interpolated_feats3 = x23  # B,128,N/4
        x2_up = torch.cat(
            (interpolated_feats1, interpolated_feats3, interpolated_feats2),
            dim=1)  # B,512,N/4
        x2_up = self.conv21(x2_up)  # B,256,N/4
        x2_up = self.conv22(x2_up)  # B,128,N/4

        # Upsampling 1:
        interpolated_feats1 = pointnet2_utils.three_interpolate(
            x2_up, idx1, weight1)  # B,128,N
        interpolated_feats2 = x1  # B,64,N
        interpolated_feats3 = x12  # B,128,N
        interpolated_feats4 = x123  # B,128,N
        x1_up = torch.cat((interpolated_feats1, interpolated_feats4,
                           interpolated_feats3, interpolated_feats2),
                          dim=1)  # B,448,N
        x1_up = self.conv12(x1_up)  # B,512,N
        x1_up = self.conv13(x1_up)  # B,1024,N

        x_mr = x1_up
        #############################################################################

        #############################################################################
        # Full-resolution Branch
        # Error-minimizing module 2:
        # Encoding
        x2_1 = x1  # B,64,N
        x = get_adptive_dilated_graph_feature(x1,
                                              self.conv_op2,
                                              self.conv_op21,
                                              self.conv_op22,
                                              d=5,
                                              k=20)
        x = self.convfc2(x)  # B,64,N,k
        x = self.convfc24(x)  # B,64,N,k
        x2_2 = x
        # Back-projection
        x = self.convfc21(x)  # B,64,N,1
        x = torch.squeeze(x, -1)  # B,64,N
        x2_3 = x
        # Calculating Error
        delta_2 = x2_3 - x2_1  # B,64,N
        # Output
        x = x2_2  # B,64,N,k
        x2 = x.max(dim=-1, keepdim=False)[0]  # B,64,N
        #######################################
        # Error-minimizing module 3:
        # Encoding
        x3_1 = x2  # B,64,N
        x = get_adptive_dilated_graph_feature(x2,
                                              self.conv_op3,
                                              self.conv_op31,
                                              self.conv_op32,
                                              d=5,
                                              k=20)
        x = self.convfc3(x)  # B,128,N,k
        x3_2 = x
        # Back-projection
        x = self.convfc31(x)  # B,64,N,1
        x = torch.squeeze(x, -1)  # B,64,N
        x3_3 = x
        # Calculating Error
        delta_3 = x3_3 - x3_1  # B,64,N
        # Output
        x = x3_2  # B,128,N,k
        x3 = x.max(dim=-1, keepdim=False)[0]  # B,128,N
        #######################################
        # Error-minimizing module 4:
        # Encoding
        x4_1 = x3  # B,128,N
        x = get_adptive_dilated_graph_feature(x3,
                                              self.conv_op4,
                                              self.conv_op41,
                                              self.conv_op42,
                                              d=5,
                                              k=20)
        x = self.convfc4(x)  # B,256,N,k
        x4_2 = x
        # Back-projection
        x = self.convfc41(x)  # B,128,N,1
        x = torch.squeeze(x, -1)  # B,128,N
        x4_3 = x
        # Calculating Error
        delta_4 = x4_3 - x4_1  # B,128,N
        # Output
        x = x4_2  # B,256,N,k
        x4 = x.max(dim=-1, keepdim=False)[0]  # B,256,N

        x = torch.cat((x1, x2, x3, x4), dim=1)  # B,512,N
        x_fr = self.conv7(x)  # B,1024,N

        # Fusing FR and MR outputs
        fusion_score = self.fuse(x_mr)
        x = x_fr + x_fr * fusion_score
        x_all = self.conv9(x)  # B,1024,N

        # Collecting global feature
        one_hot_label = cls.view(-1, 16, 1)  # B,16,1
        one_hot_label = self.conv5(one_hot_label)  # B,64,1
        x_max = F.adaptive_max_pool1d(x_all, 1)  # B,1024,1
        x_global = torch.cat((x_max, one_hot_label), dim=1)  # B,1088,1

        x_global = x_global.repeat(1, 1, num_pts)  # B,1088,N
        x = torch.cat((x_all, x_global), dim=1)  # B,2112,N

        x = self.conv8(x)  # B,1024,N

        x = self.conv63(x)  # B,128,N
        x = self.dp(x)
        x = self.conv64(x)  # B,50,N

        return (x.transpose(2, 1).contiguous(),
                delta_1.transpose(2, 1).contiguous(),
                delta_2.transpose(2, 1).contiguous(),
                delta_3.transpose(2, 1).contiguous(),
                delta_4.transpose(2, 1).contiguous())
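Example 10 leans on three_nn + three_interpolate for every upsampling step. Below is a plain-PyTorch sketch of that inverse-distance-weighted interpolation (assumed semantics of the CUDA ops, which also ship custom backward kernels; it needs at least three source points).

import torch

def three_interpolate_reference(unknown_xyz: torch.Tensor,
                                known_xyz: torch.Tensor,
                                known_feats: torch.Tensor) -> torch.Tensor:
    # unknown_xyz: [B, N, 3] target positions, known_xyz: [B, M, 3] source positions,
    # known_feats: [B, C, M] source features; returns [B, C, N]
    dist = torch.cdist(unknown_xyz, known_xyz)                 # [B, N, M]
    dist3, idx3 = dist.topk(k=3, dim=-1, largest=False)        # 3 nearest sources per target
    weight = 1.0 / (dist3 + 1e-8)
    weight = weight / weight.sum(dim=-1, keepdim=True)         # normalised inverse-distance weights
    B, C, M = known_feats.shape
    N = unknown_xyz.shape[1]
    idx_exp = idx3.unsqueeze(1).expand(B, C, N, 3)
    feats3 = torch.gather(known_feats.unsqueeze(2).expand(B, C, N, M), 3, idx_exp)
    return (feats3 * weight.unsqueeze(1)).sum(dim=-1)          # weighted average, [B, C, N]

out = three_interpolate_reference(torch.rand(2, 1024, 3), torch.rand(2, 256, 3), torch.rand(2, 64, 256))
assert out.shape == (2, 64, 1024)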