Example 1
    def forward(self, x, sn, node, node_knn_I, is_train=False, epoch=None):
        '''
        :param x: Bx3xN Tensor
        :param sn: Bx3xN Tensor
        :param node: Bx3xM FloatTensor
        :param node_knn_I: BxMxk_som LongTensor
        :param is_train: determines whether noise is added in the KNNModule
        :return: BxC global feature Tensor (max-pooled over nodes)
        '''

        # optimize the SOM on the raw tensor data; the optimize function should not modify the input tensor
        # self.som_builder.optimize(x.data)
        self.som_builder.node.resize_(node.size()).copy_(node)

        # assign points to SOM nodes; each point's node center is subtracted from it later
        self.mask, mask_row_max, min_idx = self.som_builder.query_topk(x.data, k=self.opt.k)  # BxkNxnode_num, Bxnode_num, BxkN
        mask_row_sum = torch.sum(self.mask, dim=1)  # Bxnode_num
        mask = self.mask.unsqueeze(1)  # Bx1xkNxnode_num

        # stack x and sn k times along the point dimension (N -> kN)
        x_list, sn_list = [], []
        for i in range(self.opt.k):
            x_list.append(x)
            sn_list.append(sn)
        x_stack = torch.cat(tuple(x_list), dim=2)
        sn_stack = torch.cat(tuple(sn_list), dim=2)

        # re-compute center, instead of using som.node
        x_stack_data_unsqueeze = x_stack.data.unsqueeze(3)  # BxCxkNx1
        x_stack_data_masked = x_stack_data_unsqueeze * mask.float()  # BxCxkNxnode_num
        cluster_mean = torch.sum(x_stack_data_masked, dim=2) / (mask_row_sum.unsqueeze(1).float()+1e-5)  # BxCxnode_num
        self.som_builder.node = cluster_mean
        self.som_node = self.som_builder.node


        # ====== apply transformer to rotate x_stack, sn_stack, som_node ======
        # sin_theta = self.transformer(x=self.som_node, sn=None, epoch=epoch)  # Bx1
        # # sin_theta = self.transformer(x=torch.cat((x_stack, sn_stack), dim=1), sn=None, epoch=epoch)  # Bx1
        # cos_theta = torch.sqrt(1 + 1e-5 - sin_theta*sin_theta)  # Bx1
        # B = x.size()[0]
        # rotation_matrix = torch.Tensor(B, 3, 3).zero_().to(self.opt.device)  # Bx3x3
        # rotation_matrix[:, 0, 0] = cos_theta[:, 0]
        # rotation_matrix[:, 0, 2] = sin_theta[:, 0]
        # rotation_matrix[:, 1, 1] = 1
        # rotation_matrix[:, 2, 0] = -1 * sin_theta[:, 0]
        # rotation_matrix[:, 2, 2] = cos_theta[:, 0]
        # # print(rotation_matrix)

        # x_stack = torch.matmul(rotation_matrix, x_stack)
        # sn_stack = torch.matmul(rotation_matrix, sn_stack)
        # self.som_node = torch.matmul(rotation_matrix, self.som_node)
        # self.som_builder.node = torch.matmul(rotation_matrix.data, self.som_builder.node)
        # ====== apply transformer to rotate x_stack, sn_stack, som_node ======


        # assign each point to its cluster center
        node_expanded = self.som_node.data.unsqueeze(2)  # BxCx1xnode_num, som.node is BxCxnode_num
        self.centers = torch.sum(mask.float() * node_expanded, dim=3).detach()  # BxCxkN

        self.x_decentered = (x_stack - self.centers).detach()  # Bx3xkN
        x_augmented = torch.cat((self.x_decentered, sn_stack), dim=1)  # Bx6xkN

        # go through the first PointNet
        if self.opt.surface_normal:
            self.first_pn_out = self.first_pointnet(x_augmented, epoch)
        else:
            self.first_pn_out = self.first_pointnet(self.x_decentered, epoch)

        M = node.size()[2]
        with torch.cuda.device(self.first_pn_out.get_device()):
            gather_index = index_max.forward_cuda(self.first_pn_out.detach(),
                                                  min_idx.int(),
                                                  M).detach().long()
        self.first_pn_out_masked_max = self.first_pn_out.gather(dim=2, index=gather_index * mask_row_max.unsqueeze(1).long())  # BxCxM

        if self.opt.som_k >= 2:
            # second pointnet, knn search on SOM nodes: ----------------------------------
            self.knn_center_1, self.knn_feature_1 = self.knnlayer(self.som_node, self.first_pn_out_masked_max, node_knn_I, self.opt.som_k, self.opt.som_k_type, epoch)

            # final pointnet --------------------------------------------------------------
            self.final_pn_out = self.final_pointnet(torch.cat((self.knn_center_1, self.knn_feature_1), dim=1), epoch)  # Bx1024xM
        else:
            # final pointnet --------------------------------------------------------------
            self.final_pn_out = self.final_pointnet(torch.cat((self.som_node, self.first_pn_out_masked_max), dim=1), epoch)  # Bx1024xM

        self.feature, _ = torch.max(self.final_pn_out, dim=2, keepdim=False)

        return self.feature
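
For reference, below is a minimal, self-contained sketch of the "re-compute center" step above: the one-hot point-to-node mask turns the per-node mean into a single broadcasted multiply-and-sum. The sizes, random tensors, and single-assignment mask are hypothetical (query_topk builds the analogous top-k mask over the stacked points); only torch is assumed.

import torch

# Hypothetical sizes: B batches, C=3 coordinates, kN stacked points, M SOM nodes.
B, C, kN, M = 2, 3, 12, 4
x_stack = torch.randn(B, C, kN)
min_idx = torch.randint(0, M, (B, kN))                                   # node assignment per point
mask = torch.zeros(B, kN, M).scatter_(2, min_idx.unsqueeze(2), 1.0)      # BxkNxM one-hot mask

mask_row_sum = mask.sum(dim=1)                                           # BxM, points per node
x_masked = x_stack.unsqueeze(3) * mask.unsqueeze(1)                      # BxCxkNxM
cluster_mean = x_masked.sum(dim=2) / (mask_row_sum.unsqueeze(1) + 1e-5)  # BxCxM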
Example 2
    def forward(self, x, sn, node, node_knn_I, is_train=False, epoch=None):
        '''
        :param x: Bx3xN Tensor
        :param sn: Bx3xN Tensor
        :param node: Bx3xM FloatTensor
        :param node_knn_I: BxMxk_som LongTensor
        :param is_train: determines whether noise is added in the KNNModule
        :return: BxC global feature Tensor (max-pooled over nodes)
        '''

        # optimize the SOM on the raw tensor data; the optimize function should not modify the input tensor
        # self.som_builder.optimize(x.data)
        # self.som_builder.node.resize_(node.size()).copy_(node)

        # assign points to SOM nodes; each point's node center is subtracted from it later
        mask, mask_row_max, min_idx = som.query_topk(
            node, x.data, node.size()[2],
            k=self.opt.k)  # BxkNxnode_num, Bxnode_num, BxkN
        mask_row_sum = torch.sum(mask, dim=1)  # Bxnode_num
        mask = mask.unsqueeze(1)  # Bx1xkNxnode_num

        # stack x and sn k times along the point dimension (N -> kN)
        x_list, sn_list = [], []
        for i in range(self.opt.k):
            x_list.append(x)
            sn_list.append(sn)
        x_stack = torch.cat(tuple(x_list), dim=2)
        sn_stack = torch.cat(tuple(sn_list), dim=2)

        # re-compute center, instead of using som.node
        x_stack_data_unsqueeze = x_stack.data.unsqueeze(3)  # BxCxkNx1
        x_stack_data_masked = x_stack_data_unsqueeze * mask.float(
        )  # BxCxkNxnode_num
        cluster_mean = torch.sum(x_stack_data_masked, dim=2) / (
            mask_row_sum.unsqueeze(1).float() + 1e-5)  # BxCxnode_num
        som_node_cluster_mean = cluster_mean

        # assign each point to its cluster center
        node_expanded = som_node_cluster_mean.unsqueeze(
            2)  # BxCx1xnode_num, som.node is BxCxnode_num
        centers = torch.sum(mask.float() * node_expanded,
                            dim=3).detach()  # BxCxkN

        x_decentered = (x_stack - centers).detach()  # Bx3xkN
        x_augmented = torch.cat((x_decentered, sn_stack), dim=1)  # Bx6xkN

        # go through the first PointNet
        if self.opt.surface_normal:
            first_pn_out = self.first_pointnet(x_augmented, epoch)
        else:
            first_pn_out = self.first_pointnet(x_decentered, epoch)

        # gather_index = self.masked_max.compute(first_pn_out, min_idx, mask).detach()
        M = node.size()[2]
        with torch.cuda.device(first_pn_out.get_device()):
            gather_index = index_max.forward_cuda(first_pn_out.detach(),
                                                  min_idx.int(),
                                                  M).detach().long()
        first_pn_out_masked_max = first_pn_out.gather(
            dim=2,
            index=gather_index * mask_row_max.unsqueeze(1).long())  # BxCxM

        if self.opt.som_k >= 2:
            # second pointnet, knn search on SOM nodes: ----------------------------------
            knn_center_1, knn_feature_1 = self.knnlayer(
                som_node_cluster_mean, first_pn_out_masked_max, node_knn_I,
                self.opt.som_k, self.opt.som_k_type, epoch)

            # final pointnet --------------------------------------------------------------
            final_pn_out = self.final_pointnet(
                torch.cat((knn_center_1, knn_feature_1), dim=1),
                epoch)  # Bx1024xM
        else:
            # final pointnet --------------------------------------------------------------
            final_pn_out = self.final_pointnet(
                torch.cat((som_node_cluster_mean, first_pn_out_masked_max),
                          dim=1), epoch)  # Bx1024xM

        feature, _ = torch.max(final_pn_out, dim=2, keepdim=False)

        return feature
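
As a point of comparison, here is a pure-PyTorch sketch of the per-node masked max that index_max.forward_cuda plus gather compute above. Note that the original code gathers from first_pn_out with the argmax index (so gradients flow through the selected features), whereas this sketch produces the max values directly; scatter_reduce_ with reduce='amax' is assumed to be available (PyTorch >= 1.12), and all sizes are hypothetical.

import torch

B, C, kN, M = 2, 4, 16, 5
first_pn_out = torch.randn(B, C, kN)            # per-point features
min_idx = torch.randint(0, M, (B, kN))          # node assignment per point

# Channel-wise max over the points assigned to each node.
node_max = torch.full((B, C, M), float('-inf'))
node_max.scatter_reduce_(2, min_idx.unsqueeze(1).expand(B, C, kN), first_pn_out,
                         reduce='amax', include_self=True)
node_max[node_max == float('-inf')] = 0.0       # empty nodes -> 0, mirroring mask_row_max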
Example 3
    def forward(self, x, sn, node, node_knn_I, is_train=False, epoch=None):
        '''
        :param x: Bx3xN Tensor
        :param sn: Bx3xN Tensor
        :param node: Bx3xM FloatTensor
        :param node_knn_I: BxMxk_som LongTensor
        :param is_train: determines whether noise is added in the KNNModule
        :return: BxC global feature Tensor (max-pooled over rotations and nodes)
        '''
        device = x.device

        # optimize the SOM on the raw tensor data; the optimize function should not modify the input tensor
        # self.som_builder.optimize(x.data)
        # self.som_builder.node.resize_(node.size()).copy_(node)

        # assign points to SOM nodes; each point's node center is subtracted from it later
        mask, mask_row_max, min_idx = som.query_topk(
            node, x.data, node.size()[2],
            k=self.opt.k)  # BxkNxnode_num, Bxnode_num, BxkN
        mask_row_sum = torch.sum(mask, dim=1)  # Bxnode_num
        mask = mask.unsqueeze(1)  # Bx1xkNxnode_num

        # stack x and sn k times along the point dimension (N -> kN)
        x_list, sn_list = [], []
        for i in range(self.opt.k):
            x_list.append(x)
            sn_list.append(sn)
        x_stack = torch.cat(tuple(x_list), dim=2)  # Bx3xkN
        sn_stack = torch.cat(tuple(sn_list), dim=2)  # Bx3xkN

        # re-compute center, instead of using som.node
        x_stack_data_unsqueeze = x_stack.data.unsqueeze(3)  # BxCxkNx1
        x_stack_data_masked = x_stack_data_unsqueeze * mask.float(
        )  # BxCxkNxnode_num
        cluster_mean = torch.sum(x_stack_data_masked, dim=2) / (
            mask_row_sum.unsqueeze(1).float() + 1e-5)  # BxCxnode_num
        som_node_cluster_mean = cluster_mean

        # ====== rotate the pc, sn & som_node into R number of rotated versions ======
        B, R, N, kN, M = x_stack.size()[0], \
                         self.opt.rot_equivariant_no, \
                         x.size()[2], x_stack.size()[2], \
                         node.size()[2]
        rotation_matrix = self.rotation_matrix_template.to(device).expand(
            B, R, 3, 3).detach()  # 1xRx3x3 -> BxRx3x3

        x_stack_rot = torch.matmul(
            rotation_matrix,
            x_stack.unsqueeze(1).expand(B, R, 3,
                                        kN))  # BxRx3x3 * BxRx3xkN -> BxRx3xkN
        sn_stack_rot = torch.matmul(rotation_matrix,
                                    sn_stack.unsqueeze(1).expand(
                                        B, R, 3, kN))  # BxRx3xkN
        som_node_rot = torch.matmul(rotation_matrix,
                                    som_node_cluster_mean.unsqueeze(1).expand(
                                        B, R, 3, M))  # BxRx3xM

        node_knn_I_rot = node_knn_I.unsqueeze(1).expand(
            B, R, M, self.opt.som_k).contiguous()  # BxRxMxsom_k
        mask_rot = mask.unsqueeze(1).expand(B, R, 1, kN, M).contiguous()
        min_idx_rot = min_idx.unsqueeze(1).expand(B, R, kN).contiguous()
        mask_row_max_rot = mask_row_max.unsqueeze(1).expand(B, R,
                                                            M).contiguous()

        # ====== rotate the pc, sn & som_node into R number of rotated versions ======

        # assign each point to its cluster center
        # single rotation ------ begin ------
        # node_expanded = som_node_cluster_mean.unsqueeze(2)  # Bx3x1xM, som.node is Bx3xM
        # centers = torch.sum(mask.float() * node_expanded, dim=3).detach()  # BxCxkN
        #
        # x_decentered = (x_stack - centers).detach()  # Bx3xkN
        # x_augmented = torch.cat((x_decentered, sn_stack), dim=1)  # Bx6xkN
        # single rotation ------ end ------

        # multiple rotations ------ begin ------
        node_rot_expanded = som_node_rot.unsqueeze(
            3)  # BxRx3x1xM, som_node_rot is BxRx3xM
        # mask_rot: BxRx1xkNxM (expanded from Bx1xkNxM), centers_rot: BxRx3xkN
        centers_rot = torch.sum(mask_rot.float() * node_rot_expanded,
                                dim=4).detach()  # BxRx3xkN

        x_decentered_rot = (x_stack_rot - centers_rot).detach()  # BxRx3xkN
        x_augmented_rot = torch.cat((x_decentered_rot, sn_stack_rot),
                                    dim=2)  # BxRx6xkN
        # multiple rotations ------ end ------

        # go through the first PointNet
        if self.opt.surface_normal:
            first_pn_out_rot = self.first_pointnet(
                x_augmented_rot.contiguous().view(B * R, 6, kN).contiguous(),
                epoch)
        else:
            first_pn_out_rot = self.first_pointnet(
                x_decentered_rot.contiguous().view(B * R, 3, kN).contiguous(),
                epoch)  # x_decentered_rot is BxRx3xkN, i.e. 3 channels here
        C = first_pn_out_rot.size()[1]

        # reshape min_idx_rot, mask_rot, mask_row_max_rot to merge the batch and rotation dims
        min_idx_rot = min_idx_rot.contiguous().view(
            B * R, kN).contiguous()  # BxRxkN -> BRxkN
        mask_rot = mask_rot.contiguous().view(B * R, 1, kN, M).contiguous(
        )  # BxRx1xkNxM -> BRx1xkNxM
        mask_row_max_rot = mask_row_max_rot.contiguous().view(
            B * R, M).contiguous().unsqueeze(1).long()  # BxRxM -> BRx1xM

        # first_gather_index_rot = self.masked_max.compute(first_pn_out_rot,
        #                                                  min_idx_rot,
        #                                                  mask_rot).detach()
        with torch.cuda.device(first_pn_out_rot.get_device()):
            first_gather_index_rot = index_max.forward_cuda(
                first_pn_out_rot.detach(), min_idx_rot.int(),
                M).detach().long()
        first_pn_out_masked_max_rot = first_pn_out_rot.gather(
            dim=2, index=first_gather_index_rot * mask_row_max_rot)  # BRxCxM

        # scatter the masked_max back to the kN points
        scattered_first_masked_max = torch.gather(
            first_pn_out_masked_max_rot,
            dim=2,
            index=min_idx_rot.unsqueeze(1).expand(B * R,
                                                  first_pn_out_rot.size()[1],
                                                  kN))  # BRxCxkN
        first_pn_out_fusion = torch.cat(
            (first_pn_out_rot, scattered_first_masked_max), dim=1)  # BRx2CxkN
        second_pn_out = self.second_pointnet(first_pn_out_fusion, epoch)

        # second_gather_index_rot = self.masked_max.compute(second_pn_out,
        #                                                   min_idx_rot,
        #                                                   mask_rot).detach()  # BRxCxM
        with torch.cuda.device(second_pn_out.get_device()):
            second_gather_index_rot = index_max.forward_cuda(
                second_pn_out.detach(), min_idx_rot.int(), M).detach().long()
        second_pn_out_masked_max_rot = second_pn_out.gather(
            dim=2, index=second_gather_index_rot * mask_row_max_rot)  # BRxCxM

        if self.opt.rot_equivariant_pooling_mode == 'per-hierarchy':
            # second_pn_out_masked_max_rot: BRxCxM
            second_pn_out_masked_max_rot = second_pn_out_masked_max_rot.contiguous(
            ).view(B, R, C, M).contiguous()  # BxRxCxM
            second_pn_out_masked_max_rot, _ = torch.max(
                second_pn_out_masked_max_rot, dim=1,
                keepdim=True)  # BxRxCxM -> Bx1xCxM
            second_pn_out_masked_max_rot = second_pn_out_masked_max_rot.expand(
                B, R, C, M).contiguous()  # Bx1xCxM -> BxRxCxM
            second_pn_out_masked_max_rot = second_pn_out_masked_max_rot.contiguous(
            ).view(
                B * R,
                C,
                M,
            ).contiguous()  # BRxCxM
        if self.opt.som_k >= 2:
            # second pointnet, knn search on SOM nodes: ----------------------------------
            knn_center_1_rot, knn_feature_1_rot = self.knnlayer(
                som_node_rot.contiguous().view(B * R, 3, M).contiguous(),
                second_pn_out_masked_max_rot,
                node_knn_I_rot.contiguous().view(B * R, M,
                                                 self.opt.som_k).contiguous(),
                self.opt.som_k, self.opt.som_k_type, epoch)
            C2 = knn_feature_1_rot.size()[1]

            # final pointnet --------------------------------------------------------------
            if self.opt.rot_equivariant_pooling_mode == 'per-hierarchy':
                knn_feature_1_rot = knn_feature_1_rot.contiguous().view(
                    B, R, C2, M).contiguous()  # B*RxC2xM -> BxRxC2xM
                knn_feature_1_rot, _ = torch.max(knn_feature_1_rot,
                                                 dim=1,
                                                 keepdim=True)  # Bx1xC2xM
                knn_feature_1_rot = knn_feature_1_rot.expand(
                    B, R, C2, M).contiguous()  # Bx1xC2xM -> BxRxC2xM
                knn_feature_1_rot = knn_feature_1_rot.contiguous().view(
                    B * R, C2, M).contiguous()
            final_pn_out_rot = self.final_pointnet(
                torch.cat((knn_center_1_rot, knn_feature_1_rot), dim=1),
                epoch)  # BRx1024xM
        else:
            # final pointnet --------------------------------------------------------------
            final_pn_out_rot = self.final_pointnet(
                torch.cat((som_node_rot.contiguous().view(
                    B * R, 3, M).contiguous(), second_pn_out_masked_max_rot),
                          dim=1), epoch)  # BRx1024xM

        # final_pn_out_rot:  BRx1024xM
        final_pn_out_rot = final_pn_out_rot.contiguous().view(
            B, R, self.opt.feature_num, M).contiguous()

        feature_rot, _ = torch.max(final_pn_out_rot, dim=3,
                                   keepdim=False)  # BxRxC
        feature, _ = torch.max(feature_rot, dim=1, keepdim=False)

        # # debug using vanilla pointnet
        # pn_out = self.pn(x)  # BxCxN
        # feature, _ = torch.max(pn_out, dim=2, keepdim=False)

        return feature
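
self.rotation_matrix_template is not defined in this snippet. A plausible construction, assuming R rotations equally spaced about the y axis (the same axis as the commented-out transformer rotation in Example 1), is sketched below; R and the angle spacing are assumptions, and only the 1xRx3x3 shape is taken from the code above.

import math
import torch

R = 8                                                   # hypothetical rot_equivariant_no
angles = torch.arange(R, dtype=torch.float32) * (2.0 * math.pi / R)
cos, sin = torch.cos(angles), torch.sin(angles)

# 1xRx3x3 template of rotations about the y axis, expanded to BxRx3x3 in forward().
rotation_matrix_template = torch.zeros(1, R, 3, 3)
rotation_matrix_template[0, :, 0, 0] = cos
rotation_matrix_template[0, :, 0, 2] = sin
rotation_matrix_template[0, :, 1, 1] = 1.0
rotation_matrix_template[0, :, 2, 0] = -sin
rotation_matrix_template[0, :, 2, 2] = cos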
Example 4
                                               M).long()
    end_t = time.time()
    print('cpu single thread time: %f' % (end_t - begin_t))

    begin_t = time.time()
    max_idx_multi_cpu = index_max.forward_multi_thread_cpu(data, index, M,
                                                           8).long()
    end_t = time.time()
    print('cpu multi thread time: %f' % (end_t - begin_t))

    data_cuda = data.cuda()
    index_cuda = index.cuda()

    begin_t = time.time()
    for i in range(100):
        max_idx_cuda = index_max.forward_cuda(data_cuda, index_cuda, M).long()
    end_t = time.time()
    print('cuda cpp time, 100 times: %f' % (end_t - begin_t))

    begin_t = time.time()
    for i in range(100):
        max_idx_cuda_shared_mem = index_max.forward_cuda_shared_mem(
            data_cuda, index_cuda, M).long()
    end_t = time.time()
    print('cuda cpp shared mem time, 100 times: %f' % (end_t - begin_t))

    mask_max = operations.MaskedMax(M)
    begin_t = time.time()
    for i in range(100):
        max_idx_gt = mask_max.compute(data_cuda, index_cuda, None)
    end_t = time.time()
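
A simple way to sanity-check the CUDA kernels in this benchmark is to compare them against a pure-PyTorch reference. The helper below is illustrative and assumes the semantics implied by the forward() examples above: data is BxCxkN float, index is BxkN with values in [0, M), and the result holds, per node and channel, the index of the point with the maximal feature value (empty nodes fall back to index 0).

import torch

def reference_index_max(data, index, M):
    # For every node m: channel-wise argmax over the points assigned to m.
    B, C, kN = data.size()
    out = torch.zeros(B, C, M, dtype=torch.long, device=data.device)
    for m in range(M):
        masked = data.masked_fill((index != m).unsqueeze(1), float('-inf'))  # BxCxkN
        out[:, :, m] = masked.argmax(dim=2)                                  # BxC
    return out

# e.g. print(torch.equal(reference_index_max(data_cuda, index_cuda, M), max_idx_cuda))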