Code example #1
 def multi_cls_head(self, *param):
     if len(param) == 3:
         return (pt_utils.Seq(param[0]).fc(
             param[1], bn=True).dropout(0.5).fc(param[2], activation=None))
     elif len(param) == 4:
         return (pt_utils.Seq(param[0]).fc(
             param[1],
             bn=True).dropout(0.5).fc(param[2],
                                      activation=nn.Softmax(param[3])))
     raise ValueError('Invalid length of param')
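For reference, the three-argument branch builds a two-layer classification head; below is a hypothetical plain-PyTorch equivalent, assuming `Seq.fc(out, bn=True)` expands to Linear -> BatchNorm1d -> ReLU and that `activation=None` leaves raw logits. This is a sketch of the builder's apparent behavior, not code from the example.

import torch
import torch.nn as nn

def multi_cls_head_plain(in_dim, hidden_dim, num_classes, dropout=0.5):
    # Hypothetical equivalent of the len(param) == 3 branch above,
    # assuming Seq.fc(..., bn=True) means Linear -> BatchNorm1d -> ReLU.
    return nn.Sequential(
        nn.Linear(in_dim, hidden_dim),
        nn.BatchNorm1d(hidden_dim),
        nn.ReLU(inplace=True),
        nn.Dropout(dropout),
        nn.Linear(hidden_dim, num_classes),  # activation=None -> raw logits
    )

head = multi_cls_head_plain(1024, 512, 40)
logits = head(torch.randn(8, 1024))  # (batch, num_classes)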
Code example #2
    def __init__(self, num_classes, input_channels=3, use_xyz=True):
        super(Pointnet2MSG, self).__init__()

        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModuleMSG(npoint=512,
                                radii=[0.1, 0.2, 0.4],
                                nsamples=[16, 32, 128],
                                mlps=[[input_channels, 32, 32, 64],
                                      [input_channels, 64, 64, 128],
                                      [input_channels, 64, 96, 128]],
                                use_xyz=use_xyz))

        input_channels = 64 + 128 + 128
        self.SA_modules.append(
            PointnetSAModuleMSG(npoint=128,
                                radii=[0.2, 0.4, 0.8],
                                nsamples=[32, 64, 128],
                                mlps=[[input_channels, 64, 64, 128],
                                      [input_channels, 128, 128, 256],
                                      [input_channels, 128, 128, 256]],
                                use_xyz=use_xyz))
        self.SA_modules.append(
            PointnetSAModule(mlp=[128 + 256 + 256, 256, 512, 1024],
                             use_xyz=use_xyz))

        self.FC_layer = (pt_utils.Seq(1024)
                .fc(512, bn=True)
                .dropout(0.5)
                .fc(256, bn=True)
                .dropout(0.5)
                .fc(num_classes, activation=None))
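These snippets show only the constructors; below is a minimal sketch of how such a classification network is usually driven. The tensor layout and the inlined split of coordinates and features are assumptions based on common pointnet2 PyTorch forward implementations, not part of the example above.

    def forward(self, pointcloud):
        # pointcloud: (B, N, 3 + input_channels); split coordinates from extra features
        xyz = pointcloud[..., 0:3].contiguous()
        features = (pointcloud[..., 3:].transpose(1, 2).contiguous()
                    if pointcloud.size(-1) > 3 else None)

        # each SA module subsamples the cloud and re-encodes its neighborhood features
        for module in self.SA_modules:
            xyz, features = module(xyz, features)

        # the final SA module (no npoint) pools everything into a single 1024-d vector,
        # so features is (B, 1024, 1) and the fc head expects (B, 1024)
        return self.FC_layer(features.squeeze(-1))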
Code example #3
    def __init__(self, option, model_type, dataset, modules):
        # call the initialization method of UnwrappedUnetBasedModel
        UnwrappedUnetBasedModel.__init__(self, option, model_type, dataset,
                                         modules)
        self._num_classes = dataset.num_classes
        self._weight_classes = dataset.weight_classes
        self._use_category = option.use_category
        if self._use_category:
            if not dataset.class_to_segments:
                raise ValueError(
                    "The dataset needs to specify a class_to_segments property when using category information for segmentation"
                )
            self._num_categories = len(dataset.class_to_segments.keys())
            log.info(
                "Using category information for the predictions with %i categories",
                self._num_categories)
        else:
            self._num_categories = 0

        # Last MLP
        last_mlp_opt = option.mlp_cls

        self.FC_layer = pt_utils.Seq(last_mlp_opt.nn[0] + self._num_categories)
        for i in range(1, len(last_mlp_opt.nn)):
            self.FC_layer.conv1d(last_mlp_opt.nn[i], bn=True)
        if last_mlp_opt.dropout:
            self.FC_layer.dropout(p=last_mlp_opt.dropout)

        self.FC_layer.conv1d(self._num_classes, activation=None)
        self.loss_names = ["loss_seg"]
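The `option.mlp_cls` node consumed above is a small config object whose `nn` list drives the loop; the snippet below shows the shape this constructor expects. The use of OmegaConf and the concrete values are illustrative assumptions; real torch-points3d configs may differ.

from omegaconf import OmegaConf

# Illustrative mlp_cls node: Seq input is nn[0] (+ num_categories), followed by
# conv1d(512) and conv1d(256) with BN, one dropout, then conv1d(num_classes) with no activation.
mlp_cls = OmegaConf.create({
    "nn": [1024, 512, 256],
    "dropout": 0.5,
})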
Code example #4
    def __init__(self, input_channels=3, use_xyz=True):
        super(Pointnet2SSG, self).__init__()
        print('network initialized')
        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModule1(
                npoint=2048,
                radius=0.01,
                nsample=64,
                mlp=[input_channels, 32, 32],
                use_xyz=use_xyz,
            ))

        self.SA_modules.append(
            PointnetSAModule1(
                npoint=512,
                radius=0.12,
                nsample=64,
                mlp=[32, 64, 64],
                use_xyz=use_xyz,
            ))

        self.SA_modules.append(
            PointnetSAModule1(mlp=[64, 256, 512], use_xyz=use_xyz))

        self.Feat_layer = (
            pt_utils.Seq(512)
            #            .fc(512, bn=True)#, activation=None
            #            .dropout(0.5)
            .fc(512, bn=False, activation=None))
Code example #5
    def __init__(self, num_classes, input_channels=3, use_xyz=True):
        super(Pointnet2SSG, self).__init__()

        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModule(
                npoint=512,
                radius=0.2,
                nsample=64,
                mlp=[input_channels, 64, 64, 128],
                use_xyz=use_xyz,
            ))
        self.SA_modules.append(
            PointnetSAModule(
                npoint=128,
                radius=0.4,
                nsample=64,
                mlp=[128, 128, 128, 256],
                use_xyz=use_xyz,
            ))
        self.SA_modules.append(
            PointnetSAModule(mlp=[256, 256, 512, 1024], use_xyz=use_xyz))

        self.FC_layer = (pt_utils.Seq(1024).fc(512, bn=True).dropout(0.5).fc(
            256, bn=True).dropout(0.5).fc(num_classes, activation=None))
Code example #6
    def __init__(self, num_classes, input_channels=6, use_xyz=True):
        super(Pointnet2MSG, self).__init__()

        self.SA_modules = nn.ModuleList()
        c_in = input_channels
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=1024,
                radii=[0.05, 0.1],
                nsamples=[16, 32],
                mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]],
                use_xyz=use_xyz,
            ))
        c_out_0 = 32 + 64

        c_in = c_out_0
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=256,
                radii=[0.1, 0.2],
                nsamples=[16, 32],
                mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]],
                use_xyz=use_xyz,
            ))
        c_out_1 = 128 + 128

        c_in = c_out_1
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=64,
                radii=[0.2, 0.4],
                nsamples=[16, 32],
                mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]],
                use_xyz=use_xyz,
            ))
        c_out_2 = 256 + 256

        c_in = c_out_2
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=16,
                radii=[0.4, 0.8],
                nsamples=[16, 32],
                mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
                use_xyz=use_xyz,
            ))
        c_out_3 = 512 + 512

        self.FP_modules = nn.ModuleList()
        self.FP_modules.append(
            PointnetFPModule(mlp=[256 + input_channels, 128, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
        self.FP_modules.append(
            PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512]))

        self.FC_layer = (pt_utils.Seq(128).conv1d(
            128, bn=True).dropout().conv1d(num_classes, activation=None))
Code example #7
    def __init__(self, num_classes, input_channels=3, use_xyz=True):
        super(Pointnet2MSG, self).__init__()

        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModuleMSG2D(
                npoint=1024,
                radii=[1.0, 3.0],
                nsamples=[8, 32],
                mlps=[
                    [input_channels, 32, 32, 64],
                    [input_channels, 64, 64, 128],
                ],
                use_xyz=use_xyz,
            )
        )

        input_channels = 64 + 128 + 128
        self.SA_modules.append(
            PointnetSAModuleMSG2D(
                npoint=512,
                radii=[2.0, 4.0],
                nsamples=[8, 32],
                mlps=[
                    [input_channels, 32, 32, 64],
                    [input_channels, 64, 64, 128],
                ],
                use_xyz=use_xyz,
            )
        )
        self.SA_modules.append(
            PointnetSAModuleMSG2D(
                npoint=256,
                radii=[3.0, 6.0],
                nsamples=[16, 32],
                mlps=[
                    [input_channels, 64, 64, 128],
                    [input_channels, 64, 64, 256],
                ],
                use_xyz=use_xyz,
            )
        )

        self.FP_modules = nn.ModuleList()
        self.FP_modules.append(PointnetFPModule2D(mlp=[256, 256]))
        self.FP_modules.append(PointnetFPModule2D(mlp=[128, 128]))
        self.FP_modules.append(PointnetFPModule2D(mlp=[128, 128, 128]))

        self.FC_layer = (
            pt_utils.Seq(128)
            .conv1d(256, bn=True)
            .dropout(0.5)
            .conv1d(128, bn=True)
            .dropout(0.5)
            .conv1d(num_classes, activation=None)
            .softmax(num_classes)
        )
Code example #8
    def __init__(self, num_classes, input_channels=3, use_xyz=True):
        super(Pointnet2SSG, self).__init__()

        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModule(
                npoint=1024,
                radius=0.1,
                nsample=32,
                mlp=[input_channels, 32, 32, 64],
                use_xyz=use_xyz,
            )
        )
        self.SA_modules.append(
            PointnetSAModule(
                npoint=256,
                radius=0.2,
                nsample=32,
                mlp=[64, 64, 64, 128],
                use_xyz=use_xyz,
            )
        )
        self.SA_modules.append(
            PointnetSAModule(
                npoint=64,
                radius=0.4,
                nsample=32,
                mlp=[128, 128, 128, 256],
                use_xyz=use_xyz,
            )
        )
        self.SA_modules.append(
            PointnetSAModule(
                npoint=16,
                radius=0.8,
                nsample=32,
                mlp=[256, 256, 256, 512],
                use_xyz=use_xyz,
            )
        )

        self.FP_modules = nn.ModuleList()
        self.FP_modules.append(
            PointnetFPModule(mlp=[128 + input_channels, 128, 128, 128])
        )
        self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))

        self.FC_layer = (
            pt_utils.Seq(128)
            .conv1d(128, bn=True)
            .dropout()
            .conv1d(num_classes, activation=None)
        )
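Segmentation variants like this one consume the FP modules in reverse, interpolating coarse features back up to full resolution before the per-point conv1d head; below is a minimal forward-pass sketch under the same assumptions as the classification sketch above.

    def forward(self, pointcloud):
        xyz = pointcloud[..., 0:3].contiguous()
        features = (pointcloud[..., 3:].transpose(1, 2).contiguous()
                    if pointcloud.size(-1) > 3 else None)

        # encoder: keep every intermediate resolution for the skip connections
        l_xyz, l_features = [xyz], [features]
        for module in self.SA_modules:
            li_xyz, li_features = module(l_xyz[-1], l_features[-1])
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        # decoder: walk the FP modules from coarsest to finest, fusing skip features
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])

        # per-point logits: (B, num_classes, N)
        return self.FC_layer(l_features[0])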
Code example #9
    def __init__(self, option, model_type, dataset, modules):
        BackboneBasedModel.__init__(self, option, model_type, dataset, modules)

        # Last MLP
        last_mlp_opt = option.mlp_cls
        self._dim_output = last_mlp_opt.nn[-1]

        self.FC_layer = pt_utils.Seq(last_mlp_opt.nn[0])
        for i in range(1, len(last_mlp_opt.nn)):
            self.FC_layer.conv1d(last_mlp_opt.nn[i], bn=True)

        self.loss_names = ["loss_patch_desc"]
Code example #10
    def __init__(self, input_channels=3, use_xyz=True, objective=False):
        super(Pointnet_Tracking, self).__init__()

        self.backbone_net = Pointnet_Backbone(input_channels, use_xyz)

        self.cosine = nn.CosineSimilarity(dim=1)

        self.mlp = pt_utils.SharedMLP([4 + 256, 256, 256, 256], bn=True)

        self.FC_layer_cla = (pt_utils.Seq(256).conv1d(256, bn=True).conv1d(
            256, bn=True).conv1d(1, activation=None))
        self.fea_layer = (pt_utils.Seq(256).conv1d(256, bn=True).conv1d(
            256, activation=None))
        self.vote_layer = (pt_utils.Seq(3 + 256).conv1d(256, bn=True).conv1d(
            256, bn=True).conv1d(3 + 256, activation=None))
        self.vote_aggregation = PointnetSAModule(radius=0.3,
                                                 nsample=16,
                                                 mlp=[1 + 256, 256, 256, 256],
                                                 use_xyz=use_xyz)
        self.num_proposal = 64
        self.FC_proposal = (pt_utils.Seq(256).conv1d(256, bn=True).conv1d(
            256, bn=True).conv1d(3 + 1 + 1, activation=None))
Code example #11
    def __init__(self, cfg, embedder, p_feature_dim, l_hidden_dim):
        super(ReferModel, self).__init__()

        self.p_feature_dim = p_feature_dim
        self.l_hidden_dim = l_hidden_dim
        self.language_module = LanguageModule(cfg, embedder)
        self.backbone_net = PointNet2Backbone(
            cfg,
            input_feature_dim=self.p_feature_dim,
            lang_hidden_dim=self.l_hidden_dim)

        self.box_proposals_layers = nn.Sequential(
            nn.Conv1d(in_channels=1024,
                      out_channels=512,
                      kernel_size=1,
                      stride=1),
            #nn.BatchNorm1d(512),
            #nn.Dropout(),
            nn.ReLU(),
            nn.Conv1d(in_channels=512,
                      out_channels=256,
                      kernel_size=1,
                      stride=1),
            #nn.BatchNorm1d(256),
            #nn.Dropout(),
            nn.ReLU(),
            nn.Conv1d(in_channels=256,
                      out_channels=128,
                      kernel_size=1,
                      stride=1),
            #nn.BatchNorm1d(128),
            #nn.Dropout(),
            nn.ReLU(),
            nn.Conv1d(in_channels=128, out_channels=6, kernel_size=1,
                      stride=1),
        )

        self.bbox_pred_layers = nn.Sequential(
            nn.ReLU(),
            nn.Linear(6 * 16, 7),
        )

        self.inst_seg_pred_layers = (
            pt_utils.Seq(135).conv1d(135, bn=True)
            # .dropout()
            .conv1d(1, activation=None)  # 0/1
        )
Code example #12
    def __init__(self, num_classes, input_channels=3, use_xyz=True):
        super(Pointnet2SSGQPU, self).__init__()

        self.SA_modules = nn.ModuleList()
        
        self.SA_modules.append(
            PointnetSAModuleQPU(
                npoint=256,
                radius=0.4,
                nsample=32,
                mlp=nn.Sequential(
                        qpu_layers.QPU(4*8, 64*4),
                        qpu_layers.KeepRealPart(dim=-1),
                        nn.Linear(64, 128)
                    ),
                use_xyz=use_xyz
            )
        )

        self.SA_modules.append(
            PointnetSAModule(
                npoint=128,
                radius=0.4,
                nsample=16,  # 64
                # mlp=[32, 128],
                mlp=[128, 128, 128, 256],
                use_xyz=use_xyz
            )
        )

        self.SA_modules.append(
            PointnetSAModule(mlp=[256, 256, 512, 1024], use_xyz=use_xyz)
        )

        self.FC_layer = (
            pt_utils.Seq(1024)
            .fc(256, bn=True)
            .dropout(0.5)
            .fc(128, bn=True)
            .dropout(0.5)
            .fc(num_classes, activation=None)
        )
Code example #13
    def __init__(self,
                 num_classes,
                 input_channels=3,
                 use_xyz=True,
                 type_r='r'):
        super(Pointnet2SSGQPU, self).__init__()
        if type_r == 'no_r':
            kp = qpu_layers.KeepImPart
            temp_size = 64 * 3
        else:
            kp = qpu_layers.KeepRealPart
            temp_size = 64 + 63 * ('inner' in type_r)

        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModuleQPU(npoint=256,
                                radius=0.4,
                                nsample=32,
                                mlp=nn.Sequential(
                                    qpu_layers.QPU(4 * 8, 64 * 4),
                                    kp(dim=-1, type_r=type_r),
                                    nn.Linear(temp_size, 128)),
                                use_xyz=use_xyz))

        self.SA_modules.append(
            PointnetSAModule(
                npoint=128,
                radius=0.4,
                nsample=16,  # 64
                # mlp=[32, 128],
                mlp=[128, 128, 128, 256],
                use_xyz=use_xyz))

        self.SA_modules.append(
            PointnetSAModule(mlp=[256, 256, 512, 1024], use_xyz=use_xyz))

        self.FC_layer = (pt_utils.Seq(1024).fc(256, bn=True).dropout(0.5).fc(
            128, bn=True).dropout(0.5).fc(num_classes, activation=None))
Code example #14
File: pc_encoder.py  Project: HomerW/ShapeAssembly
    def __init__(self, input_channels=3, use_xyz=True):
        super(PCEncoder, self).__init__()

        self.SA_modules = nn.ModuleList()
        self.SA_modules.append(
            PointnetSAModule(npoint=512,
                             radius=0.2,
                             nsample=64,
                             mlp=[input_channels, 64, 64, 128],
                             use_xyz=use_xyz,
                             bn=False))
        self.SA_modules.append(
            PointnetSAModule(npoint=128,
                             radius=0.4,
                             nsample=64,
                             mlp=[128, 128, 128, 256],
                             use_xyz=use_xyz,
                             bn=False))
        self.SA_modules.append(
            PointnetSAModule(mlp=[256, 256, 512, 1024],
                             use_xyz=use_xyz,
                             bn=False))

        self.FC_layer = (pt_utils.Seq(1024).fc(256, bn=False, activation=None))
Code example #15
 def regression_head(self, *param):
     return (pt_utils.Seq(param[0]).fc(param[1], bn=True).dropout(0.5).fc(
         param[2], activation=nn.Sigmoid()))
Code example #16
    def __init__(self, num_classes, input_channels=37, use_xyz=True):
        super(Pointnet2MSG, self).__init__()
        self.scaling_factor = 4
        self.SA_modules = nn.ModuleList()
        c_in = input_channels
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=int(512 / self.scaling_factor),  # The number of groups
                radii=[1, 3],
                nsamples=[
                    int(8 / self.scaling_factor),
                    int(32 / self.scaling_factor)
                ],  # The number of samples in each group 
                mlps=[[
                    c_in,
                    int(32 / self.scaling_factor),
                    int(32 / self.scaling_factor),
                    int(64 / self.scaling_factor)
                ],
                      [
                          c_in,
                          int(64 / self.scaling_factor),
                          int(64 / self.scaling_factor),
                          int(128 / self.scaling_factor)
                      ]],
                use_xyz=use_xyz,
            ))
        c_out_1 = int(64 / self.scaling_factor) + int(
            128 / self.scaling_factor)  # 512 x c_out_1

        c_in = c_out_1
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=int(512 / self.scaling_factor),
                radii=[2, 4],
                nsamples=[
                    int(8 / self.scaling_factor),
                    int(32 / self.scaling_factor)
                ],
                mlps=[[
                    c_in,
                    int(32 / self.scaling_factor),
                    int(32 / self.scaling_factor),
                    int(64 / self.scaling_factor)
                ],
                      [
                          c_in,
                          int(64 / self.scaling_factor),
                          int(64 / self.scaling_factor),
                          int(128 / self.scaling_factor)
                      ]],
                use_xyz=use_xyz,
            ))
        c_out_2 = int(64 / self.scaling_factor) + int(
            128 / self.scaling_factor)  # 512 x c_out_

        c_in = c_out_2
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=int(256 / self.scaling_factor),
                radii=[3, 6],
                nsamples=[
                    int(16 / self.scaling_factor),
                    int(32 / self.scaling_factor)
                ],
                mlps=[[
                    c_in,
                    int(64 / self.scaling_factor),
                    int(64 / self.scaling_factor),
                    int(128 / self.scaling_factor)
                ],
                      [
                          c_in,
                          int(64 / self.scaling_factor),
                          int(64 / self.scaling_factor),
                          int(128 / self.scaling_factor)
                      ]],
                use_xyz=use_xyz,
            ))
        c_out_3 = int(128 / self.scaling_factor) + int(
            128 / self.scaling_factor)  # 256 x c_out_2

        # c_in = c_out_2
        # self.SA_modules.append(
        #     PointnetSAModuleMSG(
        #         npoint=16,
        #         radii=[0.4, 0.8],
        #         nsamples=[16, 32],
        #         mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
        #         use_xyz=use_xyz,
        #     )
        # )
        # c_out_3 = 512 + 512

        self.FP_modules = nn.ModuleList()
        self.FP_modules.append(
            PointnetFPModule(mlp=[
                int(128 / self.scaling_factor) + input_channels,
                int(128 / self.scaling_factor),
                int(128 / self.scaling_factor),
                int(128 / self.scaling_factor)
            ]))
        self.FP_modules.append(
            PointnetFPModule(mlp=[
                int(256 / self.scaling_factor) + c_out_1,
                int(128 / self.scaling_factor),
                int(128 / self.scaling_factor)
            ])
        )  # FP 2 from ___ FP1: (256, 256) ___ to another two-layer MLP with kernel
        self.FP_modules.append(
            PointnetFPModule(mlp=[
                int(c_out_2 + c_out_3),
                int(256 / self.scaling_factor),
                int(256 / self.scaling_factor)
            ])
        )  # FP1 from last ___MSG: (256, c_out_2)___ to a two-layer MLP with kernel (256, 256)

        self.FC_layer = (
            pt_utils.Seq(int(128 / self.scaling_factor))  ### Input channels 
            .conv1d(int(256 / self.scaling_factor), bn=True)  ### 1d Conv1
            .dropout()  ### default is 0.5
            .conv1d(int(128 / self.scaling_factor))  ### 1d Conv2
            .dropout().conv1d(num_classes, activation=None))
Code example #17
    def __init__(self, input_channels=3, use_xyz=True, objective=False):
        super(Pointnet_Tracking, self).__init__()

        self.backbone_net = Pointnet_Backbone(input_channels, use_xyz)

        self.cosine = nn.CosineSimilarity(dim=1)

        self.mlp = pt_utils.SharedMLP([4 + 256, 256, 256, 256], bn=True)
        # SharedMLP(
        #   (layer0): Conv2d(
        #     (conv): Conv2d(260, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        #     (normlayer): BatchNorm2d(
        #       (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        #   (layer1): Conv2d(
        #     (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        #     (normlayer): BatchNorm2d(
        #       (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        #   (layer2): Conv2d(
        #     (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        #     (normlayer): BatchNorm2d(
        #       (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        # )
        self.FC_layer_cla = (pt_utils.Seq(256).conv1d(256, bn=True).conv1d(
            256, bn=True).conv1d(1, activation=None))
        # Seq(
        #   (0): Conv1d(
        #     (conv): Conv1d(256, 256, kernel_size=(1,), stride=(1,), bias=False)
        #     (normlayer): BatchNorm1d(
        #       (bn): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        #   (1): Conv1d(
        #     (conv): Conv1d(256, 256, kernel_size=(1,), stride=(1,), bias=False)
        #     (normlayer): BatchNorm1d(
        #       (bn): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        #   (2): Conv1d(
        #     (conv): Conv1d(256, 1, kernel_size=(1,), stride=(1,))
        #   )
        # )
        self.fea_layer = (pt_utils.Seq(256).conv1d(256, bn=True).conv1d(
            256, activation=None))
        # Seq(
        # (0): Conv1d(
        #     (conv): Conv1d(256, 256, kernel_size=(1,), stride=(1,), bias=False)
        #     (normlayer): BatchNorm1d(
        #     (bn): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        # )
        # (1): Conv1d(
        #     (conv): Conv1d(256, 256, kernel_size=(1,), stride=(1,))
        # )
        # )
        self.vote_layer = (pt_utils.Seq(3 + 256).conv1d(256, bn=True).conv1d(
            256, bn=True).conv1d(3 + 256, activation=None))
        # Seq(
        #   (0): Conv1d(
        #     (conv): Conv1d(259, 256, kernel_size=(1,), stride=(1,), bias=False)
        #     (normlayer): BatchNorm1d(
        #       (bn): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        #   (1): Conv1d(
        #     (conv): Conv1d(256, 256, kernel_size=(1,), stride=(1,), bias=False)
        #     (normlayer): BatchNorm1d(
        #       (bn): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #     )
        #     (activation): ReLU(inplace=True)
        #   )
        #   (2): Conv1d(
        #     (conv): Conv1d(256, 259, kernel_size=(1,), stride=(1,))
        #   )
        # )
        self.vote_aggregation = PointnetSAModule(
            radius=0.3,
            nsample=16,
            mlp=[1 + 256, 256, 256, 256],
            use_xyz=use_xyz)  # aggregate the votes into projected candidate center points
        # PointnetSAModule(
        #   (groupers): ModuleList(
        #     (0): QueryAndGroup()
        #   )
        #   (mlps): ModuleList(
        #     (0): SharedMLP(
        #       (layer0): Conv2d(
        #         (conv): Conv2d(260, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        #         (normlayer): BatchNorm2d(
        #           (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #         )
        #         (activation): ReLU(inplace=True)
        #       )
        #       (layer1): Conv2d(
        #         (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        #         (normlayer): BatchNorm2d(
        #           (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #         )
        #         (activation): ReLU(inplace=True)
        #       )
        #       (layer2): Conv2d(
        #         (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        #         (normlayer): BatchNorm2d(
        #           (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        #         )
        #         (activation): ReLU(inplace=True)
        #       )
        #     )
        #   )
        # )
        self.num_proposal = 64  # this is actually the npoint argument passed to _PointnetSAModuleBase(nn.Module).forward
        self.FC_proposal = (pt_utils.Seq(256).conv1d(256, bn=True).conv1d(
            256, bn=True).conv1d(3 + 1 + 1, activation=None))
Code example #18
 def set_last_mlp(self, last_mlp_opt):
     self.FC_layer = pt_utils.Seq(last_mlp_opt.nn[0])
     for i in range(1, len(last_mlp_opt.nn)):
         self.FC_layer.conv1d(last_mlp_opt.nn[i], bn=True)