# Example #1
    def __init__(self, class_count, nfeatures=3):
        """Build the multi-scale-grouping PointNet++ segmentation network.

        Args:
            class_count: number of per-point output classes.
            nfeatures: number of input features per point (default 3; the
                first MLP layer of each scale consumes this width).
        """
        super(PointNet2MSGSeg, self).__init__()

        # Set-abstraction level 1: 512 centroids, three grouping radii,
        # one MLP per scale.
        self.sa1_module = SAModuleMSG(512, [0.1, 0.2, 0.4], [16, 32, 128], [
            MLP([nfeatures, 32, 32, 64]),
            MLP([nfeatures, 64, 64, 128]),
            MLP([nfeatures, 64, 96, 128])
        ])

        # Because we concat the output of each scale (plus xyz) as the
        # feature of each point, the next level's input width is a sum.
        n_features_l2 = 3 + 64 + 128 + 128
        self.sa2_module = SAModuleMSG(128, [0.2, 0.4, 0.8], [32, 64, 128], [
            MLP([n_features_l2, 64, 64, 128]),
            MLP([n_features_l2, 128, 128, 256]),
            MLP([n_features_l2, 128, 128, 256])
        ])

        n_features_l3 = 3 + 128 + 256 + 256
        self.sa3_module = GlobalSAModule(MLP([n_features_l3, 256, 512, 1024]))

        # Feature propagation (decoder). Input widths are the coarser level's
        # output plus the skip connection, written as sums (1664 and 576)
        # instead of hard-coded so the arithmetic is auditable.
        self.fp3_module = FPModule(1, MLP([1024 + (128 + 256 + 256), 256, 256]))
        self.fp2_module = FPModule(3, MLP([256 + (64 + 128 + 128), 256, 128]))
        self.fp1_module = FPModule(3, MLP([128, 128, 128, 128]))

        # Per-point classification head.
        self.lin1 = torch.nn.Linear(128, 128)
        self.lin2 = torch.nn.Linear(128, 128)
        self.lin3 = torch.nn.Linear(128, class_count)
# Example #2
    def __init__(self,
                 class_count,
                 n_features=3,
                 num_points=1024,
                 sort_pool_k=32):
        """Build a multi-resolution-grouping PointNet++ classifier with a
        global sort-pooling readout.

        Args:
            class_count: number of output classes.
            n_features: input features per point (default 3).
            num_points: number of points expected per input cloud.
            sort_pool_k: number of rows kept by the sort-pooling readout.
        """
        super(PointNet2MRGSortPoolClass, self).__init__()

        # Level-2 input width: xyz (3) concatenated with the 128 level-1
        # features.
        nFeaturesL2 = 3 + 128

        # The MLPs are shared between the three resolution branches to lower
        # the model memory footprint.  (Renamed from the original typo
        # "shared_mpls" — these are MLPs.)
        shared_mlps = [
            SAModuleFullPoint(0.4, 16, MLP([n_features, 64, 64, 128])),
            SAModuleFullPoint(0.9, 32, MLP([nFeaturesL2, 128, 128, 256]))
        ]

        self.high_resolution_module = SAModuleMRG(num_points, 512, shared_mlps)
        self.mid_resolution_module = SAModuleMRG(num_points, 256, shared_mlps)
        self.low_resolution_module = SAModuleMRG(num_points, 128, shared_mlps)

        # NOTE(review): 789 looks hand-computed; verify it matches the
        # concatenated width of the three resolution branches' outputs.
        self.readout = GlobalSortPool(MLP([789, 1024, 1024, 1024]),
                                      k=sort_pool_k)

        # Classification layers: sort pooling emits k rows of 1024 features,
        # flattened before the linear head.
        sort_pool_out = 1024 * sort_pool_k
        self.lin1 = Lin(sort_pool_out, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.lin2 = Lin(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.lin3 = Lin(256, class_count)
    def __init__(self, class_count, n_feature=3, sort_pool_k=32):
        """Build a multi-scale-grouping PointNet++ segmentation network with
        a global sort-pooling bottleneck.

        Args:
            class_count: number of per-point output classes.
            n_feature: input features per point (default 3).
            sort_pool_k: number of rows kept by the sort-pooling readout.
        """
        super(PointNet2MSGSortPoolSeg, self).__init__()

        # Set-abstraction level 1: 512 centroids, three grouping radii,
        # one MLP per scale.
        self.sa1_module = SAModuleMSG(512, [0.1, 0.2, 0.4], [16, 32, 128], [
            MLP([n_feature, 32, 32, 64]),
            MLP([n_feature, 64, 64, 128]),
            MLP([n_feature, 64, 96, 128])
        ])

        # Because we concat the output of each scale (plus xyz) as the
        # feature of each point, the next level's input width is a sum.
        n_features_l2 = 3 + 64 + 128 + 128
        self.sa2_module = SAModuleMSG(128, [0.2, 0.4, 0.8], [32, 64, 128], [
            MLP([n_features_l2, 64, 64, 128]),
            MLP([n_features_l2, 128, 128, 256]),
            MLP([n_features_l2, 128, 128, 256])
        ])

        n_features_l3 = 3 + 128 + 256 + 256
        self.sa3_module = GlobalSortPool(MLP([n_features_l3, 256, 512, 1024]), sort_pool_k)

        # Segmentation layers: the sort-pooled global feature (k rows of
        # 1024) is propagated back down through the FP layers.
        classification_point_feature = 1024 * sort_pool_k
        self.fp3_module = FPLayer(1, MLP([classification_point_feature + 256, 256, 256]))
        self.fp2_module = FPLayer(3, MLP([256 + 128, 256, 128]))
        self.fp1_module = FPLayer(3, MLP([128, 128, 128, 128]))

        # Per-point classification head.
        self.lin1 = Lin(128, 128)
        self.bn1 = nn.BatchNorm1d(128)
        self.lin2 = Lin(128, 128)
        self.bn2 = nn.BatchNorm1d(128)
        self.lin3 = Lin(128, class_count)
    def __init__(self, class_count, bn_momentum=0.1):
        """Single-scale-grouping PointNet++ classifier.

        Args:
            class_count: number of output classes.
            bn_momentum: momentum passed to the head's BatchNorm1d layers.
        """
        super(PointNet2Class, self).__init__()

        # Encoder: two local set-abstraction stages followed by a global one.
        # Each stage's input width is xyz (3) plus the previous stage's
        # feature width.
        self.sa1_module = SAModule(512, 0.2, 32, MLP([3, 64, 64, 128]))
        self.sa2_module = SAModule(128, 0.4, 64, MLP([3 + 128, 128, 128, 256]))
        self.sa3_module = GlobalSAModule(MLP([3 + 256, 256, 512, 1024]))

        # Classification head: 1024 -> 512 -> 256 -> class_count, with batch
        # normalization after each hidden linear layer.
        self.lin1 = Lin(1024, 512)
        self.bn1 = nn.BatchNorm1d(512, momentum=bn_momentum)
        self.lin2 = Lin(512, 256)
        self.bn2 = nn.BatchNorm1d(256, momentum=bn_momentum)
        self.lin3 = Lin(256, class_count)
# Example #5
    def __init__(self, class_count, nfeatures=3):
        """Multi-scale-grouping PointNet++ classifier.

        Args:
            class_count: number of output classes.
            nfeatures: input features per point (default 3).
        """
        super(PointNet2MSGClass, self).__init__()

        # Level 1: sample 512 centroids and group at three radii; one MLP
        # per scale, all starting from the raw per-point feature width.
        self.sa1_module = SAModuleMSG(512, [0.1, 0.2, 0.4], [16, 32, 128], [
            MLP([nfeatures, 32, 32, 64]),
            MLP([nfeatures, 64, 64, 128]),
            MLP([nfeatures, 64, 96, 128])
        ])

        # The per-scale outputs are concatenated with xyz, so level 2's
        # input width is the sum of the level-1 output widths plus 3.
        fused_l1_width = 3 + 64 + 128 + 128
        self.sa2_module = SAModuleMSG(128, [0.2, 0.4, 0.8], [32, 64, 128], [
            MLP([fused_l1_width, 64, 64, 128]),
            MLP([fused_l1_width, 128, 128, 256]),
            MLP([fused_l1_width, 128, 128, 256])
        ])

        # Level 3 pools everything into a single 1024-dim global feature.
        fused_l2_width = 3 + 128 + 256 + 256
        self.sa3_module = GlobalSAModule(MLP([fused_l2_width, 256, 512, 1024]))

        # Classification head: 1024 -> 512 -> 256 -> class_count with batch
        # normalization after each hidden layer.
        self.lin1 = Lin(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.lin2 = Lin(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.lin3 = Lin(256, class_count)