Example #1
    def __init__(self, num_classes, input_channels=3):
        super().__init__()

        self.SA_modules = nn.ModuleList()
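        # Encoder: four single-scale-grouping set abstraction (SA) layers that
        # downsample the cloud to 1024 -> 256 -> 64 -> 16 points.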
        self.SA_modules.append(
            PointnetSAModule(npoint=1024,
                             radius=0.1,
                             nsample=32,
                             mlp=[input_channels, 32, 32, 64]))
        self.SA_modules.append(
            PointnetSAModule(npoint=256,
                             radius=0.2,
                             nsample=32,
                             mlp=[64, 64, 64, 128]))
        self.SA_modules.append(
            PointnetSAModule(npoint=64,
                             radius=0.4,
                             nsample=32,
                             mlp=[128, 128, 128, 256]))
        self.SA_modules.append(
            PointnetSAModule(npoint=16,
                             radius=0.8,
                             nsample=32,
                             mlp=[256, 256, 256, 512]))

        self.FP_modules = nn.ModuleList()
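        # Decoder: feature propagation (FP) layers; each mlp's first width is the
        # sum of the skip features and the coarser-level features being upsampled.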
        self.FP_modules.append(
            PointnetFPModule(mlp=[128 + input_channels, 128, 128, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))

        self.FC_layer = nn.Sequential(
            pt_utils.Conv1d(128, 128, bn=True), nn.Dropout(),
            pt_utils.Conv1d(128, num_classes, activation=None))
Example #2
    def __init__(self, num_classes, input_channels=9, use_xyz=True):
        super().__init__()

        self.SA_modules = nn.ModuleList()
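        # Encoder: multi-scale-grouping (MSG) SA layers; each level groups at two
        # radii and concatenates the per-scale outputs (c_out_* below).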
        c_in = input_channels
        self.SA_modules.append(
            PointnetSAModuleMSG(npoint=1024,
                                radii=[0.05, 0.1],
                                nsamples=[16, 32],
                                mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]],
                                use_xyz=use_xyz))
        c_out_0 = 32 + 64

        c_in = c_out_0
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=256,
                radii=[0.1, 0.2],
                nsamples=[16, 32],
                mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]],
            ))
        c_out_1 = 128 + 128

        c_in = c_out_1
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=64,
                radii=[0.2, 0.4],
                nsamples=[16, 32],
                mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]],
            ))
        c_out_2 = 256 + 256

        c_in = c_out_2
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=16,
                radii=[0.4, 0.8],
                nsamples=[16, 32],
                mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
            ))
        c_out_3 = 512 + 512

        self.FP_modules = nn.ModuleList()
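        # Decoder: FP layers sized to fuse each level's skip features with the
        # next-coarser level's output.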
        self.FP_modules.append(
            PointnetFPModule(
                mlp=[256 + (input_channels if use_xyz else 0), 128, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
        self.FP_modules.append(
            PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512]))

        self.FC_layer = nn.Sequential(
            pt_utils.Conv1d(128, 128, bn=True), nn.Dropout(),
            pt_utils.Conv1d(128, num_classes, activation=None))
Example #3
    def __init__(self, num_classes, input_channels=3, use_xyz=True, bn=True):
        super().__init__()

        NPOINTS = [1024, 256, 64, 16]
        RADIUS = [[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]]
        NSAMPLE = [[16, 32], [16, 32], [16, 32], [16, 32]]
        MLPS = [[[16, 16, 32], [32, 32, 64]], [[64, 64, 128], [64, 96, 128]],
                [[128, 196, 256], [128, 196, 256]],
                [[256, 256, 512], [256, 384, 512]]]
        FP_MLPS = [[128, 128], [256, 256], [512, 512], [512, 512]]
        CLS_FC = [128]
        DP_RATIO = 0.5

        self.SA_modules = nn.ModuleList()
        channel_in = input_channels

        skip_channel_list = [input_channels]
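        # Encoder: one MSG SA layer per level; skip_channel_list records each
        # level's output width for the FP skip connections below.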
        for k in range(len(NPOINTS)):
            mlps = MLPS[k].copy()
            channel_out = 0
            for idx in range(len(mlps)):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]

            self.SA_modules.append(
                PointnetSAModuleMSG(npoint=NPOINTS[k],
                                    radii=RADIUS[k],
                                    nsamples=NSAMPLE[k],
                                    mlps=mlps,
                                    use_xyz=use_xyz,
                                    bn=bn))
            skip_channel_list.append(channel_out)
            channel_in = channel_out

        self.FP_modules = nn.ModuleList()
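        # Decoder: each FP layer takes the previous decoder output plus the
        # matching encoder skip features.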

        for k in range(len(FP_MLPS)):
            pre_channel = (FP_MLPS[k + 1][-1]
                           if k + 1 < len(FP_MLPS) else channel_out)
            self.FP_modules.append(
                PointnetFPModule(mlp=[pre_channel + skip_channel_list[k]] +
                                 FP_MLPS[k],
                                 bn=bn))

        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(len(CLS_FC)):
            cls_layers.append(pt_utils.Conv1d(pre_channel, CLS_FC[k], bn=bn))
            pre_channel = CLS_FC[k]
        cls_layers.append(
            pt_utils.Conv1d(pre_channel, num_classes, activation=None, bn=bn))
        cls_layers.insert(1, nn.Dropout(DP_RATIO))
        self.cls_layer = nn.Sequential(*cls_layers)
Example #4
    def __init__(self, num_classes, input_channels=3, use_xyz=True, bn=True):
        super().__init__()

        self.SA_modules = nn.ModuleList()
        channel_in = input_channels

        skip_channel_list = [input_channels]
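        # NOTE: NPOINTS, RADIUS, NSAMPLE, MLPS, FP_MLPS and CLS_FC are not defined
        # in this snippet; they are assumed to be module-level configuration
        # constants with the same shapes as in Example #3.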
        for k in range(len(NPOINTS)):
            mlps = MLPS[k].copy()
            channel_out = 0
            for idx in range(len(mlps)):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]

            self.SA_modules.append(
                PointnetSAModuleMSG(npoint=NPOINTS[k],
                                    radii=RADIUS[k],
                                    nsamples=NSAMPLE[k],
                                    mlps=mlps,
                                    use_xyz=use_xyz,
                                    bn=bn))
            skip_channel_list.append(channel_out)
            channel_in = channel_out

        self.FP_modules = nn.ModuleList()

        for k in range(len(FP_MLPS)):
            pre_channel = (FP_MLPS[k + 1][-1]
                           if k + 1 < len(FP_MLPS) else channel_out)
            self.FP_modules.append(
                PointnetFPModule(mlp=[pre_channel + skip_channel_list[k]] +
                                 FP_MLPS[k],
                                 bn=bn))

        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(len(CLS_FC)):
            cls_layers.append(pt_utils.Conv1d(pre_channel, CLS_FC[k], bn=bn))
            pre_channel = CLS_FC[k]
        cls_layers.append(
            pt_utils.Conv1d(pre_channel, num_classes, activation=None, bn=bn))
        cls_layers.insert(1, nn.Dropout(0.5))
        self.cls_layer = nn.Sequential(*cls_layers)
Example #5
    def __init__(self, num_classes):
        super().__init__()

        self.SA_modules = nn.ModuleList()
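        # Encoder: two MSG SA layers (512 and 128 points) followed by a global
        # pooling SA module that aggregates all remaining points.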
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=512,
                radii=[0.2],
                nsamples=[64],
                mlps=[[6, 64, 64, 128]],
                first_layer=True,
                use_xyz=True,
            )
        )
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=128,
                radii=[0.4],
                nsamples=[64],
                mlps=[[128+9, 128, 128, 256]],
                use_xyz=False,
                last_layer=True,
            )
        )

        # global pooling
        self.SA_modules.append(
            PointnetSAModule(
                nsample=128,
                mlp=[256, 256, 512, 1024],
                use_xyz=False
            )
        )
        self.FP_modules = nn.ModuleList()
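        # Decoder: feature propagation back to the full point resolution.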
        self.FP_modules.append(PointnetFPModule(mlp=[128, 128, 128, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[384, 256, 128]))
        self.FP_modules.append(PointnetFPModule(mlp=[1280, 256, 256]))

        self.FC_layer = nn.Sequential(
            pt_utils.Conv1d(128, 128, bn=True), nn.Dropout(),
            pt_utils.Conv1d(128, num_classes, activation=None)
        )
Example #6
    def __init__(
            self,
            radius: float,
            nsamples: int,
            in_channels: int,
            out_channels: int
    ):
        super().__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
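        # Three spatio-temporal correlation branches; the z / r / s naming
        # suggests GRU-style update / reset / candidate gating over point features.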
        self.z_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.r_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.s_corr = PointSpatioTemporalCorrelation(radius, nsamples, 0, out_channels)

        self.sigmoid = nn.Sigmoid()

        self.fc = pt_utils.Conv1d(in_size=in_channels+out_channels, out_size=out_channels, activation=None, bn=None)
        self.tanh = nn.Tanh()
Example #7
    def __init__(self, config, dataset_name='SemanticKITTI'):
        super().__init__()
        self.config = config
        self.class_weights = DP.get_class_weights(dataset_name)

        self.fc0 = pt_utils.Conv1d(3, 8, kernel_size=1, bn=True)

        self.dilated_res_blocks = nn.ModuleList()
        d_in = 8
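        # Encoder: stacked dilated residual blocks; each block doubles the width
        # (outputs 2 * d_out channels).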
        for i in range(self.config.num_layers):
            d_out = self.config.d_out[i]
            self.dilated_res_blocks.append(Dilated_res_block(d_in, d_out))
            d_in = 2 * d_out

        d_out = d_in
        self.decoder_0 = pt_utils.Conv2d(d_in,
                                         d_out,
                                         kernel_size=(1, 1),
                                         bn=True)

        self.decoder_blocks = nn.ModuleList()
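        # Decoder: 1x1 convolutions that fuse upsampled features with the
        # matching encoder skip connections.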
        for j in range(self.config.num_layers):
            if j < 3:
                d_in = d_out + 2 * self.config.d_out[-j - 2]
                d_out = 2 * self.config.d_out[-j - 2]
            else:
                d_in = 4 * self.config.d_out[-4]
                d_out = 2 * self.config.d_out[-4]
            self.decoder_blocks.append(
                pt_utils.Conv2d(d_in, d_out, kernel_size=(1, 1), bn=True))

        self.fc1 = pt_utils.Conv2d(d_out, 64, kernel_size=(1, 1), bn=True)
        self.fc2 = pt_utils.Conv2d(64, 32, kernel_size=(1, 1), bn=True)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = pt_utils.Conv2d(32,
                                   self.config.num_classes,
                                   kernel_size=(1, 1),
                                   bn=False,
                                   activation=None)
Example #8
    def __init__(self, num_classes, input_channels=0, relation_prior=1, use_xyz=True):
        super().__init__()

        self.SA_modules = nn.ModuleList()
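        # Encoder: four MSG SA layers (three radii each), plus two global-pooling
        # SA modules (indices 4 and 5) that provide scene-level context features.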
        c_in = input_channels
        self.SA_modules.append(     # 0
            PointnetSAModuleMSG(
                npoint=1024,
                radii=[0.075, 0.1, 0.125],
                nsamples=[16, 32, 48],
                mlps=[[c_in, 64], [c_in, 64], [c_in, 64]],
                first_layer=True,
                use_xyz=use_xyz,
                relation_prior=relation_prior
            )
        )
        c_out_0 = 64*3

        c_in = c_out_0
        self.SA_modules.append(    # 1
            PointnetSAModuleMSG(
                npoint=256,
                radii=[0.1, 0.15, 0.2],
                nsamples=[16, 48, 64],
                mlps=[[c_in, 128], [c_in, 128], [c_in, 128]],
                use_xyz=use_xyz,
                relation_prior=relation_prior
            )
        )
        c_out_1 = 128*3

        c_in = c_out_1
        self.SA_modules.append(    # 2
            PointnetSAModuleMSG(
                npoint=64,
                radii=[0.2, 0.3, 0.4],
                nsamples=[16, 32, 48],
                mlps=[[c_in, 256], [c_in, 256], [c_in, 256]],
                use_xyz=use_xyz,
                relation_prior=relation_prior
            )
        )
        c_out_2 = 256*3

        c_in = c_out_2
        self.SA_modules.append(    # 3
            PointnetSAModuleMSG(
                npoint=16,
                radii=[0.4, 0.6, 0.8],
                nsamples=[16, 24, 32],
                mlps=[[c_in, 512], [c_in, 512], [c_in, 512]],
                use_xyz=use_xyz,
                relation_prior=relation_prior
            )
        )
        c_out_3 = 512*3
        
        self.SA_modules.append(   # 4   global pooling
            PointnetSAModule(
                nsample=16,
                mlp=[c_out_3, 128], use_xyz=use_xyz
            )
        )
        global_out = 128
        
        self.SA_modules.append(   # 5   global pooling
            PointnetSAModule(
                nsample=64,
                mlp=[c_out_2, 128], use_xyz=use_xyz
            )
        )
        global_out2 = 128

        self.FP_modules = nn.ModuleList()
        self.FP_modules.append(
            PointnetFPModule(mlp=[256 + input_channels, 128, 128])
        )
        self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
        self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
        self.FP_modules.append(
            PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512])
        )
        self.context_prior = cp.Context(
            128 + global_out + global_out2 + 16,
            128 + global_out + global_out2 + 16)
        self.FC_layer = nn.Sequential(
            pt_utils.Conv1d((128 + global_out + global_out2 + 16) * 3, 128,
                            bn=True), nn.Dropout(),
            pt_utils.Conv1d(128, num_classes, activation=None)
        )
Example #9
    def __init__(self, config):
        super().__init__()

        self.config = config
        self.class_weights = self.config.class_weights

        if self.config.name == 'SemanticKITTI':
            self.fc0 = pt_utils.Conv1d(3, 8, kernel_size=1, bn=True)
        elif self.config.name == 'S3DIS':
            self.fc0 = pt_utils.Conv1d(6, 8, kernel_size=1, bn=True)
        elif self.config.name == 'Semantic3D':
            raise NotImplementedError("Semantic3D is not checked yet")

        self.dilated_res_blocks = nn.ModuleList()

        d_in = 8

        # Encoder
        for i in range(self.config.num_layers):
            d_out = self.config.d_out[i]
            self.dilated_res_blocks.append(Dilated_res_block(d_in, d_out))
            d_in = 2 * d_out

        d_out = d_in

        # First layer of Decoder
        self.decoder_0 = pt_utils.Conv2d(d_in,
                                         d_out,
                                         kernel_size=(1, 1),
                                         bn=True)

        # Decoder
        self.decoder_blocks = nn.ModuleList()

        for j in range(self.config.num_layers):
            if self.config.name == "SemanticKITTI":
                if j < 3:
                    # j = 0, -j-2 = -2, 512 + 128*2 -> 256
                    # j = 1, -j-2 = -3, 256 + 64*2 -> 128
                    # j = 2, -j-2 = -4, 128 + 16*2 -> 32
                    d_in = d_out + 2 * self.config.d_out[-j - 2]
                    d_out = 2 * self.config.d_out[-j - 2]
                else:
                    # j = 3, 32 + 32 -> 32
                    d_in = 4 * self.config.d_out[-4]
                    d_out = 2 * self.config.d_out[-4]
            elif self.config.name in ["S3DIS", "Semantic3D"]:
                if j < 4:
                    d_in = d_out + 2 * self.config.d_out[-j - 2]
                    d_out = 2 * self.config.d_out[-j - 2]
                else:
                    d_in = 4 * self.config.d_out[-5]
                    d_out = 2 * self.config.d_out[-5]

            self.decoder_blocks.append(
                pt_utils.Conv2d(d_in, d_out, kernel_size=(1, 1), bn=True))
            #print("j : {}, d_in : {}, d_out : {}".format(j, d_in, d_out))

        self.fc1 = pt_utils.Conv2d(d_out, 64, kernel_size=(1, 1), bn=True)
        self.fc2 = pt_utils.Conv2d(64, 32, kernel_size=(1, 1), bn=True)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = pt_utils.Conv2d(32,
                                   self.config.num_classes,
                                   kernel_size=(1, 1),
                                   bn=False,
                                   activation=None)
Example #10
    def __init__(self,
                 num_classes,
                 input_channels=0,
                 relation_prior=1,
                 use_xyz=True):
        super().__init__()

        # the number of convolution layers
        self.conv_layer_num = 4

        # the number of scales
        self.scale_num = 3

        self.SA_modules = nn.ModuleList()
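        # Encoder: four multi-scale RS-CNN SA layers followed by two global
        # pooling modules.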
        c_in = input_channels
        self.SA_modules.append(
            RSCNNSAModuleMSG(
                npoint=1024,
                radii=[0.075, 0.1, 0.125],
                nsamples=[16, 32, 48],
                mlps=[[c_in, 64], [c_in, 64], [c_in, 64]],
                first_layer=True,
                use_xyz=use_xyz,
                scale_num=self.scale_num,
                rel_pose_mode="avg",
            ))
        c_out_0 = 64 * 3

        c_in = c_out_0
        self.SA_modules.append(
            RSCNNSAModuleMSG(
                npoint=256,
                radii=[0.1, 0.15, 0.2],
                nsamples=[16, 48, 64],
                mlps=[[c_in, 128], [c_in, 128], [c_in, 128]],
                use_xyz=False,
                scale_num=self.scale_num,
                rel_pose_mode="avg",
            ))
        c_out_1 = 128 * 3

        c_in = c_out_1
        self.SA_modules.append(
            RSCNNSAModuleMSG(
                npoint=64,
                radii=[0.2, 0.3, 0.4],
                nsamples=[16, 32, 48],
                mlps=[[c_in, 256], [c_in, 256], [c_in, 256]],
                use_xyz=False,
                scale_num=self.scale_num,
                rel_pose_mode="avg",
            ))
        c_out_2 = 256 * 3

        c_in = c_out_2
        self.SA_modules.append(
            RSCNNSAModuleMSG(
                npoint=16,
                radii=[0.4, 0.6, 0.8],
                nsamples=[16, 24, 32],
                mlps=[[c_in, 512], [c_in, 512], [c_in, 512]],
                use_xyz=False,
                last_layer=True,
                scale_num=self.scale_num,
                rel_pose_mode="avg",
            ))
        c_out_3 = 512 * 3

        # global pooling
        self.SA_modules.append(
            RSCNNSAModule(nsample=16, mlp=[c_out_3, 128], use_xyz=False))
        global_out = 128

        # global pooling
        self.SA_modules.append(
            RSCNNSAModule(nsample=64, mlp=[c_out_2, 128], use_xyz=False))
        global_out2 = 128

        self.FP_modules = nn.ModuleList()
        self.FP_modules.append(
            RSCNNFPModule(mlp=[256 + input_channels, 128, 128]))
        self.FP_modules.append(RSCNNFPModule(mlp=[512 + c_out_0, 256, 256]))
        self.FP_modules.append(RSCNNFPModule(mlp=[512 + c_out_1, 512, 512]))
        self.FP_modules.append(RSCNNFPModule(mlp=[c_out_3 +
                                                  c_out_2, 512, 512]))

        self.FC_layer = nn.Sequential(
            pt_utils.Conv1d(128 + global_out + global_out2 + 16, 128, bn=True),
            nn.Dropout(), pt_utils.Conv1d(128, num_classes, activation=None))
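
Note: the examples above only build the layers. A minimal, hypothetical forward pass (following the common PointNet++-style wiring of SA_modules, FP_modules and FC_layer, and not taken verbatim from any example) might look like this:

    def forward(self, pointcloud):
        # pointcloud: (B, N, 3 + C) -- xyz coordinates followed by extra features
        xyz = pointcloud[..., 0:3].contiguous()
        features = (pointcloud[..., 3:].transpose(1, 2).contiguous()
                    if pointcloud.size(-1) > 3 else None)

        # Encoder: each SA module downsamples the cloud and abstracts features.
        l_xyz, l_features = [xyz], [features]
        for module in self.SA_modules:
            li_xyz, li_features = module(l_xyz[-1], l_features[-1])
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        # Decoder: FP modules are applied coarse-to-fine (reverse order),
        # propagating features back to the original points.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])

        # Per-point classification head: (B, num_classes, N) logits.
        return self.FC_layer(l_features[0])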