def __init__(self, num_classes, input_channels=3):
    """Build a PointNet++ multi-scale-grouping (MSG) classifier.

    Two MSG set-abstraction stages downsample the cloud, a global SA
    stage produces a 1024-d descriptor, and an FC head maps it to
    ``num_classes`` logits.

    :param num_classes: number of output classes.
    :param input_channels: per-point feature channels into stage 1.
    """
    super().__init__()

    self.SA_modules = nn.ModuleList()

    # Stage 1: 512 centroids, three grouping scales.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=512,
            radii=[0.1, 0.2, 0.4],
            nsamples=[32, 64, 128],
            mlps=[
                [input_channels, 64],
                [input_channels, 128],
                [input_channels, 128],
            ],
        )
    )

    # Scale branches are concatenated channel-wise: 64 + 128 + 128.
    input_channels = 64 + 128 + 128

    # Stage 2: 128 centroids, wider radii.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.2, 0.4, 0.8],
            nsamples=[16, 32, 64],
            mlps=[
                [input_channels, 128],
                [input_channels, 256],
                [input_channels, 256],
            ],
        )
    )

    # Stage 3: global pooling SA (no npoint) -> single 1024-d feature.
    self.SA_modules.append(
        PointnetSAModule(mlp=[128 + 256 + 256, 256, 512, 1024])
    )

    # Classifier head; dropout between FC layers, no activation on logits.
    self.FC_layer = nn.Sequential(
        pt_utils.FC(1024, 512, bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(512, 256, bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(256, num_classes, activation=None),
    )
def __init__(self, num_classes, input_channels=0, relation_prior=1, use_xyz=True):
    """Build a single-scale relation-aware classifier (RSCNN-style).

    Two SA stages (each with a single radius) feed a global convolutional
    pooling stage, followed by an FC classification head.

    :param num_classes: number of output classes.
    :param input_channels: per-point feature channels into stage 1.
    :param relation_prior: relation-prior mode forwarded to the SA modules
        (semantics defined by the project's PointnetSAModuleMSG).
    :param use_xyz: forward raw xyz coordinates alongside features.
    """
    super().__init__()

    self.SA_modules = nn.ModuleList()

    # Stage 1: 512 centroids, one grouping radius; first_layer enables
    # whatever stem handling the project module implements.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=512,
            radii=[0.23],
            nsamples=[48],
            mlps=[[input_channels, 128]],
            first_layer=True,
            use_xyz=use_xyz,
            relation_prior=relation_prior,
        )
    )

    # Stage 2: 128 centroids, wider radius.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.32],
            nsamples=[64],
            mlps=[[128, 512]],
            use_xyz=use_xyz,
            relation_prior=relation_prior,
        )
    )

    # Global convolutional pooling -> 1024-d descriptor.
    self.SA_modules.append(
        PointnetSAModule(nsample=128, mlp=[512, 1024], use_xyz=use_xyz)
    )

    # Classifier head; final layer emits raw logits (no activation).
    self.FC_layer = nn.Sequential(
        pt_utils.FC(1024, 512, activation=nn.ReLU(inplace=True), bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(512, 256, activation=nn.ReLU(inplace=True), bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(256, num_classes, activation=None),
    )
def __init__(self, num_classes):
    """Build a fixed-architecture point-cloud classifier.

    Two single-radius SA stages (hard-coded channel layouts: 6-channel
    input in stage 1, 128+9 in stage 2) feed a global pooling stage and
    an FC classification head.

    :param num_classes: number of output classes.
    """
    super().__init__()

    self.SA_modules = nn.ModuleList()

    # Stage 1: 512 centroids; expects 6 input channels.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=512,
            radii=[0.2],
            nsamples=[32],
            mlps=[[6, 64, 64, 128]],
            use_xyz=True,
            first_layer=True,
        )
    )

    # Stage 2: 128 centroids; input is the 128-d stage-1 output plus 9
    # extra channels (presumably appended by the module — confirm in
    # PointnetSAModuleMSG).
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.4],
            nsamples=[64],
            mlps=[[128 + 9, 128, 128, 256]],
            use_xyz=False,
            last_layer=True,
        )
    )

    # Global pooling -> 1024-d descriptor.
    self.SA_modules.append(
        PointnetSAModule(nsample=128, mlp=[256, 256, 512, 1024], use_xyz=False)
    )

    # Classifier head; final layer emits raw logits.
    self.FC_layer = nn.Sequential(
        pt_utils.FC(1024, 512, activation=nn.ReLU(inplace=True), bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(512, 256, activation=nn.ReLU(inplace=True), bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(256, num_classes, activation=None),
    )
def __init__(self, input_feat_dim=0, k=40, retType='glob2'):
    """Build a PointNet++ MSG classifier with an explicit FC head.

    :param input_feat_dim: per-point feature channels into sa1.
    :param k: number of output classes (default 40, e.g. ModelNet40).
    :param retType: return-type selector consumed elsewhere in the class.
    """
    super().__init__()

    # Stage 1: 512 centroids, three grouping scales.
    self.sa1 = PointnetSAModuleMSG(
        npoint=512,
        radii=[0.1, 0.2, 0.4],
        nsamples=[16, 32, 128],
        mlps=[
            [input_feat_dim, 32, 32, 64],
            [input_feat_dim, 64, 64, 128],
            [input_feat_dim, 64, 96, 128],
        ],
    )

    # Stage 2 input = concatenation of stage-1 branch outputs.
    inputCh = 64 + 128 + 128
    self.sa2 = PointnetSAModuleMSG(
        npoint=128,
        radii=[0.2, 0.4, 0.8],
        nsamples=[32, 64, 128],
        mlps=[
            [inputCh, 64, 64, 128],
            [inputCh, 128, 128, 256],
            [inputCh, 128, 128, 256],
        ],
    )

    # Stage 3: global SA -> 1024-d descriptor.
    inputCh = 128 + 256 + 256
    self.sa3 = PointnetSAModule(mlp=[inputCh, 256, 512, 1024])

    # Plain nn head: Linear+BN+ReLU twice, one dropout, then logits.
    self.fc_layer = nn.Sequential(
        nn.Linear(1024, 512, bias=False),
        nn.BatchNorm1d(512),
        nn.ReLU(True),
        nn.Linear(512, 256, bias=False),
        nn.BatchNorm1d(256),
        nn.ReLU(True),
        nn.Dropout(0.5),
        nn.Linear(256, k),
    )

    self.retType = retType
def __init__(self, num_classes, input_channels=9, use_xyz=True):
    """Build a PointNet++ MSG segmentation network.

    Four MSG set-abstraction stages form the encoder; four feature-
    propagation (FP) stages form the decoder; a 1x1-conv head emits
    per-point class logits.

    :param num_classes: number of per-point output classes.
    :param input_channels: per-point feature channels (default 9).
    :param use_xyz: forward raw xyz coordinates alongside features
        (only stage 0 receives the flag here, matching the original).
    """
    super().__init__()

    self.SA_modules = nn.ModuleList()

    # --- encoder ---------------------------------------------------
    c_in = input_channels

    # SA 0: 1024 centroids, two scales.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=1024,
            radii=[0.05, 0.1],
            nsamples=[16, 32],
            mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]],
            use_xyz=use_xyz,
        )
    )
    c_out_0 = 32 + 64

    # SA 1.
    c_in = c_out_0
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=256,
            radii=[0.1, 0.2],
            nsamples=[16, 32],
            mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]],
        )
    )
    c_out_1 = 128 + 128

    # SA 2.
    c_in = c_out_1
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=64,
            radii=[0.2, 0.4],
            nsamples=[16, 32],
            mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]],
        )
    )
    c_out_2 = 256 + 256

    # SA 3.
    c_in = c_out_2
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=16,
            radii=[0.4, 0.8],
            nsamples=[16, 32],
            mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
        )
    )
    c_out_3 = 512 + 512

    # --- decoder (FP modules listed finest-to-coarsest) ------------
    self.FP_modules = nn.ModuleList()
    self.FP_modules.append(
        PointnetFPModule(
            mlp=[256 + (input_channels if use_xyz else 0), 128, 128]
        )
    )
    self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
    self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
    self.FP_modules.append(
        PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512])
    )

    # Per-point classification head (1x1 convolutions).
    self.FC_layer = nn.Sequential(
        pt_utils.Conv1d(128, 128, bn=True),
        nn.Dropout(),
        pt_utils.Conv1d(128, num_classes, activation=None),
    )
def __init__(self, args, divide_ratio=2):
    """Build a patch discriminator over point clouds.

    A single MSG set-abstraction stage extracts multi-scale per-patch
    features (channel widths shrunk by ``divide_ratio``), and a 1-channel
    MLP conv scores each patch.

    :param args: namespace providing ``num_points`` (total input points).
    :param divide_ratio: integer divisor applied to every MLP width.
    """
    super(Discriminator, self).__init__()
    self.num_points = args.num_points

    # Per-scale MLP widths, all reduced by the divide ratio; each branch
    # takes 3 input channels (xyz).
    d = divide_ratio
    branch_mlps = [
        [3, 32 // d, 32 // d, 64 // d],
        [3, 64 // d, 64 // d, 128 // d],
        [3, 64 // d, 96 // d, 128 // d],
    ]

    # One MSG stage sampling 1/8 of the input points at three radii.
    self.pointnet_sa_module = PointnetSAModuleMSG(
        npoint=int(self.num_points / 8),
        radii=[0.1, 0.2, 0.4],
        nsamples=[16, 32, 128],
        mlps=branch_mlps,
    )

    # Concatenated branch outputs -> single discriminator score per patch.
    concat_channels = 64 // d + 128 // d + 128 // d
    self.patch_mlp_conv = MLPConv([concat_channels, 1])
def __init__(self, num_classes):
    """Build a fixed-architecture point-cloud segmentation network.

    Encoder: two single-radius SA stages plus a global pooling stage.
    Decoder: three FP stages. Head: 1x1 convs to per-point logits.

    :param num_classes: number of per-point output classes.
    """
    super().__init__()

    # --- encoder ---------------------------------------------------
    self.SA_modules = nn.ModuleList()

    # SA 1: 512 centroids; expects 6 input channels.
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=512,
            radii=[0.2],
            nsamples=[64],
            mlps=[[6, 64, 64, 128]],
            first_layer=True,
            use_xyz=True,
        )
    )

    # SA 2: 128 centroids; 128-d input plus 9 extra channels
    # (presumably added by the module — confirm in PointnetSAModuleMSG).
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.4],
            nsamples=[64],
            mlps=[[128 + 9, 128, 128, 256]],
            use_xyz=False,
            last_layer=True,
        )
    )

    # Global pooling -> 1024-d scene descriptor.
    self.SA_modules.append(
        PointnetSAModule(nsample=128, mlp=[256, 256, 512, 1024], use_xyz=False)
    )

    # --- decoder ---------------------------------------------------
    self.FP_modules = nn.ModuleList()
    self.FP_modules.append(PointnetFPModule(mlp=[128, 128, 128, 128]))
    self.FP_modules.append(PointnetFPModule(mlp=[384, 256, 128]))
    self.FP_modules.append(PointnetFPModule(mlp=[1280, 256, 256]))

    # Per-point classification head.
    self.FC_layer = nn.Sequential(
        pt_utils.Conv1d(128, 128, bn=True),
        nn.Dropout(),
        pt_utils.Conv1d(128, num_classes, activation=None),
    )
def __init__(self, num_classes, input_channels=3, use_xyz=True, bn=True):
    """Build a config-driven PointNet++ MSG segmentation network.

    The encoder/decoder shapes are defined by the local config lists
    (NPOINTS/RADIUS/NSAMPLE/MLPS for SA stages, FP_MLPS for FP stages,
    CLS_FC for the head). Fixes the non-idiomatic ``xs.__len__()`` calls
    and manual index-mutation loops of the original; the constructed
    modules and their arguments are unchanged.

    :param num_classes: number of per-point output classes.
    :param input_channels: per-point feature channels into SA stage 0.
    :param use_xyz: forward raw xyz coordinates alongside features.
    :param bn: enable batch norm in SA/FP/head layers.
    """
    super().__init__()

    NPOINTS = [1024, 256, 64, 16]
    RADIUS = [[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]]
    NSAMPLE = [[16, 32], [16, 32], [16, 32], [16, 32]]
    MLPS = [[[16, 16, 32], [32, 32, 64]],
            [[64, 64, 128], [64, 96, 128]],
            [[128, 196, 256], [128, 196, 256]],
            [[256, 256, 512], [256, 384, 512]]]
    FP_MLPS = [[128, 128], [256, 256], [512, 512], [512, 512]]
    CLS_FC = [128]
    DP_RATIO = 0.5

    # --- encoder: one MSG SA stage per config row ------------------
    self.SA_modules = nn.ModuleList()
    channel_in = input_channels
    skip_channel_list = [input_channels]  # skip-link widths for the FP stages
    for k in range(len(NPOINTS)):
        # Prepend the running input width to every branch spec; the
        # stage's output width is the sum of the branch output widths.
        mlps = [[channel_in] + spec for spec in MLPS[k]]
        channel_out = sum(branch[-1] for branch in mlps)
        self.SA_modules.append(
            PointnetSAModuleMSG(npoint=NPOINTS[k], radii=RADIUS[k],
                                nsamples=NSAMPLE[k], mlps=mlps,
                                use_xyz=use_xyz, bn=bn))
        skip_channel_list.append(channel_out)
        channel_in = channel_out

    # --- decoder: FP stages, finest-to-coarsest --------------------
    self.FP_modules = nn.ModuleList()
    for k in range(len(FP_MLPS)):
        # Input comes from the next-coarser FP output (or the deepest
        # SA stage, for the last entry) plus the matching skip link.
        pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
        self.FP_modules.append(
            PointnetFPModule(
                mlp=[pre_channel + skip_channel_list[k]] + FP_MLPS[k],
                bn=bn))

    # --- per-point classification head -----------------------------
    cls_layers = []
    pre_channel = FP_MLPS[0][-1]
    for out_channels in CLS_FC:
        cls_layers.append(pt_utils.Conv1d(pre_channel, out_channels, bn=bn))
        pre_channel = out_channels
    cls_layers.append(
        pt_utils.Conv1d(pre_channel, num_classes, activation=None, bn=bn))
    # Dropout goes right after the first conv, as in the original.
    cls_layers.insert(1, nn.Dropout(DP_RATIO))
    self.cls_layer = nn.Sequential(*cls_layers)
def _build_model(self):
    """Assemble the SA stages from the per-stage config lists on ``self``.

    The parent's ``_build_model`` is run first so inherited pieces
    (e.g. the FC head) are created; this override then rebuilds
    ``self.SA_modules`` from ``self.npoints`` / ``self.radii`` /
    ``self.nsamples`` / ``self.mlps``.
    """
    super()._build_model()

    self.SA_modules = nn.ModuleList()
    for stage, npoint in enumerate(self.npoints):
        self.SA_modules.append(
            PointnetSAModuleMSG(
                npoint=npoint,
                radii=self.radii[stage],
                nsamples=self.nsamples[stage],
                mlps=self.mlps[stage],
                use_xyz=self.use_xyz,
            )
        )
def __init__(self, num_classes, input_channels=3, use_xyz=True, bn=True):
    """Build a config-driven PointNet++ MSG segmentation network.

    NOTE(review): NPOINTS / RADIUS / NSAMPLE / MLPS / FP_MLPS / CLS_FC are
    read from module scope — they are presumably module-level constants
    defined elsewhere in this file; verify before moving this class.

    :param num_classes: number of per-point output classes.
    :param input_channels: per-point feature channels into SA stage 0.
    :param use_xyz: forward raw xyz coordinates alongside features.
    :param bn: enable batch norm in SA/FP/head layers.
    """
    super().__init__()

    # --- encoder: one MSG SA stage per config row ------------------
    self.SA_modules = nn.ModuleList()
    channel_in = input_channels
    skip_channel_list = [input_channels]  # skip-link widths for FP stages
    for k in range(len(NPOINTS)):
        # Prepend the running input width to every branch spec; the
        # stage output width is the sum of the branch output widths.
        mlps = [[channel_in] + spec for spec in MLPS[k]]
        channel_out = sum(branch[-1] for branch in mlps)
        self.SA_modules.append(
            PointnetSAModuleMSG(npoint=NPOINTS[k], radii=RADIUS[k],
                                nsamples=NSAMPLE[k], mlps=mlps,
                                use_xyz=use_xyz, bn=bn))
        skip_channel_list.append(channel_out)
        channel_in = channel_out

    # --- decoder: FP stages, finest-to-coarsest --------------------
    self.FP_modules = nn.ModuleList()
    for k in range(len(FP_MLPS)):
        # Next-coarser FP output (or the deepest SA stage for the last
        # entry) plus the matching skip link.
        pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
        self.FP_modules.append(
            PointnetFPModule(
                mlp=[pre_channel + skip_channel_list[k]] + FP_MLPS[k],
                bn=bn))

    # --- per-point classification head -----------------------------
    cls_layers = []
    pre_channel = FP_MLPS[0][-1]
    for out_channels in CLS_FC:
        cls_layers.append(pt_utils.Conv1d(pre_channel, out_channels, bn=bn))
        pre_channel = out_channels
    cls_layers.append(
        pt_utils.Conv1d(pre_channel, num_classes, activation=None, bn=bn))
    # Dropout right after the first conv, matching the original ordering.
    cls_layers.insert(1, nn.Dropout(0.5))
    self.cls_layer = nn.Sequential(*cls_layers)
def __init__(self, num_classes, input_channels=0, use_xyz=True):
    """Build a staged pooling classifier over point clouds.

    Stage 1 is one pooled SA module; stage 2 is a pooled SA followed by
    three grouped SA modules whose input widths grow by 24 each step
    (93 -> 117 -> 141 -> 165, presumably because each grouped module
    concatenates extra channels — confirm in PointnetSAModuleMSG). A
    global SA then produces a 512-d descriptor for the FC head.

    :param num_classes: number of output classes.
    :param input_channels: per-point feature channels into stage 1.
    :param use_xyz: forward raw xyz coordinates alongside features.
    """
    super().__init__()

    self.SA_modules = nn.ModuleList()

    # --- stage 1: single pooled SA ---------------------------------
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=512,
            radii=[0.25],
            nsamples=[64],
            mlps=[[input_channels, 96]],
            use_xyz=use_xyz,
            pool=True,
        )
    )

    # --- stage 2: pooled SA then three grouped SA modules ----------
    input_channels = 96
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.32],
            nsamples=[64],
            mlps=[[input_channels, 93]],
            use_xyz=use_xyz,
            pool=True,
        )
    )

    input_channels = 93
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.39],
            nsamples=[16],
            mlps=[[input_channels, 96]],
            group_number=2,
            use_xyz=use_xyz,
            after_pool=True,
        )
    )

    input_channels = 117
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.39],
            nsamples=[16],
            mlps=[[input_channels, 96]],
            group_number=2,
            use_xyz=use_xyz,
        )
    )

    input_channels = 141
    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=128,
            radii=[0.39],
            nsamples=[16],
            mlps=[[input_channels, 96]],
            group_number=2,
            use_xyz=use_xyz,
            before_pool=True,
        )
    )

    # --- global pooling -> 512-d descriptor ------------------------
    input_channels = 165
    self.SA_modules.append(
        PointnetSAModule(mlp=[input_channels, 512], use_xyz=use_xyz)
    )

    # Classifier head; final layer emits raw logits.
    self.FC_layer = nn.Sequential(
        pt_utils.FC(512, 512, activation=nn.ReLU(inplace=True), bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(512, 256, activation=nn.ReLU(inplace=True), bn=True),
        nn.Dropout(p=0.5),
        pt_utils.FC(256, num_classes, activation=None),
    )
def __init__(self, num_classes, input_channels=0, relation_prior=1, use_xyz=True):
    """Build a relation-aware MSG segmentation network with a context prior.

    Four three-scale SA stages form the encoder; two extra global SA
    modules pool the two deepest stages; four FP stages form the decoder.
    A ``cp.Context`` module fuses per-point, global and (16-d) extra
    features before the classification head, which consumes three times
    the fused width (presumably the context module triples the channels
    — confirm in ``cp.Context``).

    :param num_classes: number of per-point output classes.
    :param input_channels: per-point feature channels into stage 0.
    :param relation_prior: relation-prior mode forwarded to SA modules.
    :param use_xyz: forward raw xyz coordinates alongside features.
    """
    super().__init__()

    self.SA_modules = nn.ModuleList()

    # --- encoder: four 3-scale SA stages ---------------------------
    c_in = input_channels
    self.SA_modules.append(  # 0
        PointnetSAModuleMSG(
            npoint=1024,
            radii=[0.075, 0.1, 0.125],
            nsamples=[16, 32, 48],
            mlps=[[c_in, 64], [c_in, 64], [c_in, 64]],
            first_layer=True,
            use_xyz=use_xyz,
            relation_prior=relation_prior,
        )
    )
    c_out_0 = 64 * 3

    c_in = c_out_0
    self.SA_modules.append(  # 1
        PointnetSAModuleMSG(
            npoint=256,
            radii=[0.1, 0.15, 0.2],
            nsamples=[16, 48, 64],
            mlps=[[c_in, 128], [c_in, 128], [c_in, 128]],
            use_xyz=use_xyz,
            relation_prior=relation_prior,
        )
    )
    c_out_1 = 128 * 3

    c_in = c_out_1
    self.SA_modules.append(  # 2
        PointnetSAModuleMSG(
            npoint=64,
            radii=[0.2, 0.3, 0.4],
            nsamples=[16, 32, 48],
            mlps=[[c_in, 256], [c_in, 256], [c_in, 256]],
            use_xyz=use_xyz,
            relation_prior=relation_prior,
        )
    )
    c_out_2 = 256 * 3

    c_in = c_out_2
    self.SA_modules.append(  # 3
        PointnetSAModuleMSG(
            npoint=16,
            radii=[0.4, 0.6, 0.8],
            nsamples=[16, 24, 32],
            mlps=[[c_in, 512], [c_in, 512], [c_in, 512]],
            use_xyz=use_xyz,
            relation_prior=relation_prior,
        )
    )
    c_out_3 = 512 * 3

    # --- global pooling over the two deepest stages ----------------
    self.SA_modules.append(  # 4: global pooling of stage-3 features
        PointnetSAModule(nsample=16, mlp=[c_out_3, 128], use_xyz=use_xyz)
    )
    global_out = 128

    self.SA_modules.append(  # 5: global pooling of stage-2 features
        PointnetSAModule(nsample=64, mlp=[c_out_2, 128], use_xyz=use_xyz)
    )
    global_out2 = 128

    # --- decoder: FP stages, finest-to-coarsest --------------------
    self.FP_modules = nn.ModuleList()
    self.FP_modules.append(
        PointnetFPModule(mlp=[256 + input_channels, 128, 128])
    )
    self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
    self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
    self.FP_modules.append(
        PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512])
    )

    # --- context prior and head ------------------------------------
    # Fused width: per-point (128) + both global descriptors + 16 extra.
    fused_channels = 128 + global_out + global_out2 + 16
    self.context_prior = cp.Context(fused_channels, fused_channels)

    self.FC_layer = nn.Sequential(
        pt_utils.Conv1d(fused_channels * 3, 128, bn=True),
        nn.Dropout(),
        pt_utils.Conv1d(128, num_classes, activation=None),
    )