def __init__(self,
             in_channels,
             num_centroids=(128, 32, 0),
             radius=(0.2, 0.4, -1.0),
             num_neighbours=(64, 64, -1),
             sa_channels=((16, 16, 32), (32, 32, 64), (128, 128, 256)),
             fp_channels=((64, 64), (64, 32), (32, 32, 32)),
             num_fp_neighbours=(0, 3, 3),
             seg_channels=(32, ),
             dropout_prob=0.5,
             use_xyz=True):
    """Build a single-scale-grouping PointNet++ segmentation network.

    Encoder/decoder layout: a stack of set-abstraction (SA) modules
    progressively downsamples the cloud, then a mirrored stack of
    feature-propagation (FP) modules interpolates features back to every
    point, and a shared MLP produces per-point segmentation features.

    Args:
        in_channels (int): channels of the input points (xyz included).
        num_centroids (tuple of int): centroids sampled by each SA layer.
        radius (tuple of float): query radius per SA layer (-1 for global).
        num_neighbours (tuple of int): neighbours per centroid per SA layer.
        sa_channels (tuple of tuple of int): MLP widths inside each SA layer.
        fp_channels (tuple of tuple of int): MLP widths inside each FP layer.
        num_fp_neighbours (tuple of int): neighbours used by each FP layer.
        seg_channels (tuple of int): widths of the final shared MLP.
        dropout_prob (float): dropout probability of the final shared MLP.
        use_xyz (bool): whether xyz coordinates are appended to features.
    """
    super(PointNet2SSG, self).__init__()
    self.in_channels = in_channels
    self.use_xyz = use_xyz

    # Every per-layer tuple must agree on the number of SA layers, and the
    # FP stack must mirror the SA stack one-to-one.
    num_sa_layers = len(num_centroids)
    num_fp_layers = len(fp_channels)
    assert len(radius) == num_sa_layers
    assert len(num_neighbours) == num_sa_layers
    assert len(sa_channels) == num_sa_layers
    assert num_sa_layers == num_fp_layers
    assert len(num_fp_neighbours) == num_fp_layers

    # ---- Encoder: set-abstraction stack ----
    # The first 3 input channels are xyz; the remainder are point features.
    feature_channels = in_channels - 3
    self.sa_modules = nn.ModuleList()
    for layer in range(num_sa_layers):
        self.sa_modules.append(
            PointNetSAModule(in_channels=feature_channels,
                             mlp_channels=sa_channels[layer],
                             num_centroids=num_centroids[layer],
                             radius=radius[layer],
                             num_neighbours=num_neighbours[layer],
                             use_xyz=use_xyz))
        feature_channels = sa_channels[layer][-1]

    # Skip-connection widths consumed by the decoder: the raw input
    # (with or without xyz) followed by every SA output width.
    inter_channels = [in_channels if use_xyz else in_channels - 3]
    inter_channels += [channels[-1] for channels in sa_channels]

    # ---- Decoder: feature-propagation stack ----
    self.fp_modules = nn.ModuleList()
    feature_channels = inter_channels[-1]
    for layer in range(num_fp_layers):
        self.fp_modules.append(
            PointnetFPModule(
                in_channels=feature_channels + inter_channels[-2 - layer],
                mlp_channels=fp_channels[layer],
                num_neighbors=num_fp_neighbours[layer]))
        feature_channels = fp_channels[layer][-1]

    # ---- Per-point segmentation head ----
    self.mlp_seg = SharedMLP(feature_channels, seg_channels, ndim=1,
                             dropout_prob=dropout_prob)

    self.reset_parameters()
def __init__(self,
             in_channels,
             out_channels,
             num_centroids=(512, 128, 0),
             radius_list=((0.1, 0.2, 0.4), (0.2, 0.4, 0.8), -1.0),
             num_neighbours_list=((16, 32, 128), (32, 64, 128), -1),
             sa_channels_list=(
                 ((32, 32, 64), (64, 64, 128), (64, 96, 128)),
                 ((64, 64, 128), (128, 128, 256), (128, 128, 256)),
                 (256, 512, 1024),
             ),
             global_channels=(512, 256),
             dropout_prob=0.5,
             use_xyz=True):
    """Build a multi-scale-grouping PointNet++ classification network.

    All set-abstraction (SA) layers except the last use multi-scale
    grouping; the final SA layer is single-scale and collapses the cloud
    into one global feature, which an MLP and a linear layer turn into
    class logits.

    Args:
        in_channels (int): channels of the input points (xyz included).
        out_channels (int): number of classes to predict.
        num_centroids (tuple of int): centroids sampled by each SA layer.
        radius_list (tuple): per-layer radii; a tuple of radii for each
            MSG layer and a single value for the final SSG layer.
        num_neighbours_list (tuple): per-layer neighbour counts, shaped
            like ``radius_list``.
        sa_channels_list (tuple): per-layer MLP widths; a tuple of MLP
            specs for each MSG layer, one MLP spec for the final layer.
        global_channels (tuple of int): widths of the global MLP head.
        dropout_prob (float): dropout probability of the global MLP.
        use_xyz (bool): whether xyz coordinates are appended to features.
    """
    super(PointNet2MSGCls, self).__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.use_xyz = use_xyz

    # All per-layer configuration tuples must describe the same depth.
    num_sa_layers = len(num_centroids)
    assert len(radius_list) == num_sa_layers
    assert len(num_neighbours_list) == num_sa_layers
    assert len(sa_channels_list) == num_sa_layers

    # The first 3 input channels are xyz; the remainder are point features.
    feature_channels = in_channels - 3
    self.sa_modules = nn.ModuleList()

    # All but the last layer group at multiple scales.
    for idx in range(num_sa_layers - 1):
        msg_module = PointNetSAModuleMSG(
            in_channels=feature_channels,
            mlp_channels_list=sa_channels_list[idx],
            num_centroids=num_centroids[idx],
            radius_list=radius_list[idx],
            num_neighbours_list=num_neighbours_list[idx],
            use_xyz=use_xyz)
        self.sa_modules.append(msg_module)
        feature_channels = msg_module.out_channels

    # Final layer: single-scale, aggregates everything into one vector.
    final_module = PointNetSAModule(in_channels=feature_channels,
                                    mlp_channels=sa_channels_list[-1],
                                    num_centroids=num_centroids[-1],
                                    radius=radius_list[-1],
                                    num_neighbours=num_neighbours_list[-1],
                                    use_xyz=use_xyz)
    self.sa_modules.append(final_module)

    # Classification head on the global feature.
    self.mlp_global = MLP(sa_channels_list[-1][-1], global_channels,
                          dropout_prob=dropout_prob)
    self.classifier = nn.Linear(global_channels[-1], out_channels, bias=True)

    self.reset_parameters()
def __init__(self,
             in_channels=3,
             num_centroids=(64, 0),
             radius=(0.2, -1.0),
             num_neighbours=(32, -1),
             sa_channels=((64, 64, 128), (128, 128, 128)),
             global_channels=(128, 64),
             dropout_prob=0.5,
             use_xyz=True):
    """Build a single-scale-grouping PointNet++ global feature encoder.

    This variant deliberately has no classification head: the
    ``out_channels`` parameter and the final ``nn.Linear`` classifier that
    the sibling classification network carries were removed (they were
    left as commented-out dead code here), so the module ends at
    ``mlp_global`` and produces a ``global_channels[-1]``-dimensional
    feature for downstream heads.

    Args:
        in_channels (int): channels of the input points (xyz included).
        num_centroids (tuple of int): centroids sampled by each SA layer.
        radius (tuple of float): query radius per SA layer (-1 for global).
        num_neighbours (tuple of int): neighbours per centroid per SA layer.
        sa_channels (tuple of tuple of int): MLP widths inside each SA layer.
        global_channels (tuple of int): widths of the global MLP head.
        dropout_prob (float): dropout probability of the global MLP.
        use_xyz (bool): whether xyz coordinates are appended to features.
    """
    super(PointNet2SSGCls, self).__init__()
    self.in_channels = in_channels
    self.use_xyz = use_xyz

    # All per-layer configuration tuples must describe the same depth.
    num_sa_layers = len(num_centroids)
    assert len(radius) == num_sa_layers
    assert len(num_neighbours) == num_sa_layers
    assert len(sa_channels) == num_sa_layers

    # The first 3 input channels are xyz; the remainder are point features.
    feature_channels = in_channels - 3
    self.sa_modules = nn.ModuleList()
    for ind in range(num_sa_layers):
        sa_module = PointNetSAModule(in_channels=feature_channels,
                                     mlp_channels=sa_channels[ind],
                                     num_centroids=num_centroids[ind],
                                     radius=radius[ind],
                                     num_neighbours=num_neighbours[ind],
                                     use_xyz=use_xyz)
        self.sa_modules.append(sa_module)
        feature_channels = sa_channels[ind][-1]

    # Global MLP head; no classifier is attached on purpose (see docstring).
    self.mlp_global = MLP(feature_channels, global_channels,
                          dropout_prob=dropout_prob)

    self.reset_parameters()
def __init__(self,
             in_channels,
             out_channels,
             num_centroids=(512, 128),
             radius=(0.2, 0.4),
             num_neighbours=(32, 64),
             sa_channels=((64, 64, 128), (128, 128, 256)),
             local_channels=(256, 512, 1024),
             global_channels=(512, 256),
             dropout_prob=0.5,
             use_xyz=True):
    """Build a single-scale-grouping PointNet++ classification network.

    Args:
        in_channels (int): the number of input channels
        out_channels (int): the number of semantics classes to predict over
        num_centroids (tuple of int): the numbers of centroids to sample in
            each set abstraction module
        radius (tuple of float): a tuple of radius to query neighbours in
            each set abstraction module
        num_neighbours (tuple of int): the numbers of neighbours to query
            for each centroid
        sa_channels (tuple of tuple of int): the numbers of channels used
            within each set abstraction module
        local_channels (tuple of int): the numbers of channels to extract
            local features after set abstraction
        global_channels (tuple of int): the numbers of channels to extract
            global features
        dropout_prob (float): the probability to dropout input features
        use_xyz (bool): whether or not to use the xyz position of a point
            as a feature
    """
    super(PointNet2SSGCls, self).__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.use_xyz = use_xyz

    # sanity check: every per-layer tuple must describe the same depth
    num_layers = len(num_centroids)
    assert len(radius) == num_layers
    assert len(num_neighbours) == num_layers
    assert len(sa_channels) == num_layers

    # The first 3 input channels are xyz; the remainder are point features.
    feature_channels = in_channels - 3
    self.sa_modules = nn.ModuleList()
    for ind in range(num_layers):
        sa_module = PointNetSAModule(in_channels=feature_channels,
                                     mlp_channels=sa_channels[ind],
                                     num_centroids=num_centroids[ind],
                                     radius=radius[ind],
                                     num_neighbours=num_neighbours[ind],
                                     use_xyz=use_xyz)
        self.sa_modules.append(sa_module)
        feature_channels = sa_channels[ind][-1]

    # xyz of the last abstraction level is concatenated as extra features.
    if use_xyz:
        feature_channels += 3
    # NOTE(review): siblings construct SharedMLP with ndim=1/dropout_prob
    # and MLP with dropout_prob=; here the kwargs are bn=True and dropout=.
    # Confirm this file's SharedMLP/MLP variants accept these signatures.
    self.mlp_local = SharedMLP(feature_channels, local_channels, bn=True)
    self.mlp_global = MLP(local_channels[-1], global_channels,
                          dropout=dropout_prob)
    self.classifier = nn.Linear(global_channels[-1], out_channels,
                                bias=True)
    # NOTE(review): sibling constructors call self.reset_parameters();
    # verify the enclosing class defines init_weights().
    self.init_weights()
def __init__(self,
             in_channels,
             num_classes,
             num_seg_classes,
             num_centroids=(512, 128, 0),
             radius_list=((0.1, 0.2, 0.4), (0.4, 0.8), -1.0),
             num_neighbours_list=((32, 64, 128), (64, 128), -1),
             sa_channels_list=(
                 ((32, 32, 64), (64, 64, 128), (64, 96, 128)),
                 ((128, 128, 256), (128, 196, 256)),
                 (256, 512, 1024),
             ),
             fp_channels=((256, 256), (256, 128), (128, 128)),
             num_fp_neighbours=(0, 3, 3),
             seg_channels=(128, ),
             dropout_prob=0.5,
             use_xyz=True,
             use_one_hot=True):
    """Build a multi-scale-grouping PointNet++ part-segmentation network.

    Encoder/decoder layout: multi-scale set-abstraction (SA) layers plus a
    final single-scale global SA layer form the encoder; a mirrored stack
    of feature-propagation (FP) layers forms the decoder; a shared MLP and
    a 1x1 Conv1d produce per-point segmentation logits. Optionally the
    one-hot object-class label is concatenated to the input features.

    Args:
        in_channels (int): channels of the input points (xyz included).
        num_classes (int): number of object classes (one-hot width).
        num_seg_classes (int): number of part-segmentation classes.
        num_centroids (tuple of int): centroids sampled by each SA layer.
        radius_list (tuple): per-layer radii; a tuple of radii for each
            MSG layer and a single value for the final SSG layer.
        num_neighbours_list (tuple): per-layer neighbour counts, shaped
            like ``radius_list``.
        sa_channels_list (tuple): per-layer MLP widths; a tuple of MLP
            specs for each MSG layer, one MLP spec for the final layer.
        fp_channels (tuple of tuple of int): MLP widths inside each FP layer.
        num_fp_neighbours (tuple of int): neighbours used by each FP layer.
        seg_channels (tuple of int): widths of the shared segmentation MLP.
        dropout_prob (float): dropout probability of the segmentation MLP.
        use_xyz (bool): whether xyz coordinates are appended to features.
        use_one_hot (bool): whether the one-hot class label is concatenated.
    """
    super(PointNet2MSGPartSeg, self).__init__()
    self.in_channels = in_channels
    self.num_classes = num_classes
    self.num_seg_classes = num_seg_classes
    self.use_xyz = use_xyz
    self.use_one_hot = use_one_hot

    # Every per-layer tuple must agree on the number of SA layers, and the
    # FP stack must mirror the SA stack one-to-one.
    num_sa_layers = len(num_centroids)
    num_fp_layers = len(fp_channels)
    assert len(radius_list) == num_sa_layers
    assert len(num_neighbours_list) == num_sa_layers
    assert len(sa_channels_list) == num_sa_layers
    assert num_sa_layers == num_fp_layers
    assert len(num_fp_neighbours) == num_fp_layers

    # ---- Encoder ----
    # The first 3 input channels are xyz; the remainder are point features.
    feature_channels = in_channels - 3
    self.sa_modules = nn.ModuleList()

    # All but the last layer group at multiple scales.
    for idx in range(num_sa_layers - 1):
        msg_module = PointNetSAModuleMSG(
            in_channels=feature_channels,
            mlp_channels_list=sa_channels_list[idx],
            num_centroids=num_centroids[idx],
            radius_list=radius_list[idx],
            num_neighbours_list=num_neighbours_list[idx],
            use_xyz=use_xyz)
        self.sa_modules.append(msg_module)
        feature_channels = msg_module.out_channels

    # Final layer: single-scale, aggregates everything into one vector.
    final_sa = PointNetSAModule(in_channels=feature_channels,
                                mlp_channels=sa_channels_list[-1],
                                num_centroids=num_centroids[-1],
                                radius=radius_list[-1],
                                num_neighbours=num_neighbours_list[-1],
                                use_xyz=use_xyz)
    self.sa_modules.append(final_sa)

    # Skip-connection widths consumed by the decoder: the raw input
    # (optionally widened by the one-hot label) and every SA output width.
    inter_channels = [in_channels if use_xyz else in_channels - 3]
    if self.use_one_hot:
        inter_channels[0] += num_classes  # concat with one-hot
    inter_channels += [module.out_channels for module in self.sa_modules]

    # ---- Decoder: feature-propagation stack ----
    self.fp_modules = nn.ModuleList()
    feature_channels = inter_channels[-1]
    for idx in range(num_fp_layers):
        self.fp_modules.append(
            PointnetFPModule(
                in_channels=feature_channels + inter_channels[-2 - idx],
                mlp_channels=fp_channels[idx],
                num_neighbors=num_fp_neighbours[idx]))
        feature_channels = fp_channels[idx][-1]

    # ---- Per-point segmentation head ----
    self.mlp_seg = SharedMLP(feature_channels, seg_channels, ndim=1,
                             dropout_prob=dropout_prob)
    self.seg_logit = nn.Conv1d(seg_channels[-1], num_seg_classes, 1,
                               bias=True)

    self.reset_parameters()