def __init__(self, num_classes: int = 16):
    super().__init__()
    self.num_classes = num_classes

    inchannel = 6  # xyz + normals
    self.msg1 = _BasePointnetMSGModule(
        npoint=512,
        radius=[0.1, 0.2, 0.4],
        nsamples=[32, 64, 128],
        mlps=[[inchannel, 32, 32, 64],
              [inchannel, 64, 64, 128],
              [inchannel, 64, 96, 128]]
    )
    inchannel = 64 + 128 + 128 + 3  # concatenated scale features + xyz
    self.msg2 = _BasePointnetMSGModule(
        npoint=128,
        radius=[0.4, 0.8],
        nsamples=[64, 128],
        mlps=[[inchannel, 128, 128, 256],
              [inchannel, 128, 196, 256]]
    )
    inchannel = 256 + 256 + 3
    self.SA = pt_utils.SharedMLP([inchannel, 256, 512, 1024], bn=True)

    # feature-propagation (decoder) layers
    inchannel = 1024 + 256 + 256
    self.fp1 = PointnetFPModule(mlp=[inchannel, 256, 256])
    inchannel = 256 + 64 + 128 + 128
    self.fp2 = PointnetFPModule(mlp=[inchannel, 256, 128])
    inchannel = 128 + 6 + num_classes  # raw input + one-hot shape label
    self.fp3 = PointnetFPModule(mlp=[inchannel, 128, 128])
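# Sketch of the decoder's last skip link (illustration only; B and N here are
# arbitrary assumptions, and the tensor names are hypothetical). fp3's input
# channel count, 128 + 6 + num_classes, assumes the raw 6-channel input
# (xyz + normals) and a per-point one-hot shape label are concatenated onto
# the 128 propagated channels:
import torch

B, N, num_classes = 2, 2048, 16
propagated = torch.randn(B, 128, N)            # output of fp2
raw_input = torch.randn(B, 6, N)               # xyz + normals
one_hot = torch.zeros(B, num_classes, N)       # per-point shape label
skip = torch.cat([propagated, raw_input, one_hot], dim=1)
assert skip.shape[1] == 128 + 6 + num_classes  # fp3's mlp[0] above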
def __init__(self, withnor=True):
    super().__init__()
    # this model does not use the one-hot class-label vector
    self.norm_channel = 3 if withnor else 0

    inchannel = 3 + self.norm_channel  # xyz (+ normals)
    self.ssg1 = _BasePointnetSSGModule(npoint=512, radiu=0.2, nsample=64,
                                       mlp=[inchannel, 64, 64, 128])
    inchannel = 128 + 3
    self.ssg2 = _BasePointnetSSGModule(npoint=128, radiu=0.4, nsample=64,
                                       mlp=[inchannel, 128, 128, 256])
    inchannel = 256 + 3
    self.ssg3 = pt_utils.SharedMLP([inchannel, 256, 512, 1024], bn=True)

    inchannel = 1024 + 256
    self.fp1 = PointnetFPModule([inchannel, 256, 256])
    inchannel = 256 + 128
    self.fp2 = PointnetFPModule([inchannel, 256, 128])
    inchannel = 128 + 3 + self.norm_channel  # if withnor, inchannel is 128 + 6
    self.fp3 = PointnetFPModule([inchannel, 128, 128, 128])
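# Channel sketch for the withnor flag (illustration only): fp3's input grows
# by norm_channel when normals are present, matching the inline comment above.
for withnor in (True, False):
    norm_channel = 3 if withnor else 0
    inchannel = 128 + 3 + norm_channel
    assert inchannel == (134 if withnor else 131)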
def __init__(self):
    super().__init__()
    inchannel = 3
    self.ssg1 = _BasePointnetSSGModule(512, 0.2, 32, [inchannel, 64, 64, 128])
    inchannel = 128 + 3
    self.ssg2 = _BasePointnetSSGModule(128, 0.4, 64, [inchannel, 128, 128, 256])
    inchannel = 256 + 3
    self.SA = pt_utils.SharedMLP([inchannel, 256, 512], bn=True)
    self.last_layer = pt_utils.Conv2d(512, 1024, bn=True)
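# Global-feature sketch (hedged; plain torch, not the module's actual forward,
# which lives elsewhere in the repo). After SA and last_layer above, a max
# over the point dimension gives the PointNet-style 1024-d global descriptor:
import torch

B, npoint = 2, 128
feats = torch.randn(B, 1024, npoint, 1)        # B x 1024 x npoint x 1
global_feat = feats.max(dim=2)[0].squeeze(-1)  # B x 1024
assert global_feat.shape == (B, 1024)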
def __init__(self, npoint: int, radius: list, nsamples: list, mlps: list):
    '''
    npoint   : number of points kept by farthest point sampling
    radius   : ball-query radius for each scale
    nsamples : number of sampled points for each radius
    mlps     : SharedMLP channel spec for each scale
    '''
    super().__init__()
    assert len(radius) == len(nsamples) == len(mlps)
    self.npoint = npoint
    self.nsamples = nsamples
    self.radius = radius
    self.mlps = mlps
    self.fps = FarthestPointSample(npoint)
    self.mlp_layers = nn.ModuleList()
    self.query_ball_point = nn.ModuleList()
    for mlp, radiu, nsample in zip(mlps, radius, nsamples):
        self.mlp_layers.append(pt_utils.SharedMLP(mlp, bn=True))
        self.query_ball_point.append(QueryBallPoint(radiu, nsample))
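# Forward-pass outline for the module above (hedged pseudocode; the real
# forward is defined elsewhere in the repo, and index_points is a hypothetical
# gather helper, not a function from this codebase):
#
#   new_xyz = index_points(xyz, self.fps(xyz))           # B x 3 x npoint
#   scale_feats = []
#   for qbp, mlp in zip(self.query_ball_point, self.mlp_layers):
#       grouped = qbp(xyz, new_xyz, features)            # B x C x npoint x nsample
#       scale_feats.append(mlp(grouped).max(-1)[0])      # B x mlp[-1] x npoint
#   out = torch.cat(scale_feats, dim=1)                  # channels = sum of mlp[-1]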
def __init__(self):
    super().__init__()
    inchannel = 3
    self.msg1 = _BasePointnetMSGModule(
        npoint=512,
        radius=[0.1, 0.2, 0.4],
        nsamples=[16, 32, 128],
        mlps=[[inchannel, 32, 32, 64],
              [inchannel, 64, 64, 128],
              [inchannel, 64, 96, 128]]
    )
    inchannel = 64 + 128 + 128 + 3
    self.msg2 = _BasePointnetMSGModule(
        npoint=128,
        radius=[0.2, 0.4, 0.8],
        nsamples=[32, 64, 128],
        mlps=[[inchannel, 64, 64, 128],
              [inchannel, 128, 128, 256],
              [inchannel, 128, 128, 256]]
    )
    inchannel = 128 + 256 + 256 + 3
    self.SA = pt_utils.SharedMLP([inchannel, 256, 512], bn=True)
    self.last_layer = pt_utils.Conv2d(512, 1024, bn=True)
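# Multi-scale concatenation sketch (plain torch illustration): per-scale
# features are concatenated along the channel dim and xyz is re-appended,
# which is why msg2's input channel above is 64 + 128 + 128 + 3:
import torch

B, npoint = 2, 512
scales = [torch.randn(B, c, npoint) for c in (64, 128, 128)]
xyz = torch.randn(B, 3, npoint)
fused = torch.cat(scales + [xyz], dim=1)
assert fused.shape[1] == 64 + 128 + 128 + 3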
def __init__(self, *, mlp: List[int], bn: bool = True):
    super().__init__()
    self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def __init__(self, mlp: list):
    super().__init__()
    self.mlp = pt_utils.SharedMLP(mlp, bn=True)
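# Channel convention sketch for the FP modules (illustration): mlp[0] must
# equal the propagated feature channels plus the skip-connection channels,
# e.g. the SSG segmentation model's fp1 fuses the 1024-d global feature with
# a 256-channel skip:
propagated, skip = 1024, 256
assert propagated + skip == 1280  # fp1's mlp[0] in the SSG segmentation model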
def __init__(self, K, D, P, C_in, C_out, C_delta, depth_multiplier,
             sampling='random', with_global=False):
    '''
    In the first layer C_in is set to 0, so nn_fts_input is only the delta feature.

    pts (points)   : origin points   -> B x 3 x N
    fts (features) : origin features -> B x C x N
    qrs (queries)  : query points    -> B x 3 x P
    nn_pts_local   : B x 3 x P x K
    nn_fts_input   : B x (C_in + C_delta) x P x K
    returns        : sampled points and features
    '''
    super().__init__()
    self.with_global = with_global
    self.sampling = sampling
    if sampling == 'random':
        self.sample = random_indices(P)
    elif sampling == 'fps':
        self.sample = FarthestPointSample(P)
    else:
        raise ValueError(f'unknown sampling method: {sampling}')
    self.K = K
    self.D = D
    self.P = P
    self.depth_multiplier = depth_multiplier

    # the input to mlp_delta is nn_pts_local (B x 3 x P x K)
    self.mlp_delta = pt_utils.SharedMLP(
        [3, C_delta, C_delta],
        bn=True,
        activation=nn.ELU(inplace=True),
        act_before_bn=True
    )  # B x C_delta x P x K

    # the input to the X-transform is nn_pts_local (B x 3 x P x K)
    self.X_transform0 = pt_utils.Conv2d(
        3, K * K,
        kernel_size=(1, K),
        bn=True,
        bias=False,
        activation=nn.ELU(inplace=True),
        act_before_bn=True
    )  # B x K*K x P x 1
    self.X_transform1 = nn.Sequential(
        nn.Conv2d(K, K * K, kernel_size=(1, K), groups=K, bias=False),
        nn.ELU(inplace=True),
        nn.BatchNorm2d(K * K)
    )  # B x K*K x P x 1
    nn.init.xavier_uniform_(self.X_transform1[0].weight)
    self.X_transform2 = nn.Sequential(
        nn.Conv2d(K, K * K, kernel_size=(1, K), groups=K, bias=False),
        nn.BatchNorm2d(K * K)
    )  # B x K*K x P x 1
    nn.init.xavier_uniform_(self.X_transform2[0].weight)

    # depth_multiplier = ceil(C_out / (C_in + C_delta))
    # depthwise conv followed by a pointwise conv -- equivalent to
    # tf.layers.separable_conv2d
    self.conv = nn.Sequential(
        nn.Conv2d(C_in + C_delta, (C_in + C_delta) * depth_multiplier,
                  kernel_size=(1, K), groups=(C_in + C_delta)),
        # nn.ELU(inplace=True),
        # nn.BatchNorm2d((C_in + C_delta) * depth_multiplier),
        nn.Conv2d((C_in + C_delta) * depth_multiplier, C_out,
                  kernel_size=1, bias=False),
        nn.ELU(inplace=True),
        nn.BatchNorm2d(C_out)
    )
    nn.init.xavier_uniform_(self.conv[0].weight)
    nn.init.xavier_uniform_(self.conv[1].weight)

    if self.with_global:
        self.conv_global = pt_utils.SharedMLP(
            [3, C_out // 4, C_out // 4],
            bn=True,
            activation=nn.ELU(inplace=True),
            act_before_bn=True
        )
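# Shape check for the grouped X-transform convs above (a minimal sketch; the
# B, P, K values are arbitrary assumptions, not from any model config):
import torch
import torch.nn as nn

B, P, K = 2, 128, 8
x = torch.randn(B, K, P, K)  # B x K x P x K (e.g. X_transform0's output reshaped)
conv = nn.Conv2d(K, K * K, kernel_size=(1, K), groups=K, bias=False)
out = conv(x)
assert out.shape == (B, K * K, P, 1)
# The K*K channels are then viewed as a per-point K x K matrix that multiplies
# the K neighbour features -- PointCNN's learned "X-transform".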