Example #1
 def __init__(self,
              num_features,
              n_classes,
              num_hidden,
              num_hidden_layers,
              dropout,
              activation,
              heads=1,
              bias=True):
     super(PFeaSt, self).__init__()
     # dropout (p=0. disables it)
     self.dropout = nn.Dropout(p=dropout if dropout else 0.)
     # activation
     self.activation = activation
     # input layer
     self.conv_input = FeaStConv(num_features,
                                 num_hidden,
                                 heads=heads,
                                 bias=bias)
     # Hidden layers
     self.layers = nn.ModuleList()
     for _ in range(num_hidden_layers):
         self.layers.append(
             FeaStConv(num_hidden, num_hidden, heads=heads, bias=bias))
     # output layer
     self.conv_output = FeaStConv(num_hidden, n_classes, heads=heads, bias=bias)
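Not part of the original snippet: a minimal forward sketch for PFeaSt, assuming the standard PyG (x, edge_index) calling convention and that activation is a callable such as torch.relu.

 def forward(self, x, edge_index):
     # hypothetical wiring of the layers declared above
     x = self.activation(self.conv_input(x, edge_index))
     for layer in self.layers:
         x = self.dropout(self.activation(layer(x, edge_index)))
     return self.conv_output(x, edge_index)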
Example #2
 def __init__(self, nfeat, nhid, nclass, dropout, nlayer=3):
     super(FeaSTX, self).__init__()
     self.conv1 = FeaStConv(nfeat, nhid)
     self.conv2 = FeaStConv(nhid, nclass)
     self.convx = nn.ModuleList(
         [FeaStConv(nhid, nhid) for _ in range(nlayer - 2)])
     self.dropout_p = dropout
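A hedged sketch of how convx and dropout_p might be wired in forward (assumes torch.nn.functional is imported as F; not from the original source):

 def forward(self, x, edge_index):
     x = F.relu(self.conv1(x, edge_index))
     x = F.dropout(x, p=self.dropout_p, training=self.training)
     for conv in self.convx:
         x = F.relu(conv(x, edge_index))
         x = F.dropout(x, p=self.dropout_p, training=self.training)
     return self.conv2(x, edge_index)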
Example #3
 def __init__(self, n_features, heads=4, masif_descr=False):
     # REMEMBER TO UPDATE MODEL NAME
     super(TenConvwAffine, self).__init__()
     self.masif_descr = masif_descr
     if masif_descr:
         self.pre_lin = Linear(80, n_features)
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.conv5 = FeaStConv(16, 16, heads=heads)
     self.conv6 = FeaStConv(16, 16, heads=heads)
     self.conv7 = FeaStConv(16, 16, heads=heads)
     self.conv8 = FeaStConv(16, 16, heads=heads)
     self.conv9 = FeaStConv(16, 16, heads=heads)
     self.conv10 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.s11 = SELU()
     self.s12 = SELU()
     self.out = Linear(4, 1)
     self.affine1 = Linear(16, 16)
     self.affine2 = Linear(16, 16)
Example #4
 def __init__(self, n_features, lin2=4, heads=4):
     super(ThreeConvBlock2, self).__init__()
     self.conv1 = FeaStConv(n_features, 32, heads=heads)
     self.conv2 = FeaStConv(32, 32, heads=heads)
     self.conv3 = FeaStConv(32, 32, heads=heads)
     self.lin1 = Linear(32, 64)
     self.lin2 = Linear(64, lin2)
     self.out = Linear(lin2, 1)
Example #5
 def __init__(self, in_features, out_features, heads=4):
     super(FourConvPoolBlock, self).__init__()
     self.conv1 = FeaStConv(in_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, out_features, heads=heads)
     self.batch = BatchNorm(out_features)
     self.pool = TopKPooling(16)
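A speculative forward sketch: TopKPooling(16) only matches the 16-channel features, so pooling presumably happens before conv4 (assumes torch.nn.functional as F; batch may be None for a single graph):

 def forward(self, x, edge_index, batch=None):
     x = F.relu(self.conv1(x, edge_index))
     x = F.relu(self.conv2(x, edge_index))
     x = F.relu(self.conv3(x, edge_index))
     # TopKPooling returns (x, edge_index, edge_attr, batch, perm, score)
     x, edge_index, _, batch, _, _ = self.pool(x, edge_index, batch=batch)
     return self.batch(self.conv4(x, edge_index))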
Example #6
 def __init__(self, n_features, heads=4):
     super(MultiScaleFeaStNet, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 32, heads=heads)
     self.conv3 = FeaStConv(32, 64, heads=heads)
     self.conv4 = FeaStConv(64, 32, heads=heads)
     self.conv5 = FeaStConv(64, 16, heads=heads)
     self.lin1 = Linear(32, 256)
     self.lin2 = Linear(256, 6890)
     self.out = Linear(6890, 1)
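The declared widths only add up with U-Net-style concatenation (conv5 takes 64 = 32 from conv4 + 32 from conv2; lin1 takes 32 = 16 from conv5 + 16 from conv1). A speculative sketch of that wiring (assumes torch and torch.nn.functional as F):

 def forward(self, x, edge_index):
     x1 = F.relu(self.conv1(x, edge_index))                            # 16
     x2 = F.relu(self.conv2(x1, edge_index))                           # 32
     x3 = F.relu(self.conv3(x2, edge_index))                           # 64
     x4 = F.relu(self.conv4(x3, edge_index))                           # 32
     x5 = F.relu(self.conv5(torch.cat([x4, x2], dim=1), edge_index))   # 64 -> 16
     x = F.relu(self.lin1(torch.cat([x5, x1], dim=1)))                 # 32 -> 256
     return self.out(F.relu(self.lin2(x)))                             # 6890 -> 1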
Example #7
 def __init__(self, n_features, heads=4, dropout=True):
     super(ThreeConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 32, heads=heads)
     self.conv3 = FeaStConv(32, 64, heads=heads)
     self.batch = BatchNorm(64)
     self.lin1 = Linear(64, 32)
     self.lin2 = Linear(32, 16)
     self.lin3 = Linear(16, 8)
     self.lin4 = Linear(8, 4)
     self.out = Linear(4, 1)
Example #8
 def __init__(self, in_features, out_features, heads=4):
     super(FourConvBlock, self).__init__()
     self.conv1 = FeaStConv(in_features, 4, heads=heads)
     # kaiming init must target a weight tensor, not the module itself
     # (assumes a PyG version where FeaStConv exposes a .weight parameter)
     torch.nn.init.kaiming_normal_(self.conv1.weight, nonlinearity='relu')
     self.conv2 = FeaStConv(4, 4, heads=heads)
     torch.nn.init.kaiming_normal_(self.conv2.weight, nonlinearity='relu')
     self.conv3 = FeaStConv(4, 4, heads=heads)
     torch.nn.init.kaiming_normal_(self.conv3.weight, nonlinearity='relu')
     self.conv4 = FeaStConv(4, out_features, heads=heads)
     torch.nn.init.kaiming_normal_(self.conv4.weight, nonlinearity='relu')
     self.batch = BatchNorm(out_features)
Example #9
 def __init__(self, n_features, heads=4):
     super(ThreeConvBlock, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.out = Linear(4, 1)
Example #10
 def __init__(self, n_features, heads=4, masif_descr=False):
     # REMEMBER TO UPDATE MODEL NAME
     super(TwoConv, self).__init__()
     self.masif_descr = masif_descr
     if masif_descr:
         self.pre_lin = Linear(80, n_features)
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.out = Linear(4, 1)
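A hedged forward sketch for TwoConv; with masif_descr set, the 80-dimensional MaSIF descriptors would first be projected down to n_features (the wiring is an assumption, not from the original source):

 def forward(self, x, edge_index):
     if self.masif_descr:
         x = self.pre_lin(x)
     x = self.s1(self.conv1(x, edge_index))
     x = self.s2(self.conv2(x, edge_index))
     x = self.s3(self.lin1(x))
     x = self.s4(self.lin2(x))
     return self.out(x)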
Example #11
 def __init__(self, n_features, heads=4):
     # REMEMBER TO UPDATE MODEL NAME
     super(FourConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 16)
     self.out = Linear(16, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
Example #12
def test_feast_conv():
    x1 = torch.randn(4, 16)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = FeaStConv(16, 32, heads=2)
    assert conv.__repr__() == 'FeaStConv(16, 32, heads=2)'

    out = conv(x1, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6)

    t = '(Tensor, Tensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6)

    adj = adj.sparse_resize((4, 2))
    out = conv((x1, x2), edge_index)
    assert out.size() == (2, 32)
    assert torch.allclose(conv((x1, x2), adj.t()), out, atol=1e-6)

    t = '(PairTensor, Tensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index).tolist() == out.tolist()

    t = '(PairTensor, SparseTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit((x1, x2), adj.t()), out, atol=1e-6)
Example #13
def test_feast_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = FeaStConv(in_channels, out_channels, heads=2)
    assert conv.__repr__() == 'FeaStConv(16, 32, heads=2)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
Example #14
 def __init__(self, hcs, act):
     super(NodeModel, self).__init__()
     self.hcs = hcs
     self.act = act
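     # positional FeaStConv args below: heads=1; the trailing False is
     # bias in older PyG releases (add_self_loops in newer ones)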
     self.FeaStConv = FeaStConv(
         (2 + N_scatter_feats) * self.hcs,
         (2 + N_scatter_feats) * self.hcs, 1, False)
     self.decoder = torch.nn.Linear(
         (2 + N_scatter_feats) * self.hcs, self.hcs)
Example #15
 def makeConv(self, nin, nout, conv_args):
     # FeaStConv
     if conv_args['name'] == 'FeaSt':
         return FeaStConv(nin, nout, conv_args['num_heads'])

     # SplineConv
     if conv_args['name'] == 'Spline':
         return SplineConv(nin, nout,
             self.edge_dim,
             conv_args['kernel_size'],
             is_open_spline=conv_args['open_spline'],
             degree=conv_args['degree']
         )

     # GMMConv
     if conv_args['name'] == 'GMM':
         return GMMConv(nin, nout,
             self.edge_dim,
             conv_args['kernel_size']
         )

     # NNConv: edge features are mapped to an (nin*nout) weight matrix
     if conv_args['name'] == 'NN':
         h = nn.Sequential(
             nn.Linear(self.edge_dim, nin * nout),
             nn.ReLU()
         )
         return NNConv(nin, nout, h)

     # PPFConv: the local MLP sees nin features plus 4 point-pair descriptors
     if conv_args['name'] == 'PPF':
         cin = nin + 4
         hl = nn.Sequential(
             nn.Linear(cin, conv_args['nhidden']),
             nn.ReLU()
         )
         hg = nn.Linear(conv_args['nhidden'], nout)
         return PPFConv(hl, hg)

     # CGConv
     if conv_args['name'] == 'CG':
         return CGConv(nin, self.edge_dim)

     raise ValueError("Unknown convolution: " + str(conv_args['name']))
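The conv_args keys each branch expects, with illustrative values (hypothetical usage, not from the original source):

conv = self.makeConv(16, 32, {'name': 'FeaSt', 'num_heads': 4})
# Spline: {'name': 'Spline', 'kernel_size': 5, 'open_spline': True, 'degree': 1}
# GMM:    {'name': 'GMM', 'kernel_size': 25}
# NN:     {'name': 'NN'}
# PPF:    {'name': 'PPF', 'nhidden': 32}
# CG:     {'name': 'CG'}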
Example #16
 def __init__(self, n_features):
     super(MultiScaleEncoder, self).__init__()
     # Will have to update these
     self.conv1 = FeaStConv(n_features, 16, heads=4)
     self.conv2 = FeaStConv(32, 16, heads=4)
     self.conv3 = FeaStConv(32, 16, heads=4)
     self.conv4 = FeaStConv(48, 16, heads=4)
     self.conv5 = FeaStConv(16, 16, heads=4)
     self.affine1 = Linear(n_features, 16)
     self.affine2 = Linear(n_features, 16)
     self.affine3 = Linear(16, 16)
     self.affine4 = Linear(16, 16)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 8)
     self.out = Linear(8, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
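The widths suggest the raw input re-enters through the affine projections (conv2 and conv3 each take 16 conv-channels plus a 16-channel skip, and conv4 takes the 48-channel concat of all three scales). A speculative sketch only; the author's own comment says these layers were still in flux, and affine3/affine4 go unused here:

 def forward(self, x, edge_index):
     x1 = self.s1(self.conv1(x, edge_index))                                        # 16
     x2 = self.s2(self.conv2(torch.cat([x1, self.affine1(x)], dim=1), edge_index))  # 32 -> 16
     x3 = self.s3(self.conv3(torch.cat([x2, self.affine2(x)], dim=1), edge_index))  # 32 -> 16
     x4 = self.s4(self.conv4(torch.cat([x1, x2, x3], dim=1), edge_index))           # 48 -> 16
     x5 = self.s5(self.conv5(x4, edge_index))                                       # 16
     x = self.s6(self.lin1(x5))
     return self.out(self.s7(self.lin2(x)))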
Example #17
def test_feast_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = FeaStConv(in_channels, out_channels, heads=2)
    assert conv.__repr__() == 'FeaStConv(16, 32, heads=2)'
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index).tolist() == out.tolist()
Example #18
 def __init__(self, n_features, heads=4):
     # REMEMBER TO UPDATE MODEL NAME
     super(EightConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.conv5 = FeaStConv(16, 16, heads=heads)
     self.conv6 = FeaStConv(16, 16, heads=heads)
     self.conv7 = FeaStConv(16, 16, heads=heads)
     self.conv8 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.out = Linear(4, 1)
Example #19
 def __init__(self, nfeat, nhid, nclass, dropout, nlayer=2):
     super(FeaST2, self).__init__()
     self.conv1 = FeaStConv(nfeat, nhid)
     self.conv2 = FeaStConv(nhid, nclass)
     self.dropout_p = dropout
     self.sig = nn.Sigmoid()
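A minimal forward sketch for FeaST2 (assumes torch.nn.functional as F; the wiring is an assumption, not from the original source):

 def forward(self, x, edge_index):
     x = F.relu(self.conv1(x, edge_index))
     x = F.dropout(x, p=self.dropout_p, training=self.training)
     return self.sig(self.conv2(x, edge_index))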
Example #20
    def init_layers(self, batch):
        # each layer_lst entry: (in_channel, out_channel, in_point_num, out_point_num,
        # weight_num, max_neighbor_num, neighbor_num_lst, neighbor_id_lstlst,
        # conv_layer, residual_layer, residual_rate, neighbor_mask_lst,
        # zeros_batch_outpn_outchannel)
        self.layer_lst = []
        self.layer_num = len(self.channel_lst)
        
        in_point_num = self.point_num
        in_channel = 3
        
        for l in range(self.layer_num):
            out_channel = self.channel_lst[l]
            weight_num = self.weight_num_lst[l]
            residual_rate = self.residual_rate_lst[l]
            

            connection_info  = np.load(self.connection_layer_fn_lst[l])
            print ("##Layer",self.connection_layer_fn_lst[l])
            out_point_num = connection_info.shape[0]

            neighbor_num_lst = torch.FloatTensor(connection_info[:,0].astype(float)).cuda() #out_point_num*1
            neighbor_id_dist_lstlst = connection_info[:, 1:] #out_point_num*(max_neighbor_num*2)
            neighbor_id_lstlst = neighbor_id_dist_lstlst.reshape((out_point_num, -1,2))[:,:,0] #out_point_num*max_neighbor_num
            #neighbor_id_lstlst = torch.LongTensor(neighbor_id_lstlst)
            avg_neighbor_num = neighbor_num_lst.mean().item()
            max_neighbor_num = neighbor_id_lstlst.shape[1]

            pc_mask  = torch.ones(in_point_num+1).cuda()
            pc_mask[in_point_num]=0
            neighbor_mask_lst = pc_mask[ torch.LongTensor(neighbor_id_lstlst)].contiguous() #out_pn*max_neighbor_num neighbor is 1 otherwise 0
            
            

            if (residual_rate < 0) or (residual_rate > 1):
                raise ValueError("Invalid residual rate: " + str(residual_rate))
            ####parameters for conv###############
            conv_layer = ""
            if(residual_rate<1):
                edge_index_lst = self.get_edge_index_lst_from_neighbor_id_lstlst(neighbor_id_lstlst, neighbor_num_lst)
                print ("edge_index_lst", edge_index_lst.shape)
                if(self.conv_method=="Cheb"):
                    conv_layer = edge_index_lst, ChebConv(in_channel, out_channel, K=6, normalization='sym', bias=True)
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()/10
                        self.register_parameter("GMM"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="GAT"):
                    #conv_layer = edge_index_lst,GATConv(in_channel, out_channel, heads=1)
                    if((l!=((self.layer_num/2)-2)) and (l!=(self.layer_num-1))): #not middle or last layer
                        conv_layer = edge_index_lst, GATConv(in_channel, out_channel, heads=8, concat=True)
                        out_channel=out_channel*8
                    else:
                        conv_layer = edge_index_lst, GATConv(in_channel, out_channel, heads=8, concat=False) 
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("GAT"+str(l)+"_"+str(ii),p)
                        ii+=1
                    #conv_layer = edge
                elif(self.conv_method=="GMM"):
                    pseudo_coordinates = self.get_GMM_pseudo_coordinates( edge_index_lst, neighbor_num_lst) #edge_num*2
                    #print ("pseudo_coordinates", pseudo_coordinates.shape)
                    conv_layer = edge_index_lst,pseudo_coordinates, GMMConv(in_channel, out_channel, dim=2 , kernel_size=25)
                    for ii, p in enumerate(conv_layer[-1].parameters()):
                        p.data = p.data.cuda()
                        self.register_parameter("GMM" + str(l) + "_" + str(ii), p)
                elif(self.conv_method=="FeaST"):
                    
                    conv_layer = edge_index_lst,FeaStConv(in_channel, out_channel, heads=32)
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("FeaST"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="vw"):
                    weights = torch.randn(out_point_num, max_neighbor_num, out_channel,in_channel)/(avg_neighbor_num*weight_num)
                    
                    weights = nn.Parameter(weights.cuda())
                    
                    self.register_parameter("weights"+str(l),weights)
                    
                    bias = nn.Parameter(torch.zeros(out_channel).cuda())
                    if(self.perpoint_bias == 1):
                        bias= nn.Parameter(torch.zeros(out_point_num, out_channel).cuda())
                    self.register_parameter("bias"+str(l),bias)
                    
                    conv_layer = (weights, bias)
                else:
                    raise ValueError("Unknown convolution layer: " + str(self.conv_method))
                

            zeros_batch_outpn_outchannel = torch.zeros((batch, out_point_num, out_channel)).cuda()
            ####parameters for residual###############
            residual_layer = ""

            if(residual_rate>0):
                p_neighbors = ""
                weight_res = ""

                if((out_point_num != in_point_num) and (self.use_vanilla_pool == 0)):
                    p_neighbors = nn.Parameter((torch.randn(out_point_num, max_neighbor_num)/(avg_neighbor_num)).cuda())
                    self.register_parameter("p_neighbors"+str(l),p_neighbors)
                
                if(out_channel != in_channel):
                    weight_res = torch.randn(out_channel,in_channel)
                    #self.normalize_weights(weight_res)
                    weight_res = weight_res/out_channel
                    weight_res = nn.Parameter(weight_res.cuda())
                    self.register_parameter("weight_res"+str(l),weight_res)
                
                residual_layer = (weight_res, p_neighbors)
            
            ##### put everything together
            
            layer = (in_channel, out_channel, in_point_num, out_point_num, weight_num, max_neighbor_num, neighbor_num_lst,neighbor_id_lstlst, conv_layer, residual_layer, residual_rate, neighbor_mask_lst, zeros_batch_outpn_outchannel)
            
            self.layer_lst +=[layer]
            
            print ("in_channel", in_channel,"out_channel",out_channel, "in_point_num", in_point_num, "out_point_num", out_point_num, "weight_num", weight_num,
                   "max_neighbor_num", max_neighbor_num, "avg_neighbor_num", avg_neighbor_num)
            
            in_point_num=out_point_num
            in_channel = out_channel
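Illustrative only: how a packed conv_layer tuple from the FeaST branch might be consumed later, one layer at a time (the names here are assumptions):

edge_index, conv = conv_layer          # as packed above for conv_method == "FeaST"
out = conv(x, edge_index)              # x: (num_points, in_channel) vertex features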
Example #21
 def __init__(self, n_features, dropout=True):
     # REMEMBER TO UPDATE MODEL NAME
     super(OneConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16)
     self.lin1 = Linear(16, 8)
     self.out = Linear(8, 1)
Example #22
 def __init__(self, nfeat, nhid, nclass, dropout, nlayer=1):
     super(FeaST1, self).__init__()
     self.conv1 = FeaStConv(nfeat, nclass)
     self.dropout_p = dropout