class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.gc3 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj, eval=False):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        if eval:
            return x
        else:
            x = F.dropout(x, self.dropout, training=self.training)
            x = self.gc3(x, adj)
            return F.log_softmax(x, dim=1)

    def functional_forward(self, x, adj, weights, eval=False):
        x = F.relu(self.gc1.functional_forward(x, adj, id=1, weights=weights))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2.functional_forward(x, adj, id=2, weights=weights)
        if eval:
            return x
        else:
            x = F.dropout(x, self.dropout, training=self.training)
            x = self.gc3.functional_forward(x, adj, id=3, weights=weights)
            return F.log_softmax(x, dim=1)
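# Many of the PyTorch snippets in this collection construct
# GraphConvolution(in_features, out_features) and call it as layer(x, adj).
# For reference, here is a minimal sketch of such a layer in the style of
# Kipf & Welling's pygcn; it is an assumption about the layer's behaviour,
# not the definition used by any particular snippet.
import math
import torch
import torch.nn as nn

class GraphConvolution(nn.Module):
    """Simple GCN layer: output = adj @ (x @ W) + b, with adj pre-normalized."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        self.bias = nn.Parameter(torch.FloatTensor(out_features)) if bias else None
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x, adj):
        support = torch.mm(x, self.weight)   # X W
        output = torch.spmm(adj, support)    # A_hat (X W), adj given as a sparse tensor
        return output if self.bias is None else output + self.bias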
def __init__(self, nnodes, nfeat, GO_mat):
    super(TORTOISE_GCN, self).__init__()
    self.nnodes = nnodes
    self.nfeat = nfeat
    # GraphConvolution args: nnodes, in_features, out_features
    self.gc1 = GraphConvolution(nnodes, nfeat, 50)
    self.gc2 = GraphConvolution(nnodes, 50, 50)
    self.gc3 = GraphConvolution(nnodes, 50, 10)
    self.gc4 = GraphConvolution(nnodes, 10, 1)
    # Learnable edge weights: the coupling matrix C
    self.C = nn.parameter.Parameter(
        torch.ones(nnodes, nnodes, dtype=torch.float32)).requires_grad_(True)
    # Do not train this matrix
    self.GO = GO_mat.detach().clone().type(torch.float).requires_grad_(False)
    # Output FFNN
    pathway_latent_layer_size = 5
    stdv = 1. / pathway_latent_layer_size**0.5
    self.out1 = nn.parameter.Parameter(
        torch.FloatTensor(self.GO.size()[1], pathway_latent_layer_size))
    self.out1.data.uniform_(-stdv, stdv)
    self.out1_bias = nn.parameter.Parameter(
        torch.FloatTensor(1, pathway_latent_layer_size))
    self.out1_bias.data.uniform_(-stdv, stdv)
    self.out2 = nn.parameter.Parameter(
        torch.FloatTensor(pathway_latent_layer_size, 1))
    self.out2.data.uniform_(-1, 1)
def _build(self):
    self.mylayer1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                           output_dim=FLAGS.hidden1,
                                           adj=self.adj,
                                           features_nonzero=self.features_nonzero,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)
    self.hidden1 = self.mylayer1(self.inputs)

    self.mylayer2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                     output_dim=FLAGS.hidden2,
                                     adj=self.adj,
                                     act=lambda x: x,
                                     dropout=self.dropout,
                                     logging=self.logging)
    self.embeddings = self.mylayer2(self.hidden1)

    self.W1 = self.mylayer1.get_weight()
    self.W2 = self.mylayer2.get_weight()
    self.z_mean = self.embeddings

    self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                               act=lambda x: x,
                                               logging=self.logging)(self.embeddings)
def _build(self):
    self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                          output_dim=self.hidden1_dim,
                                          adj=self.adj,
                                          features_nonzero=self.features_nonzero,
                                          act=tf.nn.relu,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.inputs)
    self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                   output_dim=self.hidden2_dim,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(self.hidden1)
    self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                      output_dim=self.hidden2_dim,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(self.hidden1)
    # Reparameterization: z = mu + eps * sigma
    self.z = self.z_mean + tf.random_normal([self.n_samples, self.hidden2_dim],
                                            dtype=tf.float64) * tf.exp(self.z_log_std)
    self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                               act=lambda x: x,
                                               logging=self.logging)(self.z)
def __init__(self, nfeat, nhid, n_output, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, 64)
    self.predict = nn.Linear(64, n_output)  # output layer
    self.dropout = dropout
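# A hedged sketch of the forward pass this constructor suggests: two graph
# convolutions with ReLU/dropout, then the linear readout for regression.
# It assumes torch.nn.functional is imported as F, as in the other snippets;
# the actual forward in the source project may differ.
def forward(self, x, adj):
    x = F.relu(self.gc1(x, adj))
    x = F.dropout(x, self.dropout, training=self.training)
    x = F.relu(self.gc2(x, adj))
    return self.predict(x)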
def construct(self, inputs=None, reuse=False):
    if inputs is None:
        inputs = self.inputs
    with tf.device("/gpu:1"):
        with tf.variable_scope('Decoder', reuse=reuse):
            self.hidden1 = GraphConvolution(input_dim=FLAGS.hidden2,
                                            output_dim=FLAGS.hidden1,
                                            adj=self.adj,
                                            act=tf.nn.relu,
                                            dropout=self.dropout,
                                            logging=self.logging,
                                            name='GG_dense_1')(inputs)
            self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                               output_dim=self.input_dim,
                                               adj=self.adj,
                                               act=lambda x: x,
                                               dropout=self.dropout,
                                               logging=self.logging,
                                               name='GG_dense_2')(self.hidden1)
            self.z_mean = self.embeddings
    return self.z_mean, self.hidden1
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, nlayers, layer_type):
    super(MultiLayerGNN, self).__init__()
    self.dropout = dropout
    if layer_type == 'GAT':
        if nlayers == 1:
            self.gnn_layers = [GAT(nfeat, nclass, dropout, alpha, nheads)]
        else:
            self.gnn_layers = [GAT(nfeat, nhid, dropout, alpha, nheads)]
            self.gnn_layers += [
                GAT(nhid, nhid, dropout, alpha, nheads)
                for _ in range(nlayers - 2)
            ]
            self.gnn_layers.append(GAT(nhid, nclass, dropout, alpha, nheads))
    elif layer_type == 'GCN':
        if nlayers == 1:
            self.gnn_layers = [GraphConvolution(nfeat, nclass)]
        else:
            self.gnn_layers = [GraphConvolution(nfeat, nhid)]
            self.gnn_layers += [
                GraphConvolution(nhid, nhid) for _ in range(nlayers - 2)
            ]
            self.gnn_layers.append(GraphConvolution(nhid, nclass))
    for i, gnn in enumerate(self.gnn_layers):
        self.add_module('gnn_{}'.format(i), gnn)
def gcn_tracking2(gcn_config, inputs, supports, reuse=tf.AUTO_REUSE, scope='gcn_att'):
    logging = get(gcn_config, 'logging', False)
    input_dim = get(gcn_config, 'output_dim', 512)
    g2_hidden1 = get(gcn_config, 'g2_hidden1', 384)
    output_dim = get(gcn_config, 'g2_output', 256)
    with tf.variable_scope(scope, 'gcn_att', [inputs], reuse=reuse):
        with tf.variable_scope('gcn1', reuse=reuse):
            output = GraphConvolution(inputs=inputs,
                                      input_dim=input_dim,
                                      output_dim=g2_hidden1,
                                      support=supports,
                                      act=lambda x: tf.maximum(x, 0.2 * x),  # leaky ReLU, slope 0.2
                                      dropout=True,
                                      name='gcn1',
                                      support_sparse=False,
                                      logging=logging)
        with tf.variable_scope('gcn2', reuse=reuse):
            output = GraphConvolution(inputs=output,
                                      input_dim=g2_hidden1,
                                      output_dim=output_dim,
                                      support=supports,
                                      act=None,
                                      dropout=True,
                                      name='gcn2',
                                      support_sparse=False,
                                      logging=logging)
    return output
def gcn_tracking(gcn_config, inputs, supports, reuse=tf.AUTO_REUSE, scope='gcn_temp'):
    logging = get(gcn_config, 'logging', False)
    input_dim = get(gcn_config, 'input_dim', 256)
    hidden1 = get(gcn_config, 'hidden1', 512)
    output_dim = get(gcn_config, 'output_dim', 256)
    inputs = tf.reshape(inputs, [-1, input_dim])
    with tf.variable_scope(scope, 'gcn_temp', [inputs], reuse=reuse):
        with tf.variable_scope('gcn1'):
            output = GraphConvolution(inputs=inputs,
                                      input_dim=input_dim,
                                      output_dim=hidden1,
                                      support=supports,
                                      act=lambda x: tf.maximum(x, 0.2 * x),
                                      dropout=True,
                                      name='gcn1',
                                      logging=logging)
        with tf.variable_scope('gcn2'):
            output = GraphConvolution(inputs=output,
                                      input_dim=hidden1,
                                      output_dim=output_dim,
                                      support=supports,
                                      act=lambda x: tf.maximum(x, 0.2 * x),
                                      dropout=True,
                                      name='gcn2',
                                      logging=logging)
    return output
def __init__(self, nfeat, nhid, nclass, dropout, sampler):
    super().__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
    self.sampler = sampler
def __init__(self, hidden1, hidden2, num_features, num_nodes, features_nonzero, dropout):
    super(CAN, self).__init__()
    self.input_dim = num_features
    self.features_nonzero = features_nonzero
    self.n_samples = num_nodes
    self.dropout = dropout
    # The layer arguments defined here are forwarded to each layer's own __init__.
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim,
        output_dim=hidden1,
        dropout=self.dropout,
        features_nonzero=self.features_nonzero)
    self.hidden2 = Dense(input_dim=self.n_samples,
                         output_dim=hidden1,
                         sparse_inputs=True)
    self.z_u_mean = GraphConvolution(input_dim=hidden1,
                                     output_dim=hidden2,
                                     dropout=self.dropout)
    self.z_u_log_std = GraphConvolution(input_dim=hidden1,
                                        output_dim=hidden2,
                                        dropout=self.dropout)
    self.z_a_mean = Dense(input_dim=hidden1,
                          output_dim=hidden2,
                          dropout=self.dropout)
    self.z_a_log_std = Dense(input_dim=hidden1,
                             output_dim=hidden2,
                             dropout=self.dropout)
    self.reconstructions = InnerDecoder(input_dim=hidden2)
def __init__(self, data, n_hidden, n_latent, dropout):
    super().__init__()
    # Data
    self.x = data['features']
    self.adj_norm = data['adj_norm']
    self.adj_labels = data['adj_labels']
    # Dimensions
    N, D = data['features'].shape
    self.n_samples = N
    self.n_edges = self.adj_labels.sum()
    self.n_subsample = 2 * self.n_edges
    self.input_dim = D
    self.n_hidden = n_hidden
    self.n_latent = n_latent
    # Parameters
    self.pos_weight = float(N * N - self.n_edges) / self.n_edges
    self.norm = float(N * N) / ((N * N - self.n_edges) * 2)
    self.gc1 = GraphConvolution(self.input_dim, self.n_hidden)
    self.gc2_mu = GraphConvolution(self.n_hidden, self.n_latent)
    self.gc2_sig = GraphConvolution(self.n_hidden, self.n_latent)
    self.dropout = dropout
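# A minimal sketch (not part of the snippet above) of how pos_weight and norm
# are typically used in a GAE/VGAE reconstruction loss: pos_weight up-weights
# the sparse positive adjacency entries, and norm rescales the mean loss.
# The function and tensor names (recon_loss, logits, adj_labels) are
# assumptions for illustration.
import torch
import torch.nn.functional as F

def recon_loss(logits, adj_labels, pos_weight, norm):
    # Weighted binary cross-entropy over all N*N adjacency entries.
    return norm * F.binary_cross_entropy_with_logits(
        logits, adj_labels, pos_weight=torch.tensor(pos_weight))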
def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
    super(GCNModelVAE, self).__init__()
    # Three graph convolution layers: a shared hidden layer, then one each for mu and log-sigma
    self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
    self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
    self.dc = InnerProductDecoder(dropout, act=lambda x: x)
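# A plausible forward pass for the VAE above, following the common
# encode / reparameterize / decode pattern used with this constructor.
# The method names and the exact reparameterization are assumptions for
# illustration (torch is assumed imported); gc2 and gc3 produce mu and logvar.
def encode(self, x, adj):
    hidden1 = self.gc1(x, adj)
    return self.gc2(hidden1, adj), self.gc3(hidden1, adj)

def reparameterize(self, mu, logvar):
    if self.training:
        std = torch.exp(logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    return mu

def forward(self, x, adj):
    mu, logvar = self.encode(x, adj)
    z = self.reparameterize(mu, logvar)
    return self.dc(z), mu, logvar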
def __init__(self, nFeat, nhid, nclass, dropout=0.5):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nFeat, nhid, bias=True)
    self.gc2 = GraphConvolution(2 * nhid, nhid, bias=True)
    self.dropout = dropout
    self.dense = nn.Linear(nhid, nclass, bias=True)
def __init__(self, K, node_num, nfeat, nhid, nclass, sampleSize, dropout, trainAttention):
    super(GAT, self).__init__()
    self.gc1 = GraphConvolution(K, node_num, nfeat, nhid, sampleSize[1],
                                'False', 'True', trainAttention)
    self.gc2 = GraphConvolution(1, node_num, K * nhid, 14 * nclass, sampleSize[0],
                                'False', 'False', trainAttention)
    # self.gc3 = GraphConvolution(1, node_num, 4*7*nclass, 7*nclass, 'False', 'False')
    self.gc6 = LogisticRegression(14 * nclass, 1)
    self.dropout = dropout
def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = GraphConvolution(nhid, nhid)
    self.gc3 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
def encoder(self, inputs):
    with tf.variable_scope('encoder') as scope:
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging,
            name="encoder_conv1")(inputs)
        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.latent_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging,
                                       name="encoder_conv2")(self.hidden1)
        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.latent_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging,
                                          name="encoder_conv3")(self.hidden1)
        # middle hidden layer
        z = self.z_mean + tf.random_normal(
            [self.n_samples, FLAGS.latent_dim]) * tf.exp(self.z_log_std)
        return z
def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    # number of input channels and number of output channels
    self.gc1 = GraphConvolution(nfeat, nhid[0])
    self.bc1 = nn.BatchNorm1d(nhid[0])
    self.gc2 = GraphConvolution(nhid[0], nhid[1])
    self.bc2 = nn.BatchNorm1d(nhid[1])
    # self.gcls = []
    # self.gcls.append(self.gc1)
    # for i in range(len(nhid)-1):
    #     self.gcls.append(GraphConvolution(nhid[i], nhid[i+1]))
    self.gc3 = GraphConvolution(nhid[1], nhid[2])
    self.bc3 = nn.BatchNorm1d(nhid[2])
    # self.gc4 = GraphConvolution(nhid[2], nhid[3])
    # self.bc4 = nn.BatchNorm1d(nhid[3])
    self.fc = GraphConvolution(nhid[-1], nclass)
    self.activation = nn.PReLU()
    self.mlp = nn.Sequential(
        # nn.Linear(400, 200),
        # nn.PReLU(),
        nn.Linear(200, 50),
        # nn.BatchNorm1d(50),
        nn.PReLU(),
        # nn.Dropout(),
        nn.Linear(50, nclass))
    self.dropout = dropout
def _build(self):
    with tf.variable_scope('Encoder'):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name='e_dense_1')(self.inputs)
        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging,
                                       name='e_dense_2')(self.hidden1)
        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging,
                                          name='e_dense_3')(self.hidden1)
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.z)
        self.embeddings = self.z
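# A hedged sketch of the objective that commonly accompanies an encoder like
# the one above (GAE-style VGAE): a norm-scaled, pos_weight-ed cross-entropy
# over the reconstructed adjacency plus a KL term on (z_mean, z_log_std).
# All variable names here (preds, labels, pos_weight, norm, num_nodes) are
# assumptions for illustration; the project's own optimizer may differ.
recon_cost = norm * tf.reduce_mean(
    tf.nn.weighted_cross_entropy_with_logits(targets=labels,
                                             logits=preds,
                                             pos_weight=pos_weight))
kl = (0.5 / num_nodes) * tf.reduce_mean(tf.reduce_sum(
    1 + 2 * z_log_std - tf.square(z_mean) - tf.square(tf.exp(z_log_std)), 1))
cost = recon_cost - kl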
def __init__(self, nfeat_v, nfeat_e, nhid, nclass, dropout, node_layer=True):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat_v, nhid, nfeat_e, nfeat_e, node_layer=True)
    self.gc2 = GraphConvolution(nhid, nhid, nfeat_e, nfeat_e, node_layer=False)
    self.gc3 = GraphConvolution(nhid, nclass, nfeat_e, nfeat_e, node_layer=True)
    self.dropout = dropout
def build(self):
    self.adj = dropout_sparse(self.adj, 1 - self.adjdp, self.adj_nonzero)
    self.hidden1 = GraphConvolutionSparse(
        name='gcn_sparse_layer',
        input_dim=self.input_dim,
        output_dim=self.emb_dim,
        adj=self.adj,
        features_nonzero=self.features_nonzero,
        dropout=self.dropout,
        act=self.act)(self.inputs)
    self.hidden2 = GraphConvolution(
        name='gcn_dense_layer',
        input_dim=self.emb_dim,
        output_dim=self.emb_dim,
        adj=self.adj,
        dropout=self.dropout,
        act=self.act)(self.hidden1)
    self.emb = GraphConvolution(
        name='gcn_dense_layer2',
        input_dim=self.emb_dim,
        output_dim=self.emb_dim,
        adj=self.adj,
        dropout=self.dropout,
        act=self.act)(self.hidden2)
    # Attention-weighted combination of the three layer outputs
    self.embeddings = (self.hidden1 * self.att[0]
                       + self.hidden2 * self.att[1]
                       + self.emb * self.att[2])
    self.reconstructions = InnerProductDecoder(
        name='gcn_decoder',
        input_dim=self.emb_dim,
        num_r=self.num_r,
        act=tf.nn.sigmoid)(self.embeddings)
def __init__(self, nfeat, nhid, nclass, dropout):
    super(ODEGCN3, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.gc2 = ODEBlock(ODEfunc(nhid))
    self.gc3 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
def __init__(self, nfeat, nhid, nclass, dropout, nlayers=3, residue_layers=1):
    super(RESKnorm, self).__init__()
    if nlayers < 2 + residue_layers:
        raise ValueError(
            "Can't make a Residual GCN with less than {} layers using {} layers for each residual block"
            .format(2 + residue_layers, residue_layers))
    self.n_layers = nlayers
    stacked_layers = (
        [GraphConvolution(nfeat, nhid)] +
        [GraphConvolution(nhid, nhid) for _ in range(self.n_layers - 2)] +
        [GraphConvolution(nhid, nclass)])
    self.gcs = nn.ModuleList(stacked_layers)
    self.norms = nn.ModuleList([
        nn.GroupNorm(min(32, nhid), nhid) for _ in range(self.n_layers - 2)
    ])
    self.dropout = dropout
    self.residue_layers = residue_layers
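# A hedged sketch of how the residual blocks might be applied in a forward
# pass: every residue_layers hidden layers, the block's input is added back
# as a skip connection before the next block starts. This is an illustrative
# assumption, not the snippet's own forward; F is torch.nn.functional as in
# the other snippets.
def forward(self, x, adj):
    x = F.relu(self.gcs[0](x, adj))
    residual = x
    for i, gc in enumerate(self.gcs[1:-1]):
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.norms[i](gc(x, adj)))
        if (i + 1) % self.residue_layers == 0:
            x = x + residual   # skip connection around the residual block
            residual = x       # start of the next block
    x = F.dropout(x, self.dropout, training=self.training)
    return F.log_softmax(self.gcs[-1](x, adj), dim=1)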
def __init__(self, nfeat, nhid, nclass, dropout):
    super(GCN, self).__init__()
    self.gc1 = GraphConvolution(nfeat, nhid)
    self.reg_params = list(self.gc1.parameters())
    self.gc2 = GraphConvolution(nhid, nclass)
    self.dropout = dropout
def __init__(self, nfeat, nhid, nclass, dropout):
    # nfeat: number of input features per node; nhid: hidden units; nclass: number of output classes
    super(GCN, self).__init__()  # invoke the parent class constructor
    self.gc1 = GraphConvolution(nfeat, nhid)    # gc1: input size nfeat, output size nhid
    self.gc2 = GraphConvolution(nhid, nclass)   # gc2: input size nhid, output size nclass
    self.dropout = dropout
def __init__(self, nfeat, nhid, dropout):
    super(GPN_Valuator, self).__init__()
    self.gc1 = GraphConvolution(nfeat, 2 * nhid)
    self.gc2 = GraphConvolution(2 * nhid, nhid)
    self.fc3 = nn.Linear(nhid, 1)
    self.dropout = dropout
def _build(self):
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        features_nonzero=self.features_nonzero,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.inputs)
    self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                   output_dim=FLAGS.hidden2,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(self.hidden1)
    self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                      output_dim=FLAGS.hidden2,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(self.hidden1)
    # TODO: output the hidden vector z, which is the node embedding vector
    self.z = self.z_mean + tf.random_normal(
        [self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
    logging.info('Finished calculating the latent vector')
    self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                               act=lambda x: x,
                                               logging=self.logging)(self.z)
def _build(self):
    # First GCN layer: (A, X) --> H (hidden layer features)
    self.hidden1 = GraphConvolution(
        input_dim=self.input_dim,
        output_dim=self.hidden1_dim,
        adj=self.adj,
        # features_nonzero=self.features_nonzero,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.inputs)
    # Second GCN layer: (A, H) --> Z (node embeddings)
    self.embeddings = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)
    # z_mean for the AE: no noise added (because this is not a VAE)
    self.z_mean = self.embeddings
    # Inner-product decoder: Z (embeddings) --> A (reconstructed adjacency)
    self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                               act=lambda x: x,
                                               logging=self.logging)(self.embeddings)
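# For orientation, a hedged sketch of what an inner-product decoder of this
# kind typically computes: Z Z^T passed through the chosen activation and
# flattened to a vector of edge scores. This follows the common GAE
# implementation; the decoder used above may differ.
def inner_product_decode(z, act=tf.nn.sigmoid):
    logits = tf.matmul(z, z, transpose_b=True)  # pairwise dot products between node embeddings
    return act(tf.reshape(logits, [-1]))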
def create_inference_network_weights(self):
    self.inference_layers = []
    input_dim = self.num_features
    is_sparse = True
    for i, val in enumerate(self.layers_config):
        self.inference_layers.append(
            GraphConvolution(input_dim=input_dim,
                             output_dim=val,
                             adj=self.A_gcn,
                             act=tf.nn.relu,
                             dropout_rate=self.dropout,
                             name="inference_layer_" + str(i)))
        input_dim = val
    # Add last layer for both mu and sigma.
    self.inference_mean_layer = GraphConvolution(input_dim=input_dim,
                                                 output_dim=self.latent_dim,
                                                 adj=self.A_gcn,
                                                 act=tf.nn.relu,
                                                 dropout_rate=self.dropout,
                                                 name="inference_layer_mean")
    self.inference_log_sigma_layer = GraphConvolution(
        input_dim=input_dim,
        output_dim=self.latent_dim,
        adj=self.A_gcn,
        dropout_rate=self.dropout,
        name="inference_layer_log_sigma",
        act=tf.nn.softplus)
def _build(self):
    # Shared encoder
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        features_nonzero=self.features_nonzero,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.inputs)
    self.hidden2 = GraphConvolutionSparse(
        input_dim=self.input_dim,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        features_nonzero=self.features_nonzero,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.inputs)
    self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.hidden2,
                                       adj=self.adj,
                                       act=tf.nn.relu,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)
    # self.z_mean = self.embeddings

    # Decoder 1: attribute reconstruction
    self.attribute_decoder_layer1 = GraphConvolution(
        input_dim=FLAGS.hidden2,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.embeddings)
    self.attribute_decoder_layer2 = GraphConvolution(
        input_dim=FLAGS.hidden1,
        output_dim=self.input_dim,
        adj=self.adj,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.attribute_decoder_layer1)

    # Decoder 2: structure reconstruction
    self.structure_decoder_layer1 = GraphConvolution(
        input_dim=FLAGS.hidden2,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.embeddings)
    self.structure_decoder_layer2 = InnerProductDecoder(
        input_dim=FLAGS.hidden1,
        act=tf.nn.sigmoid,
        logging=self.logging)(self.structure_decoder_layer1)

    self.attribute_reconstructions = self.attribute_decoder_layer2
    self.structure_reconstructions = self.structure_decoder_layer2