def _build(self):
    """Two-layer GCN autoencoder; layer objects are retained so their
    weight matrices can be exported as W1/W2."""
    # Sparse-input convolution; the layer handle is kept for get_weight().
    self.mylayer1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                           output_dim=FLAGS.hidden1,
                                           adj=self.adj,
                                           features_nonzero=self.features_nonzero,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)
    self.hidden1 = self.mylayer1(self.inputs)

    # Linear second convolution produces the node embeddings.
    self.mylayer2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                     output_dim=FLAGS.hidden2,
                                     adj=self.adj,
                                     act=lambda x: x,
                                     dropout=self.dropout,
                                     logging=self.logging)
    self.embeddings = self.mylayer2(self.hidden1)

    # Expose the layer weights for callers.
    self.W1 = self.mylayer1.get_weight()
    self.W2 = self.mylayer2.get_weight()

    self.z_mean = self.embeddings
    self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                               act=lambda x: x,
                                               logging=self.logging)(self.embeddings)
def _build(self):
    # GCN encoder followed by two decoders: one reconstructing node
    # attributes, one reconstructing graph structure.
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        features_nonzero=self.features_nonzero,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.inputs)
    # NOTE(review): hidden2 is never consumed below, but constructing the
    # layer still creates trainable variables in the graph. Presumably a
    # leftover — confirm before removing (it would change checkpoints).
    self.hidden2 = GraphConvolutionSparse(
        input_dim=self.input_dim,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        features_nonzero=self.features_nonzero,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.inputs)
    self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.hidden2,
                                       adj=self.adj,
                                       act=tf.nn.relu,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)
    # self.z_mean = self.embeddings
    # decoder1: two GCN layers mapping embeddings back to input features.
    self.attribute_decoder_layer1 = GraphConvolution(
        input_dim=FLAGS.hidden2,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.embeddings)
    self.attribute_decoder_layer2 = GraphConvolution(
        input_dim=FLAGS.hidden1,
        output_dim=self.input_dim,
        adj=self.adj,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.attribute_decoder_layer1)
    # decoder2: a GCN layer plus a sigmoid inner-product decoder that
    # reconstructs the adjacency matrix.
    self.structure_decoder_layer1 = GraphConvolution(
        input_dim=FLAGS.hidden2,
        output_dim=FLAGS.hidden1,
        adj=self.adj,
        act=tf.nn.relu,
        dropout=self.dropout,
        logging=self.logging)(self.embeddings)
    self.structure_decoder_layer2 = InnerProductDecoder(
        input_dim=FLAGS.hidden1,
        act=tf.nn.sigmoid,
        logging=self.logging)(self.structure_decoder_layer1)
    self.attribute_reconstructions = self.attribute_decoder_layer2
    self.structure_reconstructions = self.structure_decoder_layer2
def _build(self):
    """GCN autoencoder whose first hidden layer is perturbed with Gaussian
    noise before the second (linear) convolution."""
    with tf.variable_scope('Encoder', reuse=None):
        first = GraphConvolutionSparse(input_dim=self.input_dim,
                                       output_dim=FLAGS.hidden1,
                                       adj=self.adj,
                                       features_nonzero=self.features_nonzero,
                                       act=tf.nn.relu,
                                       dropout=self.dropout,
                                       logging=self.logging,
                                       name='e_dense_1')(self.inputs)
        self.hidden1 = first
        # Inject additive Gaussian noise (std 0.1) into the hidden layer.
        self.noise = gaussian_noise_layer(first, 0.1)
        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging,
                                           name='e_dense_2')(self.noise)
        self.z_mean = self.embeddings
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
def __init__(self, hidden1, hidden2, num_features, num_nodes, features_nonzero, dropout):
    """Construct the CAN layers.

    hidden1/hidden2 are layer widths; num_features and num_nodes size the
    two input branches; features_nonzero counts non-zero sparse entries;
    dropout is forwarded to every layer.
    """
    super(CAN, self).__init__()
    # Record the configuration on the instance.
    self.input_dim = num_features
    self.features_nonzero = features_nonzero
    self.n_samples = num_nodes
    self.dropout = dropout
    # The parameters given here are forwarded to each layer's own __init__.
    self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                          output_dim=hidden1,
                                          dropout=self.dropout,
                                          features_nonzero=self.features_nonzero)
    self.hidden2 = Dense(input_dim=self.n_samples,
                         output_dim=hidden1,
                         sparse_inputs=True)
    # Posterior heads: mean and log-std for both the u- and a-branches.
    self.z_u_mean = GraphConvolution(input_dim=hidden1, output_dim=hidden2, dropout=self.dropout)
    self.z_u_log_std = GraphConvolution(input_dim=hidden1, output_dim=hidden2, dropout=self.dropout)
    self.z_a_mean = Dense(input_dim=hidden1, output_dim=hidden2, dropout=self.dropout)
    self.z_a_log_std = Dense(input_dim=hidden1, output_dim=hidden2, dropout=self.dropout)
    self.reconstructions = InnerDecoder(input_dim=hidden2)
def _build(self):
    """Variational GAE: GCN encoder, reparameterized latent sample, and an
    inner-product decoder."""
    first_hidden = GraphConvolutionSparse(input_dim=self.input_dim,
                                          output_dim=FLAGS.hidden1,
                                          adj=self.adj,
                                          features_nonzero=self.features_nonzero,
                                          act=tf.nn.relu,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.inputs)
    self.hidden1 = first_hidden

    # Two parallel linear heads yield the Gaussian posterior parameters.
    self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                   output_dim=FLAGS.hidden2,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(first_hidden)
    self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                      output_dim=FLAGS.hidden2,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(first_hidden)

    # TODO: output the hidden vector z, which is the node embedding vector
    # Reparameterization trick: z = mu + eps * sigma.
    eps = tf.random_normal([self.n_samples, FLAGS.hidden2])
    self.z = self.z_mean + eps * tf.exp(self.z_log_std)
    logging.info('Finish calculating the latent vector!!!!!!!!!')

    self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                               act=lambda x: x,
                                               logging=self.logging)(self.z)
def build(self):
    """Three-layer GCN encoder whose per-layer outputs are blended with
    attention coefficients, decoded by a sigmoid inner-product decoder."""
    # Apply sparse dropout to the adjacency itself before any convolution.
    self.adj = dropout_sparse(self.adj, 1 - self.adjdp, self.adj_nonzero)

    self.hidden1 = GraphConvolutionSparse(name='gcn_sparse_layer',
                                          input_dim=self.input_dim,
                                          output_dim=self.emb_dim,
                                          adj=self.adj,
                                          features_nonzero=self.features_nonzero,
                                          dropout=self.dropout,
                                          act=self.act)(self.inputs)
    self.hidden2 = GraphConvolution(name='gcn_dense_layer',
                                    input_dim=self.emb_dim,
                                    output_dim=self.emb_dim,
                                    adj=self.adj,
                                    dropout=self.dropout,
                                    act=self.act)(self.hidden1)
    self.emb = GraphConvolution(name='gcn_dense_layer2',
                                input_dim=self.emb_dim,
                                output_dim=self.emb_dim,
                                adj=self.adj,
                                dropout=self.dropout,
                                act=self.act)(self.hidden2)

    # Blend the three representations with the learned attention weights.
    combined = (self.hidden1 * self.att[0]
                + self.hidden2 * self.att[1]
                + self.emb * self.att[2])
    self.embeddings = combined

    self.reconstructions = InnerProductDecoder(name='gcn_decoder',
                                               input_dim=self.emb_dim,
                                               num_r=self.num_r,
                                               act=tf.nn.sigmoid)(self.embeddings)
def _build(self):
    """Variational GAE built inside an 'Encoder' variable scope; the latent
    sample is exposed both as reconstruction input and as embeddings."""
    with tf.variable_scope('Encoder'):
        base = GraphConvolutionSparse(input_dim=self.input_dim,
                                      output_dim=FLAGS.hidden1,
                                      adj=self.adj,
                                      features_nonzero=self.features_nonzero,
                                      act=tf.nn.relu,
                                      dropout=self.dropout,
                                      logging=self.logging,
                                      name='e_dense_1')(self.inputs)
        self.hidden1 = base
        # Posterior parameter heads.
        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging,
                                       name='e_dense_2')(base)
        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging,
                                          name='e_dense_3')(base)
        # Reparameterization trick.
        eps = tf.random_normal([self.n_samples, FLAGS.hidden2])
        self.z = self.z_mean + eps * tf.exp(self.z_log_std)
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.z)
        self.embeddings = self.z
def _build(self):
    """Variational GAE parameterized by instance-level hidden dims; noise is
    drawn in float64 to match the rest of the graph."""
    base = GraphConvolutionSparse(input_dim=self.input_dim,
                                  output_dim=self.hidden1_dim,
                                  adj=self.adj,
                                  features_nonzero=self.features_nonzero,
                                  act=tf.nn.relu,
                                  dropout=self.dropout,
                                  logging=self.logging)(self.inputs)
    self.hidden1 = base
    # Posterior heads: mean and log standard deviation.
    self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                   output_dim=self.hidden2_dim,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(base)
    self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                      output_dim=self.hidden2_dim,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(base)
    # Reparameterized draw; float64 noise keeps dtypes consistent.
    eps = tf.random_normal([self.n_samples, self.hidden2_dim], dtype=tf.float64)
    self.z = self.z_mean + eps * tf.exp(self.z_log_std)
    self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                               act=lambda x: x,
                                               logging=self.logging)(self.z)
def encoder(self, inputs):
    """Encode `inputs` into a reparameterized latent sample z."""
    with tf.variable_scope('encoder') as scope:
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name="encoder_conv1")(inputs)
        # Gaussian posterior parameters.
        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.latent_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging,
                                       name="encoder_conv2")(self.hidden1)
        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.latent_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging,
                                          name="encoder_conv3")(self.hidden1)
        # Reparameterized sample — the middle hidden layer of the model.
        noise = tf.random_normal([self.n_samples, FLAGS.latent_dim])
        z = self.z_mean + noise * tf.exp(self.z_log_std)
        return z
def _build(self):
    # Temporal model: one GCN autoencoder per timestep, a TCN over the
    # per-timestep embeddings, and a Dense head per timestep on the TCN output.
    with tf.variable_scope('Encoder', reuse=None):
        self.embeddings = []       # per-timestep embeddings, reshaped for concat
        self.reconstructions = []  # per-timestep inner-product reconstructions
        for ts, (struct_adj_norm, struct_feature) in enumerate(
                zip(self.struct_adj_norms, self.struct_features)):
            features_nonzero = self.features_nonzeros[ts]
            # Per-timestep sparse GCN layer (uniquely named per ts).
            self.hidden1 = GraphConvolutionSparse(
                input_dim=self.feature_dim,
                output_dim=FLAGS.hidden1,
                adj=struct_adj_norm,
                features_nonzero=features_nonzero,
                act=tf.nn.relu,
                dropout=self.dropout,
                logging=self.logging,
                name='e_dense_1_{}'.format(ts))(struct_feature)
            # Additive Gaussian noise (std 0.1) between encoder layers.
            self.noise = gaussian_noise_layer(self.hidden1, 0.1)
            embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.hidden2,
                                          adj=struct_adj_norm,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging,
                                          name='e_dense_2_{}'.format(ts))(
                                              self.noise)
            # for auxilary loss: reconstruct each timestep's structure.
            reconstructions = InnerProductDecoder(
                input_dim=FLAGS.hidden2, logging=self.logging)(embeddings)
            self.embeddings.append(
                tf.reshape(embeddings, [self.num_node, 1, FLAGS.hidden2]))
            self.reconstructions.append(reconstructions)
        # TCN over the sequence of per-timestep embeddings (axis 1 = time).
        sequence = tf.concat(self.embeddings, axis=1, name='concat_embedding')
        self.sequence_out = TCN(num_channels=FLAGS.hidden3,
                                sequence_length=self.seq_len)(sequence)
        self.reconstructions_tss = []
        # Dense head per timestep, flattened to a vector.
        for ts in range(self.seq_len):
            reconstructions_ts = Dense(input_dim=FLAGS.hidden3[-1],
                                       classes=self.num_node)(
                                           self.sequence_out[:, ts, :])
            reconstructions_ts = tf.reshape(reconstructions_ts, [-1])
            self.reconstructions_tss.append(reconstructions_ts)
def _build(self):
    # Bipartite autoencoder: separate GCN encoders for users and items whose
    # stacked embeddings are decoded by Recommender_Decoder.
    # NOTE(review): the user branch is built with input_dim=self.input_dim_items
    # and the item branch with input_dim_users. Presumably each side's feature
    # vector is indexed by the opposite entity set — confirm against how
    # inputs_users/inputs_items are constructed before "fixing" this.
    self.hidden1_user = GraphConvolutionSparse(input_dim=self.input_dim_items,
                                               output_dim=FLAGS.hidden1,
                                               adj=self.adj_user,
                                               features_nonzero=self.features_nonzero_users,
                                               act=tf.nn.relu,
                                               dropout=self.dropout,
                                               logging=self.logging)(self.inputs_users)
    self.embeddings_user = GraphConvolution(input_dim=FLAGS.hidden1,
                                            output_dim=FLAGS.hidden2,
                                            adj=self.adj_user,
                                            act=lambda x: x,
                                            dropout=self.dropout,
                                            logging=self.logging)(self.hidden1_user)
    # Item branch mirrors the user branch with its own adjacency/features.
    self.hidden1_item = GraphConvolutionSparse(input_dim=self.input_dim_users,
                                               output_dim=FLAGS.hidden1,
                                               adj=self.adj_item,
                                               features_nonzero=self.features_nonzero_items,
                                               act=tf.nn.relu,
                                               dropout=self.dropout,
                                               logging=self.logging)(self.inputs_items)
    self.embeddings_item = GraphConvolution(input_dim=FLAGS.hidden1,
                                            output_dim=FLAGS.hidden2,
                                            adj=self.adj_item,
                                            act=lambda x: x,
                                            dropout=self.dropout,
                                            logging=self.logging)(self.hidden1_item)
    # Decoder consumes user embeddings stacked on top of item embeddings.
    self.reconstructions = Recommender_Decoder(
        act=lambda x: x,
        num_u = FLAGS.num_u,
        num_v = FLAGS.num_v,
        logging=self.logging)(tf.concat([self.embeddings_user, self.embeddings_item], 0))
def _build(self):
    """Binary-embedding autoencoder: noisy sparse GCN, two stacked GCN
    layers (relu then softsign), with the sign of the embedding exposed as
    z_mean and reconstructions decoded from the real-valued embedding."""
    with tf.variable_scope('Encoder', reuse=None):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name='e_dense_1')(self.inputs)
        # Additive Gaussian noise (std 0.1) between encoder layers.
        self.noise = gaussian_noise_layer(self.hidden1, 0.1)
        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging,
                                           name='e_dense_2')(self.noise)
        # BUG FIX: e_dense_2 outputs FLAGS.hidden2 features, so this layer's
        # input_dim must be FLAGS.hidden2 — the original FLAGS.hidden1 only
        # produced a valid matmul when hidden1 == hidden2.
        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj,
                                           act=tf.nn.softsign,
                                           dropout=self.dropout,
                                           logging=self.logging,
                                           name='e_dense_3')(self.embeddings)
        # Binary code: sign of the softsign-squashed embedding.
        self.a = tf.sign(self.embeddings)
        self.z_mean = self.a
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
class GCNModelAE_CITE(Model):
    """GCN autoencoder whose decoder is a Recommender_Decoder over the two
    node partitions of sizes num_u and num_v."""

    def __init__(self, placeholders, num_features, features_nonzero, num_u, num_v, **kwargs):
        super(GCNModelAE_CITE, self).__init__(**kwargs)
        # Graph tensors arrive through the placeholder dictionary.
        self.inputs = placeholders['features']
        self.input_dim = num_features
        self.features_nonzero = features_nonzero
        self.adj = placeholders['adj']
        self.dropout = placeholders['dropout']
        self.num_u = num_u
        self.num_v = num_v
        self.build()

    def _build(self):
        # Sparse first convolution; the layer object is kept so its weight
        # matrix can be exported below via get_weight().
        self.mylayer1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                               output_dim=FLAGS.hidden1,
                                               adj=self.adj,
                                               features_nonzero=self.features_nonzero,
                                               act=tf.nn.relu,
                                               dropout=self.dropout,
                                               logging=self.logging)
        self.hidden1 = self.mylayer1(self.inputs)
        # Linear second convolution yields the embeddings.
        self.mylayer2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                         output_dim=FLAGS.hidden2,
                                         adj=self.adj,
                                         act=lambda x: x,
                                         dropout=self.dropout,
                                         logging=self.logging)
        self.embeddings = self.mylayer2(self.hidden1)
        self.z_mean = self.embeddings
        # Export the layer weights for downstream use.
        self.W1 = self.mylayer1.get_weight()
        self.W2 = self.mylayer2.get_weight()
        self.reconstructions = Recommender_Decoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   num_u=self.num_u,
                                                   num_v=self.num_v,
                                                   logging=self.logging)(self.embeddings)
def _build(self):
    """Variational GAE with a fully-connected feature decoder instead of the
    usual inner-product decoder.

    Removed a leftover debug `print(self.inputs)` and dead commented-out
    decoder variants; behavior is otherwise unchanged.
    """
    self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                          output_dim=FLAGS.hidden1,
                                          adj=self.adj,
                                          features_nonzero=self.features_nonzero,
                                          act=tf.nn.relu,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.inputs)
    # Posterior parameter heads (FLAGS.hidden1 -> FLAGS.hidden2).
    self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                   output_dim=FLAGS.hidden2,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(self.hidden1)
    self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                      output_dim=FLAGS.hidden2,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(self.hidden1)
    # Reparameterization trick: z = mu + eps * sigma.
    self.z = self.z_mean + tf.random_normal([self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
    self.reconstructions = FullyConnectedDecoder(input_dim=FLAGS.hidden2,
                                                 output_dim=self.input_dim,
                                                 adj=self.adj,
                                                 features_nonzero=self.features_nonzero,
                                                 act=lambda x: x,
                                                 inputs=self.inputs,
                                                 dropout=self.dropout,
                                                 logging=self.logging)(self.z)
def _build(self):
    """Plain two-layer GCN autoencoder; the embedding doubles as z_mean."""
    encoder_l1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                        output_dim=self.hidden1_dim,
                                        adj=self.adj,
                                        features_nonzero=self.features_nonzero,
                                        act=tf.nn.relu,
                                        dropout=self.dropout,
                                        logging=self.logging)
    self.hidden1 = encoder_l1(self.inputs)

    encoder_l2 = GraphConvolution(input_dim=self.hidden1_dim,
                                  output_dim=self.hidden2_dim,
                                  adj=self.adj,
                                  act=lambda x: x,
                                  dropout=self.dropout,
                                  logging=self.logging)
    self.embeddings = encoder_l2(self.hidden1)
    self.z_mean = self.embeddings

    self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                               act=lambda x: x,
                                               logging=self.logging)(self.embeddings)
def _build(self):
    """GCN encoder plus privacy-aware heads: the decoder and the attribute
    head consume the embedding concatenated with the privacy attribute."""
    with tf.variable_scope('Encoder', reuse=None):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name='e_dense_1')(self.inputs)
        # (Gaussian noise injection is disabled in this variant.)
        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging,
                                           name='e_dense_2')(self.hidden1)
        self.z_mean = self.embeddings
        self.embeddings = tf.identity(self.embeddings, name="emb")
        # Widen the embedding to 64 units before concatenation.
        self.embeddings_long = tf.layers.dense(inputs=self.embeddings,
                                               units=64,
                                               activation=tf.nn.relu)
        self.embeddings_concat = tf.concat([self.privacy_attr, self.embeddings_long], 1)
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings_concat)
        # Attribute-prediction and privacy-prediction heads.
        self.attr_logits = tf.layers.dense(inputs=self.embeddings_concat,
                                           units=self.dim_attr[0])
        self.pri_logits = dense(self.embeddings_long, 64, self.dim_attr[1], name='pri_den')
def _build(self):
    # Mean and (log) standard deviation of the latent distribution; each is a
    # trainable base of width FLAGS.hidden2 that the loop below accumulates into.
    self.z_mean = tf.Variable(tf.zeros([self.input_dim, FLAGS.hidden2]), name='zmean')
    self.z_log_std = tf.Variable(tf.zeros([self.input_dim, FLAGS.hidden2]), name='z_log_std')
    # Iterate over the 0/1 adjacency matrices, summing each view's posterior
    # parameters into the bases above.
    # NOTE(review): the original comment said "10 matrices" but the loop runs
    # 20 iterations — confirm which count is intended.
    for i in range(20):
        # First hidden layer of the VAE (default output width is 64).
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj[i],
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)
        # Chebyshev expansion of the GCN is used to reduce the parameter count.
        self.z_mean1 = GraphConvolution_Chebyshev5(input_dim=FLAGS.hidden1,
                                                   output_dim=FLAGS.hidden2,
                                                   adj=self.adj[i],
                                                   act=lambda x: x,
                                                   dropout=self.dropout,
                                                   logging=self.logging)(self.hidden1)
        self.z_log_std1 = GraphConvolution_Chebyshev5(input_dim=FLAGS.hidden1,
                                                      output_dim=FLAGS.hidden2,
                                                      adj=self.adj[i],
                                                      act=lambda x: x,
                                                      dropout=self.dropout,
                                                      logging=self.logging)(self.hidden1)
        # Standard VAE accumulation of the per-view parameters.
        self.z_mean = self.z_mean1 + self.z_mean
        self.z_log_std = self.z_log_std1 + self.z_log_std
    # Reparameterization trick over the accumulated parameters, then decode.
    self.z = self.z_mean + tf.random_normal([self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
    self.reconstructions, self.logits_output = Xunying_InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                                           act=lambda x: x,
                                                                           logging=self.logging)(self.z)
def __init__(self, adj, input_dim, output_dim, dropout, hidden_dim, features_nonzero):
    """GCN network constructor.

    Parameters:
        adj (sparse torch FloatTensor): adjacency matrix
        input_dim (int): number of features
        output_dim (int): output feature dimension
        dropout (float): dropout rate
        hidden_dim (int): hidden feature dimension
        features_nonzero (int): number of non-zero features
    """
    super(GCNModel, self).__init__()
    # Record the configuration on the instance.
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.hidden_dim = hidden_dim
    self.features_nonzero = features_nonzero
    self.adj = adj
    self.dropout = dropout
    # Encoder: sparse-input ReLU convolution, then an identity-activation
    # convolution; decoder is an inner product over the embeddings.
    self.graph_conv_sparse = GraphConvolutionSparse(input_dim=self.input_dim,
                                                    output_dim=self.hidden_dim,
                                                    adj=self.adj,
                                                    features_nonzero=self.features_nonzero,
                                                    act=nn.ReLU(),
                                                    dropout=self.dropout)
    self.graph_conv = GraphConvolution(input_dim=self.hidden_dim,
                                       output_dim=self.output_dim,
                                       adj=self.adj,
                                       act=dummy_func,
                                       dropout=self.dropout)
    self.inner_prod = InnerProductDecoder(act=dummy_func)
def encoder(self, inputs):
    """Compute the posterior parameters (z_mean, z_log_std) for `inputs`."""
    # First layer runs with dropout disabled; the posterior heads use
    # self.dropout as configured.
    first = GraphConvolutionSparse(input_dim=self.input_dim,
                                   output_dim=FLAGS.hidden1,
                                   adj=self.adj,
                                   features_nonzero=self.features_nonzero,
                                   act=tf.nn.relu,
                                   dropout=0.,
                                   logging=self.logging)(inputs)
    self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                   output_dim=FLAGS.hidden2,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(first)
    self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                      output_dim=FLAGS.hidden2,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(first)
def _build(self):
    """Sum GCN embeddings over ten adjacency views into a trainable base,
    then decode with Xunying_InnerProductDecoder."""
    # Trainable base the per-view embeddings are accumulated onto.
    self.z_mean = tf.Variable(tf.zeros([self.input_dim, FLAGS.hidden2]), name='zmean')
    for view in range(10):
        self.hidden1_i = GraphConvolutionSparse(input_dim=self.input_dim,
                                                output_dim=FLAGS.hidden1,
                                                adj=self.adj[view],
                                                features_nonzero=self.features_nonzero,
                                                act=tf.nn.relu,
                                                dropout=self.dropout,
                                                logging=self.logging)(self.inputs)
        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj[view],
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1_i)
        # Fold this view's embedding into the running sum.
        self.z_mean = self.embeddings + self.z_mean
    self.reconstructions, self.logits_output = Xunying_InnerProductDecoder(
        input_dim=FLAGS.hidden2,
        act=lambda x: x,
        logging=self.logging)(self.z_mean)
def _build(self):
    """Variational GAE whose encoder is either attention-based or plain GCN,
    selected by self.attn; both paths end in a reparameterized sample and an
    inner-product decoder."""
    if self.attn:
        # Attentive path: separate input/attention/feature dropout knobs.
        self.hidden1 = AttentiveGraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            in_drop=self.in_drop,
            attn_drop=self.attn_drop,
            feat_drop=self.feat_drop,
            logging=self.logging)(self.inputs)
        self.z_mean = AttentiveGraphConvolution(
            input_dim=FLAGS.hidden1,
            output_dim=FLAGS.hidden2,
            adj=self.adj,
            act=lambda x: x,
            in_drop=self.in_drop,
            attn_drop=self.attn_drop,
            feat_drop=self.feat_drop,
            logging=self.logging)(self.hidden1)
        self.z_log_std = AttentiveGraphConvolution(
            input_dim=FLAGS.hidden1,
            output_dim=FLAGS.hidden2,
            adj=self.adj,
            act=lambda x: x,
            in_drop=self.in_drop,
            attn_drop=self.attn_drop,
            feat_drop=self.feat_drop,
            logging=self.logging)(self.hidden1)
    else:
        # Plain GCN path: in_drop doubles as the ordinary dropout rate.
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.in_drop,
            logging=self.logging)(self.inputs)
        self.z_mean = GraphConvolution(
            input_dim=FLAGS.hidden1,
            output_dim=FLAGS.hidden2,
            adj=self.adj,
            act=lambda x: x,
            dropout=self.in_drop,
            logging=self.logging)(self.hidden1)
        self.z_log_std = GraphConvolution(
            input_dim=FLAGS.hidden1,
            output_dim=FLAGS.hidden2,
            adj=self.adj,
            act=lambda x: x,
            dropout=self.in_drop,
            logging=self.logging)(self.hidden1)
    # Reparameterization trick: z = mu + eps * sigma.
    eps = tf.random_normal([self.n_samples, FLAGS.hidden2])
    self.z = self.z_mean + eps * tf.exp(self.z_log_std)
    self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                               act=lambda x: x,
                                               logging=self.logging)(self.z)
def _build(self):
    """Deterministic GAE whose encoder is attention-based or plain GCN
    (selected by self.attn) and whose decoder is bilinear or inner-product
    (selected by self.bilinear)."""
    if self.attn:
        # Attentive two-layer encoder.
        self.hidden1 = AttentiveGraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            in_drop=self.in_drop,
            attn_drop=self.attn_drop,
            feat_drop=self.feat_drop,
            logging=self.logging)(self.inputs)
        self.embeddings = AttentiveGraphConvolution(
            input_dim=FLAGS.hidden1,
            output_dim=FLAGS.hidden2,
            adj=self.adj,
            act=lambda x: x,
            in_drop=self.in_drop,
            attn_drop=self.attn_drop,
            feat_drop=self.feat_drop,
            logging=self.logging)(self.hidden1)
    else:
        # Plain GCN two-layer encoder; in_drop serves as the dropout rate.
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.in_drop,
            logging=self.logging)(self.inputs)
        self.embeddings = GraphConvolution(
            input_dim=FLAGS.hidden1,
            output_dim=FLAGS.hidden2,
            adj=self.adj,
            act=lambda x: x,
            dropout=self.in_drop,
            logging=self.logging)(self.hidden1)
    self.z_mean = self.embeddings
    if self.bilinear:
        self.reconstructions = BilinearDecoder(input_dim=FLAGS.hidden2,
                                               dropout=self.in_drop,
                                               act=lambda x: x,
                                               logging=self.logging)(self.embeddings)
    else:
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
def _build(self):
    """Variational GAE: sigmoid first layer, Gaussian posterior heads, and a
    GCN-based feature decoder (GraphConvolutionDec) mapping z back to
    input_dim.

    Large blocks of dead commented-out experimental encoder/decoder stacks
    were removed; behavior is unchanged (see version control for history).
    """
    self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                          output_dim=FLAGS.hidden1,
                                          adj=self.adj,
                                          features_nonzero=self.features_nonzero,
                                          act=tf.nn.sigmoid,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.inputs)
    # Posterior parameter heads (linear activations).
    self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                   output_dim=FLAGS.hidden2,
                                   adj=self.adj,
                                   act=lambda x: x,
                                   dropout=self.dropout,
                                   logging=self.logging)(self.hidden1)
    self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                      output_dim=FLAGS.hidden2,
                                      adj=self.adj,
                                      act=lambda x: x,
                                      dropout=self.dropout,
                                      logging=self.logging)(self.hidden1)
    # Reparameterization trick: z = mu + eps * sigma.
    self.z = self.z_mean + tf.random_normal([self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
    # Decode node features directly from the latent sample.
    self.reconstructions = GraphConvolutionDec(input_dim=FLAGS.hidden2,
                                               output_dim=self.input_dim,
                                               adj=self.adj,
                                               act=lambda x: x,
                                               dropout=self.dropout,
                                               logging=self.logging)(self.z)
def _build(self):
    # Stick-breaking / Kumaraswamy latent model: a GCN stack produces
    # pi_logit, a Kumaraswamy sample forms the stick-breaking prior, and a
    # discrete z is drawn from the combined posterior logits.
    # total bias not being used for now
    entity_bias = self.e
    entity_bias_matrix = tf.tile(entity_bias, [1, self.n_samples])
    entity_bias_matrix += tf.transpose(entity_bias_matrix)
    self.total_bias = entity_bias_matrix + tf.tile(
        self.c, [self.n_samples, self.n_samples])
    # Encoder stack: first layer is sparse; intermediate layers use
    # leaky-relu; the final layer is linear. With a single hidden layer the
    # first layer is also the last, hence the linear activation there.
    for idx, hidden_layer in enumerate(self.hidden):
        if idx == 0:
            if self.num_hidden_layers == 1:
                activ = lambda x: x
            else:
                activ = lambda x: tf.nn.leaky_relu(x, alpha=0.2)
            h = GraphConvolutionSparse(
                input_dim=self.input_dim,
                output_dim=hidden_layer,
                adj=self.adj,
                features_nonzero=self.features_nonzero,
                act=activ,
                dropout=self.dropout,
                logging=self.logging)(self.inputs)
        elif idx == self.num_hidden_layers - 1:
            h = GraphConvolution(input_dim=self.hidden[idx - 1],
                                 output_dim=hidden_layer,
                                 adj=self.adj,
                                 act=lambda x: x,
                                 dropout=self.dropout,
                                 logging=self.logging)(h)
        else:
            h = GraphConvolution(
                input_dim=self.hidden[idx - 1],
                output_dim=hidden_layer,
                adj=self.adj,
                act=lambda x: tf.nn.leaky_relu(x, alpha=0.2),
                dropout=self.dropout,
                logging=self.logging)(h)
    self.pi_logit = h
    # softplus(...) + 0.01 keeps the Kumaraswamy parameters strictly
    # positive; the 0.01 floor avoids degenerate values near zero.
    beta_a = tf.nn.softplus(self.a) + 0.01
    beta_b = tf.nn.softplus(self.b) + 0.01
    beta_a = tf.expand_dims(beta_a, 0)
    beta_b = tf.expand_dims(beta_b, 0)
    self.beta_a = tf.tile(beta_a, [self.n_samples, 1])
    self.beta_b = tf.tile(beta_b, [self.n_samples, 1])
    # Stick-breaking prior: cumulative sum of log Kumaraswamy samples.
    self.v = kumaraswamy_sample(self.beta_a, self.beta_b)
    v_term = tf.log(self.v + SMALL)
    self.log_prior = tf.cumsum(v_term, axis=1)
    self.logit_post = self.pi_logit + logit(tf.exp(self.log_prior))
    # note: logsample is just logit(z_discrete), unless we've rounded
    self.z, _, _, self.y_sample = sample(None, None, self.logit_post, None,
                                         None, FLAGS.temp_post,
                                         calc_v=False, calc_real=False)
    # At evaluation time (training == False), harden z by rounding.
    self.z = tf.cond(tf.equal(self.training, tf.constant(False)),
                     lambda: tf.round(self.z), lambda: self.z)
    if FLAGS.deep_decoder:
        # Two-layer generator before the inner-product reconstruction.
        f = tf.nn.leaky_relu(tf.matmul(self.z, self.w_gen_1) + self.b_gen_1,
                             alpha=0.2)
        f = tf.matmul(f, self.w_gen_2) + self.b_gen_2
        self.reconstructions = InnerProductDecoder(act=lambda x: x,
                                                   logging=self.logging)(f)
    else:
        # Single linear generator layer.
        f = tf.matmul(self.z, self.w_gen_1) + self.b_gen_1
        self.reconstructions = InnerProductDecoder(act=lambda x: x,
                                                   logging=self.logging)(f)
    # Flattened auxiliary feature reconstruction from z.
    self.x_hat = tf.reshape(
        tf.matmul(self.z, self.w_gen_x) + self.b_gen_x, [-1])
print('num_node: ', num_node, ' feature_dim: ', feature_dim, ' pos_weight: ', pos_weight, ' norm: ', norm) # placeholder placeholders = { 'adj_orig': tf.sparse_placeholder(tf.float32), 'adj_norm': tf.sparse_placeholder(tf.float32), 'feature': tf.sparse_placeholder(tf.float32), 'dropout': tf.placeholder_with_default(0., shape=()), } # model hidden1 = GraphConvolutionSparse(input_dim=feature_dim, output_dim=FLAGS.hidden1, adj=placeholders['adj_norm'], features_nonzero=features_nonzero, act=tf.nn.relu, dropout=placeholders['dropout'])( placeholders['feature']) noise = gaussian_noise_layer(hidden1, 0.1) embeddings = GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=placeholders['adj_norm'], act=lambda x: x, dropout=placeholders['dropout'])(noise) reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2, act=lambda x: x)(embeddings) label = tf.reshape(
def _build(self):
    # Three-layer VGAE whose posterior heads read either the concatenation of
    # all three hidden layers (self.cat) or just the last one, plus a
    # Gaussian class-prior head and a softmax class predictor.
    with tf.variable_scope('Encoder'):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name='e_dense_1')(self.inputs)
        self.hidden2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                        output_dim=FLAGS.hidden2,
                                        adj=self.adj,
                                        act=lambda x: x,
                                        dropout=self.dropout,
                                        logging=self.logging,
                                        name='e_dense_2')(self.hidden1)
        self.hidden3 = GraphConvolution(input_dim=FLAGS.hidden2,
                                        output_dim=FLAGS.hidden3,
                                        adj=self.adj,
                                        act=lambda x: x,
                                        dropout=self.dropout,
                                        logging=self.logging,
                                        name='e_dense_3')(self.hidden2)
        if self.cat == True:
            # Concatenate all hidden layers as the posterior heads' input.
            self.merge3 = concatenate([self.hidden1, self.hidden2, self.hidden3], axis=1)
            self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1 + FLAGS.hidden2 + FLAGS.hidden3,
                                           output_dim=FLAGS.hidden4,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging,
                                           name='e_dense_4')(self.merge3)
            self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1 + FLAGS.hidden2 + FLAGS.hidden3,
                                              output_dim=FLAGS.hidden4,
                                              adj=self.adj,
                                              act=lambda x: x,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name='e_dense_5')(self.merge3)
        if self.cat == False:
            # Posterior heads read only the last hidden layer.
            self.z_mean = GraphConvolution(input_dim=FLAGS.hidden3,
                                           output_dim=FLAGS.hidden4,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging,
                                           name='e_dense_4')(self.hidden3)
            self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden3,
                                              output_dim=FLAGS.hidden4,
                                              adj=self.adj,
                                              act=lambda x: x,
                                              dropout=self.dropout,
                                              logging=self.logging,
                                              name='e_dense_5')(self.hidden3)
        # Reparameterization trick, then structure and feature decoders.
        self.z = self.z_mean + tf.random_normal([self.n_samples, FLAGS.hidden4]) * tf.exp(self.z_log_std)
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden4,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.z)
        self.X_reconstructions = tf.layers.dense(inputs=self.z,
                                                 units=self.input_dim,
                                                 activation=tf.nn.relu)
        self.embeddings = self.z
        # add gaussian layer to noise the classes
        gaussian = Gaussian(self.num_classes)
        self.z_prior_mean = gaussian(self.z)
        # output the classes
        # NOTE(review): the local `y` below is never used afterwards, but the
        # layer still creates variables — confirm before removing.
        y = GraphConvolution(input_dim=FLAGS.hidden4,
                             output_dim=FLAGS.class1,
                             adj=self.adj,
                             act=lambda x: x,
                             dropout=self.dropout,
                             logging=self.logging)(self.z)
        self.y = tf.layers.dense(inputs=self.z,
                                 units=self.num_classes,
                                 activation=tf.nn.softmax)
def _build(self):
    # Two parallel VGAEs (one per network) plus an MLP mapping network 1's
    # posterior mean into network 2's embedding space.
    with tf.name_scope('Autoencoder'):
        # --- VGAE over network 1 ---
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim1,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj1,
                                              features_nonzero=self.features_nonzero1,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              name='e_h1',
                                              logging=self.logging)(self.inputs1)
        self.z_mean1 = GraphConvolution(input_dim=FLAGS.hidden1,
                                        output_dim=FLAGS.hidden2,
                                        adj=self.adj1,
                                        act=lambda x: x,
                                        dropout=self.dropout,
                                        name='e_mean1',
                                        logging=self.logging)(self.hidden1)
        self.z_log_std1 = GraphConvolution(input_dim=FLAGS.hidden1,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           name='e_log_std1',
                                           logging=self.logging)(self.hidden1)
        # Reparameterized sample and inner-product reconstruction.
        self.z1 = self.z_mean1 + tf.random_normal(
            [self.n_samples1, FLAGS.hidden2]) * tf.exp(self.z_log_std1)
        self.reconstructions1 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(self.z1)
        # --- VGAE over network 2 (same layout, separate variables) ---
        self.hidden2 = GraphConvolutionSparse(input_dim=self.input_dim2,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj2,
                                              features_nonzero=self.features_nonzero2,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              name='e_h2',
                                              logging=self.logging)(self.inputs2)
        self.z_mean2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                        output_dim=FLAGS.hidden2,
                                        adj=self.adj2,
                                        act=lambda x: x,
                                        dropout=self.dropout,
                                        name='e_mean2',
                                        logging=self.logging)(self.hidden2)
        self.z_log_std2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           name='e_std2',
                                           logging=self.logging)(self.hidden2)
        self.z2 = self.z_mean2 + tf.random_normal(
            [self.n_samples2, FLAGS.hidden2]) * tf.exp(self.z_log_std2)
        self.reconstructions2 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(self.z2)
        # MLP mapping to network2
        # flag selects a non-linear (hidden layer + dropout) or linear map.
        if self.flag:
            dc_den1 = tf.nn.relu(
                tf.nn.dropout(
                    dense(self.z_mean1, FLAGS.hidden2, FLAGS.hidden3,
                          name='e_den1'), 1 - self.dropout))
            self.output = dense(dc_den1, FLAGS.hidden3, FLAGS.hidden2,
                                name='e_output')
        if not self.flag:
            self.output = dense(self.z_mean1, FLAGS.hidden2, FLAGS.hidden2,
                                name='e_den1')
def _build(self):
    # Deterministic twin of the variational dual-network model: one GCN
    # autoencoder per network, plus an MLP mapping network 1's embedding
    # into network 2's embedding space.
    with tf.name_scope('Autoencoder'):
        # --- Autoencoder over network 1 ---
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim1,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj1,
                                              features_nonzero=self.features_nonzero1,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              name='e_h1',
                                              logging=self.logging)(self.inputs1)
        self.embeddings1 = GraphConvolution(input_dim=FLAGS.hidden1,
                                            output_dim=FLAGS.hidden2,
                                            adj=self.adj1,
                                            act=lambda x: x,
                                            dropout=self.dropout,
                                            name='e_1',
                                            logging=self.logging)(self.hidden1)
        self.z_mean1 = self.embeddings1
        self.reconstructions1 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(self.embeddings1)
        # --- Autoencoder over network 2 (same layout, separate variables) ---
        self.hidden2 = GraphConvolutionSparse(input_dim=self.input_dim2,
                                              output_dim=FLAGS.hidden1,
                                              adj=self.adj2,
                                              features_nonzero=self.features_nonzero2,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              name='e_h2',
                                              logging=self.logging)(self.inputs2)
        self.embeddings2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                            output_dim=FLAGS.hidden2,
                                            adj=self.adj2,
                                            act=lambda x: x,
                                            dropout=self.dropout,
                                            name='e_2',
                                            logging=self.logging)(self.hidden2)
        self.z_mean2 = self.embeddings2
        self.reconstructions2 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(self.embeddings2)
        # MLP mapping to network2
        # non-linear: hidden relu layer, then projection back to hidden2.
        if self.flag:
            dc_den1 = tf.nn.relu(
                dense(self.z_mean1, FLAGS.hidden2, FLAGS.hidden3,
                      name='g_den1'))
            self.output = dense(dc_den1, FLAGS.hidden3, FLAGS.hidden2,
                                name='g_output')
        # linear: single dense map.
        if not self.flag:
            self.output = dense(self.z_mean1, FLAGS.hidden2, FLAGS.hidden2,
                                name='g_den1')