def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
  """Forward pass for the joint edge + node model (GCN / GCN).

  Args:
    node_features_in: Input node features; assumed sparse, since it is fed
      to `tf.sparse_concat` below — TODO confirm against callers.
    sp_adj_matrix: Sparse adjacency matrix of the graph.
    is_training: Boolean tensor; enables dropout inside the sub-modules.

  Returns:
    A tuple (logits, adj_matrix_pred) with the node classification logits
    and the predicted adjacency scores.
  """
  # Edge model: embed the nodes with a GCN, then score every node pair.
  with tf.variable_scope('edge-model'):
    node_emb = gcn_module(node_features_in, sp_adj_matrix,
                          self.n_hidden_edge, self.p_drop_edge, is_training,
                          self.input_dim, self.sparse_features)
    adj_matrix_pred = compute_adj(node_emb, self.att_mechanism,
                                  self.p_drop_edge, is_training)
    self.adj_matrix_pred = adj_matrix_pred
  # Node model: classify from [edge embedding || original features].
  with tf.variable_scope('node-model'):
    concat_inputs = tf.sparse_concat(
        axis=1,
        sp_inputs=[
            tf.contrib.layers.dense_to_sparse(node_emb), node_features_in
        ])
    logits = gcn_module(
        concat_inputs,
        sp_adj_matrix,
        self.n_hidden_node,
        self.p_drop_node,
        is_training,
        self.n_hidden_edge[-1] + self.input_dim,
        sparse_features=True)
  return logits, adj_matrix_pred
def compute_inference(self, node_features, adj_matrix, is_training):
  """Forward step for GAE model."""
  with tf.variable_scope('edge-model'):
    # Shared GCN trunk over all but the last hidden layer. N x F.
    trunk = gcn_module(node_features, adj_matrix, self.n_hidden[:-1],
                       self.p_drop, is_training, self.input_dim,
                       self.sparse_features)
    # Two heads on top of the trunk parameterize a Gaussian posterior.
    with tf.variable_scope('mean'):
      z_mean = gcn_module(trunk, adj_matrix, self.n_hidden[-1:], self.p_drop,
                          is_training, self.n_hidden[-2], False)
      self.z_mean = z_mean
    with tf.variable_scope('std'):
      # N x F
      z_log_std = gcn_module(trunk, adj_matrix, self.n_hidden[-1:],
                             self.p_drop, is_training, self.n_hidden[-2],
                             False)
      self.z_log_std = z_log_std
    # Reparameterization trick: perturb the mean with scaled Gaussian
    # noise while training; use the mean directly at eval time.
    noise = tf.random_normal(
        [self.nb_nodes, self.n_hidden[-1]]) * tf.exp(z_log_std)
    z = tf.cond(is_training, lambda: tf.add(z_mean, noise), lambda: z_mean)
    # N x N adjacency scores; sigmoid only for the cached probabilities —
    # the raw scores are what the caller receives.
    adj_matrix_pred = compute_adj(z, self.att_mechanism, self.p_drop,
                                  is_training)
    self.adj_matrix_pred = tf.nn.sigmoid(adj_matrix_pred)
    return adj_matrix_pred
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
  """Forward pass for the joint edge + node model (GCN edge / GAT node).

  Args:
    node_features_in: Input node features; assumed sparse, since it is fed
      to `tf.sparse_concat` below — TODO confirm against callers.
    sp_adj_matrix: Sparse adjacency matrix of the graph.
    is_training: Boolean tensor; enables dropout inside the sub-modules.

  Returns:
    A tuple (logits, adj_matrix_pred) with the node classification logits
    and the predicted adjacency scores.
  """
  # Edge model: embed the nodes with a GCN, then score every node pair.
  with tf.variable_scope('edge-model'):
    node_emb = gcn_module(node_features_in, sp_adj_matrix,
                          self.n_hidden_edge, self.p_drop_edge, is_training,
                          self.input_dim, self.sparse_features)
    adj_matrix_pred = compute_adj(node_emb, self.att_mechanism,
                                  self.p_drop_edge, is_training)
    self.adj_matrix_pred = adj_matrix_pred
  # Node model: GAT over [edge embedding || original features].
  with tf.variable_scope('node-model'):
    concat_inputs = tf.sparse_concat(
        axis=1,
        sp_inputs=[
            tf.contrib.layers.dense_to_sparse(node_emb), node_features_in
        ])
    # Binarize the adjacency values (keep the sparsity pattern) for the
    # attention module.
    binary_adj = tf.SparseTensor(
        indices=sp_adj_matrix.indices,
        values=tf.ones_like(sp_adj_matrix.values),
        dense_shape=sp_adj_matrix.dense_shape)
    logits = gat_module(
        concat_inputs,
        binary_adj,
        self.n_hidden_node,
        self.n_att_node,
        self.p_drop_node,
        is_training,
        self.n_hidden_edge[-1] + self.input_dim,
        sparse_features=True,
        average_last=True)
  return logits, adj_matrix_pred
def compute_inference(self, node_features, adj_matrix, is_training):
  """Forward step for GAE model."""
  with tf.variable_scope('edge-model'):
    # Encode the nodes with a GCN, then score every node pair.
    node_emb = gcn_module(node_features, adj_matrix, self.n_hidden,
                          self.p_drop, is_training, self.input_dim,
                          self.sparse_features)
    adj_matrix_pred = compute_adj(node_emb, self.att_mechanism, self.p_drop,
                                  is_training)
    # Cache probabilities; return the raw (pre-sigmoid) scores.
    self.adj_matrix_pred = tf.nn.sigmoid(adj_matrix_pred)
    return adj_matrix_pred
def compute_inference(self, node_features, adj_matrix, is_training):
  """Forward pass for the MLP node model with an embedding regularizer.

  Args:
    node_features: Input node features.
    adj_matrix: Sparse adjacency matrix; only its `indices` are used, to
      select which node-pair scores are kept for regularization.
    is_training: Boolean tensor; enables dropout inside the MLP.

  Returns:
    Node classification logits.
  """
  with tf.variable_scope('node-model'):
    # `return_hidden=True` makes the module return all layer activations.
    layer_outputs = mlp_module(
        node_features,
        self.n_hidden,
        self.p_drop,
        is_training,
        self.input_dim,
        self.sparse_features,
        use_bias=True,
        return_hidden=True)
    logits = layer_outputs[-1]
    # Pairwise scores of the k-th hidden representation; dropout disabled
    # so the regularizer sees a deterministic embedding.
    reg_repr = layer_outputs[self.semi_emb_k]
    pair_scores = compute_adj(
        reg_repr, self.att_mechanism, self.p_drop, is_training=False)
    # Keep only the scores of node pairs that are actual graph edges.
    self.l2_scores = tf.gather_nd(pair_scores, adj_matrix.indices)
  return logits