def call(self, node_embeddings, adjacency_lists, training=True):
    # Two-layer GCN forward pass: dropout -> graph conv -> ReLU -> dropout -> graph conv.
    node_embeddings = self.dropout1(node_embeddings, training=training)
    x = self.gc1(GNNInput(node_embeddings, adjacency_lists))
    x = tf.nn.relu(x)
    x = self.dropout2(x, training=training)
    x = self.gc2(GNNInput(x, adjacency_lists))
    # Per-node class probabilities.
    return tf.math.softmax(x, axis=-1)
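# A minimal usage sketch for the forward pass above. Assumption: GNNInput is a
# NamedTuple holding the two fields used positionally in the calls above, and the
# enclosing class is a tf.keras.Model; the names and shapes below are illustrative,
# not the original module's definitions.
from typing import NamedTuple, Tuple
import tensorflow as tf

class GNNInput(NamedTuple):
    node_embeddings: tf.Tensor          # [num_nodes, feature_dim]
    adjacency_lists: Tuple[tf.Tensor, ...]  # one [num_edges, 2] edge-index tensor per edge type

# Toy invocation: 4 nodes, 3 input features, a single edge type.
# node_probs = model(tf.random.normal([4, 3]),
#                    (tf.constant([[0, 1], [1, 2], [2, 3]]),),
#                    training=False)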
def decoder(self, hidden_embeddings, adjacency_lists, training):
    # Decoder stack: reuses the shared weight list in reverse order.
    for layer in range(self.num_layers):
        hidden_embeddings = self.decoder_layers[layer](
            GNNInput(hidden_embeddings, adjacency_lists),
            self.weight[-(layer + 1)], True, training)
    return hidden_embeddings
def call(self, inputs):
    adjacency_lists = inputs.adjacency_lists
    node_embeddings = inputs.node_embeddings
    # Linear transform of the node features, with an optional bias term.
    if self.use_bias:
        node_embeddings = tf.linalg.matmul(node_embeddings, self.weight) + self.bias
    else:
        node_embeddings = tf.linalg.matmul(node_embeddings, self.weight)
    # Aggregate the transformed features from each node's neighbours.
    aggr_out = self.propagate(GNNInput(node_embeddings, adjacency_lists))
    return aggr_out
def call(self, inputs, training):
    adjacency_lists = inputs.adjacency_lists
    node_embeddings = inputs.node_embeddings
    node_embeddings = tf.linalg.matmul(node_embeddings, self.weight)
    # Attention-weighted neighbour aggregation, producing one output per head.
    aggr_out = self.propagate(GNNInput(node_embeddings, adjacency_lists), training)
    if self.concat:
        # Concatenate the attention heads along the feature dimension.
        aggr_out = tf.reshape(aggr_out, [-1, self.heads * self.out_features])
    else:
        # Average the attention heads.
        aggr_out = tf.reduce_mean(aggr_out, axis=1)
    if self.use_bias:
        aggr_out += self.bias
    return aggr_out
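# The two head-aggregation modes in isolation, assuming propagate returns a
# tensor of shape [num_nodes, heads, out_features] (a sketch with made-up sizes,
# not the original propagate output):
import tensorflow as tf

num_nodes, heads, out_features = 4, 8, 16
per_head = tf.random.normal([num_nodes, heads, out_features])

concatenated = tf.reshape(per_head, [-1, heads * out_features])  # shape [4, 128]
averaged = tf.reduce_mean(per_head, axis=1)                      # shape [4, 16]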
def encoder(self, node_embeddings, adjacency_lists, training):
    # Encoder stack: traverses the shared weight list in forward order.
    for layer in range(self.num_layers):
        node_embeddings = self.encoder_layers[layer](
            GNNInput(node_embeddings, adjacency_lists),
            self.weight[layer], False, training)
    return node_embeddings
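# A minimal sketch of the tied-weight indexing used by encoder() and decoder():
# the same weight list is traversed forward by the encoder and backward by the
# decoder, giving a symmetric, weight-tied graph autoencoder. The layer widths
# below are illustrative assumptions, and the True/False flag passed to the
# layers above is presumably a transpose switch (not confirmed by this snippet).
import tensorflow as tf

dims = [32, 16, 8]  # hypothetical layer widths
weight = [tf.Variable(tf.random.normal([dims[i], dims[i + 1]]))
          for i in range(len(dims) - 1)]

num_layers = len(weight)
encoder_order = [weight[layer] for layer in range(num_layers)]         # W0, W1
decoder_order = [weight[-(layer + 1)] for layer in range(num_layers)]  # W1, W0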