def gcs(self, inputs, stack, iteration):
    """
    Creates a graph convolutional layer with a skip connection.

    :param inputs: list of input Tensors, namely
        - input node features;
        - input node features for the skip connection;
        - normalized adjacency matrix;
    :param stack: int, current stack (used to retrieve kernels);
    :param iteration: int, current iteration (used to retrieve kernels);
    :return: output node features.
    """
    x, x_skip, a = inputs

    # When weights are shared, every iteration after the first reuses the
    # kernels stored at index 1 (index 0 holds the first-iteration kernels).
    # NOTE: renamed from `iter`, which shadowed the builtin of the same name.
    itr = 1 if self.share_weights and iteration >= 1 else iteration
    kernel_1, kernel_2, bias = self.kernels[stack][itr]

    # Graph convolution: propagate transformed features over the adjacency.
    output = K.dot(x, kernel_1)
    output = ops.modal_dot(a, output)

    # Skip connection from the original features, regularized with dropout.
    skip = K.dot(x_skip, kernel_2)
    skip = Dropout(self.dropout_rate)(skip)
    output += skip

    if self.use_bias:
        output = K.bias_add(output, bias)
    output = self.gcn_activation(output)

    return output
def call(self, inputs):
    """
    Personalized-PageRank-style propagation: the MLP output is both the
    initial state and the teleport term at every step.
    """
    x, a = inputs

    # Initial feature transform; reused as the teleport term below.
    h0 = self.mlp(x)

    # Z <- (1 - alpha) * A Z + alpha * H0, repeated `propagations` times.
    z = h0
    for _ in range(self.propagations):
        z = (1 - self.alpha) * ops.modal_dot(a, z) + self.alpha * h0

    return self.activation(z)
def call(self, inputs):
    """
    Chebyshev polynomial filtering:
    T_0 = X, T_1 = A X, T_k = 2 A T_{k-1} - T_{k-2};
    the output is sum_k T_k W_k (plus optional bias and activation).
    """
    x, a = inputs

    # Zeroth-order term.
    t_prev = x
    output = K.dot(t_prev, self.kernel[0])

    if self.K > 1:
        # First-order term.
        t_curr = ops.modal_dot(a, x)
        output += K.dot(t_curr, self.kernel[1])
        # Higher-order terms via the two-term recurrence.
        for k in range(2, self.K):
            t_next = 2 * ops.modal_dot(a, t_curr) - t_prev
            output += K.dot(t_next, self.kernel[k])
            t_prev, t_curr = t_curr, t_next

    if self.use_bias:
        output = K.bias_add(output, self.bias)
    return self.activation(output)
def call(self, inputs):
    """
    Standard graph convolution: activation(A X W + b).
    """
    x, a = inputs

    # Transform node features, then propagate over the adjacency.
    h = K.dot(x, self.kernel)
    h = ops.modal_dot(a, h)

    if self.use_bias:
        h = K.bias_add(h, self.bias)
    return self.activation(h)
def call(self, inputs):
    """
    Graph convolution with a learned skip term:
    output = activation(A X W1 + X W2 + b).
    """
    x, a = inputs

    # Propagated term: A (X W1).
    result = ops.modal_dot(a, K.dot(x, self.kernel_1))
    # Skip term: X W2, added without propagation.
    result += K.dot(x, self.kernel_2)

    if self.use_bias:
        result = K.bias_add(result, self.bias)
    if self.activation is not None:
        result = self.activation(result)
    return result
def compute_scores(self, X, A, I):
    """
    Computes per-node scores as one propagation of the transformed
    features: A (X W). `I` is accepted for interface compatibility but
    unused here.
    """
    return ops.modal_dot(A, K.dot(X, self.kernel))
def call(self, inputs):
    """
    DiffPool-style forward pass: computes node embeddings and a soft
    cluster-assignment matrix, adds the link-prediction and entropy
    auxiliary losses, and returns the pooled node features and adjacency.

    :param inputs: list of Tensors, either ``[X, A]`` or ``[X, A, I]``
        where ``I`` is the graph-membership index vector (disjoint mode);
    :return: ``[X_pooled, A_pooled]``, plus the pooled index vector when
        ``I`` was given, plus the assignment matrix ``S`` when
        ``self.return_mask`` is True.
    """
    # Unpack inputs; a 2-D I (column vector) is flattened to 1-D.
    if len(inputs) == 3:
        X, A, I = inputs
        if K.ndim(I) == 2:
            I = I[:, 0]
    else:
        X, A = inputs
        I = None
    # Number of nodes (last dimension of the adjacency).
    N = K.shape(A)[-1]

    # Check if the layer is operating in mixed or batch mode; in those
    # modes the auxiliary losses are averaged over the batch.
    mode = ops.autodetect_mode(X, A)
    self.reduce_loss = mode in (ops.MIXED, ops.BATCH)

    # Get normalized adjacency: add self-loops (sparse- or dense-aware),
    # then normalize.
    if K.is_sparse(A):
        I_ = tf.sparse.eye(N, dtype=A.dtype)
        A_ = tf.sparse.add(A, I_)
    else:
        I_ = tf.eye(N, dtype=A.dtype)
        A_ = A + I_
    fltr = ops.normalize_A(A_)

    # Node embeddings: one graph convolution with the embedding kernel.
    Z = K.dot(X, self.kernel_emb)
    Z = ops.modal_dot(fltr, Z)
    if self.activation is not None:
        Z = self.activation(Z)

    # Compute cluster assignment matrix with the pooling kernel.
    S = K.dot(X, self.kernel_pool)
    S = ops.modal_dot(fltr, S)
    S = activations.softmax(S, axis=-1)  # softmax applied row-wise

    # Link prediction loss: Frobenius norm of (A - S S^T).
    S_gram = ops.modal_dot(S, S, transpose_b=True)
    if mode == ops.MIXED:
        # Densify and broadcast A so it matches the batched S_gram.
        A = tf.sparse.to_dense(A)[None, ...]
    if K.is_sparse(A):
        # A/tf.norm(A) - S_gram/tf.norm(S_gram)
        LP_loss = tf.sparse.add(A, -S_gram)
    else:
        LP_loss = A - S_gram
    LP_loss = tf.norm(LP_loss, axis=(-1, -2))
    if self.reduce_loss:
        LP_loss = K.mean(LP_loss)
    self.add_loss(LP_loss)

    # Entropy loss: encourages confident (near one-hot) assignments;
    # epsilon guards log(0).
    entr = tf.negative(
        tf.reduce_sum(tf.multiply(S, K.log(S + K.epsilon())), axis=-1)
    )
    entr_loss = K.mean(entr, axis=-1)
    if self.reduce_loss:
        entr_loss = K.mean(entr_loss)
    self.add_loss(entr_loss)

    # Pooling: X' = S^T Z, A' = S^T A S.
    X_pooled = ops.modal_dot(S, Z, transpose_a=True)
    A_pooled = ops.matmul_at_b_a(S, A)

    output = [X_pooled, A_pooled]

    # Pooled index vector: each input graph maps to self.k pooled nodes.
    if I is not None:
        I_mean = tf.math.segment_mean(I, I)
        I_pooled = ops.repeat(I_mean, tf.ones_like(I_mean) * self.k)
        output.append(I_pooled)

    if self.return_mask:
        output.append(S)

    return output