import tensorflow as tf


def normalized_laplacian(x: tf.SparseTensor,
                         symmetric: bool = True,
                         shift: float = 0.0) -> tf.SparseTensor:
    """Returns `(1 + shift) * I - D^{-1/2} A D^{-1/2}` (or `(1 + shift) * I - D^{-1} A`).

    Assumes every node has non-zero degree. Degrees are taken as column sums,
    which equal row sums for a symmetric adjacency.
    """
    d = tf.sparse.reduce_sum(x, axis=0)
    if symmetric:
        d = tf.math.rsqrt(d)
        row, col = tf.unstack(x.indices, axis=1)
        x = x.with_values(-x.values * tf.gather(d, row, axis=0) *
                          tf.gather(d, col, axis=0))
    else:
        x = x.with_values(-x.values / tf.gather(d, x.indices[:, 0], axis=0))
    return tf.sparse.add(
        tf.sparse.eye(x.dense_shape[0], dtype=x.dtype) * (1 + shift), x)
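
# A minimal usage sketch with illustrative data (not part of the original
# module): the symmetric normalized Laplacian of an undirected 3-node cycle.
def _normalized_laplacian_example() -> tf.Tensor:
    indices = [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
    adjacency = tf.SparseTensor(indices, tf.ones([6]), dense_shape=[3, 3])
    lap = normalized_laplacian(adjacency)  # I - D^{-1/2} A D^{-1/2}
    return tf.sparse.to_dense(lap)  # diagonal 1.0, off-diagonal -0.5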

def to_symmetric(x: tf.SparseTensor, half: bool = False) -> tf.SparseTensor:
    """Returns `x + x^T`, or their average if `half` is True."""
    xt = tf.sparse.reorder(  # pylint: disable=no-value-for-parameter
        tf.sparse.transpose(x))
    x = tf.sparse.add(x, xt)
    if half:
        x = x.with_values(x.values / 2)
    return x
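
# A minimal usage sketch with illustrative data (not part of the original
# module): symmetrizing a single directed edge; with `half=True` the result
# is the average of `x` and its transpose.
def _to_symmetric_example() -> tf.Tensor:
    x = tf.SparseTensor([[0, 1]], [2.0], dense_shape=[2, 2])
    return tf.sparse.to_dense(to_symmetric(x, half=True))  # [[0, 1], [1, 0]]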

def normalize_sparse(A: tf.SparseTensor,
                     symmetric: bool = True) -> tf.SparseTensor:
    """Returns `D^{-1/2} A D^{-1/2}` (symmetric) or `D^{-1} A` normalization.

    Zero-degree rows are left unscaled rather than producing inf/NaN values.
    """
    row_sum = tf.sparse.reduce_sum(A, axis=1)
    tf.debugging.assert_non_negative(row_sum)
    i, j = tf.unstack(A.indices, axis=-1)
    if symmetric:
        d_vals = tf.math.rsqrt(row_sum)
        d_vals = tf.where(row_sum == 0, tf.ones_like(d_vals), d_vals)
        values = A.values * tf.gather(d_vals, i, axis=0) * tf.gather(
            d_vals, j, axis=0)
    else:
        d_vals = tf.math.reciprocal(row_sum)
        d_vals = tf.where(row_sum == 0, tf.ones_like(d_vals), d_vals)
        values = A.values * tf.gather(d_vals, i, axis=0)
    return A.with_values(values)
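
# A minimal usage sketch with illustrative data (not part of the original
# module): row-normalizing an adjacency that contains an isolated node,
# exercising the zero-degree guard above.
def _normalize_sparse_example() -> tf.Tensor:
    A = tf.SparseTensor([[0, 1], [0, 2], [1, 0]], [1.0, 3.0, 2.0],
                        dense_shape=[3, 3])
    # Row 0 sums to 1 after normalization; row 2 (isolated) stays all-zero.
    return tf.sparse.to_dense(normalize_sparse(A, symmetric=False))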

def multi_attention_v0(features: tf.Tensor, attention: tf.Tensor,
                       adjacency: tf.SparseTensor) -> tf.Tensor:
    """Implementation using unstack / stack / sparse_dense_matmul.

    Args:
        features: [Ni, H, F]
        attention: [E, H]
        adjacency: [No, Ni], E non-zero entries.

    Returns:
        [No, H, F] features.
    """
    features = [
        tf.sparse.sparse_dense_matmul(adjacency.with_values(attn), f)
        for attn, f in zip(tf.unstack(attention, axis=1),
                           tf.unstack(features, axis=1))
    ]
    return tf.stack(features, axis=1)
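
# A minimal usage sketch with random illustrative data (not part of the
# original module), wiring up the shapes from the docstring:
# Ni = No = 2 nodes, E = 2 edges, H = 2 heads, F = 4 features.
def _multi_attention_v0_example() -> tf.Tensor:
    adjacency = tf.SparseTensor([[0, 1], [1, 0]], [1.0, 1.0],
                                dense_shape=[2, 2])
    features = tf.random.normal([2, 2, 4])  # [Ni, H, F]
    attention = tf.random.uniform([2, 2])   # [E, H], one weight per edge/head
    return multi_attention_v0(features, attention, adjacency)  # [No, H, F]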

def laplacian(x: tf.SparseTensor) -> tf.SparseTensor:
    """Returns the unnormalized Laplacian `D - A` as a SparseTensor.

    `stfu` is an external sparse-tensor utility module; its `diag` is assumed
    to build a diagonal SparseTensor from a dense vector of degrees.
    """
    d = tf.sparse.reduce_sum(x, axis=0)
    return tf.sparse.add(stfu.diag(d), x.with_values(-x.values))
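
# A minimal usage sketch with illustrative data (not part of the original
# module), assuming `stfu.diag` behaves as described above: the Laplacian of
# a single undirected edge.
def _laplacian_example() -> tf.Tensor:
    x = tf.SparseTensor([[0, 1], [1, 0]], [1.0, 1.0], dense_shape=[2, 2])
    return tf.sparse.to_dense(laplacian(x))  # [[1, -1], [-1, 1]]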

def sparse_negate(x: tf.SparseTensor) -> tf.SparseTensor:
    """Negates the stored values; the sparsity pattern is unchanged."""
    return x.with_values(-x.values)

def negative(st: tf.SparseTensor) -> tf.SparseTensor:
    """Equivalent to `sparse_negate`; negates the stored values."""
    return st.with_values(-st.values)
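
# A minimal usage sketch with illustrative data (not part of the original
# module): both helpers flip the sign of every stored value without touching
# the indices.
def _negate_example() -> tf.Tensor:
    st = tf.SparseTensor([[0, 0], [1, 1]], [3.0, -1.0], dense_shape=[2, 2])
    return tf.sparse.to_dense(negative(st))  # [[-3, 0], [0, 1]]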