Example #1
def dropout(x):
    # `dropout_rate`, `is_sparse_tensor`, and `SparseDropout` come from the
    # enclosing scope; dispatches on sparse vs. dense input.
    if dropout_rate:
        if is_sparse_tensor(x):
            x = SparseDropout(dropout_rate)(x)
        else:
            x = tf.keras.layers.Dropout(dropout_rate)(x)
    return x
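The SparseDropout layer used above is not defined in the snippet. A minimal sketch of what such a layer might look like (an assumption, not the library's actual implementation): dropout applied to the values of a tf.SparseTensor.

import tensorflow as tf

class SparseDropout(tf.keras.layers.Layer):
    """Minimal sketch: dropout applied to the values of a tf.SparseTensor."""

    def __init__(self, rate, **kwargs):
        super().__init__(**kwargs)
        self.rate = rate

    def call(self, x, training=None):
        if not training:
            return x
        values = tf.nn.dropout(x.values, rate=self.rate)
        return tf.SparseTensor(x.indices, values, x.dense_shape)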
Example #2
def to_csr(st: Union[tf.SparseTensor, CSRSparseMatrix]) -> CSRSparseMatrix:
    if is_sparse_tensor(st):
        return CSRSparseMatrix(st)
    if is_csr_matrix(st):
        return st
    raise TypeError(
        f"st must be a `SparseTensor` or `CSRSparseMatrix`, got {st}")
Example #3
def normalize_asymmetric(
    x: tp.Union[tf.Tensor, tf.SparseTensor]
) -> tp.Union[tf.Tensor, tf.SparseTensor]:
    if is_sparse_tensor(x):
        return normalize_sparse(x, symmetric=False)
    D = tf.reduce_sum(x, axis=1, keepdims=True)
    D = tf.where(D == 0, tf.zeros_like(D), tf.math.reciprocal(D))
    return x * D
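On the dense path this computes D^{-1} A (row normalization), mapping all-zero rows to zero instead of dividing by zero. A quick check, with normalize_asymmetric above in scope:

import tensorflow as tf

adj = tf.constant([[0.0, 1.0, 1.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 0.0, 0.0]])   # last row is all zeros
normed = normalize_asymmetric(adj)
print(tf.reduce_sum(normed, axis=1))   # [1. 1. 0.]: zero row stays zero, no NaN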
Example #4
def to_coo(sp: Union[tf.SparseTensor, CSRSparseMatrix]) -> tf.SparseTensor:
    if is_sparse_tensor(sp):
        return sp
    if is_csr_matrix(sp):
        return sp.to_sparse_tensor()
    raise TypeError(
        f"sp must be a `SparseTensor` or `CSRSparseMatrix`, got {sp}")
Example #5
def normalize_symmetric(
    x: tp.Union[tf.Tensor, tf.SparseTensor]
) -> tp.Union[tf.Tensor, tf.SparseTensor]:
    if is_sparse_tensor(x):
        return normalize_sparse(x, symmetric=True)
    d = tf.reduce_sum(tf.abs(x), axis=1)
    d = tf.where(d <= 0, tf.zeros_like(d), tf.math.rsqrt(d))
    return x * d * tf.expand_dims(d, -1)
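On the dense path this is the usual symmetric normalization D^{-1/2} A D^{-1/2}, with D built from row sums of |A|. A small check, with normalize_symmetric above in scope:

import tensorflow as tf

adj = tf.constant([[1.0, 1.0],
                   [1.0, 1.0]])
normed = normalize_symmetric(adj)
d = tf.math.rsqrt(tf.reduce_sum(tf.abs(adj), axis=1))  # no zero rows here
expected = adj * d * tf.expand_dims(d, -1)             # diag(d) @ A @ diag(d)
tf.debugging.assert_near(normed, expected)             # [[0.5, 0.5], [0.5, 0.5]]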
Example #6
def propagate(x, adj, filters, activation=None, kernel_regularizer=None):
    # `dropout`, `rank`, `V_activation`, `reg_coeff`, `linear_skip_connections`,
    # `SparseDense` and `lfgcn_layers` come from the enclosing scope.
    x = dropout(x)
    V = (SparseDense if is_sparse_tensor(x) else tf.keras.layers.Dense)(
        rank, activation=V_activation)(x)
    # `reg_coeff` belongs to the graph convolution only; Dense would reject it.
    dense_kwargs = dict(kernel_regularizer=kernel_regularizer, use_bias=False)
    conv_kwargs = dict(reg_coeff=reg_coeff, **dense_kwargs)
    if linear_skip_connections:
        # The Dense layer must be applied to `x`, not added as a layer object.
        skip = tf.keras.layers.Dense(filters, **dense_kwargs)(x)
        x = lfgcn_layers.LearnedFactorizedGraphConvolution(
            filters, **conv_kwargs)([x, V, adj])
        x = x + skip
        if activation is not None:
            x = activation(x)
    else:
        x = lfgcn_layers.LearnedFactorizedGraphConvolution(
            filters, activation=activation, **conv_kwargs)([x, V, adj])
    return x
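SparseDense above is the sparse counterpart of keras Dense and is not defined in the snippet. A minimal sketch of what such a layer might look like (an assumption, not the library's actual class):

import tensorflow as tf

class SparseDense(tf.keras.layers.Layer):
    """Minimal sketch of a Dense-like layer accepting a tf.SparseTensor."""

    def __init__(self, units, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name="kernel", shape=[int(input_shape[-1]), self.units])

    def call(self, x):
        # sparse @ dense kernel yields a dense output, like Dense without bias.
        return self.activation(tf.sparse.sparse_dense_matmul(x, self.kernel))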
Example #7
    def propagate(x, adj, filters, activation=None, kernel_regularizer=None):
        if dropout_rate:
            if is_sparse_tensor(x):
                x = SparseDropout(dropout_rate)(x)
            else:
                x = tf.keras.layers.Dropout(dropout_rate)(x)

        kwargs = dict(kernel_regularizer=kernel_regularizer, use_bias=False)
        if linear_skip_connections:
            skip = tf.keras.layers.Dense(filters, **kwargs)(x)
            x = graph_conv_factory(filters, **kwargs)([x, adj])
            x = x + skip
            if activation is not None:
                x = activation(x)
        else:
            x = graph_conv_factory(filters, activation=activation,
                                   **kwargs)([x, adj])
        return x
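This propagate closes over dropout_rate, linear_skip_connections, graph_conv_factory and the sparse helpers from its enclosing model builder. A self-contained sketch of that wiring, assuming the def above is lifted to module scope so its free names resolve to the globals bound here; ToyGraphConv is purely illustrative, not the library's factory:

import tensorflow as tf

class ToyGraphConv(tf.keras.layers.Layer):
    """Toy stand-in for graph_conv_factory: activation(adj @ dense(x))."""

    def __init__(self, filters, activation=None, **kwargs):
        super().__init__()
        self.dense = tf.keras.layers.Dense(filters, **kwargs)
        self.activation = tf.keras.activations.get(activation)

    def call(self, inputs):
        x, adj = inputs
        return self.activation(tf.matmul(adj, self.dense(x)))

def is_sparse_tensor(t):  # stand-in for the library helper
    return isinstance(t, tf.SparseTensor)

dropout_rate = 0.5
linear_skip_connections = True
graph_conv_factory = ToyGraphConv

x = tf.random.normal([4, 8])  # 4 nodes, 8 input features
adj = tf.eye(4)               # trivial adjacency for the demo
out = propagate(x, adj, filters=16, activation=tf.nn.relu)  # shape [4, 16]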
Example #8
def sparse_dense_matmul(
    a: Union[tf.SparseTensor, CSRSparseMatrix],
    b: tf.Tensor,
    transpose_a: bool = False,
    transpose_b: bool = False,
) -> tf.Tensor:
    if is_sparse_tensor(a):
        # tf.sparse.sparse_dense_matmul only exposes adjoint_a/adjoint_b, and
        # adjoint coincides with transpose only for real dtypes, hence the check.
        assert a.dtype.real_dtype == a.dtype, a.dtype

        return tf.sparse.sparse_dense_matmul(a,
                                             b,
                                             adjoint_a=transpose_a,
                                             adjoint_b=transpose_b)

    if is_csr_matrix(a):
        return sparse_lib.matmul(a,
                                 b,
                                 transpose_a=transpose_a,
                                 transpose_b=transpose_b)

    raise TypeError(f"a must be SparseTensor or CSRSparseMatrix, got {a}")
Example #9
def arnoldi_iteration(
    A: Union[tf.Tensor, tf.SparseTensor],
    b: tf.Tensor,
    n: int,
    symmetric: bool = False,
    eps: float = 1e-12,
) -> Tuple[tf.Tensor, tf.Tensor]:
    """
    Computes a basis of the (n + 1)-Krylov subspace of A for each col of b.

    The Krylov-subspace is the space spanned by {b, Ab, ..., A^n b}.

    Based on a NumPy reference implementation.

    Arguments
        A: [m, m] float Tensor or SparseTensor
        b: [m, p] initial Tensor
        n: dimension of Krylov subspace, must be >= 1
        symmetric: if True, A is assumed to be symmetric, so only the previous
            two components are removed during re-orthogonalization.
        eps: threshold to exit early

    Returns
      Q: [m, (n + 1), p] float Tensor whose columns form an orthonormal basis of
        the Krylov subspace.
      h: [(n + 1), n, p] float Tensor, the projection of A onto basis Q; upper
        Hessenberg in its first two dimensions.
    """
    assert b.dtype == A.dtype, "dtypes must be the same"
    if is_sparse_tensor(A):
        matmul = tf.sparse.sparse_dense_matmul
    else:
        matmul = tf.linalg.matmul

    if b.dtype.is_complex:

        def conjugate(x):
            return tf.complex(tf.math.real(x), -tf.math.imag(x))

    else:

        def conjugate(x):
            return x

    def dot(x, y):
        return tf.einsum("dp,dp->p", x, y)
        # equivalently: tf.squeeze(tf.tensordot(x, y, (0, 0)), axis=0)

    h = []
    q = b / tf.linalg.norm(b, axis=0)  # Normalize the input vector
    Q = [q]
    z = tf.zeros_like(b)

    for _ in range(n):
        v = matmul(A, q)  # Generate a new candidate vector
        hk = []
        h.append(hk)
        for qi in Q[-2:] if symmetric else Q:
            hjk = dot(conjugate(qi), v)
            hk.append(hjk)
            v = v - hjk * qi

        h_final = tf.linalg.norm(v, axis=0)
        hk.append(h_final)
        q = tf.where(h_final > eps, v / h_final, z)
        Q.append(q)

    Q = tf.stack(Q, axis=1)
    if symmetric:
        leading = [max(0, i - 1) for i in range(n)]
    else:
        leading = [0] * n
    h = [
        pad_to_size(hi, n + 1, leading=l, axis=0) for l, hi in zip(leading, h)
    ]

    h = tf.stack(h, axis=1)
    return Q, h
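A numerical sanity check of the Arnoldi relation A Q_n = Q_{n+1} H per column of b, assuming arnoldi_iteration (and its pad_to_size helper) is importable:

import tensorflow as tf

m, n, p = 6, 3, 2
A = tf.random.normal([m, m], dtype=tf.float64)
b = tf.random.normal([m, p], dtype=tf.float64)
Q, h = arnoldi_iteration(A, b, n)            # Q: [m, n + 1, p], h: [n + 1, n, p]
lhs = tf.einsum("ij,jkp->ikp", A, Q[:, :n])  # A @ Q_n, per problem p
rhs = tf.einsum("ikp,knp->inp", Q, h)        # Q_{n+1} @ H, per problem p
tf.debugging.assert_near(lhs, rhs, atol=1e-10)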
Example #10
def lanczos_iteration(A: Union[tf.Tensor, tf.SparseTensor],
                      b: tf.Tensor,
                      n: int,
                      eps: float = 1e-12):
    """
    Computes a basis of the (n + 1)-Krylov subspace of symmetric A for each col of b.

    The Krylov-subspace is the space spanned by {b, Ab, ..., A^n b}.

    See https://chen.pw/research/cg/arnoldi_lanczos.html

    Arguments
        A: [m, m] float Tensor or SparseTensor, assumed symmetric.
        b: [m, p] initial Tensor.
        n: dimension of Krylov subspace, must be >= 1
        eps: threshold to exit early.

    Returns
        Q: [m, (n + 1), p] float Tensor whose columns form an orthonormal basis
          of the Krylov subspace.
        d: [n, p] diagonal values of H.
        l: [n, p] sub-diagonal values of H.
    """
    assert b.dtype == A.dtype, "dtypes must be the same"
    if is_sparse_tensor(A):
        matmul = tf.sparse.sparse_dense_matmul
    else:
        matmul = tf.linalg.matmul

    if b.dtype.is_complex:

        def conjugate(x):
            return tf.complex(tf.math.real(x), -tf.math.imag(x))

    else:

        def conjugate(x):
            return x

    def dot(x, y):
        return tf.einsum("dp,dp->p", x, y)
        # equivalently: tf.squeeze(tf.tensordot(x, y, (0, 0)), axis=0)

    q = b / tf.linalg.norm(b, axis=0)  # Normalize the input vector
    Q = [q]
    z = tf.zeros_like(b)
    d = []
    l = []

    for _ in range(n):
        v = matmul(A, q)  # Generate a new candidate vector
        if len(Q) >= 2:
            v = v - l[-1] * Q[-2]

        di = dot(conjugate(Q[-1]), v)
        d.append(di)
        v = v - di * Q[-1]

        li = tf.linalg.norm(v, axis=0)
        l.append(li)
        q = tf.where(li > eps, v / li, z)

        Q.append(q)

    Q = tf.stack(Q, axis=1)
    d = tf.stack(d)
    l = tf.stack(l)
    return Q, d, l
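Here d and l are the diagonal and sub-diagonal of the tridiagonal Hessenberg matrix that Arnoldi would produce for symmetric A. A quick orthonormality check of the basis, assuming lanczos_iteration above is importable:

import tensorflow as tf

m, n, p = 6, 3, 2
A = tf.random.normal([m, m], dtype=tf.float64)
A = A + tf.transpose(A)                 # Lanczos assumes symmetric A
b = tf.random.normal([m, p], dtype=tf.float64)
Q, d, l = lanczos_iteration(A, b, n)    # Q: [m, n + 1, p]; d, l: [n, p]
gram = tf.einsum("ikp,ijp->pkj", Q, Q)  # per-problem Gram matrix of the basis
tf.debugging.assert_near(gram,
                         tf.eye(n + 1, batch_shape=[p], dtype=tf.float64),
                         atol=1e-10)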