Example #1
    def test_sparse_consistent_when_stacked(self):
        m = 100
        nnz = m * 10
        n = 10
        p = 5

        rng = tf.random.Generator.from_seed(0)
        A = random_sparse((m, m), nnz, rng=rng)
        b = rng.normal((m, p))

        bs = tf.split(  # pylint: disable=no-value-for-parameter,redundant-keyword-arg
            b, p, axis=-1
        )

        Q_stacked, h_stacked = arnoldi_iteration_tf(A, b, n)
        Q_sep, h_sep = zip(*(arnoldi_iteration_tf(A, bi, n) for bi in bs))
        Q_sep = tf.concat(  # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
            Q_sep, axis=-1
        )
        h_sep = tf.concat(  # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
            h_sep, axis=-1
        )

        self.assertAllClose(Q_stacked, Q_sep)
        self.assertAllClose(h_stacked, h_sep)
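Note: every example in this listing relies on a `random_sparse` test helper that is not shown. A minimal sketch of what such a helper could look like, assuming uniformly drawn indices (duplicates dropped, so the result may hold fewer than `nnz` entries) and standard-normal values:

import math

import tensorflow as tf


def random_sparse(dense_shape, nnz, rng):
    """Hypothetical stand-in for the `random_sparse` helper used throughout
    these examples; not the project's actual implementation."""
    size = math.prod(dense_shape)
    # Draw flat positions, de-duplicate, then unravel into nd indices.
    flat = rng.uniform((nnz, ), maxval=size, dtype=tf.int64)
    flat, _ = tf.unique(tf.sort(flat))
    dims = tf.constant(dense_shape, tf.int64)
    indices = tf.transpose(tf.unravel_index(flat, dims))
    values = rng.normal(tf.shape(flat))
    return tf.SparseTensor(indices, values, dense_shape)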
Example #2
    def test_chebyshev_polynomials(self):
        n = 10
        nnz = 20
        k = 2
        rng = tf.random.Generator.from_seed(0)
        A = tf.sparse.add(random_sparse((n, n), nnz, rng), tf.sparse.eye(n))
        A = A.with_values(tf.abs(A.values))

        def unpack(st):
            return st.values, tf.unstack(st.indices, axis=-1)

        data, (i, j) = self.evaluate(unpack(A))
        A_sp = sp.coo_matrix((data, (i, j)), shape=(n, n))

        expected = scipy_utils.chebyshev_polynomials(A_sp, k)
        expected = [
            exp.tocsr(copy=False).tocoo(copy=False) for exp in expected
        ]
        actual = ops.chebyshev_polynomials(A, k)
        actual = self.evaluate([unpack(st) for st in actual])

        for ((data, (row, col)), exp) in zip(actual, expected):
            np.testing.assert_equal(row, exp.row)
            np.testing.assert_equal(col, exp.col)
            np.testing.assert_allclose(data, exp.data, atol=1e-5)
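`scipy_utils.chebyshev_polynomials` is not shown either. The usual construction evaluates the Chebyshev recurrence T_0 = I, T_1 = L_scaled, T_k = 2 * L_scaled @ T_{k-1} - T_{k-2} at a normalized Laplacian rescaled into [-1, 1]. A scipy sketch under that assumption (the exact normalization used by `scipy_utils` may differ):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh


def chebyshev_polynomials(adj, k):
    """Plausible sketch of `scipy_utils.chebyshev_polynomials`; an
    assumption, not the verified implementation."""
    n = adj.shape[0]
    # Symmetrically normalized Laplacian L = I - D^{-1/2} A D^{-1/2}.
    d = np.asarray(adj.sum(axis=1)).flatten()
    d_inv_sqrt = np.zeros_like(d)
    d_inv_sqrt[d > 0] = d[d > 0]**-0.5
    D = sp.diags(d_inv_sqrt)
    laplacian = sp.eye(n) - D @ adj @ D
    # Rescale the spectrum into [-1, 1].
    lambda_max = eigsh(laplacian, 1, which="LM", return_eigenvectors=False)[0]
    scaled = 2.0 / lambda_max * laplacian - sp.eye(n)
    # Chebyshev recurrence T_k = 2 * scaled @ T_{k-1} - T_{k-2}.
    polys = [sp.eye(n).tocoo(), scaled.tocoo()]
    for _ in range(2, k + 1):
        polys.append((2 * scaled @ polys[-1] - polys[-2]).tocoo())
    return polys[:k + 1]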
Example #3
    def test_consistent(self):
        ni = 5
        no = 7
        nnz = 15
        f = 11
        h = 13
        rng = tf.random.Generator.from_seed(0)

        adj = random_sparse((no, ni), nnz, rng)
        nnz = adj.values.shape[0]
        features = rng.normal((ni, h, f))
        attention = rng.uniform((nnz, h))

        v0 = ops.multi_attention_v0(features, attention, adj)
        v1 = ops.multi_attention_v1(features, attention, adj)

        self.assertAllClose(v0, v1)

        v1_mean = ops.multi_attention_v1(features,
                                         attention,
                                         adj,
                                         reduction="mean")
        self.assertAllClose(tf.reduce_mean(v0, axis=1), v1_mean)
        v1_sum = ops.multi_attention_v1(features,
                                        attention,
                                        adj,
                                        reduction="sum")
        self.assertAllClose(tf.reduce_sum(v0, axis=1), v1_sum)
Example #4
def get_conv_args():
    rng = tf.random.Generator.from_seed(FLAGS.seed)
    nnz = FLAGS.nn * FLAGS.ni
    x = rng.normal((FLAGS.ni, FLAGS.fi), dtype=tf.float32)
    adjacencies = [
        random_sparse((FLAGS.no, FLAGS.ni), nnz, rng) for _ in range(FLAGS.na)
    ]
    kernel = rng.normal((FLAGS.fi, FLAGS.na, FLAGS.fo))
    return x, adjacencies, kernel
Example #5
    def test_sparse_boolean_mask(self):
        num_nodes = 50
        nnz = 150
        num_dims = 2
        rng = tf.random.Generator.from_seed(0)
        st = random_sparse((num_nodes, ) * num_dims, nnz, rng=rng)
        dense = tf.sparse.to_dense(st)
        mask = rng.uniform((num_nodes, )) > 0.6
        for axis in range(num_dims):
            actual = ops.sparse_boolean_mask(st, mask, axis=axis).st
            expected = tf.boolean_mask(dense, mask, axis=axis)
            self.assertAllEqual(tf.sparse.to_dense(actual), expected)
Example #6
    def test_signed_incidence(self, seed: int = 0, n: int = 10, m: int = 50):
        a = test_utils.random_sparse((n, n), m,
                                     tf.random.Generator.from_seed(seed))
        adj_tril = transforms.tril(a)
        del a
        adj_tril = tf.sparse.map_values(tf.math.abs, adj_tril)
        adj = tf.sparse.reorder(  # pylint: disable=no-value-for-parameter
            tf.sparse.add(adj_tril, tf.sparse.transpose(adj_tril)))
        B = tf.sparse.to_dense(transforms.signed_incidence(adj))
        L0 = tf.matmul(tf.transpose(B), B)
        L1 = tf.sparse.to_dense(transforms.laplacian(adj))
        self.assertAllClose(L0, L1)
Example #7
    def test_ritz_completes(self):
        m = 100
        nnz = m * 10
        n = 20

        rng = tf.random.Generator.from_seed(0)
        A = random_sparse((m, m), nnz, rng=rng)
        A = tf.sparse.add(A, tf.sparse.transpose(A, (1, 0)))  # make symmetric
        b = rng.normal((m,))

        w, v = ritz_embedding(A, b, n)
        del w, v  # smoke test: only checks that ritz_embedding completes
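`ritz_embedding` is presumably a Rayleigh-Ritz procedure: build an orthonormal Krylov basis Q from A and b, then take the eigenpairs of the projected matrix Q^T A Q. A dense NumPy sketch of that idea (hypothetical; the function under test works on the sparse tensor directly):

import numpy as np


def ritz_embedding_np(A, b, n):
    """Hypothetical dense sketch of `ritz_embedding` for symmetric A."""
    m = A.shape[0]
    Q = np.empty((m, n))
    Q[:, 0] = b / np.linalg.norm(b)
    for k in range(1, n):
        q = A @ Q[:, k - 1]
        q -= Q[:, :k] @ (Q[:, :k].T @ q)  # orthogonalize against the basis
        Q[:, k] = q / np.linalg.norm(q)
    # Ritz values / vectors of the projected operator.
    w, s = np.linalg.eigh(Q.T @ A @ Q)
    return w, Q @ s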
Example #8
def get_multi_graph_data(num_adj=3,
                         num_nodes=5,
                         filters_in=7,
                         filters_out=11,
                         nnz=5,
                         seed=0):
    rng = tf.random.Generator.from_seed(seed)
    adjacencies = [
        random_sparse((num_nodes, num_nodes), nnz + i, rng)
        for i in range(num_adj)
    ]
    kernel = rng.normal((filters_in, num_adj, filters_out))
    x = rng.normal((num_nodes, filters_in))
    return x, adjacencies, kernel
Example #9
    def test_sparse_gather(self):
        # ensure sparse_gather is consistent with dense gather
        num_nodes = 50
        nnz = 150
        num_dims = 2
        rng = tf.random.Generator.from_seed(0)
        st = random_sparse((num_nodes, ) * num_dims, nnz, rng=rng)
        dense = tf.sparse.to_dense(st)
        mask = rng.uniform((num_nodes, )) > 0.6
        indices = tf.boolean_mask(tf.range(num_nodes), mask)
        for axis in range(num_dims):
            actual = ops.sparse_gather(st, indices, axis=axis).st
            expected = tf.gather(dense, indices, axis=axis)
            self.assertAllEqual(tf.sparse.to_dense(actual), expected)
Example #10
    def test_sparse_gather_again(self):
        num_nodes = 50
        nnz = 150
        num_dims = 2
        nf = 3
        ids = tf.constant([10, 20, 30, 35], dtype=tf.int64)
        rng = tf.random.Generator.from_seed(0)
        st = random_sparse((num_nodes, ) * num_dims, nnz, rng=rng)
        features = rng.uniform((num_nodes, nf))

        v0 = tf.gather(tf.sparse.sparse_dense_matmul(st, features),
                       ids,
                       axis=0)
        v1 = tf.sparse.sparse_dense_matmul(
            ops.sparse_gather(st, ids).st, features)
        self.assertAllClose(v0, v1)
Example #11
    def test_sparse_boolean_mask_all(self):
        # ensure sparse_boolean_mask_all matches repeated per-axis masking
        num_nodes = 50
        nnz = 150
        num_dims = 2
        rng = tf.random.Generator.from_seed(0)
        st = random_sparse((num_nodes, ) * num_dims, nnz, rng=rng)
        mask = rng.uniform((num_nodes, )) > 0.6
        actual = ops.sparse_boolean_mask_all(st, mask).st
        expected = st
        for axis in range(num_dims):
            expected = ops.sparse_boolean_mask(expected, mask, axis=axis).st

        self.assertAllEqual(actual.indices, expected.indices)
        self.assertAllEqual(actual.values, expected.values)
        self.assertAllEqual(actual.dense_shape, expected.dense_shape)
Example #12
    def test_sparse_consistent_with_numpy(self):
        m = 100
        nnz = m * 10
        n = 10

        rng = tf.random.Generator.from_seed(0)
        A = random_sparse((m, m), nnz, rng=rng)
        b = rng.normal((m, 1))

        Q_tf, h_tf = arnoldi_iteration_tf(A, b, n)
        Q_np, h_np = arnoldi_iteration_np(
            tf.sparse.to_dense(A).numpy(), tf.squeeze(b, 1).numpy(), n
        )

        self.assertAllClose(tf.squeeze(Q_tf, axis=-1), Q_np)
        self.assertAllClose(tf.squeeze(h_tf, axis=-1), h_np)
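`arnoldi_iteration_np` is the dense reference the TF version is checked against. It is not included in the listing, but the textbook Arnoldi iteration it presumably implements is short:

import numpy as np


def arnoldi_iteration_np(A, b, n):
    """Textbook Arnoldi iteration (assumed shape of the missing reference):
    returns Q (m, n + 1) with orthonormal columns and the Hessenberg matrix
    h (n + 1, n) satisfying A @ Q[:, :n] ~= Q @ h."""
    m = A.shape[0]
    Q = np.zeros((m, n + 1))
    h = np.zeros((n + 1, n))
    Q[:, 0] = b / np.linalg.norm(b)
    for k in range(n):
        v = A @ Q[:, k]
        for j in range(k + 1):  # modified Gram-Schmidt
            h[j, k] = Q[:, j] @ v
            v -= h[j, k] * Q[:, j]
        h[k + 1, k] = np.linalg.norm(v)
        Q[:, k + 1] = v / h[k + 1, k]
    return Q, h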
Example #13
    def test_symmetric_case(self):
        m = 100
        nnz = m * 10
        n = 20
        p = 5

        rng = tf.random.Generator.from_seed(0)
        A = random_sparse((m, m), nnz, rng=rng)
        A = tf.sparse.add(A, tf.sparse.transpose(A, (1, 0)))  # make symmetric
        b = rng.normal((m, p))

        Qs, hs = arnoldi_iteration_tf(A, b, n, symmetric=True)

        Q, h = arnoldi_iteration_tf(A, b, n)
        self.assertAllClose(Q, Qs, atol=1e-4)
        self.assertAllClose(h, hs, atol=1e-4)
Example #14
def get_args(
    num_nodes: int = 1024,
    sparsity: float = 0.01,
    num_convs: int = 8,
    filters_in: int = 63,
    filters_out: int = 65,
    seed: int = 0,
):
    rng = tf.random.Generator.from_seed(seed)
    nnz = int(num_nodes**2 * sparsity)
    adj = test_utils.random_sparse((num_nodes, num_nodes), nnz, rng)
    indices = adj.indices
    nnz = tf.shape(indices)[0]
    adj_values = rng.uniform((num_convs, nnz))
    kernel = rng.normal((num_convs, filters_in, filters_out))
    features = rng.normal((num_nodes, filters_in))
    args = adj_values, features, kernel, indices, adj.dense_shape
    params = args[:3]
    return args, params
Example #15
    def test_normalize_sparse(self):
        n = 100
        A = random_sparse((n, n),
                          nnz=500,
                          rng=tf.random.Generator.from_seed(0))
        A = tf.sparse.add(A, tf.sparse.eye(n))
        A = A.with_values(tf.abs(A.values))
        values, indices = self.evaluate((A.values, A.indices))
        A_sp = sp.coo_matrix((values, indices.T), shape=(n, n))
        tf_impl = ops.normalize_sparse(A)
        tf_impl = tf.sparse.reorder(tf_impl)  # pylint: disable=no-value-for-parameter
        sp_impl = scipy_utils.normalize_sparse(A_sp)
        sp_impl = sp_impl.tocsr(copy=False).tocoo(copy=False)  # force reorder

        row, col = tf.unstack(tf_impl.indices, axis=-1)
        values = tf_impl.values
        row, col, values = self.evaluate((row, col, values))

        np.testing.assert_equal(row, sp_impl.row)
        np.testing.assert_equal(col, sp_impl.col)
        np.testing.assert_allclose(values, sp_impl.data, rtol=1e-5)
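`normalize_sparse` is also undefined here. A common meaning is the symmetric normalization D^{-1/2} A D^{-1/2}; a scipy sketch of `scipy_utils.normalize_sparse` under that assumption:

import numpy as np
import scipy.sparse as sp


def normalize_sparse(adj):
    """Assumed behaviour of `scipy_utils.normalize_sparse`: symmetric
    normalization D^{-1/2} A D^{-1/2} of a scipy sparse matrix."""
    d = np.asarray(adj.sum(axis=1)).flatten()
    d_inv_sqrt = np.zeros_like(d)
    d_inv_sqrt[d > 0] = d[d > 0]**-0.5
    D = sp.diags(d_inv_sqrt)
    return (D @ adj @ D).tocoo()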
Example #16
    def test_gather_consistent(self):
        N = 1000
        nf = 8
        num_classes = 5
        nnz = N * 10
        rng = tf.random.Generator.from_seed(0)

        labels = rng.uniform((N, ), maxval=num_classes, dtype=tf.int64)
        labels = tf.sort(labels)

        features_spec = tf.TensorSpec((N, nf), tf.float32)
        adj_spec = tf.SparseTensorSpec((N, N), tf.float32)
        ids_spec = tf.TensorSpec((None, ), tf.int64)

        features = rng.uniform((N, nf))
        st = random_sparse((N, N), nnz, rng)
        ids = tf.convert_to_tensor([2, 5, 10, 20], tf.int64)
        weights = preprocess_weights(ids, N)

        tf.random.set_seed(0)
        model = gat((features_spec, adj_spec), num_classes)
        preds = model([features, st], training=False)
        v0 = tf.gather(preds, ids, axis=0)
        l0 = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction="sum")(labels, preds, weights)

        tf.random.set_seed(0)
        model_gathered = gat((features_spec, adj_spec, ids_spec), num_classes)
        v1 = model_gathered([features, st, ids], training=False)
        l1 = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True,
            reduction="sum_over_batch_size")(tf.gather(labels, ids, axis=0),
                                             v1)

        self.assertAllClose(v0, v1, rtol=1e-5)
        self.assertAllClose(l0, l1)
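`preprocess_weights` is undefined in the listing, but the two losses above only agree if the weights place 1 / len(ids) on each gathered id and 0 elsewhere, so that the weighted "sum" over all N predictions equals the "sum_over_batch_size" reduction over the gathered subset. A sketch reverse-engineered from that constraint (an assumption, not the actual helper):

import tensorflow as tf


def preprocess_weights(ids, n):
    """Hypothetical `preprocess_weights`: per-example loss weights of
    1 / len(ids) at the gathered ids, 0 elsewhere."""
    num_ids = tf.cast(tf.size(ids), tf.float32)
    updates = tf.ones(tf.shape(ids), tf.float32) / num_ids
    return tf.scatter_nd(tf.expand_dims(ids, -1), updates,
                         tf.constant([n], tf.int64))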
Example #17
def get_inputs(num_nodes, mean_degree, p, seed=0):
    nnz = num_nodes * mean_degree
    rng = tf.random.Generator.from_seed(seed)
    A = random_sparse((num_nodes, num_nodes), nnz, rng)
    b = rng.normal((num_nodes, p))
    return A, b