Example #1
def test_backward_cpu(update, data):
    atom_data, adj_data, y_grad = data
    check_backward(update, atom_data, adj_data, y_grad)

    if sparse_utils_available():
        sparse_adj = convert_to_sparse(adj_data)
        check_backward(update, atom_data, sparse_adj, y_grad)
Example #2
def test_forward_gpu(model, sparse_model, data):
    atom_data, adj_data = cuda.to_gpu(data[0]), cuda.to_gpu(data[1])
    model.to_gpu()
    check_forward(model, atom_data, adj_data)
    if sparse_utils_available():
        sparse_model.to_gpu()
        check_forward(sparse_model, atom_data, *_convert_to_sparse(adj_data))
Example #3
def test_backward_gpu(update, data):
    update.to_gpu()
    atom_data, adj_data, y_grad = map(cuda.to_gpu, data)
    check_backward(update, atom_data, adj_data, y_grad)

    if sparse_utils_available():
        sparse_adj = convert_to_sparse(adj_data)
        check_backward(update, atom_data, sparse_adj, y_grad)
Example #4
def test_forward_cpu(model, sparse_model, data):
    atom_data, adj_data = data[0], data[1]
    y_dense = check_forward(model, atom_data, adj_data)
    # check that the sparse forward result matches the dense one
    if sparse_utils_available():
        y_sparse = check_forward(sparse_model, atom_data,
                                 *_convert_to_sparse(adj_data))
        numpy.testing.assert_allclose(y_dense, y_sparse, atol=1e-4, rtol=1e-4)
Example #5
def test_forward_cpu(update, data):
    atom_data, adj_data = data[:2]
    y_dense = check_forward(update, atom_data, adj_data)

    if sparse_utils_available():
        sparse_adj = convert_to_sparse(adj_data)
        y_sparse = check_forward(update, atom_data, sparse_adj)

        # results for the dense and sparse adjacency matrices must be the same
        numpy.testing.assert_allclose(y_dense, y_sparse, atol=1e-4, rtol=1e-4)
Example #6
def test_forward_gpu(update, data):
    atom_data, adj_data = cuda.to_gpu(data[0]), cuda.to_gpu(data[1])
    update.to_gpu()
    y_dense = check_forward(update, atom_data, adj_data)

    if sparse_utils_available():
        sparse_adj = convert_to_sparse(adj_data)
        y_sparse = check_forward(update, atom_data, sparse_adj)

        numpy.testing.assert_allclose(
            cuda.to_cpu(y_dense), cuda.to_cpu(y_sparse), atol=1e-4, rtol=1e-4)
Example #7
def test_backward_cpu(update, data):
    atom_data, adj_data, y_grad = data
    gx_dense = check_backward(update, atom_data, adj_data, y_grad)

    if sparse_utils_available():
        sparse_adj = convert_to_sparse(adj_data)
        gx_sparse = check_backward(update, atom_data, sparse_adj, y_grad)

        numpy.testing.assert_allclose(gx_dense,
                                      gx_sparse,
                                      atol=1e-4,
                                      rtol=1e-4)
Example #8
def test_backward_gpu(update, data):
    update.to_gpu()
    atom_data, adj_data, y_grad = map(cuda.to_gpu, data)
    gx_dense = check_backward(update, atom_data, adj_data, y_grad)

    if sparse_utils_available():
        sparse_adj = convert_to_sparse(adj_data)
        gx_sparse = check_backward(update, atom_data, sparse_adj, y_grad)

        numpy.testing.assert_allclose(cuda.to_cpu(gx_dense),
                                      cuda.to_cpu(gx_sparse),
                                      atol=1e-4,
                                      rtol=1e-4)
Example #9
@pytest.fixture
def data():
    # fixture definition implied by the tests above, which request a 'data' fixture
    atom_data = numpy.random.randint(0,
                                     high=MAX_ATOMIC_NUM,
                                     size=(batch_size, atom_size)).astype('i')
    adj_data = numpy.random.uniform(0,
                                    high=2,
                                    size=(batch_size, num_edge_type, atom_size,
                                          atom_size)).astype('f')
    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, atom_size, hidden_dim)).astype('f')

    embed = EmbedAtomID(in_size=MAX_ATOMIC_NUM, out_size=hidden_dim)
    embed_atom_data = embed(atom_data).data
    return embed_atom_data, adj_data, y_grad


@pytest.mark.skipif(not sparse_utils_available(),
                    reason='sparse_utils is not available')
def convert_to_sparse(dense_adj):
    # auxiliary function
    data, row, col, edge_type = _convert_to_sparse(dense_adj)
    return convert_sparse_with_edge_type(data, row, col, atom_size, edge_type,
                                         num_edge_type)
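

# ``_convert_to_sparse`` used above (and in the earlier examples) belongs to
# the test module but is not shown in these snippets. The function below is
# only a hypothetical sketch of such a helper: it extracts padded COO
# coordinates (data, row, col, edge_type) from a dense adjacency tensor of
# shape (batch_size, num_edge_type, atom_size, atom_size).
def _convert_to_sparse_sketch(dense_adj):
    mb = dense_adj.shape[0]
    data, row, col, edge_type = [], [], [], []
    for i in range(mb):
        # non-zero entries of sample i, indexed by (edge_type, row, col)
        e, r, c = numpy.nonzero(dense_adj[i])
        data.append(dense_adj[i][e, r, c])
        row.append(r)
        col.append(c)
        edge_type.append(e)

    max_nnz = max(len(d) for d in data)

    def pad(arrays, dtype):
        # zero-pad per-sample arrays to a common length
        out = numpy.zeros((mb, max_nnz), dtype=dtype)
        for i, arr in enumerate(arrays):
            out[i, :len(arr)] = arr
        return out

    return (pad(data, dense_adj.dtype), pad(row, numpy.int32),
            pad(col, numpy.int32), pad(edge_type, numpy.int32))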


def check_forward(update, atom_data, adj_data):
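    # run a forward pass, check the output shape, and return the result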
    update.reset_state()
    y_actual = cuda.to_cpu(update(atom_data, adj_data).data)
    assert y_actual.shape == (batch_size, atom_size, hidden_dim)

    return y_actual
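
The check_backward helper used throughout these examples is not shown in the snippets above. A minimal sketch of what it might look like, assuming it runs a manual backward pass through the update layer and returns the gradient with respect to the atom features so the dense and sparse paths can be compared:

import chainer


def check_backward(update, atom_data, adj_data, y_grad):
    # hypothetical sketch, not the original helper: propagate y_grad back
    # through the update layer and return the gradient of the atom features
    atom = chainer.Variable(atom_data)
    update.reset_state()
    y = update(atom, adj_data)
    y.grad = y_grad
    y.backward()
    return atom.grad

In the GPU tests above, the returned gradient is moved back to the host with cuda.to_cpu before the dense and sparse results are compared.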

Example #10
import numpy
import pytest

from chainer_chemistry.utils.sparse_utils import convert_sparse_with_edge_type
from chainer_chemistry.utils.sparse_utils import sparse_utils_available

if not sparse_utils_available():
    pytest.skip('sparse_utils is available if chainer>=5 and numpy>=1.16',
                allow_module_level=True)


def naive_convert(data, row, col, edge_type, num_edge_type):
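    # naive reference: scatter each sample's COO entries into one row per
    # (sample, edge type) pair, then zero-pad the rows to a common length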
    mb, length = data.shape
    new_mb = mb * num_edge_type
    new_data = [[] for _ in range(new_mb)]
    new_row = [[] for _ in range(new_mb)]
    new_col = [[] for _ in range(new_mb)]

    for i in range(mb):
        for j in range(length):
            k = i * num_edge_type + edge_type[i, j]
            new_data[k].append(data[i, j])
            new_row[k].append(row[i, j])
            new_col[k].append(col[i, j])

    new_length = max(len(arr) for arr in new_data)

    def pad(arr_2d, dtype=numpy.int32):
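        # extend every ragged row with zeros up to new_length, then stack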
        for arr in arr_2d:
            arr.extend([0] * (new_length - len(arr)))
        return numpy.array(arr_2d)