def test_forward_cpu_input_size_invariant(model, data):
    """Output must not change when inputs are zero-padded to a larger size.

    Runs the model on the original inputs, then on the same inputs padded
    out to 8 nodes, and checks the two outputs agree numerically.
    """
    atom_data, adj_data = data[0], data[1]
    # Every original node is marked as real; padded nodes will be zeros.
    is_real_node = numpy.ones(atom_data.shape, dtype=numpy.float32)
    y_actual = cuda.to_cpu(model(atom_data, adj_data, is_real_node).data)

    # Pad each input array up to 8 nodes and forward again.
    atom_data_ex = extend_node(atom_data, out_size=8)
    adj_data_ex = extend_adj(adj_data, out_size=8)
    is_real_node_ex = extend_node(is_real_node, out_size=8)
    y_actual_ex = cuda.to_cpu(
        model(atom_data_ex, adj_data_ex, is_real_node_ex).data)

    assert numpy.allclose(y_actual, y_actual_ex, rtol=1e-5, atol=1e-6)
def test_forward_cpu_input_size_invariant(model, data):
    """Model output should be invariant under zero-padding of its inputs.

    Forwards once with the raw inputs, once with all inputs extended to
    8 nodes, and asserts both results are numerically close.
    """
    atom_data, adj_data = data[0], data[1]
    # All original atoms are flagged as real nodes.
    is_real_node = numpy.ones(atom_data.shape, dtype=numpy.float32)
    y_actual = cuda.to_cpu(model(atom_data, adj_data, is_real_node).data)

    # Extend every input out to 8 nodes with zero padding.
    atom_data_ex = extend_node(atom_data, out_size=8)
    adj_data_ex = extend_adj(adj_data, out_size=8)
    is_real_node_ex = extend_node(is_real_node, out_size=8)
    y_actual_ex = cuda.to_cpu(
        model(atom_data_ex, adj_data_ex, is_real_node_ex).data)

    assert numpy.allclose(y_actual, y_actual_ex, rtol=1e-5, atol=1e-6)
def test_extend_node_2d(x):
    """extend_node pads a 2-d (batch, node) array with trailing zeros.

    The original columns are preserved and the new trailing columns up to
    ``out_size`` are zero-filled, keeping the input's dtype.
    """
    x_extended = extend_node(x, out_size=6)
    # Expected result keeps the input dtype so the dtype assertion below
    # is checked against the same type.
    x_expected = numpy.array([[0, 1, 2, 0, 0, 0],
                              [3, 4, 5, 0, 0, 0]], dtype=x.dtype)
    assert x_extended.shape == (batchsize, 6)
    assert x_extended.dtype == x.dtype
    assert numpy.array_equal(x_extended, x_expected)
def test_extend_node_2d(x):
    """extend_node zero-pads a 2-d (batch, node) array up to ``out_size``.

    Original values stay in place; the added trailing columns are zeros and
    the dtype of the input is preserved.
    """
    x_extended = extend_node(x, out_size=6)
    # Build the expected array with the same dtype as the input.
    x_expected = numpy.array([[0, 1, 2, 0, 0, 0],
                              [3, 4, 5, 0, 0, 0]], dtype=x.dtype)
    assert x_extended.shape == (batchsize, 6)
    assert x_extended.dtype == x.dtype
    assert numpy.array_equal(x_extended, x_expected)
def test_extend_node_3d(x, axis):
    """extend_node zero-pads a 3-d array along ``axis`` up to ``out_size``.

    Existing values are preserved; the padded slots are zero and the
    input's dtype is kept.
    """
    x_extended = extend_node(x, out_size=6, axis=axis)
    # Use the input's dtype for the expected array, consistent with the
    # 2-d test, so the comparison is against the same type.
    x_expected = numpy.array(
        [[[0, 1, 2, 3, 4, 0],
          [5, 6, 7, 8, 9, 0],
          [10, 11, 12, 13, 14, 0]],
         [[15, 16, 17, 18, 19, 0],
          [20, 21, 22, 23, 24, 0],
          [25, 26, 27, 28, 29, 0]]], dtype=x.dtype)
    assert x_extended.shape == (batchsize, num_node, 6)
    assert x_extended.dtype == x.dtype
    assert numpy.array_equal(x_extended, x_expected)
def test_extend_node_3d(x, axis):
    """extend_node pads a 3-d array with zeros along ``axis`` to ``out_size``.

    Checks shape, dtype preservation, and element-wise equality against
    the explicitly constructed expected array.
    """
    x_extended = extend_node(x, out_size=6, axis=axis)
    # Construct the expected array with the input's dtype (matching the
    # 2-d test's convention) rather than numpy's default integer dtype.
    x_expected = numpy.array(
        [[[0, 1, 2, 3, 4, 0],
          [5, 6, 7, 8, 9, 0],
          [10, 11, 12, 13, 14, 0]],
         [[15, 16, 17, 18, 19, 0],
          [20, 21, 22, 23, 24, 0],
          [25, 26, 27, 28, 29, 0]]], dtype=x.dtype)
    assert x_extended.shape == (batchsize, num_node, 6)
    assert x_extended.dtype == x.dtype
    assert numpy.array_equal(x_extended, x_expected)
def test_forward_cpu_input_size_invariant(model, data):
    """RSGCN output must be invariant under zero-padding of its inputs.

    The model uses dropout, so both forward passes run with
    ``train=False`` to disable stochastic behavior before comparing.
    """
    atom_data, adj_data = data[0], data[1]

    # Deterministic forward pass on the original-sized inputs.
    with chainer.using_config('train', False):
        y_actual = cuda.to_cpu(model(atom_data, adj_data).data)

    # Extend inputs to a size larger than the original `atom_size`.
    atom_data_ex = extend_node(atom_data, out_size=8)
    adj_data_ex = extend_adj(adj_data, out_size=8)

    # Deterministic forward pass on the padded inputs.
    with chainer.using_config('train', False):
        y_actual_ex = cuda.to_cpu(model(atom_data_ex, adj_data_ex).data)

    assert numpy.allclose(y_actual, y_actual_ex, rtol=1.e-4, atol=1.e-5)
def test_forward_cpu_input_size_invariant(model, data):
    """Padding inputs to a bigger node count must not change RSGCN output.

    Because this RSGCN applies dropout, both forward passes are executed
    in test mode (``train=False``) so the computation is deterministic.
    """
    atom_data, adj_data = data[0], data[1]

    with chainer.using_config('train', False):
        y_actual = cuda.to_cpu(model(atom_data, adj_data).data)

    # Pad to a node count larger than the original `atom_size`.
    atom_data_ex = extend_node(atom_data, out_size=8)
    adj_data_ex = extend_adj(adj_data, out_size=8)

    with chainer.using_config('train', False):
        y_actual_ex = cuda.to_cpu(
            model(atom_data_ex, adj_data_ex).data)

    assert numpy.allclose(y_actual, y_actual_ex, rtol=1.e-4, atol=1.e-5)
def test_extend_node_assert_raises():
    """extend_node must raise ValueError when out_size shrinks the array."""
    with pytest.raises(ValueError):
        extend_node(x_2d, out_size=1)
def test_extend_node_assert_raises():
    """Requesting an out_size smaller than the input must raise ValueError."""
    with pytest.raises(ValueError):
        extend_node(x_2d, out_size=1)