def test_extend_adj(adj):
    """Check that `extend_adj` zero-pads an adjacency array to `out_size`.

    The original (batchsize, num_node, num_node) entries must be preserved
    in the top-left sub-block, and all padded rows/columns must be zero.
    """
    adj_extended = extend_adj(adj, out_size=6)
    # Shape grows to the requested out_size; dtype is unchanged.
    assert adj_extended.shape == (batchsize, 6, 6)
    assert adj_extended.dtype == adj.dtype
    # Original entries survive padding untouched.
    assert numpy.array_equal(adj_extended[:, :num_node, :num_node], adj)
    # `numpy.alltrue` was deprecated and removed in NumPy 2.0 — use numpy.all.
    assert numpy.all(adj_extended[:, num_node:, :] == 0)
    assert numpy.all(adj_extended[:, :, num_node:] == 0)
def test_extend_adj(adj):
    """Check that `extend_adj` zero-pads an adjacency array to `out_size`.

    The original (batchsize, num_node, num_node) entries must be preserved
    in the top-left sub-block, and all padded rows/columns must be zero.
    """
    adj_extended = extend_adj(adj, out_size=6)
    # Shape grows to the requested out_size; dtype is unchanged.
    assert adj_extended.shape == (batchsize, 6, 6)
    assert adj_extended.dtype == adj.dtype
    # Original entries survive padding untouched.
    assert numpy.array_equal(adj_extended[:, :num_node, :num_node], adj)
    # `numpy.alltrue` was deprecated and removed in NumPy 2.0 — use numpy.all.
    assert numpy.all(adj_extended[:, num_node:, :] == 0)
    assert numpy.all(adj_extended[:, :, num_node:] == 0)
def test_forward_cpu_input_size_invariant(model, data):
    """Model output must be unchanged when inputs are zero-padded to a
    larger node count, given a matching `is_real_node` mask."""
    atoms = data[0]
    adjs = data[1]
    mask = numpy.ones(atoms.shape, dtype=numpy.float32)

    # Forward pass on the original-sized inputs.
    y_small = cuda.to_cpu(model(atoms, adjs, mask).data)

    # Forward pass after padding every input out to 8 nodes.
    atoms_padded = extend_node(atoms, out_size=8)
    adjs_padded = extend_adj(adjs, out_size=8)
    mask_padded = extend_node(mask, out_size=8)
    y_padded = cuda.to_cpu(model(atoms_padded, adjs_padded, mask_padded).data)

    assert numpy.allclose(y_small, y_padded, rtol=1e-5, atol=1e-6)
def test_forward_cpu_input_size_invariant(model, data):
    """Model output must be unchanged when inputs are zero-padded to a
    larger node count, given a matching `is_real_node` mask."""
    atoms = data[0]
    adjs = data[1]
    mask = numpy.ones(atoms.shape, dtype=numpy.float32)

    # Reference result at the original node count.
    reference = cuda.to_cpu(model(atoms, adjs, mask).data)

    # Re-run with all inputs extended to 8 nodes.
    extended = cuda.to_cpu(
        model(extend_node(atoms, out_size=8),
              extend_adj(adjs, out_size=8),
              extend_node(mask, out_size=8)).data)

    assert numpy.allclose(reference, extended, rtol=1e-5, atol=1e-6)
def test_forward_cpu_input_size_invariant(model, data):
    """Model output must be unchanged when inputs are zero-padded.

    This RSGCN uses dropout, so both forward passes run in test mode
    (`train=False`) to remove the stochastic part of the computation.
    """
    atom_data, adj_data = data[0], data[1]
    with chainer.using_config('train', False):
        y_actual = cuda.to_cpu(model(atom_data, adj_data).data)

    # Pad inputs to a size bigger than the original `atom_size`.
    # (Commented-out debug print removed.)
    atom_data_ex = extend_node(atom_data, out_size=8)
    adj_data_ex = extend_adj(adj_data, out_size=8)
    with chainer.using_config('train', False):
        y_actual_ex = cuda.to_cpu(model(atom_data_ex, adj_data_ex).data)

    assert numpy.allclose(y_actual, y_actual_ex, rtol=1.e-4, atol=1.e-5)
def test_forward_cpu_input_size_invariant(model, data):
    """Model output must be unchanged when inputs are zero-padded.

    This RSGCN uses dropout, so both forward passes run in test mode
    (`train=False`) to remove the stochastic part of the computation.
    """
    atom_data, adj_data = data[0], data[1]
    with chainer.using_config('train', False):
        y_actual = cuda.to_cpu(model(atom_data, adj_data).data)

    # Pad inputs to a size bigger than the original `atom_size`.
    # (Commented-out debug print removed.)
    atom_data_ex = extend_node(atom_data, out_size=8)
    adj_data_ex = extend_adj(adj_data, out_size=8)
    with chainer.using_config('train', False):
        y_actual_ex = cuda.to_cpu(model(atom_data_ex, adj_data_ex).data)

    assert numpy.allclose(y_actual, y_actual_ex, rtol=1.e-4, atol=1.e-5)
def test_padding(self, sample_molecule_2):
    """Edge matrix built with an explicit size is zero-padded on the
    last two axes, matching `extend_adj` applied to the reference."""
    padded = common.construct_discrete_edge_matrix(sample_molecule_2, 8)
    assert padded.shape == (4, 8, 8)
    # Expected result: the reference adjacency padded out to 8 nodes.
    expected = extend_adj(self.expect_adj, out_size=8, axis=[-1, -2])
    numpy.testing.assert_equal(padded, expected)
def test_padding(self, sample_molecule_2):
    """Edge matrix built with an explicit size is zero-padded on the
    last two axes, matching `extend_adj` applied to the reference."""
    actual = common.construct_discrete_edge_matrix(sample_molecule_2, 8)
    assert actual.shape == (4, 8, 8)
    # Build the expectation by padding the stored reference adjacency.
    reference = extend_adj(self.expect_adj, out_size=8, axis=[-1, -2])
    numpy.testing.assert_equal(actual, reference)
def test_extend_adj_assert_raises():
    """`extend_adj` must raise ValueError when `out_size` is smaller
    than the array's current node dimension."""
    # out_size=1 is smaller than adj_3d's node axis, so this must fail.
    with pytest.raises(ValueError):
        extend_adj(adj_3d, out_size=1)
def test_extend_adj_assert_raises():
    """`extend_adj` must raise ValueError when `out_size` is smaller
    than the array's current node dimension."""
    # Shrinking is not allowed: out_size=1 is below the current size.
    with pytest.raises(ValueError):
        extend_adj(adj_3d, out_size=1)