# NOTE(review): this chunk lost all its newlines (the original was collapsed onto
# one physical line, which is a Python syntax error). Structure restored below
# with tokens unchanged. The leading statements are a truncated fragment of a
# Frame-pickling test whose `def` line is not visible in this chunk.
fr = Frame({'x': x, 'y': y})
fr2 = _reconstruct_pickle(fr)
assert F.allclose(fr2['x'].data, x)
assert F.allclose(fr2['y'].data, y)
fr = Frame()


def _global_message_func(nodes):
    """Message function: forward the node feature 'x' unchanged."""
    return {'x': nodes.data['x']}


@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(exclude=['dglgraph']))
def test_pickling_graph(g, idtype):
    """Check that a graph round-trips through pickle with features intact."""
    g = g.astype(idtype)
    new_g = _reconstruct_pickle(g)
    test_utils.check_graph_equal(g, new_g, check_feature=True)


@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_pickling_batched_heterograph():
    # copied from test_heterograph.create_test_heterograph()
    plays_spmat = ssp.coo_matrix(([1, 1, 1, 1], ([0, 1, 2, 1], [0, 0, 1, 1])))
    wishes_nx = nx.DiGraph()
    wishes_nx.add_nodes_from(['u0', 'u1', 'u2'], bipartite=0)
    wishes_nx.add_nodes_from(['g0', 'g1'], bipartite=1)
    wishes_nx.add_edge('u0', 'g1', id=0)
    wishes_nx.add_edge('u2', 'g0', id=1)
    # NOTE(review): chunk is truncated here — the rest of this test body is not
    # visible in this view of the file.
# NOTE(review): newlines restored; tokens unchanged except the mangled case
# name noted below. The leading statements are a truncated fragment of the
# binary-builtin test harness (`test_all_binary_builtins`) whose `def` line is
# not visible in this chunk.
g.add_edge(19, 1)
g = g.to(F.ctx())
nid = F.tensor([0, 1, 4, 5, 7, 12, 14, 15, 18, 19], g.idtype)
target = ["u", "v", "e"]
# Exhaustively sweep every (lhs, rhs, op, reducer, broadcast, partial) combo.
for lhs, rhs in product(target, target):
    if lhs == rhs:
        continue
    for binary_op in ["add", "sub", "mul", "div"]:
        for reducer in ["sum", "max", "min", "mean"]:
            for broadcast in ["none", lhs, rhs]:
                for partial in [False, True]:
                    print(lhs, rhs, binary_op, reducer, broadcast, partial)
                    _test(g, lhs, rhs, binary_op, reducer, partial, nid,
                          broadcast=broadcast)


@parametrize_idtype
# NOTE(review): the source read 'h**o-zero-degree', which looks like a content
# filter mangled 'homo-zero-degree' (the homogeneous zero-degree case) — confirm
# against the test-case registry.
@pytest.mark.parametrize('g', get_cases(['homo-zero-degree']))
def test_mean_zero_degree(g, idtype):
    """Mean-reduce over nodes with zero in-degree must yield zeros, not NaN."""
    g = g.astype(idtype).to(F.ctx())
    g.ndata['h'] = F.ones((g.number_of_nodes(), 3))
    g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'x'))
    deg = F.asnumpy(g.in_degrees())
    v = F.tensor(np.where(deg == 0)[0])
    assert F.allclose(F.gather_row(g.ndata['x'], v), F.zeros((len(v), 3)))


if __name__ == '__main__':
    test_copy_src_reduce()
    test_copy_edge_reduce()
    test_all_binary_builtins()
# NOTE(review): mangled chunk — all newlines were stripped, so this physical
# line is not valid Python. It contains the tail of a feature-equality helper
# (its enclosing `for nty ...` loop and `def` line are outside this view, and
# the bare `continue` cannot be re-indented without inventing that loop),
# followed by `test_topology`, which may itself be truncated at the end of the
# chunk. Preserved byte-identical; restore the original line breaks from
# upstream history rather than hand-reconstructing.
if g1.number_of_nodes(nty) == 0: continue for feat_name in node_attrs[nty]: assert F.allclose(g1.nodes[nty].data[feat_name], g2.nodes[nty].data[feat_name]) if edge_attrs is not None: for ety in edge_attrs.keys(): if g1.number_of_edges(ety) == 0: continue for feat_name in edge_attrs[ety]: assert F.allclose(g1.edges[ety].data[feat_name], g2.edges[ety].data[feat_name]) @pytest.mark.parametrize('gs', get_cases(['two_hetero_batch'])) @parametrize_idtype def test_topology(gs, idtype): """Test batching two DGLHeteroGraphs where some nodes are isolated in some relations""" g1, g2 = gs g1 = g1.astype(idtype).to(F.ctx()) g2 = g2.astype(idtype).to(F.ctx()) bg = dgl.batch([g1, g2]) assert bg.idtype == idtype assert bg.device == F.ctx() assert bg.ntypes == g2.ntypes assert bg.etypes == g2.etypes assert bg.canonical_etypes == g2.canonical_etypes assert bg.batch_size == 2
# NOTE(review): mangled chunk — newlines stripped; not valid Python as-is. It
# holds the tail of what is presumably `create_test_heterograph(idtype)` (the
# `def` line is outside this view, and the bare `return g` cannot be placed at
# module level without inventing that header), followed by `test_edge_softmax`,
# which is cut off mid-body at the end of the chunk. Preserved byte-identical;
# restore line breaks from upstream history.
g = dgl.heterograph( { ('user', 'follows', 'user'): ([0, 1, 2, 1, 1], [0, 0, 1, 1, 2]), ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]), ('user', 'wishes', 'game'): ([0, 1, 1], [0, 0, 1]), ('developer', 'develops', 'game'): ([0, 1, 0], [0, 1, 1]), }, idtype=idtype, device=F.ctx()) assert g.idtype == idtype assert g.device == F.ctx() return g @pytest.mark.parametrize('g', get_cases(['clique'])) @pytest.mark.parametrize('norm_by', ['src', 'dst']) # @pytest.mark.parametrize('shp', edge_softmax_shapes) @parametrize_dtype def test_edge_softmax(g, norm_by, idtype): print("params", norm_by, idtype) g = create_test_heterograph(idtype) x1 = F.randn((g.num_edges('plays'), feat_size)) x2 = F.randn((g.num_edges('follows'), feat_size)) x3 = F.randn((g.num_edges('develops'), feat_size)) x4 = F.randn((g.num_edges('wishes'), feat_size)) F.attach_grad(F.clone(x1)) F.attach_grad(F.clone(x2))
# NOTE(review): newlines restored; tokens unchanged. The leading statements are
# a truncated fragment of an index-graph pickling test whose `def` line is not
# visible in this chunk.
gi2 = _reconstruct_pickle(gi)
assert gi2.number_of_nodes() == gi.number_of_nodes()
src_idx2, dst_idx2, _ = gi2.edges()
assert F.array_equal(src_idx.tousertensor(), src_idx2.tousertensor())
assert F.array_equal(dst_idx.tousertensor(), dst_idx2.tousertensor())


def _global_message_func(nodes):
    """Message function: forward the node feature 'x' unchanged."""
    return {'x': nodes.data['x']}


@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(exclude=['dglgraph', 'two_hetero_batch']))
def test_pickling_graph(g, idtype):
    """Check that a graph round-trips through pickle with features intact."""
    g = g.astype(idtype)
    new_g = _reconstruct_pickle(g)
    test_utils.check_graph_equal(g, new_g, check_feature=True)


@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_pickling_batched_heterograph():
    # copied from test_heterograph.create_test_heterograph()
    g = dgl.heterograph({
        ('user', 'follows', 'user'): ([0, 1], [1, 2]),
        ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
        ('developer', 'develops', 'game'): ([0, 1], [0, 1])
    })
    # NOTE(review): chunk is truncated here — the rest of this test body is not
    # visible in this view of the file.
# NOTE(review): mangled chunk — newlines stripped; not valid Python as-is. It
# largely duplicates the chunk above (tail of an index-graph pickling test,
# `_global_message_func`, `test_pickling_graph`, start of
# `test_pickling_batched_heterograph`) but is cut off mid-statement at the
# unterminated `g2 = dgl.heterograph({`, so no syntactically valid
# reconstruction is possible from this view alone. Preserved byte-identical;
# restore line breaks and the missing tail from upstream history.
gi.add_edges(src_idx, dst_idx) gi2 = _reconstruct_pickle(gi) assert gi2.number_of_nodes() == gi.number_of_nodes() src_idx2, dst_idx2, _ = gi2.edges() assert F.array_equal(src_idx.tousertensor(), src_idx2.tousertensor()) assert F.array_equal(dst_idx.tousertensor(), dst_idx2.tousertensor()) def _global_message_func(nodes): return {'x': nodes.data['x']} @unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented") @parametrize_dtype @pytest.mark.parametrize('g', get_cases(exclude=['dglgraph', 'two_hetero_batch'])) def test_pickling_graph(g, idtype): g = g.astype(idtype) new_g = _reconstruct_pickle(g) test_utils.check_graph_equal(g, new_g, check_feature=True) @unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented") def test_pickling_batched_heterograph(): # copied from test_heterograph.create_test_heterograph() g = dgl.heterograph({ ('user', 'follows', 'user'): ([0, 1], [1, 2]), ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]), ('user', 'wishes', 'game'): ([0, 2], [1, 0]), ('developer', 'develops', 'game'): ([0, 1], [0, 1]) }) g2 = dgl.heterograph({