def test_standalone():
    """Run the DistGraph sanity checks in single-process standalone mode."""
    os.environ['DGL_DIST_MODE'] = 'standalone'
    g = create_random_graph(10000)
    # Partition the graph into a single part so the whole graph stays local.
    num_parts = 1
    graph_name = 'dist_graph_test_3'
    g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
    g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
    partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
    dgl.distributed.initialize("kv_ip_config.txt")
    dist_g = DistGraph(
        graph_name, part_config='/tmp/dist_graph/{}.json'.format(graph_name))
    try:
        # BUGFIX: the original caught any exception and merely printed it,
        # which silently masked test failures. Let failures propagate, but
        # always tear down the client in `finally` because there are two
        # tests running in this one process.
        check_dist_graph(dist_g, 1, g.number_of_nodes(), g.number_of_edges())
    finally:
        dgl.distributed.exit_client()
def create_random_hetero():
    """Build a small random heterograph (3 node types, 3 relations) with
    index-valued features on one node type and one edge type."""
    ntype_sizes = {'n1': 10000, 'n2': 10010, 'n3': 10020}
    relations = [('n1', 'r1', 'n2'), ('n1', 'r2', 'n3'), ('n2', 'r3', 'n3')]

    def _random_coo(src_ntype, _rel, dst_ntype):
        # Fixed random_state keeps the generated graph deterministic.
        mat = spsp.random(ntype_sizes[src_ntype], ntype_sizes[dst_ntype],
                          density=0.001, format='coo', random_state=100)
        return (mat.row, mat.col)

    edge_dict = {rel: _random_coo(*rel) for rel in relations}
    g = dgl.heterograph(edge_dict, ntype_sizes)
    g.nodes['n1'].data['feat'] = F.unsqueeze(
        F.arange(0, g.number_of_nodes('n1')), 1)
    g.edges['r1'].data['feat'] = F.unsqueeze(
        F.arange(0, g.number_of_edges('r1')), 1)
    return g
def check_dist_emb_server_client(shared_mem, num_servers, num_clients):
    """Exercise distributed embedding through real server/client subprocesses."""
    prepare_dist()
    g = create_random_graph(10000)
    # Partition the graph; one part keeps everything on this machine.
    # We cannot run multiple servers and clients on separate machines here.
    num_parts = 1
    graph_name = 'dist_graph_test_2'
    g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
    g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
    partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')

    spawn_ctx = mp.get_context('spawn')
    servers = []
    for sid in range(num_servers):
        proc = spawn_ctx.Process(
            target=run_server,
            args=(graph_name, sid, num_servers, num_clients, shared_mem))
        proc.start()
        servers.append(proc)

    clients = []
    for cid in range(num_clients):
        print('start client', cid)
        # Part id is always 0 because there is a single partition.
        proc = spawn_ctx.Process(
            target=run_emb_client,
            args=(graph_name, 0, num_servers, num_clients,
                  g.number_of_nodes(), g.number_of_edges()))
        proc.start()
        clients.append(proc)

    # Clients must exit cleanly; servers shut down after clients disconnect.
    for proc in clients:
        proc.join()
        assert proc.exitcode == 0
    for proc in servers:
        proc.join()
    print('clients have terminated')
def generate_graph():
    """Build a 10-node graph where node 0 feeds nodes 1..8, which all feed
    node 9, plus one back edge 9 -> 0. Random node/edge features attached."""
    g = dgl.DGLGraph()
    g.add_nodes(10)
    # 0 is the source and 9 is the sink; edge insertion order matters since
    # edge features are assigned positionally (17 edges total).
    for mid in range(1, 9):
        g.add_edge(0, mid)
        g.add_edge(mid, 9)
    # back flow from the sink to the source
    g.add_edge(9, 0)
    g.set_n_repr({'f1': F.randn((10, )), 'f2': F.randn((10, D))})
    edge_w = F.randn((17, ))
    g.set_e_repr({'e1': edge_w, 'e2': F.unsqueeze(edge_w, 1)})
    return g
def generate_graph(idtype):
    """Build the standard 10-node test graph (0 -> 1..8 -> 9 plus a 9 -> 0
    back edge) with the requested index dtype and backend context."""
    g = dgl.DGLGraph().astype(idtype).to(F.ctx())
    g.add_nodes(10)
    # 0 is the source, 9 the sink; keep insertion order stable because edge
    # features below are positional (17 edges).
    for hop in range(1, 9):
        g.add_edge(0, hop)
        g.add_edge(hop, 9)
    g.add_edge(9, 0)  # back flow from 9 to 0
    g.ndata.update({'f1': F.randn((10, )), 'f2': F.randn((10, D))})
    w = F.randn((17, ))
    g.edata.update({'e1': w, 'e2': F.unsqueeze(w, 1)})
    return g
def test_simple_readout():
    """Check sum/mean/max (optionally weighted) readouts on a plain graph
    and on a batched graph containing an edgeless component."""
    # g1: 3 nodes with a 3-edge cycle; g2: 4 isolated nodes (no edges).
    g1 = dgl.DGLGraph()
    g1.add_nodes(3)
    g2 = dgl.DGLGraph()
    g2.add_nodes(4)  # no edges
    g1.add_edges([0, 1, 2], [2, 0, 1])

    n1 = F.randn((3, 5))
    n2 = F.randn((4, 5))
    e1 = F.randn((3, 5))

    # Reference values computed directly with backend ops.
    s1 = F.sum(n1, 0)    # node sums
    s2 = F.sum(n2, 0)
    se1 = F.sum(e1, 0)   # edge sums
    m1 = F.mean(n1, 0)   # node means
    m2 = F.mean(n2, 0)
    me1 = F.mean(e1, 0)  # edge means
    w1 = F.randn((3, ))
    w2 = F.randn((4, ))
    max1 = F.max(n1, 0)
    max2 = F.max(n2, 0)
    maxe1 = F.max(e1, 0)
    ws1 = F.sum(n1 * F.unsqueeze(w1, 1), 0)
    ws2 = F.sum(n2 * F.unsqueeze(w2, 1), 0)
    wm1 = F.sum(n1 * F.unsqueeze(w1, 1), 0) / F.sum(F.unsqueeze(w1, 1), 0)
    wm2 = F.sum(n2 * F.unsqueeze(w2, 1), 0) / F.sum(F.unsqueeze(w2, 1), 0)

    g1.ndata['x'] = n1
    g2.ndata['x'] = n2
    g1.ndata['w'] = w1
    g2.ndata['w'] = w2
    g1.edata['x'] = e1

    # Single-graph readouts.
    assert F.allclose(dgl.sum_nodes(g1, 'x'), s1)
    assert F.allclose(dgl.sum_nodes(g1, 'x', 'w'), ws1)
    assert F.allclose(dgl.sum_edges(g1, 'x'), se1)
    assert F.allclose(dgl.mean_nodes(g1, 'x'), m1)
    assert F.allclose(dgl.mean_nodes(g1, 'x', 'w'), wm1)
    assert F.allclose(dgl.mean_edges(g1, 'x'), me1)
    assert F.allclose(dgl.max_nodes(g1, 'x'), max1)
    assert F.allclose(dgl.max_edges(g1, 'x'), maxe1)

    # Batched readouts produce one row per component graph.
    bg = dgl.batch([g1, g2])
    assert F.allclose(dgl.sum_nodes(bg, 'x'), F.stack([s1, s2], 0))
    assert F.allclose(dgl.mean_nodes(bg, 'x'), F.stack([m1, m2], 0))
    assert F.allclose(dgl.max_nodes(bg, 'x'), F.stack([max1, max2], 0))
    assert F.allclose(dgl.sum_nodes(bg, 'x', 'w'), F.stack([ws1, ws2], 0))
    assert F.allclose(dgl.mean_nodes(bg, 'x', 'w'), F.stack([wm1, wm2], 0))
    # Edge readouts over the edgeless component fall back to zeros.
    assert F.allclose(dgl.sum_edges(bg, 'x'), F.stack([se1, F.zeros(5)], 0))
    assert F.allclose(dgl.mean_edges(bg, 'x'), F.stack([me1, F.zeros(5)], 0))
    assert F.allclose(dgl.max_edges(bg, 'x'), F.stack([maxe1, F.zeros(5)], 0))
def atest_nx_conversion(index_dtype):
    # NOTE(review): the 'a' prefix keeps pytest from collecting this test —
    # presumably disabled intentionally; confirm before re-enabling.
    # check conversion between networkx and DGLGraph
    def _check_nx_feature(nxg, nf, ef):
        # check node and edge feature of nxg
        # this is used to check to_networkx
        num_nodes = len(nxg)
        num_edges = nxg.size()
        if num_nodes > 0:
            node_feat = ddict(list)
            for nid, attr in nxg.nodes(data=True):
                assert len(attr) == len(nf)
                for k in nxg.nodes[nid]:
                    node_feat[k].append(F.unsqueeze(attr[k], 0))
            for k in node_feat:
                feat = F.cat(node_feat[k], 0)
                assert F.allclose(feat, nf[k])
        else:
            assert len(nf) == 0
        if num_edges > 0:
            # Edge rows are placed by the 'id' attribute so they can be
            # compared against the original tensor order.
            edge_feat = ddict(lambda: [0] * num_edges)
            for u, v, attr in nxg.edges(data=True):
                assert len(attr) == len(ef) + 1  # extra id
                eid = attr['id']
                for k in ef:
                    edge_feat[k][eid] = F.unsqueeze(attr[k], 0)
            for k in edge_feat:
                feat = F.cat(edge_feat[k], 0)
                assert F.allclose(feat, ef[k])
        else:
            assert len(ef) == 0

    n1 = F.randn((5, 3))
    n2 = F.randn((5, 10))
    n3 = F.randn((5, 4))
    e1 = F.randn((4, 5))
    e2 = F.randn((4, 7))
    g = dgl.graph([(0, 2), (1, 4), (3, 0), (4, 3)], index_dtype=index_dtype)
    g.ndata.update({'n1': n1, 'n2': n2, 'n3': n3})
    g.edata.update({'e1': e1, 'e2': e2})

    # convert to networkx
    nxg = dgl.to_networkx(g, node_attrs=['n1', 'n3'], edge_attrs=['e1', 'e2'])
    assert len(nxg) == 5
    assert nxg.size() == 4
    _check_nx_feature(nxg, {'n1': n1, 'n3': n3}, {'e1': e1, 'e2': e2})

    # convert to DGLGraph, nx graph has id in edge feature
    # use id feature to test non-tensor copy
    g = dgl.graph(nxg, node_attrs=['n1'], edge_attrs=['e1', 'id'],
                  index_dtype=index_dtype)
    assert g._idtype_str == index_dtype
    # check graph size
    assert g.number_of_nodes() == 5
    assert g.number_of_edges() == 4
    # check number of features
    # test with existing dglgraph (so existing features should be cleared)
    assert len(g.ndata) == 1
    assert len(g.edata) == 2
    # check feature values
    assert F.allclose(g.ndata['n1'], n1)
    # with id in nx edge feature, e1 should follow original order
    assert F.allclose(g.edata['e1'], e1)
    assert F.array_equal(g.edata['id'], F.copy_to(F.arange(0, 4),
                                                  F.cpu()))

    # test conversion after modifying DGLGraph
    # TODO(minjie): enable after mutation is supported
    #g.pop_e_repr('id') # pop id so we don't need to provide id when adding edges
    #new_n = F.randn((2, 3))
    #new_e = F.randn((3, 5))
    #g.add_nodes(2, data={'n1': new_n})
    ## add three edges, one is a multi-edge
    #g.add_edges([3, 6, 0], [4, 5, 2], data={'e1': new_e})
    #n1 = F.cat((n1, new_n), 0)
    #e1 = F.cat((e1, new_e), 0)
    ## convert to networkx again
    #nxg = g.to_networkx(node_attrs=['n1'], edge_attrs=['e1'])
    #assert len(nxg) == 7
    #assert nxg.size() == 7
    #_check_nx_feature(nxg, {'n1': n1}, {'e1': e1})

    # now test convert from networkx without id in edge feature
    # first pop id in edge feature
    for _, _, attr in nxg.edges(data=True):
        attr.pop('id')
    # test with a new graph
    g = dgl.graph(nxg, node_attrs=['n1'], edge_attrs=['e1'])
    # check graph size
    assert g.number_of_nodes() == 5
    assert g.number_of_edges() == 4
    # check number of features
    assert len(g.ndata) == 1
    assert len(g.edata) == 1
    # check feature values
    assert F.allclose(g.ndata['n1'], n1)
    # edge feature order follows nxg.edges()
    edge_feat = []
    for _, _, attr in nxg.edges(data=True):
        edge_feat.append(F.unsqueeze(attr['e1'], 0))
    edge_feat = F.cat(edge_feat, 0)
    assert F.allclose(g.edata['e1'], edge_feat)

    # Test converting from a networkx graph whose nodes are
    # not labeled with consecutive-integers.
    nxg = nx.cycle_graph(5)
    nxg.remove_nodes_from([0, 4])
    for u in nxg.nodes():
        nxg.nodes[u]['h'] = F.tensor([u])
    for u, v, d in nxg.edges(data=True):
        d['h'] = F.tensor([u, v])
    # NOTE(review): this final section uses the old mutable-DGLGraph
    # `from_networkx` API, unlike the `dgl.graph(...)` calls above.
    g = dgl.DGLGraph()
    g.from_networkx(nxg, node_attrs=['h'], edge_attrs=['h'])
    assert g.number_of_nodes() == 3
    assert g.number_of_edges() == 4
    assert g.has_edge_between(0, 1)
    assert g.has_edge_between(1, 2)
    assert F.allclose(g.ndata['h'], F.tensor([[1.], [2.], [3.]]))
    assert F.allclose(g.edata['h'],
                      F.tensor([[1., 2.], [1., 2.], [2., 3.], [2., 3.]]))
def check_server_client_hierarchy(shared_mem, num_servers, num_clients):
    """Spawn servers plus hierarchical trainer clients and verify that the
    clients collectively return exactly the masked node/edge ids."""
    prepare_dist(num_servers)
    g = create_random_graph(10000)
    # Partition into a single part; everything runs on this machine.
    # We cannot run multiple servers and clients on separate machines here.
    num_parts = 1
    graph_name = 'dist_graph_test_2'
    g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
    g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
    partition_graph(g, graph_name, num_parts, '/tmp/dist_graph',
                    num_trainers_per_machine=num_clients)

    spawn_ctx = mp.get_context('spawn')
    servers = []
    for sid in range(num_servers):
        proc = spawn_ctx.Process(
            target=run_server,
            args=(graph_name, sid, num_servers, num_clients, shared_mem))
        proc.start()
        servers.append(proc)

    # Mark a random 10% of nodes and edges via 0/1 masks; each client gets
    # the masks and reports its share through the manager dict.
    manager = mp.Manager()
    return_dict = manager.dict()
    node_mask = np.zeros((g.number_of_nodes(), ), np.int32)
    edge_mask = np.zeros((g.number_of_edges(), ), np.int32)
    nodes = np.random.choice(
        g.number_of_nodes(), g.number_of_nodes() // 10, replace=False)
    edges = np.random.choice(
        g.number_of_edges(), g.number_of_edges() // 10, replace=False)
    node_mask[nodes] = 1
    edge_mask[edges] = 1
    nodes = np.sort(nodes)
    edges = np.sort(edges)

    clients = []
    for cid in range(num_clients):
        print('start client', cid)
        proc = spawn_ctx.Process(
            target=run_client_hierarchy,
            args=(graph_name, 0, num_servers, node_mask, edge_mask,
                  return_dict))
        proc.start()
        clients.append(proc)

    for proc in clients:
        proc.join()
    for proc in servers:
        proc.join()

    # The sorted union of all client shares must equal the masked ids.
    got_nodes = [n for n, _ in return_dict.values()]
    got_edges = [e for _, e in return_dict.values()]
    sorted_nodes, _ = F.sort_1d(F.cat(got_nodes, 0))
    sorted_edges, _ = F.sort_1d(F.cat(got_edges, 0))
    assert np.all(F.asnumpy(sorted_nodes) == nodes)
    assert np.all(F.asnumpy(sorted_edges) == edges)
    print('clients have terminated')
def test_level2():
    """Exercise the level-2 heterograph message-passing APIs
    (send_and_recv / pull / update_all and their multi-etype variants),
    including cross-type reducers and expected-failure cases."""
    #edges = {
    #    'follows': ([0, 1], [1, 2]),
    #    'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
    #    'wishes': ([0, 2], [1, 0]),
    #    'develops': ([0, 1], [0, 1]),
    #}
    g = create_test_heterograph()

    # Reducer: sum incoming messages.
    def rfunc(nodes):
        return {'y': F.sum(nodes.mailbox['m'], 1)}

    # Alternate reducer: max over incoming messages.
    def rfunc2(nodes):
        return {'y': F.max(nodes.mailbox['m'], 1)}

    # Message function: forward the source feature.
    def mfunc(edges):
        return {'m': edges.src['h']}

    # Apply function: +1 on the aggregated result.
    def afunc(nodes):
        return {'y': nodes.data['y'] + 1}

    #############################################################
    # send_and_recv
    #############################################################
    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.send_and_recv([2, 3], mfunc, rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # only one type
    g['plays'].send_and_recv([2, 3], mfunc, rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # test fail case
    # fail due to multiple types
    fail = False
    try:
        g.send_and_recv([2, 3], mfunc, rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi
    g.multi_send_and_recv(
        {
            'plays': (g.edges(etype='plays'), mfunc, rfunc),
            ('user', 'wishes', 'game'): (g.edges(etype='wishes'), mfunc, rfunc2)
        }, 'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[3., 3.], [3., 3.]]))

    # test multi
    g.multi_send_and_recv(
        {
            'plays': (g.edges(etype='plays'), mfunc, rfunc, afunc),
            ('user', 'wishes', 'game'): (g.edges(etype='wishes'), mfunc, rfunc2)
        }, 'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[5., 5.], [5., 5.]]))

    # test cross reducer: per-etype result combined by `cred`, then a final
    # afunc (+1); compare against manually running each etype separately.
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean']:
        g.multi_send_and_recv(
            {
                'plays': (g.edges(etype='plays'), mfunc, rfunc, afunc),
                'wishes': (g.edges(etype='wishes'), mfunc, rfunc2)
            }, cred, afunc)
        y = g.nodes['game'].data['y']
        g['plays'].send_and_recv(g.edges(etype='plays'), mfunc, rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g['wishes'].send_and_recv(g.edges(etype='wishes'), mfunc, rfunc2)
        y2 = g.nodes['game'].data['y']
        yy = get_redfn(cred)(F.stack([y1, y2], 0), 0)
        yy = yy + 1  # final afunc
        assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    fail = False
    try:
        g.multi_send_and_recv(
            {
                'plays': (g.edges(etype='plays'), mfunc, rfunc),
                'follows': (g.edges(etype='follows'), mfunc, rfunc2)
            }, 'sum')
    except dgl.DGLError:
        fail = True
    assert fail

    g.nodes['game'].data.clear()

    #############################################################
    # pull
    #############################################################
    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.pull(1, mfunc, rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # only one type
    g['plays'].pull(1, mfunc, rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # test fail case
    fail = False
    try:
        g.pull(1, mfunc, rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi
    g.multi_pull(1, {
        'plays': (mfunc, rfunc),
        ('user', 'wishes', 'game'): (mfunc, rfunc2)
    }, 'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[0., 0.], [3., 3.]]))

    # test multi
    g.multi_pull(
        1, {
            'plays': (mfunc, rfunc, afunc),
            ('user', 'wishes', 'game'): (mfunc, rfunc2)
        }, 'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[0., 0.], [5., 5.]]))

    # test cross reducer (same manual-comparison scheme as above, but the
    # final afunc is replayed via apply_nodes on node 1 only).
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean']:
        g.multi_pull(1, {
            'plays': (mfunc, rfunc, afunc),
            'wishes': (mfunc, rfunc2)
        }, cred, afunc)
        y = g.nodes['game'].data['y']
        g['plays'].pull(1, mfunc, rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g['wishes'].pull(1, mfunc, rfunc2)
        y2 = g.nodes['game'].data['y']
        g.nodes['game'].data['y'] = get_redfn(cred)(F.stack([y1, y2], 0), 0)
        g.apply_nodes(afunc, 1, ntype='game')
        yy = g.nodes['game'].data['y']
        assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    fail = False
    try:
        g.multi_pull(1, {
            'plays': (mfunc, rfunc),
            'follows': (mfunc, rfunc2)
        }, 'sum')
    except dgl.DGLError:
        fail = True
    assert fail

    g.nodes['game'].data.clear()

    #############################################################
    # update_all
    #############################################################
    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.update_all(mfunc, rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[2., 2.], [2., 2.]]))

    # only one type
    g['plays'].update_all(mfunc, rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[2., 2.], [2., 2.]]))

    # test fail case
    # fail due to multiple types
    fail = False
    try:
        g.update_all(mfunc, rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi
    g.multi_update_all(
        {
            'plays': (mfunc, rfunc),
            ('user', 'wishes', 'game'): (mfunc, rfunc2)
        }, 'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[3., 3.], [3., 3.]]))

    # test multi
    g.multi_update_all(
        {
            'plays': (mfunc, rfunc, afunc),
            ('user', 'wishes', 'game'): (mfunc, rfunc2)
        }, 'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[5., 5.], [5., 5.]]))

    # test cross reducer ('stack' included here; its per-etype order is not
    # guaranteed, so both orderings are accepted below).
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean', 'stack']:
        g.multi_update_all(
            {
                'plays': (mfunc, rfunc, afunc),
                'wishes': (mfunc, rfunc2)
            }, cred, afunc)
        y = g.nodes['game'].data['y']
        g['plays'].update_all(mfunc, rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g['wishes'].update_all(mfunc, rfunc2)
        y2 = g.nodes['game'].data['y']
        if cred == 'stack':
            # stack has two both correct outcomes
            yy1 = F.stack([F.unsqueeze(y1, 1), F.unsqueeze(y2, 1)], 1)
            yy1 = yy1 + 1  # final afunc
            yy2 = F.stack([F.unsqueeze(y2, 1), F.unsqueeze(y1, 1)], 1)
            yy2 = yy2 + 1  # final afunc
            assert F.array_equal(y, yy1) or F.array_equal(y, yy2)
        else:
            yy = get_redfn(cred)(F.stack([y1, y2], 0), 0)
            yy = yy + 1  # final afunc
            assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    # NOTE(review): this passes a dict to update_all (not multi_update_all);
    # it still expects DGLError — confirm this is the intended fail mode.
    fail = False
    try:
        g.update_all({
            'plays': (mfunc, rfunc),
            'follows': (mfunc, rfunc2)
        }, 'sum')
    except dgl.DGLError:
        fail = True
    assert fail

    g.nodes['game'].data.clear()
def src_mul_edge_udf(edges):
    """Message UDF: multiply the source feature 'h' by the per-edge scalar
    'h', expanded with two trailing singleton dims for broadcasting."""
    edge_scale = F.unsqueeze(F.unsqueeze(edges.data['h'], 1), 1)
    return {'sum': edges.src['h'] * edge_scale}
def _mfunc_hxw1(edges):
    """Message UDF: weight the source feature 'h' by the per-edge value
    'w1' (unsqueezed for broadcasting) into message 'm1'."""
    weight = F.unsqueeze(edges.data['w1'], 1)
    return {'m1': edges.src['h'] * weight}
def test_nx_conversion():
    """Round-trip a mutable DGLGraph through networkx (to_networkx /
    from_networkx), including after in-place mutation, and check features."""
    # check conversion between networkx and DGLGraph
    def _check_nx_feature(nxg, nf, ef):
        # check node and edge feature of nxg
        # this is used to check to_networkx
        num_nodes = len(nxg)
        num_edges = nxg.size()
        if num_nodes > 0:
            node_feat = ddict(list)
            for nid, attr in nxg.nodes(data=True):
                assert len(attr) == len(nf)
                for k in nxg.nodes[nid]:
                    node_feat[k].append(F.unsqueeze(attr[k], 0))
            for k in node_feat:
                feat = F.cat(node_feat[k], 0)
                assert F.allclose(feat, nf[k])
        else:
            assert len(nf) == 0
        if num_edges > 0:
            # Edge rows are placed by the 'id' attribute so they line up
            # with the original tensor order.
            edge_feat = ddict(lambda: [0] * num_edges)
            for u, v, attr in nxg.edges(data=True):
                assert len(attr) == len(ef) + 1  # extra id
                eid = attr['id']
                for k in ef:
                    edge_feat[k][eid] = F.unsqueeze(attr[k], 0)
            for k in edge_feat:
                feat = F.cat(edge_feat[k], 0)
                assert F.allclose(feat, ef[k])
        else:
            assert len(ef) == 0

    n1 = F.randn((5, 3))
    n2 = F.randn((5, 10))
    n3 = F.randn((5, 4))
    e1 = F.randn((4, 5))
    e2 = F.randn((4, 7))
    g = DGLGraph(multigraph=True)
    g.add_nodes(5)
    g.add_edges([0,1,3,4], [2,4,0,3])
    g.ndata.update({'n1': n1, 'n2': n2, 'n3': n3})
    g.edata.update({'e1': e1, 'e2': e2})

    # convert to networkx
    nxg = g.to_networkx(node_attrs=['n1', 'n3'], edge_attrs=['e1', 'e2'])
    assert len(nxg) == 5
    assert nxg.size() == 4
    _check_nx_feature(nxg, {'n1': n1, 'n3': n3}, {'e1': e1, 'e2': e2})

    # convert to DGLGraph, nx graph has id in edge feature
    # use id feature to test non-tensor copy
    g.from_networkx(nxg, node_attrs=['n1'], edge_attrs=['e1', 'id'])
    # check graph size
    assert g.number_of_nodes() == 5
    assert g.number_of_edges() == 4
    # check number of features
    # test with existing dglgraph (so existing features should be cleared)
    assert len(g.ndata) == 1
    assert len(g.edata) == 2
    # check feature values
    assert F.allclose(g.ndata['n1'], n1)
    # with id in nx edge feature, e1 should follow original order
    assert F.allclose(g.edata['e1'], e1)
    assert F.array_equal(g.get_e_repr()['id'], F.arange(0, 4))

    # test conversion after modifying DGLGraph
    g.pop_e_repr('id')  # pop id so we don't need to provide id when adding edges
    new_n = F.randn((2, 3))
    new_e = F.randn((3, 5))
    g.add_nodes(2, data={'n1': new_n})
    # add three edges, one is a multi-edge
    g.add_edges([3, 6, 0], [4, 5, 2], data={'e1': new_e})
    n1 = F.cat((n1, new_n), 0)
    e1 = F.cat((e1, new_e), 0)
    # convert to networkx again
    nxg = g.to_networkx(node_attrs=['n1'], edge_attrs=['e1'])
    assert len(nxg) == 7
    assert nxg.size() == 7
    _check_nx_feature(nxg, {'n1': n1}, {'e1': e1})

    # now test convert from networkx without id in edge feature
    # first pop id in edge feature
    for _, _, attr in nxg.edges(data=True):
        attr.pop('id')
    # test with a new graph
    g = DGLGraph(multigraph=True)
    g.from_networkx(nxg, node_attrs=['n1'], edge_attrs=['e1'])
    # check graph size
    assert g.number_of_nodes() == 7
    assert g.number_of_edges() == 7
    # check number of features
    assert len(g.ndata) == 1
    assert len(g.edata) == 1
    # check feature values
    assert F.allclose(g.ndata['n1'], n1)
    # edge feature order follows nxg.edges()
    edge_feat = []
    for _, _, attr in nxg.edges(data=True):
        edge_feat.append(F.unsqueeze(attr['e1'], 0))
    edge_feat = F.cat(edge_feat, 0)
    assert F.allclose(g.edata['e1'], edge_feat)