def tensor_topo_traverse():
    # Reference topological traversal implemented with repeated sparse
    # matrix-vector products. Relies on `g` (a DGLGraph), `adjmat`
    # (its adjacency matrix) and the backend module `F` from the
    # enclosing scope. Nodes whose remaining degree drops to zero form
    # the next frontier; each frontier is yielded as an index tensor.
    n = g.number_of_nodes()
    mask = F.copy_to(F.ones((n, 1)), F.cpu())
    degree = F.spmm(adjmat, mask)
    while F.reduce_sum(mask) != 0.:
        v = F.astype((degree == 0.), F.float32)
        v = v * mask
        mask = mask - v
        frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())
        yield frontier
        degree -= F.spmm(adjmat, v)
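# A minimal usage sketch (not part of the original code): assuming `g` is an
# acyclic DGLGraph and `adjmat = g.adjacency_matrix()` as above, the frontiers
# yielded by tensor_topo_traverse() can be compared against DGL's built-in
# dgl.topological_nodes_generator(). The helper name `_check_topo_traverse`
# and the `toset` lambda are illustrative only.
def _check_topo_traverse():
    import dgl
    toset = lambda t: set(F.asnumpy(t).tolist())
    layers_dgl = list(dgl.topological_nodes_generator(g))
    layers_spmv = list(tensor_topo_traverse())
    # Same number of frontiers, and the same node set in each frontier.
    assert len(layers_dgl) == len(layers_spmv)
    assert all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv))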
def check_compute_func(worker_id, graph_name, return_dict):
    time.sleep(3)
    print("worker starts")
    try:
        g = create_graph_store(graph_name)
        if g is None:
            return_dict[worker_id] = -1
            return

        g._sync_barrier(60)
        in_feats = g.nodes[0].data['feat'].shape[1]

        # Test update all.
        g.update_all(fn.copy_src(src='feat', out='m'),
                     fn.sum(msg='m', out='preprocess'))
        adj = g.adjacency_matrix()
        tmp = F.spmm(adj, g.nodes[:].data['feat'])
        assert_almost_equal(F.asnumpy(g.nodes[:].data['preprocess']),
                            F.asnumpy(tmp))
        g._sync_barrier(60)
        check_array_shared_memory(g, worker_id, [g.nodes[:].data['preprocess']])

        # Test apply nodes.
        data = g.nodes[:].data['feat']
        g.apply_nodes(func=lambda nodes: {'feat': F.ones((1, in_feats)) * 10}, v=0)
        assert_almost_equal(F.asnumpy(data[0]),
                            np.squeeze(F.asnumpy(g.nodes[0].data['feat'])))

        # Test apply edges.
        data = g.edges[:].data['feat']
        g.apply_edges(func=lambda edges: {'feat': F.ones((1, in_feats)) * 10}, edges=0)
        assert_almost_equal(F.asnumpy(data[0]),
                            np.squeeze(F.asnumpy(g.edges[0].data['feat'])))

        g.init_ndata('tmp', (g.number_of_nodes(), 10), 'float32')
        data = g.nodes[:].data['tmp']

        # Test pull
        g.pull(1, fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='tmp'))
        assert_almost_equal(F.asnumpy(data[1]),
                            np.squeeze(F.asnumpy(g.nodes[1].data['preprocess'])))

        # Test send_and_recv
        in_edges = g.in_edges(v=2)
        g.send_and_recv(in_edges, fn.copy_src(src='feat', out='m'),
                        fn.sum(msg='m', out='tmp'))
        assert_almost_equal(
            F.asnumpy(data[2]),
            np.squeeze(F.asnumpy(g.nodes[2].data['preprocess'])))

        g.destroy()
        return_dict[worker_id] = 0
    except Exception as e:
        return_dict[worker_id] = -1
        g.destroy()
        print(e, file=sys.stderr)
        traceback.print_exc()
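# A hedged launch sketch (not part of the original test): check_compute_func
# is meant to run as a worker process against a shared-memory graph store, so
# a server process that publishes the graph under `graph_name` (via the same
# create_graph_store machinery) is assumed to be running already. The helper
# name `_run_compute_workers`, the default graph name, and the worker count
# below are illustrative only.
def _run_compute_workers(graph_name='test_graph', num_workers=2):
    import multiprocessing as mp
    manager = mp.Manager()
    return_dict = manager.dict()
    workers = [mp.Process(target=check_compute_func,
                          args=(i, graph_name, return_dict))
               for i in range(num_workers)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    # Every worker reports 0 on success and -1 on failure.
    assert all(return_dict[i] == 0 for i in range(num_workers))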