Example #1
File: utils.py  Project: yuk12/dgl
def load_graph(name):
    cite_graphs = ['cora', 'citeseer', 'pubmed']

    if name in cite_graphs:
        dataset = CitationGraphDataset(name)
        graph = dataset[0]

        nodes = graph.nodes()
        y = graph.ndata['label']
        train_mask = graph.ndata['train_mask']
        val_mask = graph.ndata['test_mask']  # note: the test split doubles as the validation split here

        nodes_train, y_train = nodes[train_mask], y[train_mask]
        nodes_val, y_val = nodes[val_mask], y[val_mask]
        eval_set = [(nodes_train, y_train), (nodes_val, y_val)]

    elif name.startswith('ogbn'):
        dataset = DglNodePropPredDataset(name)
        graph, y = dataset[0]
        split_nodes = dataset.get_idx_split()
        nodes = graph.nodes()

        train_idx = split_nodes['train']
        val_idx = split_nodes['valid']

        nodes_train, y_train = nodes[train_idx], y[train_idx]
        nodes_val, y_val = nodes[val_idx], y[val_idx]
        eval_set = [(nodes_train, y_train), (nodes_val, y_val)]

    else:
        raise ValueError("Unknown dataset: {}".format(name))

    return graph, eval_set
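
A minimal usage sketch (assuming the imports this snippet relies on, CitationGraphDataset from dgl.data and DglNodePropPredDataset from ogb, are in scope):

graph, eval_set = load_graph('cora')
(nodes_train, y_train), (nodes_val, y_val) = eval_set
print(graph.number_of_nodes(), len(nodes_train), len(nodes_val))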
Example #2
def check_rpc_get_degree_shuffle(tmpdir, num_server):
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    partition_graph(g, 'test_get_degrees', num_parts, tmpdir,
                    num_hops=1, part_method='metis', reshuffle=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_get_degrees'))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    # map partition-local (shuffled) node IDs back to the original graph IDs
    orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']

    nids = F.tensor(np.random.randint(g.number_of_nodes(), size=100))
    in_degs, out_degs, all_in_degs, all_out_degs = start_get_degrees_client(0, tmpdir, num_server > 1, nids)

    print("Done get_degree")
    for p in pserver_list:
        p.join()

    print('check results')
    assert F.array_equal(g.in_degrees(orig_nid[nids]), in_degs)
    assert F.array_equal(g.in_degrees(orig_nid), all_in_degs)
    assert F.array_equal(g.out_degrees(orig_nid[nids]), out_degs)
    assert F.array_equal(g.out_degrees(orig_nid), all_out_degs)
Example #3
def check_standalone_etype_sampling(tmpdir, reshuffle):
    hg = CitationGraphDataset('cora')[0]
    num_parts = 1
    num_hops = 1

    partition_graph(hg,
                    'test_sampling',
                    num_parts,
                    tmpdir,
                    num_hops=num_hops,
                    part_method='metis',
                    reshuffle=reshuffle)
    os.environ['DGL_DIST_MODE'] = 'standalone'
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling",
                           part_config=tmpdir / 'test_sampling.json')
    sampled_graph = sample_etype_neighbors(dist_graph, [0, 10, 99, 66, 1023],
                                           dgl.ETYPE, 3)

    src, dst = sampled_graph.edges()
    assert sampled_graph.number_of_nodes() == hg.number_of_nodes()
    assert np.all(F.asnumpy(hg.has_edges_between(src, dst)))
    eids = hg.edge_ids(src, dst)
    assert np.array_equal(F.asnumpy(sampled_graph.edata[dgl.EID]),
                          F.asnumpy(eids))
    dgl.distributed.exit_client()
Example #4
def check_standalone_etype_sampling_heterograph(tmpdir, reshuffle):
    hg = CitationGraphDataset('cora')[0]
    num_parts = 1
    num_hops = 1
    src, dst = hg.edges()
    new_hg = dgl.heterograph(
        {
            ('paper', 'cite', 'paper'): (src, dst),
            ('paper', 'cite-by', 'paper'): (dst, src)
        }, {'paper': hg.number_of_nodes()})
    partition_graph(new_hg,
                    'test_hetero_sampling',
                    num_parts,
                    tmpdir,
                    num_hops=num_hops,
                    part_method='metis',
                    reshuffle=reshuffle)
    os.environ['DGL_DIST_MODE'] = 'standalone'
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_hetero_sampling",
                           part_config=tmpdir / 'test_hetero_sampling.json')
    sampled_graph = sample_etype_neighbors(
        dist_graph, [0, 1, 2, 10, 99, 66, 1023, 1024, 2700, 2701], dgl.ETYPE,
        1)
    src, dst = sampled_graph.edges(etype=('paper', 'cite', 'paper'))
    assert len(src) == 10
    src, dst = sampled_graph.edges(etype=('paper', 'cite-by', 'paper'))
    assert len(src) == 10
    assert sampled_graph.number_of_nodes() == new_hg.number_of_nodes()
    dgl.distributed.exit_client()
Example #5
def start_node_dataloader(rank, tmpdir, num_server, num_workers):
    import dgl
    import torch as th
    dgl.distributed.initialize("mp_ip_config.txt", 1, num_workers=num_workers)
    gpb = None
    disable_shared_mem = num_server > 1
    if disable_shared_mem:
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json',
                                               rank)
    num_nodes_to_sample = 202
    batch_size = 32
    train_nid = th.arange(num_nodes_to_sample)
    dist_graph = DistGraph("test_mp",
                           gpb=gpb,
                           part_config=tmpdir / 'test_sampling.json')

    orig_nid = F.zeros((dist_graph.number_of_nodes(), ), dtype=F.int64)
    orig_eid = F.zeros((dist_graph.number_of_edges(), ), dtype=F.int64)
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json',
                                                i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']

    # Create sampler
    sampler = dgl.dataloading.MultiLayerNeighborSampler([5, 10])

    # We need to test creating DistDataLoader multiple times.
    for i in range(2):
        # Create DataLoader for constructing blocks
        dataloader = dgl.dataloading.NodeDataLoader(dist_graph,
                                                    train_nid,
                                                    sampler,
                                                    batch_size=batch_size,
                                                    shuffle=True,
                                                    drop_last=False,
                                                    num_workers=num_workers)

        groundtruth_g = CitationGraphDataset("cora")[0]
        max_nid = []

        for epoch in range(2):
            for idx, (_, _,
                      blocks) in zip(range(0, num_nodes_to_sample, batch_size),
                                     dataloader):
                block = blocks[-1]
                o_src, o_dst = block.edges()
                src_nodes_id = block.srcdata[dgl.NID][o_src]
                dst_nodes_id = block.dstdata[dgl.NID][o_dst]
                src_nodes_id = orig_nid[src_nodes_id]
                dst_nodes_id = orig_nid[dst_nodes_id]
                has_edges = groundtruth_g.has_edges_between(
                    src_nodes_id, dst_nodes_id)
                assert np.all(F.asnumpy(has_edges))
                max_nid.append(np.max(F.asnumpy(dst_nodes_id)))
                # assert np.all(np.unique(np.sort(F.asnumpy(dst_nodes_id))) == np.arange(idx, batch_size))
    del dataloader
    dgl.distributed.exit_client()  # this is needed since there are two tests here in one process
Example #6
def start_dist_dataloader(rank, tmpdir, num_server, drop_last):
    import dgl
    import torch as th
    dgl.distributed.initialize("mp_ip_config.txt")
    gpb = None
    disable_shared_mem = num_server > 0
    if disable_shared_mem:
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
    num_nodes_to_sample = 202
    batch_size = 32
    train_nid = th.arange(num_nodes_to_sample)
    dist_graph = DistGraph("test_mp", gpb=gpb, part_config=tmpdir / 'test_sampling.json')

    orig_nid = F.arange(0, dist_graph.number_of_nodes())
    orig_eid = F.arange(0, dist_graph.number_of_edges())
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
        if 'orig_id' in part.ndata:
            orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        if 'orig_id' in part.edata:
            orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']

    # Create sampler
    sampler = NeighborSampler(dist_graph, [5, 10],
                              dgl.distributed.sample_neighbors)

    # We need to test creating DistDataLoader multiple times.
    for i in range(2):
        # Create DataLoader for constructing blocks
        dataloader = DistDataLoader(
            dataset=train_nid.numpy(),
            batch_size=batch_size,
            collate_fn=sampler.sample_blocks,
            shuffle=False,
            drop_last=drop_last)

        groundtruth_g = CitationGraphDataset("cora")[0]
        max_nid = []

        for epoch in range(2):
            for idx, blocks in zip(range(0, num_nodes_to_sample, batch_size), dataloader):
                block = blocks[-1]
                o_src, o_dst = block.edges()
                src_nodes_id = block.srcdata[dgl.NID][o_src]
                dst_nodes_id = block.dstdata[dgl.NID][o_dst]
                max_nid.append(np.max(F.asnumpy(dst_nodes_id)))

                src_nodes_id = orig_nid[src_nodes_id]
                dst_nodes_id = orig_nid[dst_nodes_id]
                has_edges = groundtruth_g.has_edges_between(src_nodes_id, dst_nodes_id)
                assert np.all(F.asnumpy(has_edges))
                # assert np.all(np.unique(np.sort(F.asnumpy(dst_nodes_id))) == np.arange(idx, batch_size))
            if drop_last:
                assert np.max(max_nid) == num_nodes_to_sample - 1 - num_nodes_to_sample % batch_size
            else:
                assert np.max(max_nid) == num_nodes_to_sample - 1
    del dataloader
    dgl.distributed.exit_client()  # this is needed since there are two tests here in one process
Example #7
def start_client(rank, tmpdir, disable_shared_mem, num_workers, drop_last):
    import dgl
    import torch as th
    os.environ['DGL_DIST_MODE'] = 'distributed'
    dgl.distributed.initialize("mp_ip_config.txt", num_workers=4)  # note: hard-codes 4 samplers instead of using the num_workers argument
    gpb = None
    if disable_shared_mem:
        _, _, _, gpb, _ = load_partition(tmpdir / 'test_sampling.json', rank)
    num_nodes_to_sample = 202
    batch_size = 32
    train_nid = th.arange(num_nodes_to_sample)
    dist_graph = DistGraph("mp_ip_config.txt", "test_mp", gpb=gpb)

    # Create sampler
    sampler = NeighborSampler(dist_graph, [5, 10],
                              dgl.distributed.sample_neighbors)

    # We need to test creating DistDataLoader multiple times.
    for i in range(2):
        # Create DataLoader for constructing blocks
        dataloader = DistDataLoader(dataset=train_nid.numpy(),
                                    batch_size=batch_size,
                                    collate_fn=sampler.sample_blocks,
                                    shuffle=False,
                                    drop_last=drop_last)

        groundtruth_g = CitationGraphDataset("cora")[0]
        max_nid = []

        for epoch in range(2):
            for idx, blocks in zip(range(0, num_nodes_to_sample, batch_size),
                                   dataloader):
                block = blocks[-1]
                o_src, o_dst = block.edges()
                src_nodes_id = block.srcdata[dgl.NID][o_src]
                dst_nodes_id = block.dstdata[dgl.NID][o_dst]
                has_edges = groundtruth_g.has_edges_between(
                    src_nodes_id, dst_nodes_id)
                assert np.all(F.asnumpy(has_edges))
                print(np.unique(np.sort(F.asnumpy(dst_nodes_id))))
                max_nid.append(np.max(F.asnumpy(dst_nodes_id)))
                # assert np.all(np.unique(np.sort(F.asnumpy(dst_nodes_id))) == np.arange(idx, batch_size))
            if drop_last:
                assert np.max(max_nid) == num_nodes_to_sample - 1 - num_nodes_to_sample % batch_size
            else:
                assert np.max(max_nid) == num_nodes_to_sample - 1

    dgl.distributed.exit_client()  # this is needed since there are two tests here in one process
Example #8
def check_rpc_in_subgraph_shuffle(tmpdir, num_server):
    with open("rpc_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{}\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    partition_graph(g,
                    'test_in_subgraph',
                    num_parts,
                    tmpdir,
                    num_hops=1,
                    part_method='metis',
                    reshuffle=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    nodes = [0, 10, 99, 66, 1024, 2008]
    time.sleep(3)
    sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
    for p in pserver_list:
        p.join()

    orig_nid = F.zeros((g.number_of_nodes(), ), dtype=F.int64, ctx=F.cpu())
    orig_eid = F.zeros((g.number_of_edges(), ), dtype=F.int64, ctx=F.cpu())
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(
            tmpdir / 'test_in_subgraph.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']

    src, dst = sampled_graph.edges()
    src = orig_nid[src]
    dst = orig_nid[dst]
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))

    subg1 = dgl.in_subgraph(g, orig_nid[nodes])
    src1, dst1 = subg1.edges()
    assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
    assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
    eids = g.edge_ids(src, dst)
    eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
    assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
Example #9
def get_dgl_citation(aggregation_type="sum", dataset="pubmed"):
    import dgl
    import torch
    from dgl.data import CitationGraphDataset

    tr_set = CitationGraphDataset(dataset)
    G = tr_set.graph

    e = len(G.edges)
    n = len(G.nodes)
    d_l = tr_set.features.shape[1]
    is_multilabel = False
    n_classes = tr_set.num_labels
    node_labels = torch.tensor(tr_set.features)  # node features, despite the variable name
    targets = torch.tensor(tr_set.labels)
    idx_train = torch.BoolTensor(tr_set.train_mask)
    idx_valid = torch.BoolTensor(tr_set.val_mask)
    idx_test = torch.BoolTensor(tr_set.test_mask)
    edges, agg_matrix = nx_to_format(G, aggregation_type)

    return Dataset(
        dataset,  # the requested dataset name (was hard-coded to "cora")
        n,
        e,
        d_l,
        is_multilabel,
        n_classes,
        edges,
        agg_matrix,
        node_labels,
        targets,
        idx_train,
        idx_valid,
        idx_test,
    )
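
A minimal usage sketch; Dataset and nx_to_format are project-specific helpers not shown in this excerpt:

ds = get_dgl_citation(aggregation_type="sum", dataset="pubmed")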
Example #10
def test_dist_dataloader(tmpdir, num_server, num_workers, drop_last):
    with open("mp_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{}\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=False)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(
            i, tmpdir, num_server > 1, num_workers+1))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    os.environ['DGL_DIST_MODE'] = 'distributed'
    start_client(0, tmpdir, num_server > 1, num_workers, drop_last)
    dgl.distributed.exit_client()  # this is needed since there are two tests here in one process
Example #11
def load_dataset(name):
    dataset = name.lower()
    if dataset == "amazon":
        from ogb.nodeproppred.dataset_dgl import DglNodePropPredDataset
        dataset = DglNodePropPredDataset(name="ogbn-products")
        splitted_idx = dataset.get_idx_split()
        train_nid = splitted_idx["train"]
        val_nid = splitted_idx["valid"]
        test_nid = splitted_idx["test"]
        g, labels = dataset[0]
        n_classes = int(labels.max() - labels.min() + 1)
        g.ndata['label'] = labels.squeeze()
        g.ndata['feat'] = g.ndata['feat'].float()
    elif dataset in ["reddit", "cora"]:
        if dataset == "reddit":
            from dgl.data import RedditDataset
            data = RedditDataset(self_loop=True)
            g = data[0]
        else:
            from dgl.data import CitationGraphDataset
            data = CitationGraphDataset('cora')
            g = data[0]
        n_classes = data.num_labels
        train_mask = g.ndata['train_mask']
        val_mask = g.ndata['val_mask']
        test_mask = g.ndata['test_mask']
        train_nid = torch.LongTensor(train_mask.nonzero().squeeze())
        val_nid = torch.LongTensor(val_mask.nonzero().squeeze())
        test_nid = torch.LongTensor(test_mask.nonzero().squeeze())
    else:
        raise ValueError("Dataset {} is not supported".format(name))

    return g, n_classes, train_nid, val_nid, test_nid
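
A typical call, assuming torch, dgl.data, and (for the "amazon" branch) ogb are installed:

g, n_classes, train_nid, val_nid, test_nid = load_dataset("cora")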
Example #12
def download(dataset):
    if dataset == 'cora':
        return CoraDataset()
    elif dataset in ('citeseer', 'pubmed'):
        return CitationGraphDataset(name=dataset)
    else:
        return None
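
Usage sketch (CoraDataset and CitationGraphDataset come from dgl.data; unknown names return None):

data = download('citeseer')
if data is not None:
    graph = data[0]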
Example #13
def test_standalone(tmpdir):
    reset_envs()
    generate_ip_config("mp_ip_config.txt", 1, 1)

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = 1
    num_hops = 1

    orig_nid, orig_eid = partition_graph(g,
                                         'test_sampling',
                                         num_parts,
                                         tmpdir,
                                         num_hops=num_hops,
                                         part_method='metis',
                                         reshuffle=True,
                                         return_mapping=True)

    os.environ['DGL_DIST_MODE'] = 'standalone'
    try:
        start_dist_dataloader(0, tmpdir, 1, True, orig_nid, orig_eid)
    except Exception as e:
        print(e)
    dgl.distributed.exit_client()  # this is needed since there are two tests here in one process
Example #14
def check_standalone_sampling(tmpdir):
    g = CitationGraphDataset("cora")[0]
    num_parts = 1
    num_hops = 1
    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=False)

    dist_graph = DistGraph(None, "test_sampling", conf_file=tmpdir / 'test_sampling.json')
    sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)

    src, dst = sampled_graph.edges()
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    eids = g.edge_ids(src, dst)
    assert np.array_equal(
        F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
Example #15
def test_dist_dataloader(tmpdir, num_server, drop_last):
    with open("mp_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{} 1\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    partition_graph(g,
                    'test_sampling',
                    num_parts,
                    tmpdir,
                    num_hops=num_hops,
                    part_method='metis',
                    reshuffle=False)

    num_workers = 4
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, num_workers + 1))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    sampled_graph = start_client(0, tmpdir, num_server > 1, num_workers,
                                 drop_last)
Example #16
def test_standalone(tmpdir):
    with open("mp_ip_config.txt", "w") as ip_config:
        ip_config.write('{}\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = 1
    num_hops = 1

    orig_nid, orig_eid = partition_graph(g,
                                         'test_sampling',
                                         num_parts,
                                         tmpdir,
                                         num_hops=num_hops,
                                         part_method='metis',
                                         reshuffle=True,
                                         return_mapping=True)

    os.environ['DGL_DIST_MODE'] = 'standalone'
    try:
        start_dist_dataloader(0, tmpdir, 1, True, orig_nid, orig_eid)
    except Exception as e:
        print(e)
    dgl.distributed.exit_client()  # this is needed since there are two tests here in one process
Example #17
    def __init__(self, name):
        t0 = time.time()
        self.name = name.lower()

        if self.name == 'cora':
            dataset = CoraDataset()
        else:
            dataset = CitationGraphDataset(self.name)
        dataset.graph.remove_edges_from(nx.selfloop_edges(dataset.graph))
        graph = dgl.DGLGraph(dataset.graph)
        E = graph.number_of_edges()
        N = graph.number_of_nodes()
        D = dataset.features.shape[1]
        graph.ndata['feat'] = torch.Tensor(dataset.features)
        graph.edata['feat'] = torch.zeros((E, D))
        graph.batch_num_nodes = [N]

        self.norm_n = torch.FloatTensor(N, 1).fill_(1. / float(N)).sqrt()
        self.norm_e = torch.FloatTensor(E, 1).fill_(1. / float(E)).sqrt()
        self.graph = graph
        self.train_mask = torch.BoolTensor(dataset.train_mask)
        self.val_mask = torch.BoolTensor(dataset.val_mask)
        self.test_mask = torch.BoolTensor(dataset.test_mask)
        self.labels = torch.LongTensor(dataset.labels)
        self.num_classes = dataset.num_labels
        self.num_dims = D

        print("[!] Dataset: ", self.name)

        print("Time taken: {:.4f}s".format(time.time() - t0))
Example #18
    def __init__(self, name):

        self.__name = name
        if name in graph_data:
            self.__graph_type = 'graph'
        elif name in node_data:
            self.__graph_type = 'node'
        else:
            print('Wrong dataset name! Falling back to MUTAG.')
            self.__name = 'MUTAG'

        # download dataset
        root = os.path.abspath(
            os.path.join(os.path.dirname(__file__), os.path.pardir))
        path = os.path.join(root, 'dataset', self.__graph_type)

        if not os.path.exists(path):
            os.makedirs(path)

        if not os.path.exists(os.path.join(path, self.__name)):
            if self.__name in graph_data:
                url = 'https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets/{0}.zip'.format(
                    self.__name)
                zip_path = '{0}/{1}.zip'.format(path, self.__name)
                print('downloading {0}'.format(self.__name))
                urllib.request.urlretrieve(url, zip_path)

                with zipfile.ZipFile(zip_path) as zf:
                    zf.extractall(path)
                os.remove(zip_path)

                # process the raw dataset
                self.graphs, self.aug_view = self.process_raw_data()
            else:
                self.cite_graph = CitationGraphDataset(name=self.__name)
Example #19
def test_dist_dataloader(tmpdir, num_server, num_workers, drop_last, reshuffle):
    with open("mp_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{}\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    orig_nid, orig_eid = partition_graph(g, 'test_sampling', num_parts, tmpdir,
                                         num_hops=num_hops, part_method='metis',
                                         reshuffle=reshuffle, return_mapping=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(
            i, tmpdir, num_server > 1, num_workers+1))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    os.environ['DGL_DIST_MODE'] = 'distributed'
    os.environ['DGL_NUM_SAMPLER'] = str(num_workers)
    ptrainer = ctx.Process(target=start_dist_dataloader, args=(
        0, tmpdir, num_server, drop_last, orig_nid, orig_eid))
    ptrainer.start()
    time.sleep(1)

    for p in pserver_list:
        p.join()
    ptrainer.join()
Example #20
def check_rpc_sampling_shuffle(tmpdir, num_server, num_groups=1):
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server
    num_hops = 1

    partition_graph(g,
                    'test_sampling',
                    num_parts,
                    tmpdir,
                    num_hops=num_hops,
                    part_method='metis',
                    reshuffle=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    keep_alive = num_groups > 1
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, 'test_sampling',
                              ['csc', 'coo'], keep_alive))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    pclient_list = []
    num_clients = 1
    for client_id in range(num_clients):
        for group_id in range(num_groups):
            p = ctx.Process(target=start_sample_client_shuffle,
                            args=(client_id, tmpdir, num_server > 1, g,
                                  num_server, group_id))
            p.start()
            time.sleep(1)  # avoid race condition when instantiating DistGraph
            pclient_list.append(p)
    for p in pclient_list:
        p.join()
    if keep_alive:
        for p in pserver_list:
            assert p.is_alive()
        # force shutdown server
        dgl.distributed.shutdown_servers("rpc_ip_config.txt", 1)
    for p in pserver_list:
        p.join()
Example #21
def check_rpc_sampling(tmpdir, num_server):
    with open("rpc_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{} 1\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=False)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
    print("Done sampling")
    for p in pserver_list:
        p.join()

    src, dst = sampled_graph.edges()
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    eids = g.edge_ids(src, dst)
    assert np.array_equal(
        F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
Example #22
def check_rpc_sampling_shuffle(tmpdir):
    num_server = 2
    with open("rpc_sampling_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{} 1\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server
    num_hops = 1

    partition_graph(g,
                    'test_sampling',
                    num_parts,
                    tmpdir,
                    num_hops=num_hops,
                    part_method='metis',
                    reshuffle=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    sampled_graph = start_client(0, tmpdir)
    print("Done sampling")
    for p in pserver_list:
        p.join()

    orig_nid = F.zeros((g.number_of_nodes(), ), dtype=F.int64)
    orig_eid = F.zeros((g.number_of_edges(), ), dtype=F.int64)
    for i in range(num_server):
        part, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']

    src, dst = sampled_graph.edges()
    src = orig_nid[src]
    dst = orig_nid[dst]
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    eids = g.edge_ids(src, dst)
    eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
    assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
Example #23
def test_dist_dataloader(tmpdir, num_server, num_workers, drop_last, reshuffle,
                         num_groups):
    reset_envs()
    # Use a single partition when running multiple client groups on one
    # machine, to avoid race conditions.
    if num_groups > 1:
        num_server = 1
    generate_ip_config("mp_ip_config.txt", num_server, num_server)

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    orig_nid, orig_eid = partition_graph(g,
                                         'test_sampling',
                                         num_parts,
                                         tmpdir,
                                         num_hops=num_hops,
                                         part_method='metis',
                                         reshuffle=reshuffle,
                                         return_mapping=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    keep_alive = num_groups > 1
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, num_workers + 1,
                              keep_alive))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    os.environ['DGL_DIST_MODE'] = 'distributed'
    os.environ['DGL_NUM_SAMPLER'] = str(num_workers)
    ptrainer_list = []
    num_trainers = 1
    for trainer_id in range(num_trainers):
        for group_id in range(num_groups):
            p = ctx.Process(target=start_dist_dataloader,
                            args=(trainer_id, tmpdir, num_server, drop_last,
                                  orig_nid, orig_eid, group_id))
            p.start()
            time.sleep(1)  # avoid race condition when instantiating DistGraph
            ptrainer_list.append(p)

    for p in ptrainer_list:
        p.join()
    if keep_alive:
        for p in pserver_list:
            assert p.is_alive()
        # force shutdown server
        dgl.distributed.shutdown_servers("mp_ip_config.txt", 1)
    for p in pserver_list:
        p.join()
Example #24
def check_rpc_find_edges(tmpdir, num_server):
    with open("rpc_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{} 1\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    partition_graph(g,
                    'test_find_edges',
                    num_parts,
                    tmpdir,
                    num_hops=1,
                    part_method='metis',
                    reshuffle=False)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, 'test_find_edges'))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
    u, v = g.find_edges(eids)
    du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
    assert F.array_equal(u, du)
    assert F.array_equal(v, dv)
Example #25
def check_rpc_find_edges_shuffle(tmpdir, num_server):
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    orig_nid, orig_eid = partition_graph(g,
                                         'test_find_edges',
                                         num_parts,
                                         tmpdir,
                                         num_hops=1,
                                         part_method='metis',
                                         reshuffle=True,
                                         return_mapping=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, 'test_find_edges',
                              ['csr', 'coo']))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
    u, v = g.find_edges(orig_eid[eids])
    du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
    du = orig_nid[du]
    dv = orig_nid[dv]
    assert F.array_equal(u, du)
    assert F.array_equal(v, dv)
Example #26
def check_rpc_find_edges_shuffle(tmpdir, num_server):
    with open("rpc_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{}\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    partition_graph(g,
                    'test_find_edges',
                    num_parts,
                    tmpdir,
                    num_hops=1,
                    part_method='metis',
                    reshuffle=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, 'test_find_edges',
                              ['csr', 'coo']))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    orig_nid = F.zeros((g.number_of_nodes(), ), dtype=F.int64, ctx=F.cpu())
    orig_eid = F.zeros((g.number_of_edges(), ), dtype=F.int64, ctx=F.cpu())
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(
            tmpdir / 'test_find_edges.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']

    time.sleep(3)
    eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
    u, v = g.find_edges(orig_eid[eids])
    du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
    du = orig_nid[du]
    dv = orig_nid[dv]
    assert F.array_equal(u, du)
    assert F.array_equal(v, dv)
Example #27
def load_dataset(name):
    dataset = name.lower()
    if dataset == "amazon":
        from ogb.nodeproppred.dataset_dgl import DglNodePropPredDataset
        dataset = DglNodePropPredDataset(name="ogbn-products")
        splitted_idx = dataset.get_idx_split()
        train_nid = splitted_idx["train"]
        val_nid = splitted_idx["valid"]
        test_nid = splitted_idx["test"]
        g, labels = dataset[0]
        labels = labels.squeeze()
        n_classes = int(labels.max() - labels.min() + 1)
        features = g.ndata.pop("feat").float()
    elif dataset in ["reddit", "cora"]:
        if dataset == "reddit":
            from dgl.data import RedditDataset
            data = RedditDataset(self_loop=True)
            g = data.graph
        else:
            from dgl.data import CitationGraphDataset
            data = CitationGraphDataset('cora')
            g = dgl.DGLGraph(data.graph)
        train_mask = data.train_mask
        val_mask = data.val_mask
        test_mask = data.test_mask
        features = torch.Tensor(data.features)
        labels = torch.LongTensor(data.labels)
        n_classes = data.num_labels
        train_nid = torch.LongTensor(np.nonzero(train_mask)[0])
        val_nid = torch.LongTensor(np.nonzero(val_mask)[0])
        test_nid = torch.LongTensor(np.nonzero(test_mask)[0])
    else:
        raise ValueError("Dataset {} is not supported".format(name))

    return g, features, labels, n_classes, train_nid, val_nid, test_nid
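
Same entry point as Example #11, but this variant also returns the feature matrix and labels:

g, features, labels, n_classes, train_nid, val_nid, test_nid = load_dataset("reddit")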
Example #28
def test_dataloader(tmpdir, num_server, num_workers, dataloader_type):
    with open("mp_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{}\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=False)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(
            i, tmpdir, num_server > 1, num_workers+1))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    time.sleep(3)
    os.environ['DGL_DIST_MODE'] = 'distributed'
    ptrainer_list = []
    if dataloader_type == 'node':
        p = ctx.Process(target=start_node_dataloader, args=(
            0, tmpdir, num_server > 1, num_workers))
        p.start()
        time.sleep(1)
        ptrainer_list.append(p)
    for p in pserver_list:
        p.join()
    for p in ptrainer_list:
        p.join()
Example #29
def check_rpc_in_subgraph(tmpdir, num_server):
    with open("rpc_ip_config.txt", "w") as ip_config:
        for _ in range(num_server):
            ip_config.write('{} 1\n'.format(get_local_usable_addr()))

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    partition_graph(g,
                    'test_in_subgraph',
                    num_parts,
                    tmpdir,
                    num_hops=1,
                    part_method='metis',
                    reshuffle=False)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server,
                        args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
        p.start()
        time.sleep(1)
        pserver_list.append(p)

    nodes = [0, 10, 99, 66, 1024, 2008]
    time.sleep(3)
    sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
    for p in pserver_list:
        p.join()

    src, dst = sampled_graph.edges()
    g = dgl.as_heterograph(g)
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    subg1 = dgl.in_subgraph(g, nodes)
    src1, dst1 = subg1.edges()
    assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
    assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
    eids = g.edge_ids(src, dst)
    assert np.array_equal(F.asnumpy(sampled_graph.edata[dgl.EID]),
                          F.asnumpy(eids))
Example #30
def test_neg_dataloader(tmpdir, num_server, num_workers):
    g = CitationGraphDataset("cora")[0]
    check_neg_dataloader(g, tmpdir, num_server, num_workers)
    g = create_random_hetero()
    check_neg_dataloader(g, tmpdir, num_server, num_workers)
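
Helpers like this are usually driven by pytest with parametrized fixtures; a minimal sketch (the parameter values below are illustrative assumptions, not from the source):

import pytest

@pytest.mark.parametrize("num_server", [1, 2])
@pytest.mark.parametrize("num_workers", [0, 4])
def test_neg_dataloader_param(tmpdir, num_server, num_workers):
    test_neg_dataloader(tmpdir, num_server, num_workers)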