Example #1
import dgl
import numpy as np
import torch as th

def main(args):
    # Initialize the PyTorch process group unless running in standalone mode.
    if not args.standalone:
        th.distributed.init_process_group(backend="gloo")
    # Connect to the distributed graph; conf_file points to the partition
    # configuration produced when the graph was partitioned.
    g = dgl.distributed.DistGraph(args.ip_config,
                                  args.graph_name,
                                  conf_file=args.conf_path)
    print("rank:", g.rank())

    # Split the global train/val/test masks evenly across the trainers so
    # each rank gets a disjoint set of seed nodes.
    train_nid = dgl.distributed.node_split(g.ndata["train_mask"],
                                           g.get_partition_book(),
                                           force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata["val_mask"],
                                         g.get_partition_book(),
                                         force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata["test_mask"],
                                          g.get_partition_book(),
                                          force_even=True)
    print("part {}, train: {}, val: {}, test: {}".format(
        g.rank(), len(train_nid), len(val_nid), len(test_nid)))
    device = th.device("cpu")
    # Infer the number of classes by pulling all labels and counting the
    # distinct values.
    n_classes = len(
        th.unique(g.ndata["labels"][np.arange(g.number_of_nodes())]))

    # Pack data
    in_feats = g.ndata["features"].shape[1]
    data = train_nid, val_nid, in_feats, n_classes, g
    run(args, device, data)  # run() is the training loop, defined elsewhere
    print("parent ends")
Example #2
import dgl
import numpy as np
import torch as th

def main(args):
    # Initialize the PyTorch process group for distributed training.
    th.distributed.init_process_group(backend='gloo')
    # Connect to the distributed graph served at the addresses in ip_config.
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name)
    print('rank:', g.rank())

    train_nid = dgl.distributed.node_split(g.ndata['train_mask'],
                                           g.get_partition_book(),
                                           force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata['val_mask'],
                                         g.get_partition_book(),
                                         force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata['test_mask'],
                                          g.get_partition_book(),
                                          force_even=True)
    print('part {}, train: {}, val: {}, test: {}'.format(
        g.rank(), len(train_nid), len(val_nid), len(test_nid)))
    device = th.device('cpu')
    n_classes = len(
        th.unique(g.ndata['labels'][np.arange(g.number_of_nodes())]))

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends")
Example #3
import dgl
import numpy as np
import torch as th

def main(args):
    if not args.standalone:
        th.distributed.init_process_group(backend='gloo')
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name, conf_file=args.conf_path)
    print('rank:', g.rank())

    pb = g.get_partition_book()
    train_nid = dgl.distributed.node_split(g.ndata['train_mask'], pb, force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata['val_mask'], pb, force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata['test_mask'], pb, force_even=True)
    # IDs of the nodes stored in this rank's partition, used below to report
    # how many of the assigned seeds are local.
    local_nid = pb.partid2nids(pb.partid).detach().numpy()
    print('part {}, train: {} (local: {}), val: {} (local: {}), test: {} (local: {})'.format(
        g.rank(), len(train_nid), len(np.intersect1d(train_nid.numpy(), local_nid)),
        len(val_nid), len(np.intersect1d(val_nid.numpy(), local_nid)),
        len(test_nid), len(np.intersect1d(test_nid.numpy(), local_nid))))
    device = th.device('cpu')
    labels = g.ndata['labels'][np.arange(g.number_of_nodes())]
    # Ignore NaN entries (unlabeled nodes) when counting the classes.
    n_classes = len(th.unique(labels[th.logical_not(th.isnan(labels))]))
    print('#labels:', n_classes)

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends")