Example No. 1
def test_subframes(parent_idx_device, child_device):
    parent_device, idx_device = parent_idx_device
    g = dgl.graph((F.tensor([1, 2, 3], dtype=F.int64),
                   F.tensor([2, 3, 4], dtype=F.int64)))
    print(g.device)
    g.ndata['x'] = F.randn((5, 4))
    g.edata['a'] = F.randn((3, 6))
    idx = F.tensor([1, 2], dtype=F.int64)
    if parent_device == 'cuda':
        g = g.to(F.cuda())
    elif parent_device == 'uva':
        g = g.to(F.cpu())
        g.create_formats_()
        g.pin_memory_()
    elif parent_device == 'cpu':
        g = g.to(F.cpu())
    idx = F.copy_to(idx, idx_device)
    sg = g.sample_neighbors(idx, 2).to(child_device)
    assert sg.device == sg.ndata['x'].device
    assert sg.device == sg.edata['a'].device
    assert sg.device == child_device
    if parent_device != 'uva':
        sg = g.to(child_device).sample_neighbors(F.copy_to(idx, child_device),
                                                 2)
        assert sg.device == sg.ndata['x'].device
        assert sg.device == sg.edata['a'].device
        assert sg.device == child_device
    if parent_device == 'uva':
        g.unpin_memory_()
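
Note: every snippet in this listing comes from DGL's test suite, where `F` is the backend-agnostic test shim rather than the public API. Roughly the following imports are assumed by each example (an assumption about the surrounding test modules, not part of the snippets themselves):

import numpy as np
import dgl
import backend as F   # DGL's tests/backend shim: F.tensor, F.copy_to, F.ctx(), F.cpu(), ...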
Example No. 2
def test_subframes(parent_idx_device, child_device):
    parent_device, idx_device = parent_idx_device
    g = dgl.graph((F.tensor([1,2,3], dtype=F.int64), F.tensor([2,3,4], dtype=F.int64)))
    print(g.device)
    g.ndata['x'] = F.randn((5, 4))
    g.edata['a'] = F.randn((3, 6))
    idx = F.tensor([1, 2], dtype=F.int64)
    if parent_device == 'cuda':
        g = g.to(F.cuda())
    elif parent_device == 'uva':
        if F.backend_name != 'pytorch':
            pytest.skip("UVA only supported for PyTorch")
        g = g.to(F.cpu())
        g.create_formats_()
        g.pin_memory_()
    elif parent_device == 'cpu':
        g = g.to(F.cpu())
    idx = F.copy_to(idx, idx_device)
    sg = g.sample_neighbors(idx, 2).to(child_device)
    assert sg.device == F.context(sg.ndata['x'])
    assert sg.device == F.context(sg.edata['a'])
    assert sg.device == child_device
    if parent_device != 'uva':
        sg = g.to(child_device).sample_neighbors(F.copy_to(idx, child_device), 2)
        assert sg.device == F.context(sg.ndata['x'])
        assert sg.device == F.context(sg.edata['a'])
        assert sg.device == child_device
    if parent_device == 'uva':
        g.unpin_memory_()
Example No. 3
def test_segment_reduce(reducer):
    ctx = F.ctx()
    value = F.tensor(np.random.rand(10, 5))
    v1 = F.attach_grad(F.clone(value))
    v2 = F.attach_grad(F.clone(value))
    seglen = F.tensor([2, 3, 0, 4, 1, 0, 0])
    u = F.copy_to(F.arange(0, F.shape(value)[0], F.int32), ctx)
    v = F.repeat(F.copy_to(F.arange(0, len(seglen), F.int32), ctx),
                 seglen,
                 dim=0)

    num_nodes = {'_U': len(u), '_V': len(seglen)}
    g = dgl.convert.heterograph({('_U', '_E', '_V'): (u, v)},
                                num_nodes_dict=num_nodes)
    with F.record_grad():
        rst1 = gspmm(g, 'copy_lhs', reducer, v1, None)
        if reducer in ['max', 'min']:
            rst1 = F.replace_inf_with_zero(rst1)
        F.backward(F.reduce_sum(rst1))
        grad1 = F.grad(v1)

    with F.record_grad():
        rst2 = segment_reduce(seglen, v2, reducer=reducer)
        F.backward(F.reduce_sum(rst2))
        assert F.allclose(rst1, rst2)
        print('forward passed')

        grad2 = F.grad(v2)
        assert F.allclose(grad1, grad2)
        print('backward passed')
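
As a quick sanity sketch of what `segment_reduce` computes with a 'sum' reducer: rows of `value` are grouped into consecutive segments of lengths `seglen` and reduced per segment, with empty segments producing zeros. A plain-NumPy restatement (illustrative only, not the test's code):

import numpy as np

value = np.random.rand(10, 5)
seglen = [2, 3, 0, 4, 1, 0, 0]                       # lengths sum to value.shape[0]
offsets = np.cumsum([0] + seglen)
expected = np.stack([value[s:e].sum(axis=0)          # empty slice -> zeros of shape (5,)
                     for s, e in zip(offsets[:-1], offsets[1:])])
assert expected.shape == (len(seglen), 5)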
Example No. 4
def test_filter():
    g = DGLGraph()
    g.add_nodes(4)
    g.add_edges([0, 1, 2, 3], [1, 2, 3, 0])

    n_repr = np.zeros((4, 5))
    e_repr = np.zeros((4, 5))
    n_repr[[1, 3]] = 1
    e_repr[[1, 3]] = 1
    n_repr = F.copy_to(F.zerocopy_from_numpy(n_repr), F.ctx())
    e_repr = F.copy_to(F.zerocopy_from_numpy(e_repr), F.ctx())

    g.ndata['a'] = n_repr
    g.edata['a'] = e_repr

    def predicate(r):
        return F.max(r.data['a'], 1) > 0

    # full node filter
    n_idx = g.filter_nodes(predicate)
    assert set(F.zerocopy_to_numpy(n_idx)) == {1, 3}

    # partial node filter
    n_idx = g.filter_nodes(predicate, [0, 1])
    assert set(F.zerocopy_to_numpy(n_idx)) == {1}

    # full edge filter
    e_idx = g.filter_edges(predicate)
    assert set(F.zerocopy_to_numpy(e_idx)) == {1, 3}

    # partial edge filter
    e_idx = g.filter_edges(predicate, [0, 1])
    assert set(F.zerocopy_to_numpy(e_idx)) == {1}
Example No. 5
def _random_simple_graph(idtype, dtype, ctx, M, N, max_nnz, srctype, dsttype,
                         etype):
    src = np.random.randint(0, M, (max_nnz, ))
    dst = np.random.randint(0, N, (max_nnz, ))
    val = np.random.randn(max_nnz)
    a = ssp.csr_matrix((val, (src, dst)), shape=(M, N))
    a.sum_duplicates()
    a = a.tocoo()
    # shuffle edges
    perm = np.random.permutation(a.nnz)
    row = a.row[perm]
    col = a.col[perm]
    val = a.data[perm]
    a = ssp.csr_matrix((val, (row, col)), shape=(M, N))

    A = dgl.heterograph(
        {(srctype, etype, dsttype): (F.copy_to(F.tensor(row, dtype=idtype), ctx),
                                     F.copy_to(F.tensor(col, dtype=idtype), ctx))},
        num_nodes_dict={srctype: a.shape[0], dsttype: a.shape[1]})
    A.edata['w'] = F.copy_to(F.tensor(val, dtype=dtype), ctx)
    return a, A
Example No. 6
def graph1():
    g = dgl.graph(([0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 6, 6, 7, 8, 9],
                   [4, 5, 1, 2, 4, 7, 9, 8, 6, 4, 1, 0, 1, 0, 2, 3, 5]),
                  device=F.cpu())
    g.ndata['h'] = F.copy_to(F.randn((g.number_of_nodes(), 2)), F.cpu())
    g.edata['w'] = F.copy_to(F.randn((g.number_of_edges(), 3)), F.cpu())
    return g
Example No. 7
def heterograph0():
    g = dgl.heterograph({
        ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 1, 1]),
        ('developer', 'develops', 'game'): ([0, 1], [0, 1])}, device=F.cpu())
    g.nodes['user'].data['h'] = F.copy_to(F.randn((g.number_of_nodes('user'), 3)), F.cpu())
    g.nodes['game'].data['h'] = F.copy_to(F.randn((g.number_of_nodes('game'), 2)), F.cpu())
    g.nodes['developer'].data['h'] = F.copy_to(F.randn((g.number_of_nodes('developer'), 3)), F.cpu())
    g.edges['plays'].data['h'] = F.copy_to(F.randn((g.number_of_edges('plays'), 1)), F.cpu())
    g.edges['develops'].data['h'] = F.copy_to(F.randn((g.number_of_edges('develops'), 5)), F.cpu())
    return g
Example No. 8
def tensor_topo_traverse():
    # Nested helper from the enclosing test: `g` (the graph) and `adjmat` (its
    # sparse adjacency matrix) are captured from the surrounding scope.
    n = g.number_of_nodes()
    mask = F.copy_to(F.ones((n, 1)), F.cpu())
    degree = F.spmm(adjmat, mask)
    while F.reduce_sum(mask) != 0.:
        v = F.astype((degree == 0.), F.float32)
        v = v * mask
        mask = mask - v
        frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())
        yield frontier
        degree -= F.spmm(adjmat, v)
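
A minimal sketch of the surrounding setup this generator assumes, on the old DGLGraph API used here (the exact construction is a guess, included only to make the snippet self-contained):

import dgl
import backend as F

g = dgl.DGLGraph([(0, 1), (0, 2), (1, 3), (2, 3)])    # a small DAG
adjmat = g.adjacency_matrix(ctx=F.cpu())              # sparse adjacency consumed by F.spmm

for frontier in tensor_topo_traverse():               # yields nodes whose remaining in-degree is zero
    print(F.asnumpy(frontier))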
Example No. 9
def test_to_device(index_dtype):
    g1 = dgl.heterograph({('user', 'plays', 'game'): [(0, 0), (1, 1)]},
                         index_dtype=index_dtype)
    g1.nodes['user'].data['h1'] = F.copy_to(F.tensor([[0.], [1.]]), F.cpu())
    g1.nodes['user'].data['h2'] = F.copy_to(F.tensor([[3.], [4.]]), F.cpu())
    g1.edges['plays'].data['h1'] = F.copy_to(F.tensor([[2.], [3.]]), F.cpu())

    g2 = dgl.heterograph({('user', 'plays', 'game'): [(0, 0), (1, 0)]},
                         index_dtype=index_dtype)
    g2.nodes['user'].data['h1'] = F.copy_to(F.tensor([[1.], [2.]]), F.cpu())
    g2.nodes['user'].data['h2'] = F.copy_to(F.tensor([[4.], [5.]]), F.cpu())
    g2.edges['plays'].data['h1'] = F.copy_to(F.tensor([[0.], [1.]]), F.cpu())

    bg = dgl.batch_hetero([g1, g2])

    if F.is_cuda_available():
        bg1 = bg.to(F.cuda())
        assert bg1 is not None
        assert bg.batch_size == bg1.batch_size
        assert bg.batch_num_nodes('user') == bg1.batch_num_nodes('user')
        assert bg.batch_num_edges('plays') == bg1.batch_num_edges('plays')

    # set feature
    g1 = dgl.heterograph({('user', 'plays', 'game'): [(0, 0), (1, 1)]},
                         index_dtype=index_dtype)
    g2 = dgl.heterograph({('user', 'plays', 'game'): [(0, 0), (1, 0)]},
                         index_dtype=index_dtype)
    bg = dgl.batch_hetero([g1, g2])
    if F.is_cuda_available():
        bg1 = bg.to(F.cuda())
        bg1.nodes['user'].data['test'] = F.copy_to(F.tensor([0, 1, 2, 3]),
                                                   F.cuda())
        bg1.edata['test'] = F.copy_to(F.tensor([0, 1, 2, 3]), F.cuda())
Example No. 10
def test_get_node_partition_from_book(idtype):
    node_map = {"type_n": F.tensor([[0, 3], [4, 5], [6, 10]], dtype=idtype)}
    edge_map = {"type_e": F.tensor([[0, 9], [10, 15], [16, 25]], dtype=idtype)}
    book = gpb.RangePartitionBook(0, 3, node_map, edge_map, {"type_n": 0},
                                  {"type_e": 0})
    partition = gpb.get_node_partition_from_book(book, F.ctx())
    assert partition.num_parts() == 3
    assert partition.array_size() == 11

    test_ids = F.copy_to(F.tensor([0, 2, 6, 7, 10], dtype=idtype), F.ctx())
    act_ids = partition.map_to_local(test_ids)
    exp_ids = F.copy_to(F.tensor([0, 2, 0, 1, 4], dtype=idtype), F.ctx())
    assert F.array_equal(act_ids, exp_ids)

    test_ids = F.copy_to(F.tensor([0, 2], dtype=idtype), F.ctx())
    act_ids = partition.map_to_global(test_ids, 0)
    exp_ids = F.copy_to(F.tensor([0, 2], dtype=idtype), F.ctx())
    assert F.array_equal(act_ids, exp_ids)

    test_ids = F.copy_to(F.tensor([0, 1], dtype=idtype), F.ctx())
    act_ids = partition.map_to_global(test_ids, 1)
    exp_ids = F.copy_to(F.tensor([4, 5], dtype=idtype), F.ctx())
    assert F.array_equal(act_ids, exp_ids)

    test_ids = F.copy_to(F.tensor([0, 1, 4], dtype=idtype), F.ctx())
    act_ids = partition.map_to_global(test_ids, 2)
    exp_ids = F.copy_to(F.tensor([6, 7, 10], dtype=idtype), F.ctx())
    assert F.array_equal(act_ids, exp_ids)
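
The expected IDs above follow from offset arithmetic against the partition range starts in `node_map` (0, 4, and 6): map_to_local subtracts the owning partition's start, map_to_global adds it back. A plain-Python restatement (illustrative, not the library's implementation):

starts = [0, 4, 6]                                    # first global node ID owned by each partition

def owner(gid):
    return max(p for p, s in enumerate(starts) if gid >= s)

def to_local(gid):
    return gid - starts[owner(gid)]

assert [to_local(gid) for gid in (0, 2, 6, 7, 10)] == [0, 2, 0, 1, 4]
assert [lid + starts[1] for lid in (0, 1)] == [4, 5]         # map_to_global(ids, 1)
assert [lid + starts[2] for lid in (0, 1, 4)] == [6, 7, 10]  # map_to_global(ids, 2)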
Example No. 11
def test_basic():
    num_layers = 2
    g = generate_rand_graph(100, connect_more=True)
    nf = create_full_nodeflow(g, num_layers)
    assert nf.number_of_nodes() == g.number_of_nodes() * (num_layers + 1)
    assert nf.number_of_edges() == g.number_of_edges() * num_layers
    assert nf.num_layers == num_layers + 1
    assert nf.layer_size(0) == g.number_of_nodes()
    assert nf.layer_size(1) == g.number_of_nodes()
    check_basic(g, nf)

    parent_nids = F.copy_to(F.arange(0, g.number_of_nodes()), F.cpu())
    nids = nf.map_from_parent_nid(0, parent_nids, remap_local=True)
    assert_array_equal(F.asnumpy(nids), F.asnumpy(parent_nids))

    # should also work for negative layer ids
    for l in range(-1, -num_layers, -1):
        nids1 = nf.map_from_parent_nid(l, parent_nids, remap_local=True)
        nids2 = nf.map_from_parent_nid(l + num_layers,
                                       parent_nids,
                                       remap_local=True)
        assert_array_equal(F.asnumpy(nids1), F.asnumpy(nids2))

    g = generate_rand_graph(100)
    nf = create_mini_batch(g, num_layers)
    assert nf.num_layers == num_layers + 1
    check_basic(g, nf)

    g = generate_rand_graph(100, add_self_loop=True)
    nf = create_mini_batch(g, num_layers, add_self_loop=True)
    assert nf.num_layers == num_layers + 1
    check_basic(g, nf)
Example No. 12
def test_subgraph_message_passing():
    # Unit test for PR #2055
    g = dgl.graph(([0, 1, 2], [2, 3, 4])).to(F.cpu())
    g.ndata['x'] = F.copy_to(F.randn((5, 6)), F.cpu())
    sg = g.subgraph([1, 2, 3]).to(F.ctx())
    sg.update_all(lambda edges: {'x': edges.src['x']},
                  lambda nodes: {'y': F.sum(nodes.mailbox['x'], 1)})
Example No. 13
def check_positive_edge_sampler():
    g = generate_rand_graph(1000)
    num_edges = g.number_of_edges()
    edge_weight = F.copy_to(
        F.tensor(np.full((num_edges, ), 1, dtype=np.float32)), F.cpu())

    edge_weight[num_edges - 1] = num_edges**2
    EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')

    # Correctness check
    # Test the homogeneous graph.
    batch_size = 128
    edge_sampled = np.full((num_edges, ), 0, dtype=np.int32)
    for pos_edges in EdgeSampler(g,
                                 batch_size,
                                 reset=False,
                                 edge_weight=edge_weight):
        _, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
        np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
    truth = np.full((num_edges, ), 1, dtype=np.int32)
    edge_sampled = edge_sampled[:num_edges]
    assert np.array_equal(truth, edge_sampled)

    edge_sampled = np.full((num_edges, ), 0, dtype=np.int32)
    for pos_edges in EdgeSampler(g,
                                 batch_size,
                                 reset=False,
                                 shuffle=True,
                                 edge_weight=edge_weight):
        _, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
        np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
    truth = np.full((num_edges, ), 1, dtype=np.int32)
    edge_sampled = edge_sampled[:num_edges]
    assert np.array_equal(truth, edge_sampled)
Example No. 14
def test_multi_send():
    g = generate_graph()

    def _fmsg(edges):
        assert edges.src['h'].shape == (5, D)
        return {'m': edges.src['h']}

    g.register_message_func(_fmsg)
    # many-many send
    u = F.tensor([0, 0, 0, 0, 0])
    v = F.tensor([1, 2, 3, 4, 5])
    g.send((u, v))
    # duplicate send
    u = F.tensor([0])
    v = F.tensor([1, 2, 3, 4, 5])
    g.send((u, v))
    # send more
    u = F.tensor([1, 2, 3, 4, 5])
    v = F.tensor([9])
    g.send((u, v))

    # check if message indicator is as expected
    expected = F.copy_to(F.zeros((g.number_of_edges(), ), dtype=F.int64),
                         F.cpu())
    eid = g.edge_ids([0, 0, 0, 0, 0, 1, 2, 3, 4, 5],
                     [1, 2, 3, 4, 5, 9, 9, 9, 9, 9])
    expected = F.asnumpy(expected)
    eid = F.asnumpy(eid)
    expected[eid] = 1
    assert np.array_equal(g._get_msg_index().tonumpy(), expected)
Example No. 15
def test_node_dataloader(sampler_name, pin_graph):
    g1 = dgl.graph(([0, 0, 0, 1, 1], [1, 2, 3, 3, 4]))
    if F.ctx() != F.cpu() and pin_graph:
        g1.create_formats_()
        g1.pin_memory_()
    g1.ndata['feat'] = F.copy_to(F.randn((5, 8)), F.cpu())
    g1.ndata['label'] = F.copy_to(F.randn((g1.num_nodes(),)), F.cpu())

    for num_workers in [0, 1, 2]:
        sampler = {
            'full': dgl.dataloading.MultiLayerFullNeighborSampler(2),
            'neighbor': dgl.dataloading.MultiLayerNeighborSampler([3, 3]),
            'neighbor2': dgl.dataloading.MultiLayerNeighborSampler([3, 3])}[sampler_name]
        dataloader = dgl.dataloading.NodeDataLoader(
            g1, g1.nodes(), sampler, device=F.ctx(),
            batch_size=g1.num_nodes(),
            num_workers=num_workers)
        for input_nodes, output_nodes, blocks in dataloader:
            _check_device(input_nodes)
            _check_device(output_nodes)
            _check_device(blocks)

    g2 = dgl.heterograph({
         ('user', 'follow', 'user'): ([0, 0, 0, 1, 1, 1, 2], [1, 2, 3, 0, 2, 3, 0]),
         ('user', 'followed-by', 'user'): ([1, 2, 3, 0, 2, 3, 0], [0, 0, 0, 1, 1, 1, 2]),
         ('user', 'play', 'game'): ([0, 1, 1, 3, 5], [0, 1, 2, 0, 2]),
         ('game', 'played-by', 'user'): ([0, 1, 2, 0, 2], [0, 1, 1, 3, 5])
    })
    for ntype in g2.ntypes:
        g2.nodes[ntype].data['feat'] = F.copy_to(F.randn((g2.num_nodes(ntype), 8)), F.cpu())
    batch_size = max(g2.num_nodes(nty) for nty in g2.ntypes)
    sampler = {
        'full': dgl.dataloading.MultiLayerFullNeighborSampler(2),
        'neighbor': dgl.dataloading.MultiLayerNeighborSampler([{etype: 3 for etype in g2.etypes}] * 2),
        'neighbor2': dgl.dataloading.MultiLayerNeighborSampler([3, 3])}[sampler_name]

    dataloader = dgl.dataloading.NodeDataLoader(
        g2, {nty: g2.nodes(nty) for nty in g2.ntypes},
        sampler, device=F.ctx(), batch_size=batch_size)
    assert isinstance(iter(dataloader), Iterator)
    for input_nodes, output_nodes, blocks in dataloader:
        _check_device(input_nodes)
        _check_device(output_nodes)
        _check_device(blocks)

    if g1.is_pinned():
        g1.unpin_memory_()
Example No. 16
def test_dynamic_addition():
    N = 3
    D = 1

    g = DGLGraph()

    def _init(shape, dtype, ctx, ids):
        return F.copy_to(F.astype(F.randn(shape), dtype), ctx)

    g.set_n_initializer(_init)
    g.set_e_initializer(_init)

    def _message(edges):
        return {
            'm':
            edges.src['h1'] + edges.dst['h2'] + edges.data['h1'] +
            edges.data['h2']
        }

    def _reduce(nodes):
        return {'h': F.sum(nodes.mailbox['m'], 1)}

    def _apply(nodes):
        return {'h': nodes.data['h']}

    g.register_message_func(_message)
    g.register_reduce_func(_reduce)
    g.register_apply_node_func(_apply)
    g.set_n_initializer(dgl.init.zero_initializer)
    g.set_e_initializer(dgl.init.zero_initializer)

    # add nodes and edges
    g.add_nodes(N)
    g.ndata.update({'h1': F.randn((N, D)), 'h2': F.randn((N, D))})
    g.add_nodes(3)
    g.add_edge(0, 1)
    g.add_edge(1, 0)
    g.edata.update({'h1': F.randn((2, D)), 'h2': F.randn((2, D))})
    g.send()
    expected = F.copy_to(F.ones((g.number_of_edges(), ), dtype=F.int64),
                         F.cpu())
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)

    # add more edges
    g.add_edges([0, 2], [2, 0], {'h1': F.randn((2, D))})
    g.send(([0, 2], [2, 0]))
    g.recv(0)

    g.add_edge(1, 2)
    g.edges[4].data['h1'] = F.randn((1, D))
    g.send((1, 2))
    g.recv([1, 2])

    h = g.ndata.pop('h')

    # a complete round of send and recv
    g.send()
    g.recv()
    assert F.allclose(h, g.ndata['h'])
Example No. 17
def _test_callback_array_thread(dtype):
    def cb(x):
        return F.to_dgl_nd(F.from_dgl_nd(x) + 1)

    arg = F.copy_to(F.tensor([1, 2, 3], dtype=dtype), F.ctx())
    ret = F.from_dgl_nd(
        dgl._api_internal._TestPythonCallbackThread(cb, F.to_dgl_nd(arg)))
    assert np.allclose(F.asnumpy(ret), F.asnumpy(arg) + 1)
Example No. 18
def _test_sampler(g, sampler, ntype):
    seeds = F.copy_to(F.tensor([0, 2], dtype=F.int64), F.ctx())
    neighbor_g = sampler(seeds)
    assert neighbor_g.ntypes == [ntype]
    u, v = neighbor_g.all_edges(form='uv', order='eid')
    uv = list(zip(F.asnumpy(u).tolist(), F.asnumpy(v).tolist()))
    assert (1, 0) in uv or (0, 0) in uv
    assert (2, 2) in uv or (3, 2) in uv
Example No. 19
def test_async_transferer_to_other():
    cpu_ones = F.ones([100,75,25], dtype=F.int32, ctx=F.cpu())
    tran = AsyncTransferer(F.ctx())
    t = tran.async_copy(cpu_ones, F.ctx())
    other_ones = t.wait()

    assert F.context(other_ones) == F.ctx()
    assert F.array_equal(F.copy_to(other_ones, ctx=F.cpu()), cpu_ones)
Example No. 20
def test_column_subcolumn():
    data = F.copy_to(
        F.tensor([[1., 1., 1., 1.], [0., 2., 9., 0.], [3., 2., 1., 0.],
                  [1., 1., 1., 1.], [0., 2., 4., 0.]]), F.ctx())
    original = Column(data)

    # subcolumn from cpu context
    i1 = F.tensor([0, 2, 1, 3], dtype=F.int64)
    l1 = original.subcolumn(i1)

    assert len(l1) == i1.shape[0]
    assert F.array_equal(l1.data, F.gather_row(data, i1))

    # next subcolumn from target context
    i2 = F.copy_to(F.tensor([0, 2], dtype=F.int64), F.ctx())
    l2 = l1.subcolumn(i2)

    assert len(l2) == i2.shape[0]
    i1i2 = F.copy_to(F.gather_row(i1, F.copy_to(i2, F.context(i1))), F.ctx())
    assert F.array_equal(l2.data, F.gather_row(data, i1i2))

    # next subcolumn also from target context
    i3 = F.copy_to(F.tensor([1], dtype=F.int64), F.ctx())
    l3 = l2.subcolumn(i3)

    assert len(l3) == i3.shape[0]
    i1i2i3 = F.copy_to(F.gather_row(i1i2, F.copy_to(i3, F.context(i1i2))),
                       F.ctx())
    assert F.array_equal(l3.data, F.gather_row(data, i1i2i3))
Example No. 21
def test_self_loop():
    n = 100
    num_hops = 2
    g = generate_rand_graph(n, complete=True)
    nf = create_mini_batch(g, num_hops, add_self_loop=True)
    for i in range(1, nf.num_layers):
        in_deg = nf.layer_in_degree(i)
        deg = F.copy_to(F.ones(in_deg.shape, dtype=F.int64), F.cpu()) * n
        assert_array_equal(F.asnumpy(in_deg), F.asnumpy(deg))
Example No. 22
def test_edge_prediction_sampler(idtype):
    g = create_test_graph(idtype)
    sampler = NeighborSampler([10, 10])
    sampler = as_edge_prediction_sampler(
        sampler, negative_sampler=negative_sampler.Uniform(1))

    seeds = F.copy_to(F.arange(0, 2, dtype=idtype), ctx=F.ctx())
    # just a smoke test to make sure we don't fail internal assertions
    result = sampler.sample(g, {'follows': seeds})
Example No. 23
def test_uva_subgraph(idtype, device):
    g = create_test_heterograph(idtype)
    g = g.to(F.cpu())
    g.create_formats_()
    g.pin_memory_()
    indices = {'user': F.copy_to(F.tensor([0], idtype), device)}
    edge_indices = {'follows': F.copy_to(F.tensor([0], idtype), device)}
    assert g.subgraph(indices).device == device
    assert g.edge_subgraph(edge_indices).device == device
    assert g.in_subgraph(indices).device == device
    assert g.out_subgraph(indices).device == device
    if dgl.backend.backend_name != 'tensorflow':
        # (BarclayII) Most of Tensorflow functions somehow do not preserve device: a CPU tensor
        # becomes a GPU tensor after operations such as concat(), unique() or even sin().
        # Not sure what should be the best fix.
        assert g.khop_in_subgraph(indices, 1)[0].device == device
        assert g.khop_out_subgraph(indices, 1)[0].device == device
    assert g.sample_neighbors(indices, 1).device == device
    g.unpin_memory_()
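
The UVA examples (1, 2, and 23) all follow the same pattern: keep the graph on the CPU, pin its memory, drive sampling or slicing with indices on the target device, then unpin. A condensed sketch of that pattern, guarded the way these tests do (the guard condition itself is an assumption):

import dgl
import backend as F

g = dgl.graph(([0, 1, 2], [1, 2, 3]))
if F.ctx() != F.cpu() and dgl.backend.backend_name == 'pytorch':
    g = g.to(F.cpu())
    g.create_formats_()
    g.pin_memory_()
    try:
        idx = F.copy_to(F.tensor([0, 1], dtype=F.int64), F.ctx())
        sg = g.sample_neighbors(idx, 1)   # runs on the GPU over pinned host memory
    finally:
        g.unpin_memory_()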
Example No. 24
def test_sage_conv2(idtype):
    # TODO: add test for blocks
    # Test the case for graphs without edges
    g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    sage = nn.SAGEConv((3, 3), 2, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage = sage.to(ctx)
    h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))
    assert h.shape[-1] == 2
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool', 'lstm']:
        sage = nn.SAGEConv((3, 1), 2, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 3
Example No. 25
def test_sage_conv2(idtype):
    # TODO: add test for blocks
    # Test the case for graphs without edges
    g = dgl.bipartite([], num_nodes=(5, 3))
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    sage = nn.SAGEConv((3, 3), 2, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage = sage.to(ctx)
    h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))
    assert h.shape[-1] == 2
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool', 'lstm']:
        sage = nn.SAGEConv((3, 1), 2, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 3
Example No. 26
def test_multi_recv():
    # basic recv test
    g = generate_graph()
    h = g.ndata['h']
    g.register_message_func(message_func)
    g.register_reduce_func(reduce_func)
    g.register_apply_node_func(apply_node_func)
    expected = F.copy_to(F.zeros((g.number_of_edges(), ), dtype=F.int64),
                         F.cpu())
    # two separate round of send and recv
    u = [4, 5, 6]
    v = [9]
    g.send((u, v))
    eid = g.edge_ids(u, v)
    expected[eid] = 1
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)
    g.recv(v)
    expected[eid] = 0
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)

    u = [0]
    v = [1, 2, 3]
    g.send((u, v))
    eid = g.edge_ids(u, v)
    expected[eid] = 1
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)
    g.recv(v)
    expected[eid] = 0
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)

    h1 = g.ndata['h']

    # one send, two recv
    g.ndata['h'] = h
    u = F.tensor([0, 0, 0, 4, 5, 6])
    v = F.tensor([1, 2, 3, 9, 9, 9])
    g.send((u, v))
    eid = g.edge_ids(u, v)
    expected[eid] = 1
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)
    u = [4, 5, 6]
    v = [9]
    g.recv(v)
    eid = g.edge_ids(u, v)
    expected[eid] = 0
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)
    u = [0]
    v = [1, 2, 3]
    g.recv(v)
    eid = g.edge_ids(u, v)
    expected[eid] = 0
    assert F.array_equal(g._get_msg_index().tousertensor(), expected)

    h2 = g.ndata['h']
    assert F.allclose(h1, h2)
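
Examples 14, 16, and 26 exercise DGL's pre-0.5 explicit messaging API (register_message_func, g.send, g.recv, the internal _get_msg_index), which newer releases no longer provide. On current DGL the same send-then-reduce round is expressed with update_all; a rough, illustrative equivalent (not a line-for-line port of the tests):

import dgl
import dgl.function as fn
import backend as F

g = dgl.graph(([0, 0, 0, 4, 5, 6], [1, 2, 3, 9, 9, 9]), num_nodes=10)
g.ndata['h'] = F.randn((g.num_nodes(), 4))
# One fused send/recv: copy source features along every edge, then sum them per destination.
g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h_sum'))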
Example No. 27
def _random_simple_graph(idtype, dtype, ctx, M, N, max_nnz, srctype, dsttype,
                         etype):
    src = np.random.randint(0, M, (max_nnz, ))
    dst = np.random.randint(0, N, (max_nnz, ))
    val = np.random.randn(max_nnz)
    a = ssp.csr_matrix((val, (src, dst)), shape=(M, N))
    a.sum_duplicates()
    a = a.tocoo()
    A = dgl.heterograph(
        {('A', 'AB', 'B'): (F.copy_to(F.tensor(a.row, dtype=idtype), ctx),
                            F.copy_to(F.tensor(a.col, dtype=idtype), ctx))},
        num_nodes_dict={'A': a.shape[0], 'B': a.shape[1]})
    A.edata['w'] = F.copy_to(F.tensor(a.data, dtype=dtype), ctx)
    return a, A
Example No. 28
def test_node_dataloader():
    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(2)

    g1 = dgl.graph(([0, 0, 0, 1, 1], [1, 2, 3, 3, 4]))
    g1.ndata['feat'] = F.copy_to(F.randn((5, 8)), F.cpu())

    dataloader = dgl.dataloading.NodeDataLoader(g1,
                                                g1.nodes(),
                                                sampler,
                                                device=F.ctx(),
                                                batch_size=g1.num_nodes())
    for input_nodes, output_nodes, blocks in dataloader:
        _check_device(input_nodes)
        _check_device(output_nodes)
        _check_device(blocks)

    g2 = dgl.heterograph({
        ('user', 'follow', 'user'): ([0, 0, 0, 1, 1, 1, 2], [1, 2, 3, 0, 2, 3, 0]),
        ('user', 'followed-by', 'user'): ([1, 2, 3, 0, 2, 3, 0], [0, 0, 0, 1, 1, 1, 2]),
        ('user', 'play', 'game'): ([0, 1, 1, 3, 5], [0, 1, 2, 0, 2]),
        ('game', 'played-by', 'user'): ([0, 1, 2, 0, 2], [0, 1, 1, 3, 5])
    })
    for ntype in g2.ntypes:
        g2.nodes[ntype].data['feat'] = F.copy_to(
            F.randn((g2.num_nodes(ntype), 8)), F.cpu())
    batch_size = max(g2.num_nodes(nty) for nty in g2.ntypes)

    dataloader = dgl.dataloading.NodeDataLoader(
        g2, {nty: g2.nodes(nty)
             for nty in g2.ntypes},
        sampler,
        device=F.ctx(),
        batch_size=batch_size)
    assert isinstance(iter(dataloader), Iterator)
    for input_nodes, output_nodes, blocks in dataloader:
        _check_device(input_nodes)
        _check_device(output_nodes)
        _check_device(blocks)
Example No. 29
def test_serialize_deserialize_dtype():
    data = F.copy_to(F.tensor([[1., 1., 1., 1.],
                               [0., 2., 9., 0.],
                               [3., 2., 1., 0.],
                               [1., 1., 1., 1.],
                               [0., 2., 4., 0.]]), F.ctx())
    original = Column(data)
    original = original.astype(F.int64)

    serial = pickle.dumps(original)
    new = pickle.loads(serial)

    assert new.dtype == F.int64
Example No. 30
def test_issue_2484(idtype):
    import dgl.function as fn
    g = dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
    x = F.copy_to(F.randn((4, )), F.ctx())
    g.ndata['x'] = x
    g.pull([2, 1], fn.u_add_v('x', 'x', 'm'), fn.sum('m', 'x'))
    y1 = g.ndata['x']

    g.ndata['x'] = x
    g.pull([1, 2], fn.u_add_v('x', 'x', 'm'), fn.sum('m', 'x'))
    y2 = g.ndata['x']

    assert F.allclose(y1, y2)