Example #1
def build(self):
    # learnable scale/shift parameters, initialized to ones and zeros
    self.gamma = K.ones((self.num_lstm,))
    self.beta = K.zeros((self.num_lstm,))

    # running statistics; the actual update rules are filled in later (None placeholders)
    self.running_mean = K.zeros((self.num_lstm,))
    self.running_std = K.ones((self.num_lstm,))
    self.updates = [(self.running_mean, None), (self.running_std, None)]
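For comparison, a minimal sketch of the same parameter setup written as a present-day tf.keras custom layer; the class name NormCell and its constructor are illustrative and not part of the original snippet.

import tensorflow as tf

class NormCell(tf.keras.layers.Layer):
    # hypothetical layer reproducing the parameter setup above
    def __init__(self, num_lstm, **kwargs):
        super().__init__(**kwargs)
        self.num_lstm = num_lstm

    def build(self, input_shape):
        # trainable scale/shift, initialized to ones/zeros
        self.gamma = self.add_weight("gamma", shape=(self.num_lstm,), initializer="ones")
        self.beta = self.add_weight("beta", shape=(self.num_lstm,), initializer="zeros")
        # non-trainable running statistics
        self.running_mean = self.add_weight(
            "running_mean", shape=(self.num_lstm,), initializer="zeros", trainable=False)
        self.running_std = self.add_weight(
            "running_std", shape=(self.num_lstm,), initializer="ones", trainable=False)

    def call(self, inputs):
        # identity pass-through; the normalization itself is out of scope here
        return inputs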
Example #2
def test_row2():
    # test row getter/setter autograd compatibility
    data = create_test_data(grad=True)
    f = FrameRef(Frame(data))

    with F.record_grad():
        # getter
        c1 = f['a1']
        # test non-duplicate keys
        rowid = Index(F.tensor([0, 2]))
        rows = f[rowid]
        y = rows['a1']
    F.backward(y, F.ones((len(rowid), D)))
    assert F.allclose(
        F.grad(c1)[:, 0], F.tensor([1., 0., 1., 0., 0., 0., 0., 0., 0., 0.]))

    f['a1'] = F.attach_grad(f['a1'])
    with F.record_grad():
        c1 = f['a1']
        # test duplicate keys
        rowid = Index(F.tensor([8, 2, 2, 1]))
        rows = f[rowid]
        y = rows['a1']
    F.backward(y, F.ones((len(rowid), D)))
    assert F.allclose(
        F.grad(c1)[:, 0], F.tensor([0., 1., 2., 0., 0., 0., 0., 0., 1., 0.]))

    f['a1'] = F.attach_grad(f['a1'])
    with F.record_grad():
        # setter
        c1 = f['a1']
        rowid = Index(F.tensor([0, 2, 4]))
        vals = {
            'a1': F.attach_grad(F.zeros((len(rowid), D))),
            'a2': F.attach_grad(F.zeros((len(rowid), D))),
            'a3': F.attach_grad(F.zeros((len(rowid), D))),
        }
        f[rowid] = vals
        c11 = f['a1']
    F.backward(c11, F.ones((N, D)))
    assert F.allclose(
        F.grad(c1)[:, 0], F.tensor([0., 1., 0., 1., 0., 1., 1., 1., 1., 1.]))
    assert F.allclose(F.grad(vals['a1']), F.ones((len(rowid), D)))
    assert F.is_no_grad(vals['a2'])
Example #3
def test_appnp_conv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    appnp = nn.APPNPConv(10, 0.1)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    appnp = appnp.to(ctx)

    h = appnp(g, feat, edge_weight=eweight)
    assert h.shape[-1] == 5
Example #4
def test_tagconv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    conv = nn.TAGConv(5, 5, bias=True)
    conv = conv.to(ctx)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    conv = conv.to(ctx)
    h = conv(g, feat, edge_weight=eweight)
    assert h.shape[-1] == 5
Example #5
def create_heterographs(idtype):
    g_x = dgl.graph(([0, 1, 2], [1, 2, 3]), 'user', 'follows', idtype=idtype)
    g_y = dgl.graph(([0, 2], [2, 3]), 'user', 'knows',
                    idtype=idtype).formats('csr')
    g_x.nodes['user'].data['h'] = F.randn((4, 3))
    g_x.edges['follows'].data['w'] = F.randn((3, 2))
    g_y.nodes['user'].data['hh'] = F.ones((4, 5))
    g_y.edges['knows'].data['ww'] = F.randn((2, 10))
    g = dgl.hetero_from_relations([g_x, g_y])
    return [g, g_x, g_y]
Example #6
def test_prop_edges_dfs(idtype):
    g = dgl.graph(nx.path_graph(5), idtype=idtype, device=F.ctx())
    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_edges_dfs(g, 0, message_func=mfunc, reduce_func=rfunc, apply_node_func=None)
    # send-and-recv (snr) along the DFS edges produces a cumulative sum
    assert F.allclose(g.ndata['x'],
            F.tensor([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]]))

    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_edges_dfs(g, 0, has_reverse_edge=True, message_func=mfunc, reduce_func=rfunc, apply_node_func=None)
    # result is cumsum[i] + cumsum[i-1]
    assert F.allclose(g.ndata['x'],
            F.tensor([[1., 1.], [3., 3.], [5., 5.], [7., 7.], [9., 9.]]))

    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_edges_dfs(g, 0, has_nontree_edge=True, message_func=mfunc, reduce_func=rfunc, apply_node_func=None)
    # result is cumsum[i] + cumsum[i+1]
    assert F.allclose(g.ndata['x'],
            F.tensor([[3., 3.], [5., 5.], [7., 7.], [9., 9.], [5., 5.]]))
Example #7
def construct_graph(n, readonly=True):
    g_list = []
    for i in range(n):
        g = generate_rand_graph(30)
        g.edata['e1'] = F.randn((g.number_of_edges(), 32))
        g.edata['e2'] = F.ones((g.number_of_edges(), 32))
        g.ndata['n1'] = F.randn((g.number_of_nodes(), 64))
        g.readonly(i % 2 == 0)
        g_list.append(g)
    return g_list
Example #8
def test_prop_nodes_bfs():
    g = dgl.DGLGraph(nx.path_graph(5))
    g.ndata['x'] = F.ones((5, 2))
    g.register_message_func(mfunc)
    g.register_reduce_func(rfunc)

    dgl.prop_nodes_bfs(g, 0)
    # pulling nodes in BFS order results in cumsum[i] + data[i] + data[i+1]
    assert F.allclose(
        g.ndata['x'],
        F.tensor([[2., 2.], [4., 4.], [6., 6.], [8., 8.], [9., 9.]]))
Example #9
def tensor_topo_traverse():
    # generator yielding frontiers of nodes whose remaining degree is zero
    # (i.e. a topological traversal), using dense masks and sparse matmuls
    n = g.number_of_nodes()
    mask = F.copy_to(F.ones((n, 1)), F.cpu())
    degree = F.spmm(adjmat, mask)
    while F.reduce_sum(mask) != 0.:
        v = F.astype((degree == 0.), F.float32)
        v = v * mask
        mask = mask - v
        frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())
        yield frontier
        degree -= F.spmm(adjmat, v)
Example #10
def test_graph_conv0():
    g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=False, ctx=ctx)

    conv = nn.GraphConv(5, 2, norm='none', bias=True)

    # test#1: basic
    h0 = F.ones((3, 5))
    init_params = conv.init(jax.random.PRNGKey(2666), g, h0)
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    assert F.allclose(
        h1,
        _AXWb(adj, h0, init_params["params"]["_weight"],
              init_params["params"]["_bias"]))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    assert F.allclose(
        h1,
        _AXWb(adj, h0, init_params["params"]["_weight"],
              init_params["params"]["_bias"]))

    conv = nn.GraphConv(5, 2)
    # test#3: basic
    h0 = F.ones((3, 5))
    init_params = conv.init(jax.random.PRNGKey(2666), g, h0)
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
Example #11
def check_init_func(worker_id, graph_name, return_dict):
    time.sleep(3)
    print("worker starts")
    np.random.seed(0)
    csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)

    # Verify the graph structure loaded from the shared memory.
    try:
        g = create_graph_store(graph_name)
        if g is None:
            return_dict[worker_id] = -1
            return

        src, dst = g.all_edges()
        coo = csr.tocoo()
        assert_array_equal(F.asnumpy(dst), coo.row)
        assert_array_equal(F.asnumpy(src), coo.col)
        feat = F.asnumpy(g.nodes[0].data['feat'])
        assert_array_equal(np.squeeze(feat), np.arange(10, dtype=feat.dtype))
        feat = F.asnumpy(g.edges[0].data['feat'])
        assert_array_equal(np.squeeze(feat), np.arange(10, dtype=feat.dtype))
        g.init_ndata('test4', (g.number_of_nodes(), 10), 'float32')
        g.init_edata('test4', (g.number_of_edges(), 10), 'float32')
        g._sync_barrier(60)
        check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])

        data = g.nodes[:].data['test4']
        g.set_n_repr({'test4': F.ones((1, 10)) * 10}, u=[0])
        assert_almost_equal(F.asnumpy(data[0]), np.squeeze(F.asnumpy(g.nodes[0].data['test4'])))

        data = g.edges[:].data['test4']
        g.set_e_repr({'test4': F.ones((1, 10)) * 20}, edges=[0])
        assert_almost_equal(F.asnumpy(data[0]), np.squeeze(F.asnumpy(g.edges[0].data['test4'])))

        g.destroy()
        return_dict[worker_id] = 0
    except Exception as e:
        return_dict[worker_id] = -1
        g.destroy()
        print(e, file=sys.stderr)
        traceback.print_exc()
Example #12
def test_prop_nodes_bfs(idtype):
    g = create_graph(idtype)
    g.ndata['x'] = F.ones((5, 2))
    dgl.prop_nodes_bfs(g,
                       0,
                       message_func=mfunc,
                       reduce_func=rfunc,
                       apply_node_func=None)
    # pulling nodes in BFS order results in cumsum[i] + data[i] + data[i+1]
    assert F.allclose(
        g.ndata['x'],
        F.tensor([[2., 2.], [4., 4.], [6., 6.], [8., 8.], [9., 9.]]))
Example #13
def test_batch_setter_autograd():
    g = generate_graph(grad=True)
    h1 = g.ndata['h']
    # partial set
    v = F.tensor([1, 2, 8])
    hh = F.attach_grad(F.zeros((len(v), D)))
    with F.record_grad():
        g.nodes[v].data['h'] = hh
        h2 = g.ndata['h']
    F.backward(h2, F.ones((10, D)) * 2)
    assert F.array_equal(F.grad(h1)[:,0], F.tensor([2., 0., 0., 2., 2., 2., 2., 2., 0., 2.]))
    assert F.array_equal(F.grad(hh)[:,0], F.tensor([2., 2., 2.]))
Example #14
def test_async_transferer_from_other():
    other_ones = F.ones([100,75,25], dtype=F.int32, ctx=F.ctx())
    tran = AsyncTransferer(F.ctx())
    
    try:
        t = tran.async_copy(other_ones, F.cpu())
    except ValueError:
        # correctly threw an error
        pass
    else:
        # should have thrown an error
        assert False
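An equivalent way to express the expected failure is pytest.raises; this sketch assumes the same F backend module and AsyncTransferer import as in the snippet above (both come from the surrounding test module, not shown here) and that pytest is the test runner.

import pytest

def test_async_transferer_from_other_pytest():
    other_ones = F.ones([100, 75, 25], dtype=F.int32, ctx=F.ctx())
    tran = AsyncTransferer(F.ctx())
    # the original test expects this copy request to raise ValueError
    with pytest.raises(ValueError):
        tran.async_copy(other_ones, F.cpu())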
Example #15
def test_inplace():
    f = FrameRef(Frame(create_test_data()))
    print(f.schemes)
    a1addr = id(f['a1'])
    a2addr = id(f['a2'])
    a3addr = id(f['a3'])

    # column updates are always out-of-place
    f['a1'] = F.ones((N, D))
    newa1addr = id(f['a1'])
    assert a1addr != newa1addr
    a1addr = newa1addr
    # full row update that becomes column update
    f[toindex(slice(0, N))] = {'a1': F.ones((N, D))}
    assert id(f['a1']) != a1addr

    # row update (outplace) w/ slice
    f[toindex(slice(1, 4))] = {'a2': F.ones((3, D))}
    newa2addr = id(f['a2'])
    assert a2addr != newa2addr
    a2addr = newa2addr
    # row update (outplace) w/ list
    f[toindex([1, 3, 5])] = {'a2': F.ones((3, D))}
    newa2addr = id(f['a2'])
    assert a2addr != newa2addr
    a2addr = newa2addr

    # row update (inplace) w/ slice
    f.update_data(toindex(slice(1, 4)), {'a2': F.ones((3, D))}, True)
    newa2addr = id(f['a2'])
    assert a2addr == newa2addr
    # row update (inplace) w/ list
    f.update_data(toindex([1, 3, 5]), {'a2': F.ones((3, D))}, True)
    newa2addr = id(f['a2'])
    assert a2addr == newa2addr
Example #16
def test_nonuniform_neighbor_sampler():
    # Construct a graph with
    # (1) A path (0, 1, ..., 99) with weight 1
    # (2) A bunch of random edges with weight 0.
    edges = []
    for i in range(99):
        edges.append((i, i + 1))
    for i in range(1000):
        edge = (np.random.randint(100), np.random.randint(100))
        if edge not in edges:
            edges.append(edge)
    src, dst = zip(*edges)
    g = dgl.DGLGraph()
    g.add_nodes(100)
    g.add_edges(src, dst)
    g.readonly()

    g.edata['w'] = F.cat([
        F.ones((99, ), F.float64, F.cpu()),
        F.zeros((len(edges) - 99, ), F.float64, F.cpu())
    ], 0)

    # Test 1-neighbor NodeFlow with 99 as target node.
    # The generated NodeFlow should only contain node i on layer i.
    sampler = dgl.contrib.sampling.NeighborSampler(g,
                                                   1,
                                                   1,
                                                   99,
                                                   'in',
                                                   transition_prob='w',
                                                   seed_nodes=[99])
    nf = next(iter(sampler))

    assert nf.num_layers == 100
    for i in range(nf.num_layers):
        assert nf.layer_size(i) == 1
        assert nf.layer_parent_nid(i)[0] == i

    # Test the reverse direction
    sampler = dgl.contrib.sampling.NeighborSampler(g,
                                                   1,
                                                   1,
                                                   99,
                                                   'out',
                                                   transition_prob='w',
                                                   seed_nodes=[0])
    nf = next(iter(sampler))

    assert nf.num_layers == 100
    for i in range(nf.num_layers):
        assert nf.layer_size(i) == 1
        assert nf.layer_parent_nid(i)[0] == 99 - i
Example #17
def set_output(self):
    value, _ = theano.scan(
        fn=self.step,
        # non_sequences=[self.attribute],
        outputs_info=[
            self._slice(self.init, 0, self.hidden_dim),
            self._slice(self.init, 1, self.hidden_dim),
            K.zeros((self.hidden_dim, )),
            K.ones((self.hidden_dim, ))
        ],
        name='lstm',
        n_steps=self.steps)
    return value
Example #18
def test_gcn2conv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    gcn2conv = nn.GCN2Conv(5,
                           layer=2,
                           alpha=0.5,
                           project_initial_features=True)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(), ))
    gcn2conv = gcn2conv.to(ctx)
    res = feat
    h = gcn2conv(g, res, feat, edge_weight=eweight)
    assert h.shape[-1] == 5
Example #19
def test_deserialize_old_heterograph_file():
    path = os.path.join(os.path.dirname(__file__), "data/hetero1.bin")
    g_list, label_dict = dgl.load_graphs(path)
    assert g_list[0].idtype == F.int64
    assert g_list[3].idtype == F.int32
    assert np.allclose(F.asnumpy(g_list[2].nodes['user'].data['hh']),
                       np.ones((4, 5)))
    assert np.allclose(F.asnumpy(g_list[5].nodes['user'].data['hh']),
                       np.ones((4, 5)))
    edges = g_list[0]['follows'].edges()
    assert np.allclose(F.asnumpy(edges[0]), np.array([0, 1, 2]))
    assert np.allclose(F.asnumpy(edges[1]), np.array([1, 2, 3]))
    assert F.allclose(label_dict['graph_label'], F.ones(54))
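A sketch of how a file like data/hetero1.bin could have been produced with dgl.save_graphs; it assumes the create_heterographs helper from Example #5 and a PyTorch backend, and is only an illustration, not the actual script that generated the test data.

import dgl
import torch as th

# hypothetical reconstruction: six heterographs (int64 idtype first, then int32),
# so g_list[0] is int64, g_list[3] is int32, and g_list[2]/g_list[5] carry the
# 'hh' feature of ones checked by the assertions above
g_list = create_heterographs(th.int64) + create_heterographs(th.int32)
labels = {"graph_label": th.ones(54)}
dgl.save_graphs("data/hetero1.bin", g_list, labels)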
Example #20
def test_tagconv(out_dim):
    g = dgl.DGLGraph(nx.path_graph(3))
    g = g.to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)
    norm = th.pow(g.in_degrees().float(), -0.5)

    conv = nn.TAGConv(5, out_dim, bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test pickle
    th.save(conv, tmp_buffer)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1, ) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)

    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight,
                                  conv.lin.bias))

    conv = nn.TAGConv(5, out_dim)
    conv = conv.to(ctx)

    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == out_dim

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)
Example #21
def test_mul_two_vars(backendopt):

    for datatype in backendopt:
        T.set_backend(datatype)

        x2 = ad.Variable(name="x2", shape=[3])
        x3 = ad.Variable(name="x3", shape=[3])
        y = ad.sum(x2 * x3)

        grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

        executor = ad.Executor([y, grad_x2, grad_x3])
        x2_val = 2 * T.ones(3)
        x3_val = 3 * T.ones(3)
        y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={
            x2: x2_val,
            x3: x3_val
        })

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, T.sum(x2_val * x3_val))
        assert T.array_equal(grad_x2_val, x3_val)
        assert T.array_equal(grad_x3_val, x2_val)
Example #22
def create_random_hetero():
    num_nodes = {'n1': 1010, 'n2': 1000, 'n3': 1020}
    etypes = [('n1', 'r1', 'n2'),
              ('n1', 'r2', 'n3'),
              ('n2', 'r3', 'n3')]
    edges = {}
    for etype in etypes:
        src_ntype, _, dst_ntype = etype
        arr = spsp.random(num_nodes[src_ntype], num_nodes[dst_ntype], density=0.001, format='coo',
                          random_state=100)
        edges[etype] = (arr.row, arr.col)
    g = dgl.heterograph(edges, num_nodes)
    g.nodes['n1'].data['feat'] = F.ones((g.number_of_nodes('n1'), 10), F.float32, F.cpu())
    return g
Example #23
def test_edge_softmax(idtype):
    # Basic
    g = dgl.graph(nx.path_graph(3))
    g = g.astype(idtype).to(F.ctx())
    edata = F.ones((g.number_of_edges(), 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test higher dimension case
    edata = F.ones((g.number_of_edges(), 3, 1))
    a = nn.edge_softmax(g, edata)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(a, uniform_attention(g, a.shape))

    # Test both forward and backward with PyTorch built-in softmax.
    g = dgl.rand_graph(30, 900)
    g = g.astype(idtype).to(F.ctx())

    score = F.randn((900, 1))
    score.requires_grad_()
    grad = F.randn((900, 1))
    y = F.softmax(score.view(30, 30), dim=0).view(-1, 1)
    y.backward(grad)
    grad_score = score.grad
    score.grad.zero_()
    y_dgl = nn.edge_softmax(g, score)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # check forward
    assert F.allclose(y_dgl, y)
    y_dgl.backward(grad)
    # check gradient
    assert F.allclose(score.grad, grad_score)
    print(score.grad[:10], grad_score[:10])
Example #24
def test_tgconv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)
    norm = th.pow(g.in_degrees().float(), -0.5)

    conv = nn.TGConv(5, 2, bias=True)
    if F.gpu_ctx():
        conv.cuda()
    print(conv)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(h0, g)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1, ) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)

    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight,
                                  conv.lin.bias))

    conv = nn.TGConv(5, 2)
    if F.gpu_ctx():
        conv.cuda()
    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(h0, g)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)
Example #25
def test_graph_conv0(out_dim):
    g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)

    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test pickle
    th.save(conv, tmp_buffer)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, out_dim)
    conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, out_dim)
    conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)
Example #26
def run_client(graph_name, barrier, num_nodes, num_edges):
    barrier.wait()
    g = DistGraph(server_namebook, graph_name)

    # Test API
    assert g.number_of_nodes() == num_nodes
    assert g.number_of_edges() == num_edges

    # Test reading node data
    nids = F.arange(0, int(g.number_of_nodes() / 2))
    feats1 = g.ndata['features'][nids]
    feats = F.squeeze(feats1, 1)
    assert np.all(F.asnumpy(feats == nids))

    # Test reading edge data
    eids = F.arange(0, int(g.number_of_edges() / 2))
    feats1 = g.edata['features'][eids]
    feats = F.squeeze(feats1, 1)
    assert np.all(F.asnumpy(feats == eids))

    # Test init node data
    new_shape = (g.number_of_nodes(), 2)
    g.init_ndata('test1', new_shape, F.int32)
    feats = g.ndata['test1'][nids]
    assert np.all(F.asnumpy(feats) == 0)

    # Test init edge data
    new_shape = (g.number_of_edges(), 2)
    g.init_edata('test1', new_shape, F.int32)
    feats = g.edata['test1'][eids]
    assert np.all(F.asnumpy(feats) == 0)

    # Test write data
    new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
    g.ndata['test1'][nids] = new_feats
    feats = g.ndata['test1'][nids]
    assert np.all(F.asnumpy(feats) == 1)

    # Test metadata operations.
    assert len(g.ndata['features']) == g.number_of_nodes()
    assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
    assert g.ndata['features'].dtype == F.int64
    assert g.node_attr_schemes()['features'].dtype == F.int64
    assert g.node_attr_schemes()['test1'].dtype == F.int32
    assert g.node_attr_schemes()['features'].shape == (1, )

    g.shut_down()
    print('end')
Example #27
def _test1(p, replace):
    for i in range(10):
        subg = dgl.sampling.sample_neighbors(g, [0, 1], 2, prob=p, replace=replace)
        assert subg.number_of_nodes() == g.number_of_nodes()
        assert subg.number_of_edges() == 4
        u, v = subg.edges()
        assert set(F.asnumpy(F.unique(v))) == {0, 1}
        assert F.array_equal(g.has_edges_between(u, v), F.ones((4,), dtype=F.int64))
        assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
        edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
        if not replace:
            # check no duplication
            assert len(edge_set) == 4
        if p is not None:
            assert (3, 0) not in edge_set
            assert (3, 1) not in edge_set
Example #28
def _test2(p, replace):  # fanout > #neighbors
    for i in range(10):
        subg = dgl.sampling.sample_neighbors(g, [0, 2], 2, prob=p, replace=replace, edge_dir='out')
        assert subg.number_of_nodes() == g.number_of_nodes()
        num_edges = 4 if replace else 3
        assert subg.number_of_edges() == num_edges
        u, v = subg.edges()
        assert set(F.asnumpy(F.unique(u))) == {0, 2}
        assert F.array_equal(g.has_edges_between(u, v), F.ones((num_edges,), dtype=F.int64))
        assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
        edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
        if not replace:
            # check no duplication
            assert len(edge_set) == num_edges
        if p is not None:
            assert (0, 3) not in edge_set
Example #29
def test_backward():
    g = create_test_heterograph()
    x = F.randn((3, 5))
    F.attach_grad(x)
    g.nodes['user'].data['h'] = x
    with F.record_grad():
        g.multi_update_all(
            {'plays' : (fn.copy_u('h', 'm'), fn.sum('m', 'y')),
             'wishes': (fn.copy_u('h', 'm'), fn.sum('m', 'y'))},
            'sum')
        y = g.nodes['game'].data['y']
        F.backward(y, F.ones(y.shape))
    print(F.grad(x))
    assert F.array_equal(F.grad(x), F.tensor([[2., 2., 2., 2., 2.],
                                              [2., 2., 2., 2., 2.],
                                              [2., 2., 2., 2., 2.]]))
Example #30
def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    print(conv)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)
Example #31
def test_negative(backendopt):

    for datatype in backendopt:
        T.set_backend(datatype)

        x2 = ad.Variable(name="x2", shape=[3])
        y = ad.sum(-x2)

        grad_x2, = ad.gradients(y, [x2])

        executor = ad.Executor([y, grad_x2])
        x2_val = 2 * T.ones(3)
        y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, T.sum(-x2_val))
        assert T.array_equal(grad_x2_val, -T.ones_like(x2_val))
Example #32
def one(shape, name=None):
    return K.ones(shape, name=name)
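A minimal usage sketch for the wrapper above, assuming K is the Keras backend (e.g. from keras import backend as K); the shape and variable name are illustrative.

from keras import backend as K

def one(shape, name=None):
    return K.ones(shape, name=name)

# create a (2, 3) variable filled with ones and inspect its value
v = one((2, 3), name="ones_var")
print(K.eval(v))
# [[1. 1. 1.]
#  [1. 1. 1.]]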