Example #1
File: test_sparse.py Project: simco19/dgl
def test_edge_softmax(g, norm_by, shp, idtype):
    g = g.astype(idtype).to(F.ctx())
    edata = F.tensor(np.random.rand(g.number_of_edges(), *shp))
    e1 = F.attach_grad(F.clone(edata))

    with F.record_grad():
        score1 = edge_softmax(g, e1, norm_by=norm_by)
        F.backward(F.reduce_sum(score1))
        grad_edata = F.grad(e1)

    with F.record_grad():
        e2 = F.attach_grad(F.clone(edata))
        e2_2d = F.reshape(
            e2,
            (g.number_of_src_nodes(), g.number_of_dst_nodes(), *e2.shape[1:]))
        if norm_by == 'src':
            score2 = F.softmax(e2_2d, 1)
            score2 = F.reshape(score2, (-1, *e2.shape[1:]))
        if norm_by == 'dst':
            score2 = F.softmax(e2_2d, 0)
            score2 = F.reshape(score2, (-1, *e2.shape[1:]))
        assert F.allclose(score1, score2)
        print('forward passed')

        F.backward(F.reduce_sum(score2))
        assert F.allclose(F.grad(e2), grad_edata)
        print('backward passed')
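
For reference, a minimal sketch of the semantics this test checks, assuming the PyTorch backend and the public dgl.ops.edge_softmax entry point (the tiny graph and scores below are made up for illustration):

import dgl
import torch

g = dgl.graph(([0, 0, 1], [1, 2, 2]))            # three edges
scores = torch.randn(g.num_edges(), 1)
attn = dgl.ops.edge_softmax(g, scores)           # norm_by='dst' by default
# Edges 1 (0->2) and 2 (1->2) share destination 2, so their weights sum to 1.
assert torch.allclose(attn[1] + attn[2], torch.ones(1))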
Example #2
def check_dist_emb(g, num_clients, num_nodes, num_edges):
    from dgl.distributed.optim import SparseAdagrad
    from dgl.distributed import DistEmbedding
    # Test sparse emb
    try:
        emb = DistEmbedding(g.number_of_nodes(), 1, 'emb1', emb_init)
        nids = F.arange(0, int(g.number_of_nodes()))
        lr = 0.001
        optimizer = SparseAdagrad([emb], lr=lr)
        with F.record_grad():
            feats = emb(nids)
            assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
            loss = F.sum(feats + 1, 0)
        loss.backward()
        optimizer.step()
        feats = emb(nids)
        if num_clients == 1:
            assert_almost_equal(F.asnumpy(feats),
                                np.ones((len(nids), 1)) * -lr)
        rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
        feats1 = emb(rest)
        assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))

        policy = dgl.distributed.PartitionPolicy('node',
                                                 g.get_partition_book())
        grad_sum = dgl.distributed.DistTensor((g.number_of_nodes(), 1),
                                              F.float32, 'emb1_sum', policy)
        if num_clients == 1:
            assert np.all(
                F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)) *
                num_clients)
        assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))

        emb = DistEmbedding(g.number_of_nodes(), 1, 'emb2', emb_init)
        with F.no_grad():
            feats1 = emb(nids)
        assert np.all(F.asnumpy(feats1) == 0)

        optimizer = SparseAdagrad([emb], lr=lr)
        with F.record_grad():
            feats1 = emb(nids)
            feats2 = emb(nids)
            feats = F.cat([feats1, feats2], 0)
            assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
            loss = F.sum(feats + 1, 0)
        loss.backward()
        optimizer.step()
        with F.no_grad():
            feats = emb(nids)
        if num_clients == 1:
            assert_almost_equal(F.asnumpy(feats),
                                np.ones((len(nids), 1)) * 1 * -lr)
        rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
        feats1 = emb(rest)
        assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
    except NotImplementedError as e:
        pass
    except Exception as e:
        print(e)
        sys.exit(-1)
Example #3
def test_segment_reduce(reducer):
    ctx = F.ctx()
    value = F.tensor(np.random.rand(10, 5))
    v1 = F.attach_grad(F.clone(value))
    v2 = F.attach_grad(F.clone(value))
    seglen = F.tensor([2, 3, 0, 4, 1, 0, 0])
    u = F.copy_to(F.arange(0, F.shape(value)[0], F.int32), ctx)
    v = F.repeat(F.copy_to(F.arange(0, len(seglen), F.int32), ctx),
                 seglen,
                 dim=0)

    num_nodes = {'_U': len(u), '_V': len(seglen)}
    g = dgl.convert.heterograph({('_U', '_E', '_V'): (u, v)},
                                num_nodes_dict=num_nodes)
    with F.record_grad():
        rst1 = gspmm(g, 'copy_lhs', reducer, v1, None)
        if reducer in ['max', 'min']:
            rst1 = F.replace_inf_with_zero(rst1)
        F.backward(F.reduce_sum(rst1))
        grad1 = F.grad(v1)

    with F.record_grad():
        rst2 = segment_reduce(seglen, v2, reducer=reducer)
        F.backward(F.reduce_sum(rst2))
        assert F.allclose(rst1, rst2)
        print('forward passed')

        grad2 = F.grad(v2)
        assert F.allclose(grad1, grad2)
        print('backward passed')
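
As a plain-NumPy reference for what segment_reduce computes (values are made up; empty segments produce the reducer's identity, which is why the max/min branch above replaces inf with zero):

import numpy as np

value = np.arange(10.0)
seglen = [2, 3, 0, 4, 1]                          # must sum to len(value)
offsets = np.cumsum([0] + seglen)
out = np.array([value[s:e].sum() for s, e in zip(offsets[:-1], offsets[1:])])
print(out)                                        # [ 1.  9.  0. 26.  9.]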
Example #4
File: test_kernel.py Project: zemingd/dgl
    def _test(red):
        g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
        hu, hv, he = generate_feature(g, 'none')
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            g.update_all(fn.copy_edge(edge='e', out='m'),
                         builtin[red](msg='m', out='r1'))
            r1 = g.ndata['r1']
            F.backward(r1.sum())
            e_grad1 = F.grad(g.edata['e'])

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            g.update_all(udf_copy_edge, udf_reduce[red])
            r2 = g.ndata['r2']
            F.backward(r2.sum())
            e_grad2 = F.grad(g.edata['e'])

        assert F.allclose(r1, r2)
        assert(F.allclose(e_grad1, e_grad2))
Example #5
File: test_sparse.py Project: aryaman4/dgl
def test_sddmm(g, shp, lhs_target, rhs_target, msg, index_dtype):
    if dgl.backend.backend_name == 'mxnet' and g.number_of_edges() == 0:
        pytest.skip()  # MXNet does not support zero-shape tensors.
    if dgl.backend.backend_name == 'tensorflow' and index_dtype == 'int32':
        pytest.skip()  # TensorFlow DLPack has problems with int32 ndarrays.
    if index_dtype == 'int32':
        g = g.int()
    else:
        g = g.long()
    print(g)
    print(g.idtype)

    len_lhs = select(lhs_target, g.number_of_src_nodes(), g.number_of_edges(),
                     g.number_of_dst_nodes())
    lhs_shp = (len_lhs, ) + shp[0]
    len_rhs = select(rhs_target, g.number_of_src_nodes(), g.number_of_edges(),
                     g.number_of_dst_nodes())
    rhs_shp = (len_rhs, ) + shp[1]
    feat_lhs = F.tensor(np.random.rand(*lhs_shp) + 1)
    feat_rhs = F.tensor(np.random.rand(*rhs_shp) + 1)
    print('lhs shape: {}, rhs shape: {}'.format(F.shape(feat_lhs),
                                                F.shape(feat_rhs)))

    lhs_frame = select(lhs_target, g.srcdata, g.edata, g.dstdata)
    rhs_frame = select(rhs_target, g.srcdata, g.edata, g.dstdata)
    lhs_frame['x'] = F.attach_grad(F.clone(feat_lhs))
    rhs_frame['y'] = F.attach_grad(F.clone(feat_rhs))
    msg_func = lhs_target + '_' + msg + '_' + rhs_target
    print('SDDMM(message func: {})'.format(msg_func))

    lhs = F.attach_grad(F.clone(feat_lhs))
    rhs = F.attach_grad(F.clone(feat_rhs))
    with F.record_grad():
        e = gsddmm(g,
                   msg,
                   lhs,
                   rhs,
                   lhs_target=lhs_target,
                   rhs_target=rhs_target)
        F.backward(F.reduce_sum(e))
        grad_lhs = F.grad(lhs)
        grad_rhs = F.grad(rhs)

    with F.record_grad():
        g.apply_edges(udf_apply_edges[msg_func])
        if g.number_of_edges() > 0:
            e1 = g.edata['m']
            assert F.allclose(e, e1)
            print('forward passed')

            F.backward(F.reduce_sum(e1))
            if msg != 'copy_rhs':
                assert F.allclose(F.grad(lhs_frame['x']), grad_lhs)
            if msg != 'copy_lhs':
                assert F.allclose(F.grad(rhs_frame['y']), grad_rhs)
            print('backward passed')

    lhs_frame.pop('x')
    rhs_frame.pop('y')
    if 'm' in g.edata: g.edata.pop('m')
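
For orientation, SDDMM produces one output per edge by combining a feature taken from the lhs target (src/edge/dst) with one from the rhs target. A NumPy sketch of the common u_dot_v message, with illustrative shapes and values:

import numpy as np

src, dst = np.array([0, 0, 1]), np.array([1, 2, 2])
x = np.random.rand(3, 4)                          # source-node features
y = np.random.rand(3, 4)                          # destination-node features
e = (x[src] * y[dst]).sum(-1, keepdims=True)      # one score per edge
print(e.shape)                                    # (3, 1)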
Example #6
    def _test(mfunc, rfunc):
        g = create_test_heterograph_2(idtype)
        g0 = create_test_heterograph(idtype)
        g1 = create_test_heterograph_large(idtype)
        cross_reducer = rfunc.__name__
        x1 = F.randn((g.num_nodes('user'), feat_size))
        x2 = F.randn((g.num_nodes('developer'), feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        g.nodes['user'].data['h'] = x1
        g.nodes['developer'].data['h'] = x2

        #################################################################
        #  multi_update_all(): call msg_passing separately for each etype
        #################################################################

        with F.record_grad():
            g.multi_update_all(
                {
                    etype: (mfunc('h', 'm'), rfunc('m', 'y'))
                    for etype in g.canonical_etypes
                }, cross_reducer)
            r1 = g.nodes['game'].data['y'].clone()
            r2 = g.nodes['user'].data['y'].clone()
            r3 = g.nodes['player'].data['y'].clone()
            loss = r1.sum() + r2.sum() + r3.sum()
            F.backward(loss)
            n_grad1 = F.grad(g.nodes['user'].data['h']).clone()
            n_grad2 = F.grad(g.nodes['developer'].data['h']).clone()

        g.nodes['user'].data.clear()
        g.nodes['developer'].data.clear()
        g.nodes['game'].data.clear()
        g.nodes['player'].data.clear()

        #################################################################
        #  update_all(): call msg_passing for all etypes
        #################################################################

        F.attach_grad(x1)
        F.attach_grad(x2)
        g.nodes['user'].data['h'] = x1
        g.nodes['developer'].data['h'] = x2

        with F.record_grad():
            g.update_all(mfunc('h', 'm'), rfunc('m', 'y'))
            r4 = g.nodes['game'].data['y']
            r5 = g.nodes['user'].data['y']
            r6 = g.nodes['player'].data['y']
            loss = r4.sum() + r5.sum() + r6.sum()
            F.backward(loss)
            n_grad3 = F.grad(g.nodes['user'].data['h'])
            n_grad4 = F.grad(g.nodes['developer'].data['h'])

        assert F.allclose(r1, r4)
        assert F.allclose(r2, r5)
        assert F.allclose(r3, r6)
        assert (F.allclose(n_grad1, n_grad3))
        assert (F.allclose(n_grad2, n_grad4))
Example #7
def test_spmm(idtype, g, shp, msg, reducer):
    g = g.astype(idtype).to(F.ctx())
    if dgl.backend.backend_name == 'tensorflow' and (reducer in ['min', 'max']):
        pytest.skip()  # TensorFlow DLPack has problems writing into int32 arrays on GPU.
    print(g)
    print(g.idtype)

    hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
    he = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
    print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))

    g.srcdata['x'] = F.attach_grad(F.clone(hu))
    g.edata['w'] = F.attach_grad(F.clone(he))
    print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))

    u = F.attach_grad(F.clone(hu))
    e = F.attach_grad(F.clone(he))
    with F.record_grad():
        v = gspmm(g, msg, reducer, u, e)
        non_degree_indices = F.tensor(
            np.nonzero(F.asnumpy(g.in_degrees()) != 0)[0])
        v = F.gather_row(v, non_degree_indices)
        if g.number_of_edges() > 0:
            F.backward(F.reduce_sum(v))
            if msg != 'copy_rhs':
                grad_u = F.grad(u)
            if msg != 'copy_lhs':
                grad_e = F.grad(e)

    with F.record_grad():
        g.update_all(udf_msg[msg], udf_reduce[reducer])
        if g.number_of_edges() > 0:
            v1 = F.gather_row(g.dstdata['v'], non_degree_indices)
            assert F.allclose(v, v1)
            print('forward passed')

            F.backward(F.reduce_sum(v1))
            if msg != 'copy_rhs':
                if reducer in ['min', 'max']: # there might be some numerical errors
                    rate = F.reduce_sum(F.abs(F.grad(g.srcdata['x']) - grad_u)) /\
                           F.reduce_sum(F.abs(grad_u))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.srcdata['x']), grad_u)
            if msg != 'copy_lhs':
                if reducer in ['min', 'max']:
                    rate = F.reduce_sum(F.abs(F.grad(g.edata['w']) - grad_e)) /\
                           F.reduce_sum(F.abs(grad_e))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.edata['w']), grad_e)
            print('backward passed')

    g.srcdata.pop('x')
    g.edata.pop('w')
    if 'v' in g.dstdata: g.dstdata.pop('v')
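
For orientation, gSpMM computes a message per edge and then reduces messages over each destination's incoming edges; the gather over non_degree_indices above merely skips zero-in-degree rows, whose value depends on the reducer. A NumPy sketch of gspmm(g, 'mul', 'sum', u, e), with illustrative shapes:

import numpy as np

src, dst = np.array([0, 0, 1]), np.array([1, 2, 2])
u = np.random.rand(3, 4)                          # source-node features
e = np.random.rand(3, 4)                          # edge features
m = u[src] * e                                    # per-edge messages ('mul')
v = np.zeros((3, 4))
np.add.at(v, dst, m)                              # 'sum' over incoming edges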
Example #8
    def _test(red, partial):
        g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
        # NOTE(zihao): add self-loop to avoid zero-degree nodes.
        # https://github.com/dmlc/dgl/issues/761
        g.add_edges(g.nodes(), g.nodes())
        g = g.to(F.ctx())
        hu, hv, he = generate_feature(g, 'none', 'none')
        if partial:
            nid = F.tensor(list(range(0, 100, 2)), g.idtype)

        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, fn.copy_src(src='u', out='m'),
                       builtin[red](msg='m', out='r1'))
            else:
                g.update_all(fn.copy_src(src='u', out='m'),
                             builtin[red](msg='m', out='r1'))
            r1 = g.ndata['r1']
            F.backward(F.reduce_sum(r1))
            n_grad1 = F.grad(g.ndata['u'])

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, udf_copy_src, udf_reduce[red])
            else:
                g.update_all(udf_copy_src, udf_reduce[red])
            r2 = g.ndata['r2']
            F.backward(F.reduce_sum(r2))
            n_grad2 = F.grad(g.ndata['u'])

        def _print_error(a, b):
            print("ERROR: Test copy_src_{} partial: {}".
                  format(red, partial))
            for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)

        if F.backend_name != "jax":
            if not F.allclose(n_grad1, n_grad2):
                print('node grad')
                _print_error(n_grad1, n_grad2)
            assert(F.allclose(n_grad1, n_grad2))
Example #9
def test_spmm(idtype, g, shp, msg, reducer):
    g = g.astype(idtype).to(F.ctx())
    print(g)
    print(g.idtype)

    hu = F.tensor(np.random.rand(*((g.number_of_src_nodes(), ) + shp[0])) + 1)
    he = F.tensor(np.random.rand(*((g.number_of_edges(), ) + shp[1])) + 1)
    print('u shape: {}, e shape: {}'.format(F.shape(hu), F.shape(he)))

    g.srcdata['x'] = F.attach_grad(F.clone(hu))
    g.edata['w'] = F.attach_grad(F.clone(he))
    print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))

    u = F.attach_grad(F.clone(hu))
    e = F.attach_grad(F.clone(he))
    with F.record_grad():
        v = gspmm(g, msg, reducer, u, e)
        if reducer in ['max', 'min']:
            v = F.replace_inf_with_zero(v)
        if g.number_of_edges() > 0:
            F.backward(F.reduce_sum(v))
            if msg != 'copy_rhs':
                grad_u = F.grad(u)
            if msg != 'copy_lhs':
                grad_e = F.grad(e)

    with F.record_grad():
        g.update_all(udf_msg[msg], udf_reduce[reducer])
        if g.number_of_edges() > 0:
            v1 = g.dstdata['v']
            assert F.allclose(v, v1)
            print('forward passed')

            F.backward(F.reduce_sum(v1))
            if msg != 'copy_rhs':
                if reducer in ['min',
                               'max']:  # there might be some numerical errors
                    rate = F.reduce_sum(F.abs(F.grad(g.srcdata['x']) - grad_u)) /\
                           F.reduce_sum(F.abs(grad_u))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.srcdata['x']), grad_u)
            if msg != 'copy_lhs':
                if reducer in ['min', 'max']:
                    rate = F.reduce_sum(F.abs(F.grad(g.edata['w']) - grad_e)) /\
                           F.reduce_sum(F.abs(grad_e))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.edata['w']), grad_e)
            print('backward passed')

    g.srcdata.pop('x')
    g.edata.pop('w')
    if 'v' in g.dstdata: g.dstdata.pop('v')
Example #10
    def _test(red, partial):
        g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
        # NOTE(zihao): add self-loop to avoid zero-degree nodes.
        g.add_edges(g.nodes(), g.nodes())
        hu, hv, he = generate_feature(g, 'none', 'none')
        if partial:
            nid = F.tensor(list(range(0, 100, 2)))

        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, fn.copy_edge(edge='e', out='m'),
                       builtin[red](msg='m', out='r1'))
            else:
                g.update_all(fn.copy_edge(edge='e', out='m'),
                             builtin[red](msg='m', out='r1'))
            r1 = g.ndata['r1']
            F.backward(r1.sum())
            e_grad1 = F.grad(g.edata['e'])

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, udf_copy_edge, udf_reduce[red])
            else:
                g.update_all(udf_copy_edge, udf_reduce[red])
            r2 = g.ndata['r2']
            F.backward(r2.sum())
            e_grad2 = F.grad(g.edata['e'])

        def _print_error(a, b):
            print("ERROR: Test copy_edge_{} partial: {}".format(red, partial))
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if not F.allclose(e_grad1, e_grad2):
            print('edge gradient')
            _print_error(e_grad1, e_grad2)
        assert (F.allclose(e_grad1, e_grad2))
Example #11
def test_csrmask_backward(idtype, dtype):
    a, A = _random_simple_graph(idtype, dtype, F.ctx(), 3, 4, 6, 'A', 'B', 'AB')
    b, B = _random_simple_graph(idtype, dtype, F.ctx(), 3, 4, 6, 'A', 'B', 'AB')
    A_row, A_col = A.edges(order='eid')
    B_row, B_col = B.edges(order='eid')
    A_row = F.asnumpy(A_row)
    A_col = F.asnumpy(A_col)
    B_row = F.asnumpy(B_row)
    B_col = F.asnumpy(B_col)
    a_dense = F.attach_grad(F.tensor(a.todense(), dtype=dtype))

    A.edata['w'] = F.attach_grad(A.edata['w'])

    with F.record_grad():
        # Test for two element case
        C1 = F.csrmask(A._graph, A.edata['w'], B._graph)
        if dgl.backend.backend_name == 'tensorflow':
            import tensorflow as tf
            C2 = tf.gather_nd(a_dense, tf.stack([B_row, B_col], 1))
        else:
            C2 = a_dense[B_row, B_col]
        assert F.allclose(C1, C2, rtol=1e-4, atol=1e-4)

        F.backward(F.reduce_sum(C1) + F.reduce_sum(C2))
        a_dense_grad = F.asnumpy(F.grad(a_dense))[A_row, A_col]
        A_spspmm_grad = F.asnumpy(F.grad(A.edata['w']))
        assert np.allclose(a_dense_grad, A_spspmm_grad, rtol=1e-4, atol=1e-4)
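
In effect, F.csrmask reads from sparse matrix A the entries at B's nonzero positions, one value per edge of B. A SciPy sketch with made-up matrices (edge order may differ from DGL's eid order):

import numpy as np
import scipy.sparse as sp

A = sp.random(3, 4, density=0.5, format='csr')
B = sp.random(3, 4, density=0.5, format='csr')
rows, cols = B.nonzero()
masked = A.toarray()[rows, cols]                  # one value per edge of B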
Example #12
def test_csrmm_backward(idtype, dtype, num_vtypes):
    a, A = _random_simple_graph(idtype, dtype, F.ctx(), 3, 4, 6, 'A', 'B', 'AB')
    b, B = _random_simple_graph(idtype, dtype, F.ctx(), 4, 3, 6, 'B', 'A' if num_vtypes == 1 else 'C', 'BA')
    A_row, A_col = A.edges(order='eid')
    B_row, B_col = B.edges(order='eid')
    A_row = F.asnumpy(A_row)
    A_col = F.asnumpy(A_col)
    B_row = F.asnumpy(B_row)
    B_col = F.asnumpy(B_col)
    a_dense = F.attach_grad(F.tensor(a.todense(), dtype=dtype))
    b_dense = F.attach_grad(F.tensor(b.todense(), dtype=dtype))

    A.edata['w'] = F.attach_grad(A.edata['w'])
    B.edata['w'] = F.attach_grad(B.edata['w'])

    with F.record_grad():
        C = dgl.adj_product_graph(A, B, 'w')
        assert len(C.ntypes) == num_vtypes
        assert len(C.etypes) == 1
        C_dense = np.zeros((3, 3))
        C_row, C_col = C.edges(order='eid')
        C_row = F.asnumpy(C_row)
        C_col = F.asnumpy(C_col)
        C_dense[C_row, C_col] = F.asnumpy(C.edata['w'])
        c_dense = F.matmul(a_dense, b_dense)
        assert np.allclose(C_dense, F.asnumpy(c_dense), rtol=1e-4, atol=1e-4)

        F.backward(F.reduce_sum(C.edata['w']) + F.reduce_sum(c_dense))
        a_dense_grad = F.asnumpy(F.grad(a_dense))[A_row, A_col]
        b_dense_grad = F.asnumpy(F.grad(b_dense))[B_row, B_col]
        A_spspmm_grad = F.asnumpy(F.grad(A.edata['w']))
        B_spspmm_grad = F.asnumpy(F.grad(B.edata['w']))
        assert np.allclose(a_dense_grad, A_spspmm_grad, rtol=1e-4, atol=1e-4)
        assert np.allclose(b_dense_grad, B_spspmm_grad, rtol=1e-4, atol=1e-4)
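
adj_product_graph is sparse-sparse matrix multiplication on the weighted adjacency matrices: the dense check above verifies C = A @ B entry by entry. A SciPy sketch of the same product (illustrative matrices):

import scipy.sparse as sp

A = sp.random(3, 4, density=0.5, format='csr')
B = sp.random(4, 3, density=0.5, format='csr')
C = A @ B          # what C.edata['w'] encodes, up to explicit zeros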
Example #13
    def _test(mfunc, rfunc):

        g = create_test_heterograph(idtype)
        feat_size = 2

        x1 = F.randn((4, feat_size))
        x2 = F.randn((4, feat_size))
        x3 = F.randn((3, feat_size))
        x4 = F.randn((3, feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['eid'] = x1
        g['follows'].edata['eid'] = x2
        g['develops'].edata['eid'] = x3
        g['wishes'].edata['eid'] = x4

        #################################################################
        #  multi_update_all(): call msg_passing separately for each etype
        #################################################################

        with F.record_grad():
            g.multi_update_all(
                {
                    'plays': (mfunc('eid', 'm'), rfunc('m', 'y')),
                    'follows': (mfunc('eid', 'm'), rfunc('m', 'y')),
                    'develops': (mfunc('eid', 'm'), rfunc('m', 'y')),
                    'wishes': (mfunc('eid', 'm'), rfunc('m', 'y'))
                }, 'sum')
            r1 = g.nodes['game'].data['y']
            F.backward(r1, F.randn(r1.shape))
            e_grad1 = F.grad(g['develops'].edata['eid'])

        #################################################################
        #  update_all(): call msg_passing for all etypes
        #################################################################

        # TODO(Israt): output type can be None in multi_update and empty
        # tensor in new_update_all
        g.update_all(mfunc('eid', 'm'), rfunc('m', 'y'))
        r2 = g.nodes['game'].data['y']
        F.backward(r2, F.randn(r2.shape))
        e_grad2 = F.grad(g['develops'].edata['eid'])

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if not F.allclose(e_grad1, e_grad2):
            print('edge grad')
            _print_error(e_grad1, e_grad2)
        assert (F.allclose(e_grad1, e_grad2))
Example #14
def test_row2():
    # test row getter/setter autograd compatibility
    data = create_test_data(grad=True)
    f = FrameRef(Frame(data))

    with F.record_grad():
        # getter
        c1 = f['a1']
        # test non-duplicate keys
        rowid = Index(F.tensor([0, 2]))
        rows = f[rowid]
        y = rows['a1']
        F.backward(y, F.ones((len(rowid), D)))
    assert F.allclose(
        F.grad(c1)[:, 0], F.tensor([1., 0., 1., 0., 0., 0., 0., 0., 0., 0.]))

    f['a1'] = F.attach_grad(f['a1'])
    with F.record_grad():
        c1 = f['a1']
        # test duplicate keys
        rowid = Index(F.tensor([8, 2, 2, 1]))
        rows = f[rowid]
        y = rows['a1']
        F.backward(y, F.ones((len(rowid), D)))
    assert F.allclose(
        F.grad(c1)[:, 0], F.tensor([0., 1., 2., 0., 0., 0., 0., 0., 1., 0.]))

    f['a1'] = F.attach_grad(f['a1'])
    with F.record_grad():
        # setter
        c1 = f['a1']
        rowid = Index(F.tensor([0, 2, 4]))
        vals = {
            'a1': F.attach_grad(F.zeros((len(rowid), D))),
            'a2': F.attach_grad(F.zeros((len(rowid), D))),
            'a3': F.attach_grad(F.zeros((len(rowid), D))),
        }
        f[rowid] = vals
        c11 = f['a1']
        F.backward(c11, F.ones((N, D)))
    assert F.allclose(
        F.grad(c1)[:, 0], F.tensor([0., 1., 0., 1., 0., 1., 1., 1., 1., 1.]))
    assert F.allclose(F.grad(vals['a1']), F.ones((len(rowid), D)))
    assert F.is_no_grad(vals['a2'])
Example #15
    def _test(mfunc):

        g = create_test_heterograph(idtype)
        feat_size = 2

        x1 = F.randn((4, feat_size))
        x2 = F.randn((4, feat_size))
        x3 = F.randn((3, feat_size))
        x4 = F.randn((3, feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['eid'] = x1
        g['follows'].edata['eid'] = x2
        g['develops'].edata['eid'] = x3
        g['wishes'].edata['eid'] = x4

        #################################################################
        #  apply_edges() is called on each relation type separately
        #################################################################
        with F.record_grad():
            [
                g.apply_edges(fn.copy_e('eid', 'm'), etype=rel)
                for rel in g.canonical_etypes
            ]
            r1 = g['develops'].edata['m']
            F.backward(r1, F.ones(r1.shape))
            e_grad1 = F.grad(g['develops'].edata['eid'])

        #################################################################
        #  apply_edges() is called on all relation types
        #################################################################

        g.apply_edges(fn.copy_e('eid', 'm'))
        r2 = g['develops'].edata['m']
        F.backward(r2, F.ones(r2.shape))
        e_grad2 = F.grad(g['develops'].edata['eid'])

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if not F.allclose(e_grad1, e_grad2):
            print('edge grad')
            _print_error(e_grad1, e_grad2)
        assert (F.allclose(e_grad1, e_grad2))
Example #16
def test_batch_setter_autograd():
    g = generate_graph(grad=True)
    h1 = g.ndata['h']
    # partial set
    v = F.tensor([1, 2, 8])
    hh = F.attach_grad(F.zeros((len(v), D)))
    with F.record_grad():
        g.nodes[v].data['h'] = hh
        h2 = g.ndata['h']
    F.backward(h2, F.ones((10, D)) * 2)
    assert F.array_equal(F.grad(h1)[:,0], F.tensor([2., 0., 0., 2., 2., 2., 2., 2., 0., 2.]))
    assert F.array_equal(F.grad(hh)[:,0], F.tensor([2., 2., 2.]))
Example #17
    def _test(mfunc, rfunc):

        g = create_test_heterograph(idtype)

        x1 = F.randn((g.num_nodes('user'), feat_size))
        x2 = F.randn((g.num_nodes('developer'), feat_size))

        F.attach_grad(x1)
        F.attach_grad(x2)
        g.nodes['user'].data['h'] = x1
        g.nodes['developer'].data['h'] = x2

        #################################################################
        #  multi_update_all(): call msg_passing separately for each etype
        #################################################################

        with F.record_grad():
            g.multi_update_all(
                {
                    'plays': (mfunc('h', 'm'), rfunc('m', 'y')),
                    'follows': (mfunc('h', 'm'), rfunc('m', 'y')),
                    'develops': (mfunc('h', 'm'), rfunc('m', 'y')),
                    'wishes': (mfunc('h', 'm'), rfunc('m', 'y'))
                }, 'sum')
            r1 = g.nodes['game'].data['y']
            F.backward(r1, F.randn(r1.shape))
            n_grad1 = F.grad(g.nodes['user'].data['h'])
            g.nodes['game'].data.clear()

        #################################################################
        #  update_all(): call msg_passing for all etypes
        #################################################################

        g.update_all(mfunc('h', 'm'), rfunc('m', 'y'))
        r2 = g.nodes['game'].data['y']
        F.backward(r2, F.randn(r2.shape))
        n_grad2 = F.grad(g.nodes['user'].data['h'])

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if not F.allclose(n_grad1, n_grad2):
            print('node grad')
            _print_error(n_grad1, n_grad2)
        assert (F.allclose(n_grad1, n_grad2))
Example #18
File: test_kernel.py Project: laifi/dgl
    def _test(red, partial):
        g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
        # NOTE(zihao): add self-loop to avoid zero-degree nodes.
        # https://github.com/dmlc/dgl/issues/761
        g.add_edges(g.nodes(), g.nodes())
        hu, hv, he = generate_feature(g, 'none')
        if partial:
            nid = F.tensor(list(range(0, 100, 2)))

        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, fn.copy_src(src='u', out='m'),
                       builtin[red](msg='m', out='r1'))
            else:
                g.update_all(fn.copy_src(src='u', out='m'),
                             builtin[red](msg='m', out='r1'))
            r1 = g.ndata['r1']
            F.backward(r1.sum())
            n_grad1 = F.grad(g.ndata['u'])

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, udf_copy_src, udf_reduce[red])
            else:
                g.update_all(udf_copy_src, udf_reduce[red])
            r2 = g.ndata['r2']
            F.backward(r2.sum())
            n_grad2 = F.grad(g.ndata['u'])

        assert F.allclose(r1, r2)
        assert(F.allclose(n_grad1, n_grad2))
Example #19
    def _test(mfunc):

        g = create_test_heterograph(idtype)

        x1 = F.randn((g.num_nodes('user'), feat_size))
        x2 = F.randn((g.num_nodes('developer'), feat_size))

        F.attach_grad(x1)
        F.attach_grad(x2)
        g.nodes['user'].data['h'] = x1
        g.nodes['developer'].data['h'] = x2

        #################################################################
        #  apply_edges() is called on each relation type separately
        #################################################################

        with F.record_grad():
            [
                g.apply_edges(fn.copy_u('h', 'm'), etype=rel)
                for rel in g.canonical_etypes
            ]
            r1 = g['plays'].edata['m']
            F.backward(r1, F.ones(r1.shape))
            n_grad1 = F.grad(g.ndata['h']['user'])
        # TODO (Israt): clear not working
        g.edata['m'].clear()

        #################################################################
        #  apply_edges() is called on all relation types
        #################################################################

        g.apply_edges(fn.copy_u('h', 'm'))
        r2 = g['plays'].edata['m']
        F.backward(r2, F.ones(r2.shape))
        n_grad2 = F.grad(g.nodes['user'].data['h'])

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if not F.allclose(n_grad1, n_grad2):
            print('node grad')
            _print_error(n_grad1, n_grad2)
        assert (F.allclose(n_grad1, n_grad2))
Example #20
def test_csrsum_backward(idtype, dtype, nelems):
    a, A = _random_simple_graph(idtype, dtype, F.ctx(), 3, 4, 6, 'A', 'B', 'AB')
    b, B = _random_simple_graph(idtype, dtype, F.ctx(), 3, 4, 6, 'A', 'B', 'AB')
    A_row, A_col = A.edges(order='eid')
    B_row, B_col = B.edges(order='eid')
    A_row = F.asnumpy(A_row)
    A_col = F.asnumpy(A_col)
    B_row = F.asnumpy(B_row)
    B_col = F.asnumpy(B_col)
    a_dense = F.attach_grad(F.tensor(a.todense(), dtype=dtype))
    b_dense = F.attach_grad(F.tensor(b.todense(), dtype=dtype))

    A.edata['w'] = F.attach_grad(A.edata['w'])
    B.edata['w'] = F.attach_grad(B.edata['w'])

    with F.record_grad():
        if nelems == 2:
            # Test for two element case
            C = dgl.adj_sum_graph([A, B], 'w')
            assert C.canonical_etypes == A.canonical_etypes
            C_dense = np.zeros((3, 4))
            C_row, C_col = C.edges(order='eid')
            C_row = F.asnumpy(C_row)
            C_col = F.asnumpy(C_col)
            C_dense[C_row, C_col] = F.asnumpy(C.edata['w'])
            c_dense = a_dense + b_dense
            assert np.allclose(C_dense, F.asnumpy(c_dense), rtol=1e-4, atol=1e-4)

            F.backward(F.reduce_sum(C.edata['w']) + F.reduce_sum(c_dense))
            a_dense_grad = F.asnumpy(F.grad(a_dense))[A_row, A_col]
            b_dense_grad = F.asnumpy(F.grad(b_dense))[B_row, B_col]
            A_spspmm_grad = F.asnumpy(F.grad(A.edata['w']))
            B_spspmm_grad = F.asnumpy(F.grad(B.edata['w']))
            assert np.allclose(a_dense_grad, A_spspmm_grad, rtol=1e-4, atol=1e-4)
            assert np.allclose(b_dense_grad, B_spspmm_grad, rtol=1e-4, atol=1e-4)
        elif nelems == 1:
            # Test for single element case
            C = dgl.adj_sum_graph([A], 'w')
            assert C.canonical_etypes == A.canonical_etypes
            C_dense = np.zeros((3, 4))
            C_row, C_col = C.edges(order='eid')
            C_row = F.asnumpy(C_row)
            C_col = F.asnumpy(C_col)
            C_dense[C_row, C_col] = F.asnumpy(C.edata['w'])
            c_dense = a_dense
            assert np.allclose(C_dense, F.asnumpy(c_dense), rtol=1e-4, atol=1e-4)

            F.backward(F.reduce_sum(C.edata['w']) + F.reduce_sum(c_dense))
            a_dense_grad = F.asnumpy(F.grad(a_dense))[A_row, A_col]
            A_spspmm_grad = F.asnumpy(F.grad(A.edata['w']))
            assert np.allclose(a_dense_grad, A_spspmm_grad, rtol=1e-4, atol=1e-4)
Example #21
    def _test(red, partial):
        g = dgl.DGLGraph(nx.erdos_renyi_graph(100, 0.1))
        hu, hv, he = generate_feature(g, 'none')
        if partial:
            nid = F.tensor(list(range(0, 100, 2)))

        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, fn.copy_src(src='u', out='m'),
                       builtin[red](msg='m', out='r1'))
            else:
                g.update_all(fn.copy_src(src='u', out='m'),
                             builtin[red](msg='m', out='r1'))
            r1 = g.ndata['r1']
            F.backward(r1.sum())
            n_grad1 = F.grad(g.ndata['u'])

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        with F.record_grad():
            if partial:
                g.pull(nid, udf_copy_src, udf_reduce[red])
            else:
                g.update_all(udf_copy_src, udf_reduce[red])
            r2 = g.ndata['r2']
            F.backward(r2.sum())
            n_grad2 = F.grad(g.ndata['u'])

        assert F.allclose(r1, r2)
        assert (F.allclose(n_grad1, n_grad2))
Example #22
def test_backward():
    g = create_test_heterograph()
    x = F.randn((3, 5))
    F.attach_grad(x)
    g.nodes['user'].data['h'] = x
    with F.record_grad():
        g.multi_update_all(
            {'plays' : (fn.copy_u('h', 'm'), fn.sum('m', 'y')),
             'wishes': (fn.copy_u('h', 'm'), fn.sum('m', 'y'))},
            'sum')
        y = g.nodes['game'].data['y']
        F.backward(y, F.ones(y.shape))
    print(F.grad(x))
    assert F.array_equal(F.grad(x), F.tensor([[2., 2., 2., 2., 2.],
                                              [2., 2., 2., 2., 2.],
                                              [2., 2., 2., 2., 2.]]))
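
The all-2 gradient follows because, per the asserted result, each user feature row reaches 'game' along exactly two copy-then-sum edges across the 'plays' and 'wishes' relations in this fixture. The same effect in plain PyTorch, as an analogy rather than the DGL code path:

import torch

x = torch.randn(3, 5, requires_grad=True)
y = x.clone() + x.clone()          # two copy-then-sum paths per row
y.backward(torch.ones_like(y))
assert torch.equal(x.grad, torch.full((3, 5), 2.))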
Example #23
    def _test(lhs, rhs, binary_op):

        g = create_test_heterograph(idtype)

        n1 = F.randn((g.num_nodes('user'), feat_size))
        n2 = F.randn((g.num_nodes('developer'), feat_size))
        n3 = F.randn((g.num_nodes('game'), feat_size))

        x1 = F.randn((g.num_edges('plays'), feat_size))
        x2 = F.randn((g.num_edges('follows'), feat_size))
        x3 = F.randn((g.num_edges('develops'), feat_size))
        x4 = F.randn((g.num_edges('wishes'), feat_size))

        builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
        builtin_msg = getattr(fn, builtin_msg_name)

        #################################################################
        #  apply_edges() is called on each relation type separately
        #################################################################

        F.attach_grad(n1)
        F.attach_grad(n2)
        F.attach_grad(n3)
        g.nodes['user'].data['h'] = n1
        g.nodes['developer'].data['h'] = n2
        g.nodes['game'].data['h'] = n3
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['h'] = x1
        g['follows'].edata['h'] = x2
        g['develops'].edata['h'] = x3
        g['wishes'].edata['h'] = x4

        with F.record_grad():
            [
                g.apply_edges(builtin_msg('h', 'h', 'm'), etype=rel)
                for rel in g.canonical_etypes
            ]
            r1 = g['plays'].edata['m']
            loss = F.sum(r1.view(-1), 0)
            F.backward(loss)
            n_grad1 = F.grad(g.nodes['game'].data['h'])

        #################################################################
        #  apply_edges() is called on all relation types
        #################################################################

        F.attach_grad(n1)
        F.attach_grad(n2)
        F.attach_grad(n3)
        g.nodes['user'].data['h'] = n1
        g.nodes['developer'].data['h'] = n2
        g.nodes['game'].data['h'] = n3
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['h'] = x1
        g['follows'].edata['h'] = x2
        g['develops'].edata['h'] = x3
        g['wishes'].edata['h'] = x4

        with F.record_grad():
            g.apply_edges(builtin_msg('h', 'h', 'm'))
            r2 = g['plays'].edata['m']
            loss = F.sum(r2.view(-1), 0)
            F.backward(loss)
            n_grad2 = F.grad(g.nodes['game'].data['h'])
        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if n_grad1 is not None or n_grad2 is not None:
            if not F.allclose(n_grad1, n_grad2):
                print('node grad')
                _print_error(n_grad1, n_grad2)
            assert (F.allclose(n_grad1, n_grad2))
Example #24
def check_dist_graph(g, num_nodes, num_edges):
    # Test API
    assert g.number_of_nodes() == num_nodes
    assert g.number_of_edges() == num_edges

    # Test reading node data
    nids = F.arange(0, int(g.number_of_nodes() / 2))
    feats1 = g.ndata['features'][nids]
    feats = F.squeeze(feats1, 1)
    assert np.all(F.asnumpy(feats == nids))

    # Test reading edge data
    eids = F.arange(0, int(g.number_of_edges() / 2))
    feats1 = g.edata['features'][eids]
    feats = F.squeeze(feats1, 1)
    assert np.all(F.asnumpy(feats == eids))

    # Test init node data
    new_shape = (g.number_of_nodes(), 2)
    g.ndata['test1'] = dgl.distributed.DistTensor(g, new_shape, F.int32)
    feats = g.ndata['test1'][nids]
    assert np.all(F.asnumpy(feats) == 0)

    # Reference a tensor that already exists.
    test2 = dgl.distributed.DistTensor(g,
                                       new_shape,
                                       F.float32,
                                       'test2',
                                       init_func=rand_init)
    test3 = dgl.distributed.DistTensor(g, new_shape, F.float32, 'test2')
    assert np.all(F.asnumpy(test2[nids]) == F.asnumpy(test3[nids]))

    # Create a tensor, destroy it, and create it again.
    test3 = dgl.distributed.DistTensor(g,
                                       new_shape,
                                       F.float32,
                                       'test3',
                                       init_func=rand_init)
    del test3
    test3 = dgl.distributed.DistTensor(g, (g.number_of_nodes(), 3), F.float32,
                                       'test3')
    del test3

    # Test a persistent tensor.
    test4 = dgl.distributed.DistTensor(g,
                                       new_shape,
                                       F.float32,
                                       'test4',
                                       init_func=rand_init,
                                       persistent=True)
    del test4
    # Recreating 'test4' with a different shape should fail: the tensor is
    # persistent, so it outlives the `del` above.
    raised = False
    try:
        test4 = dgl.distributed.DistTensor(g, (g.number_of_nodes(), 3),
                                           F.float32, 'test4')
    except Exception:
        raised = True
    assert raised

    # Test sparse emb
    try:
        emb = DistEmbedding(g, g.number_of_nodes(), 1, 'emb1', emb_init)
        lr = 0.001
        optimizer = SparseAdagrad([emb], lr=lr)
        with F.record_grad():
            feats = emb(nids)
            assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
            loss = F.sum(feats + 1, 0)
        loss.backward()
        optimizer.step()
        feats = emb(nids)
        assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)
        rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
        feats1 = emb(rest)
        assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))

        policy = dgl.distributed.PartitionPolicy('node',
                                                 g.get_partition_book())
        grad_sum = dgl.distributed.DistTensor(g, (g.number_of_nodes(), ),
                                              F.float32, 'emb1_sum', policy)
        assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)))
        assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))

        emb = DistEmbedding(g, g.number_of_nodes(), 1, 'emb2', emb_init)
        with F.no_grad():
            feats1 = emb(nids)
        assert np.all(F.asnumpy(feats1) == 0)

        optimizer = SparseAdagrad([emb], lr=lr)
        with F.record_grad():
            feats1 = emb(nids)
            feats2 = emb(nids)
            feats = F.cat([feats1, feats2], 0)
            assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
            loss = F.sum(feats + 1, 0)
        loss.backward()
        optimizer.step()
        with F.no_grad():
            feats = emb(nids)
        assert_almost_equal(F.asnumpy(feats),
                            np.ones((len(nids), 1)) * math.sqrt(2) * -lr)
        rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
        feats1 = emb(rest)
        assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
    except NotImplementedError as e:
        pass

    # Test write data
    new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
    g.ndata['test1'][nids] = new_feats
    feats = g.ndata['test1'][nids]
    assert np.all(F.asnumpy(feats) == 1)

    # Test metadata operations.
    assert len(g.ndata['features']) == g.number_of_nodes()
    assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
    assert g.ndata['features'].dtype == F.int64
    assert g.node_attr_schemes()['features'].dtype == F.int64
    assert g.node_attr_schemes()['test1'].dtype == F.int32
    assert g.node_attr_schemes()['features'].shape == (1, )

    selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
    # Test node split
    nodes = node_split(selected_nodes, g.get_partition_book())
    nodes = F.asnumpy(nodes)
    # We only have one partition, so the local nodes are basically all nodes in the graph.
    local_nids = np.arange(g.number_of_nodes())
    for n in nodes:
        assert n in local_nids

    print('end')
Example #25
    def _test(feat_scale):
        in_feat = 16 * feat_scale
        out_feat = 8 * feat_scale
        print("in/out feat", in_feat, out_feat)
        E_per_rel = F.copy_to(
            F.tensor([
                50, 100, 20, 284, 89, 10, 82, 9200, 10, 20, 30, 100, 128, 20,
                284, 89, 10, 82, 92, 10, 20, 30, 100, 1280, 20, 284, 89, 1000,
                82, 92, 10, 2000, 30, 100, 128, 20, 284, 89, 10, 82, 92, 10,
                20, 30
            ]), F.cpu())

        E_per_rel *= n_edge_scale
        num_rel = len(E_per_rel)
        print('num_rel', num_rel)
        W_per_len = F.copy_to(
            F.full((num_rel, ), in_feat, dtype=F.dtype(E_per_rel)), F.cpu())

        H_arr = []
        W_arr = []
        Out_arr = []
        Out_grad_arr = []

        for eid in range(num_rel):
            H_arr.append(F.randn((E_per_rel[eid], in_feat)))
            W_arr.append(F.randn((in_feat, out_feat)))
            Out_arr.append(F.zeros((E_per_rel[eid], out_feat)))
            Out_grad_arr.append(F.ones((E_per_rel[eid], out_feat)))

        H = F.cat([h for h in H_arr], 0)
        W = F.cat([w for w in W_arr], 0)
        W_3D = W.reshape(num_rel, in_feat, out_feat)
        Out = F.cat([out for out in Out_arr], 0)
        Out_grad = F.cat([o for o in Out_grad_arr], 0)

        print('H.shape', H.shape)
        print('W.shape', W.shape)
        print('W_3D.shape', W_3D.shape)
        print('Out.shape', Out.shape)

        etype_arr = []
        for eid in range(num_rel):
            etype_arr.append(
                F.full((E_per_rel[eid], ), eid, dtype=F.dtype(E_per_rel)))
        etypes = F.cat([etype for etype in etype_arr], 0)

        #################################################################
        #  low-mem version using PyTorch operator
        #################################################################

        # forward pass
        out = []
        for i in range(len(E_per_rel)):
            Hi = H_arr[i]
            Wi = W_arr[i]
            out.append(F.matmul(Hi, Wi))
        out_low_mem = F.cat(out, 0)

        # backward pass
        H_grad = []
        W_grad = []
        for i in range(len(E_per_rel)):
            Hi = H_arr[i]
            Wi = W_arr[i]
            Out_gradi = Out_grad_arr[i]
            H_grad.append(F.matmul(Out_gradi, Wi.transpose(0, 1)))
            W_grad.append(F.matmul(Hi.transpose(0, 1), Out_gradi))
        Hgrad_low_mem = F.cat(H_grad, 0)
        Wgrad_low_mem = F.cat(W_grad, 0)
        Wgrad_low_mem = Wgrad_low_mem.reshape(num_rel, in_feat, out_feat)

        #################################################################
        #  gather_mm where H sorted according to etype
        #################################################################

        seglen_A = E_per_rel
        F.attach_grad(H)
        F.attach_grad(W_3D)
        with F.record_grad():
            out_gmm_sorted = dgl.ops.segment_mm(H, W_3D, seglen_A)
            F.backward(F.reduce_sum(out_gmm_sorted))
            Hgrad_gmm_sorted = H.grad
            Wgrad_gmm_sorted = W_3D.grad

        #################################################################
        #  gather_mm where H is not sorted (backward not supported yet)
        #################################################################

        F.attach_grad(H)
        F.attach_grad(W_3D)
        with F.record_grad():
            out_gmm_unsorted = dgl.ops.gather_mm(H, W_3D, idx_rhs=etypes)
            F.backward(F.reduce_sum(out_gmm_unsorted))
            Hgrad_gmm_unsorted = H.grad
            Wgrad_gmm_unsorted = W_3D.grad

        # correctness check
        assert F.allclose(out_low_mem, out_gmm_sorted, atol=1e-3, rtol=1e-3)
        assert F.allclose(Hgrad_low_mem,
                          Hgrad_gmm_sorted,
                          atol=1e-3,
                          rtol=1e-3)
        assert F.allclose(Wgrad_low_mem,
                          Wgrad_gmm_sorted,
                          atol=1e-3,
                          rtol=1e-3)
        assert F.allclose(out_low_mem, out_gmm_unsorted, atol=1e-3, rtol=1e-3)
        assert F.allclose(Hgrad_low_mem,
                          Hgrad_gmm_unsorted,
                          atol=1e-3,
                          rtol=1e-3)
        assert F.allclose(Wgrad_low_mem,
                          Wgrad_gmm_unsorted,
                          atol=1e-3,
                          rtol=1e-3)
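
For orientation, segment_mm multiplies each segment of H (rows grouped and sorted by relation) by the matching slice of W_3D, while gather_mm selects a weight slice per row via an explicit index. A NumPy reference for the sorted case, with illustrative shapes:

import numpy as np

H = np.random.rand(7, 4)                # rows sorted by relation
W = np.random.rand(2, 4, 5)             # one weight matrix per relation
seglen = [3, 4]
starts = np.cumsum([0] + seglen[:-1])
out = np.concatenate([H[s:s + n] @ W[i]
                      for i, (s, n) in enumerate(zip(starts, seglen))])
print(out.shape)                        # (7, 5)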
Example #26
    def _test(lhs, rhs, binary_op, reducer):

        g = create_test_heterograph(idtype)

        x1 = F.randn((g.num_nodes('user'), feat_size))
        x2 = F.randn((g.num_nodes('developer'), feat_size))
        x3 = F.randn((g.num_nodes('game'), feat_size))

        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        g.nodes['user'].data['h'] = x1
        g.nodes['developer'].data['h'] = x2
        g.nodes['game'].data['h'] = x3

        x1 = F.randn((4, feat_size))
        x2 = F.randn((4, feat_size))
        x3 = F.randn((3, feat_size))
        x4 = F.randn((3, feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['h'] = x1
        g['follows'].edata['h'] = x2
        g['develops'].edata['h'] = x3
        g['wishes'].edata['h'] = x4

        builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
        builtin_msg = getattr(fn, builtin_msg_name)
        builtin_red = getattr(fn, reducer)

        #################################################################
        #  multi_update_all(): call msg_passing separately for each etype
        #################################################################

        with F.record_grad():
            g.multi_update_all(
                {
                    etype: (builtin_msg('h', 'h', 'm'), builtin_red('m', 'y'))
                    for etype in g.canonical_etypes
                }, 'sum')
            r1 = g.nodes['game'].data['y']
            F.backward(r1, F.ones(r1.shape))
            n_grad1 = F.grad(r1)

        #################################################################
        #  update_all(): call msg_passing for all etypes
        #################################################################

        g.update_all(builtin_msg('h', 'h', 'm'), builtin_red('m', 'y'))
        r2 = g.nodes['game'].data['y']
        F.backward(r2, F.ones(r2.shape))
        n_grad2 = F.grad(r2)

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
Example #27
    def _test(mfunc, rfunc):

        g = create_test_heterograph_large(idtype)
        g0 = create_test_heterograph_2(idtype)
        g1 = create_test_heterograph(idtype)
        cross_reducer = rfunc.__name__
        x1 = F.randn((g.num_edges('plays'), feat_size))
        x2 = F.randn((g.num_edges('follows'), feat_size))
        x3 = F.randn((g.num_edges('develops'), feat_size))
        x4 = F.randn((g.num_edges('wishes'), feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['eid'] = x1
        g['follows'].edata['eid'] = x2
        g['develops'].edata['eid'] = x3
        g['wishes'].edata['eid'] = x4

        #################################################################
        #  multi_update_all(): call msg_passing separately for each etype
        #################################################################

        with F.record_grad():
            g.multi_update_all(
                {
                    'plays': (mfunc('eid', 'm'), rfunc('m', 'y')),
                    'follows': (mfunc('eid', 'm'), rfunc('m', 'y')),
                    'develops': (mfunc('eid', 'm'), rfunc('m', 'y')),
                    'wishes': (mfunc('eid', 'm'), rfunc('m', 'y'))
                }, cross_reducer)
            r1 = g.nodes['game'].data['y'].clone()
            r2 = g.nodes['user'].data['y'].clone()
            loss = r1.sum() + r2.sum()
            F.backward(loss)
            e_grad1 = F.grad(g['develops'].edata['eid']).clone()
            e_grad2 = F.grad(g['plays'].edata['eid']).clone()
            e_grad3 = F.grad(g['wishes'].edata['eid']).clone()
            e_grad4 = F.grad(g['follows'].edata['eid']).clone()
        for _, etype, _ in g.canonical_etypes:
            g[etype].edata.clear()

        #################################################################
        #  update_all(): call msg_passing for all etypes
        #################################################################

        # TODO(Israt): output type can be None in multi_update and empty
        # tensor in new_update_all
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)

        g['plays'].edata['eid'] = x1
        g['follows'].edata['eid'] = x2
        g['develops'].edata['eid'] = x3
        g['wishes'].edata['eid'] = x4

        with F.record_grad():
            g.update_all(mfunc('eid', 'm'), rfunc('m', 'y'))
            r3 = g.nodes['game'].data['y']
            r4 = g.nodes['user'].data['y']
            loss = r3.sum() + r4.sum()
            F.backward(loss)
            e_grad5 = F.grad(g['develops'].edata['eid'])
            e_grad6 = F.grad(g['plays'].edata['eid'])
            e_grad7 = F.grad(g['wishes'].edata['eid'])
            e_grad8 = F.grad(g['follows'].edata['eid'])
        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        assert F.allclose(r1, r3)
        assert F.allclose(r2, r4)
        assert (F.allclose(e_grad1, e_grad5))
        assert (F.allclose(e_grad2, e_grad6))
        assert (F.allclose(e_grad3, e_grad7))
        assert (F.allclose(e_grad4, e_grad8))
Example #28
    def _test(g, lhs, rhs, binary_op, reducer, partial, nid, broadcast='none'):
        # initialize node/edge features with uniform(-1, 1)
        hu, hv, he = generate_feature(g, broadcast, binary_op)
        if binary_op == 'div':
            # op = div
            # lhs range: [-1, 1]
            # rhs range: [1, 2]
            # result range: [-1, 1]
            if rhs == 'u':
                hu = (hu + 3) / 2
            elif rhs == 'v':
                hv = (hv + 3) / 2
            elif rhs == 'e':
                he = (he + 3) / 2

        if binary_op == 'add' or binary_op == 'sub':
            # op = add, sub
            # lhs range: [-1/2, 1/2]
            # rhs range: [-1/2, 1/2]
            # result range: [-1, 1]
            hu = hu / 2
            hv = hv / 2
            he = he / 2

        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
        builtin_msg = getattr(fn, builtin_msg_name)
        builtin_red = getattr(fn, reducer)
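        # e.g. lhs='u', binary_op='add', rhs='v' resolves to the builtin fn.u_add_v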

        def target_feature_switch(g, target):
            if target == "u":
                return g.ndata["u"]
            elif target == "v":
                return g.ndata["v"]
            else:
                return g.edata["e"]

        with F.record_grad():
            if partial:
                g.pull(nid, builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
            else:
                g.update_all(builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
            r1 = g.ndata.pop('r1')
            F.backward(F.reduce_sum(r1))
            lhs_grad_1 = F.grad(target_feature_switch(g, lhs))
            rhs_grad_1 = F.grad(target_feature_switch(g, rhs))

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        def target_switch(edges, target):
            if target == "u":
                return edges.src
            elif target == "v":
                return edges.dst
            elif target == "e":
                return edges.data
            else:
                assert 0, "Unknown target {}".format(target)

        def mfunc(edges):
            op = getattr(F, binary_op)
            lhs_data = target_switch(edges, lhs)[lhs]
            rhs_data = target_switch(edges, rhs)[rhs]
            # NOTE(zihao): we need to do batched broadcast
            # e.g. (68, 3, 1) op (68, 5, 3, 4)
            while F.ndim(lhs_data) < F.ndim(rhs_data):
                lhs_data = F.unsqueeze(lhs_data, 1)
            while F.ndim(rhs_data) < F.ndim(lhs_data):
                rhs_data = F.unsqueeze(rhs_data, 1)
            return {"m": op(lhs_data, rhs_data)}

        def rfunc(nodes):
            op = getattr(F, reducer)
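            # reduce incoming messages along the mailbox's neighbor dimension (dim 1)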
            return {"r2": op(nodes.mailbox['m'], 1)}

        with F.record_grad():
            if partial:
                g.pull(nid, mfunc, rfunc)
            else:
                g.update_all(mfunc, rfunc)
            r2 = g.ndata.pop('r2')
            F.backward(F.reduce_sum(r2), F.tensor([1.]))
            lhs_grad_2 = F.grad(target_feature_switch(g, lhs))
            rhs_grad_2 = F.grad(target_feature_switch(g, rhs))

        rtol = 1e-4
        atol = 1e-4

        def _print_error(a, b):
            print("ERROR: Test {}_{}_{}_{} broadcast: {} partial: {}".
                  format(lhs, binary_op, rhs, reducer, broadcast, partial))
            if lhs == 'u':
                lhs_data = hu
            elif lhs == 'v':
                lhs_data = hv
            elif lhs == 'e':
                lhs_data = he

            if rhs == 'u':
                rhs_data = hu
            elif rhs == 'v':
                rhs_data = hv
            elif rhs == 'e':
                rhs_data = he
            print("lhs", F.asnumpy(lhs_data).tolist())
            print("rhs", F.asnumpy(rhs_data).tolist())
            for i, (x, y) in enumerate(zip(F.asnumpy(a).flatten(), F.asnumpy(b).flatten())):
                if not np.allclose(x, y, rtol, atol):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2, rtol, atol):
            _print_error(r1, r2)
        assert F.allclose(r1, r2, rtol, atol)

        if not F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol):
            print("left grad")
            _print_error(lhs_grad_1, lhs_grad_2)
        assert F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol)

        if not F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol):
            print("right grad")
            _print_error(rhs_grad_1, rhs_grad_2)
        assert F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol)
Example #29
def run_client(graph_name, part_id, num_nodes, num_edges):
    time.sleep(5)
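    # load this partition's book, then attach to the running distributed graph service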
    gpb = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
                              part_id, None)
    g = DistGraph("kv_ip_config.txt", graph_name, gpb=gpb)

    # Test API
    assert g.number_of_nodes() == num_nodes
    assert g.number_of_edges() == num_edges

    # Test reading node data
    nids = F.arange(0, int(g.number_of_nodes() / 2))
    feats1 = g.ndata['features'][nids]
    feats = F.squeeze(feats1, 1)
    assert np.all(F.asnumpy(feats == nids))

    # Test reading edge data
    eids = F.arange(0, int(g.number_of_edges() / 2))
    feats1 = g.edata['features'][eids]
    feats = F.squeeze(feats1, 1)
    assert np.all(F.asnumpy(feats == eids))

    # Test init node data
    new_shape = (g.number_of_nodes(), 2)
    g.init_ndata('test1', new_shape, F.int32)
    feats = g.ndata['test1'][nids]
    assert np.all(F.asnumpy(feats) == 0)

    # Test init edge data
    new_shape = (g.number_of_edges(), 2)
    g.init_edata('test1', new_shape, F.int32)
    feats = g.edata['test1'][eids]
    assert np.all(F.asnumpy(feats) == 0)

    # Test sparse emb
    try:
        new_shape = (g.number_of_nodes(), 1)
        emb = SparseNodeEmbedding(g, 'emb1', new_shape, emb_init)
        lr = 0.001
        optimizer = SparseAdagrad([emb], lr=lr)
        with F.record_grad():
            feats = emb(nids)
            assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
            loss = F.sum(feats + 1, 0)
        loss.backward()
        optimizer.step()
        feats = emb(nids)
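        # first Adagrad step with gradient 1: update = lr * 1 / sqrt(1^2) = lr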
        assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)
        rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
        feats1 = emb(rest)
        assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))

        policy = dgl.distributed.PartitionPolicy('node',
                                                 g.get_partition_book())
        grad_sum = dgl.distributed.DistTensor(g, 'node:emb1_sum', policy)
        assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)))
        assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))

        emb = SparseNodeEmbedding(g, 'emb2', new_shape, emb_init)
        optimizer = SparseAdagrad([emb], lr=lr)
        with F.record_grad():
            feats1 = emb(nids)
            feats2 = emb(nids)
            feats = F.cat([feats1, feats2], 0)
            assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
            loss = F.sum(feats + 1, 0)
        loss.backward()
        optimizer.step()
        feats = emb(nids)
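        # the two lookups each contribute a gradient of 1, so the Adagrad state is
        # 1^2 + 1^2 = 2 and the step size becomes lr * 2 / sqrt(2) = sqrt(2) * lr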
        assert_almost_equal(F.asnumpy(feats),
                            np.ones((len(nids), 1)) * math.sqrt(2) * -lr)
        rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
        feats1 = emb(rest)
        assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
    except NotImplementedError:
        pass

    # Test write data
    new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
    g.ndata['test1'][nids] = new_feats
    feats = g.ndata['test1'][nids]
    assert np.all(F.asnumpy(feats) == 1)

    # Test metadata operations.
    assert len(g.ndata['features']) == g.number_of_nodes()
    assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
    assert g.ndata['features'].dtype == F.int64
    assert g.node_attr_schemes()['features'].dtype == F.int64
    assert g.node_attr_schemes()['test1'].dtype == F.int32
    assert g.node_attr_schemes()['features'].shape == (1, )

    selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
    # Test node split
    nodes = node_split(selected_nodes, g.get_partition_book())
    nodes = F.asnumpy(nodes)
    # We only have one partition, so the local nodes are basically all nodes in the graph.
    local_nids = np.arange(g.number_of_nodes())
    for n in nodes:
        assert n in local_nids

    # clean up
    dgl.distributed.shutdown_servers()
    dgl.distributed.finalize_client()
    print('end')
Example #30
    def _test(g,
              lhs,
              rhs,
              binary_op,
              reducer,
              partial,
              nid,
              broadcast='none'):
        hu, hv, he = generate_feature(g, broadcast)
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        builtin_msg_name = "{}_{}_{}".format(lhs, binary_op, rhs)
        builtin_msg = getattr(fn, builtin_msg_name)
        builtin_red = getattr(fn, reducer)

        def target_feature_switch(g, target):
            if target == "u":
                return g.ndata["u"]
            elif target == "v":
                return g.ndata["v"]
            else:
                return g.edata["e"]

        with F.record_grad():
            if partial:
                g.pull(nid, builtin_msg(lhs, rhs, 'm'), builtin_red('m', 'r1'))
            else:
                g.update_all(builtin_msg(lhs, rhs, 'm'),
                             builtin_red('m', 'r1'))
            r1 = g.ndata.pop('r1')
            F.backward(r1.sum())
            lhs_grad_1 = F.grad(target_feature_switch(g, lhs))
            rhs_grad_1 = F.grad(target_feature_switch(g, rhs))

        # reset grad
        g.ndata['u'] = F.attach_grad(F.clone(hu))
        g.ndata['v'] = F.attach_grad(F.clone(hv))
        g.edata['e'] = F.attach_grad(F.clone(he))

        def target_switch(edges, target):
            if target == "u":
                return edges.src
            elif target == "v":
                return edges.dst
            elif target == "e":
                return edges.data
            else:
                assert 0, "Unknown target {}".format(target)

        def mfunc(edges):
            op = getattr(F, binary_op)
            lhs_data = target_switch(edges, lhs)
            rhs_data = target_switch(edges, rhs)
            return {"m": op(lhs_data[lhs], rhs_data[rhs])}

        def rfunc(nodes):
            op = getattr(F, reducer)
            return {"r2": op(nodes.mailbox['m'], 1)}

        with F.record_grad():
            if partial:
                g.pull(nid, mfunc, rfunc)
            else:
                g.update_all(mfunc, rfunc)
            r2 = g.ndata.pop('r2')
            F.backward(r2.sum(), F.tensor([1.]))
            lhs_grad_2 = F.grad(target_feature_switch(g, lhs))
            rhs_grad_2 = F.grad(target_feature_switch(g, rhs))

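        # 'prod' compounds floating-point error across neighbors, hence looser tolerances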
        if reducer == 'prod':
            rtol = 1e-2
            atol = 1e-2
        else:
            rtol = 1e-4
            atol = 1e-4

        def _print_error(a, b):
            print("ERROR: Test {}_{}_{}_{} {}".format(lhs, binary_op, rhs,
                                                      reducer, broadcast))
            print(a, b)
            for i, (x, y) in enumerate(
                    zip(
                        F.asnumpy(F.cpu(a)).flatten(),
                        F.asnumpy(F.cpu(b)).flatten())):
                if not np.allclose(x, y, rtol, atol):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2, rtol, atol):
            _print_error(r1, r2)
        assert F.allclose(r1, r2, rtol, atol)

        if not F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol):
            print("left grad")
            _print_error(lhs_grad_1, lhs_grad_2)
        assert F.allclose(lhs_grad_1, lhs_grad_2, rtol, atol)

        if not F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol):
            print("right grad")
            _print_error(rhs_grad_1, rhs_grad_2)
        assert F.allclose(rhs_grad_1, rhs_grad_2, rtol, atol)