示例#1
0
def test_vjps():
    """Transposed vector-Jacobian product of y = A @ x w.r.t. x is A^T v."""
    for backend in backends:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2])
        A = ad.Variable(name="A", shape=[3, 2])
        v = ad.Variable(name="v", shape=[3])
        y = ad.einsum('ab, b->a', A, x)

        transposed_vjp_x, = ad.transposed_vjps(y, [x], v)

        x_val = T.tensor([1., 2.])  # shape (2,)
        A_val = T.tensor([[1., 2.], [3., 4.], [5, 6]])  # shape (3, 2)
        v_val = T.tensor([1., 2., 3.])  # shape (3,)

        executor = ad.Executor([y, transposed_vjp_x])
        y_val, transposed_vjp_x_val = executor.run(
            feed_dict={x: x_val, A: A_val, v: v_val})

        expected_yval = T.einsum('ab, b->a', A_val, x_val)
        expected_transposed_vjp_x_val = T.einsum('b, ba->a', v_val, A_val)

        assert isinstance(transposed_vjp_x, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(transposed_vjp_x_val,
                             expected_transposed_vjp_x_val)
示例#2
0
def test_sparse_ops():
    """Sparse COO tensors: construction, mixed einsum, and solve_tri."""
    T.set_backend("numpy")
    size = 5

    x_sparse = T.random([size, size], format='coo', density=0.1)
    x_dense = T.to_numpy(x_sparse)
    assert isinstance(x_sparse, sparse._coo.core.COO)
    assert isinstance(x_dense, np.ndarray)

    y_dense = T.random([size, size])
    y_sparse = T.tensor(y_dense, format="coo")
    assert isinstance(y_sparse, sparse._coo.core.COO)
    assert isinstance(y_dense, np.ndarray)

    # einsum must give the same result regardless of operand sparsity
    dense_dense = T.einsum("ab,bc->ac", x_dense, y_dense)
    dense_sparse = T.einsum("ab,bc->ac", x_dense, y_sparse)
    sparse_sparse = T.einsum("ab,bc->ac", x_sparse, y_sparse)
    assert float_eq(dense_dense, dense_sparse)
    assert float_eq(dense_dense, sparse_sparse)

    # solve_tri: shift by the identity first so the matrices are full rank
    x_sparse += T.tensor(T.identity(size), format="coo")
    y_sparse += T.tensor(T.identity(size), format="coo")
    x_dense += T.identity(size)
    y_dense += T.identity(size)
    sparse_out = T.solve_tri(x_sparse, y_sparse)
    dense_out = T.solve_tri(x_dense, y_dense)
    assert float_eq(sparse_out, dense_out)
示例#3
0
def test_sub_jacobian_w_chain(backendopt):
    """Jacobian through chained subtraction: d(x3 - (x1 - x2))/dx2 = I (x) I."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        x3 = ad.Variable(name="x3", shape=[2, 2])
        diff = x1 - x2
        z = x3 - diff

        jacobian_x2, = ad.jacobians(z, [x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])
        x3_val = T.tensor([[1, 1], [1, 1]])

        executor = ad.Executor([z, jacobian_x2])
        z_val, jacobian_x2_val = executor.run(
            feed_dict={x1: x1_val, x2: x2_val, x3: x3_val})

        # Two minus signs cancel, so dz/dx2 is the identity 4-tensor.
        I = T.identity(2)
        expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)

        assert isinstance(z, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(z_val, x3_val - x1_val + x2_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
示例#4
0
def test_mul_jacobian_one_scalar(backendopt):
    """Jacobians of scalar * matrix, for both multiplication orders."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[2, 2])

        # left-multiply and right-multiply by the scalar must agree
        for y in [x1 * x2, x2 * x1]:
            jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
            executor = ad.Executor([y, jacobian_x1, jacobian_x2])

            x1_val = T.tensor(2.)
            x2_val = T.tensor([[5., 6.], [7., 8.]])
            y_val, jacobian_x1_val, jacobian_x2_val = executor.run(
                feed_dict={x1: x1_val, x2: x2_val})

            I = T.identity(2)
            # dy/dx1 collapses to x2 itself; dy/dx2 is x1 times the identity.
            expected_jacobian_x1_val = T.einsum("ai,bj,ij->ab", I, I, x2_val)
            expected_jacobian_x2_val = x1_val * T.einsum("ai,bj->abij", I, I)

            assert isinstance(y, ad.Node)
            assert T.array_equal(y_val, x1_val * x2_val)
            assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
            assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
示例#5
0
def test_three_mul_jacobian_scalars(backendopt):
    """Jacobian of a scalar triple product: d(x1*x2*x3)/dx1 = x2*x3."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[])
        x3 = ad.Variable(name="x3", shape=[])
        y = x1 * x2 * x3

        jacobian_x1, = ad.jacobians(y, [x1])

        x1_val = T.tensor(1.)
        x2_val = T.tensor(2.)
        x3_val = T.tensor(3.)

        executor = ad.Executor([y, jacobian_x1])
        y_val, jacobian_x1_val = executor.run(
            feed_dict={x1: x1_val, x2: x2_val, x3: x3_val})

        expected_jacobian_x1_val = x2_val * x3_val

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, x1_val * x2_val * x3_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
示例#6
0
def test_inner_product_hvp():
    """HVP of y = x^T x: gradient is 2x and the Hessian acts as 2*I."""
    for backend in backends:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[3, 1])
        v = ad.Variable(name="v", shape=[3, 1])
        y = ad.sum(ad.einsum("ab,bc->ac", ad.transpose(x), x))

        grad_x, = ad.gradients(y, [x])
        Hv, = ad.hvp(output_node=y, node_list=[x], vector_list=[v])

        x_val = T.tensor([[1.], [2.], [3]])  # shape (3, 1)
        v_val = T.tensor([[1.], [2.], [3]])  # shape (3, 1)

        executor = ad.Executor([y, grad_x, Hv])
        y_val, grad_x_val, Hv_val = executor.run(
            feed_dict={x: x_val, v: v_val})

        expected_yval = T.sum(T.dot(T.transpose(x_val), x_val))
        expected_grad_x_val = 2 * x_val
        expected_hv_val = 2 * v_val

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
        assert T.array_equal(Hv_val, expected_hv_val)
示例#7
0
def test_einsum():
    """Gradients of sum(matmul) computed through an einsum node."""
    for backend in backends:
        T.set_backend(backend)

        x2 = ad.Variable(name="x2", shape=[3, 2])
        x3 = ad.Variable(name="x3", shape=[2, 3])
        matmul = ad.einsum('ik,kj->ij', x2, x3)
        y = ad.sum(matmul)

        grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

        x2_val = T.tensor([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)
        x3_val = T.tensor([[7, 8, 9], [10, 11, 12]])  # shape (2, 3)

        executor = ad.Executor([y, grad_x2, grad_x3])
        y_val, grad_x2_val, grad_x3_val = executor.run(
            feed_dict={x2: x2_val, x3: x3_val})

        # d(sum)/d(matmul) is a matrix of ones; chain through the matmul.
        ones = T.ones_like(T.dot(x2_val, x3_val))
        expected_yval = T.sum(T.dot(x2_val, x3_val))
        expected_grad_x2_val = T.dot(ones, T.transpose(x3_val))
        expected_grad_x3_val = T.dot(T.transpose(x2_val), ones)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x2_val, expected_grad_x2_val)
        assert T.array_equal(grad_x3_val, expected_grad_x3_val)
示例#8
0
def test_s2s_jtjvp(backendopt):
    """Source-to-source generated JTJvp code matches the analytic A^T A v."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2])
        A = ad.Variable(name="A", shape=[3, 2])
        v = ad.Variable(name="v", shape=[2])
        y = ad.einsum("ab,b->a", A, x)

        jtjvp_x, = ad.jtjvps(y, [x], [v])

        x_val = T.tensor([1., 2.])
        A_val = T.tensor([[1., 2.], [3., 4.], [5, 6]])
        v_val = T.tensor([3, 4])

        expected_jtjvp_x_val = T.einsum("ba,bc,c->a", A_val, A_val, v_val)

        # Generate a standalone forward function and execute it.
        StS = SourceToSource()
        forward_str = StS.forward([jtjvp_x],
                                  function_name='jtjvp',
                                  backend=backend)
        m = import_code(forward_str)
        jtjvp_x_val_s2s, = m.jtjvp([A_val, v_val])

        assert isinstance(jtjvp_x, ad.Node)
        assert T.array_equal(jtjvp_x_val_s2s, expected_jtjvp_x_val)
示例#9
0
def test_jtjvps():
    """JTJvp of y = A x w.r.t. x equals A^T A v."""
    for backend in backends:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2])
        A = ad.Variable(name="A", shape=[3, 2])
        v = ad.Variable(name="v", shape=[2])
        y = ad.einsum('ab, b->a', A, x)

        jtjvp_x, = ad.jtjvps(y, [x], [v])

        x_val = T.tensor([1., 2.])
        A_val = T.tensor([[1., 2.], [3., 4.], [5, 6]])
        v_val = T.tensor([3., 4.])

        executor = ad.Executor([y, jtjvp_x])
        y_val, jtjvp_x_val = executor.run(
            feed_dict={x: x_val, A: A_val, v: v_val})

        expected_yval = T.einsum('ab, b->a', A_val, x_val)
        # Build A^T A first, then apply it to v.
        gram = T.einsum('ba, ac->bc', T.transpose(A_val), A_val)
        expected_jtjvp_x_val = T.einsum('ab, b->a', gram, v_val)

        assert isinstance(jtjvp_x, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(jtjvp_x_val, expected_jtjvp_x_val)
示例#10
0
def test_hvp2(backendopt):
    """HVP of the quadratic form y = x^T H x with H = 2*I."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[3, 1])
        H = ad.Variable(name="H", shape=[3, 3])
        v = ad.Variable(name="v", shape=[3, 1])
        y = ad.sum(
            ad.einsum("ab,bc->ac", ad.einsum("ab,bc->ac", ad.transpose(x), H),
                      x))

        grad_x, = ad.gradients(y, [x])
        Hv, = ad.hvp(output_node=y, node_list=[x], vector_list=[v])

        x_val = T.tensor([[1.], [2.], [3]])  # shape (3, 1)
        v_val = T.tensor([[1.], [2.], [3]])  # shape (3, 1)
        H_val = T.tensor([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]])

        executor = ad.Executor([y, grad_x, Hv])
        y_val, grad_x_val, Hv_val = executor.run(
            feed_dict={x: x_val, H: H_val, v: v_val})

        Hx = T.dot(H_val, x_val)
        expected_yval = T.sum(T.dot(T.transpose(x_val), Hx))
        # For symmetric H the gradient is 2 H x and the Hessian is 2 H.
        expected_grad_x_val = 2 * Hx
        expected_hv_val = T.tensor([[4.], [8.], [12.]])

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
        assert T.array_equal(Hv_val, expected_hv_val)
示例#11
0
def test_s2s_hvp(backendopt):
    """Source-to-source codegen for forward value, gradient, and HVP of x^T H x.

    NOTE(review): the forward/gradients/hvp calls on the same StS instance
    appear to be order-dependent (gradients/hvp presumably reuse state built
    by forward) — the call order below is preserved deliberately; confirm
    against the SourceToSource implementation before reordering.
    """
    for datatype in backendopt:
        T.set_backend(datatype)
        x = ad.Variable(name="x", shape=[3])
        H = ad.Variable(name="H", shape=[3, 3])
        v = ad.Variable(name="v", shape=[3])
        # Quadratic form y = x^T H x expressed as a single einsum.
        y = ad.einsum("a,ab,b->", x, H, x)

        grad_x, = ad.gradients(y, [x])
        Hv, = ad.hvp(output_node=y, node_list=[x], vector_list=[v])

        x_val = T.tensor([1., 2., 3.])  # 3
        v_val = T.tensor([1., 2., 3.])  # 3
        H_val = T.tensor([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]])  # 3x3

        # Analytic references: with symmetric H, grad = 2 H x and Hess = 2 H.
        expected_yval = T.einsum("a,ab,b->", x_val, H_val, x_val)
        expected_grad_x_val = 2 * T.einsum("ab,b->a", H_val, x_val)
        expected_hv_val = T.tensor([4., 8., 12.])

        # Generate, import, and execute each derived function in turn.
        StS = SourceToSource()
        forward_str = StS.forward([y], backend=datatype)
        m = import_code(forward_str)
        y_val_s2s, = m.forward([x_val, H_val])
        grad_str = StS.gradients(y, [x], backend=datatype)
        m = import_code(grad_str)
        grad_x_val_s2s, = m.gradients([x_val, H_val])
        hvp_str = StS.hvp(y, [x], [v], backend=datatype)
        m = import_code(hvp_str)
        Hv_val_s2s, = m.hvp([x_val, H_val, v_val])

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val_s2s, expected_yval)
        assert T.array_equal(grad_x_val_s2s, expected_grad_x_val)
        assert T.array_equal(Hv_val_s2s, expected_hv_val)
示例#12
0
def test_add_jacobian_scalar(backendopt):
    """Jacobian of scalar addition: d(x1 + x2)/dx2 = 1."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[])
        y = x1 + x2

        jacobian_x2, = ad.jacobians(y, [x2])

        x1_val = T.tensor(1.)
        x2_val = T.tensor(1.)

        executor = ad.Executor([y, jacobian_x2])
        y_val, jacobian_x2_val = executor.run(
            feed_dict={x1: x1_val, x2: x2_val})

        expected_jacobian_x2_val = T.tensor(1.)

        assert isinstance(y, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(y_val, x1_val + x2_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
示例#13
0
def test_three_mul_jacobian(backendopt):
    """Jacobian of an elementwise triple product w.r.t. the first factor."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        x3 = ad.Variable(name="x3", shape=[2, 2])
        y = x1 * x2 * x3

        jacobian_x1, = ad.jacobians(y, [x1])

        x1_val = T.tensor([[1., 2.], [3., 4.]])
        x2_val = T.tensor([[5., 6.], [7., 8.]])
        x3_val = T.tensor([[9., 10.], [11., 12.]])

        executor = ad.Executor([y, jacobian_x1])
        y_val, jacobian_x1_val = executor.run(
            feed_dict={x1: x1_val, x2: x2_val, x3: x3_val})

        # dy/dx1 is diagonal in both index pairs, scaled by x2 * x3.
        I = T.identity(2)
        expected_jacobian_x1_val = T.einsum("ai,bj,ij,ij->abij", I, I, x2_val,
                                            x3_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, x1_val * x2_val * x3_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
示例#14
0
def test_add_jacobian(backendopt):
    """Jacobian of matrix addition: d(x1 + x2)/dx2 = I (x) I."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        y = x1 + x2

        jacobian_x2, = ad.jacobians(y, [x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])

        executor = ad.Executor([y, jacobian_x2])
        y_val, jacobian_x2_val = executor.run(
            feed_dict={x1: x1_val, x2: x2_val})

        I = T.identity(2)
        expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)

        assert isinstance(y, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(y_val, x1_val + x2_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
示例#15
0
def load_quimb_tensors(network):
    """Convert every tensor in a quimb network into a backend tensor.

    Iterates network.tensor_map in its own order and wraps each tensor's
    raw data with T.tensor; returns the resulting list.
    """
    return [T.tensor(t.data) for t in network.tensor_map.values()]
示例#16
0
def test_summation_einsum_2(backendopt):
    """Gradient of sum(einsum('ij,ab->ab', x, y)) w.r.t. x is sum(y) * ones."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2, 2])
        y = ad.Variable(name="y", shape=[2, 2])
        out = ad.sum(ad.einsum('ij,ab->ab', x, y))

        grad_x, = ad.gradients(out, [x])

        x_val = T.tensor([[1., 2.], [3., 4.]])
        y_val = T.tensor([[5., 6.], [7., 8.]])

        executor = ad.Executor([out, grad_x])
        out_val, grad_x_val = executor.run(feed_dict={x: x_val, y: y_val})

        expected_out_val = T.sum(T.einsum('ij,ab->ab', x_val, y_val))
        # x is fully contracted away, so every entry gets the same gradient.
        expected_grad_x_val = T.sum(y_val) * T.ones_like(x_val)

        assert T.array_equal(out_val, expected_out_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#17
0
def test_mul_const_jacobian(backendopt):
    """Jacobian of constant multiply: d(2 * x1)/dx1 = 2 * (I (x) I)."""
    for datatype in backendopt:
        T.set_backend(datatype)

        # Fix: the node was previously declared with name="x2", which
        # contradicts the local identifier x1 and the naming convention used
        # by every sibling test in this file.
        x1 = ad.Variable(name="x1", shape=[2, 2])
        jacobian_x1, = ad.jacobians(2 * x1, [x1])
        executor = ad.Executor([jacobian_x1])

        x1_val = T.tensor([[5., 6.], [7., 8.]])
        jacobian_x1_val, = executor.run(feed_dict={x1: x1_val})

        I = T.identity(2)
        expected_jacobian_x1_val = 2 * T.einsum("ai,bj->abij", I, I)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
示例#18
0
def test_chainjacobian(backendopt):
    """chainjacobian contracts matching in/out indices of two Jacobians."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[2, 2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2, 2])
        # x1 has 1 input index, x2 has 2 — their chain contracts over "bc".
        x1.set_in_indices_length(1)
        x2.set_in_indices_length(2)

        y = ad.chainjacobian(x1, x2)

        x1_val = T.tensor([[[1, 1], [1, 1]], [[1, 1], [1, 1]]])
        x2_val = T.tensor([[[1, 1], [1, 1]], [[1, 1], [1, 1]]])

        executor = ad.Executor([y])
        y_val, = executor.run(feed_dict={x1: x1_val, x2: x2_val})

        expected_y_val = T.einsum("abc,bcd->ad", x1_val, x2_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_y_val)
示例#19
0
def test_einsum_3op(backendopt):
    """Gradients of sum of a three-operand einsum (chained matmul)."""
    for backend in backendopt:
        T.set_backend(backend)

        x2 = ad.Variable(name="x2", shape=[3, 2])
        x3 = ad.Variable(name="x3", shape=[2, 3])
        x4 = ad.Variable(name="x4", shape=[3, 2])
        matmul = ad.einsum('ik,kj,jl->il', x2, x3, x4)
        y = ad.sum(matmul)

        grad_x2, grad_x3, grad_x4 = ad.gradients(y, [x2, x3, x4])

        x2_val = T.tensor([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)
        x3_val = T.tensor([[7, 8, 9], [10, 11, 12]])  # shape (2, 3)
        x4_val = T.tensor([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)

        executor = ad.Executor([y, grad_x2, grad_x3, grad_x4])
        y_val, grad_x2_val, grad_x3_val, grad_x4_val = executor.run(
            feed_dict={x2: x2_val, x3: x3_val, x4: x4_val})

        # Each gradient contracts the ones-cotangent with the other factors.
        ones = T.ones_like(T.dot(T.dot(x2_val, x3_val), x4_val))
        expected_yval = T.sum(T.dot(T.dot(x2_val, x3_val), x4_val))
        expected_grad_x2_val = T.einsum("il, kj, jl->ik", ones,
                                        x3_val, x4_val)
        expected_grad_x3_val = T.einsum("ik, il, jl->kj", x2_val,
                                        ones, x4_val)
        expected_grad_x4_val = T.einsum("ik, kj, il->jl", x2_val, x3_val,
                                        ones)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x2_val, expected_grad_x2_val)
        assert T.array_equal(grad_x3_val, expected_grad_x3_val)
        assert T.array_equal(grad_x4_val, expected_grad_x4_val)
示例#20
0
def test_s2s_tensorinv(backendopt):
    """Generated code for tensorinv maps the identity back to itself."""
    for backend in backendopt:
        T.set_backend(backend)

        A = ad.Variable(name="A", shape=[2, 2])
        B = ad.tensorinv(A)

        # The identity is its own inverse, so fwd(I) must return I.
        A_val = T.tensor([[1., 0.], [0., 1.]])

        StS = SourceToSource()
        fwd_str = StS.forward([B], function_name='fwd', backend=backend)
        m = import_code(fwd_str)
        out, = m.fwd([A_val])

        assert T.array_equal(A_val, out)
示例#21
0
def test_tensor_transpose_einsum(backendopt):
    """Transposed VJP of a pure axis permutation is the inverse permutation."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2, 2, 2])
        y = ad.einsum("kij->jik", x)

        v = ad.Variable(name="v", shape=[2, 2, 2])
        grad_x, = ad.transposed_vjps(y, [x], v)

        x_val = T.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # (2, 2, 2)
        v_val = T.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # (2, 2, 2)

        executor = ad.Executor([y, grad_x])
        y_val, grad_x_val = executor.run(feed_dict={x: x_val, v: v_val})

        expected_yval = T.einsum("kij->jik", x_val)
        # "kij->jik" swaps axes 0 and 2, which is its own inverse.
        expected_grad_x_val = T.einsum("kij->jik", v_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#22
0
def test_add_jacobian_w_chain(backendopt):
    """Jacobian through chained addition: d((x1 + x2) + x3)/dx2 = I (x) I."""
    for backend in backendopt:
        T.set_backend(backend)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        x3 = ad.Variable(name="x3", shape=[2, 2])
        partial = x1 + x2
        z = partial + x3

        jacobian_x2, = ad.jacobians(z, [x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])
        x3_val = T.tensor([[1, 1], [1, 1]])

        executor = ad.Executor([z, jacobian_x2])
        z_val, jacobian_x2_val = executor.run(
            feed_dict={x1: x1_val, x2: x2_val, x3: x3_val})

        # Chaining the two identity Jacobians:
        #   jacobian_z_y  = einsum("ae,bf->abef", I, I)
        #   jacobian_y_x2 = einsum("ec,fd->efcd", I, I)
        # contracting over e,f gives einsum("ac,bd->abcd", I, I).
        I = T.identity(2)
        expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)

        assert isinstance(z, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(z_val, x1_val + x2_val + x3_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)
示例#23
0
def test_s2s_w_constants(backendopt):
    """Generated code handles constant nodes: A @ I must return A."""
    for backend in backendopt:
        T.set_backend(backend)

        A = ad.Variable(name="A", shape=[2, 2])
        I = ad.identity(2)
        B = ad.einsum("ab,bc->ac", A, I)

        A_val = T.tensor([[1., 2.], [3., 4.]])

        StS = SourceToSource()
        fwd_str = StS.forward([B], function_name='fwd', backend=backend)
        m = import_code(fwd_str)
        out, = m.fwd([A_val])

        assert T.array_equal(A_val, out)
示例#24
0
def test_jacobian_summation_einsum(backendopt):
    """Jacobian of a full einsum reduction ('ij->') is a matrix of ones."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2, 2])
        x_sum = ad.einsum('ij->', x)

        grad_x, = ad.jacobians(x_sum, [x])

        x_val = T.tensor([[1., 2.], [3., 4.]])

        executor = ad.Executor([x_sum, grad_x])
        x_sum_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_x_sum_val = T.sum(x_val)
        expected_grad_x_val = T.ones_like(x_val)

        assert T.array_equal(x_sum_val, expected_x_sum_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#25
0
def test_inner_product_einsum(backendopt):
    """Gradient of the einsum inner product <x, x> is 2x."""
    for datatype in backendopt:
        T.set_backend(datatype)
        # Fix: the variable was declared with shape=[3] while the fed value
        # (and all expected results) use a 2-element vector; the declared
        # shape now matches the data.
        x = ad.Variable(name="x", shape=[2])
        x_inner = ad.einsum('i,i->', x, x)

        grad_x, = ad.gradients(x_inner, [x])

        executor = ad.Executor([x_inner, grad_x])
        x_val = T.tensor([3., 4.])  # shape (2,)

        y_val, grad_x_val = executor.run(feed_dict={x: x_val})

        # <x, x> equals the squared Euclidean norm.
        expected_yval = T.norm(x_val)**2
        expected_grad_x_val = 2 * x_val

        assert isinstance(x_inner, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#26
0
def test_jacobian_trace_einsum(backendopt):
    """Jacobian of the einsum trace ('ii->') is the identity matrix."""
    for backend in backendopt:
        # taco cannot handle repeated subscripts within one operand
        if backend == 'taco':
            continue
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[2, 2])
        trace = ad.einsum('ii->', x)

        grad_x, = ad.jacobians(trace, [x])

        x_val = T.tensor([[1., 2.], [3., 4.]])

        executor = ad.Executor([trace, grad_x])
        trace_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_trace_val = T.einsum('ii->', x_val)
        expected_grad_x_val = T.identity(2)

        assert T.array_equal(trace_val, expected_trace_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#27
0
def gauge_transform_mps(tensors, right=True):
    """
    Perform a gauge transformation on the MPS.

    NOTE: currently this function doesn't support the CTF backend.
    Reference: https://tensornetwork.org/mps/#toc_7

    Parameters
    ----------
    tensors: array of tensors representing the MPS
    right: direction of the transformation. If True,
        for the output mps, the diagram for its inner product will be:

                 o-<-<-<-<-<-<-<-<         o-
                 | | | | | | | | |   =     | | (inner product of o)
                 o-<-<-<-<-<-<-<-<         o-
        if False, the diagram of its inner product will be:

                 >->->->->->->->-o          -o
                 | | | | | | | | |   =     | | (inner product of o)
                 >->->->->->->->-o          -o

        here > or < denotes a tensor that is left / right orthogonal.

    Returns
    -------
    1. An array of tensors representing the MPS
    """
    # Round-trip through quimb: convert to numpy, canonize, convert back.
    np_tensors = [T.to_numpy(t) for t in tensors]
    mps = qtn.MatrixProductState(np_tensors, shape='lrp')

    if right:
        mps.right_canonize()
    else:
        mps.left_canonize()

    return [T.tensor(t.data) for t in mps.tensor_map.values()]
示例#28
0
def test_transpose_einsum(backendopt):
    """Gradient of sum(transpose(x)) is a matrix of ones shaped like x."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[3, 2])
        y = ad.sum(ad.einsum("ij->ji", x))

        grad_x, = ad.gradients(y, [x])

        x_val = T.tensor([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)

        executor = ad.Executor([y, grad_x])
        y_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_yval = T.sum(T.transpose(x_val))
        expected_grad_x_val = T.ones_like(x_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_yval)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#29
0
def test_trace_einsum(backendopt):
    """Gradient of the einsum trace ('ii->') is the identity matrix."""
    for backend in backendopt:
        if backend == 'taco':
            # Currently taco doesn't support same subscript in one operand.
            continue

        T.set_backend(backend)
        x = ad.Variable(name="x", shape=[2, 2])
        trace = ad.einsum('ii->', x)

        grad_x, = ad.gradients(trace, [x])

        x_val = T.tensor([[1., 2.], [3., 4.]])

        executor = ad.Executor([trace, grad_x])
        trace_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_trace_val = T.einsum('ii->', x_val)
        expected_grad_x_val = T.identity(2)

        assert T.array_equal(trace_val, expected_trace_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)
示例#30
0
def test_norm(backendopt):
    """Gradient of the squared norm ||x||^2 is 2x."""
    for backend in backendopt:
        T.set_backend(backend)

        x = ad.Variable(name="x", shape=[3, 2])
        z = ad.norm(x)**2

        grad_x, = ad.gradients(z, [x])

        x_val = T.tensor([[1., 2.], [3., 4.], [5., 6.]])  # shape (3, 2)

        executor = ad.Executor([z, grad_x])
        z_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_zval = T.norm(x_val)**2
        expected_grad_x_val = 2 * x_val

        assert isinstance(z, ad.Node)
        assert T.array_equal(z_val, expected_zval)
        assert T.array_equal(grad_x_val, expected_grad_x_val)