Example #1
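The snippets below are TVM Relay test functions collected from several TVM releases, and none of them carries its own imports. A minimal scaffold they assume looks roughly like this (a sketch: exact module paths vary across releases, and older snippets additionally use add_nat_definitions and .asnumpy() where newer ones use .numpy()):

# Common test scaffolding assumed by the snippets in this section (a sketch).
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import create_executor, transform, GlobalVar
from tvm.relay.prelude import Prelude
from tvm.relay.testing import rand, run_infer_type, make_nat_expr
from tvm.relay.transform import gradient, to_cps, un_cps
from tvm.testing import assert_allclose
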
def test_broadcast_subtract():
    """Subtract with broadcasting; each gradient is summed over its broadcast axes."""
    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = 'float32'
    x_nd = rand(dtype, *shape1)
    y_nd = rand(dtype, *shape2)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    expected_forward = x_np - y_np
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x = relay.var("x", t1)
    y = relay.var("y", t2)
    func = relay.Function([x, y], x - y)
    func = run_infer_type(func)
    full_func = run_infer_type(gradient(func))
    assert full_func.checked_type == relay.FuncType([t1, t2],
                                                    relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
                                                                     relay.TupleType([t1, t2])]))
    ex = create_executor()
    forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(),
                                np.ones_like(expected_forward).sum(axis=2, keepdims=True))
    tvm.testing.assert_allclose(grad_y.asnumpy(),
                                -np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
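The two gradient checks encode the usual broadcasting rule: each input's gradient is the ones-filled output gradient summed over the axes that were broadcast, so grad_x has shape (3, 4, 1) and grad_y has shape (1, 5). The same arithmetic in plain NumPy, independent of TVM:

# NumPy sketch of the broadcast-gradient shapes asserted above.
import numpy as np
out_grad = np.ones((3, 4, 5), "float32")  # seed gradient, one per output element
grad_x = out_grad.sum(axis=2, keepdims=True)  # reduce the broadcast axis -> (3, 4, 1)
grad_y = -out_grad.sum(axis=(0, 1), keepdims=True).squeeze(axis=0)  # -> (1, 5)
assert grad_x.shape == (3, 4, 1) and grad_y.shape == (1, 5)
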
def test_add_broadcast():
    """Test adding matrices of different size. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = "float32"
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)

    x1 = relay.var("x1", t1)
    x2 = relay.var("x2", t2)
    func = relay.Function([x1, x2], x1 + x2)
    func = run_infer_type(func)

    mod["main"] = func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]

    x1_np = rand(dtype, *shape1).numpy()
    x2_np = rand(dtype, *shape2).numpy()
    expected_forward = x1_np + x2_np

    expected_forward_type = relay.TensorType(expected_forward.shape, dtype)
    assert mod["main"].checked_type == relay.FuncType([t1, t2], expected_forward_type)

    forward = create_executor(mod=mod).evaluate(func)(x1_np, x2_np)

    assert_allclose(forward.numpy(), expected_forward)
def test_multivar_reverse_ad():
    """Simple test with multivariate reverse mode ad."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = create_executor(mod=mod).evaluate(back_func)(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
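The gradient transform returns the forward value together with one gradient per parameter, which is exactly what the FuncType assert spells out. An illustrative plain-NumPy analogue of that contract (not TVM API):

# NumPy analogue of the transformed f(x, y) = (x * y) * ones.
import numpy as np

def f_with_grad(x, y):
    forward = x * y  # multiplying by ones(shape) is the identity
    return forward, (y, x)  # (df/dx, df/dy) of x * y

forward, (gx, gy) = f_with_grad(np.random.rand(10, 10), np.random.rand(10, 10))
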
Example #4
def test_before_partial_eval():
    """Test transformation before PartialEval"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], x * y)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    seq = tvm.transform.Sequential(
        [transform.LazyGradientInit(), transform.PartialEvaluate(), transform.DeadCodeElimination()]
    )
    mod = seq(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = ex.evaluate(back_func)(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
Example #5
def test_tuple():
    """Gradient through a let-bound tuple: f(x, y, z) = x + y - z."""
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    z = relay.var("z", t)
    tup = relay.Var("tup")
    func = relay.Function([x, y, z], relay.Let(tup, relay.Tuple([x, y, z]),
                                               relay.TupleGetItem(tup, 0) +
                                               relay.TupleGetItem(tup, 1) -
                                               relay.TupleGetItem(tup, 2)))
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
    x_nd = rand(dtype, *shape)
    y_nd = rand(dtype, *shape)
    z_nd = rand(dtype, *shape)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    z_np = z_nd.asnumpy()
    expected_forward = x_np + y_np - z_np
    ex = create_executor()
    forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
    tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
    tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
def test_add_tuple():
    """Add elements of tuple. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    t = relay.TupleType([tensor_type, tensor_type])

    x = relay.var("x", t)
    # f((x1,x2)) = x1 + x2
    y = relay.Function([x],
                       relay.TupleGetItem(x, 0) + relay.TupleGetItem(x, 1))

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    mod = tvm.transform.PrintIR(show_meta_data=True)(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], tensor_type)

    x = (rand(dtype, *shape), rand(dtype, *shape))
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x[0].numpy() + x[1].numpy())
def test_reverse_ad_identity():
    """Simple test with reverse mode ad."""
    # of f(x) = x
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)

    func = relay.Function([x], x)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])])
    )

    x = rand(dtype, *shape)
    forward, (grad,) = create_executor(mod=mod).evaluate(back_func)(x)
    assert_allclose(forward.numpy(), x.numpy())
    assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
Example #8
def test_pow():
    """Gradient through nat_iterate: double applied three times gives f(i) = 8i."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat_iterate = mod.get_global_var("nat_iterate")
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    func = relay.Function([i], nat_iterate(double, make_nat_expr(p, 3))(i))
    mod["main"] = func
    mod = transform.InferType()(mod)
    mod["main"] = gradient(mod["main"], mod=mod)
    m = transform.InferType()(mod)
    back_func = m["main"]
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    i_nd = rand(dtype, *shape)
    ex = create_executor(mod=mod)
    forward, (grad_i, ) = ex.evaluate(back_func)(i_nd)
    tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy())
    tvm.testing.assert_allclose(grad_i.numpy(),
                                8 * np.ones_like(grad_i.numpy()))
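nat_iterate(double, 3) applies double three times, so the program computes 2**3 * i = 8 * i and the gradient is the constant 8. A pure-Python analogue of that arithmetic:

# Pure-Python analogue of nat_iterate(double, make_nat_expr(p, 3)).
double = lambda x: x + x
nat_iterate = lambda f, n: (lambda x: x) if n == 0 else (lambda x: f(nat_iterate(f, n - 1)(x)))
assert nat_iterate(double, 3)(1.0) == 8.0  # f(i) = 8 * i, so df/di = 8
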
Example #9
def test_reverse_ad_identity():
    """Simple test with reverse mode ad."""
    # of f(x) = x
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)

    func = relay.Function([x], x)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])])
    )

    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    forward, (grad,) = ex.evaluate(back_func)(x)
    assert_allclose(forward.numpy(), x.numpy())
    assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
def test_ret_tuple():
    """Test tuple return type. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    # f(x) = (x, 2*x)
    func = relay.Function([x], relay.Tuple([x, x * relay.const(2.0)]))
    func = run_infer_type(func)

    mod["main"] = func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t],
                                                      relay.TupleType([t, t]))

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(func)(x)
    assert_allclose(y[0].numpy(), x.numpy())
    assert_allclose(y[1].numpy(), x.numpy() * 2.0)
Example #11
def _test_tuple(mode):
    """Gradient of x + y - z with tuple access via Let (higher order) or inline (first order)."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    z = relay.var("z", t)
    if mode == "higher_order":
        tup = relay.Var("tup")
        func = relay.Function(
            [x, y, z],
            relay.Let(
                tup,
                relay.Tuple([x, y, z]),
                relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) -
                relay.TupleGetItem(tup, 2),
            ),
        )
    else:
        # first order does not do let.
        tup = relay.Tuple([x, y, z])
        func = relay.Function(
            [x, y, z],
            relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) -
            relay.TupleGetItem(tup, 2),
        )
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    assert back_func.checked_type == relay.FuncType(
        [t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
    x_nd = rand(dtype, *shape)
    y_nd = rand(dtype, *shape)
    z_nd = rand(dtype, *shape)
    x_np = x_nd.numpy()
    y_np = y_nd.numpy()
    z_np = z_nd.numpy()
    expected_forward = x_np + y_np - z_np
    ex = create_executor()
    forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd,
                                                               z_nd)
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.numpy(), np.ones_like(grad_x.numpy()))
    tvm.testing.assert_allclose(grad_y.numpy(), np.ones_like(grad_y.numpy()))
    tvm.testing.assert_allclose(grad_z.numpy(),
                                -1 * np.ones_like(grad_z.numpy()))
def test_after_partial_eval():
    """Test transformation following reverse mode ad and PartialEval"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    back_func = mod["main"]

    seq = tvm.transform.Sequential(
        [
            transform.PartialEvaluate(),
            transform.InferType(),
            transform.LazyGradientInit(),
            transform.InferType(),
            transform.DeadCodeElimination(),
            transform.InferType(),
        ]
    )

    mod = seq(mod)

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = create_executor(mod=mod).evaluate(back_func)(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
Example #13
def test_add():
    """Gradient of f(x) = x + x."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x + x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    x = rand(dtype, *shape)
    forward, (grad, ) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.numpy(), 2 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 2 * np.ones_like(x.numpy()))
Example #14
def test_sub():
    """Gradient of f(x) = x - x; both forward and gradient are zero."""
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x - x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor()
    x = rand(dtype, *shape)
    forward, (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy()))
    tvm.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy()))
Example #15
def test_grad_tuple():
    """Gradient of a tuple-valued function: f(x) = (4x, 2x)."""
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = x + x
    func = relay.Function([x], relay.Tuple([y + y, y]))
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([relay.TupleType([t, t]), relay.TupleType([t])]))
    ex = create_executor()
    x = rand(dtype, *shape)
    (forward_four, forward_two), (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward_four.asnumpy(), 4 * x.asnumpy())
    tvm.testing.assert_allclose(forward_two.asnumpy(), 2 * x.asnumpy())
    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
Example #16
def test_temp_add():
    """Gradient through a ScopeBuilder let binding: f(x) = 4x."""
    scope = relay.ScopeBuilder()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = scope.let("y", x + x)
    scope.ret(y + y)
    func = relay.Function([x], scope.get())
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor()
    x = rand(dtype, *shape)
    forward, (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
Example #17
def test_square_second_order():
    """Second-order gradient of f(x) = x * x, obtained by differentiating the first gradient."""
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x * x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    y = relay.var("y", t)
    back_func_adjusted = relay.Function([y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0))
    back_func_adjusted = run_infer_type(back_func_adjusted)
    back_back_func = run_infer_type(gradient(back_func_adjusted))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    ex = create_executor()
    forward, (grad_x,) = ex.evaluate(back_back_func)(x_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
    tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
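back_func_adjusted extracts df/dx = 2x from the gradient tuple, so differentiating it again yields forward = 2x and gradient = 2. A finite-difference sanity check of those numbers in plain NumPy:

# Finite-difference check of the first and second derivatives of f(x) = x * x.
import numpy as np
x = np.random.rand(10, 10)  # float64 keeps the finite differences stable
eps = 1e-5
f = lambda v: v * v
first = (f(x + eps) - f(x - eps)) / (2 * eps)  # ~ 2x
second = (f(x + eps) - 2 * f(x) + f(x - eps)) / eps ** 2  # ~ 2
np.testing.assert_allclose(first, 2 * x, rtol=1e-6, atol=1e-8)
np.testing.assert_allclose(second, 2 * np.ones_like(x), rtol=1e-3)
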
Example #18
def test_recursion():
    """CPS round-trip (to_cps then un_cps) of a recursive nat_iterate program."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    add_nat_definitions(p)
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i))
    mod["main"] = func
    mod["main"] = to_cps(mod["main"], mod=mod)
    mod["main"] = un_cps(mod["main"])
    ex = create_executor(mod=mod)
    i_nd = rand(dtype, *shape)
    forward = ex.evaluate()(i_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
Example #19
def test_ref():
    """Gradient through mutable references: the program computes f(x) = 2x."""
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    r = relay.Var("r")
    u = relay.Var("u")
    body = relay.RefRead(r)
    body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body)
    body = relay.Let(r, relay.RefCreate(x), body)
    func = relay.Function([x], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    ex = create_executor()
    forward, (grad_x,) = ex.evaluate(back_func)(x_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
    tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
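Desugared, the Let/Ref program creates a reference to x, doubles it in place, and reads it back, so the function is f(x) = 2x with gradient 2. In plain Python:

# Pure-Python reading of the reference program above.
def f(x):
    r = x  # Let r = RefCreate(x)
    r = r + r  # Let u = RefWrite(r, RefRead(r) + RefRead(r))
    return r  # body: RefRead(r) == 2 * x, so df/dx = 2

assert f(3.0) == 6.0
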
Example #20
def _test_tuple_argument(mode):
    """Gradient with a tuple-typed parameter, checked per field in the given ad mode."""
    shape = (2, 3)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    fields = 3
    tuple_type = relay.TupleType([tensor_type] * fields)
    tup = relay.var("tup", type_annotation=tuple_type)
    body = relay.TupleGetItem(tup, 0)
    for i in range(1, fields):
        body = relay.add(body, relay.TupleGetItem(tup, i))
    func = relay.Function([tup], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    xs = [rand(dtype, *shape) for _ in range(fields)]
    xs_np = np.array([x.numpy() for x in xs])
    expected_forward = np.sum(xs_np, axis=0)
    forward, grad = create_executor().evaluate(back_func)(tuple(xs))
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    for field in grad[0]:
        tvm.testing.assert_allclose(field.numpy(), np.ones_like(field.numpy()))
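With a single tuple-typed parameter, the returned gradient tuple has one entry, itself a tuple holding one gradient per field; hence the grad[0] indexing above. The NumPy arithmetic being checked:

# NumPy sketch of the expected forward value and per-field gradients.
import numpy as np
xs = [np.random.rand(2, 3).astype("float32") for _ in range(3)]
expected_forward = xs[0] + xs[1] + xs[2]  # the body sums the three fields
grads = tuple(np.ones_like(x) for x in xs)  # d(sum)/dx_i = 1 for every field
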
Example #21
def test_grad_tuple():
    """Gradient of a tuple-valued function f(x) = (4x, 2x), built with ScopeBuilder."""
    scope = relay.ScopeBuilder()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = scope.let("y", x + x)
    scope.ret(relay.Tuple([y + y, y]))
    func = relay.Function([x], scope.get())
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([relay.TupleType([t, t]),
                              relay.TupleType([t])]))
    x = rand(dtype, *shape)
    (forward_four,
     forward_two), (grad, ) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward_four.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(forward_two.numpy(), 2 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
def test_recursion():
    """CPS round-trip of a recursive nat_iterate program (newer Prelude API)."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat_iterate = p.mod.get_global_var("nat_iterate")
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    func = relay.Function([i], nat_iterate(double, make_nat_expr(p, 3))(i))
    mod["main"] = func
    mod = relay.transform.InferType()(mod)
    mod["main"] = to_cps(mod["main"], mod=mod)
    mod = relay.transform.InferType()(mod)
    mod["main"] = un_cps(mod["main"])
    i_nd = rand(dtype, *shape)
    forward = create_executor(mod=mod).evaluate()(i_nd)
    tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy())
def test_pow():
    """Gradient through nat_iterate (older Prelude API): f(i) = 8i."""
    mod = relay.Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i))
    mod["main"] = func
    mod["main"] = gradient(mod["main"], mod=mod)
    m = transform.InferType()(mod)
    back_func = m["main"]
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    i_nd = rand(dtype, *shape)
    ex = create_executor(mod=mod)
    forward, (grad_i,) = ex.evaluate(back_func)(i_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
    tvm.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy()))
def test_ones_like():
    """Simple test using "ones_like" op"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.Function([x], x + relay.ones_like(x))

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy() + np.ones_like(x.numpy()))
def test_global_function():
    """Gradient of a global function q(y) = double(double(y)) = 4y."""
    m = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.Var("x", t)
    d = GlobalVar("double")
    m[d] = relay.Function([x], x + x)
    y = relay.Var("y", t)
    q = GlobalVar("q")
    m[q] = relay.Function([y], d(d(y)))
    g = GlobalVar("grad")
    m[g] = tvm.relay.transform.gradient(q, m)
    back_func = m[g]
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor(mod=m)
    x = rand(dtype, *shape)
    forward, (grad, ) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
def test_zeros():
    """Simple test using "zeros" op"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.Function([x], x + relay.zeros(shape, dtype))

    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy())
def test_mult():
    """Simple multiplication testcase. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (15, 15)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    # f(x) = x*x
    y = relay.Function([x], x * x)

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy() * x.numpy())
def test_add():
    """Simple add testcase. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    # f(x) = x+x
    y = relay.Function([x], x + x)

    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy() + x.asnumpy())