Example #1
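These snippets are excerpted from TVM's test suite, so each relies on module-level imports. A minimal sketch of the assumed imports (the exact set varies per test file):

import numpy as np
import tvm
from tvm import nd, relay, te, testing, topi
from tvm.relay import create_executor, transform
from tvm.relay.backend.interpreter import ConstructorValue, RefValue
from tvm.relay.testing import rand, run_infer_type
from tvm.runtime import container
from tvm.testing import assert_allclose, check_numerical_grads
from tvm.topi.utils import get_const_tuple
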
def compare_numpy_tvm(inputs, output, target, device, compute, schedule):
    """Compare a numpy inputs and output of a function to the results of the TVM version.

    Parameters
    ----------
    inputs : Sequence[numpy.ndarray]
        List of input numpy arrays to pass to the function.
    output : numpy.ndarray
        Verified correct function output.
    target : tvm.target.Target
        Target to run on.
    device : tvm.runtime.Device
        Context to run on.
    compute : callable
        Topi compute function to test against.
    schedule : callable
        Topi scheduling function to test against.
    """
    te_inputs = [
        tvm.te.placeholder(shape=i.shape, dtype=str(i.dtype)) for i in inputs
    ]
    te_out = tvm.nd.array(np.zeros(output.shape).astype(output.dtype),
                          device=device)
    with tvm.target.Target(target):
        out = compute(*te_inputs)
        s = schedule([out])
        func = tvm.build(s, te_inputs + [out])
        arys = [tvm.nd.array(x, device=device) for x in inputs]
        func(*(arys + [te_out]))
        assert_allclose(te_out.numpy(), output, atol=1e-4, rtol=1e-4)
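
A hypothetical usage sketch, checking topi.add against a verified numpy sum (the arrays and the injective schedule choice are illustrative, not from the original test):

a = np.random.rand(16, 16).astype("float32")
b = np.random.rand(16, 16).astype("float32")
compare_numpy_tvm(
    [a, b], a + b, "llvm", tvm.cpu(0),
    topi.add, topi.generic.schedule_injective,
)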
Example #2
def test_reverse_ad_identity():
    """Simple test of reverse-mode AD of f(x) = x."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)

    func = relay.Function([x], x)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])])
    )

    x = rand(dtype, *shape)
    forward, (grad,) = create_executor(mod=mod).evaluate(back_func)(x)
    assert_allclose(forward.numpy(), x.numpy())
    assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
Example #3
def test_add_broadcast():
    """Test adding tensors of different shapes via broadcasting. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = "float32"
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)

    x1 = relay.var("x1", t1)
    x2 = relay.var("x2", t2)
    func = relay.Function([x1, x2], x1 + x2)
    func = run_infer_type(func)

    mod["main"] = func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]

    x1_np = rand(dtype, *shape1).numpy()
    x2_np = rand(dtype, *shape2).numpy()
    expected_forward = x1_np + x2_np

    expected_forward_type = relay.TensorType(expected_forward.shape, dtype)
    assert mod["main"].checked_type == relay.FuncType([t1, t2], expected_forward_type)

    forward = create_executor(mod=mod).evaluate(func)(x1_np, x2_np)

    assert_allclose(forward.numpy(), expected_forward)
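
The expected output shape follows numpy broadcasting: (3, 4, 1) + (1, 5) broadcasts to (3, 4, 5). A plain numpy check of that rule:

a = np.zeros((3, 4, 1), dtype="float32")
b = np.zeros((1, 5), dtype="float32")
assert (a + b).shape == (3, 4, 5)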
Example #4
    def check_device(device, host="llvm"):
        ctx = tvm.context(device, 0)
        if not tvm.runtime.enabled(host):
            return
        if not ctx.exist:
            print("skip because %s is not enabled.." % device)
            return

        sout = te.create_schedule(out.op)
        mout = tvm.build(sout, [out] + inputs + args)
        out_shape = get_const_tuple(out.shape)

        l, h = data_range
        input_data = [
            tvm.nd.array(
                np.random.uniform(l, h, size=get_const_tuple(
                    input.shape)).astype(input.dtype)) for input in inputs
        ]
        arg_vals = [
            tvm.nd.array(
                np.random.uniform(l, h, size=get_const_tuple(
                    arg.shape)).astype(arg.dtype)) for arg in args
        ]

        ones = topi.full_like(out, 1.0)
        # a head of ones sums over the output dimensions,
        # which is equivalent to grad(out.sum(), inputs)
        grads = te.gradient(out, inputs, head=ones)
        grad_sched = te.create_schedule([grad.op for grad in grads])
        mgrad = tvm.build(grad_sched, list(grads) + inputs + args)
        if assert_no_jacobian:
            # TODO(yzhliu): it is better to visit the expression and do assertion
            lowered_ir = str(
                tvm.lower(grad_sched,
                          list(grads) + inputs + args,
                          simple_mode=True))
            assert "jacobian" not in lowered_ir, lowered_ir

        grad_data = [
            tvm.nd.empty(get_const_tuple(i.shape), g.dtype)
            for i, g in zip(inputs, grads)
        ]

        mgrad(*grad_data, *input_data, *arg_vals)
        g_res = [g.asnumpy() for g in grad_data]

        if desired_grads:
            assert isinstance(desired_grads, list)
            for actual, desired in zip(g_res, desired_grads):
                assert_allclose(actual, desired, rtol=0.1, atol=1e-2)
        else:

            def forward(*in_data):
                out_data = tvm.nd.empty(out_shape, out.dtype)
                mout(out_data, *[tvm.nd.array(d) for d in list(in_data)])
                return out_data.asnumpy().sum()

            check_numerical_grads(forward,
                                  [d.asnumpy() for d in input_data + arg_vals],
                                  g_res)
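
A minimal standalone sketch of te.gradient with an explicit head (hypothetical tensors; a head of ones has the output's shape, so the result is the gradient of out.sum() with respect to the inputs):

x = te.placeholder((8,), name="x", dtype="float32")
y = te.compute((8,), lambda i: x[i] * x[i], name="y")
head = topi.full_like(y, 1.0)
# with a head of ones, dx is d(y.sum())/dx, i.e. 2 * x elementwise
[dx] = te.gradient(y, [x], head=head)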
Example #5
def test_ret_tuple():
    """Test tuple return type. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    # f(x) = (x, 2*x)
    func = relay.Function([x], relay.Tuple([x, x * relay.const(2.0)]))
    func = run_infer_type(func)

    mod["main"] = func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t],
                                                      relay.TupleType([t, t]))

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(func)(x)
    assert_allclose(y[0].numpy(), x.numpy())
    assert_allclose(y[1].numpy(), x.numpy() * 2.0)
Example #6
def test_add_tuple():
    """Add elements of tuple. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    t = relay.TupleType([tensor_type, tensor_type])

    x = relay.var("x", t)
    # f((x1,x2)) = x1 + x2
    y = relay.Function([x],
                       relay.TupleGetItem(x, 0) + relay.TupleGetItem(x, 1))

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    mod = tvm.transform.PrintIR(show_meta_data=True)(mod)  # debug: prints the module IR
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], tensor_type)

    x = (rand(dtype, *shape), rand(dtype, *shape))
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x[0].numpy() + x[1].numpy())
Example #7
def test_tuple_passing():
    x = relay.var(
        "x",
        type_annotation=relay.ty.TupleType([
            relay.ty.TensorType((), "int64"),
            relay.ty.TensorType((), "int64")
        ]),
    )

    fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
    mod = tvm.IRModule({})
    gv = relay.GlobalVar("main")
    mod[gv] = fn
    mod = relay.transform.InferType()(mod)

    dev = tvm.cpu()
    target = tvm.target.Target("llvm")
    f = relay.create_executor(mod=mod, device=dev, target=target).evaluate(gv)
    # First use a Python tuple.
    out = f((10, 8))
    testing.assert_allclose(out.numpy(), np.array(10))
    # Second use a tuple value.
    value_tuple = container.tuple_object(
        [nd.array(np.array(11)),
         nd.array(np.array(12))])
    out = f(value_tuple)
    testing.assert_allclose(out.numpy(), np.array(11))
Example #8
def test_functional_returns():
    n = 3
    x = relay.Var("x", relay.TensorType([n], "float32"))
    f = relay.Function([x], x)
    t = relay.Tuple([f, f])
    c = np.random.rand(n).astype("float32")
    result1, result2 = relay.create_executor().evaluate(t)
    testing.assert_allclose(result1(c).numpy(), c)
    testing.assert_allclose(result2(c).numpy(), c)
Example #9
def test_kwargs_params():
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1, 10))
    z = relay.var("z", shape=(1, 10))
    f = relay.Function([x, y, z], x + y + z)
    x_data = np.random.rand(1, 10).astype("float32")
    y_data = np.random.rand(1, 10).astype("float32")
    z_data = np.random.rand(1, 10).astype("float32")
    params = {"y": y_data, "z": z_data}
    res = relay.create_executor().evaluate(f)(x_data, **params)
    testing.assert_allclose(res.numpy(), x_data + y_data + z_data)
Example #10
def test_allclose(value, target, rtol=1e-5, print_diff=False):
    from tvm.testing import assert_allclose

    passed = 1
    try:
        assert_allclose(value, target, rtol)
    except AssertionError:
        passed = 0
        if print_diff:
            print(target - value)
            print("Max diff:", np.max(np.fabs(target - value)))
    return passed
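
A hypothetical call, relying on the 1/0 return convention (assumes the default atol of tvm.testing.assert_allclose):

a = np.array([1.0, 2.0], dtype="float32")
assert test_allclose(a, a + 1e-7) == 1
assert test_allclose(a, a + 1.0, print_diff=True) == 0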
Example #11
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
    # TODO(tqchen) add more types once the schedule register is fixed.
    for target in ["llvm"]:
        dev = tvm.device(target, 0)
        if not testing.device_enabled(target):
            continue  # skip targets that are not enabled in this build
        func = relay.create_executor(mod=mod, device=dev,
                                     target=target).evaluate(expr)
        result = func if args is None else func(*args)
        # use the tvm.testing helper, which also sets atol
        testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
Example #12
def test_keyword_args():
    n = 3
    x = relay.Var("x", relay.TensorType([n], "float32"))
    y = relay.Var("y", relay.TensorType([n], "float32"))
    z = relay.add(x, y)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], z)
    x_np = np.random.uniform(size=(n, )).astype("float32")
    y_np = np.random.uniform(size=(n, )).astype("float32")
    expected = np.add(x_np, y_np)
    actual = relay.create_executor(mod=mod).evaluate()(y=y_np, x=x_np)
    testing.assert_allclose(actual.numpy(), expected)
Example #13
def test_zeros():
    """Simple test using "zeros" op"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.Function([x], x + relay.zeros(shape, dtype))

    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy())
Example #14
def test_ones_like():
    """Simple test using "ones_like" op"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.Function([x], x + relay.ones_like(x))

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy() + np.ones_like(x.numpy()))
Example #15
def test_multivar_reverse_ad():
    """Simple test with multivariate reverse-mode AD."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = create_executor(mod=mod).evaluate(back_func)(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
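
A numpy-only spot check of the same gradients via a uniform finite-difference shift (a sketch, independent of TVM):

eps = 1e-3
x0 = np.random.rand(10, 10).astype("float32")
y0 = np.random.rand(10, 10).astype("float32")
# shifting every element of x by eps changes sum(x * y) by eps * sum(y),
# so the difference quotient recovers grad_x summed over all elements
num_grad = (((x0 + eps) * y0).sum() - ((x0 - eps) * y0).sum()) / (2 * eps)
np.testing.assert_allclose(num_grad, y0.sum(), rtol=1e-2)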
Example #16
def test_before_partial_eval():
    """Test the transformation before PartialEval."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], x * y)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    seq = tvm.transform.Sequential(
        [
            transform.LazyGradientInit(),
            transform.PartialEvaluate(),
            transform.DeadCodeElimination(),
        ]
    )
    mod = seq(mod)
    back_func = mod["main"]

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = create_executor(mod=mod).evaluate(back_func)(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
Example #17
def test_add():
    """Simple add testcase. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    # f(x) = x+x
    y = relay.Function([x], x + x)

    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy() + x.numpy())
Example #18
def test_mult():
    """Simple multiplication testcase. Check types and semantic equivalence."""
    mod = tvm.IRModule()

    shape = (15, 15)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    # f(x) = x*x
    y = relay.Function([x], x * x)

    mod["main"] = y
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]

    assert mod["main"].checked_type == relay.FuncType([t], t)

    x = rand(dtype, *shape)
    y = create_executor(mod=mod).evaluate(y)(x)
    assert_allclose(y.numpy(), x.numpy() * x.numpy())
Example #19
def test_after_partial_eval():
    """Test the transformation after reverse-mode AD and PartialEval."""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    back_func = mod["main"]

    seq = tvm.transform.Sequential(
        [
            transform.PartialEvaluate(),
            transform.InferType(),
            transform.LazyGradientInit(),
            transform.InferType(),
            transform.DeadCodeElimination(),
            transform.InferType(),
        ]
    )

    mod = seq(mod)

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    forward, (grad_x, grad_y) = create_executor(mod=mod).evaluate(back_func)(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
Example #20
def test_function_taking_adt_ref_tuple():
    mod = tvm.IRModule()
    prelude = relay.prelude.Prelude(mod)
    _, cons, nil = prelude.mod.get_type("List")

    nil_value = ConstructorValue(nil.tag, [], nil)
    cons_value = ConstructorValue(
        cons.tag,
        [nd.array(np.random.rand(1, 10).astype("float32")), nil_value],
        cons,
    )

    ref_value = RefValue(nd.array(np.random.rand(1, 10).astype("float32")))
    tuple_value = container.tuple_object(
        [nd.array(np.random.rand(1, 10).astype("float32")) for _ in range(10)])

    id_func = relay.create_executor(mod=mod).evaluate(prelude.id)

    res_nil = id_func(nil_value)
    assert res_nil.tag == nil_value.tag
    assert len(res_nil.fields) == 0

    res_cons = id_func(cons_value)
    assert res_cons.tag == cons_value.tag
    assert len(res_cons.fields) == len(cons_value.fields)
    testing.assert_allclose(res_cons.fields[0].numpy(),
                            cons_value.fields[0].numpy())
    assert isinstance(res_cons.fields[1], ConstructorValue)
    assert res_cons.fields[1].tag == nil.tag
    assert len(res_cons.fields[1].fields) == 0

    res_ref = id_func(ref_value)
    testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy())

    res_tuple = id_func(tuple_value)
    for i in range(10):
        testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy())
Example #21
def test_binds():
    x = relay.var("x")
    y = relay.add(x, x)
    xx = np.ones((10, 20))
    res = relay.create_executor().evaluate(y, binds={x: xx}).numpy()
    testing.assert_allclose(xx + xx, res)