예제 #1
0
def test_pow():
    """Iterate a doubling function 3 times via ``nat_iterate`` (x -> 8x) and
    check both the forward value and that the gradient is 8 everywhere."""
    module = tvm.IRModule()
    prelude = Prelude(module)
    add_nat_definitions(prelude)
    dims = (10, 10)
    elem_dtype = 'float32'
    tensor_ty = relay.TensorType(dims, elem_dtype)
    arg = relay.var("x", tensor_ty)
    doubler = relay.Function([arg], arg + arg)
    inp = relay.var("i", tensor_ty)
    module["main"] = relay.Function(
        [inp], prelude.nat_iterate(doubler, make_nat_expr(prelude, 3))(inp))
    module["main"] = gradient(module["main"], mod=module)
    typed_mod = transform.InferType()(module)
    back_func = typed_mod["main"]
    # Gradient transform returns (forward, (grads,)) so the type is T -> (T, (T,)).
    expected_ty = relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])]))
    assert back_func.checked_type == expected_ty
    data = rand(elem_dtype, *dims)
    executor = create_executor(mod=module)
    forward, (grad_i,) = executor.evaluate(back_func)(data)
    tvm.testing.assert_allclose(forward.asnumpy(), 8 * data.asnumpy())
    tvm.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy()))
예제 #2
0
def test_recursion():
    """Round-trip a recursive ``nat_iterate`` program through CPS and back,
    then check it still computes x -> 8x."""
    module = tvm.IRModule()
    prelude = Prelude(module)
    prelude.mod.import_from_std("nat.rly")
    nat_iterate = prelude.mod.get_global_var("nat_iterate")
    elem_dtype = "float32"
    tensor_ty = relay.TensorType((10, 10), elem_dtype)
    arg = relay.var("x", tensor_ty)
    doubler = relay.Function([arg], arg + arg)
    inp = relay.var("i", tensor_ty)
    module["main"] = relay.Function(
        [inp], nat_iterate(doubler, make_nat_expr(prelude, 3))(inp))
    # CPS conversion followed by un-CPS should leave the semantics intact.
    module = relay.transform.InferType()(module)
    module["main"] = to_cps(module["main"], mod=module)
    module = relay.transform.InferType()(module)
    module["main"] = un_cps(module["main"])
    executor = create_executor(mod=module)
    data = rand(elem_dtype, 10, 10)
    forward = executor.evaluate()(data)
    tvm.testing.assert_allclose(forward.asnumpy(), 8 * data.asnumpy())
예제 #3
0
def test_list_length():
    """``length`` of a freshly built 10-element list evaluates to 10 on
    every enabled target."""
    num_elems = 10

    mod = tvm.IRModule()
    prelude = Prelude(mod)

    _, cons, nil = mod.get_type("List")
    length = mod.get_global_var("length")

    # Build a list of ``num_elems`` zero constants.
    lst = nil()
    for _ in range(num_elems):
        lst = cons(relay.const(0), lst)

    mod["main"] = relay.Function([], length(lst))
    for tgt, ctx in tvm.testing.enabled_targets():
        result = veval(mod, ctx=ctx, target=tgt)
        tvm.testing.assert_allclose(result.asnumpy(), num_elems)
예제 #4
0
def test_pow():
    """Gradient of iterating doubling three times (x -> 8x) should be 8
    everywhere (legacy ``relay.ir_pass`` API)."""
    mod = relay.Module()
    prelude = Prelude(mod)
    tensor_ty = relay.TensorType((10, 10), 'float32')
    arg = relay.var("x", tensor_ty)
    doubler = relay.Function([arg], arg + arg)
    inp = relay.var("i", tensor_ty)
    # Peano encoding of 3: s(s(s(z))).
    three = prelude.s(prelude.s(prelude.s(prelude.z())))
    func = relay.Function([inp], relay.Call(prelude.iterate(doubler, three), [inp]))
    back_func = relay.ir_pass.infer_type(gradient(func, mod=mod), mod=mod)
    expected_ty = relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])]))
    assert back_func.checked_type == expected_ty
    data = rand('float32', 10, 10)
    ex = create_executor(mod=mod)
    forward, (grad_i, ) = ex.evaluate(back_func)(data)
    np.testing.assert_allclose(forward.asnumpy(), 8 * data.asnumpy())
    np.testing.assert_allclose(grad_i.asnumpy(),
                               8 * np.ones_like(grad_i.asnumpy()))
예제 #5
0
 def run(dtype):
     """Stack three copies of a (1,)-shaped tensor through the dtype-generic
     tensor-array ops and compare against ``np.stack``.

     Parameters
     ----------
     dtype : str
         Element dtype used for both the Relay tensors and the NumPy input.
     """
     mod = tvm.IRModule()
     p = Prelude(mod)
     tensor_t = p.get_type("tensor_t", dtype)
     # FIX: was f"List" — an f-string with no placeholders; a plain literal suffices.
     rlist = p.mod.get_global_type_var("List")
     tensor_array = p.get_global_var("tensor_array", dtype)
     tensor1 = p.get_tensor_ctor("tensor1", dtype)
     write = p.get_global_var("tensor_array_write", dtype)
     stack = p.get_global_var("tensor_array_stack", dtype)
     # TODO extract test case from inference failures
     # setting this wrong causes crashes
     v = relay.var("v", shape=(1, ), dtype=dtype)
     init_tensor_array = tensor_array(relay.const(3))
     # Write the same tensor into slots 0, 1 and 2, then stack the array.
     tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))
     tensor_array2 = write(tensor_array1, relay.const(1), tensor1(v))
     tensor_array3 = write(tensor_array2, relay.const(2), tensor1(v))
     tensor_array4 = stack(tensor_array3)
     mod["main"] = relay.Function([v], tensor_array4, tensor_t())
     t = np.random.uniform(low=0.0, high=8.0, size=(1, )).astype(dtype)
     expected = [np.stack([t, t, t])]
     check_tensor_array(mod, expected, t, dtype=dtype)
예제 #6
0
def test_list_filter():
    """Filter a Prelude list [1,3,1,5,1] with an ``x > 1`` predicate and
    check that only 3 and 5 survive, on every enabled target."""
    mod = tvm.IRModule()
    p = Prelude(mod)

    nil = p.nil
    cons = p.cons
    # FIX: bind under a name that does not shadow the builtin ``filter``.
    filter_fn = p.filter

    x = relay.var("x", 'int32')
    greater_than_one = relay.Function([x], x > relay.const(1))
    # List literal 1 :: 3 :: 1 :: 5 :: 1 :: nil, built innermost-last.
    l = cons(
        relay.const(1),
        cons(
            relay.const(3),
            cons(relay.const(1),
                 cons(relay.const(5), cons(relay.const(1), nil())))))
    f = relay.Function([], filter_fn(greater_than_one, l))
    mod["main"] = f
    for tgt, ctx in tvm.testing.enabled_targets():
        result = veval(mod, ctx=ctx, target=tgt)
        tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5]))
예제 #7
0
def test_abs_diff():
    """Partially evaluate ``diff(7, 3)`` over Peano naturals down to the
    literal nat 4 via dead-code + partial evaluation (``dcpe``)."""
    # TODO(@M.K.): refactor using tuple pattern (not yet implemented)
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    xp = Var("x'", nat())  # bound to the predecessor of x in the s-pattern below
    yp = Var("y'", nat())  # bound to the predecessor of y in the s-pattern below
    diff = GlobalVar("diff")
    # diff strips one successor off each argument per recursive step (note the
    # swapped arguments in the recursive call — |x - y| is symmetric); whichever
    # side reaches zero first leaves the other side as the absolute difference.
    y_z_case = Clause(PatternConstructor(z, []), x)
    y_s_case = Clause(PatternConstructor(s, [PatternVar(yp)]), diff(yp, xp))
    x_z_case = Clause(PatternConstructor(z, []), y)
    x_s_case = Clause(PatternConstructor(s, [PatternVar(xp)]),
                      Match(y, [y_z_case, y_s_case]))
    mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
    orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    # The whole call should fold to the constant nat 4 = |7 - 3|.
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 4))
예제 #8
0
    def run(dtype, shape):
        """Concatenate a (2, 3) and a (1, 3) tensor through the static
        tensor-array ops and compare against ``np.concatenate``."""
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()

        v1 = relay.var("v1")
        v2 = relay.var("v2")
        make_ta = prelude.get_var_static("tensor_array", dtype, shape)
        write_func = prelude.get_var_static("tensor_array_write", dtype, shape)
        concat_func = prelude.get_var_static("tensor_array_concat", dtype, shape)
        make_tensor = prelude.get_var_static("tensor_constructor", dtype, shape)

        # Two-slot array: write v1 into slot 0 and v2 into slot 1, then concat.
        ta = make_ta(relay.const(2))
        ta = write_func(ta, relay.const(0), make_tensor(v1))
        ta = write_func(ta, relay.const(1), make_tensor(v2))
        mod["main"] = relay.Function([v1, v2], concat_func(ta))

        v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)
        expected = [np.concatenate((v1_data, v2_data), axis=0)]
        check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)
예제 #9
0
def test_abs_diff():
    """Partially evaluate ``diff(7, 3)`` over Peano naturals down to the
    literal nat 4 (legacy ``Module``/``alpha_equal`` API)."""
    # TODO(@M.K.): refactor using tuple pattern (not yet implemented)
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    xp = Var("x'", nat)  # predecessor of x, bound by the s-pattern below
    yp = Var("y'", nat)  # predecessor of y, bound by the s-pattern below
    diff = GlobalVar("diff")
    # diff strips one successor off each argument per recursive step (arguments
    # are swapped in the recursive call — |x - y| is symmetric); whichever side
    # hits zero first leaves the other as the absolute difference.
    y_z_case = Clause(PatternConstructor(p.z, []), x)
    y_s_case = Clause(PatternConstructor(p.s, [PatternVar(yp)]), diff(yp, xp))
    x_z_case = Clause(PatternConstructor(p.z, []), y)
    x_s_case = Clause(PatternConstructor(p.s, [PatternVar(xp)]),
                      Match(y, [y_z_case, y_s_case]))
    mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
    orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    # The whole call should fold to the constant nat 4 = |7 - 3|.
    assert alpha_equal(res.body, make_nat_expr(p, 4))
예제 #10
0
def test_list_length():
    """``length`` of a freshly built 10-element Prelude list evaluates to 10."""
    expected = list(range(10))

    mod = tvm.IRModule()
    p = Prelude(mod)

    nil = p.nil
    cons = p.cons
    length = p.length

    l = nil()
    # create zero initialized list
    # FIX: the loop index was unused — iterate the list directly, only the
    # element count matters here.
    for _ in expected:
        l = cons(relay.const(0), l)

    l = length(l)

    f = relay.Function([], l)
    mod["main"] = f
    result = veval(mod)
    tvm.testing.assert_allclose(result.asnumpy(), 10)
예제 #11
0
def test_list_constructor():
    """Cons 1, 2, 3 onto nil and check the VM value round-trips to [3, 2, 1]."""
    mod = tvm.IRModule()
    p = Prelude(mod)

    nil = p.nil
    cons = p.cons
    # FIX: removed unused local ``l = p.l`` — the list type var was never used.

    one2 = cons(relay.const(1), nil())
    one3 = cons(relay.const(2), one2)
    one4 = cons(relay.const(3), one3)
    f = relay.Function([], one4)

    mod["main"] = f

    result = veval(mod)
    # A cons cell is a 2-field ADT value: (head, tail).
    assert len(result) == 2
    assert len(result[1]) == 2

    obj = vmobj_to_list(result)
    tvm.testing.assert_allclose(obj, np.array([3, 2, 1]))
예제 #12
0
def test_arbitrary_let_nesting():
    """Evaluate a tuple whose fields each contain their own let/ref nesting,
    checking ``run_as_python`` handles lets in arbitrary expression positions."""
    # something that is tricky to do in Python but comes naturally in Relay
    mod = relay.Module()
    p = Prelude(mod)
    x = relay.Var('x')
    r = relay.Var('r')
    y = relay.Var('y')
    z = relay.Var('z')
    expr = relay.Tuple([
        # let x = (1, 2) in x[1]            -> 2
        relay.Let(x, relay.Tuple([relay.const(1),
                                  relay.const(2)]), relay.TupleGetItem(x, 1)),
        # let r = ref(1) in (r := 3; !r)    -> 3
        relay.Let(r, relay.RefCreate(relay.const(1)),
                  seq(relay.RefWrite(r, relay.const(3)), relay.RefRead(r))),
        # let y = id(let z = 4 in z) in y   -> 4
        relay.Let(y, p.id(relay.Let(z, relay.const(4), z)), y)
    ])

    tup_val = run_as_python(expr, mod)
    assert_adt_len(tup_val, 3)
    assert_tensor_value(tup_val[0], 2)
    assert_tensor_value(tup_val[1], 3)
    assert_tensor_value(tup_val[2], 4)
예제 #13
0
File: test_vm.py  Project: zyzhou1028/tvm
def test_list_filter():
    """Filter a Prelude list [1,3,1,5,1] with an ``x > 1`` predicate and
    check that only 3 and 5 survive, on every enabled target."""
    mod = tvm.IRModule()
    p = Prelude(mod)

    _, cons, nil = mod.get_type("List")
    # FIX: bind under a name that does not shadow the builtin ``filter``.
    filter_fn = mod.get_global_var("filter")

    x = relay.var("x", "int32")
    greater_than_one = relay.Function([x], x > relay.const(1))
    # List literal 1 :: 3 :: 1 :: 5 :: 1 :: nil, built innermost-last.
    l = cons(
        relay.const(1),
        cons(
            relay.const(3),
            cons(relay.const(1),
                 cons(relay.const(5), cons(relay.const(1), nil())))),
    )
    f = relay.Function([], filter_fn(greater_than_one, l))
    mod["main"] = f
    for tgt, dev in tvm.testing.enabled_targets():
        result = veval(mod, device=dev, target=tgt)
        tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5]))
예제 #14
0
def test_head_cons():
    """Check that dcpe reduces ``hd(cons(x, nil))`` to the identity on x."""
    mod = relay.Module()
    p = Prelude(mod)
    def hd_impl():
        # Build a polymorphic head function: match only the cons case and
        # return the bound head variable (no nil clause — intentionally partial).
        a = relay.TypeVar("a")
        x = relay.Var("x", p.l(a))
        y = relay.Var("y")
        z = relay.Var("z")
        cons_case = relay.Clause(relay.PatternConstructor(p.cons,
                                                          [relay.PatternVar(y),
                                                           relay.PatternVar(z)]),
                                 y)
        return relay.Function([x], relay.Match(x, [cons_case]), a, [a])
    t = relay.TypeVar("t")
    x = relay.Var("x", t)
    hd = relay.Var("hd")
    # let hd = <head impl> in hd(cons(x, nil))
    body = relay.Let(hd, hd_impl(), hd(p.cons(x, p.nil())))
    f = relay.Function([x], body, None, [t])
    f = infer_type(f, mod=mod)
    res = dcpe(f)
    # After partial evaluation the wrapper should collapse to the identity.
    assert alpha_equal(res, relay.Function([x], x, t, [t]))
def test_missing_in_the_middle():
    """Match covers lists of length 0, 1 and >=3; ``unmatched_cases`` should
    report exactly one uncovered case: a list of length exactly 2."""
    mod = tvm.IRModule()
    p = Prelude(mod)

    v = relay.Var('v')
    match = relay.Match(
        v,
        [
            # list of length exactly 1
            relay.Clause(
                relay.PatternConstructor(p.cons, [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(p.nil, [])
                ]), v),
            # empty list
            relay.Clause(relay.PatternConstructor(p.nil, []), v),
            # list of length 3 or more
            relay.Clause(
                relay.PatternConstructor(p.cons, [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(p.cons, [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(
                            p.cons,
                            [relay.PatternWildcard(),
                             relay.PatternWildcard()])
                    ])
                ]), v)
        ])

    # fails to match a list of length exactly two
    unmatched = unmatched_cases(match, mod)
    assert len(unmatched) == 1
    # The reported pattern must be cons(_, cons(_, nil)) — a two-element list.
    assert isinstance(unmatched[0], relay.PatternConstructor)
    assert unmatched[0].constructor == p.cons
    assert isinstance(unmatched[0].patterns[1], relay.PatternConstructor)
    assert unmatched[0].patterns[1].constructor == p.cons
    assert isinstance(unmatched[0].patterns[1].patterns[1],
                      relay.PatternConstructor)
    assert unmatched[0].patterns[1].patterns[1].constructor == p.nil
예제 #16
0
def test_local_recursion():
    """Run a locally let-bound recursive function (an identity map over a
    Prelude list) through ``run_as_python`` and check the output structure."""
    mod = tvm.IRModule()
    p = Prelude(mod)

    v = relay.Var("v")
    h = relay.Var("h")  # head, bound by the cons pattern
    t = relay.Var("t")  # tail, bound by the cons pattern
    f = relay.Var("f")

    # just returns the same list
    # NOTE: ``f`` appears inside its own let-bound body, i.e. the recursion
    # goes through the let-bound variable itself.
    let = relay.Let(
        f,
        relay.Function(
            [v],
            relay.Match(
                v,
                [
                    relay.Clause(
                        relay.PatternConstructor(
                            p.cons, [relay.PatternVar(h),
                                     relay.PatternVar(t)]),
                        p.cons(h, f(t)),
                    ),
                    relay.Clause(relay.PatternConstructor(p.nil, []), p.nil()),
                ],
            ),
        ),
        f(
            p.cons(relay.const(1),
                   p.cons(relay.const(2), p.cons(relay.const(3), p.nil())))),
    )

    val = run_as_python(let, mod)
    # Walk the resulting cons cells and check they still hold 1, 2, 3, nil.
    assert_constructor_value(val, p.cons, 2)
    assert_tensor_value(val.fields[0], 1)
    assert_constructor_value(val.fields[1], p.cons, 2)
    assert_tensor_value(val.fields[1].fields[0], 2)
    assert_constructor_value(val.fields[1].fields[1], p.cons, 2)
    assert_tensor_value(val.fields[1].fields[1].fields[0], 3)
    assert_constructor_value(val.fields[1].fields[1].fields[1], p.nil, 0)
예제 #17
0
def test_compose():
    """Compose add-one with itself via the Prelude ``compose`` and check the
    VM computes x + 2."""
    mod = relay.Module()
    p = Prelude(mod)

    compose = p.compose

    # remove all functions to not have pattern match to pass vm compilation
    # TODO(wweic): remove the hack and implement pattern match
    # NOTE(review): this overwrites every prelude function except ``compose``
    # with a constant while iterating ``mod.functions`` — order-sensitive hack.
    for v, _ in mod.functions.items():
        if v.name_hint == 'compose':
            continue
        mod[v] = relay.const(0)

    # add_one = fun x -> x + 1
    sb = relay.ScopeBuilder()
    x = relay.var('x', 'float32')
    x1 = sb.let('x1', x)
    xplusone = x1 + relay.const(1.0, 'float32')
    sb.ret(xplusone)
    body = sb.get()
    add_one = relay.GlobalVar("add_one")
    add_one_func = relay.Function([x], body)

    # add_two = compose(add_one, add_one)
    sb = relay.ScopeBuilder()
    y = relay.var('y', 'float32')
    add_two_func = sb.let('add_two', compose(add_one_func, add_one_func))
    add_two_res = add_two_func(y)
    sb.ret(add_two_res)
    add_two_body = sb.get()

    mod[add_one] = add_one_func

    f = relay.Function([y], add_two_body)
    mod["main"] = f

    x_data = np.array(np.random.rand()).astype('float32')
    result = veval(mod)(x_data)

    # add_one applied twice: x + 2.
    tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0)
예제 #18
0
    def run(dtype, shape):
        """Gather slots 0 and 2 from a 3-slot static tensor array holding the
        same tensor in every slot, and compare against ``np.stack``."""
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()

        make_ta = prelude.get_var_static("tensor_array", dtype, shape)
        make_tensor = prelude.get_var_static("tensor_constructor", dtype, shape)
        write = prelude.get_var_static("tensor_array_write", dtype, shape)
        gather = prelude.get_var_static("tensor_array_gather", dtype, shape)

        v = relay.var("v")
        indice = relay.var("indice")
        # Fill all three slots with the same tensor built from ``v``.
        ta = make_ta(relay.const(3))
        for slot in range(3):
            ta = write(ta, relay.const(slot), make_tensor(v))
        mod["main"] = relay.Function([v, indice], gather(ta, indice))

        t = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        indice_data = np.array([0, 2], dtype="int32")
        expected = [np.stack([t, t])]
        check_tensor_array(mod, expected, *(t, indice_data), dtype=dtype)
예제 #19
0
    def run(dtype, shape):
        """Write two random tensors into a length-2 static tensor array and
        check they read back unchanged."""
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()

        ta_length = 2
        np_data_list = [
            np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(ta_length)
        ]

        v0 = relay.var("v0")
        v1 = relay.var("v1")
        make_ta = prelude.get_var_static("tensor_array", dtype, shape)
        write_func = prelude.get_var_static("tensor_array_write", dtype, shape)
        make_tensor = prelude.get_var_static("tensor_constructor", dtype, shape)
        # Slot 0 <- v0, slot 1 <- v1; the array itself is the program output.
        ta = make_ta(relay.const(ta_length))
        ta = write_func(ta, relay.const(0), make_tensor(v0))
        ta = write_func(ta, relay.const(1), make_tensor(v1))
        mod["main"] = relay.Function([v0, v1], ta)
        check_tensor_array(mod, np_data_list, *np_data_list, dtype=dtype)
예제 #20
0
 def run(dtype):
     """Stack three copies of a (1,)-element tensor via the dynamic
     tensor-array ops on the debug executor.

     FIXES: removed the unused local ``l = relay.var('l')`` and the trailing
     whitespace on the write lines.
     """
     mod = relay.Module()
     p = Prelude(mod)
     tensor_array = p.get_var('tensor_array', dtype)
     tensor1 = p.get_var('tensor1', dtype)
     write = p.get_var('tensor_array_write', dtype)
     stack = p.get_var('tensor_array_stack', dtype)
     v = relay.var('v')
     # Write the same tensor into slots 0, 1 and 2, then stack the array.
     init_tensor_array = tensor_array(relay.const(3))
     tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))
     tensor_array2 = write(tensor_array1, relay.const(1), tensor1(v))
     tensor_array3 = write(tensor_array2, relay.const(2), tensor1(v))
     tensor_array4 = stack(tensor_array3)
     mod["main"] = relay.Function([v], tensor_array4)
     for kind in ["debug"]:
         ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
         t = np.random.uniform(size=(1,)).astype(dtype)
         result = ex.evaluate()(t)
         res = vmobj_to_list(result)
         expected = [np.stack([t, t, t])]
         tvm.testing.assert_allclose(expected, res)
예제 #21
0
def test_adt_compose():
    """Compose add-one with itself via the Prelude ``compose``, round-trip the
    program through the VM serializer/deserializer, and check it computes x + 2."""
    mod = relay.Module()
    p = Prelude(mod)

    compose = p.compose

    # add_one = fun x -> x + 1
    sb = relay.ScopeBuilder()
    x = relay.var('x', 'float32')
    x1 = sb.let('x1', x)
    xplusone = x1 + relay.const(1.0, 'float32')
    sb.ret(xplusone)
    body = sb.get()
    add_one = relay.GlobalVar("add_one")
    add_one_func = relay.Function([x], body)

    # add_two = compose(add_one, add_one)
    sb = relay.ScopeBuilder()
    y = relay.var('y', 'float32')
    add_two_func = sb.let('add_two', compose(add_one_func, add_one_func))
    add_two_res = add_two_func(y)
    sb.ret(add_two_res)
    add_two_body = sb.get()

    mod[add_one] = add_one_func

    f = relay.Function([y], add_two_body)
    mod["main"] = f

    # Serialize the compiled VM and deserialize it back before running.
    vm = create_vm(mod)
    ser = serializer.Serializer(vm)
    code, lib = ser.serialize()
    deser = deserializer.Deserializer(code, lib)
    des_vm = deser.deserialize()

    x_data = np.array(np.random.rand()).astype('float32')
    result = veval(des_vm, x_data)

    # add_one applied twice: x + 2.
    tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0)
예제 #22
0
def test_too_specific_match():
    """A match with only a length>=2 cons pattern must report both nil and
    the singleton list as unmatched cases."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    match = relay.Match(
        v,
        [
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(
                            cons,
                            [relay.PatternWildcard(),
                             relay.PatternWildcard()]),
                    ],
                ),
                v,
            )
        ],
    )

    unmatched = unmatched_cases(match, mod)

    # will not match nil or a list of length 1
    nil_found = False
    single_length_found = False
    assert len(unmatched) == 2
    for case in unmatched:
        assert isinstance(case, relay.PatternConstructor)
        if case.constructor == nil:
            nil_found = True
        if case.constructor == cons:
            assert isinstance(case.patterns[1], relay.PatternConstructor)
            assert case.patterns[1].constructor == nil
            single_length_found = True
    # FIX: the flags were computed but never checked, so the test could pass
    # even if one of the two expected cases was missing.
    assert nil_found and single_length_found
예제 #23
0
    def run(dtype):
        """Split a tensor array by a lengths tensor and compare against
        ``np.split``."""
        mod = tvm.IRModule()
        p = Prelude(mod)

        # tensor array
        v1 = relay.var('v1')
        v2 = relay.var('v2')
        # FIX: the name hint was 'v2' (copy-paste error); use 'v3'.
        v3 = relay.var('v3')
        tensor_array = p.get_var('tensor_array', dtype)
        tensor_array1 = tensor_array(relay.const(3))
        write_func = p.get_var('tensor_array_write', dtype)
        split_func = p.get_var('tensor_array_split', dtype)
        tensor2 = p.get_var('tensor2', dtype)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))

        # value tensor
        value = relay.var('value')

        # lengths tensor
        ta_len = relay.var('length')

        # create the scatter function
        tensor_array_split = split_func(tensor_array1, tensor2(value), ta_len)
        mod["main"] = relay.Function([v1, v2, v3, value, ta_len],
                                     tensor_array_split)

        # initialize and check
        v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        value_data = np.random.uniform(low=0.0, high=8.0, size=(4, 3)).astype(dtype)
        length_data = np.array([2, 2], dtype="int32")
        expected = np.concatenate([value_data, v3_data])
        expected = np.split(expected, indices_or_sections=[2, 4])
        check_tensor_array(mod, expected, *(v1_data, v2_data, v3_data,
                                            value_data, length_data),
                           dtype=dtype)
예제 #24
0
def test_match_effect_exactly_once():
    """The scrutinee of a match must be evaluated exactly once.

    Each evaluation of ``data`` appends a unit tuple to the ref-held list, so
    the matched list has length equal to the number of evaluations; the second
    clause (length exactly 1) firing proves it was evaluated once.
    """
    mod = relay.Module()
    p = Prelude(mod)

    # the list should be of length 1!
    # Unless we mistakenly execute the data clause more than once
    r = relay.Var('r')
    data = seq(relay.RefWrite(r, p.cons(relay.Tuple([]), relay.RefRead(r))), relay.RefRead(r))
    match = relay.Let(
        r, relay.RefCreate(p.nil()),
        relay.Match(data, [
            relay.Clause(relay.PatternConstructor(p.nil, []), relay.const(0)),
            relay.Clause(
                relay.PatternConstructor(
                    p.cons,
                    [relay.PatternWildcard(), relay.PatternConstructor(p.nil, [])]),
                relay.const(1)),
            relay.Clause(relay.PatternWildcard(), relay.const(2))
        ]))

    match_val = run_as_python(match, mod)
    # 1 == "matched the singleton-list clause", i.e. exactly one evaluation.
    assert_tensor_value(match_val, 1)
예제 #25
0
def test_adt_compose():
    """Compose add-one with itself via the Prelude ``compose``, round-trip the
    compiled executable through save/load, and check the VM computes x + 2."""
    mod = relay.Module()
    p = Prelude(mod)

    compose = p.compose

    # add_one = fun x -> x + 1
    sb = relay.ScopeBuilder()
    x = relay.var('x', 'float32')
    x1 = sb.let('x1', x)
    xplusone = x1 + relay.const(1.0, 'float32')
    sb.ret(xplusone)
    body = sb.get()
    add_one = relay.GlobalVar("add_one")
    add_one_func = relay.Function([x], body)

    # add_two = compose(add_one, add_one)
    sb = relay.ScopeBuilder()
    y = relay.var('y', 'float32')
    add_two_func = sb.let('add_two', compose(add_one_func, add_one_func))
    add_two_res = add_two_func(y)
    sb.ret(add_two_res)
    add_two_body = sb.get()

    mod[add_one] = add_one_func

    f = relay.Function([y], add_two_body)
    mod["main"] = f

    # Save the executable to (code, lib) and load it back into a fresh VM.
    exe = create_exec(mod)
    code, lib = exe.save()
    des_exec = _vm.Executable.load_exec(code, lib)
    des_vm = _vm.VirtualMachine(des_exec)
    des_vm.init(tvm.cpu())

    x_data = np.array(np.random.rand()).astype('float32')
    result = veval(des_vm, x_data)

    # add_one applied twice: x + 2.
    tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0)
예제 #26
0
def test_list_update(target, dev):
    """Overwrite each slot of a zero-initialized 10-element list with its
    index via ``update`` and check the result reads back as 0..9.

    Parameters
    ----------
    target, dev
        Compilation target and device, supplied by the test harness.
    """
    expected = list(range(10))

    mod = tvm.IRModule()
    p = Prelude(mod)

    _, cons, nil = mod.get_type("List")
    update = mod.get_global_var("update")

    l = nil()
    # create zero initialized list
    # FIX: the loop index was unused — iterate the list directly, only the
    # element count matters here.
    for _ in expected:
        l = cons(relay.const(0), l)

    # set value
    for i, v in enumerate(expected):
        l = update(l, relay.const(i), relay.const(v))

    f = relay.Function([], l)
    mod["main"] = f
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
예제 #27
0
 def __init__(self):
     """Initialize per-graph conversion state for the TF-to-Relay importer.

     All containers start empty; the Relay module and its Prelude are
     created eagerly so converted nodes can be registered as we go.
     """
     self._nodes = {}
     self._tf_node_map = {}
     self._params = {}
     # FIX: ``self._input_shapes`` was initialized twice; the duplicate
     # assignment has been removed.
     self._input_shapes = {}
     self._output_shapes = {}
     self._num_rnn_layer = False
     self._loops = {}
     self._branches = {}
     self._mod = IRModule({})
     self._prelude = Prelude(self._mod)
     self._control_flow_node_map = defaultdict(set)
     self._loop_body_order = {}
     self._loop_var_order = {}
     self._lvar2expr = {}
     self._lname_map = {}
     self._sorted_cf_node_names = []
     self._while_loop_name_set = set()
     # The main graph's proto is this object itself (subgraphs point back here).
     self._main_graph_proto = self
     self._tensor_array_shapes = {}
     self._tensor_array_shape_nodes = {}
예제 #28
0
def test_list_update():
    """Overwrite each slot of a zero-initialized 10-element list with its
    index via ``update`` and check the result reads back as 0..9."""
    expected = list(range(10))

    mod = relay.Module()
    p = Prelude(mod)

    nil = p.nil
    cons = p.cons
    update = p.update

    l = nil()
    # create zero initialized list
    # FIX: the loop index was unused — iterate the list directly, only the
    # element count matters here.
    for _ in expected:
        l = cons(relay.const(0), l)

    # set value
    for i, v in enumerate(expected):
        l = update(l, relay.const(i), relay.const(v))

    f = relay.Function([], l)
    mod["main"] = f
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
def test_multiple_constructor_clauses():
    """Clauses covering lengths 1, 2, 0 and >=2 are jointly exhaustive, so
    ``unmatched_cases`` must report nothing."""
    mod = tvm.IRModule()
    p = Prelude(mod)

    v = relay.Var('v')
    match = relay.Match(
        v,
        [
            # list of length exactly 1
            relay.Clause(
                relay.PatternConstructor(p.cons, [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(p.nil, [])
                ]), v),
            # list of length exactly 2
            relay.Clause(
                relay.PatternConstructor(p.cons, [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(p.cons, [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(p.nil, [])
                    ])
                ]), v),
            # empty list
            relay.Clause(relay.PatternConstructor(p.nil, []), v),
            # list of length 2 or more
            relay.Clause(
                relay.PatternConstructor(p.cons, [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(
                        p.cons,
                        [relay.PatternWildcard(),
                         relay.PatternWildcard()])
                ]), v)
        ])
    assert len(unmatched_cases(match, mod)) == 0
예제 #30
0
    def __init__(self, do_aot, use_gpu, *args):
        """Build the forward pass and pick a compilation strategy.

        Parameters
        ----------
        do_aot : bool
            If True, ahead-of-time compile the forward function; otherwise
            evaluate it through the interpreter-style executor.
        use_gpu : bool
            Select the CUDA target/context instead of LLVM/CPU.
        *args
            Forwarded to ``self.compute`` to build inputs, body and return type.
        """
        assert isinstance(do_aot, bool)
        assert isinstance(use_gpu, bool)
        self.mod = Module()
        self.prelude = Prelude(self.mod)
        self.use_gpu = use_gpu
        self.context = tvm.gpu(0) if use_gpu else tvm.cpu(0)
        self.target = tvm.target.cuda() if use_gpu else tvm.target.create('llvm')
        self.executor = create_executor(mod=self.mod, ctx=self.context, target=self.target)
        self.parameters = []
        self.forward_var = relay.GlobalVar('forward_var')

        # Set up forward pass.
        inputs, body, ret_type = self.compute(*args)
        self.inputs = inputs

        # FIX: dropped the redundant list() wrappers around the comprehensions.
        forward_compute = relay.Function(inputs + [p[0] for p in self.parameters], body, ret_type)
        self.mod[self.forward_var] = forward_compute
        self.mod['main'] = self.mod[self.forward_var]
        if do_aot:
            self.forward = aot.compile(self.forward_var, self.mod, ctx=self.context, tgt=self.target)
        else:
            self.forward = self.executor.evaluate(self.forward_var)
        # Placeholders for runtime inputs followed by the parameter values.
        self.args = [None] * len(inputs) + [p[1] for p in self.parameters]