Example #1
def test_constructor_type():
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)

    a = relay.TypeVar("a")
    x = relay.Var("x", a)
    func = relay.Function([x], constructor(x), box(a), [a])
    mod["main"] = func
    mod = infer_mod(mod)
    func_ty = mod["main"].checked_type
    box = mod.get_global_type_var("box")
    expected = relay.FuncType([a], box(a), [a])
    assert func_ty == expected
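
Examples #1 and #2 rely on two helpers the snippets don't show. A minimal sketch of plausible definitions, assuming a single-constructor box ADT and a plain type-inference pass (the originals may differ):

def initialize_box_adt(mod):
    # register a simple single-constructor ADT `box[tv]` in the module
    box = relay.GlobalTypeVar("box")
    tv = relay.TypeVar("tv")
    constructor = relay.Constructor("constructor", [tv], box)
    mod[box] = relay.TypeData(box, [tv], [constructor])
    return box, constructor

def infer_mod(mod):
    # run type inference over the whole module
    return relay.transform.InferType()(mod)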
Example #2
def test_adt_match_type_annotations():
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)

    # the only type annotation is inside the match pattern var
    # but that should be enough info
    tt = relay.TensorType((2, 2), "float32")
    x = relay.Var("x")
    mv = relay.Var("mv", tt)
    match = relay.Match(
        constructor(x),
        [
            relay.Clause(
                relay.PatternConstructor(constructor, [relay.PatternVar(mv)]),
                relay.Tuple([]))
        ],
    )

    mod["main"] = relay.Function([x], match)
    mod = infer_mod(mod)
    ft = mod["main"].checked_type
    assert ft == relay.FuncType([tt], relay.TupleType([]))
Example #3
def test_function():
    param_names = ['a', 'b', 'c', 'd']
    params = tvm.convert(
        [relay.Param(relay.Var(n), None) for n in param_names])
    ret_type = None
    body = None
    type_params = tvm.convert([])
    fn = relay.Function(params, ret_type, body, type_params)
    assert fn.params == params
    assert fn.body == body
    assert fn.type_params == type_params
    assert fn.span is None
    str(fn)
Example #4
def test_function():
    param_names = ['a', 'b', 'c', 'd']
    params = tvm.convert([relay.Var(n) for n in param_names])
    ret_type = relay.TupleType(tvm.convert([]))
    body = relay.Tuple(tvm.convert([]))
    type_params = tvm.convert([])
    fn = relay.Function(params, body, ret_type, type_params)
    assert fn.params == params
    assert fn.body == body
    assert fn.type_params == type_params
    assert fn.span is None
    str(fn)
    check_json_roundtrip(fn)
Example #5
def test_map():
    a = relay.TypeVar("a")
    b = relay.TypeVar("b")
    lhs = prelude.mod[map].checked_type
    rhs = relay.FuncType([relay.FuncType([a], b), rlist(a)], rlist(b), [a, b])
    assert lhs == rhs

    x = relay.Var("x")
    add_one = relay.Function([x], s(x))
    res = intrp.evaluate(map(add_one, cons(z(), cons(z(), nil()))))
    ones = to_list(res)
    assert len(ones) == 2
    assert count(ones[0]) == 1 and count(ones[1]) == 1
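
test_map and the other Prelude-based tests in this section (Examples #6, #11, #13) assume shared scaffolding defined elsewhere in the test file. A sketch of what it plausibly looks like; accessor names vary across TVM versions, and the field-count checks below deliberately avoid depending on constructor names:

mod = tvm.IRModule()
prelude = Prelude(mod)
prelude.mod.import_from_std("nat.rly")  # assumption: nats come from the std prelude
rlist, cons, nil = prelude.mod.get_type("List")  # list type and its constructors
nat, z, s = prelude.mod.get_type("nat")          # Peano naturals: zero, successor
_, some, none = prelude.mod.get_type("Option")
map = prelude.mod.get_global_var("map")
foldr = prelude.mod.get_global_var("foldr")
intrp = relay.create_executor("debug", mod=mod)

def count(n):
    # measure a runtime nat: zero has no fields, successor has one
    return 0 if not n.fields else 1 + count(n.fields[0])

def to_list(lst):
    # convert a runtime list: nil has no fields, cons has head and tail
    ret = []
    while lst.fields:
        ret.append(lst.fields[0])
        lst = lst.fields[1]
    return ret

def build_nat(n):
    # build the Relay expression s(s(...z())) for the Python integer n
    return z() if n == 0 else s(build_nat(n - 1))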
Example #6
def test_optional_matching():
    x = relay.Var('x')
    y = relay.Var('y')
    v = relay.Var('v')
    condense = relay.Function(
        [x, y],
        relay.Match(x, [
            relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(v)]),
                         cons(v, y)),
            relay.Clause(relay.PatternConstructor(none), y)
        ]))

    res = intrp.evaluate(
        foldr(
            condense, nil(),
            cons(some(build_nat(3)),
                 cons(none(), cons(some(build_nat(1)), nil())))))

    reduced = to_list(res)
    assert len(reduced) == 2
    assert count(reduced[0]) == 3
    assert count(reduced[1]) == 1
Example #7
def fuse_partitions(pre_mod, mid_mod, post_mod):
    """Combine prefix, middle, and suffix modules into a single module.

    The combined module includes an additional `main` that fuses all three
    partitions together.

    Parameters
    ----------
    pre_mod : tvm.IRModule
        Module containing an input quantization function

    mid_mod : tvm.IRModule
        Module containing core of a quantized inference function

    post_mod : tvm.IRModule
        Module containing an output dequantization function

    Returns
    -------
    fused_mod : tvm.IRModule
        Module containing the input quantization, core quantized inference,
        output dequantization, and full quantized inference functions
    """
    pre_func = pre_mod['main']
    mid_func = mid_mod['main']
    post_func = post_mod['main']
    # create a module containing the prefix, middle, and suffix partitions
    fused_mod = tvm.IRModule(functions={
        relay.GlobalVar('quantize_inputs'): pre_func,
        relay.GlobalVar('quantized_main'): mid_func,
        relay.GlobalVar('dequantize_outputs'): post_func,
    })
    # construct a `main` that strings together the partitions, such that its
    # behaviour is equivalent to `main` in an *unpartitioned* module
    scope_builder = relay.ScopeBuilder()
    fused_mod_main_params = [relay.Var(param.name_hint) for param in pre_func.params]
    quantized_inputs = scope_builder.let('quantized_inputs', relay.Call(
        fused_mod.get_global_var('quantize_inputs'),
        fused_mod_main_params
    ))
    quantized_outputs = scope_builder.let('quantized_outputs', relay.Call(
        fused_mod.get_global_var('quantized_main'),
        [relay.TupleGetItem(quantized_inputs, i) for i in range(len(pre_func.ret_type.fields))]
    ))
    dequantized_outputs = scope_builder.let('dequantized_outputs', relay.Call(
        fused_mod.get_global_var('dequantize_outputs'),
        [quantized_outputs]
    ))
    scope_builder.ret(dequantized_outputs)
    fused_mod['main'] = relay.Function(fused_mod_main_params, scope_builder.get())
    return fused_mod
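
For context, a driver along these lines (a sketch; `partition_prefix` and `partition_suffix` appear in Examples #8 and #29 below) produces the three partitions that fuse_partitions expects:

def partition_conversions(mod, quantized_dtypes):
    # split off input quantization, then output dequantization, then recombine
    pre_mod, mid_mod = partition_prefix(mod, quantized_dtypes)
    mid_mod, post_mod = partition_suffix(mid_mod, quantized_dtypes)
    return fuse_partitions(pre_mod, mid_mod, post_mod)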
Example #8
def partition_prefix(mod, quantized_dtypes):
    """Extract input quantization expressions from `mod['main']`.

    Parameters
    ----------
    mod : tvm.IRModule
        Module containing a quantized inference function

    quantized_dtypes : Set[str]
        Set of data types allowed in quantized operators

    Returns
    -------
    pre_mod : tvm.IRModule
        Module containing the input quantization function

    mid_mod : tvm.IRModule
        Module containing a function with everything except for input quantization
    """
    assert len(mod.functions) == 1
    func = mod['main']
    prefix_cutter = PrefixCutter(func.params, quantized_dtypes)
    mid_body = prefix_cutter.visit(func.body)
    assert not func.type_params, 'unimplemented'
    assert func.attrs is None, 'unimplemented'
    mid_func = relay.Function(
        relay.analysis.free_vars(mid_body),
        mid_body)
    mid_mod = tvm.IRModule.from_expr(mid_func)

    scope_builder = prefix_cutter.prefix_sb
    # make sure we pass through all inputs in the prefix function's return expr
    # (even those that don't require quantization)
    ret_expr = []
    for param in mid_func.params:
        if param in prefix_cutter.prefix_binding_map:
            # this param required a conversion, so we collected it in the
            # prefix cutter pass, and we can use the pass's mapping from mid
            # func params to pre func params
            ret_expr.append(prefix_cutter.prefix_binding_map[param])
        else:
            # there was no detected conversion for this argument, so we thread
            # it through the prefix function untouched
            ret_expr.append(relay.Var(param.name_hint, param.checked_type))
    ret_expr = relay.Tuple(ret_expr)
    scope_builder.ret(ret_expr)
    pre_func_body = scope_builder.get()
    pre_func = relay.Function(relay.analysis.free_vars(pre_func_body), pre_func_body)
    pre_mod = tvm.IRModule.from_expr(pre_func)

    return pre_mod, mid_mod
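
By construction, applying `mid_mod['main']` to the fields of the tuple returned by `pre_mod['main']` reproduces the original function's behavior. A hedged illustration of the split (the dtype set and the printed bodies depend entirely on the input module):

# hypothetical usage: `mod` holds a quantized inference function
pre_mod, mid_mod = partition_prefix(mod, {"int8", "uint8", "int32"})
print(pre_mod["main"])   # float inputs -> tuple of quantized inputs
print(mid_mod["main"])   # quantized inputs -> quantized outputs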
Example #9
def test_ref_execution_order():
    # we want to have effects execute from left to right
    x = relay.Var('x')
    y = relay.Var('y')
    f = relay.Var('f')
    r = relay.Var('r')

    expr = relay.Let(f, relay.Function([x, y], x),
                     # r = 1
                     relay.Let(r, relay.RefCreate(relay.const(1)),
                               relay.Tuple([
                                   # should be 1
                                   relay.RefRead(r),
                                   # set r to 2 and read back
                                   seq(relay.RefWrite(r, relay.const(2)),
                                       relay.RefRead(r)),
                                   # set r to 3 and read back
                                   seq(relay.RefWrite(r, relay.const(3)),
                                       relay.RefRead(r)),
                                   # set r to 4 and read as first arg to f
                                   # set r to 5 and read as second arg to f
                                   # f should evaluate to 4
                                   f(
                                       seq(relay.RefWrite(r, relay.const(4)),
                                           relay.RefRead(r)),
                                       seq(relay.RefWrite(r, relay.const(5)),
                                           relay.RefRead(r))),
                                   # read back 5
                                   relay.RefRead(r)
                               ])))

    tup_val = run_as_python(expr)
    assert_tuple_value(tup_val, 5)
    assert_tensor_value(tup_val.fields[0], 1)
    assert_tensor_value(tup_val.fields[1], 2)
    assert_tensor_value(tup_val.fields[2], 3)
    assert_tensor_value(tup_val.fields[3], 4)
    assert_tensor_value(tup_val.fields[4], 5)
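
This test (and test_ref_write in Example #26) leans on a few helpers that aren't shown. Minimal sketches consistent with how they are used above (actual definitions may differ):

def seq(first, second):
    # evaluate `first` for its side effect, discard its value, return `second`
    return relay.Let(relay.Var("_"), first, second)

def assert_tuple_value(val, length):
    # the runtime value should be a tuple with exactly `length` fields
    assert len(val.fields) == length

def assert_tensor_value(val, expected):
    # the runtime value should be a tensor equal to `expected`
    np.testing.assert_allclose(val.asnumpy(), np.array(expected))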
Example #10
def test_order():
    z = relay.const(3)
    y = relay.const(2)
    x = relay.const(1)
    val = x + y * z
    check_eval(val, 7.0)
    anf = transform.OptimizeOnExpr(
        val, [transform.ToANormalForm(),
              transform.InferType()])
    a = relay.Var('a', relay.IncompleteType())
    b = relay.Var('b', relay.IncompleteType())
    c = relay.Var('c', relay.IncompleteType())
    d = relay.Var('d', relay.IncompleteType())
    e = relay.Var('e', relay.IncompleteType())
    expected_output = e
    expected_output = relay.Let(e, a + d, expected_output)
    expected_output = relay.Let(d, b * c, expected_output)
    expected_output = relay.Let(c, z, expected_output)
    expected_output = relay.Let(b, y, expected_output)
    expected_output = relay.Let(a, x, expected_output)
    expected_output = transform.OptimizeOnExpr(expected_output,
                                               transform.InferType())
    assert alpha_equal(anf, expected_output)
Example #11
def test_foldr():
    a = relay.TypeVar("a")
    b = relay.TypeVar("b")
    lhs = prelude.mod[foldr].checked_type
    rhs = relay.FuncType([relay.FuncType([a, b], b), b, rlist(a)], b, [a, b])
    assert lhs == rhs

    x = relay.Var("x")
    y = relay.Var("y")
    identity = relay.Function([x, y], cons(x, y))
    res = intrp.evaluate(
        foldr(
            identity,
            nil(),
            cons(
                make_nat_expr(prelude, 1),
                cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
            ),
        )
    )
    same = to_list(res)
    assert len(same) == 3
    assert count(same[0]) == 1 and count(same[1]) == 2 and count(same[2]) == 3
Example #12
def test_function_invalidate():
    shape = ()
    dtype = "bool"
    t = relay.TensorType(shape, dtype)
    d = relay.Var("d", t)
    r = relay.Var("r")
    fetch = relay.Function([], relay.RefRead(r))
    fet = relay.Var("fetch")
    fet_obscured = relay.Var("fetch_obscured")
    u = relay.Var("u")
    body = relay.If(d, fet_obscured(), fet_obscured())
    body = relay.Let(u, relay.RefWrite(r, relay.const(1)), body)
    body = relay.Let(fet_obscured, relay.If(d, fet, fet), body)
    body = relay.Let(fet, fetch, body)
    body = relay.Let(r, relay.RefCreate(relay.const(0)), body)
    f = relay.Function([d], body)
    f = infer_type(f)
    pe_f = infer_type(partial_evaluate(f))
    ex = create_executor()
    f_res = ex.evaluate(f)(relay.const(True))
    pe_f_res = ex.evaluate(pe_f)(relay.const(True))
    np.testing.assert_allclose(f_res.asnumpy(), np.ones_like(f_res.asnumpy()))
    np.testing.assert_allclose(pe_f_res.asnumpy(), np.ones_like(pe_f_res.asnumpy()))
Example #13
def test_wildcard_match_order():
    x = relay.Var("x", rlist(nat()))
    y = relay.Var("y")
    a = relay.Var("a")
    return_zero = relay.Function(
        [x],
        relay.Match(
            x,
            [
                relay.Clause(relay.PatternWildcard(), z()),
                relay.Clause(
                    relay.PatternConstructor(
                        cons, [relay.PatternVar(y),
                               relay.PatternVar(a)]), y),
                relay.Clause(relay.PatternConstructor(nil), s(z())),
            ],
        ),
        nat(),
    )

    res = intrp.evaluate(return_zero(cons(s(z()), nil())))
    # clauses are tried in order, so the wildcard listed first matches everything
    assert count(res) == 0
Example #14
    def verify_upsampling(dshape,
                          scale_h,
                          scale_w,
                          layout,
                          method,
                          align_corners=False):

        if layout == "NCHW":
            (n, c, h, w) = dshape
            x_data = np.random.uniform(size=(n, c, h, w)).astype("float32")

        elif layout == "NHWC":
            (n, h, w, c) = dshape
            x_data = np.random.uniform(size=(n, h, w, c)).astype("float32")

        if method == "nearest_neighbor":
            ref_res = tvm.topi.testing.upsampling_python(
                x_data, (scale_h, scale_w), layout)
        else:
            ref_res = tvm.topi.testing.bilinear_resize_python(
                x_data, (int(round(h * scale_h)), int(round(w * scale_w))),
                layout)
        x = relay.Var("x", relay.TensorType(dshape, "float32"))
        scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
        scale_w_var = relay.var("scale_h", relay.TensorType((), "float32"))

        z = relay.nn.upsampling(x,
                                scale_h_var,
                                scale_w_var,
                                method=method,
                                layout=layout,
                                align_corners=align_corners)
        zz = run_infer_type(z)
        func = relay.Function([x, scale_h_var, scale_w_var], z)

        for target, ctx in ctx_list():
            if "llvm" not in target: continue
            for kind in ["vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func)
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate()(x_data,
                                          np.array(scale_h).astype("float32"),
                                          np.array(scale_w).astype("float32"))
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res,
                                            rtol=1e-4,
                                            atol=1e-6)
Example #15
def test_head_cons():
    mod = relay.Module()
    p = Prelude(mod)

    def hd_impl():
        a = relay.TypeVar("a")
        x = relay.Var("x", p.l(a))
        y = relay.Var("y")
        z = relay.Var("z")
        cons_case = relay.Clause(
            relay.PatternConstructor(
                p.cons,
                [relay.PatternVar(y), relay.PatternVar(z)]), y)
        return relay.Function([x], relay.Match(x, [cons_case]), a, [a])

    t = relay.TypeVar("t")
    x = relay.Var("x", t)
    hd = relay.Var("hd")
    body = relay.Let(hd, hd_impl(), hd(p.cons(x, p.nil())))
    f = relay.Function([x], body, None, [t])
    f = infer_type(f, mod=mod)
    res = dcpe(f)
    assert alpha_equal(res, relay.Function([x], x, t, [t]))
Example #16
    def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
        x = relay.Var("x", relay.TensorType(dshape, dtype))
        func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
        for target, dev in tvm.testing.enabled_targets():
            if (
                dtype == "float16"
                and target == "cuda"
                and not have_fp16(tvm.cuda(0).compute_version)
            ):
                continue
            data = np.random.uniform(size=dshape).astype(dtype)
            ref_res = data.reshape(oshape)
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
Example #17
def test_dyn_broadcast_to():
    dtype = 'uint8'
    rank = 3
    shape_type = 'int64'
    dyn_shape = relay.Var("shape", relay.ty.TensorType((rank, ), shape_type))
    x_shape = (1, )
    x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
    z = relay.broadcast_to(x, dyn_shape)
    zz = run_infer_type(z)

    assert zz.checked_type == relay.ty.TensorType((relay.Any(), ) * rank, dtype)

    func = relay.Function([x, dyn_shape], z)

    x = np.random.uniform(size=x_shape).astype(dtype)
    dyn_shape = (1, ) * rank
    ref_res = np.broadcast_to(x, dyn_shape)
    for target, ctx in tvm.testing.enabled_targets():
        for kind in ["vm", "debug"]:
            mod = tvm.ir.IRModule.from_expr(func)
            intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x, np.array(dyn_shape).astype(shape_type))
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
Example #18
    def verify_zeros_ones(shape, dtype):
        for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
            rank = len(shape)
            dyn_shape = relay.Var("shape",
                                  relay.ty.TensorType((rank, ), "int64"))
            y = op(dyn_shape, dtype)
            yy = run_infer_type(y)
            assert yy.checked_type == relay.ty.TensorType(
                (relay.Any(), ) * rank, dtype)

            func = relay.Function([dyn_shape], y)
            ref_res = ref(shape, dtype)
            verify_func(executor_kind, func, [np.array(shape).astype("int64")],
                        ref_res.astype("int64"))
Example #19
    def __init__(self):
        self.shape = tvm.runtime.convert([1, 2, 3])
        self.tt = relay.TensorType(self.shape, "float32")
        self.int32 = relay.TensorType([], "int32")
        self.float32 = relay.TensorType([], "float32")
        self.one = relay.const(1.0)
        self.two = relay.const(2.0)
        self.three = relay.const(3.0)
        self.a = relay.Var("a", self.float32)
        self.b = relay.Var("b", self.float32)
        self.c = relay.Var("c", self.float32)
        self.d = relay.Var("d", self.float32)
        self.e = relay.Var("e", self.float32)
        self.x = relay.Var("x", self.int32)
        self.y = relay.Var("y", self.int32)
        self.z = relay.Var("z", self.int32)
Example #20
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);
    """
    f = relay.Var("f")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    funcbody = relay.If(equal(n, relay.const(0)),
                        data,
                        relay.Call(f, [subtract(n, relay.const(1)),
                                       log(data)]))
    value = relay.Function([n, data], funcbody, e.float32, [])
    orig = relay.Let(f, value, relay.Call(f, [relay.const(2), relay.const(10000.0)]))
    assert alpha_equal(dead_code_elimination(orig), orig)
    assert alpha_equal(dead_code_elimination(relay.Let(f, value, e.three)), e.three)
Example #21
def test_let():
    lv = relay.Var('x')
    ty = None
    arr = tvm.nd.array(10)
    value = relay.Constant(arr)
    # I would prefer that the order of arguments
    # matches syntax let x: t = v in b
    let = relay.Let(lv, value, lv, ty)
    assert let.var == lv
    assert let.value == value
    assert let.value_type == ty
    assert let.body == lv
    assert let.span is None
    str(let)
Example #22
def test_ref():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    r = relay.Var("r")
    u = relay.Var("u")
    body = relay.RefRead(r)
    body = relay.Let(u, relay.RefWrite(r,
                                       relay.RefRead(r) + relay.RefRead(r)),
                     body)
    body = relay.Let(r, relay.RefCreate(x), body)
    func = relay.Function([x], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    ex = create_executor()
    forward, (grad_x, ) = ex.evaluate(back_func)(x_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
    tvm.testing.assert_allclose(grad_x.asnumpy(),
                                2 * np.ones_like(grad_x.asnumpy()))
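
A quick sanity check on the expected values: the reference cell is overwritten with read(r) + read(r) = x + x before the final read, so the forward result is 2 * x and the gradient of each output element with respect to x is 2.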
Example #23
def test_nested_match_pattern():
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    w = relay.Var("w")
    match = relay.Let(
        v,
        box_ctor(box_ctor(relay.const(2))),
        relay.Match(
            v,
            [
                relay.Clause(
                    relay.PatternConstructor(box_ctor, [
                        relay.PatternConstructor(box_ctor,
                                                 [relay.PatternVar(w)])
                    ]),
                    w,
                )
            ],
        ),
    )
    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 2)
Example #24
def test_global_function():
    m = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.Var("x", t)
    d = GlobalVar("double")
    m[d] = relay.Function([x], x + x)
    y = relay.Var("y", t)
    q = GlobalVar("q")
    m[q] = relay.Function([y], d(d(y)))
    g = GlobalVar("grad")
    m = tvm.relay.transform.InferType()(m)
    m[g] = tvm.relay.transform.gradient(q, m)
    m = tvm.relay.transform.InferType()(m)
    back_func = m[g]
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor(mod=m)
    x = rand(dtype, *shape)
    forward, (grad, ) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
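
The expected values follow from q(y) = double(double(y)) = 4 * y: the forward pass returns 4 * x and the gradient with respect to x is 4 everywhere.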
Example #25
def test_multiple_constructor_clauses():
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    match = relay.Match(
        v,
        [
            # list of length exactly 1
            relay.Clause(
                relay.PatternConstructor(cons, [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(nil, [])
                ]),
                v,
            ),
            # list of length exactly 2
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(cons, [
                            relay.PatternWildcard(),
                            relay.PatternConstructor(nil, [])
                        ]),
                    ],
                ),
                v,
            ),
            # empty list
            relay.Clause(relay.PatternConstructor(nil, []), v),
            # list of length 2 or more
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(
                            cons,
                            [relay.PatternWildcard(),
                             relay.PatternWildcard()]),
                    ],
                ),
                v,
            ),
        ],
    )
    assert len(unmatched_cases(match, mod)) == 0
Example #26
def test_ref_write():
    # check that the result of a ref write is an empty tuple
    v = relay.Var('v')
    initial_write = relay.Let(v, relay.RefCreate(relay.Tuple([relay.const(1)])),
                              relay.RefWrite(v, relay.Tuple([relay.const(2)])))
    write_val = run_as_python(initial_write)
    assert_tuple_value(write_val, 0)

    # now ensure that the value, once written, can be read back
    # (we read the value before and after mutation)
    w = relay.Var('w')
    read_after_write = relay.Let(
        v, relay.RefCreate(relay.Tuple([relay.const(1)])),
        relay.Let(
            w, relay.RefCreate(relay.RefRead(v)),
            seq(relay.RefWrite(v, relay.Tuple([relay.const(2)])),
                relay.Tuple([relay.RefRead(w), relay.RefRead(v)]))))
    read_val = run_as_python(read_after_write)
    assert_tuple_value(read_val, 2)
    assert_tuple_value(read_val.fields[0], 1)
    assert_tuple_value(read_val.fields[1], 1)
    assert_tensor_value(read_val.fields[0].fields[0], 1)
    assert_tensor_value(read_val.fields[1].fields[0], 2)
Example #27
def test_let():
    lv = relay.Var("x")
    ty = None
    arr = tvm.nd.array(10)
    value = relay.Constant(arr)
    # I would prefer that the order of arguments
    # matches syntax let x: t = v in b
    let = relay.Let(lv, value, lv)
    assert let.var == lv
    assert let.value == value
    assert let.body == lv
    assert let.span is None
    str(let)
    check_json_roundtrip(let)
Example #28
    def verify_upsampling3d(
        dshape, scale_d, scale_h, scale_w, layout, method, coord_trans="half_pixel"
    ):

        if layout == "NCDHW":
            (n, c, d, h, w) = dshape
            x_data = np.random.uniform(size=(n, c, d, h, w)).astype("float32")

        elif layout == "NDHWC":
            (n, d, h, w, c) = dshape
            x_data = np.random.uniform(size=(n, d, h, w, c)).astype("float32")

        if method == "nearest_neighbor":
            ref_res = tvm.topi.testing.upsampling3d_python(
                x_data, (scale_d, scale_h, scale_w), layout
            )
        else:
            ref_res = tvm.topi.testing.trilinear_resize3d_python(
                x_data,
                (int(round(d * scale_d)), int(round(h * scale_h)), int(round(w * scale_w))),
                layout,
            )
        x = relay.Var("x", relay.TensorType(dshape, "float32"))
        scale_d_var = relay.var("scale_d", relay.TensorType((), "float32"))
        scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
        scale_w_var = relay.var("scale_w", relay.TensorType((), "float32"))

        z = relay.nn.upsampling3d(
            x,
            scale_d_var,
            scale_h_var,
            scale_w_var,
            method=method,
            layout=layout,
            coordinate_transformation_mode=coord_trans,
        )
        zz = run_infer_type(z)
        func = relay.Function([x, scale_d_var, scale_h_var, scale_w_var], z)

        for target, dev in enabled_targets():
            for kind in ["vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func)
                intrp = relay.create_executor(kind, mod=mod, device=dev, target=target)
                op_res = intrp.evaluate()(
                    x_data,
                    np.array(scale_d).astype("float32"),
                    np.array(scale_h).astype("float32"),
                    np.array(scale_w).astype("float32"),
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
Example #29
def partition_suffix(mod, quantized_dtypes):
    """Extract output dequantization expressions from `mod['main']`.

    Parameters
    ----------
    mod : tvm.IRModule
        Module containing a quantized inference function

    quantized_dtypes : Set[str]
        Set of data types allowed in quantized operators

    Returns
    -------
    mid_mod : tvm.IRModule
        Module containing a function with everything except for output dequantization

    post_mod : tvm.IRModule
        Module containing the output dequantization function
    """
    assert len(mod.functions) == 1
    func = mod['main']
    suffix_cutter = SuffixCutter(quantized_dtypes)
    post_body = suffix_cutter.visit(func.body)
    assert not func.type_params, 'unimplemented'
    assert func.attrs is None, 'unimplemented'
    post_func = relay.Function(
        relay.analysis.free_vars(post_body),
        post_body,
        func.ret_type)
    post_mod = tvm.IRModule.from_expr(post_func)

    mid_body = suffix_cutter.mid_body
    if mid_body is None:
        # The suffix contains the entire function, meaning there was no
        # quantization boundary in the given mod.  In this case, we use the
        # suffix mod as the middle mod and make the suffix an identity function.
        mid_mod = post_mod
        post_body = relay.Var('input', mid_mod['main'].ret_type)
        post_func = relay.Function(
            [post_body],
            post_body)
        post_mod = tvm.IRModule.from_expr(post_func)
    else:
        mid_func = relay.Function(
            func.params,
            mid_body)
        mid_mod = tvm.IRModule.from_expr(mid_func)

    return mid_mod, post_mod
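
Illustrative use, mirroring partition_prefix above (a sketch; the dtype set is hypothetical):

mid_mod, post_mod = partition_suffix(mod, {"int8", "uint8", "int32"})
# if `mod['main']` has no quantization boundary, the fallback above keeps the
# whole function as the middle partition and makes the suffix an identity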
Example #30
    def verify_upsampling(dshape,
                          scale_h,
                          scale_w,
                          layout,
                          method,
                          align_corners=False):

        if layout == "NCHW":
            (n, c, h, w) = dshape
            x_data = np.random.uniform(size=(n, c, h, w)).astype("float32")

        elif layout == "NHWC":
            (n, h, w, c) = dshape
            x_data = np.random.uniform(size=(n, h, w, c)).astype("float32")

        ref_res = tvm.topi.testing.resize2d_python(
            x_data,
            (scale_h, scale_w),
            layout,
            method[2:] if method[0:2] == "bi" else method,
            "align_corners" if align_corners else "asymmetric",
        )
        x = relay.Var("x", relay.TensorType(dshape, "float32"))
        scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
        scale_w_var = relay.var("scale_w", relay.TensorType((), "float32"))

        z = relay.nn.upsampling(x,
                                scale_h_var,
                                scale_w_var,
                                method=method,
                                layout=layout,
                                align_corners=align_corners)
        zz = run_infer_type(z)
        func = relay.Function([x, scale_h_var, scale_w_var], z)

        for target, dev in tvm.testing.enabled_targets():
            for kind in ["vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func)
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              device=dev,
                                              target=target)
                op_res = intrp.evaluate()(x_data,
                                          np.array(scale_h).astype("float32"),
                                          np.array(scale_w).astype("float32"))
                tvm.testing.assert_allclose(op_res.numpy(),
                                            ref_res,
                                            rtol=1e-4,
                                            atol=1e-6)