Example no. 1
def quantize(data, shift_bits, target_bits=relay.const(7, dtype='int32')):
    """Quantize output of layer, to be consistent with source code @yx

    Question: should the shift_bits participating to network control flow?
            At mxnet quantization with truman's code, the bits number of max_v
            is converted to normal interger using function `asscalar()`. However,
            I cannot find the related function in relay.
            I am confused with the control flow logic in model network, whether
            the condition `shift_bits == -1` should join in model network or just
            left it in python code flow. By Longtao.Wang

    Parameters
    ----------
    shift_bits: tvm.relay.Expr
        The shift_bits parameter is never used according to @yx's source code,
        which always be constant Expr(-1).
    """
    max_v = relay.max(relay.abs(data))
    min_v = relay.min(data)

    ln_max_v = relay.log(relay.cast(max_v, 'float32'))
    ln_2 = relay.log(relay.const(2.))
    total_bits = relay.ceil(relay.divide(ln_max_v, ln_2)) # ceil( ln(max_v) / ln(2) )
    shift_bits = relay.subtract(total_bits.astype('int32'), target_bits)
    shift_bits = relay.maximum(shift_bits, relay.const(0))

    denominator = relay.left_shift(relay.const(1),
            relay.cast(shift_bits, 'int32'))
    out = relay.divide(data, denominator)
    # According to @yx's code, use divide operation instead of shift op for
    # possible negative number round.
    # out = relay.right_shift(data, shift_bits)

    out = relay.cast(relay.clip(out, a_min=-128, a_max=127), 'int8')
    return out, max_v, min_v, shift_bits
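
For reference, the arithmetic above can be sketched in plain NumPy. This is a minimal illustration of the same math, assuming a positive max_v; the function name and structure are mine, not part of the original code:

import numpy as np

def quantize_np(data, target_bits=7):
    # Bits needed for the largest magnitude: ceil(ln(max_v) / ln(2)).
    max_v = np.abs(data).max()
    total_bits = int(np.ceil(np.log(max_v) / np.log(2)))
    # Keep at most `target_bits` bits; never shift by a negative amount.
    shift_bits = max(total_bits - target_bits, 0)
    # Divide instead of right-shifting so negative values round toward zero.
    out = data / (1 << shift_bits)
    return np.clip(out, -128, 127).astype('int8'), shift_bits
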
Example no. 2
def test_recursion():
    """
    Program:
       def f(n: i32, data: f32) -> f32 {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)

    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = relay.Module()
    mod[f] = relay.Function([n, data], sb.get())
    assert "%3 = @f(%1, %2)" in mod.astext()
    assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32)
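
For intuition, @f applies log to data n times. A plain-Python equivalent of the program in the docstring, as a minimal sketch:

import math

def f(n: int, data: float) -> float:
    # f(0, x) = x;  f(n, x) = f(n - 1, log(x))
    return data if n == 0 else f(n - 1, math.log(data))
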
Example no. 3
def test_recursion():
    """
    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)

    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = relay.Module()
    mod[f] = relay.Function([n, data], sb.get())
    assert "@f(%1, %2) /* ty=float32 */" in mod.astext()
    assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32)
Example no. 4
def test_recursion():
    """
    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)

    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = tvm.IRModule()
    mod[f] = relay.Function([n, data], sb.get())
    mod = infer_mod(mod)
    assert "@f(%1, %2)" in mod.astext()
    assert mod["f"].checked_type == relay.FuncType([ti32, tf32], tf32)
Example no. 5
    def expected():
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = set_func_attr(func, "ccompiler", "ccompiler_0")
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive",
                                          tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod["main"] = main
        return mod
Example no. 6
    def expected_different_output_region():
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        z = relay.var("z", shape=(8, 8))

        # The partitioned graph contains log
        i0 = relay.var("i0", shape=(8, 8))
        log = relay.log(i0)
        func = relay.Function([i0], log)
        func = set_func_attr(func, "ccompiler", "ccompiler_0")
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func

        # The partitioned graph contains subtract
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        sub = x0 - y0
        func = relay.Function([x0, y0], sub)
        func = set_func_attr(func, "ccompiler", "ccompiler_1")
        glb_1 = relay.GlobalVar("ccompiler_1")
        mod[glb_1] = func

        add = x + y
        call_log = relay.Call(glb_0, [add])
        call_sub = relay.Call(glb_1, [add, z])
        main = relay.Function([x, y, z], call_log * call_sub)
        mod["main"] = main
        return mod
Example no. 7
    def expected():
        mod = tvm.IRModule()

        # function 0
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10))
        f0_o0 = relay.abs(f0_i0)
        func0 = relay.Function([f0_i0], f0_o0)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", target + "_0")
        gv0 = relay.GlobalVar(target + "_0")
        mod[gv0] = func0

        # body
        data = relay.var('data', shape=(10, 10))
        function_out = gv0(data)
        out_1 = relay.nn.relu(function_out)
        out_2 = relay.tanh(function_out)
        out_3 = relay.log(function_out)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data], out)
        mod["main"] = func
        return mod
Example no. 8
    def expected():
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = func.set_attribute("Primitive", tvm.expr.IntImm("int32", 1))
        func = func.set_attribute("Compiler",
                                  tvm.expr.StringImm("ccompiler"))
        func = func.set_attribute("ExternalSymbol",
                                  tvm.expr.StringImm("ccompiler_0"))
        add_call = relay.Call(func, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.set_attribute("Primitive",
                                              tvm.expr.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod = relay.Module()
        mod["main"] = main
        return mod
Example no. 9
def assign():
    """Assign a const to a varible
    """
    x = relay.var('x', shape=())
    v1 = relay.log(x)
    v2 = relay.add(v1, x)
    return relay.Function([x], v2)
Example no. 10
def test_get_direct_ancestor():
    data = relay.var("data")
    w0 = relay.var("w0")
    out1 = relay.nn.conv2d(data, w0)
    out2 = relay.add(out1, data * relay.expr.const(5.0))
    out3 = out2 + relay.expr.const(2.5)
    w1 = relay.var("w1")
    out = relay.nn.conv2d(out3, w1)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {
        "data": (1, 16, 224, 224),
        "w0": (16, 16, 1, 1),
        "w1": (16, 16, 1, 1)
    })
    target_ops = [relay.op.get("nn.conv2d")]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    visited_dict = {}
    input_names = ["data"]
    out = get_direct_ancestor(node_list, visited_dict, target_ops, 5,
                              input_names)
    assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out)

    # non-regression test
    out = relay.add(relay.log(data), relay.sqrt(data))
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {"data": (1, 16, 224, 224)})
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    out = get_direct_ancestor(node_list, visited_dict, target_ops, 3,
                              input_names)
    assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out)
Example no. 11
    def create_graph():
        data = relay.var('data', shape=(10, 10))
        x = relay.abs(data)
        out_1 = relay.nn.relu(x)
        out_2 = relay.tanh(x)
        out_3 = relay.log(x)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data], out)
        return func
Example no. 12
    def get_func():
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        exp = relay.exp(subtract)

        func = relay.Function([x, y], exp)
        return func
Example no. 14
def test_if():
    x = relay.var("x", shape=(1, 16, 64, 64))
    y = relay.var("y", shape=(1, 16, 64, 64))
    cond = relay.var("cond", shape=(), dtype='uint1')
    net = relay.If(cond, x, y)
    net = relay.log(net)
    func = relay.Function(free_vars(net), net)
    func = run_infer_type(func)
    net = gradient(func, mode='higher_order')
    net = run_infer_type(net)
Example no. 15
def test_if():
    x = relay.var("x", shape=(1, 16, 64, 64))
    y = relay.var("y", shape=(1, 16, 64, 64))
    cond = relay.var("cond", shape=(), dtype='uint1')
    net = relay.If(cond, x, y)
    net = relay.log(net)
    net = relay.ir_pass.infer_type(
        relay.Function(relay.ir_pass.free_vars(net), net))
    back_func = relay.ir_pass.infer_type(
        relay.ir_pass.gradient(net, mode='higher_order'))
Example no. 16
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_dev, cpu_dev)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example no. 17
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_ctx, cpu_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example no. 18
def test_decl():
    """Program:
    def @f(%x : Tensor[(10, 10), float32]) {
        log(%x)
    }
    """
    tp = relay.TensorType((10, 10))
    x = relay.var("x", tp)
    f = relay.Function([x], relay.log(x))
    fchecked = infer_expr(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
Example no. 19
def test_decl():
    """Program:
       def f(x : Tensor[(10, 10), f32]) {
           return log(x);
       }
    """
    tp = relay.TensorType((10, 10))
    x = relay.var("x", tp)
    f = relay.Function([x], relay.log(x))
    fchecked = relay.ir_pass.infer_type(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
Example no. 20
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_dev)

            func = relay.Function([x, y], _exp)
            func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev_dev.device_type))
            return func
Example no. 21
def test_decl():
    """Program:
       def @f(%x : Tensor[(10, 10), float32]) {
           log(%x)
       }
    """
    tp = relay.TensorType((10, 10))
    x = relay.var("x", tp)
    f = relay.Function([x], relay.log(x))
    fchecked = relay.ir_pass.infer_type(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
Example no. 22
        def expected():
            add = relay.add(x, y)
            copy_add_sqrt = relay.device_copy(add, cpu_ctx, dev_ctx)
            sqrt = relay.sqrt(copy_add_sqrt)
            log = relay.log(add)
            copy_sqrt_subtract = relay.device_copy(sqrt, dev_ctx, cpu_ctx)
            subtract = relay.subtract(copy_sqrt_subtract, log)
            copy_sub_exp = relay.device_copy(subtract, cpu_ctx, dev_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example no. 23
    def expected():
        log = relay.log(x)
        _log_left = relay.device_copy(log, dev1, dev2)
        _log_right = relay.device_copy(log, dev1, dev2)
        log2 = relay.log2(_log_left)
        log10 = relay.log10(_log_right)
        add = relay.add(log2, log10)
        _add = relay.device_copy(add, dev2, dev1)
        tan = relay.tan(_add)

        func = run_opt_pass(tan, transform.InferType())
        return func
Example no. 24
    def get_mod():
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        z = relay.var("z", shape=(8, 8))
        add = x + y
        sub = add - z
        log = relay.log(add)
        sub1 = log * sub
        f = relay.Function([x, y, z], sub1)
        mod = tvm.IRModule()
        mod["main"] = f
        return mod
Example no. 25
def test_extern_ccompiler_default_ops():
    def expected():
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func = func.with_attr("Compiler", tvm.tir.StringImm("ccompiler"))
        func = func.with_attr("ExternalSymbol",
                              tvm.tir.StringImm("ccompiler_0"))
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive",
                                          tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod["main"] = main
        return mod

    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    exp = relay.exp(add)
    concat = relay.concatenate([log, exp], axis=0)
    f = relay.Function([x, y], concat)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = WhiteListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)

    fused_mod = transform.FuseOps(2)(mod)
    expected_mod = expected()
    assert relay.alpha_equal(fused_mod, expected_mod)

    x_data = np.random.rand(8, 8).astype('float32')
    y_data = np.random.rand(8, 8).astype('float32')
    np_add = x_data + y_data
    res = np.concatenate([np.log(np_add), np.exp(np_add)])
    check_result(mod, {"x": x_data, "y": y_data}, (16, 8), res)
Example no. 26
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y], _exp)
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            return func
Example no. 27
    def annotated():
        log = relay.log(x)
        _log = relay.annotation.on_device(log, expected_dev_type["log"])
        log2 = relay.log2(_log)
        _log2 = relay.annotation.on_device(log2, expected_dev_type["log2"])
        log10 = relay.log10(_log)
        _log10 = relay.annotation.on_device(log10, expected_dev_type["log10"])
        add = relay.add(_log2, _log10)
        _add = relay.annotation.on_device(add, expected_dev_type["add"])
        tan = relay.tan(_add)
        _tan = relay.annotation.on_device(tan, expected_dev_type["tan"])

        func = run_opt_pass(_tan, transform.RewriteAnnotatedOps(dev1.device_type))
        return func
Example no. 28
    def create_graph():
        data = relay.var('data', shape=(10, 10))
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")
        x = relay.nn.batch_norm(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        out_1 = relay.nn.relu(x[0])
        bn_out_1 = x[1]
        out_2 = relay.tanh(bn_out_1)
        out_3 = relay.log(bn_out_1)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], out)
        return func
Example no. 29
    def annotated():
        log = relay.log(x)
        _log = relay.annotation.on_device(log, expected_dev_type['log'])
        log2 = relay.log2(_log)
        _log2 = relay.annotation.on_device(log2, expected_dev_type['log2'])
        log10 = relay.log10(_log)
        _log10 = relay.annotation.on_device(log10, expected_dev_type['log10'])
        add = relay.add(_log2, _log10)
        _add = relay.annotation.on_device(add, expected_dev_type['add'])
        tan = relay.tan(_add)
        _tan = relay.annotation.on_device(tan, expected_dev_type['tan'])

        func = run_opt_pass(_tan,
                            transform.RewriteAnnotatedOps(ctx1.device_type))
        return func
Example no. 30
        def annotated():
            add = relay.add(x, y)
            _add = relay.annotation.on_device(add, dev_dev)
            sqrt = relay.sqrt(_add)
            _sqrt = relay.annotation.on_device(sqrt, dev_dev)
            log = relay.log(_add)
            _log = relay.annotation.on_device(log, dev_dev)
            subtract = relay.subtract(_sqrt, _log)
            _subtract = relay.annotation.on_device(subtract, dev_dev)
            exp = relay.exp(_subtract)
            _exp = relay.annotation.on_device(exp, dev_dev)

            func = relay.Function([x, y], _exp)
            func = run_opt_pass(func, transform.RewriteAnnotatedOps(cpu_dev.device_type))
            return func
Example no. 31
    def test_log(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.log(data)

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Log'
        assert 'relay_id' in layers[1].attrs
Example no. 32
    def expected():
        mod = tvm.IRModule()
        y = relay.var("y", shape=(8, 8))
        x0 = relay.const(ones)
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([y0], add)
        func = set_func_attr(func, "ccompiler", "ccompiler_0")
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [y])
        log = relay.log(add_call)
        main = relay.Function([y], log)
        mod["main"] = main
        return mod
Example no. 33
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y],
                                  relay.Tuple(tvm.convert([_exp, exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[1]),
                                  func.body[1])
Example no. 34
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y], relay.Tuple(tvm.convert([_exp,
                                                                   exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[1]),
                                  func.body[1])
Example no. 35
def test_dual_op():
    """Program:
       fn (x : Tensor[f32, (10, 10)]) {
         let t1 = log(x);
         let t2 = add(t1, x);
         return t2;
       }
    """
    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", relay.log(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    fchecked = relay.ir_pass.infer_type(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
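
A rough plain-Python reading of the let-bound program above, using NumPy in place of the Relay ops (illustrative only):

import numpy as np

def dual_op(x):
    t1 = np.log(x)   # let t1 = log(x)
    t2 = t1 + x      # let t2 = add(t1, x)
    return t2
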
Example no. 36
def test_dual_op():
    """Program:
       fn (%x : Tensor[(10, 10), float32]) {
         let %t1 = log(%x);
         let %t2 = add(%t1, %x);
         %t2
       }
    """
    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", relay.log(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    fchecked = relay.ir_pass.infer_type(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
Example no. 37
def test_compile_tuple_dup():
    x = relay.var("data", shape=(16, 16))
    log = relay.log(x)
    output = relay.Tuple([log, log])
    f = relay.Function([x], output)
    relay.build(f, 'llvm')
Example no. 38
def test_function_pass():
    shape = (10, )
    dtype = 'float32'
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([x], relay.log(x))
    mod = relay.Module({v_log: log})

    pass_name = "function_pass_test"
    opt_level = 1
    opt_tester = OptTester(mod)
    pass_ctx = None

    @ir_pass.function_pass(opt_level=opt_level, name=pass_name)
    def transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log

    def test_pass_registration():
        function_pass = transform
        assert isinstance(function_pass, ir_pass.FunctionPass)
        pass_info = function_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_pass_registration_no_decorator():
        def direct_transform(expr, ctx):
            return opt_tester.transform(expr, ctx)
        mod_pass = ir_pass.function_pass(direct_transform, opt_level=0)
        assert isinstance(mod_pass, ir_pass.FunctionPass)
        pass_info = mod_pass.info
        assert pass_info.name == "direct_transform"
        assert pass_info.opt_level == 0

    def test_pass_run():
        function_pass = transform
        assert pass_name in function_pass.astext()

        updated_mod = function_pass(mod)
        assert isinstance(updated_mod, relay.Module)

        # Check the log function in the updated module.
        new_v_log = updated_mod.get_global_var(v_log.name_hint)
        new_log = updated_mod[new_v_log]
        check_func(new_log, get_ref_log())

        # Check the log function in the python transformed function.
        ret = opt_tester.transform(log, pass_ctx)
        check_func(new_log, ret)

        # Execute the add function.
        x_nd = get_rand(shape, dtype)
        ref_res = np.log(x_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_log)(x_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_log)(x_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_pass_registration_no_decorator()
    test_pass_run()
Example no. 39
def test_sequential_pass():
    shape = (10, )
    dtype = 'float32'
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_sub = relay.GlobalVar("mySub")
    sub = relay.Function([x, y], relay.subtract(x, y))

    z = relay.var("z", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([z], relay.log(z))

    mod = relay.Module({v_sub: sub, v_log: log})

    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log

    def get_ref_sub():
        ref_sub = relay.Function([x, y],
                                 relay.subtract(
                                     relay.add(x, x), relay.add(y, y)))
        return ref_sub

    def get_ref_abs():
        shape = (5, 10)
        tp = relay.TensorType(shape, "float32")
        a = relay.var("a", tp)
        ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
        return ref_abs

    # Register a module pass.
    opt_tester = OptTester(mod)
    pass_ctx = None

    @ir_pass.module_pass(opt_level=1)
    def mod_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    module_pass = mod_transform

    # Register a function pass.
    @ir_pass.function_pass(opt_level=1)
    def func_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    function_pass = func_transform

    def test_pass_registration():
        passes = [module_pass, function_pass]
        opt_level = 2
        pass_name = "sequential_pass"
        sequential_pass = ir_pass.sequential_pass(passes=passes,
                                                  opt_level=opt_level)
        assert isinstance(sequential_pass, ir_pass.SequentialPass)
        pass_info = sequential_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_no_pass():
        passes = []
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)
        mod_func = ret_mod[v_sub]
        check_func(sub, mod_func)

    def test_only_module_pass():
        passes = [module_pass]
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)
        # Check the subtract function.
        sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, sub)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, abs_func)

    def test_only_function_pass():
        # Check the subtract function.
        passes = [function_pass]
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function.
        log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

    def test_multiple_passes():
        # Reset the current module since mod has been polluted by the previous
        # function pass.
        mod = relay.Module({v_sub: sub, v_log: log})
        passes = [module_pass, function_pass]
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, get_ref_abs())

        # Check the subtract function is modified correctly.
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function is modified correctly.
        _, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

        # Execute the updated subtract function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = np.subtract(x_nd.asnumpy() * 2, y_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

        # Execute the updated abs function.
        x_nd = get_rand((5, 10), dtype)
        ref_res = np.abs(x_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_no_pass()
    test_only_module_pass()
    test_only_function_pass()
    test_multiple_passes()
Example no. 40
    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log