def create_graph():
    data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
    weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
    bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
    bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
    bn_mean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
    bn_var = relay.var("bn_var", relay.TensorType((16,), "float32"))

    # Mark all inputs as belonging to the "test_target" region.
    data_cb = compiler_begin(data, "test_target")
    weight_cb = compiler_begin(weight, "test_target")
    bn_gamma_cb = compiler_begin(bn_gamma, "test_target")
    bn_beta_cb = compiler_begin(bn_beta, "test_target")
    bn_mean_cb = compiler_begin(bn_mean, "test_target")
    bn_var_cb = compiler_begin(bn_var, "test_target")

    conv_o = relay.nn.conv2d(
        data=data_cb, weight=weight_cb, kernel_size=(3, 3), channels=16, padding=(1, 1)
    )

    bn_o = relay.nn.batch_norm(conv_o, bn_gamma_cb, bn_beta_cb, bn_mean_cb, bn_var_cb)

    relu_o = relay.nn.relu(bn_o[0])
    relu_o_ce = compiler_end(relu_o, "test_target")

    # End the region on all three batch_norm outputs.
    bn_omean = bn_o[1]
    bn_omean_ce = compiler_end(bn_omean, "test_target")
    bn_ovar = bn_o[2]
    bn_ovar_ce = compiler_end(bn_ovar, "test_target")

    dummy_mean_abs = relay.abs(bn_omean_ce)
    dummy_var_abs = relay.abs(bn_ovar_ce)
    dummy_tuple = relay.Tuple((relu_o_ce, dummy_mean_abs, dummy_var_abs))

    func = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], dummy_tuple)
    return func
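
These snippets are drawn from TVM's Relay test suite and omit their imports. A minimal preamble that makes the helpers used above resolvable (module paths as in mainline TVM; treat them as an assumption for your TVM version) would be:

import tvm
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end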
Example 2
    def expected():
        mod = tvm.IRModule()

        # function 0
        data = relay.var("test_target_2_i0",
                         relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("test_target_2_i1",
                           relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("test_target_2_i2",
                             relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("test_target_2_i3",
                            relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("test_target_2_i4",
                            relay.TensorType((16, ), "float32"))
        bn_var = relay.var("test_target_2_i5",
                           relay.TensorType((16, ), "float32"))

        conv_o = relay.nn.conv2d(data=data,
                                 weight=weight,
                                 kernel_size=(3, 3),
                                 channels=16,
                                 padding=(1, 1))

        bn_o = relay.nn.batch_norm(conv_o, bn_gamma, bn_beta, bn_mean, bn_var)

        relu_o = relay.nn.relu(bn_o[0])
        tuple_o = relay.Tuple((bn_o[2], bn_o[1], relu_o))

        func0 = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mean, bn_var], tuple_o)
        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", tvm.tir.StringImm("test_target"))
        func0 = func0.with_attr("global_symbol",
                                container.String("test_target_2"))
        gv0 = relay.GlobalVar("test_target_2")
        mod[gv0] = func0

        # body
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3),
                                                      "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16, ), "float32"))

        f0_o = gv0(data, weight, bn_gamma, bn_beta, bn_mean, bn_var)
        f0_relu_o = relay.TupleGetItem(f0_o, 2)
        f0_mean_o = relay.TupleGetItem(f0_o, 1)
        f0_var_o = relay.TupleGetItem(f0_o, 0)

        f0_mean_abs = relay.abs(f0_mean_o)
        f0_var_abs = relay.abs(f0_var_o)
        main_tuple = relay.Tuple((f0_relu_o, f0_mean_abs, f0_var_abs))

        func = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mean, bn_var], main_tuple)
        mod["main"] = func
        return mod
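
This expected() pairs with the create_graph() from the first example; in TVM's partition-graph tests the two are typically compared like this (a sketch, assuming the same pass sequence as the original test):

mod = tvm.IRModule()
mod["main"] = create_graph()
partitioned = relay.transform.PartitionGraph()(mod)
tvm.ir.assert_structural_equal(partitioned, expected(), map_free_vars=True)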
Example 3
def before():
    x = relay.var("x", shape=(10, 10))
    r = relay.nn.relu(x)
    a_1 = relay.abs(r)
    a_2 = relay.abs(r)
    out = relay.add(a_1, a_2)
    f = relay.Function([x], out)
    mod = tvm.IRModule.from_expr(f)
    return mod
    def expected():
        mod = tvm.IRModule()

        # function 0
        data = relay.var("test_target_0_i0",
                         relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("test_target_0_i1",
                           relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("test_target_0_i2",
                             relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("test_target_0_i3",
                            relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("test_target_0_i4",
                            relay.TensorType((16, ), "float32"))
        bn_var = relay.var("test_target_0_i5",
                           relay.TensorType((16, ), "float32"))

        conv_o = relay.nn.conv2d(data=data,
                                 weight=weight,
                                 kernel_size=(3, 3),
                                 channels=16,
                                 padding=(1, 1))

        bn_o = relay.nn.batch_norm(conv_o, bn_gamma, bn_beta, bn_mean, bn_var)

        relu_o = relay.nn.relu(bn_o[0])
        tuple_o = relay.Tuple((relu_o, bn_o[1], bn_o[2]))

        func0 = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mean, bn_var], tuple_o)
        func0 = set_func_attr(func0, "test_target", "test_target_0")
        gv0 = relay.GlobalVar("test_target_0")
        mod[gv0] = func0

        # body
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3),
                                                      "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16, ), "float32"))

        f0_o = gv0(data, weight, bn_gamma, bn_beta, bn_mean, bn_var)
        f0_relu_o = relay.TupleGetItem(f0_o, 0)
        f0_mean_o = relay.TupleGetItem(f0_o, 1)
        f0_var_o = relay.TupleGetItem(f0_o, 2)

        f0_mean_abs = relay.abs(f0_mean_o)
        f0_var_abs = relay.abs(f0_var_o)
        main_tuple = relay.Tuple((f0_relu_o, f0_mean_abs, f0_var_abs))

        func = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mean, bn_var], main_tuple)
        mod["main"] = func
        return mod
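
The set_func_attr helper used in this and later examples is not defined in the excerpt; a sketch consistent with the four with_attr calls spelled out in Example 2:

def set_func_attr(func, compiler, symbol):
    # Assumed reconstruction: mark func as an externally compiled,
    # inlinable primitive with the given global symbol.
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", compiler)
    func = func.with_attr("global_symbol", symbol)
    return func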
Example 5
def after():
    x = relay.var("x", shape=(10, 10))
    cb_1 = relay.annotation.compiler_begin(x, "test")
    r = relay.nn.relu(cb_1)
    ce_1 = relay.annotation.compiler_end(r, "test")
    ce_2 = relay.annotation.compiler_end(r, "test")
    a_1 = relay.abs(ce_1)
    a_2 = relay.abs(ce_2)
    out = relay.add(a_1, a_2)
    f = relay.Function([x], out)
    mod = tvm.IRModule.from_expr(f)
    return mod
def _npu_and_non_npu_functions():
    mod = tvm.IRModule({})

    # NPU function 1
    x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
    max_pool = relay.nn.max_pool2d(x)
    composite_func = relay.Function([x], max_pool)
    composite_func = composite_func.with_attr("Composite", "ethos-u.pooling")
    inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    compiler_func = relay.Function([inp], relay.Call(composite_func, [inp]))
    compiler_func = compiler_func.with_attr("used_memory", [32])
    npu_compiler_func1 = compiler_func.with_attr("Compiler", "ethos-u")
    g1 = relay.GlobalVar("g1")
    mod[g1] = npu_compiler_func1

    # Non-NPU function
    x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
    max_pool = relay.abs(x)
    composite_func = relay.Function([x], max_pool)
    composite_func = composite_func.with_attr("Composite",
                                              "foo.unary_elementwise")
    inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    compiler_func = relay.Function([inp], relay.Call(composite_func, [inp]))
    compiler_func = compiler_func.with_attr("used_memory", [32])
    non_npu_compiler_func = compiler_func.with_attr("Compiler", "foo")
    g2 = relay.GlobalVar("g2")
    mod[g2] = non_npu_compiler_func

    # NPU function 2
    x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
    max_pool = relay.abs(x)
    composite_func = relay.Function([x], max_pool)
    composite_func = composite_func.with_attr("Composite",
                                              "ethos-u.unary_elementwise")
    inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    compiler_func = relay.Function([inp], relay.Call(composite_func, [inp]))
    compiler_func = compiler_func.with_attr("used_memory", [32])
    npu_compiler_func2 = compiler_func.with_attr("Compiler", "ethos-u")
    g3 = relay.GlobalVar("g3")
    mod[g3] = npu_compiler_func2

    # Main
    inp = relay.var("main_input", shape=(1, 2, 2, 4), dtype="int8")
    call1 = relay.Call(g1, [inp])
    call2 = relay.Call(g2, [call1])
    call3 = relay.Call(g3, [call2])
    main_func = relay.Function([inp], call3)
    main_func = main_func.with_attr("io_used_memory", 32)
    mod["main"] = main_func
    return mod
Example 7
def get_inner_func_3():
    x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
    x = relay.abs(x)
    x = relay.nn.relu(x)
    x = relay.exp(x)
    x = _create_primitive_function(x)
    return x
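
_create_primitive_function is also undefined here; a plausible sketch (an assumption, mirroring the Primitive attribute used elsewhere in these examples):

def _create_primitive_function(expr):
    # Hypothetical helper: wrap the expression in a function over its
    # free variables and mark it as a fused primitive.
    func = relay.Function(relay.analysis.free_vars(expr), expr)
    return func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))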
    def expected():
        mod = tvm.IRModule()

        # function 1
        f1_cb1 = relay.var('test_target_0_i0', shape=(10, 10))
        f1_O_1 = relay.abs(f1_cb1)
        f1_O_2 = relay.nn.relu(f1_O_1)
        f1_out = relay.Tuple((f1_O_2, f1_O_1))
        func1 = relay.Function([f1_cb1], f1_out)
        func1 = set_func_attr(func1, "test_target", "test_target_0")
        gv1 = relay.GlobalVar("test_target_0")
        mod[gv1] = func1

        # function 0
        f2_cb3 = relay.var('test_target_1_i0', shape=(10, 10))
        f2_cb4 = relay.var('test_target_1_i1', shape=(10, 10))
        f2_O_3 = relay.add(f2_cb3, f2_cb4)
        func0 = relay.Function([f2_cb3, f2_cb4], f2_O_3)
        func0 = set_func_attr(func0, "test_target", "test_target_1")
        gv0 = relay.GlobalVar("test_target_1")
        mod[gv0] = func0

        # body
        data = relay.var('data', shape=(10, 10))
        tuple_out = gv1(data)
        ce_2 = relay.TupleGetItem(tuple_out, 1)
        ce_3 = relay.TupleGetItem(tuple_out, 0)

        X = relay.tanh(ce_2)
        ce_4 = gv0(ce_3, X)
        func = relay.Function([data], ce_4)
        mod["main"] = func

        return mod
Example 9
def quantize(data, shift_bits, target_bits=relay.const(7, dtype='int32')):
    """Quantize the output of a layer, to be consistent with the source code @yx.

    Question: should shift_bits participate in the network's control flow?
            In the MXNet quantization from truman's code, the bit width of
            max_v is converted to a plain integer with `asscalar()`, but I
            cannot find an equivalent function in Relay. I am also unsure
            whether the condition `shift_bits == -1` should be part of the
            model graph or left in the Python control flow. By Longtao.Wang

    Parameters
    ----------
    shift_bits: tvm.relay.Expr
        The shift_bits parameter is never used; according to @yx's source
        code it is always the constant Expr(-1).
    """
    max_v = relay.max(relay.abs(data))
    min_v = relay.min(data)

    ln_max_v = relay.log(relay.cast(max_v, 'float32'))
    ln_2 = relay.log(relay.const(2.))
    total_bits = relay.ceil(relay.divide(ln_max_v, ln_2)) # ceil( ln(max_v) / ln(2) )
    shift_bits = relay.subtract(total_bits.astype('int32'), target_bits)
    shift_bits = relay.maximum(shift_bits, relay.const(0))

    denominator = relay.left_shift(relay.const(1),
                                   relay.cast(shift_bits, 'int32'))
    out = relay.divide(data, denominator)
    # Per @yx's code, use a divide instead of a shift so that negative
    # numbers round correctly.
    # out = relay.right_shift(data, shift_bits)

    out = relay.cast(relay.clip(out, a_min=-128, a_max=127), 'int8')
    return out, max_v, min_v, shift_bits
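
As a worked example of the shift arithmetic (illustrative numbers, not from the source):

import math

max_v = 1000.0
total_bits = math.ceil(math.log(max_v) / math.log(2))  # ceil(9.97) = 10
shift_bits = max(total_bits - 7, 0)                    # 10 - 7 = 3
denominator = 1 << shift_bits                          # 8
# data is divided by 8, clipped to [-128, 127], then cast to int8.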
Example 10
def get_var_func():
    shape = (5, 10)
    tp = relay.TensorType(shape, "float32")
    x = relay.var("x", tp)
    gv = relay.GlobalVar("myAbs")
    func = relay.Function([x], relay.abs(x))
    return gv, func
    def expected():
        mod = tvm.IRModule()

        # function 0
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10), dtype="uint8")
        a_split = relay.split(f0_i0, 2)
        a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)
        a_split_1 = relay.TupleGetItem(a_split.astuple(), 1)
        a_split_abs_in = relay.TupleGetItem(a_split.astuple(), 0)
        abs = relay.abs(a_split_abs_in)
        tuple_out = relay.Tuple((a_split_0, a_split_1, abs))
        func0 = relay.Function([f0_i0], tuple_out)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", target + "_0")
        gv0 = relay.GlobalVar(target + "_0")
        mod[gv0] = func0

        # body
        data = relay.var('a', shape=(10, 10), dtype="uint8")
        f_out = gv0(data)
        f_out_0 = relay.TupleGetItem(f_out, 0)
        f_out_1 = relay.TupleGetItem(f_out, 1)
        tuple = relay.Tuple((f_out_0, f_out_1))
        concat = relay.concatenate(tuple, 0)
        f_out_2 = relay.TupleGetItem(f_out, 2)
        relu = relay.nn.relu(f_out_2)
        ret_tuple = relay.Tuple((concat, relu))
        mod["main"] = relay.Function([data], ret_tuple)
        return mod
    def expected():
        mod = tvm.IRModule()

        # function 0
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10))
        f0_o0 = relay.abs(f0_i0)
        func0 = relay.Function([f0_i0], f0_o0)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", target + "_0")
        gv0 = relay.GlobalVar(target + "_0")
        mod[gv0] = func0

        # body
        data = relay.var('data', shape=(10, 10))
        function_out = gv0(data)
        out_1 = relay.nn.relu(function_out)
        out_2 = relay.tanh(function_out)
        out_3 = relay.log(function_out)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data], out)
        mod["main"] = func
        return mod
Example 14
def before():
    input_1 = relay.var('input_1', shape=(10, 10))
    input_2 = relay.var('input_2', shape=(10, 10))
    out = relay.add(input_1, input_2)
    out = relay.abs(out)
    out = relay.nn.relu(out)
    return relay.Function([input_1, input_2], out)

def pattern_A():
    x = relay.var('x')
    y = relay.var('y')
    out = relay.add(x, y)
    out = relay.abs(out)
    out = relay.nn.relu(out)
    return out

def create_graph():
    data = relay.var('data', shape=(10, 10))
    x = relay.abs(data)
    out_1 = relay.nn.relu(x)
    out_2 = relay.tanh(x)
    out_3 = relay.log(x)
    out = relay.Tuple([out_1, out_2, out_3])
    func = relay.Function([data], out)
    return func
Example 17
    def before():
        x = relay.var("x", shape=(10, 5))
        a_1 = relay.nn.relu(x)
        a_2 = relay.abs(a_1)
        a_3 = relay.nn.relu(a_1)
        out = relay.add(a_2, a_3)

        f = relay.Function([x], out)
        mod = tvm.IRModule.from_expr(f)
        return mod
Example 18
def after():
    x = relay.var("x", shape=(10, 10))
    cb_1 = relay.annotation.compiler_begin(x, "test")
    r = relay.nn.relu(cb_1)
    ce_1 = relay.annotation.compiler_end(r, "test")
    ce_2 = relay.annotation.compiler_end(r, "test")
    cb_2 = relay.annotation.compiler_begin(ce_1, "default")
    cb_3 = relay.annotation.compiler_begin(ce_2, "default")
    a_1 = relay.abs(cb_2)
    a_2 = relay.abs(cb_3)
    ce_3 = relay.annotation.compiler_end(a_1, "default")
    ce_4 = relay.annotation.compiler_end(a_2, "default")
    cb_4 = relay.annotation.compiler_begin(ce_3, "default")
    cb_5 = relay.annotation.compiler_begin(ce_4, "default")
    out = relay.add(cb_4, cb_5)
    ce_6 = relay.annotation.compiler_end(out, "default")
    f = relay.Function([x], ce_6)
    mod = tvm.IRModule.from_expr(f)
    return mod
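
This after() graph matches the before() of Example 3; assuming the pair comes from an AnnotateTarget test (with nn.relu registered for the "test" target via tvm.ir.register_op_attr), the check would look like:

mod = relay.transform.AnnotateTarget("test")(before())
tvm.ir.assert_structural_equal(mod, after())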
Example 19
def create_graph(axis, sections):
    x = relay.var("x", shape=(1, 50, 50, 3))
    x_abs = relay.abs(x)
    split_output = relay.split(x_abs, sections, axis).tuple_value
    outputs = list()
    for section_idx in range(sections):
        split_single_out = relay.TupleGetItem(split_output, section_idx)
        tanh = relay.tanh(split_single_out)
        outputs.append(tanh)
    tuple_out = relay.Tuple(outputs)
    return relay.Function([x], tuple_out)
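
For instance, create_graph(axis=3, sections=3) splits the abs output into three single-channel tensors and applies tanh to each:

mod = tvm.IRModule.from_expr(create_graph(axis=3, sections=3))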
Example 20
def after_C_priority():
    input_1 = relay.var('input_1', shape=(10, 10))
    input_2 = relay.var('input_2', shape=(10, 10))
    x = relay.var('x')
    out = relay.abs(x)
    out = relay.nn.relu(out)
    merged_func = relay.Function([x], out)
    merged_func = merged_func.with_attr('Composite', 'C')
    merged_func = merged_func.with_attr('PartitionedFromPattern', 'abs_nn.relu_')
    out = relay.add(input_1, input_2)
    ret = relay.Call(merged_func, [out])
    return relay.Function([input_1, input_2], ret)

def after_A_priority(composite_name):
    input_1 = relay.var('input_1', shape=(10, 10))
    input_2 = relay.var('input_2', shape=(10, 10))
    x = relay.var('x')
    y = relay.var('y')
    out = relay.add(x, y)
    out = relay.abs(out)
    out = relay.nn.relu(out)
    merged_func = relay.Function([x, y], out)
    merged_func = merged_func.with_attr('Composite', composite_name)
    ret = relay.Call(merged_func, [input_1, input_2])
    return relay.Function([input_1, input_2], ret)
Example 23
def after_B_priority():
    input_1 = relay.var('input_1', shape=(10, 10))
    input_2 = relay.var('input_2', shape=(10, 10))
    x = relay.var('x')
    y = relay.var('y')
    out = relay.add(x, y)
    out = relay.abs(out)
    merged_func = relay.Function([x, y], out)
    merged_func = merged_func.with_attr('Composite', 'B')
    merged_func = merged_func.with_attr('PartitionedFromPattern', 'add_abs_')
    out = relay.Call(merged_func, [input_1, input_2])
    ret = relay.nn.relu(out)
    return relay.Function([input_1, input_2], ret)
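
These after_*_priority graphs are expected outputs of the MergeComposite pass under different pattern priorities; a typical invocation (pattern_B and pattern_C are hypothetical analogues of pattern_A from Example 14):

pattern_table = [
    ("A", pattern_A()),  # add -> abs -> relu
    ("B", pattern_B()),  # hypothetical: add -> abs
    ("C", pattern_C()),  # hypothetical: abs -> relu
]
mod = tvm.IRModule.from_expr(before())
mod = relay.transform.MergeComposite(pattern_table)(mod)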
def test_reshape_nop():
    # test that reshape can be turned into nop
    x = relay.var("x", shape=(10, 4))
    xx = relay.abs(x)
    y = relay.expand_dims(xx, axis=1)
    t0 = relay.reshape(y, (1, 40))
    t1 = relay.abs(y)

    z0 = relay.reshape(t0, (2, 20))
    z1 = relay.sqrt(t1)
    z2 = relay.reshape(t1, (1, 40))

    func = relay.Function([x], relay.Tuple([z0, z1, z2]))
    x_data = np.random.rand(10, 4).astype("float32")
    graph = relay.build(tvm.IRModule.from_expr(func), "llvm")
    graph_json_str = graph.get_graph_json()

    graph_json = json.loads(graph_json_str)

    # reshape must force sharing memory
    storage_ids = graph_json["attrs"]["storage_id"][1]
    assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)
    assert graph_json["nodes"][2]["attrs"]["func_name"] == "__nop"
    assert graph_json["nodes"][5]["attrs"]["func_name"] == "__nop"

    gmod = graph_executor.GraphModule(graph["default"](tvm.cpu(0)))

    gmod.set_input(x=x_data)
    gmod.run()
    z0_np = x_data.reshape(2, 20)
    z1_np = np.sqrt(np.abs(x_data.reshape(
        10,
        1,
        4,
    )))
    z2_np = np.abs(x_data).reshape(1, 40)
    tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)
    tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)
    tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)
def test_region_set_creator_diamond():
    data = relay.var('data', shape=(10, 10))
    cb_1 = compiler_begin(data, 'test_target')
    O_1 = relay.abs(cb_1)
    ce_1 = compiler_end(O_1, 'test_target')
    ce_2 = compiler_end(O_1, 'test_target')
    cb_2 = compiler_begin(ce_1, 'test_target')
    O_2 = relay.nn.relu(cb_2)
    ce_3 = compiler_end(O_2, 'test_target')
    cb_d = compiler_begin(ce_2, "default")
    X = relay.tanh(cb_d)
    ce_d = compiler_end(X, 'default')
    cb_3 = compiler_begin(ce_3, 'test_target')
    cb_4 = compiler_begin(ce_d, 'test_target')
    O_3 = relay.add(cb_3, cb_4)
    ce_4 = compiler_end(O_3, 'test_target')
    diamond = relay.Function([data], ce_4)

    region_set = relay.analysis.AnnotatedRegionSet(
        diamond, relay.op.get("annotation.compiler_begin"),
        relay.op.get("annotation.compiler_end"))
    assert len(region_set) == 4
    check_region(
        region_set,
        'test_target',
        [cb_1],
        [cb_1, O_1, ce_1, ce_2],
        [ce_1, ce_2],
    )
    check_region(
        region_set,
        'test_target',
        [cb_2],
        [cb_2, O_2, ce_3],
        [ce_3],
    )
    check_region(
        region_set,
        'default',
        [cb_d],
        [cb_d, X, ce_d],
        [ce_d],
    )
    check_region(
        region_set,
        'test_target',
        [cb_3, cb_4],
        [cb_3, cb_4, O_3, ce_4],
        [ce_4],
    )
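
check_region is not defined in this excerpt; a sketch matching its call pattern (target, entry arguments, region nodes, outputs), assuming the region object exposes target/args/nodes/rets as in TVM's annotated-region tests:

def check_region(region_set, target, args, nodes, rets):
    # Look up the region containing the first argument and compare its
    # recorded target, arguments, nodes, and outputs.
    region = region_set.get_region(args[0])
    assert region
    assert target == region.target
    assert set(args) == set(region.args)
    assert set(nodes) == set(region.nodes)
    assert set(rets) == set(region.rets)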
    def create_graph():
        a = relay.var('a', shape=(10, 10), dtype="uint8")

        a_split = relay.split(a, 2)
        a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)
        a_split_0_abs = relay.abs(a_split_0)

        a_con = relay.concatenate(a_split, 0)
        a_split_0_relu = relay.nn.relu(a_split_0_abs)

        out = relay.Tuple((a_con, a_split_0_relu))
        f = relay.Function([a], out)
        mod = tvm.IRModule.from_expr(f)
        return mod
Example 28
def after_C_priority():
    input_1 = relay.var('input_1', shape=(10, 10))
    input_2 = relay.var('input_2', shape=(10, 10))
    add = relay.add(input_1, input_2)
    x = relay.var('x')
    out = relay.abs(x)
    out = relay.nn.relu(out)
    merged_func = relay.Function([x], out)
    # Updated from the removed set_attribute API to with_attr, matching
    # the other examples here.
    merged_func = merged_func.with_attr('Primitive', tvm.tir.IntImm('int32', 1))
    merged_func = merged_func.with_attr('Composite', 'C')
    ret = relay.Call(merged_func, [add])
    return relay.Function([input_1, input_2], ret)
Example 29
def after_A_priority(composite_name):
    input_1 = relay.var('input_1', shape=(10, 10))
    input_2 = relay.var('input_2', shape=(10, 10))
    x = relay.var('x')
    y = relay.var('y')
    out = relay.add(x, y)
    out = relay.abs(out)
    out = relay.nn.relu(out)
    merged_func = relay.Function([x, y], out)
    # Updated from the removed set_attribute API to with_attr, matching
    # the other examples here.
    merged_func = merged_func.with_attr('Primitive', tvm.tir.IntImm('int32', 1))
    merged_func = merged_func.with_attr('Composite', composite_name)
    ret = relay.Call(merged_func, [input_1, input_2])
    return relay.Function([input_1, input_2], ret)
    def expected():
        data = relay.var('data', shape=(10, 10))
        cb_1 = compiler_begin(data, "test")
        O_1 = relay.abs(cb_1)
        ce_2 = compiler_end(O_1, "test")
        O_2 = relay.nn.relu(O_1)
        ce_3 = compiler_end(O_2, "test")

        X = relay.tanh(ce_2)

        cb_3 = compiler_begin(ce_3, "test")
        cb_4 = compiler_begin(X, "test")
        O_3 = relay.add(cb_3, cb_4)
        ce_4 = compiler_end(O_3, "test")

        func = relay.Function([data], ce_4)
        return func
    def create_graph():
        a = relay.var('a', shape=(10, 10), dtype="uint8")
        b = relay.var('b', shape=(10, 10), dtype="uint8")
        a1 = relay.abs(a)

        zeroi = relay.const(1, "int32")
        zerof = relay.const(0, "float32")
        con = relay.qnn.op.concatenate((a1, b),
                                       input_scales=(zerof, zerof),
                                       input_zero_points=(zeroi, zeroi),
                                       output_scale=zerof,
                                       output_zero_point=zeroi,
                                       axis=1)

        f = relay.Function([a, b], con)
        mod = tvm.IRModule.from_expr(f)
        return mod
Example 32
def get_ref_abs():
    shape = (5, 10)
    tp = relay.TensorType(shape, "float32")
    a = relay.var("a", tp)
    ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
    return ref_abs