Example #1
def test_tuple_fst(target, dev):
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var("tup", type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 0))
    i_data = np.random.rand(1).astype("float32")
    j_data = np.random.rand(10).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = f
    check_result(target, dev, [(i_data, j_data)], i_data, mod=mod)
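A minimal, self-contained version of the same projection, runnable outside the test harness (a sketch assuming a standard TVM install; result conversion is .numpy() on recent versions, .asnumpy() on older ones):

import numpy as np
import tvm
from tvm import relay

# TupleGetItem(tup, 0) projects the first field of the tuple-typed parameter.
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var("tup", type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 0))

i_data = np.random.rand(1).astype("float32")
j_data = np.random.rand(10).astype("float32")
# The interpreter accepts a Python tuple for a tuple-typed argument.
result = relay.create_executor().evaluate(f)((i_data, j_data))
np.testing.assert_allclose(result.numpy(), i_data)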
Example #2
def test_tuple_getitem(use_calculated_workspaces, target_options):
    func = relay.Function([],
                          relay.TupleGetItem(
                              relay.Tuple([relay.const(1),
                                           relay.const(2)]), 0))
    output_list = generate_ref_data(func, {})
    input_list = []
    compile_and_run(func, input_list, output_list, target_options,
                    use_calculated_workspaces)
Example #3
def test_tuple_output(interface_api, use_unpacked_api, test_runner):
    x = relay.var("x", shape=(6, 9))
    y = relay.split(x, 3).astuple()
    a = relay.TupleGetItem(y, 0)
    b = relay.TupleGetItem(y, 1)
    out = relay.Tuple([a, b])
    func = relay.Function([x], out)
    x_data = np.random.rand(6, 9).astype("float32")
    inputs = {"x": x_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func),
                     inputs=inputs,
                     outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #4
def test_square_second_order():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x * x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    y = relay.var("y", t)
    back_func_adjusted = relay.Function([y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0))
    back_func_adjusted = run_infer_type(back_func_adjusted)
    back_back_func = run_infer_type(gradient(back_func_adjusted))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    ex = create_executor()
    forward, (grad_x,) = ex.evaluate(back_back_func)(x_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
    tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
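The nested TupleGetItem above follows from the gradient transform's calling convention: it rewrites a function into one returning (forward_value, (grad_per_input, ...)), so index 1 selects the gradient tuple and index 0 its first entry. A first-order sketch of that shape, assuming the test helpers' usual imports (tvm.relay.transform.gradient, tvm.relay.testing.run_infer_type):

import tvm
from tvm import relay
from tvm.relay.transform import gradient
from tvm.relay.testing import run_infer_type

t = relay.TensorType((10, 10), "float32")
x = relay.var("x", t)
func = run_infer_type(relay.Function([x], x * x))
back_func = run_infer_type(gradient(func))
# One input, so the result type is (forward, (dx,)).
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))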
Example #5
def test_checkpoint():
    inputs = [relay.var("x{}".format(i), shape=(1,)) for i in range(4)]
    output = relay.multiply(relay.add(inputs[0], inputs[1]), relay.add(inputs[2], inputs[3]))
    check_grad(relay.Function(inputs, relay.annotation.checkpoint(output)))

    scope = relay.ScopeBuilder()
    out_tuple = scope.let(
        "out_tuple",
        relay.Tuple([relay.add(inputs[0], inputs[1]), relay.multiply(inputs[2], inputs[3])]),
    )
    scope.ret(
        relay.subtract(
            relay.annotation.checkpoint(relay.TupleGetItem(out_tuple, 0)),
            relay.TupleGetItem(out_tuple, 1),
        )
    )
    out_single = scope.get()
    check_grad(relay.Function(inputs, out_single))
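For reference, relay.ScopeBuilder (used above) just turns a sequence of let bindings into nested relay.Let expressions. A minimal sketch of the pattern:

import tvm
from tvm import relay

a = relay.var("a", shape=(1,))
b = relay.var("b", shape=(1,))
sb = relay.ScopeBuilder()
# let t = (a + b, a * b); return t.0 - t.1
t = sb.let("t", relay.Tuple([a + b, a * b]))
sb.ret(relay.TupleGetItem(t, 0) - relay.TupleGetItem(t, 1))
func = relay.Function([a, b], sb.get())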
Example #6
def test_split_no_fuse():
    x = relay.var('x', shape=(12, ))
    y = relay.split(x, 3, axis=0).astuple()
    z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
    z = relay.annotation.stop_fusion(z)
    f = relay.Function([x], z)
    x_data = np.random.rand(12, ).astype('float32')
    res = veval(f, x_data)
    tvm.testing.assert_allclose(res.asnumpy(), np.split(x_data, 3, axis=0)[0])
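A note on astuple(): relay.split returns a TupleWrapper rather than a tuple expression, and indexing the wrapper is sugar for TupleGetItem on the underlying value (some examples below spell astuple() as the tuple_value attribute). A sketch showing that the two forms build structurally equal expressions:

import tvm
from tvm import relay

x = relay.var("x", shape=(12,))
split = relay.split(x, 3, axis=0)
by_sugar = split[0]
by_hand = relay.TupleGetItem(split.astuple(), 0)
assert tvm.ir.structural_equal(by_sugar, by_hand)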
Example #7
def test_tuple_second():
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var('tup', type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 1))
    i_data = np.random.rand(1).astype('float32')
    j_data = np.random.rand(10).astype('float32')
    mod = tvm.IRModule()
    mod["main"] = f
    check_result([(i_data, j_data)], j_data, mod=mod)
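Type inference is what makes these projections well-typed: after inference, a TupleGetItem takes on the type of the field it selects. A short sketch using only standard passes:

import tvm
from tvm import relay

ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var("tup", type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 1))
mod = relay.transform.InferType()(tvm.IRModule.from_expr(f))
# The inferred return type is the second field's type.
assert mod["main"].ret_type == relay.TensorType((10,))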
Example #8
def get_net(iterations, num_hidden, batch_size=1, dtype="float32"):
    '''Constructs an unrolled RNN with LSTM cells'''
    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4 * num_hidden, ), dtype)

    state_type = relay.TupleType([input_type, input_type])
    cell_type = relay.TupleType([input_type, state_type])

    builder = relay.ScopeBuilder()

    zeros = builder.let(("zeros", input_type),
                        relay.zeros((batch_size, num_hidden), dtype))
    init_states = builder.let(("init_states", state_type),
                              relay.Tuple([zeros, zeros]))

    states = init_states
    out = None

    for i in range(iterations):
        inputs = relay.Var("data", input_type)
        i2h_weight = relay.Var("i2h_%s_weight" % i, weight_type)
        i2h_bias = relay.Var("i2h_%s_bias" % i, bias_type)
        h2h_weight = relay.Var("h2h_%s_weight" % i, weight_type)
        h2h_bias = relay.Var("h2h_%s_bias" % i, bias_type)

        cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_%s" % i)

        call = builder.let(
            ("call_%s" % i, cell_type),
            relay.Call(
                cell_fn,
                [inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias]))
        new_out = builder.let(("out_%s" % i, input_type),
                              relay.TupleGetItem(call, 0))
        new_states = builder.let(("states_%s" % i, state_type),
                                 relay.TupleGetItem(call, 1))
        states = new_states
        out = new_out

    builder.ret(out)
    body = builder.get()
    args = relay.analysis.free_vars(body)
    return relay.Function(args, body, input_type)
Example #9
    def expected():
        mod = tvm.IRModule()

        # function 1
        f1_cb1 = relay.var('test_target_1_i0', shape=(10, 10))
        f1_O_1 = relay.abs(f1_cb1)
        f1_O_2 = relay.nn.relu(f1_O_1)
        f1_out = relay.Tuple((f1_O_2, f1_O_1))
        func1 = relay.Function([f1_cb1], f1_out)

        func1 = func1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func1 = func1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func1 = func1.with_attr("Compiler", tvm.tir.StringImm("test_target"))
        func1 = func1.with_attr("global_symbol",
                                container.String("test_target_1"))
        gv1 = relay.GlobalVar("test_target_1")
        mod[gv1] = func1

        # function 0
        f2_cb3 = relay.var('test_target_0_i0', shape=(10, 10))
        f2_cb4 = relay.var('test_target_0_i1', shape=(10, 10))
        f2_O_3 = relay.add(f2_cb3, f2_cb4)
        func0 = relay.Function([f2_cb3, f2_cb4], f2_O_3)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", tvm.tir.StringImm("test_target"))
        func0 = func0.with_attr("global_symbol",
                                container.String("test_target_0"))
        gv0 = relay.GlobalVar("test_target_0")
        mod[gv0] = func0

        # body
        data = relay.var('data', shape=(10, 10))
        tuple_out = gv1(data)
        ce_2 = relay.TupleGetItem(tuple_out, 1)
        ce_3 = relay.TupleGetItem(tuple_out, 0)

        X = relay.tanh(ce_2)
        ce_4 = gv0(ce_3, X)
        func = relay.Function([data], ce_4)
        mod["main"] = func

        return mod
Example #10
def get_model(get_item):
    """Return a model"""
    a = relay.var("a", shape=(1, 16, 16, 4), dtype="uint8")
    z = relay.op.clip(a, 0, 255)
    b = relay.op.clip(z, 0, 15)
    c = relay.op.clip(z, 16, 31)
    t = relay.Tuple((c, b))
    tgi = relay.TupleGetItem(t, 1) if get_item else t
    foo = relay.Function([a], tgi)
    return tvm.IRModule.from_expr(foo)
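IRModule.from_expr, used in the return above, wraps a bare expression in a fresh main whose parameters are the expression's free variables; passing the already-built relay.Function instead keeps the declared signature. A sketch of the free-variable behaviour:

import tvm
from tvm import relay

a = relay.var("a", shape=(1, 16, 16, 4), dtype="uint8")
t = relay.Tuple((relay.op.clip(a, 0, 15), relay.op.clip(a, 16, 31)))
mod = tvm.IRModule.from_expr(relay.TupleGetItem(t, 1))
# The free variable a becomes the sole parameter of main.
assert [p.name_hint for p in mod["main"].params] == ["a"]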
Example #11
def test_tuple_second():
    ttype = relay.TupleType(
        [relay.TensorType((1, )),
         relay.TensorType((10, ))])
    tup = relay.var('tup', type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 1))
    i_data = np.random.rand(1).astype('float32')
    j_data = np.random.rand(10).astype('float32')
    result = veval(f, (i_data, j_data))
    tvm.testing.assert_allclose(result.asnumpy(), j_data)
Example #12
def relay_lstm_cell(batch_size, input_size, hidden_size):
    # based on https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM
    state_tensor_type = relay.TensorType((batch_size, hidden_size))
    state_tuple_type = relay.TupleType([state_tensor_type, state_tensor_type])

    inp = relay.var("input", shape=(batch_size, input_size))
    state = relay.Var("state", type_annotation=state_tuple_type)

    w_ih = relay.var("w_ih", shape=(4*hidden_size, input_size))
    w_hh = relay.var("w_hh", shape=(4*hidden_size, hidden_size))
    b_ih = relay.var("b_ih", shape=(4*hidden_size,))
    b_hh = relay.var("b_hh", shape=(4*hidden_size,))

    hidden = relay.TupleGetItem(state, 0)
    cell_state = relay.TupleGetItem(state, 1)

    # PyTorch packs the i2h and h2h weights and biases together so we will match that here
    w_i_splits = relay.split(w_ih, 4, 0)
    w_h_splits = relay.split(w_hh, 4, 0)
    b_i_splits = relay.split(b_ih, 4, 0)
    b_h_splits = relay.split(b_hh, 4, 0)
    w_ii, w_if, w_ig, w_io = w_i_splits[0], w_i_splits[1], w_i_splits[2], w_i_splits[3]
    w_hi, w_hf, w_hg, w_ho = w_h_splits[0], w_h_splits[1], w_h_splits[2], w_h_splits[3]
    b_ii, b_if, b_ig, b_io = b_i_splits[0], b_i_splits[1], b_i_splits[2], b_i_splits[3]
    b_hi, b_hf, b_hg, b_ho = b_h_splits[0], b_h_splits[1], b_h_splits[2], b_h_splits[3]

    def weighted_value(weight, value, bias):
        return relay.transpose(relay.nn.dense(weight, value) + relay.reshape(bias, (hidden_size, 1)))

    i_t = relay.sigmoid(weighted_value(w_ii, inp, b_ii) + weighted_value(w_hi, hidden, b_hi))
    f_t = relay.sigmoid(weighted_value(w_if, inp, b_if) + weighted_value(w_hf, hidden, b_hf))
    g_t = relay.tanh(weighted_value(w_ig, inp, b_ig) + weighted_value(w_hg, hidden, b_hg))
    o_t = relay.sigmoid(weighted_value(w_io, inp, b_io) + weighted_value(w_ho, hidden, b_ho))
    c_t = f_t*cell_state + i_t*g_t
    h_t = o_t*relay.tanh(c_t)

    h_var = relay.Var("h")
    c_var = relay.Var("c")
    return relay.Function([inp, state, w_ih, w_hh, b_ih, b_hh],
                          relay.Let(h_var, h_t,
                                    relay.Let(c_var, c_t,
                                              relay.Tuple([h_var, relay.Tuple([h_var, c_var])]))),
                          ret_type=relay.TupleType([state_tensor_type, state_tuple_type]))
Example #13
def test_tuple_output(interface_api, use_unpacked_api,
                      use_calculated_workspaces):
    x = relay.var("x", shape=(6, 9))
    y = relay.split(x, 3).astuple()
    a = relay.TupleGetItem(y, 0)
    b = relay.TupleGetItem(y, 1)
    out = relay.Tuple([a, b])
    func = relay.Function([x], out)
    x_data = np.random.rand(6, 9).astype("float32")
    inputs = {"x": x_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        func,
        inputs,
        output_list,
        interface_api,
        use_unpacked_api,
        use_calculated_workspaces,
    )
Example #14
def test_tuple_getitem(interface_api, use_unpacked_api, test_runner):
    func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
    output_list = generate_ref_data(func, {})

    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #15
def test_split_no_fuse():
    x = relay.var("x", shape=(12, ))
    y = relay.split(x, 3, axis=0).astuple()
    z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
    z = relay.annotation.stop_fusion(z)
    f = relay.Function([x], z)
    x_data = np.random.rand(12, ).astype("float32")
    for tgt, dev in tvm.testing.enabled_targets():
        res = veval(f, x_data, device=dev, target=tgt)
        tvm.testing.assert_allclose(res.numpy(),
                                    np.split(x_data, 3, axis=0)[0])
Example #16
def test_tuple():
    ttype = relay.TupleType(
        [relay.TensorType((1, )),
         relay.TensorType((10, ))])
    tup = relay.var("tup", type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 1))
    i_data = np.random.rand(1).astype("float32")
    j_data = np.random.rand(10).astype("float32")

    result = get_serialized_output(f, (i_data, j_data))
    tvm.testing.assert_allclose(result.asnumpy(), j_data)
Example #17
def create_graph(axis, sections):
    x = relay.var("x", shape=(1, 50, 50, 3))
    x_abs = relay.abs(x)
    split_output = relay.split(x_abs, sections, axis).tuple_value
    outputs = list()
    for section_idx in range(sections):
        split_single_out = relay.TupleGetItem(split_output, section_idx)
        tanh = relay.tanh(split_single_out)
        outputs.append(tanh)
    tuple_out = relay.Tuple(outputs)
    return relay.Function([x], tuple_out)
Example #18
def test_tuple():
    mod = Module()
    cfunc = compile(
        Function([],
                 relay.TupleGetItem(
                     relay.Tuple([
                         relay.const(3, dtype='int32'),
                         relay.const(4.0, dtype='float32')
                     ]), 1)), mod)
    np.testing.assert_allclose(cfunc().asnumpy(), np.array(4.0,
                                                           dtype='float32'))
Example #19
def _test_tuple_argument(mode):
    shape = (2, 3)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    fields = 3
    tuple_type = relay.TupleType([tensor_type] * fields)
    tup = relay.var("tup", type_annotation=tuple_type)
    body = relay.TupleGetItem(tup, 0)
    for i in range(1, fields):
        body = relay.add(body, relay.TupleGetItem(tup, i))
    func = relay.Function([tup], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    xs = [rand(dtype, *shape) for _ in range(fields)]
    xs_np = np.array([x.numpy() for x in xs])
    expected_forward = np.sum(xs_np, axis=0)
    forward, grad = create_executor().evaluate(back_func)(tuple(xs))
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    for field in grad[0]:
        tvm.testing.assert_allclose(field.numpy(), np.ones_like(field.numpy()))
Example #20
def build_relay_module(batch_size, input_size, hidden_size, time_steps,
                       dense_dim):
    mod = tvm.IRModule()
    mod["lstm_layer"] = lstm_definition(batch_size, input_size, hidden_size,
                                        time_steps)
    mod["linear_layer"] = linear_layer_definition(batch_size, hidden_size,
                                                  dense_dim)
    lstm_var = mod.get_global_var("lstm_layer")
    linear_var = mod.get_global_var("linear_layer")

    # now we build up our main function
    input_var = relay.var("input", shape=(batch_size, time_steps, input_size))
    init_hidden_var = relay.var("init_hidden", shape=(batch_size, hidden_size))
    init_cell_var = relay.var("init_cell", shape=(batch_size, hidden_size))
    i2h_weight_var = relay.var("i2h_weight",
                               shape=(4 * hidden_size, input_size))
    h2h_weight_var = relay.var("h2h_weight",
                               shape=(4 * hidden_size, hidden_size))
    lstm_bias_var = relay.var("lstm_bias", shape=(4 * hidden_size, ))
    linear_weight_var = relay.var("linear_weight",
                                  shape=(dense_dim, hidden_size))
    linear_bias_var = relay.var("linear_bias", shape=(dense_dim, ))

    builder = relay.ScopeBuilder()
    state_var = builder.let("state",
                            relay.Tuple([init_hidden_var, init_cell_var]))
    lstm_res = builder.let(
        "lstm_res",
        lstm_var(
            input_var,
            state_var,
            i2h_weight_var,
            h2h_weight_var,
            lstm_bias_var,
            # the keras model only gave one bias,
            # so set the other to zero
            # (hopefully this is correct)
            relay.zeros_like(lstm_bias_var)))
    final_hidden = builder.let("final_hidden", relay.TupleGetItem(lstm_res, 1))
    # to match PT's semantics, we're undoing the reshape in LSTM :)
    reshape_hidden = builder.let("reshape_hidden",
                                 relay.squeeze(final_hidden, axis=[0]))
    linear_result = builder.let(
        "linear_result",
        linear_var(reshape_hidden, linear_weight_var, linear_bias_var))
    # finally do a softmax
    builder.ret(relay.nn.softmax(linear_result))
    main_func = relay.Function([
        input_var, init_hidden_var, init_cell_var, i2h_weight_var,
        h2h_weight_var, lstm_bias_var, linear_weight_var, linear_bias_var
    ], builder.get())
    mod["main"] = main_func
    return mod
Example #21
def test_compile_fused_identity_cast():
    # a fused function that would be optimized to identity
    x = relay.var("x", shape=[16], dtype="float32")
    y = relay.cast(x, "float32")
    func1 = relay.Function([x], y).with_attr("Primitive", 1)

    # a fused function with param pass-through
    x = relay.var("x", shape=[16], dtype="float32")
    y = relay.add(x, relay.const(3.14, "float32"))
    func2 = relay.Function([x], relay.Tuple([x, y])).with_attr("Primitive", 1)

    x_global = relay.var("xx", shape=[16], dtype="float32")
    tup = func2(x_global)
    y_global = func1(relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1))

    mod = tvm.IRModule.from_expr(relay.Function([x_global], y_global))
    for target, device in tvm.testing.enabled_targets():
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, _ = relay.build(mod, target=target)
            executor = graph_executor.create(graph, lib, device=device)
            executor.run()
Example #22
    def expected():
        mod = tvm.IRModule()

        # function 0
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10), dtype="uint8")
        a_split = relay.split(f0_i0, 2)
        a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)
        a_split_1 = relay.TupleGetItem(a_split.astuple(), 1)
        a_split_abs_in = relay.TupleGetItem(a_split.astuple(), 0)
        abs = relay.abs(a_split_abs_in)
        tuple_out = relay.Tuple((a_split_0, a_split_1, abs))
        func0 = relay.Function([f0_i0], tuple_out)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", target + "_0")
        gv0 = relay.GlobalVar(target + "_0")
        mod[gv0] = func0

        # body
        data = relay.var('a', shape=(10, 10), dtype="uint8")
        f_out = gv0(data)
        f_out_0 = relay.TupleGetItem(f_out, 0)
        f_out_1 = relay.TupleGetItem(f_out, 1)
        tuple = relay.Tuple((f_out_0, f_out_1))
        concat = relay.concatenate(tuple, 0)
        f_out_2 = relay.TupleGetItem(f_out, 2)
        relu = relay.nn.relu(f_out_2)
        ret_tuple = relay.Tuple((concat, relu))
        mod["main"] = relay.Function([data], ret_tuple)
        return mod
Example #23
    def expected():
        def create_external_func1(mod_, compiler_name, symbol_name):
            x_int = relay.var("x_int", shape=(10, 10))

            p0 = relay.nn.relu(x_int)
            q0 = relay.tanh(x_int)

            # reshapes
            p0_reshaped = relay.reshape(p0, newshape=100)
            q0_reshaped = relay.reshape(q0, newshape=100)
            ofms = relay.concatenate((p0_reshaped, q0_reshaped), 0)

            f1 = relay.Function([x_int], ofms)
            f1 = set_func_attr(f1, compiler_name, symbol_name)
            glb_f1 = relay.GlobalVar(symbol_name)
            mod_[glb_f1] = f1
            mod_ = relay.transform.InferType()(mod_)
            return glb_f1, mod_

        mod = tvm.IRModule()
        x = relay.var("x", shape=(10, 10))
        glb_symbol_f1, mod = create_external_func1(mod, "ethosu", "ethosu_0")
        ofms = relay.Call(glb_symbol_f1, [x])

        # splits
        (p0_flat, q0_flat) = relay.split(ofms, [100])
        # reshapes
        p0_flat_reshaped = relay.reshape(p0_flat, newshape=(10, 10))
        q0_flat_reshaped = relay.reshape(q0_flat, newshape=(10, 10))
        # original output
        tuple_out = relay.Tuple([p0_flat_reshaped, q0_flat_reshaped])

        p0 = relay.TupleGetItem(tuple_out, 0)
        q0 = relay.TupleGetItem(tuple_out, 1)
        r = relay.concatenate((p0, q0), axis=0)
        main = relay.Function([x], r)
        mod["main"] = main
        mod = relay.transform.InferType()(mod)
        return mod
Example #24
def test_vm_reshape_tuple(target, dev, x_shape=(1, 4, 2), y_shape=(1, 2, 10)):
    tup = relay.var(
        "tup",
        type_annotation=relay.TupleType([relay.TensorType(x_shape), relay.TensorType(y_shape)]),
    )
    out = relay.reshape(relay.TupleGetItem(tup, 0), (1, -1))
    f = relay.Function([tup], out)

    x_data = np.random.uniform(size=x_shape).astype("float32")
    y_data = np.random.uniform(size=y_shape).astype("float32")

    res = veval(f, (x_data, y_data), device=dev, target=target)
    tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1)))
Example #25
    def expected():
        mod = tvm.IRModule()

        # function 0
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10))
        f0_i1 = relay.var(target + "_0_i1")
        f0_i2 = relay.var(target + "_0_i2")
        f0_i3 = relay.var(target + "_0_i3")
        f0_i4 = relay.var(target + "_0_i4")
        f0_n0 = relay.nn.batch_norm(f0_i0, f0_i1, f0_i2, f0_i3, f0_i4)
        f0_n1 = f0_n0[1]
        f0_n2 = relay.nn.relu(f0_n0[0])
        f0_o0 = relay.Tuple([f0_n2, f0_n1])
        func0 = relay.Function([f0_i0, f0_i1, f0_i2, f0_i3, f0_i4], f0_o0)

        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", target + "_0")
        gv0 = relay.GlobalVar(target + "_0")
        mod[gv0] = func0
        mod = transform.InferType()(mod)

        # body
        data = relay.var("data", shape=(10, 10))
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")
        function_out = gv0(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        get_out0 = relay.TupleGetItem(function_out, 0)
        get_out1 = relay.TupleGetItem(function_out, 1)
        out_2 = relay.tanh(get_out1)
        out_3 = relay.log(get_out1)
        out = relay.Tuple([get_out0, out_2, out_3])
        func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], out)
        mod["main"] = func
        mod = transform.InferType()(mod)
        return mod
Example #26
    def do_env_find(self, env, key, dft):
        """Build the code to find a value in env."""
        v = relay.var("v")
        cl = adt.Clause(
            adt.PatternConstructor(self.env_ctr, [adt.PatternVar(v)]), v)
        env_v = adt.Match(env, [cl], complete=False)

        val = relay.TupleGetItem(env_v, self.env_val_map[key][0])
        x = relay.var("x")
        nil_c = adt.Clause(adt.PatternConstructor(nil, []), dft)
        some_c = adt.Clause(adt.PatternConstructor(some, [adt.PatternVar(x)]),
                            x)
        return adt.Match(val, [some_c, nil_c])
Example #27
    def do_env_update(self, env_, key, val):
        """Build the code to update the env."""
        v = relay.var("v")
        cl = adt.Clause(
            adt.PatternConstructor(self.env_ctr, [adt.PatternVar(v)]), v)
        env = adt.Match(env_, [cl], complete=False)

        map = dict((i, k) for k, (i, _) in self.env_val_map.items())
        new_env = relay.Tuple([
            some(val) if map[i] == key else relay.TupleGetItem(env, i)
            for i in range(len(map))
        ])
        return self.env_ctr(new_env)
Example #28
def fuse_partitions(pre_mod, mid_mod, post_mod):
    """Combine prefix, middle, and suffix modules into a single module.

    The combined module includes an additional `main` that fuses all three
    partitions together.

    Parameters
    ----------
    pre_mod : tvm.IRModule
        Module containing an input quantization function

    mid_mod : tvm.IRModule
        Module containing core of a quantized inference function

    post_mod : tvm.IRModule
        Module containing an output dequantization function

    Returns
    -------
    fused_mod : tvm.IRModule
        Module containing the input quantization, core quantized inference,
        output dequantization, and full quantized inference functions
    """
    pre_func = pre_mod['main']
    mid_func = mid_mod['main']
    post_func = post_mod['main']
    # create a module containing the prefix, middle, and suffix partitions
    fused_mod = tvm.IRModule(functions={
        relay.GlobalVar('quantize_inputs'): pre_func,
        relay.GlobalVar('quantized_main'): mid_func,
        relay.GlobalVar('dequantize_outputs'): post_func,
    })
    # construct a `main` that strings together the partitions, such that its
    # behaviour is equivalent to `main` in an *unpartitioned* module
    scope_builder = relay.ScopeBuilder()
    fused_mod_main_params = [relay.Var(param.name_hint) for param in pre_func.params]
    quantized_inputs = scope_builder.let('quantized_inputs', relay.Call(
        fused_mod.get_global_var('quantize_inputs'),
        fused_mod_main_params
    ))
    quantized_outputs = scope_builder.let('quantized_outputs', relay.Call(
        fused_mod.get_global_var('quantized_main'),
        [relay.TupleGetItem(quantized_inputs, i) for i in range(len(pre_func.ret_type.fields))]
    ))
    dequantized_outputs = scope_builder.let('dequantized_outputs', relay.Call(
        fused_mod.get_global_var('dequantize_outputs'),
        [quantized_outputs]
    ))
    scope_builder.ret(dequantized_outputs)
    fused_mod['main'] = relay.Function(fused_mod_main_params, scope_builder.get())
    return fused_mod
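A hedged usage sketch of fuse_partitions. The three single-op modules below are hypothetical stand-ins (real inputs come from a quantization partitioning pass); note the prefix function must return a tuple and be type-inferred first, since fuse_partitions reads pre_func.ret_type.fields:

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(2, 2))
pre_mod = relay.transform.InferType()(
    tvm.IRModule.from_expr(relay.Function([x], relay.Tuple([x * relay.const(4.0)]))))
y = relay.var("y", shape=(2, 2))
mid_mod = relay.transform.InferType()(
    tvm.IRModule.from_expr(relay.Function([y], y + y)))
z = relay.var("z", shape=(2, 2))
post_mod = relay.transform.InferType()(
    tvm.IRModule.from_expr(relay.Function([z], z / relay.const(4.0))))

fused = fuse_partitions(pre_mod, mid_mod, post_mod)
data = np.ones((2, 2), dtype="float32")
out = relay.create_executor(mod=fused).evaluate()(data)
# ((x * 4) + (x * 4)) / 4 == 2 * x
np.testing.assert_allclose(out.numpy(), 2 * data)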
Example #29
def test_simple_graph():
    # A module with two subgraphs
    mod = tvm.IRModule()

    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    z0 = x0 + y0
    z1 = x0 - y0
    z2 = relay.Tuple((z0, z1))
    f0 = relay.Function([x0, y0], z2)
    f0 = f0.with_attr("Compiler", "test_graph")
    g0 = relay.GlobalVar("g0")
    mod[g0] = f0
    mod = relay.transform.InferType()(mod)

    x1 = relay.var("x1", shape=(8, 8))
    y1 = relay.var("y1", shape=(8, 8))
    z1 = x1 - y1
    f1 = relay.Function([x1, y1], z1)
    f1 = f1.with_attr("Compiler", "test_graph")
    g1 = relay.GlobalVar("g1")
    mod[g1] = f1
    mod = relay.transform.InferType()(mod)

    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    z = relay.var("z", shape=(8, 8))
    c0 = relay.Call(g0, [x, y])
    c1 = relay.Call(g1, [relay.TupleGetItem(c0, 0), z])
    fm = relay.Function([x, y, z], c1)
    mod["main"] = fm
    mod = relay.transform.InferType()(mod)

    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    z_data = np.random.rand(8, 8).astype("float32")
    data = get_calibration_data(mod, {"x": x_data, "y": y_data, "z": z_data})

    # Check the number and order of the collected inputs/outputs
    check_data_size(mod, data)
    tvm.testing.assert_allclose(data[g0]["inputs"][0].asnumpy(), x_data)
    tvm.testing.assert_allclose(data[g0]["inputs"][1].asnumpy(), y_data)
    tvm.testing.assert_allclose(data[g0]["outputs"][0].asnumpy(),
                                x_data + y_data)
    tvm.testing.assert_allclose(data[g0]["outputs"][1].asnumpy(),
                                x_data - y_data)
    tvm.testing.assert_allclose(data[g1]["inputs"][0].asnumpy(),
                                x_data + y_data)
    tvm.testing.assert_allclose(data[g1]["inputs"][1].asnumpy(), z_data)
    tvm.testing.assert_allclose(data[g1]["outputs"][0].asnumpy(),
                                x_data + y_data - z_data)
Example #30
def test_tuple_get_item_sequal():
    x = relay.Var('x')
    y = relay.Var('y')
    assert not consistent_equal(relay.TupleGetItem(x, 1),
                                relay.TupleGetItem(y, 1))
    assert not consistent_equal(relay.TupleGetItem(x, 1),
                                relay.TupleGetItem(x, 2))
    assert consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 1))
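The consistent_equal helper in this last example wraps TVM's structural equality (and hash) checks; the property it pins down can be reproduced directly with tvm.ir.structural_equal, which compares both the tuple operand and the index:

import tvm
from tvm import relay

x = relay.Var("x")
y = relay.Var("y")
assert tvm.ir.structural_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 1))
# A different index, or a different (unmapped) free variable, breaks equality.
assert not tvm.ir.structural_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 2))
assert not tvm.ir.structural_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(y, 1))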