def expected(x, w1, w2, b1, b2, scale1, scale2, newshape):
    args = [x, w1, w2, b1, b2, scale1, scale2]
    # stack the inputs and weights so one batch_matmul computes both branches
    x_stacked = relay.stack((x, x), axis=0)
    w = relay.stack((w1, w2), axis=0)
    y = relay.nn.batch_matmul(x_stacked, w)
    b1 = relay.expand_dims(b1, 0)
    b2 = relay.expand_dims(b2, 0)
    b = relay.stack((b1, b2), axis=0)
    y = relay.add(y, b)
    scale1 = relay.expand_dims(scale1, 0)
    scale2 = relay.expand_dims(scale2, 0)
    scale = relay.stack((scale1, scale2), axis=0)
    y = relay.multiply(y, scale)
    # split the batched result back into the two original outputs
    (y1, y2) = relay.split(y, 2)
    y1 = relay.squeeze(y1, [0])
    y2 = relay.squeeze(y2, [0])
    y1 = relay.reshape(y1, newshape=newshape)
    y2 = relay.reshape(y2, newshape=newshape)
    y = relay.Tuple((y1, y2))
    return relay.Function(args, y)
def expected(x, w1, w2, scale1, scale2, bias, channels1, channels2):
    args = [x, w1, w2, scale1, scale2, bias]
    # concatenate the weights so a single conv2d covers both parallel branches
    w = relay.concatenate((w1, w2), axis=0)
    scale = relay.concatenate((scale1, scale2), axis=0)
    y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
    y = relay.multiply(y, scale)
    y = relay.nn.relu(y)
    # slice the combined output back apart (slice_mode="size": end holds extents)
    y1 = relay.strided_slice(y,
                             begin=[0, 0],
                             end=[-1, channels1],
                             strides=[1, 1],
                             slice_mode="size")
    y2 = relay.strided_slice(y,
                             begin=[0, channels1],
                             end=[-1, channels2],
                             strides=[1, 1],
                             slice_mode="size")
    y2 = relay.add(y2, bias)
    y = relay.Tuple((y1, y2))
    return relay.Function(args, y)
    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))

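        # broadcast the NHWC bias to 4-D, then convert it to NCHW to match the conv output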
        bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        bias = relay.layout_transform(bias, "NHWC", "NCHW")
        y = relay.add(y, bias)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2))
        y = relay.cast(y, "int32")
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y
Example #4
def test_global_recursion():
    mod = tvm.IRModule()
    p = Prelude(mod)
    copy = relay.GlobalVar("copy")
    # same as above: it copies the given list
    a = relay.TypeVar("a")
    v = relay.Var("v", p.l(a))
    h = relay.Var("h")
    t = relay.Var("t")
    copy_def = relay.Function(
        [v],
        relay.Match(
            v,
            [
                relay.Clause(
                    relay.PatternConstructor(
                        p.cons, [relay.PatternVar(h),
                                 relay.PatternVar(t)]),
                    p.cons(h, copy(t)),
                ),
                relay.Clause(relay.PatternConstructor(p.nil, []), p.nil()),
            ],
        ),
        p.l(a),
        [a],
    )
    mod[copy] = copy_def

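    # copy a two-element list [1, 2] and check each cons cell of the result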
    call1 = copy(p.cons(relay.const(1), p.cons(relay.const(2), p.nil())))
    val1 = run_as_python(call1, mod)
    assert_constructor_value(val1, p.cons, 2)
    assert_tensor_value(val1.fields[0], 1)
    assert_constructor_value(val1.fields[1], p.cons, 2)
    assert_tensor_value(val1.fields[1].fields[0], 2)
    assert_constructor_value(val1.fields[1].fields[1], p.nil, 0)

    call2 = copy(p.cons(relay.Tuple([]), p.nil()))
    val2 = run_as_python(call2, mod)
    assert_constructor_value(val2, p.cons, 2)
    assert_adt_len(val2.fields[0], 0)
    assert_constructor_value(val2.fields[1], p.nil, 0)
Example #5
def test_ref_execution_order():
    # we want to have effects execute from left to right
    x = relay.Var("x")
    y = relay.Var("y")
    f = relay.Var("f")
    r = relay.Var("r")

    expr = relay.Let(
        f,
        relay.Function([x, y], x),
        # r = 1
        relay.Let(
            r,
            relay.RefCreate(relay.const(1)),
            relay.Tuple([
                # should be 1
                relay.RefRead(r),
                # set r to 2 and read back
                seq(relay.RefWrite(r, relay.const(2)), relay.RefRead(r)),
                # set r to 3 and read back
                seq(relay.RefWrite(r, relay.const(3)), relay.RefRead(r)),
                # set r to 4 and read as first arg to f
                # set r to 5 and read as second arg to f
                # f returns its first argument, so the call evaluates to 4
                f(
                    seq(relay.RefWrite(r, relay.const(4)), relay.RefRead(r)),
                    seq(relay.RefWrite(r, relay.const(5)), relay.RefRead(r)),
                ),
                # read back 5
                relay.RefRead(r),
            ]),
        ),
    )

    tup_val = run_as_python(expr)
    assert_adt_len(tup_val, 5)
    assert_tensor_value(tup_val[0], 1)
    assert_tensor_value(tup_val[1], 2)
    assert_tensor_value(tup_val[2], 3)
    assert_tensor_value(tup_val[3], 4)
    assert_tensor_value(tup_val[4], 5)
Example #6
def test_adt_match_type_annotations():
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)

    # the only type annotation is inside the match pattern var
    # but that should be enough info
    tt = relay.TensorType((2, 2), "float32")
    x = relay.Var("x")
    mv = relay.Var("mv", tt)
    match = relay.Match(
        constructor(x),
        [
            relay.Clause(
                relay.PatternConstructor(constructor, [relay.PatternVar(mv)]),
                relay.Tuple([]))
        ],
    )

    func = relay.Function([x], match)
    ft = run_infer_type(func, mod)
    assert ft.checked_type == relay.FuncType([tt], relay.TupleType([]))
Example #7
def test_tuple_output_exec():
    """Test C codegen and runtime for a subgraph with a tuple output"""
    a = relay.var('a', shape=(10, 10), dtype='float32')
    b = relay.var('b', shape=(10, 10), dtype='float32')
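    # annotate the region so PartitionGraph offloads it to the external 'ccompiler' codegen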
    ba = relay.annotation.compiler_begin(a, 'ccompiler')
    bb = relay.annotation.compiler_begin(b, 'ccompiler')
    add = relay.add(ba, bb)
    sub = relay.subtract(ba, bb)
    out = relay.Tuple((add, sub))
    eout = relay.annotation.compiler_end(out, 'ccompiler')
    func = relay.Function([a, b], eout)
    mod = tvm.IRModule()
    mod["main"] = func
    mod = transform.PartitionGraph()(mod)

    a_data = np.random.rand(10, 10).astype('float32')
    b_data = np.random.rand(10, 10).astype('float32')

    check_result(mod, {'a': a_data, 'b': b_data},
                 [(10, 10), (10, 10)],
                 [(a_data + b_data), (a_data - b_data)])
Example #8
def expected(x, w1, w2, w3, b1, b2, b3):
    # use a fixed order of args so alpha equal check can pass
    s1 = w1.type_annotation.shape[1]
    s2 = w2.type_annotation.shape[1]
    s3 = w3.type_annotation.shape[1]
    args = [x, w1, w2, w3, b1, b2, b3]
    w = relay.concatenate((w1, w2, w3), axis=1)
    b = relay.concatenate((b1, b2, b3), axis=-1)
    y = relay.nn.batch_matmul(x, w)
    y = relay.add(y, b)
    y1 = relay.strided_slice(
        y, begin=[0, 0, 0], end=[-1, -1, s1], strides=[1, 1, 1], slice_mode="size"
    )
    y2 = relay.strided_slice(
        y, begin=[0, 0, s1], end=[-1, -1, s2], strides=[1, 1, 1], slice_mode="size"
    )
    y3 = relay.strided_slice(
        y, begin=[0, 0, s1 + s2], end=[-1, -1, s3], strides=[1, 1, 1], slice_mode="size"
    )
    y = relay.Tuple((y1, y2, y3))
    return relay.Function(args, y)
Example #9
def test_checkpoint():
    inputs = [relay.var("x{}".format(i), shape=(1, )) for i in range(4)]
    output = relay.multiply(relay.add(inputs[0], inputs[1]),
                            relay.add(inputs[2], inputs[3]))
    check_grad(relay.Function(inputs, relay.annotation.checkpoint(output)))

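    # second case: checkpoint only the first element of a let-bound tuple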
    scope = relay.ScopeBuilder()
    out_tuple = scope.let(
        "out_tuple",
        relay.Tuple([
            relay.add(inputs[0], inputs[1]),
            relay.multiply(inputs[2], inputs[3])
        ]),
    )
    scope.ret(
        relay.subtract(
            relay.annotation.checkpoint(relay.TupleGetItem(out_tuple, 0)),
            relay.TupleGetItem(out_tuple, 1),
        ))
    out_single = scope.get()
    check_grad(relay.Function(inputs, out_single))
def test_compile_fused_identity_cast():
    # a fused function that would be optimized to identity
    x = relay.var("x", shape=[16], dtype="float32")
    y = relay.cast(x, "float32")
    func1 = relay.Function([x], y).with_attr("Primitive", 1)

    # a fused function with param pass-through
    x = relay.var("x", shape=[16], dtype="float32")
    y = relay.add(x, relay.const(3.14, "float32"))
    func2 = relay.Function([x], relay.Tuple([x, y])).with_attr("Primitive", 1)

    x_global = relay.var("xx", shape=[16], dtype="float32")
    tup = func2(x_global)
    y_global = func1(relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1))

    mod = tvm.IRModule.from_expr(relay.Function([x_global], y_global))
    for target, device in tvm.testing.enabled_targets():
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, _ = relay.build(mod, target=target)
            executor = graph_executor.create(graph, lib, device=device)
            executor.run()
Example #11
    def expected(dshape):
        x = relay.var("x", shape=dshape)
        pooled = relay.nn.max_pool2d(x,
                                     pool_size=(2, 2),
                                     strides=(2, 2),
                                     padding=(0, 0))
        f0 = relay.Function([x], pooled)

        p0 = relay.var("p0",
                       shape=(dshape[0], dshape[1], dshape[2] // 2,
                              dshape[3] // 2))
        p1 = relay.var("p1",
                       shape=(dshape[0], dshape[1], dshape[2], dshape[3]))
        upsampled = relay.nn.upsampling(p0, scale_h=2, scale_w=2, layout="NCHW")
        out = relay.Tuple((upsampled, p1))
        f1 = relay.Function([p0, p1], out)

        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        z = relay.Call(f1, [y, x])
        return relay.Function([x], z)
    def create_graph():
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3),
                                                      "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16, ), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16, ), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16, ), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16, ), "float32"))

        data_cb = compiler_begin(data, 'test_target')
        weight_cb = compiler_begin(weight, 'test_target')
        bn_gamma_cb = compiler_begin(bn_gamma, 'test_target')
        bn_beta_cb = compiler_begin(bn_beta, 'test_target')
        bn_mean_cb = compiler_begin(bn_mean, 'test_target')
        bn_var_cb = compiler_begin(bn_var, 'test_target')

        conv_o = relay.nn.conv2d(data=data_cb,
                                 weight=weight_cb,
                                 kernel_size=(3, 3),
                                 channels=16,
                                 padding=(1, 1))

        bn_o = relay.nn.batch_norm(conv_o, bn_gamma_cb, bn_beta_cb, bn_mean_cb,
                                   bn_var_cb)

        relu_o = relay.nn.relu(bn_o[0])
        relu_o_ce = compiler_end(relu_o, 'test_target')

        # batch_norm returns (output, moving_mean, moving_var)
        bn_omean = bn_o[1]
        bn_omean_ce = compiler_end(bn_omean, 'test_target')
        bn_ovar = bn_o[2]
        bn_ovar_ce = compiler_end(bn_ovar, 'test_target')

        dummy_mean_abs = relay.abs(bn_omean_ce)
        dummy_ovar_abs = relay.abs(bn_ovar_ce)
        dummy_tuple = relay.Tuple((relu_o_ce, dummy_mean_abs, dummy_ovar_abs))

        func = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mean, bn_var], dummy_tuple)
        return func
Example #13
        def annotated():
            add = relay.add(x, y)
            _add = relay.annotation.on_device(add, dev_ctx)
            sqrt = relay.sqrt(add)
            _sqrt = relay.annotation.on_device(sqrt, dev_ctx)
            log = relay.log(add)
            _log = relay.annotation.on_device(log, dev_ctx)
            subtract = relay.subtract(sqrt, log)
            _subtract = relay.annotation.on_device(subtract, dev_ctx)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, dev_ctx)

            func = relay.Function(
                [x, y],
                relay.Tuple(
                    tvm.convert([_add, _sqrt, _log, _subtract, _exp, exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       cpu_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[5]),
                                  func.body[5])
Example #14
    def expected(dshape):
        x = relay.var("x", shape=dshape)
        pooled = relay.nn.max_pool2d(x,
                                     pool_size=(2, 2),
                                     strides=(2, 2),
                                     padding=(0, 0))
        f0 = relay.Function([x], pooled)

        p0 = relay.var("p0",
                       shape=(dshape[0], dshape[1], dshape[2] // 2,
                              dshape[3] // 2))
        upsampled = relay.nn.upsampling(p0,
                                        scale_h=2,
                                        scale_w=2,
                                        layout="NCHW")
        f1 = relay.Function([p0], upsampled)

        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        z = relay.Call(f1, [y])
        tup = relay.Tuple((z, x))
        return relay.Function([x], tup)
Example #15
def test_match_effect_exactly_once():
    mod = tvm.IRModule()
    p = Prelude(mod)

    # the list should be of length 1,
    # unless we mistakenly execute the data clause more than once
    r = relay.Var('r')
    data = seq(relay.RefWrite(r, p.cons(relay.Tuple([]), relay.RefRead(r))), relay.RefRead(r))
    match = relay.Let(
        r, relay.RefCreate(p.nil()),
        relay.Match(data, [
            relay.Clause(relay.PatternConstructor(p.nil, []), relay.const(0)),
            relay.Clause(
                relay.PatternConstructor(
                    p.cons,
                    [relay.PatternWildcard(), relay.PatternConstructor(p.nil, [])]),
                relay.const(1)),
            relay.Clause(relay.PatternWildcard(), relay.const(2))
        ]))

    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 1)
Example #16
def test_func():
    x = relay.var('x', shape=(1,), dtype='float32')  # , a)
    y = relay.var('y', shape=(1,), dtype='float32')  # , a)
    z = relay.var('z', shape=(1,), dtype='float32')  # , a)
    x2 = relay.add(x, x)
    func_a = relay.Function([y], relay.add(x2, y))  # , a, [a])
    func_b = relay.Function([z], relay.add(x2, z))  # , a, [a])
    body = relay.Tuple([func_a, func_b])
    body = relay.Function([x], body)
    """
    fn (%x: Tensor[(1), float32]) {
      %1 = fn (%y: Tensor[(1), float32]) {
        %0 = add(%x, %x);
        add(%0, %y)
      };
      %2 = fn (%z: Tensor[(1), float32]) {
        add(%0, %z)
      };
      (%1, %2)
    }
    """
    check_basic_block_normal_form(body)
Example #17
    def expected():
        def create_external_func1(mod_, compiler_name, symbol_name):
            x_int = relay.var("x_int", shape=(10, 10))

            p0 = relay.nn.relu(x_int)
            q0 = relay.tanh(x_int)

            # reshapes
            p0_reshaped = relay.reshape(p0, newshape=100)
            q0_reshaped = relay.reshape(q0, newshape=100)
            ofms = relay.concatenate((p0_reshaped, q0_reshaped), 0)

            f1 = relay.Function([x_int], ofms)
            f1 = set_func_attr(f1, compiler_name, symbol_name)
            glb_f1 = relay.GlobalVar(symbol_name)
            mod_[glb_f1] = f1
            mod_ = relay.transform.InferType()(mod_)
            return glb_f1, mod_

        mod = tvm.IRModule()
        x = relay.var("x", shape=(10, 10))
        glb_symbol_f1, mod = create_external_func1(mod, "ethosu", "ethosu_0")
        ofms = relay.Call(glb_symbol_f1, [x])

        # splits
        (p0_flat, q0_flat) = relay.split(ofms, [100])
        # reshapes
        p0_flat_reshaped = relay.reshape(p0_flat, newshape=(10, 10))
        q0_flat_reshaped = relay.reshape(q0_flat, newshape=(10, 10))
        # original output
        tuple_out = relay.Tuple([p0_flat_reshaped, q0_flat_reshaped])

        p0 = relay.TupleGetItem(tuple_out, 0)
        q0 = relay.TupleGetItem(tuple_out, 1)
        r = relay.concatenate((p0, q0), axis=0)
        main = relay.Function([x], r)
        mod["main"] = main
        mod = relay.transform.InferType()(mod)
        return mod
Example #18
def expected(x, w1, w2, w3, j):
    args = [x, w1, w2, w3]
    # concatenate the weights so a single dense op computes all three branches
    w_stacked = relay.concatenate((w1, w2, w3), axis=0)
    y = relay.nn.dense(x, w_stacked, units=6 * j)
    strides = [1, 1]
    # slice_mode="size": end gives the extent of each slice, not a stop index
    y1 = relay.strided_slice(y,
                             begin=[0, 0],
                             end=[-1, j],
                             strides=strides,
                             slice_mode="size")
    y2 = relay.strided_slice(y,
                             begin=[0, j],
                             end=[-1, 2 * j],
                             strides=strides,
                             slice_mode="size")
    y3 = relay.strided_slice(y,
                             begin=[0, 3 * j],
                             end=[-1, 3 * j],
                             strides=strides,
                             slice_mode="size")
    y = relay.Tuple((y1, y2, y3))
    return relay.Function(args, y)
Example #19
def test_higher_order_return():
    x = relay.var("x", shape=(1, ), dtype="float32")  # , a)
    y = relay.var("y", shape=(1, ), dtype="float32")  # , a)
    z = relay.var("z", shape=(1, ), dtype="float32")  # , a)
    x2 = relay.add(x, x)
    func_a = relay.Function([y], relay.add(x2, y))  # , a, [a])
    func_b = relay.Function([z], relay.add(x2, z))  # , a, [a])
    body = relay.Tuple([func_a, func_b])
    body = relay.Function([x], body)
    """
    fn (%x: Tensor[(1), float32]) {
      %1 = fn (%y: Tensor[(1), float32]) {
        %0 = add(%x, %x);
        add(%0, %y)
      };
      %2 = fn (%z: Tensor[(1), float32]) {
        add(%0, %z)
      };
      (%1, %2)
    }
    """
    check_basic_block_normal_form(body)
Example #20
def get_model(shape, splits, axis):
    a = relay.var("a", shape=shape, dtype="uint8")
    split = relay.op.split(a, indices_or_sections=splits, axis=axis)
    # quantization parameters: zero point 1, scale 0.5
    zeroi = relay.const(1, "int32")
    zerof = relay.const(0.5, "float32")
    con1 = relay.qnn.op.concatenate(
        [split[0], split[1]],
        input_scales=[zerof] * 2,
        input_zero_points=[zeroi] * 2,
        output_scale=zerof,
        output_zero_point=zeroi,
        axis=axis,
    )
    con2 = relay.qnn.op.concatenate(
        [split[2], split[3]],
        input_scales=[zerof] * 2,
        input_zero_points=[zeroi] * 2,
        output_scale=zerof,
        output_zero_point=zeroi,
        axis=axis,
    )
    return relay.Tuple((con2, con1))
def expected(x, w1, w2, w3, w4, channels1, channels2, channels3, channels4):
    # use a fixed order of args so alpha equal check can pass
    args = [x, w1, w2, w3, w4]
    w = relay.concatenate((w1, w2, w4), axis=0)
    y = relay.nn.conv2d(x, w, channels=channels1 + channels2 + channels4)
    y1 = relay.strided_slice(
        y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
    )
    y2 = relay.strided_slice(
        y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
    )
    y3 = relay.nn.conv2d(x, w3)
    y4 = relay.strided_slice(
        y,
        begin=[0, channels1 + channels2],
        end=[-1, channels4],
        strides=[1, 1],
        slice_mode="size",
    )
    y5 = relay.nn.max_pool2d(x)
    y = relay.Tuple((y1, y2, y3, y4, y5))
    return relay.Function(args, y)
Example #22
def before():
    x = relay.var("x", shape=(1, 56, 56, 64))
    bias = relay.var("bias", shape=(64,))
    weight = relay.var("weight", shape=(3, 3, 64, 64))
    y = relay.nn.conv2d(
        x,
        weight,
        channels=64,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    y = relay.nn.bias_add(y, bias, axis=3)
    # a useless tuple, which will be eliminated
    y = relay.Tuple([y])[0]
    y = relay.nn.relu(y)
    y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC")
    y = relay.cast(y, "int32")
    y = relay.nn.batch_flatten(y)
    y = relay.Function(analysis.free_vars(y), y)
    return y
Example #23
def get_net(iterations, num_hidden, batch_size=1, dtype="float32"):
    """Constructs an unrolled RNN with LSTM cells"""
    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4 * num_hidden,), dtype)

    state_type = relay.TupleType([input_type, input_type])
    cell_type = relay.TupleType([input_type, state_type])

    builder = relay.ScopeBuilder()

    zeros = builder.let(("zeros", input_type), relay.zeros((batch_size, num_hidden), dtype))
    init_states = builder.let(("init_states", state_type), relay.Tuple([zeros, zeros]))

    states = init_states
    out = None

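    # unroll the RNN: each step binds a fresh LSTM cell and threads the state tuple through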
    for i in range(iterations):
        inputs = relay.Var("data", input_type)
        i2h_weight = relay.Var("i2h_%s_weight" % i, weight_type)
        i2h_bias = relay.Var("i2h_%i_bias" % i, bias_type)
        h2h_weight = relay.Var("h2h_%s_weight" % i, weight_type)
        h2h_bias = relay.Var("h2h_%s_bias" % i, bias_type)

        cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_%s" % i)

        call = builder.let(
            ("call_%s" % i, cell_type),
            relay.Call(cell_fn, [inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias]),
        )
        new_out = builder.let(("out_%s" % i, input_type), relay.TupleGetItem(call, 0))
        new_states = builder.let(("states_%s" % i, state_type), relay.TupleGetItem(call, 1))
        states = new_states
        out = new_out

    builder.ret(out)
    body = builder.get()
    args = relay.analysis.free_vars(body)
    return relay.Function(args, body, input_type)
def test_reshape_nop():
    # test that reshape can be turned into nop
    x = relay.var("x", shape=(10, 4))
    xx = relay.abs(x)
    y = relay.expand_dims(xx, axis=1)
    t0 = relay.reshape(y, (1, 40))
    t1 = relay.abs(y)

    z0 = relay.reshape(t0, (2, 20))
    z1 = relay.sqrt(t1)
    z2 = relay.reshape(t1, (1, 40))

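    # z0 and z2 only rearrange existing buffers, so they should compile to __nop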
    func = relay.Function([x], relay.Tuple([z0, z1, z2]))
    x_data = np.random.rand(10, 4).astype("float32")
    graph = relay.build(tvm.IRModule.from_expr(func), "llvm")
    graph_json_str = graph.get_graph_json()

    graph_json = json.loads(graph_json_str)

    # reshape must force sharing memory
    storage_ids = graph_json["attrs"]["storage_id"][1]
    assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)
    assert graph_json["nodes"][2]["attrs"]["func_name"] == "__nop"
    assert graph_json["nodes"][5]["attrs"]["func_name"] == "__nop"

    gmod = graph_executor.GraphModule(graph["default"](tvm.cpu(0)))

    gmod.set_input(x=x_data)
    gmod.run()
    z0_np = x_data.reshape(2, 20)
    z1_np = np.sqrt(np.abs(x_data.reshape(
        10,
        1,
        4,
    )))
    z2_np = np.abs(x_data).reshape(1, 40)
    tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)
    tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)
    tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)
Example #25
    def expected(dshape):
        p0 = relay.var("p0", shape=dshape)
        concat = gen_consecutive_tuple(p0)
        f0 = relay.Function([p0], concat)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        p01 = relay.var("p01", shape=(1, dshape[1] * 9, dshape[2], dshape[3]))
        pooled = relay.nn.max_pool2d(p01, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        out = relay.add(pooled, relay.const(1, "float32"))
        f1 = relay.Function([p01], out)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        p02 = relay.var("p02", shape=(1, dshape[1] * 9, dshape[2] // 2, dshape[3] // 2))
        out = relay.add(p02, relay.const(1, "float32"))
        f2 = relay.Function([p02], out)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        z = relay.Call(f1, [y])
        z2 = relay.Call(f2, [z])

        return relay.Function([x], relay.Tuple((z, z2)))
Example #26
def expected():
    x = relay.var("x", shape=(1, 64, 56, 56))
    weight1 = relay.var('weight1')
    weight2 = relay.var('weight2')
    y = relay.layout_transform(x, "NCHW", "NCHW16c")
    y = relay.nn.conv2d(y, weight1,
                        channels=32,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        data_layout="NCHW16c")
    y = relay.nn.relu(y)
    y1 = relay.nn.conv2d(y, weight2,
                         channels=32,
                         kernel_size=(3, 3),
                         padding=(1, 1),
                         data_layout='NCHW16c')
    y1 = relay.nn.relu(y1)
    y1 = relay.layout_transform(y1, "NCHW16c", "NCHW")
    y2 = relay.layout_transform(y, "NCHW16c", "NCHW")
    y2 = relay.nn.batch_flatten(y2)
    ret = relay.Tuple([y1, y2])
    y = relay.Function(free_vars(ret), ret)
    return y
Example #27
def test_simple_graph():
    # A module with two subgraphs
    mod = tvm.IRModule()

    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    z0 = x0 + y0
    z1 = x0 - y0
    z2 = relay.Tuple((z0, z1))
    f0 = relay.Function([x0, y0], z2)
    f0 = f0.with_attr("Compiler", "test_graph")
    g0 = relay.GlobalVar("g0")
    mod[g0] = f0

    x1 = relay.var("x1", shape=(8, 8))
    y1 = relay.var("y1", shape=(8, 8))
    z1 = x1 - y1
    f1 = relay.Function([x1, y1], z1)
    f1 = f1.with_attr("Compiler", "test_graph")
    g1 = relay.GlobalVar("g1")
    mod[g1] = f1

    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    z = relay.var("z", shape=(8, 8))
    c0 = relay.Call(g0, [x, y])
    c1 = relay.Call(g1, [relay.TupleGetItem(c0, 0), z])
    fm = relay.Function([x, y, z], c1)
    mod["main"] = fm

    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    z_data = np.random.rand(8, 8).astype("float32")
    data = get_calibration_data(mod, {"x": x_data, "y": y_data, "z": z_data})

    # Check the number and orders
    check_data_size(mod, data)
Example #28
def get_network():
    # Get a list of modules representing subgraphs.
    mods = []
    dshape = (3, 3)
    data = relay.var("data_0", relay.TensorType(dshape, "float32"))
    data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
    data_net1_output_1 = relay.var("data_0",
                                   relay.TensorType(dshape, "float32"))
    data_net1_output_2 = relay.var("data_1",
                                   relay.TensorType(dshape, "float32"))
    data_net2_output_1 = relay.var("data_0",
                                   relay.TensorType(dshape, "float32"))
    mvalue1 = np.full((1), 1).astype("float32")
    mvalue2 = np.full((1), 2).astype("float32")
    mvalue3 = np.full((1), 3).astype("float32")
    mv1 = relay.Constant(tvm.nd.array(mvalue1))
    mv2 = relay.Constant(tvm.nd.array(mvalue2))
    mv3 = relay.Constant(tvm.nd.array(mvalue3))
    # There are three outputs in the first model.
    net1_output1 = relay.add(data, mv1)
    net1_output2 = relay.subtract(data, mv2)
    net1_output3 = relay.concatenate((net1_output1, net1_output2), axis=0)
    (net1_output3, _) = relay.split(net1_output3,
                                    indices_or_sections=2,
                                    axis=0)
    net1_output3 = relay.add(net1_output3, mv2)
    # The second model uses the output named net1_output3 of the first model as the first input,
    # the second input of the second model is data21.
    net2 = relay.add(net1_output3, mv2)
    net2 = relay.add(net2, data21)
    net2_output = relay.add(net2, mv3)
    # The third model uses the output named net2_output of the second model as the first input
    # and uses the output named net1_output2 of the first model as the second input.
    net3 = relay.multiply(net2_output, mv3)
    net3 = relay.add(net3, net1_output2)
    return tvm.IRModule.from_expr(
        relay.Function([data, data21], relay.Tuple([net3]))), dshape
Example #29
def before():
    x = relay.var("x", shape=(1, 56, 56, 64))
    weight1 = relay.var('weight1', shape=(3, 3, 64, 32))
    weight2 = relay.var('weight2', shape=(3, 3, 32, 32))
    y = relay.nn.conv2d(x,
                        weight1,
                        channels=32,
                        kernel_size=(3, 3),
                        padding=(1, 1),
                        data_layout='NHWC',
                        kernel_layout='HWIO')
    y = relay.nn.relu(y)
    y1 = relay.nn.conv2d(y,
                         weight2,
                         channels=32,
                         kernel_size=(3, 3),
                         padding=(1, 1),
                         data_layout='NHWC',
                         kernel_layout='HWIO')
    y1 = relay.nn.relu(y1)
    y2 = relay.nn.batch_flatten(y)
    ret = relay.Tuple([y1, y2])
    y = relay.Function(analysis.free_vars(ret), ret)
    return y
def test_depthwise_conv2d():
    batch_size = 1
    dshape = (batch_size, 64, 56, 56)
    weight_conv = relay.var("weight_depthwiseconv", shape=(64, 1, 3, 3))
    data1 = relay.var("data1", shape=dshape)
    data2 = relay.var("data2", shape=dshape)
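    # groups equal to the channel count makes these convolutions depthwise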
    depthwise_conv2d_1 = relay.nn.conv2d(data1,
                                         weight_conv,
                                         kernel_size=(3, 3),
                                         padding=(1, 1),
                                         groups=64)
    depthwise_conv2d_2 = relay.nn.conv2d(data2,
                                         weight_conv,
                                         kernel_size=(3, 3),
                                         padding=(1, 1),
                                         groups=64)
    add = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    func = relay.Function(
        [data1, data2, weight_conv],
        relay.Tuple(tvm.convert([depthwise_conv2d_1, depthwise_conv2d_2,
                                 add])))
    func = relay.ir_pass.infer_type(func)
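    # each depthwise conv contributes np.prod(dshape) * 3 * 3 multiply-accumulates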
    compute_count = relay.ir_pass.get_total_mac_number(func)
    assert compute_count == 2 * np.prod(dshape) * 3 * 3