Example #1
def test_let():
    assert parses_as(
        "let %x = 1; ()",
        relay.Let(
            X,
            relay.const(1),
            UNIT
        )
    )

    assert parses_as(
        """
        let %x = 1;
        let %y = 2;
        ()
        """,
        relay.Let(
            X,
            relay.const(1),
            relay.Let(
                Y,
                relay.const(2),
                UNIT
            )
        )
    )
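Example #1 (and the other parser tests below: #3, #8, #13, #20, #21) relies on a parses_as helper and shared fixtures such as X, Y, UNIT, int32 and X_ANNO that are defined elsewhere in the test file. A minimal sketch of that assumed setup, using the old relay.fromtext parser and alpha_equal:

from tvm import relay
from tvm.relay.ir_pass import alpha_equal

SEMVER = "v0.0.1\n"                      # assumed text-format version header
int32 = relay.scalar_type("int32")
UNIT = relay.Tuple([])                   # the unit value ()
X = relay.Var("x")
Y = relay.Var("y")
X_ANNO = relay.Var("x", int32)

def parses_as(code, expected):
    # parse the Relay text and compare it to the expected AST up to
    # alpha-equivalence (consistent renaming of bound variables)
    return alpha_equal(relay.fromtext(SEMVER + code), expected)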
Example #2
def test_recursion():
    """
    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)

    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = relay.Module()
    mod[f] = relay.Function([n, data], sb.get())
    assert "@f(%1, %2) /* ty=float32 */" in mod.astext()
    assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32)
Example #3
def test_function_type():
    assert parses_as(
        """
        let %_: fn () -> int32 = fn () -> int32 { 0 }; ()
        """,
        relay.Let(
            relay.Var("_", relay.FuncType([], int32, [], [])),
            relay.Function([], relay.const(0), int32, []),
            UNIT
        )
    )

    assert parses_as(
        """
        let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; ()
        """,
        relay.Let(
            relay.Var("_", relay.FuncType([int32], int32, [], [])),
            relay.Function([relay.Var("x", int32)], relay.const(0), int32, []),
            UNIT
        )
    )

    assert parses_as(
        """
        let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; ()
        """,
        relay.Let(
            relay.Var("_", relay.FuncType([int32, int32], int32, [], [])),
            relay.Function([relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, []),
            UNIT
        )
    )
Example #4
 def gen_intermediate_tuple(x):
     y1 = relay.add(x, relay.const(1, "float32"))
     y2 = relay.add(x, relay.const(1, "float32"))
     y3 = relay.add(x, relay.const(1, "float32"))
     concat = relay.concatenate((y1, y2, y3), axis=1)
     out = relay.add(concat, relay.const(1, "float32"))
     return out
Example #5
 def before(x):
     concat = gen_consecutive_tuple(x)
     pooled = relay.nn.max_pool2d(concat, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
     out = relay.add(pooled, relay.const(1, "float32"))
     out2 = relay.add(out, relay.const(1, "float32"))
     out_tup = relay.Tuple((out, out2))
     return relay.Function(relay.ir_pass.free_vars(out_tup), out_tup)
Example #6
def test_filter():
    a = relay.TypeVar("a")
    expected_type = relay.FuncType([
        relay.FuncType([a], relay.scalar_type("bool")), l(a)
    ], l(a), [a])
    assert mod[filter].checked_type == expected_type

    x = relay.Var("x", nat())
    greater_than_one = relay.Function(
        [x],
        relay.Match(x, [
            relay.Clause(
                relay.PatternConstructor(s, [
                    relay.PatternConstructor(
                        s, [relay.PatternWildcard()])
                ]),
                relay.const(True)),
            relay.Clause(relay.PatternWildcard(), relay.const(False))
        ]))
    res = intrp.evaluate(
        filter(greater_than_one,
               cons(build_nat(1),
                    cons(build_nat(1),
                         cons(build_nat(3),
                              cons(build_nat(1),
                                   cons(build_nat(5),
                                        cons(build_nat(1),
                                             nil()))))))))
    filtered = to_list(res)
    assert len(filtered) == 2
    assert count(filtered[0]) == 3
    assert count(filtered[1]) == 5
Example #7
def test_list_constructor():
    # TODO(wweic): implement pattern match to support this test
    def to_list(o):
        if isinstance(o, tvm.relay.backend.interpreter.TensorValue):
            return [o.data.asnumpy().tolist()]
        if isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
            result = []
            for f in o.fields:
                result.extend(to_list(f))
            return result

    mod = relay.Module()
    p = Prelude(mod)

    nil = p.nil
    cons = p.cons
    l = p.l

    one2 = cons(relay.const(1), nil())
    one3 = cons(relay.const(2), one2)
    one4 = cons(relay.const(3), one3)
    f = relay.Function([], one4)

    mod[mod.entry_func] = f

    result = veval(mod)()
    obj = to_list(result)
    tvm.testing.assert_allclose(obj, np.array([3,2,1]))
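This test (and Example #12 below) depends on a veval helper that runs code on the Relay virtual machine. One plausible shape for it, assuming the old relay.create_executor API with the "vm" backend:

import tvm
from tvm import relay

def veval(f, *args, ctx=tvm.cpu()):
    # evaluate an expression, or the entry function of a module, on the Relay VM
    if isinstance(f, relay.Expr):
        ex = relay.create_executor("vm", ctx=ctx)
        return ex.evaluate(f)(*args) if args else ex.evaluate(f)
    ex = relay.create_executor("vm", mod=f, ctx=ctx)
    entry = ex.evaluate(f[f.entry_func])
    return entry(*args) if args else entry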
Example #8
def test_tuple_type():
    assert parses_as(
        """
        let %_: () = (); ()
        """,
        relay.Let(
            relay.Var("_", relay.TupleType([])),
            UNIT,
            UNIT
        )
    )

    assert parses_as(
        """
        let %_: (int32,) = (0,); ()
        """,
        relay.Let(
            relay.Var("_", relay.TupleType([int32])),
            relay.Tuple([relay.const(0)]),
            UNIT
        )
    )

    assert parses_as(
        """
        let %_: (int32, int32) = (0, 1); ()
        """,
        relay.Let(
            relay.Var("_", relay.TupleType([int32, int32])),
            relay.Tuple([relay.const(0), relay.const(1)]),
            UNIT
        )
    )
Example #9
 def before():
     c = relay.const(c_data)
     x = relay.var("x")
     y = relay.add(c, c)
     y = relay.multiply(y, relay.const(2, "float32"))
     y = relay.add(x, y)
     z = relay.add(y, c)
     return relay.Function([x], z)
Example #10
 def expected():
     x = relay.var("x", shape=(1, 16))
     y = relay.nn.relu(x)
     y1 = relay.add(y, relay.const(1.0, "float32"))
     y2 = relay.add(y, relay.const(1.0, "float32"))
     y = relay.add(y1, y2)
     f = relay.Function([x], y)
     return f
Example #11
def test_equal():
    i = relay.var('i', shape=[], dtype='int32')
    j = relay.var('i', shape=[], dtype='int32')
    z = relay.equal(i, j)
    func = relay.Function([i, j], z, ret_type=relay.TensorType([], 'bool'))
    i_data = relay.const(0)
    j_data = relay.const(0)
    check_eval(func, [i_data, j_data], True)
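This test, along with Examples #18 and #22, assumes a check_eval helper defined elsewhere in the test module. A minimal sketch of what it could look like, using the Relay interpreter via relay.create_executor:

import tvm
from tvm import relay

def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
    # evaluate `expr` applied to `args` on the interpreter and compare
    # the numeric result against `expected_result`
    intrp = relay.create_executor(mod=mod, ctx=tvm.cpu(), target="llvm")
    value = intrp.evaluate(expr)(*args)
    tvm.testing.assert_allclose(value.asnumpy(), expected_result, rtol=rtol)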
Example #12
def test_closure():
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    f = relay.Function([x], x + y)
    ff = relay.Function([y], f)
    clo = ff(relay.const(1.0))
    main = clo(relay.const(2.0))
    res = veval(main)
    tvm.testing.assert_allclose(res.asnumpy(), 3.0)
Example #13
def test_graph():
    assert parses_as(
        "%0 = (); %1 = 1; (%0, %0, %1)",
        relay.Tuple([UNIT, UNIT, relay.const(1)])
    )

    assert not parses_as(
        "%0 = (); %1 = 1; (%0, %0, %1)",
        relay.Tuple([relay.Tuple([]), relay.Tuple([]), relay.const(1)])
    )
Example #14
def test_let_alpha_equal():
    tt1 = relay.TensorType((), "float32")
    tt2 = relay.TensorType((), "int8")
    v1 = relay.Var("v1")
    v1_wtype = relay.Var("v1", tt1)
    v2 = relay.Var("v2")
    v3 = relay.Var("v3")

    let = relay.Let(v1, relay.const(2), v1)
    mapped = relay.Let(v2, relay.const(2), v2)
    assert alpha_equal(let, mapped)

    mismatched_var = relay.Let(v2, relay.const(2), v3)
    assert not alpha_equal(let, mismatched_var)

    different_value = relay.Let(v2, relay.const(3), v2)
    assert not alpha_equal(let, different_value)

    different_body = relay.Let(v2, relay.const(3), relay.const(12))
    assert not alpha_equal(let, different_body)

    # specified types must match

    let_with_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
    same_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
    assert alpha_equal(let_with_type, same_type)
    assert not alpha_equal(let, let_with_type)
    v2 = relay.Var("v1", tt2)
    different_type = relay.Let(v2, relay.const(2), v2)
    assert not alpha_equal(let_with_type, different_type)
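These alpha-equality tests (this one and Examples #39, #44, #45 and #59) assume alpha_equal was imported from the old relay.ir_pass namespace; later TVM versions spell the same check tvm.ir.structural_equal, which is what Example #35 uses. The assumed import is:

from tvm.relay.ir_pass import alpha_equal  # in newer TVM: tvm.ir.structural_equal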
Example #15
def test_let_inlining():
    tup = relay.Tuple([relay.const(0), relay.const(0)])
    x = relay.var("x")
    assert relay.Let(x, tup, tup).astext() == SEMVER + \
        ("%0 = (0, 0)\n"
         "let %x = %0\n"
         "%0")

    assert relay.Let(x, tup, x).astext() == SEMVER + \
        ("let %x = (0, 0)\n"
         "%x")
Example #16
 def before(x):
     inj = relay.squeeze(x)
     y1 = relay.add(inj, relay.const(1, "float32"))
     tmp = relay.squeeze(inj)
     tmp = relay.add(tmp, relay.const(1, "float32"))
     y2 = relay.add(tmp, relay.const(1, "float32"))
     y3 = relay.add(inj, relay.const(1, "float32"))
     concat = relay.concatenate((y1, y2, y3), axis=1)
     out_inj = relay.squeeze(concat)
     out = relay.add(out_inj, relay.const(1, "float32"))
     return relay.Function(relay.ir_pass.free_vars(out), out)
Example #17
 def simple_bn(x, gamma, beta, moving_mean, moving_var,
               axis=1, epsilon=1e-5, shape=None):
     # expect = (x - moving_mean) / sqrt(moving_var + eps) * gamma + beta
     scale = rly.multiply(rly.const(1, 'float32') /
             rly.sqrt(moving_var + rly.const(epsilon, 'float32')), gamma)
     shift = rly.add(
         rly.multiply(rly.negative(moving_mean), scale), beta)
     num_newaxis = len(shape) - (axis + 1)
     if num_newaxis:
         scale = rly.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
         shift = rly.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
     return x * scale + shift
Example #18
def test_ref():
    mod = relay.Module()
    three_with_ref = relay.GlobalVar('three_with_ref')
    i = relay.Var('i')
    iv = relay.Var('iv')
    u = relay.Var('u')
    uv = relay.Var('uv')
    body = relay.add(iv, uv)
    body = relay.Let(uv, relay.RefRead(i), body)
    body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
    body = relay.Let(iv, relay.RefRead(i), body)
    body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
    mod[three_with_ref] = relay.Function([], body)
    check_eval(three_with_ref, [], 3, mod=mod)
Example #19
def test_bind_params():
    x = relay.var("x")
    y = relay.var("y")
    z = relay.add(x, y)
    f = relay.Function([x, y], z)
    fbinded = relay.bind(f, {x: relay.const(1, "float32")})
    fexpected = relay.Function(
        [y],
        relay.add(relay.const(1, "float32"), y))
    assert relay.ir_pass.alpha_equal(fbinded, fexpected)

    zbinded = relay.bind(z, {y: x})
    zexpected = relay.add(x, x)
    assert relay.ir_pass.alpha_equal(zbinded, zexpected)
Example #20
def test_func():
    # 0 args
    assert parses_as(
        "fn () { 0 }",
        relay.Function(
            [],
            relay.const(0),
            None,
            []
        )
    )

    # 1 arg
    assert parses_as(
        "fn (%x) { %x }",
        relay.Function(
            [X],
            X,
            None,
            []
        )
    )

    # 2 args
    assert parses_as(
        "fn (%x, %y) { %x + %y }",
        relay.Function(
            [X, Y],
            relay.add(X, Y),
            None,
            []
        )
    )

    # annotations
    assert parses_as(
        "fn (%x: int32) -> int32 { %x }",
        relay.Function(
            [X_ANNO],
            X_ANNO,
            int32,
            []
        )
    )

    # attributes
    assert parses_as(
        "fn (n=5) { () }",
        relay.Function([], UNIT, None, None, tvm.make.node("DictAttrs", n=relay.const(5)))
    )
Example #21
def test_ifelse():
    assert parses_as(
        """
        if (True) {
            0
        } else {
            1
        }
        """,
        relay.If(
            relay.const(True),
            relay.const(0),
            relay.const(1)
        )
    )
Example #22
def test_simple_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype='int32'))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
    mod[sum_up] = func
    i_data = np.array(10, dtype='int32')
    check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)
Example #23
def test_fold_bwd_relu_fail():
    """testcase where we canont fold because scale can not pass relu"""
    def before(x, conv_weight, out_scale, channels):
        y = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NCHW",
                             padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.multiply(y, out_scale)
        return relay.Function(relay.ir_pass.free_vars(y), y)

    def check(shape, channels, out_scale):
        x =  relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        y1 = before(x, weight, out_scale, channels)
        y1 = relay.ir_pass.infer_type(y1)
        y1_folded = relay.ir_pass.backward_fold_scale_axis(y1)
        assert relay.ir_pass.alpha_equal(y1, y1_folded)

    out_scale = relay.var("in_scale", shape=(4, 1, 1))
    check((4, 4, 10, 10), 4, out_scale)
    out_scale = relay.const(np.random.uniform(size=(4, 1, 1), low=-1.0, high=0.0)).astype("float32")
    check((4, 4, 10, 10), 4, out_scale)
Example #24
 def before():
     x = relay.var("x", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.squeeze(y)
     u = relay.transpose(y, axes=[0, 1])
     w = relay.left_shift(z, u)
     return relay.Function([x], w)
Example #25
 def before():
     c = relay.const(c_data)
     x = relay.var("x")
     y = relay.Tuple([x, c])
     z = relay.add(y[1], c)
     z = relay.add(z, y[0])
     return relay.Function([x], z)
Example #26
 def before(dshape):
     x = relay.var("x", shape=dshape)
     pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
     upsampled = relay.nn.upsampling(pooled, scale=2, layout="NCHW")
     concat = relay.concatenate((upsampled, x), axis=1)
     out = relay.add(concat, relay.const(1, "float32"))
     return relay.Function(relay.ir_pass.free_vars(out), out)
Example #27
 def __init__(self):
     self.a = relay.Var("a")
     self.b = relay.Var("b")
     self.c = relay.Var("c")
     self.d = relay.Var("d")
     self.e = relay.Var("e")
     self.x = relay.Var("x")
     self.y = relay.Var("y")
     self.z = relay.Var("z")
     self.shape = tvm.convert([1, 2, 3])
     self.tt = relay.TensorType(self.shape, "float32")
     self.int32 = relay.TensorType([], "int32")
     self.float32 = relay.TensorType([], "float32")
     self.one = relay.const(1.0)
     self.two = relay.const(2.0)
     self.three = relay.const(3.0)
Example #28
def test_equal():
    i = relay.var('i', shape=[], dtype='int32')
    eq = op.equal(i, relay.const(0, dtype='int32'))
    func = relay.Function([i], eq)
    ft = relay.ir_pass.infer_type(func)

    assert ft.checked_type == relay.FuncType([relay.scalar_type('int32')], relay.scalar_type('bool'))
Example #29
def test_monomorphic_let():
    "Program: let %x = 1; %x"
    sb = relay.ScopeBuilder()
    x = sb.let('x', relay.const(1.0, "float64"))
    sb.ret(x)
    xchecked = relay.ir_pass.infer_type(sb.get())
    assert xchecked.checked_type == relay.scalar_type("float64")
Example #30
 def expected():
     sb = relay.ScopeBuilder()
     x = relay.var("x")
     c_folded = (c_data + c_data)
     t3 = sb.let("t3", relay.add(relay.const(c_folded), x))
     sb.ret(t3)
     return relay.Function([x], sb.get())
Example #31
 def expected():
     data = tvm.nd.array(np.array([1.0, 2.0, 3.0, 1.0, 2.0, 3.0]))
     const = relay.const(data)
     func = relay.Function([], const)
     return func
Example #32
 def after_left(x, elem_op, value):
     return elem_op(relay.const(value, dtype), x)
Example #33
 def expected():
     x = relay.var("x", t)
     c_folded = (c_data + c_data) * 2
     y = relay.add(x, relay.const(c_folded))
     z = relay.add(y, relay.const(c_data))
     return relay.Function([x], z)
Example #34
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output tensor types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """

    # Dilation not supported yet. Return None if dilation is not (1, 1)
    dilation = attrs.get_int_tuple("dilation")
    if not (dilation[0] == 1 and dilation[1] == 1):
        return None

    # No legalization for depthwise convolutions yet.
    groups = attrs.get_int("groups")
    if groups != 1:
        return None

    # Collect the input tensors.
    data_tensor, kernel_tensor = arg_types[0], arg_types[1]
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype

    # Collect the output tensor.
    output_tensor = arg_types[2]

    # Collect the input exprs.
    data, kernel = inputs

    # Get the conv attrs
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    is_int8_inputs = False
    # If both inputs are int8, we can add 128 to make the input dtype uint8 and then adjust the
    # output. This helps pick up Intel VNNI instructions.
    # Original --> C = A (conv) B
    # A and B are int8
    #   C = (A + 128 - 128) (conv) B
    #   C = (A' conv B) - 128 (conv) B
    # where A' = A + 128
    # and 128 (conv) B is basically a reduce on CRS axis for weights.
    if data_tensor.dtype == 'int8' and kernel_tensor.dtype == 'int8':
        is_int8_inputs = True
        padding = attrs.get_int_tuple("padding")
        kh, kw = attrs.get_int_tuple("kernel_size")
        pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))

        if attrs['data_layout'] == 'NHWC' and attrs['kernel_layout'] == 'HWIO':
            adjust_shift = relay.sum(relay.cast(kernel, dtype='int32'),
                                     axis=(0, 1, 2))
            pad_width = ((0, 0), (pt, pb), (pl, pr), (0, 0))
        elif attrs['data_layout'] == 'NCHW' and attrs[
                'kernel_layout'] == 'OIHW':
            pad_width = ((0, 0), (0, 0), (pt, pb), (pl, pr))
            adjust_shift = relay.sum(relay.cast(kernel, dtype='int32'),
                                     axis=(1, 2, 3))
            adjust_shift = relay.expand_dims(adjust_shift,
                                             axis=1,
                                             num_newaxis=2)
        else:
            return None

        data = relay.cast(data, 'int32')
        data = relay.add(data, relay.const(128, 'int32'))
        data = relay.cast(data, 'uint8')

        # Do external padding as pad value has to be 128.
        if not (padding[0] == 0 and padding[1] == 0):
            data = relay.nn.pad(data, pad_width=pad_width, pad_value=128)
        new_attrs['padding'] = (0, 0)

        # The data type is now shifted to uint8
        data_dtype = 'uint8'

        # Multiply 128 to adjust shift.
        adjust_shift = relay.multiply(adjust_shift, relay.const(128, 'int32'))

    # Legalize if the datatypes are suitable for fast Int8 instructions.  Int8 instructions require
    # input channel to be a multiple of 4 and output channels to be a multiple of 16. For input
    # channels, we pad both the inputs and weights input channels. For output channels, we pad the
    # weight and stride_slice the output.
    if is_int8_hw_support(data_dtype, kernel_dtype):
        # Flags to remember if the expr is modified
        ic_modified = False
        oc_modified = False

        # Find the value of input and output channel.
        in_channel = -1
        out_channel = -1
        if attrs['data_layout'] == 'NHWC' and attrs['kernel_layout'] == 'HWIO':
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[3].value
        elif attrs['data_layout'] == 'NCHW' and attrs[
                'kernel_layout'] == 'OIHW':
            in_channel = data_tensor.shape[1].value
            out_channel = kernel_tensor.shape[0].value
        else:
            return None

        if in_channel % 4 != 0:
            new_in_channel = ((in_channel + 4) // 4) * 4
            diff = new_in_channel - in_channel
            if attrs['data_layout'] == 'NHWC' and attrs[
                    'kernel_layout'] == 'HWIO':
                data = relay.nn.pad(data,
                                    pad_width=((0, 0), (0, 0), (0, 0), (0,
                                                                        diff)))
                kernel = relay.nn.pad(kernel,
                                      pad_width=((0, 0), (0, 0), (0, diff),
                                                 (0, 0)))
                ic_modified = True
            elif attrs['data_layout'] == 'NCHW' and attrs[
                    'kernel_layout'] == 'OIHW':
                pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
                data = relay.nn.pad(data, pad_width=pad_width)
                kernel = relay.nn.pad(kernel, pad_width=pad_width)
                ic_modified = True
            else:
                return None

        new_out_channel = out_channel
        if out_channel % 16 != 0:
            new_out_channel = ((out_channel + 16) // 16) * 16
            diff = new_out_channel - out_channel
            if attrs['data_layout'] == 'NHWC' and attrs[
                    'kernel_layout'] == 'HWIO':
                kernel = relay.nn.pad(kernel,
                                      pad_width=((0, 0), (0, 0), (0, 0),
                                                 (0, diff)))
                oc_modified = True
            elif attrs['data_layout'] == 'NCHW' and attrs[
                    'kernel_layout'] == 'OIHW':
                kernel = relay.nn.pad(kernel,
                                      pad_width=((0, diff), (0, 0), (0, 0),
                                                 (0, 0)))
                oc_modified = True
            else:
                return None

        if oc_modified:
            new_attrs['channels'] = new_out_channel
            out = tvm.relay.nn.conv2d(data, kernel, **new_attrs)
            original_out_shape = [x.value for x in output_tensor.shape]
            out = relay.strided_slice(out,
                                      begin=relay.const([0, 0, 0, 0], "int32"),
                                      end=relay.const(original_out_shape,
                                                      "int32"))
        else:
            out = relay.nn.conv2d(data, kernel, **new_attrs)

        if is_int8_inputs:
            out = relay.subtract(out, adjust_shift)

        return out
    return None
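The legalization above only returns a rewritten expression; it takes effect when the Legalize pass visits a module with this function registered as the FTVMLegalize attribute of nn.conv2d (the registration mechanism varies across TVM versions). A hedged sketch of applying the pass, where `func` is a placeholder conv2d workload:

import tvm
from tvm import relay

# `func` is assumed to be some Relay function containing nn.conv2d calls.
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3):
    mod = relay.transform.Legalize()(mod)   # runs registered FTVMLegalize hooks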
Example #35
def test_partition_constant_embedding():
    x = relay.var('x')
    w = relay.var('w')
    wc = relay.const(1)
    b = relay.var('b')

    xf = relay.var('x')
    wf = relay.var('w')
    bf = relay.var('b')
    embeded_func = relay.Function([xf, bf],
                                  conv_bias_relu(xf, wc, bf)).with_attr(
                                      "PartitionedFromPattern",
                                      "nn.conv2d_nn.bias_add_nn.relu_")
    xf = relay.var('x')
    wf = relay.var('w')
    bf = relay.var('b')
    lifted_func = relay.Function([xf, wf, bf],
                                 conv_bias_relu(xf, wf, bf)).with_attr(
                                     "PartitionedFromPattern",
                                     "nn.conv2d_nn.bias_add_nn.relu_")
    relu = conv_bias_relu(x, w, b)
    reluc = conv_bias_relu(x, wc, b)

    # Check lifting of wildcard matches
    pattern = is_op('nn.relu')(is_op('nn.bias_add')(is_op('nn.conv2d')(
        wildcard(), wildcard()), wildcard()))
    assert tvm.ir.structural_equal(lifted_func(x, w, b),
                                   pattern.partition(relu))
    assert tvm.ir.structural_equal(lifted_func(x, wc, b),
                                   pattern.partition(reluc))

    # Check lifting of input matches
    pattern = is_op('nn.relu')(is_op('nn.bias_add')(is_op('nn.conv2d')(
        wildcard(), is_var()), wildcard()))
    assert tvm.ir.structural_equal(lifted_func(x, w, b),
                                   pattern.partition(relu))
    assert tvm.ir.structural_equal(
        reluc, pattern.partition(reluc))  # Constants are not Inputs

    # Check embedding of constant matches
    pattern = is_op('nn.relu')(is_op('nn.bias_add')(is_op('nn.conv2d')(
        wildcard(), is_constant()), wildcard()))
    assert tvm.ir.structural_equal(relu, pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b),
                                   pattern.partition(reluc))

    # Check embedding of constant ExprPatterns
    pattern = is_op('nn.relu')(is_op('nn.bias_add')(is_op('nn.conv2d')(
        wildcard(), is_expr(wc)), wildcard()))
    assert tvm.ir.structural_equal(relu, pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b),
                                   pattern.partition(reluc))

    # Check lifting/embedding of Alt matches
    pattern = is_op('nn.relu')(is_op('nn.bias_add')(is_op('nn.conv2d')(
        wildcard(), is_var() | is_constant()), wildcard()))
    assert tvm.ir.structural_equal(lifted_func(x, w, b),
                                   pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b),
                                   pattern.partition(reluc))

    # Check lifting/embedding of Alt matches with the other ordering
    pattern = is_op('nn.relu')(is_op('nn.bias_add')(is_op('nn.conv2d')(
        wildcard(), is_constant() | is_var()), wildcard()))
    assert tvm.ir.structural_equal(lifted_func(x, w, b),
                                   pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b),
                                   pattern.partition(reluc))
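The partition test above builds its workload with a conv_bias_relu helper that is not shown; judging from the "nn.conv2d_nn.bias_add_nn.relu_" pattern string, it simply chains those three ops. A minimal sketch:

from tvm import relay

def conv_bias_relu(x, w, b):
    # conv2d -> bias_add -> relu, matching the partitioned pattern name
    conv2d = relay.nn.conv2d(x, w)
    bias_add = relay.nn.bias_add(conv2d, b)
    return relay.nn.relu(bias_add)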
Example #36
def test_sum():
    assert mod[sum].checked_type == relay.FuncType(
        [l(relay.scalar_type('int32'))], relay.scalar_type('int32'))
    res = intrp.evaluate(sum(cons(relay.const(1), cons(relay.const(2),
                                                       nil()))))
    assert get_scalar(res) == 3
Example #37
 def before():
     a = relay.const(c_data)
     b = relay.const(c_data)
     y = relay.concatenate((a, b), axis=0)
     return relay.Function([], y)
Example #38
 def before():
     data = tvm.nd.array(np.array([1.0, 2.0, 3.0]))
     const = relay.const(data)
     concat = relay.op.concatenate([const, const], axis=0)
     func = relay.Function([], concat)
     return func
Example #39
def test_constant_alpha_equal():
    x = relay.const(1)
    y = relay.const(2)
    assert alpha_equal(x, x)
    assert not alpha_equal(x, y)
    assert alpha_equal(x, relay.const(1))
Example #40
def test_ref_create():
    r = relay.expr.RefCreate(relay.const(1.0))
    check_visit(r)
Example #41
def test_let():
    x = relay.var('x', shape=())
    value = relay.const(2.0)
    body = x + x
    l = relay.Let(x, value, body)
    check_visit(l)
Example #42
def test_conv2d():
    x = relay.var("x", shape=(1, 3, 224, 224))
    w = relay.const(np.zeros((16, 3, 3, 3), dtype="float32"))
    y = relay.nn.conv2d(x, w, strides=[2, 2], padding=[1, 1, 1, 1], kernel_size=[3, 3])
    func = relay.Function([x], y)
    _construct_model(func)
Example #43
 def pack_before():
     A = relay.var("A", shape=(1, m, k), dtype="float32")
     B = relay.const(B_const, "float32")
     C = relay.nn.batch_matmul(A, B)
     f = relay.Function(relay.analysis.free_vars(C), C)
     return f
Example #44
def test_if_alpha_equal():
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")

    if_sample = relay.If(v1, relay.const(1),
                         relay.Tuple([relay.const(2),
                                      relay.const(3)]))
    same = relay.If(v1, relay.const(1),
                    relay.Tuple([relay.const(2),
                                 relay.const(3)]))
    assert alpha_equal(if_sample, same)

    different_cond = relay.If(v2, relay.const(1),
                              relay.Tuple([relay.const(2),
                                           relay.const(3)]))
    assert not alpha_equal(if_sample, different_cond)

    different_true = relay.If(v1, relay.const(2),
                              relay.Tuple([relay.const(2),
                                           relay.const(3)]))
    assert not alpha_equal(if_sample, different_true)

    different_false = relay.If(v1, relay.const(1), relay.Tuple([]))
    assert not alpha_equal(if_sample, different_false)
Example #45
def test_call_alpha_equal():
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")

    attr1 = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3, 4))
    attr1_same = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3, 4))
    attr2 = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))

    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((), "int8")

    basic_args = [relay.const(1), relay.const(2), v2, relay.Tuple([])]

    # manually writing out args to ensure that args does not rely on
    # pointer equality
    call = relay.Call(
        v1,
        [relay.const(1), relay.const(2), v2,
         relay.Tuple([])], attr1, [tt1])
    same = relay.Call(v1, basic_args, attr1, [tt1])
    assert alpha_equal(call, same)

    different_fn = relay.Call(v2, basic_args, attr1, [tt1])
    assert not alpha_equal(call, different_fn)

    fewer_args = relay.Call(
        v1, [relay.const(1), relay.const(2), v2], attr1, [tt1])
    assert not alpha_equal(call, fewer_args)

    reordered_args = relay.Call(
        v1,
        [relay.const(2), relay.const(1),
         relay.Tuple([]), v2], attr1, [tt1])
    assert not alpha_equal(call, reordered_args)

    different_args = relay.Call(
        v1, [relay.const(1), relay.const(2),
             relay.const(3)], attr1, [tt1])
    assert not alpha_equal(call, different_args)

    more_args = relay.Call(v1, [
        relay.const(1),
        relay.const(2), v2,
        relay.Tuple([]),
        relay.const(3),
        relay.const(4)
    ], attr1, [tt1])
    assert not alpha_equal(call, more_args)

    different_attrs = relay.Call(v1, basic_args, attr2, [tt1])
    assert not alpha_equal(call, different_attrs)

    same_attrs = relay.Call(v1, basic_args, attr1_same, [tt1])
    assert alpha_equal(call, same_attrs)

    no_type_args = relay.Call(v1, basic_args, attr1)
    assert not alpha_equal(call, no_type_args)

    more_type_args = relay.Call(v1, basic_args, attr1, [tt1, tt2])
    assert not alpha_equal(call, more_type_args)

    different_type_arg = relay.Call(v1, basic_args, attr1, [tt2])
    assert not alpha_equal(call, different_type_arg)
Example #46
def test_ref_read():
    ref = relay.expr.RefCreate(relay.const(1.0))
    r = relay.expr.RefRead(ref)
    check_visit(r)
Example #47
 def expected():
     y_data = x_data - x_data
     y = relay.const(y_data)
     return relay.Function([], y)
Example #48
def test_iterate():
    expr = relay.Call(iterate(double, relay.const(2)), [make_nat_expr(3)])
    res = intrp.evaluate(relay.Function([], expr)())
    assert count(res) == 12
Example #49
 def expected(dtype):
     x = relay.var("x", shape=c_shape, dtype="float32")
     y = relay.var("y", shape=c_shape, dtype="float32")
     z = relay.const(np.array(c_shape).astype(dtype), dtype=dtype)
     func = relay.Function([x, y], z)
     return func
Example #50
 def expected():
     y_data = np.concatenate((c_data, c_data), axis=0)
     y = relay.const(y_data)
     return relay.Function([], y)
Example #51
def test_constant():
    check_visit(relay.const(1.0))
Example #52
 def legalize_conv2d(attrs, inputs, types):
     data, weight = inputs
     weight = relay.multiply(weight, relay.const(2.0, "float32"))
     return relay.nn.conv2d(data, weight, **attrs)
Example #53
 def before():
     a = relay.const(cond_data)
     x = relay.const(x_data)
     y = relay.const(x_data)
     iff = relay.If(a, x + y, x - y)
     return relay.Function([], iff)
Example #54
    def __init__(self, multiplier):
        self.multiplier = multiplier

    # This function can define a pass.
    def transform_function(self, func, mod, ctx):
        obj = self

        class ReplaceConstant(tvm.relay.ExprMutator):
            def visit_constant(self, c):
                return relay.multiply(obj.multiplier, c)
        return ReplaceConstant().visit(func)

f = example()
mod = tvm.IRModule.from_expr(f)
custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
assert custom_pass.info.name == "CustomPipeline"
mod3 = custom_pass(mod)
print(mod3)
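The CustomPipeline fragment above begins at __init__, so the class header is not shown; in the pass-infrastructure tutorial such a class is wrapped in the function-pass decorator. A sketch of the assumed surrounding definition:

@relay.transform.function_pass(opt_level=1)
class CustomPipeline:
    """Replace each constant c in a function with multiplier * c."""
    def __init__(self, multiplier):
        self.multiplier = multiplier
    # transform_function(self, func, mod, ctx) as defined above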

##############################################################################
# Debug a Pass
# ------------
# TVM provides a plug-and-play style debugging pass that prints the IR
# after a given pass finishes, via a special pass (``PrintIR``) that dumps the IR of the
# whole module. A slightly modified version of the sequential pass example,
# shown below, enables IR dumping right after the ``FoldConstant`` optimization.

f = example()
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential([relay.transform.FoldConstant(),
                                # PrintIR dumps the module IR right after FoldConstant runs
                                tvm.transform.PrintIR()])
Example #55
def make_model(
    shape,
    kernel_shape,
    input_zero_point,
    input_scale,
    kernel_zero_point,
    kernel_scale,
    output_zero_point,
    output_scale,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    kernel_dtype,
    out_channels,
    weight_format,
    enable_bias,
    relu_type,
):
    """Return a model and any parameters it may have"""
    h_index = weight_format.index("H")
    w_index = weight_format.index("W")
    kernel_h = kernel_shape[h_index]
    kernel_w = kernel_shape[w_index]
    invar = relay.var("input", shape=shape, dtype=dtype)
    p = (0, 0, 0, 0)
    if padding == "SAME":
        p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w),
                             dilation, strides)
        invar = relay.nn.pad(
            invar,
            pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
            pad_value=input_zero_point,
            pad_mode="constant",
        )
        shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3],
                 shape[3])

    rng = np.random.default_rng(12321)
    w = tvm.nd.array(
        rng.integers(
            np.iinfo(kernel_dtype).min,
            high=np.iinfo(kernel_dtype).max,
            size=kernel_shape,
            dtype=kernel_dtype,
        ))
    weight_const = relay.const(w, kernel_dtype)
    conv = relay.qnn.op.conv2d(
        invar,
        weight_const,
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        groups=groups,
        channels=out_channels,
        padding=p,
        out_dtype="int32",
    )
    b = tvm.nd.array(
        rng.integers(0, high=10, size=(out_channels, ), dtype="int32"))
    bias_const = relay.const(b, "int32")
    last_op = relay.nn.bias_add(conv, bias_const,
                                axis=3) if enable_bias else conv
    requant_input_sc = [sc * input_scale for sc in kernel_scale]
    last_op = relay.qnn.op.requantize(
        last_op,
        relay.const(requant_input_sc, "float32"),
        relay.const(0, "int32"),
        relay.const(output_scale, "float32"),
        relay.const(output_zero_point, "int32"),
        out_dtype=dtype,
    )
    last_op = make_qnn_relu(last_op, relu_type, output_scale,
                            output_zero_point, dtype)
    params = {"w": w, "b": b}
    return last_op, params
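For reference, a hypothetical invocation of make_model; every shape, scale and zero point below is an illustrative value chosen here, not taken from the original test, and the helpers get_same_padding and make_qnn_relu are assumed to be in scope:

# illustrative values only; kernel_scale is per-output-channel because
# requant_input_sc iterates over it
model, params = make_model(
    shape=(1, 16, 16, 3),
    kernel_shape=(3, 3, 3, 8),
    input_zero_point=0, input_scale=0.5,
    kernel_zero_point=0, kernel_scale=[0.25] * 8,
    output_zero_point=0, output_scale=1.0,
    padding="SAME", strides=(1, 1), dilation=(1, 1), groups=1,
    dtype="int8", kernel_dtype="int8",
    out_channels=8, weight_format="HWIO",
    enable_bias=True, relu_type="RELU",
)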
Example #56
import sys
import numpy as np
import tvm  # needed for tvm.nd.array and tvm.IRModule below
from tvm import relay
from tvm import autotvm

import topi
from tvm.relay import op

print(type(tensorizer.INTRINSICS['vnni']['pattern'].body[0].source[0].a.value))

n, c, h, w, oc, ic, kh, kw, sh, sw = map(int, input().split())

var_x = relay.var('x', shape=(n, c, h, w), dtype='int8')
w_ = (np.random.randn(oc, ic, kh, kw) * 128).astype('int8')
b_ = (np.random.randn(1, oc, 1, 1) * 128).astype('int32')
var_w = relay.const(tvm.nd.array(w_))
var_b = relay.const(tvm.nd.array(b_))
conv2d = relay.nn.conv2d(var_x, var_w, out_dtype='int32', kernel_size=(kh, kw), channels=oc, strides=(sh, sw))
biased = relay.add(conv2d, var_b)
y = relay.multiply(biased, relay.const(11, 'int32'))

func = relay.Function([var_x], y)
module = tvm.IRModule()
module['main'] = func

import time
timing = -1
def tracer(module, info, is_before):
    # Pass-instrument hook (assumed completion of the truncated snippet):
    # record the time before each pass and print the elapsed time after it.
    # The early return below disables the tracing.
    return
    global timing
    if bool(is_before):
        timing = time.time()
    else:
        print('Executes: ', info.name, (time.time() - timing) * 1000)
Example #57
 def expected():
     c = relay.const(c_data + c_data)
     x = relay.var("x", t)
     z = relay.add(c, x)
     return relay.Function([x], z)
Example #58
 def after_right(x, elem_op, value):
     return elem_op(x, relay.const(value, dtype))
Example #59
def test_tuple_alpha_equal():
    v0 = relay.Var("v0")
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")

    # unit value is a valid tuple
    assert alpha_equal(relay.Tuple([]), relay.Tuple([]))

    tup = relay.Tuple(
        [v0, relay.const(2),
         relay.const(3),
         relay.Tuple([relay.const(4)])])
    same = relay.Tuple(
        [v0, relay.const(2),
         relay.const(3),
         relay.Tuple([relay.const(4)])])

    assert alpha_equal(tup, same)

    # use the eq_map

    let_tup = relay.Let(v1, tup, v1)
    let_mapped = relay.Let(
        v2,
        relay.Tuple([
            v0,
            relay.const(2),
            relay.const(3),
            relay.Tuple([relay.const(4)])
        ]), v2)

    assert alpha_equal(let_tup, let_mapped)

    more_fields = relay.Tuple([
        v1,
        relay.const(2),
        relay.const(3),
        relay.Tuple([relay.const(4)]), v2
    ])
    assert not alpha_equal(tup, more_fields)

    fewer_fields = relay.Tuple([v1, relay.const(2), relay.const(3)])
    assert not alpha_equal(tup, fewer_fields)

    different_end = relay.Tuple(
        [v1, relay.const(2),
         relay.const(3),
         relay.Tuple([relay.const(5)])])
    assert not alpha_equal(tup, different_end)

    different_start = relay.Tuple(
        [v2, relay.const(2),
         relay.const(3),
         relay.Tuple([relay.const(4)])])
    assert not alpha_equal(tup, different_start)

    longer_at_end = relay.Tuple([
        v1,
        relay.const(2),
        relay.const(3),
        relay.Tuple([relay.const(4), relay.const(5)])
    ])
    assert not alpha_equal(tup, longer_at_end)
Example #60
def test_ref_write():
    ref = relay.expr.RefCreate(relay.const(1.0))
    r = relay.expr.RefWrite(ref, relay.const(2.0))
    check_visit(r)