def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);
    """
    f = relay.Var("f")
    f1 = relay.Var("f1")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    funcbody = relay.If(
        equal(n, relay.const(0)), data,
        relay.Call(f1, [subtract(n, relay.const(1)),
                        log(data)]))
    value = relay.Function([n, data], funcbody, e.float32, [])
    orig = relay.Let(f, value,
                     relay.Call(
                         f,
                         [relay.const(2), relay.const(10000.0)]))
    dced = run_opt_pass(orig, transform.DeadCodeElimination())
    orig = run_opt_pass(orig, transform.InferType())
    assert graph_equal(dced, orig)
    dced = run_opt_pass(relay.Let(f, value, e.three),
                        transform.DeadCodeElimination())
    assert alpha_equal(dced, e.three)
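
These DCE assertions use an `e` fixture that supplies shared types and constants (the tests also assume `equal`, `subtract`, and `log` are imported from `tvm.relay.op`). A minimal sketch of such a fixture; the `env` name and the exact members are assumptions for illustration:

class env:
    """Shared types and constants assumed by the tests above."""
    def __init__(self):
        self.int32 = relay.TensorType([], "int32")
        self.float32 = relay.TensorType([], "float32")
        self.one = relay.const(1.0)
        self.two = relay.const(2.0)
        self.three = relay.const(3.0)

e = env()
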
def alpha_equal(x, y):
    """
    Wrapper around alpha equality which ensures that
    the hash function respects equality.
    """
    return analysis.alpha_equal(
        x, y) and analysis.structural_hash(x) == analysis.structural_hash(y)
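
Nearly every example below calls `run_opt_pass`, which is not shown. A plausible minimal implementation, assuming the same relay.Module/transform APIs the tests already use:

def run_opt_pass(expr, opt_pass):
    """Run one pass (or a list of passes) over `expr` inside a module."""
    passes = opt_pass if isinstance(opt_pass, list) else [opt_pass]
    mod = relay.Module.from_expr(expr)
    seq = transform.Sequential(passes)
    with transform.PassContext(opt_level=3):
        mod = seq(mod)
    entry = mod["main"]
    # A Function comes back as the module entry; a bare expression as its body.
    return entry if isinstance(expr, relay.Function) else entry.body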
Example #3
def test_legalize_multi_input():
    """Test directly replacing an operator with a new one"""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        y = relay.var("y", shape=(1, 64, 56, 20))
        z = relay.var("z", shape=(1, 64, 56, 10))
        func = relay.concatenate([x, y, z], axis=3)
        func = relay.Function([x, y, z], func)
        return func

    @register_legalize("concatenate", level=104)
    def legalize_concatenate(attrs, inputs, types):
        # Check that the correct multi-input case is handled.
        assert len(inputs) == 1
        assert isinstance(inputs[0], tvm.relay.expr.Tuple)
        assert len(types) == 2
        assert isinstance(types[0], tvm.relay.ty.TupleType)
        assert isinstance(types[1], tvm.relay.ty.TensorType)
        return None

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        y = relay.var("y", shape=(1, 64, 56, 20))
        z = relay.var("z", shape=(1, 64, 56, 10))
        func = relay.concatenate([x, y, z], axis=3)
        func = relay.Function([x, y, z], func)
        return func

    a = before()
    a = run_opt_pass(a, transform.Legalize())
    b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #4
def test_alter_layout_resnet():
    """Test alternating the layout of a residual block
    This also tests the elimination of duplicated transformation.
    If a same transformation applies to a same node twice, only one transformation will be created.
    """
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y)
        return relay.Function(analysis.free_vars(y), y)

    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=104)
    def alter_conv2d(attrs, inputs, tinfos):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x,
                             weight2,
                             channels=32,
                             kernel_size=(1, 1),
                             data_layout='NCHW16c')
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y, layout="NCHW16c")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        return relay.Function(analysis.free_vars(y), y)

    a = before()
    a = run_opt_pass(a, transform.AlterOpLayout())

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #5
def test_alter_layout_nhwc_nchw_arm():
    """ Check NHWC to NHCW conversion for a small sequence of ops."""
    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=115)
    def alter_conv2d(attrs, inputs, tinfos):
        from topi.arm_cpu.conv2d import _alter_conv2d_layout_arm
        return _alter_conv2d_layout_arm(attrs, inputs, tinfos, tvm.relay)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(x, weight1,
                            channels=64,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y = relay.nn.avg_pool2d(y,
                                pool_size=(1,1),
                                layout='NHWC')
        y = relay.nn.conv2d(y, weight2,
                            channels=64,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        y = relay.layout_transform(x, "NHWC", "NCHW")
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        y = relay.nn.conv2d(y, weight1,
                            channels=64,
                            kernel_size=(3, 3))
        y = relay.nn.relu(y)
        y = relay.nn.avg_pool2d(y,
                                pool_size=(1,1))
        y = relay.nn.conv2d(y, weight2,
                            channels=64,
                            kernel_size=(3, 3))
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before_nhwc()
    a = run_opt_pass(a, transform.AlterOpLayout())

    b = expected_nhwc()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #6
def test_tuple():
    t = TypeVar("t")
    x = Var("x", t)
    body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1)
    f = Function([x], body, None, [t])
    expected = relay.Function([x], x, None, [t])
    expected = run_opt_pass(expected, transform.InferType())
    assert alpha_equal(dcpe(f), expected)
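
The partial-evaluation examples also assume a `dcpe` helper (partial evaluation followed by dead-code elimination). A sketch consistent with how it is called in these tests (`mod=` and `grad=` keywords); `gradient` and `run_infer_type` are assumed companion helpers:

def dcpe(expr, mod=None, grad=False):
    """Partially evaluate `expr`, then strip the dead code that remains."""
    passes = [transform.PartialEvaluate(),
              transform.DeadCodeElimination(inline_once=True)]
    if grad:
        # `gradient` and `run_infer_type` are assumed to be in scope.
        expr = gradient(run_infer_type(expr))
    if mod:
        assert isinstance(expr, Function)
        mod["main"] = expr
        seq = transform.Sequential(passes)
        mod = seq(mod)
        return mod["main"]
    return run_opt_pass(expr, passes)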
Example #7
def test_double():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    orig = p.double(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 6))
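
`make_nat_expr` builds a Peano-encoded natural number from the prelude's `z`/`s` constructors. A minimal sketch:

def make_nat_expr(prelude, n):
    """Build `n` as s(s(...z()...)) from the prelude constructors."""
    assert n >= 0
    ret = prelude.z()
    while n > 0:
        ret = prelude.s(ret)
        n = n - 1
    return ret
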
def test_dual_path_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 32))
        weight2 = relay.var('weight2', shape=(3, 3, 32, 32))
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NHWC',
                             kernel_layout='HWIO')
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.batch_flatten(y)
        ret = relay.Tuple([y1, y2])
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 32))
        weight2 = relay.var('weight2', shape=(3, 3, 32, 32))
        weight1 = relay.layout_transform(weight1, 'HWIO', 'OIHW')
        weight2 = relay.layout_transform(weight2, 'HWIO', 'OIHW')
        y = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y1 = relay.layout_transform(y1, "NCHW", "NHWC")
        y2 = relay.layout_transform(y, "NCHW", "NHWC")
        y2 = relay.nn.batch_flatten(y2)
        ret = relay.Tuple([y1, y2])
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout('NCHW'))
    b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
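
For context, ConvertLayout is normally run right after a model is imported, before other optimizations. A hedged usage sketch; the `mod` module and the surrounding import step are assumptions:

# `mod` is a relay.Module produced by some frontend importer.
seq = transform.Sequential([transform.RemoveUnusedFunctions(),
                            transform.ConvertLayout('NCHW')])
with transform.PassContext(opt_level=3):
    mod = seq(mod)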
Example #9
def test_alter_layout_broadcast_op():
    """Test boradcast operators """
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64, ))
        scale = relay.var("scale", shape=(64, 1, 1))
        weight = relay.var("weight")
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.bias_add(y, bias)  # test broadcasting to lhs
        y = relay.multiply(scale, y)  # test broadcasting to rhs
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=105)
    def alter_conv2d(attrs, inputs, tinfos):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64, ))
        scale = relay.var("scale", shape=(64, 1, 1))
        weight = relay.var("weight")
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        bias = relay.expand_dims(bias, 1, 2)
        bias = relay.expand_dims(bias, 0, 1)
        bias = relay.layout_transform(bias, "NCHW", "NCHW16c")
        scale = relay.expand_dims(scale, 0, 1)
        scale = relay.layout_transform(scale, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y = relay.add(y, bias)  # test broadcasting to lhs
        y = relay.multiply(scale, y)  # test broadcasting to rhs
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a,
                     [transform.CanonicalizeOps(),
                      transform.AlterOpLayout()])

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)

def test_alter_layout_concatenate():
    """Test altering conv2d layouts that feed a concatenate; the transform
    back to NCHW is placed below the concatenate."""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        ret = relay.concatenate([y, y1], axis=1)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    @register_alter_op_layout("nn.conv2d", level=107)
    def alter_conv2d(attrs, inputs, tinfos):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NCHW16c')
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    a = run_opt_pass(a, transform.AlterOpLayout())

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #11
def test_qnn_conv_requantize_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype='int8')
        weight = relay.var('weight', shape=(3, 3, 64, 64), dtype='int8')
        y = relay.qnn.op.conv2d(x,
                                weight,
                                relay.const(1, 'int32'),
                                relay.const(1, 'int32'),
                                relay.const(1, 'float32'),
                                relay.const(1, 'float32'),
                                channels=64,
                                kernel_size=(3, 3),
                                padding=(1, 1),
                                data_layout='NHWC',
                                kernel_layout='HWIO')
        y = relay.qnn.op.requantize(y,
                                    relay.const(1, 'float32'),
                                    relay.const(1, 'int32'),
                                    relay.const(1, 'float32'),
                                    relay.const(1, 'int32'),
                                    out_dtype='int32')
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype='int8')
        weight = relay.var('weight', shape=(3, 3, 64, 64), dtype='int8')
        x = relay.layout_transform(x, 'NHWC', 'NCHW')
        weight = relay.layout_transform(weight, 'HWIO', 'OIHW')
        y = relay.qnn.op.conv2d(x,
                                weight,
                                relay.const(1, 'int32'),
                                relay.const(1, 'int32'),
                                relay.const(1, 'float32'),
                                relay.const(1, 'float32'),
                                channels=64,
                                kernel_size=(3, 3),
                                padding=(1, 1))
        y = relay.qnn.op.requantize(y,
                                    relay.const(1, 'float32'),
                                    relay.const(1, 'int32'),
                                    relay.const(1, 'float32'),
                                    relay.const(1, 'int32'),
                                    axis=1,
                                    out_dtype='int32')
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, 'NCHW', 'NHWC')
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout('NCHW'))
    b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #12
def test_head_cons():
    mod = Module()
    p = Prelude(mod)
    hd = p.hd
    t = TypeVar("t")
    x = Var("x", t)
    body = hd(p.cons(x, p.nil()))
    f = Function([x], body, None, [t])
    res = dcpe(f, mod)
    assert alpha_equal(res, Function([x], x, t, [t]))
Example #13
def test_empty_ad():
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d)
    g = dcpe(f, grad=True)
    expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])]))
    expected = run_opt_pass(expected, transform.InferType())
    assert alpha_equal(g, expected)
Example #14
def test_ref():
    t = relay.TensorType([], "float32")
    d = relay.Var("d", t)
    r = relay.Var("r", relay.RefType(t))
    x = relay.Var("x")
    body = relay.RefRead(r)
    body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body)
    body = Let(r, RefCreate(d), body)
    square = Function([d], body)
    expected = run_opt_pass(Function([d], d * d), transform.InferType())
    assert alpha_equal(dcpe(square), expected)
Example #15
def test_global_match_nat_id():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    z_case = Clause(PatternConstructor(p.z, []), p.z())
    s_case = Clause(PatternConstructor(p.s, [PatternVar(x)]), p.s(x))
    orig = Match(make_nat_expr(p, 3), [z_case, s_case])
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 3))
Example #16
def test_tuple_match():
    a = relay.Var("a")
    b = relay.Var("b")
    clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
    x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])

    a = relay.Var("a")
    b = relay.Var("b")
    clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
    y = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
    assert analysis.alpha_equal(x, y)
    assert analysis.structural_hash(x) == analysis.structural_hash(y)
Example #17
def test_loop():
    mod = Module()
    t = TypeVar("t")
    x = Var("x", t)
    loop = GlobalVar("loop")
    mod[loop] = Function([x], loop(x), t, [t])
    expected = Call(loop, [const(1)])
    mod["main"] = Function([], expected)
    expected = mod["main"].body
    call = Function([], loop(const(1)))
    res = dcpe(call, mod=mod)
    assert alpha_equal(res.body, expected)
Example #18
def test_alter_layout_nhwc_nchw_arm():
    """ Check NHWC to NHCW conversion for a small sequence of ops."""
    def alter_conv2d(attrs, inputs, tinfos, out_type):
        import topi
        with tvm.target.create("llvm -device=arm_cpu"):
            return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=64,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout='NHWC')
        y = relay.nn.conv2d(y,
                            weight2,
                            channels=64,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        y = relay.layout_transform(x, "NHWC", "NCHW")
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        y = relay.nn.conv2d(y, weight1, channels=64, kernel_size=(3, 3))
        y = relay.nn.relu(y)
        y = relay.nn.avg_pool2d(y, pool_size=(1, 1))
        y = relay.nn.conv2d(y, weight2, channels=64, kernel_size=(3, 3))
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
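
Unlike the `level=` trick used in the earlier examples, this version scopes the alter-layout hook with `TempOpAttr`, which swaps an op attribute in and restores the old value on exit. A sketch of such a helper, assuming the relay op registry's `get_attr`/`set_attr`/`reset_attr` methods:

class TempOpAttr(object):
    """Temporarily override an operator attribute inside a `with` block."""
    def __init__(self, op_name, attr_key, attr_value):
        self.op = relay.op.get(op_name)
        self.attr_key = attr_key
        self.attr_value = attr_value

    def __enter__(self):
        self.older_attr = self.op.get_attr(self.attr_key)
        self.op.reset_attr(self.attr_key)
        self.op.set_attr(self.attr_key, self.attr_value)
        return self

    def __exit__(self, ptype, value, trace):
        # Restore whatever hook was registered before.
        self.op.reset_attr(self.attr_key)
        if self.older_attr:
            self.op.set_attr(self.attr_key, self.older_attr)
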
def test_conv_bias_pool_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64, ))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.bias_add(y, bias, axis=3)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout='NHWC')
        y = relay.cast(y, 'int32')
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64, ))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        x = relay.layout_transform(x, 'NHWC', 'NCHW')
        weight = relay.layout_transform(weight, 'HWIO', 'OIHW')
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))

        bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        bias = relay.layout_transform(bias, 'NHWC', 'NCHW')
        y = relay.add(y, bias)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2))
        y = relay.cast(y, 'int32')
        y = relay.layout_transform(y, 'NCHW', 'NHWC')
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout('NCHW'))
    b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #20
def test_swap_loop():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    loop = GlobalVar("loop")
    mod[loop] = Function([x, y], loop(y, x), nat)
    prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
    res = Function([], prog)
    res = dcpe(res, mod=mod)
    assert alpha_equal(prog, res.body)
Example #21
def test_nat_id():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    mod[nat_id] = Function([x], x)
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 3))
Example #22
def test_alter_layout_nchw_upsamping_op():
    """Test upsamping operators """
    def before():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var('weight', shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(x,
                            weight,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.upsampling(y, scale=2)
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=108)
    def alter_conv2d(attrs, inputs, tinfos):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight")
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(x,
                            weight,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y = relay.nn.upsampling(y, scale=2, layout="NCHW16c")
        y = relay.nn.avg_pool2d(y,
                                pool_size=(2, 2),
                                strides=(2, 2),
                                layout='NCHW16c')
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a,
                     [transform.CanonicalizeOps(),
                      transform.AlterOpLayout()])

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #23
def test_alter_layout_scalar():
    """Test alternating the layout of a conv2d.
    The layout of broadcast operators and the weight should be changed accordingly.
    """
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight")
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.add(y, relay.const(1, "float32"))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=106)
    def alter_conv2d(attrs, inputs, tinfos):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        w = relay.var("weight")

        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            w,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y = relay.add(y, relay.const(1.0, "float32"))

        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a,
                     [transform.CanonicalizeOps(),
                      transform.AlterOpLayout()])

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)

def test_conv_concat_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=64,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NHWC',
                             kernel_layout='HWIO')
        ret = relay.concatenate([y, y1], axis=3)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        weight1 = relay.layout_transform(weight1, 'HWIO', 'OIHW')
        weight2 = relay.layout_transform(weight2, 'HWIO', 'OIHW')
        y = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=64,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout('NCHW'))
    b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
Example #25
def test_alter_layout_prelu():
    """Test PRelu operator"""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight")
        alpha = relay.var("alpha", relay.IncompleteType())
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.prelu(y, alpha)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=111)
    def alter_conv2d(attrs, inputs, tinfos):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        w = relay.var("weight")
        alpha = relay.var("alpha", relay.IncompleteType())

        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            w,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.nn.prelu(y, alpha)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a,
                     [transform.CanonicalizeOps(),
                      transform.AlterOpLayout()])

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b)

def test_resnet_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 32))
        weight2 = relay.var('weight2', shape=(1, 1, 64, 32))
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x,
                             weight2,
                             channels=32,
                             kernel_size=(1, 1),
                             data_layout='NHWC',
                             kernel_layout='HWIO')
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y, layout='NHWC')
        return relay.Function(analysis.free_vars(y), y)

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 32))
        weight2 = relay.var('weight2', shape=(1, 1, 64, 32))
        weight1 = relay.layout_transform(weight1, 'HWIO', 'OIHW')
        weight2 = relay.layout_transform(weight2, 'HWIO', 'OIHW')
        x = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(y), y)

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout('NCHW'))
    b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)

def test_if():
    cond = relay.const(True)
    x = relay.If(cond, relay.const(2), relay.const(3))
    anf = run_opt_pass(x, [transform.ToANormalForm(), transform.InferType()])
    a = relay.Var('a', relay.IncompleteType())
    b = relay.Var('b', relay.IncompleteType())
    c = relay.Var('c', relay.IncompleteType())
    d = relay.Var('d', relay.IncompleteType())
    true_branch = relay.Let(a, relay.const(2), a)
    false_branch = relay.Let(b, relay.const(3), b)
    expected_output = relay.If(c, true_branch, false_branch)
    expected_output = relay.Let(d, expected_output, d)
    expected_output = relay.Let(c, cond, expected_output)
    expected_output = run_opt_pass(expected_output, transform.InferType())
    assert alpha_equal(anf, expected_output)
Example #28
def test_map():
    mod = Module()
    p = Prelude(mod)
    f = GlobalVar("f")
    t = TypeVar("t")
    a = Var("a", t)
    mod[f] = Function([a], a, t, [t])
    orig = p.map(f, p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil()))))
    expected = p.cons((const(1)), p.cons((const(2)), p.cons((const(3)), p.nil())))
    expected = Function([], expected)
    mod["main"] = expected
    expected = mod["main"]
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, expected.body)
Example #29
def test_match_nat_id():
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    z_case = Clause(PatternConstructor(p.z, []), p.z())
    s_case = Clause(PatternConstructor(p.s, [PatternVar(y)]), p.s(y))
    mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 3))
Example #30
def test_alter_layout_depthwise_conv2d():
    """Test depthwise_conv2d operator"""
    def before():
        x = relay.var("x", shape=(1, 32, 56, 56))
        w = relay.var("w", shape=(32, 1, 3, 3))
        y = relay.nn.conv2d(x,
                            w,
                            padding=(1, 1),
                            channels=32,
                            kernel_size=(3, 3),
                            groups=32)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    import topi
    # Register alter op layout. "level" is used to override the previously registered functions.
    @register_alter_op_layout("nn.conv2d", level=110)
    def alter_conv2d(attrs, inputs, tinfos):
        with tvm.target.create("llvm"):
            return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, relay)

    def expected():
        x = relay.var("x", shape=(1, 32, 56, 56))
        w = relay.var("w", shape=(32, 1, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NCHW8c")
        w = relay.layout_transform(w, "OIHW", "OIHW1i8o")
        y = relay.nn.contrib_depthwise_conv2d_nchwc(x,
                                                    w,
                                                    padding=(1, 1),
                                                    channels=32,
                                                    kernel_size=(3, 3),
                                                    groups=32,
                                                    data_layout="NCHW8c",
                                                    kernel_layout="OIHW1i8o",
                                                    out_layout="NCHW8c")
        y = relay.layout_transform(y, "NCHW8c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a,
                     [transform.CanonicalizeOps(),
                      transform.AlterOpLayout()])

    b = expected()
    b = run_opt_pass(b, transform.InferType())

    assert analysis.alpha_equal(a, b)