def test_alter_return_none():
    """Test doing nothing by returning 'None' """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        y = relay.nn.global_max_pool2d(x)
        y = relay.Function([x], y)
        return y

    called = [False]

    def alter_conv2d(attrs, inputs, tinfos, out_type):
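        # Returning None from the hook tells AlterOpLayout to leave the op untouched;
        # the flag only records that the hook was actually invoked.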
        called[0] = True
        return None

    with TempOpAttr("nn.global_max_pool2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(before(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
    assert called[0]
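
# The tests in this collection call a run_opt_pass helper that is defined near the top
# of the original test files rather than in this excerpt. A minimal sketch of the usual
# TVM pattern (an assumption about the surrounding file, relying on the same
# `import tvm` / `from tvm import relay` imports the tests already use):
def run_opt_pass(expr, passes):
    """Apply a list of Relay passes to an expression and return the rewritten result."""
    passes = passes if isinstance(passes, list) else [passes]
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential(passes)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
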
def test_alter_layout_nhwc_arm():
    """ Check that AlterOplayout does not alter NHWC data layout. """
    def alter_conv2d(attrs, inputs, tinfos, out_type):
        from tvm import topi
        with tvm.target.create("llvm -device=arm_cpu"):
            return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1', shape=(3, 3, 64, 64))
        weight2 = relay.var('weight2', shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=64,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout='NHWC')
        y = relay.nn.conv2d(y,
                            weight2,
                            channels=64,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO')
        y = relay.nn.relu(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected_nhwc():
        return before_nhwc()

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_scalar():
    """Test alternating the layout of a conv2d.
    The layout of broadcast operators and the weight should be changed accordingly.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.add(y, relay.const(1, "float32"))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        w = relay.var("weight")

        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, w, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
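        # The added constant is a scalar, so it broadcasts over any layout and needs no layout_transform.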
        y = relay.add(y, relay.const(1.0, "float32"))

        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize():
    """Test directly replacing an operator with a new one"""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var('weight', shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def legalize_conv2d(attrs, inputs, types):
        data, weight = inputs
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, weight, **attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var('weight', shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x,
                            relay.multiply(weight, relay.const(2.0,
                                                               "float32")),
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
        a = before()
        a = run_opt_pass(a, transform.Legalize())
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
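
# TempOpAttr (from tvm.relay.testing.temp_op_attr) temporarily overrides an operator
# attribute such as FTVMAlterOpLayout or FTVMLegalize and restores the previous value
# on exit. A rough sketch of its behaviour is shown below for reference; the class
# name and body are an illustration, not the library source:
class _TempOpAttrSketch:
    def __init__(self, op_name, attr_key, attr_value):
        self.op = relay.op.get(op_name)
        self.attr_key = attr_key
        self.attr_value = attr_value

    def __enter__(self):
        # Save the currently registered hook, then install the temporary one.
        self.older_attr = self.op.get_attr(self.attr_key)
        self.op.reset_attr(self.attr_key)
        self.op.set_attr(self.attr_key, self.attr_value)
        return self

    def __exit__(self, ptype, value, trace):
        # Remove the temporary hook and restore the original registration, if any.
        self.op.reset_attr(self.attr_key)
        if self.older_attr:
            self.op.set_attr(self.attr_key, self.older_attr)
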
def test_alter_layout_prelu():
    """Test PRelu operator"""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight")
        alpha = relay.var("alpha", relay.IncompleteType())
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.prelu(y, alpha)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        w = relay.var("weight")
        alpha = relay.var("alpha", relay.IncompleteType())

        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, w, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.nn.prelu(y, alpha)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b)
def test_tflite_large_irregular():
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):

        # uint8 input
        data_shape = (1, 1024, 1, 1)
        data_dtype = "uint8"
        kernel_shape = (1001, 1024, 1, 1)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=127,
            kernel_zero_point=127,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
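        # Data and weights are both filled with the zero point (127), so every
        # (data - zp) * (weight - zp) term is zero and the expected output is all zeros.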
        golden_data = np.full(data_shape, 127).astype("uint8")
        golden_weight = np.full(kernel_shape, 127).astype("uint8")

        with tvm.transform.PassContext(opt_level=2):
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(qnn_func, "llvm", params=params)
            mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
            mod.set_input("data", golden_data)
            mod.set_input(**params)
            mod.run()
            qnn_output = mod.get_output(0).asnumpy()
        golden_output = np.full((1, 1001, 1, 1), 0).astype("uint8")
        np.testing.assert_equal(qnn_output, golden_output)
def test_broadcast_layout():
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):

        # Test broadcast support for NHWC layout.
        data_shape = (1, 229, 229, 3)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (7, 7, 3, 64)  # HWIO
        kernel_dtype = "int8"
        _, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(7, 7),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        func = qnn_func["main"].body
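        # bias broadcasts along the channel axis, while bias2 matches the 225x225 spatial
        # output of the 7x7 conv (229 + 2*1 - 7 + 1 = 225); both add orders are checked below.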
        bias = relay.var("bias", shape=(64,), dtype="int32")
        bias2 = relay.var("bias2", shape=(1, 225, 225, 1), dtype="int32")

        # Check broadcast support on both lhs and rhs
        func = relay.add(func, bias2)
        func = relay.add(bias2, func)
        func = relay.add(bias, func)
        func = relay.add(func, bias)
        func = relay.Function(relay.analysis.free_vars(func), func)
        mod = tvm.IRModule.from_expr(func)
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512")
def test_alter_op():
    """Test directly replacing an operator with a new one"""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var('weight', shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, weight, **attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var('weight', shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x,
                            relay.multiply(weight, relay.const(2.0,
                                                               "float32")),
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nchw_upsamping_op():
    """Test upsamping operators """

    def before():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.upsampling(y, scale_h=2, scale_w=2)
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight")
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.nn.upsampling(y, scale_h=2, scale_w=2, layout="NCHW16c")
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout="NCHW16c")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv2d():
    N, IC, H, W = 1, 64, 56, 56
    OC, IC, FH, FW = 128, 64, 3, 3
    data_shape = (N, IC, H, W)
    weight_shape = (OC, IC, FH, FW)
    padding = (0, 0)
    strides = (1, 1)

    relay_mod = tvm.IRModule.from_expr(
        get_conv2d(
            data_shape,
            weight_shape,
            padding=padding,
            strides=strides,
            channels=OC,
            kernel_size=(FH, FW),
            data_layout="NCHW",
            kernel_layout="OIHW",
        ))

    data_np = np.random.randn(*data_shape).astype("float32")
    weight_np = np.random.randn(*weight_shape).astype("float32")

    target = "llvm"
    params = {"weight": weight_np}

    def schedule_fn(task, sch):
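        # Apply the fixed TIR schedule only to conv2d tasks; returning True tells
        # apply_fixed_schedules to record the scheduled candidate in the database.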
        if "nn_conv2d" in task.task_name:
            schedule_tir_conv2d_nchw_oihw(sch)
            return True
        return False

    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        database = apply_fixed_schedules(
            relay_mod,
            target,
            params,
            schedule_fn,
            te_filter_func="meta_schedule.DefaultTaskFilterAllowExtern",
        )
        with ApplyHistoryBest(
                database,
                te_filter_func="meta_schedule.DefaultTaskFilterAllowExtern",
        ):
            with tvm.transform.PassContext(
                    opt_level=3,
                    config={"relay.backend.use_meta_schedule": True},
            ):
                lib = relay.build(relay_mod, target=target, params=params)

    dev = tvm.device(target, 0)

    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    runtime.set_input("data", data_np)
    runtime.run()

    out = runtime.get_output(0).numpy()

    ref = get_ref(data_np, weight_np, strides, padding)

    tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4)
def test_alter_layout_sum():
    """ Check NCHW, NHWC sum layout conversion"""
    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    # Check NCHW conversion.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        ret = relay.sum(y, axis=1, keepdims=True)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        ret = relay.layout_transform(y, "NCHW16c", "NCHW")
        ret = relay.sum(ret, axis=[1], keepdims=True)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC')
        ret = relay.sum(y, axis=3, keepdims=True)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1')
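        # The input is converted to NCHW16c for the conv and back to NCHW afterwards,
        # so the reduction now runs over axis 1 instead of axis 3.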
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        ret = relay.layout_transform(y, "NCHW16c", "NCHW")
        ret = relay.sum(ret, axis=[1], keepdims=True)
        ret = relay.layout_transform(ret, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_dual_path():
    """
    Test altering the layout with two outputs.
    One path continues to use the new layout while the other falls back to the old layout.
    """
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.relu(y)
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.batch_flatten(y)
        ret = relay.Tuple([y1, y2])
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y = relay.nn.relu(y)
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NCHW16c')
        y1 = relay.nn.relu(y1)
        y1 = relay.layout_transform(y1, "NCHW16c", "NCHW")
        y2 = relay.layout_transform(y, "NCHW16c", "NCHW")
        y2 = relay.nn.batch_flatten(y2)
        ret = relay.Tuple([y1, y2])
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_strided_slice():
    """Test rewriting strided_slice during alter_iop_layout"""
    def before():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var('weight', shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(x,
                            weight,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.strided_slice(y,
                                begin=relay.const([0, 16], "int32"),
                                end=relay.const([1, 33], "int32"),
                                strides=relay.const([1, 1], "int32"))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW4c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        weight = relay.layout_transform(weight, "OIHW", "OIHW4i4o")
        x = relay.layout_transform(x, "NCHW", "NCHW4c")
        y = relay.op.nn.contrib_conv2d_nchwc(x,
                                             weight,
                                             channels=32,
                                             kernel_size=(3, 3),
                                             padding=(1, 1),
                                             data_layout="NCHW4c")

        y = relay.strided_slice(y,
                                begin=relay.const([0, 4], "int32"),
                                end=relay.const([1, 21], "int32"),
                                strides=relay.const([1, 1], "int32"))

        y = relay.layout_transform(y, "NCHW4c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        b = run_opt_pass(expected(), transform.InferType())

    # Verify inference result
    mod_before = tvm.IRModule()
    mod_new = tvm.IRModule()
    mod_before['main'] = a
    mod_new['main'] = b
    with relay.build_config(opt_level=3):
        for target, ctx in ctx_list():
            for kind in ["graph", "debug", "vm"]:
                ex_before = relay.create_executor(kind,
                                                  mod=mod_before,
                                                  ctx=ctx,
                                                  target=target)
                ex_new = relay.create_executor(kind,
                                               mod=mod_new,
                                               ctx=ctx,
                                               target=target)
                np_data = np.random.uniform(size=(1, 32, 28, 28)).astype("float32")
                np_weight = np.random.uniform(size=(32, 32, 3, 3)).astype("float32")
                result_before = ex_before.evaluate()(np_data, np_weight)
                result_new = ex_new.evaluate()(np_data, np_weight)
                tvm.testing.assert_allclose(result_before.asnumpy(),
                                            result_new.asnumpy(),
                                            rtol=1e-5,
                                            atol=1e-5)
def test_depthwise_depth_multiplier():
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):

        # uint8 input, NCHW and OIHW
        # Depthwise multiplier = 1
        data_shape = (2, 4, 16, 16)
        data_dtype = 'uint8'
        kernel_shape = (4, 1, 3, 3)
        kernel_dtype = 'uint8'
        ref_func, qnn_func = get_funcs(data_shape=data_shape,
                                       data_dtype=data_dtype,
                                       kernel_shape=kernel_shape,
                                       kernel_dtype=kernel_dtype,
                                       input_zero_point=5,
                                       kernel_zero_point=3,
                                       input_scale=1.0,
                                       kernel_scale=1.0,
                                       kernel_size=(3, 3),
                                       padding=(0, 0),
                                       strides=(1, 1),
                                       dilation=(1, 1),
                                       data_layout="NCHW",
                                       kernel_layout="OIHW",
                                       out_dtype="int32",
                                       groups=4,
                                       channels=4)
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape,
               kernel_dtype)

        # Depthwise multiplier = 2
        data_shape = (10, 4, 16, 16)
        data_dtype = 'uint8'
        kernel_shape = (4, 2, 3, 3)
        kernel_dtype = 'uint8'
        ref_func, qnn_func = get_funcs(data_shape=data_shape,
                                       data_dtype=data_dtype,
                                       kernel_shape=kernel_shape,
                                       kernel_dtype=kernel_dtype,
                                       input_zero_point=5,
                                       kernel_zero_point=3,
                                       input_scale=1.0,
                                       kernel_scale=1.0,
                                       kernel_size=(3, 3),
                                       padding=(0, 0),
                                       strides=(1, 1),
                                       dilation=(1, 1),
                                       data_layout="NCHW",
                                       kernel_layout="OIHW",
                                       out_dtype="int32",
                                       groups=8,
                                       channels=8)
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape,
               kernel_dtype)

        # uint8 input, NHWC and HWOI
        # Depthwise multiplier = 1
        data_shape = (2, 16, 16, 4)
        data_dtype = 'uint8'
        kernel_shape = (3, 3, 4, 1)
        kernel_dtype = 'uint8'
        ref_func, qnn_func = get_funcs(data_shape=data_shape,
                                       data_dtype=data_dtype,
                                       kernel_shape=kernel_shape,
                                       kernel_dtype=kernel_dtype,
                                       input_zero_point=5,
                                       kernel_zero_point=3,
                                       input_scale=1.0,
                                       kernel_scale=1.0,
                                       kernel_size=(3, 3),
                                       padding=(0, 0),
                                       strides=(1, 1),
                                       dilation=(1, 1),
                                       data_layout="NHWC",
                                       kernel_layout="HWOI",
                                       out_dtype="int32",
                                       groups=4,
                                       channels=4)
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape,
               kernel_dtype)

        # Depthwise multiplier = 2
        data_shape = (2, 16, 16, 4)
        data_dtype = 'uint8'
        kernel_shape = (3, 3, 4, 2)
        kernel_dtype = 'uint8'
        ref_func, qnn_func = get_funcs(data_shape=data_shape,
                                       data_dtype=data_dtype,
                                       kernel_shape=kernel_shape,
                                       kernel_dtype=kernel_dtype,
                                       input_zero_point=5,
                                       kernel_zero_point=3,
                                       input_scale=1.0,
                                       kernel_scale=1.0,
                                       kernel_size=(3, 3),
                                       padding=(0, 0),
                                       strides=(1, 1),
                                       dilation=(1, 1),
                                       data_layout="NHWC",
                                       kernel_layout="HWOI",
                                       out_dtype="int32",
                                       groups=8,
                                       channels=8)
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape,
               kernel_dtype)
def test_alter_layout_pool():
    """ Check NCHW, NHWC pool layout conversion"""

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    # Check NCHW conversion.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NCHW16c")
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(
            x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
        )
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NCHW16c")
        ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_tflite_anistropic_strides():
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):

        # uint8 input
        data_shape = (1, 1, 3, 6)
        data_dtype = "uint8"
        kernel_shape = (1, 1, 2, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=127,
            kernel_zero_point=127,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 3),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        golden_data = np.array(
            (
                133,
                131,
                129,
                125,
                123,
                121,
                135,
                133,
                131,
                123,
                121,
                119,
                137,
                135,
                133,
                121,
                119,
                117,
            )
        ).reshape(data_shape)
        golden_data = golden_data.astype("uint8")
        golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape)
        golden_weight = golden_weight.astype("uint8")

        with tvm.transform.PassContext(opt_level=2):
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(qnn_func, "llvm", params=params)
            mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
            mod.set_input("data", golden_data)
            mod.set_input(**params)
            mod.run()
            qnn_output = mod.get_output(0).asnumpy()
        golden_output = np.array((124, -92, 164, -132)).reshape(1, 1, 2, 2)
        np.testing.assert_equal(qnn_output, golden_output)
def test_padding():
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):

        # uint8 input
        data_shape = (1, 4, 2, 2)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 2, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=5,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)

        # Try different layout
        data_shape = (2, 2, 4, 4)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (2, 2, 4, 3)  # HWIO
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)

        # Try asymmetric padding
        data_shape = (2, 2, 4, 4)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (2, 2, 4, 3)  # HWIO
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(1, 1, 2, 2),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_alter_op_layout_batch_matmul():
    if not get_global_func("tvm.contrib.mlas.batch_sgemm", allow_missing=True):
        print("skip because mlas is not enabled...")
        return
    target = "llvm -libs=mlas"
    m, k, n = 32, 48, 64
    B_const = np.random.uniform(size=[1, n, k]).astype("float32")

    def pack_before():
        A = relay.var("A", shape=(1, m, k), dtype="float32")
        B = relay.const(B_const, "float32")
        C = relay.nn.batch_matmul(A, B)
        f = relay.Function(relay.analysis.free_vars(C), C)
        return f

    def pack_expected():
        A = relay.var("A", shape=(1, m, k), dtype="float32")
        B = relay.const(B_const, "float32")
        B_packed = relay.op.mlas_packb(B, k, n)
        C = relay.op.mlas_matmul(A, B_packed, True, k, n)
        f = relay.Function(relay.analysis.free_vars(C), C)
        return f

    with tvm.target.Target(target):
        with TempOpAttr(
            "nn.batch_matmul", "FTVMAlterOpLayout", relay.op._mlas._alter_batch_matmul_layout
        ):
            a = pack_before()
            a = _run_opt_pass(a, relay.transform.AlterOpLayout())
            b = _run_opt_pass(pack_expected(), relay.transform.InferType())
            assert tvm.ir.structural_equal(a, b)

    def nopack_before():
        A = relay.var("A", shape=(1, m, k), dtype="float32")
        B = relay.var("B", shape=(1, n, k), dtype="float32")
        C = relay.nn.batch_matmul(A, B)
        f = relay.Function(relay.analysis.free_vars(C), C)
        return f

    def nopack_expected():
        A = relay.var("A", shape=(1, m, k), dtype="float32")
        B = relay.var("B", shape=(1, n, k), dtype="float32")
        C = relay.op.mlas_matmul(A, B, False)
        f = relay.Function(relay.analysis.free_vars(C), C)
        return f

    with tvm.target.Target(target):
        with TempOpAttr(
            "nn.batch_matmul", "FTVMAlterOpLayout", relay.op._mlas._alter_batch_matmul_layout
        ):
            a = nopack_before()
            a = _run_opt_pass(a, relay.transform.AlterOpLayout())
            b = _run_opt_pass(nopack_expected(), relay.transform.InferType())
            assert tvm.ir.structural_equal(a, b)

    def dynamic_expected():
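        # With a dynamic batch_matmul shape the MLAS alter hook is expected to make no
        # change, so this graph serves as both the input and the expected output.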
        A = relay.var("A", shape=(1, relay.Any(), k), dtype="float32")
        B = relay.var("B", shape=(1, n, k), dtype="float32")
        C = relay.nn.batch_matmul(A, B)
        f = relay.Function(relay.analysis.free_vars(C), C)
        return f

    with tvm.target.Target(target):
        with TempOpAttr(
            "nn.batch_matmul", "FTVMAlterOpLayout", relay.op._mlas._alter_batch_matmul_layout
        ):
            a = dynamic_expected()
            a = _run_opt_pass(a, relay.transform.AlterOpLayout())
            b = _run_opt_pass(dynamic_expected(), relay.transform.InferType())
            assert tvm.ir.structural_equal(a, b)
    def _test_legalize_conv2d(data_shape,
                              kernel_shape,
                              pad_shape,
                              dtype,
                              do_pad=True):
        out_channel = kernel_shape[3]
        out_shape = list(data_shape)
        out_shape[3] = out_channel
        db, di, do = pad_shape
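        # db/di/do: extra elements padded onto the batch, input-channel and output-channel
        # dims; the expected graph pads, convolves, then slices back to out_shape.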

        def before():
            x = relay.var("x", shape=data_shape, dtype=dtype)
            weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
            y = relay.nn.conv2d(
                x,
                weight,
                channels=out_channel,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.Function([x, weight], y)
            return y

        def legalize_conv2d(attrs, inputs, types):
            with tvm.target.Target("cuda"):
                return topi.nn.conv2d_legalize(attrs, inputs, types)

        def expected():
            if not do_pad:
                return before()
            x = relay.var("x", shape=data_shape, dtype=dtype)
            if db or di:
                x_pad = relay.nn.pad(x, pad_width=((0, db), (0, 0), (0, 0), (0, di)))
            else:
                x_pad = x
            weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
            if di or do:
                weight_pad = relay.nn.pad(weight,
                                          pad_width=((0, 0), (0, 0), (0, di),
                                                     (0, do)))
            else:
                weight_pad = weight
            y_pad = relay.nn.conv2d(
                x_pad,
                weight=weight_pad,
                channels=out_channel + do,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            if db or do:
                y = relay.strided_slice(y_pad,
                                        begin=[0, 0, 0, 0],
                                        end=out_shape)
            else:
                y = y_pad
            y = relay.Function([x, weight], y)
            return y

        with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
            a = before()
            a = run_opt_pass(a, transform.Legalize())
            b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(
            a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
# File: test_dnnl.py (project: wenxcs/tvm)
def partition_for_dnnl(mod, params=None, alter_layout=True):
    """Partition the graph greedily offloading supported operators to DNNL.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    alter_layout : bool
        Whether to run AlterOpLayout (plus constant folding) before partitioning.

    Returns
    -------
    mod : Module
        Annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)

    with TempOpAttr("nn.conv2d", "FTVMLegalize", dnnl.legalize_group_conv):
        with TempOpAttr("nn.conv2d_transpose", "FTVMLegalize",
                        dnnl.legalize_group_conv):
            seq = tvm.transform.Sequential([
                transform.CanonicalizeOps(),
                transform.InferType(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.FoldScaleAxis(),
                # fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
                transform.SimplifyExpr(),
                transform.FoldConstant(),
                # alter group conv / conv_transpose layout to `GOIHW` / `GIOHW`
                transform.Legalize(),
                transform.FoldConstant(),
            ])
            with tvm.transform.PassContext(opt_level=3):
                mod = seq(mod)
    if alter_layout:
        with TempOpAttr("nn.conv1d", "FTVMAlterOpLayout", dnnl.alter_conv):
            with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", dnnl.alter_conv):
                with TempOpAttr("nn.conv3d", "FTVMAlterOpLayout",
                                dnnl.alter_conv):
                    with TempOpAttr("nn.conv2d_transpose", "FTVMAlterOpLayout",
                                    dnnl.alter_conv_transpose):
                        with TempOpAttr("nn.conv3d_transpose",
                                        "FTVMAlterOpLayout",
                                        dnnl.alter_conv_transpose):
                            alter_layout_seq = tvm.transform.Sequential([
                                transform.AlterOpLayout(),
                                transform.FoldConstant(),
                            ])
                            with tvm.transform.PassContext(opt_level=3):
                                mod = alter_layout_seq(mod)

    byoc_seq = tvm.transform.Sequential([
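        # Standard BYOC flow: merge DNNL composite patterns, annotate supported ops,
        # merge adjacent regions, and partition them into external functions.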
        transform.MergeComposite(dnnl.pattern_table()),
        transform.AnnotateTarget("dnnl"),
        transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ])
    with tvm.transform.PassContext(opt_level=3):
        mod = byoc_seq(mod)
    return mod
def test_alter_layout_nhwc_int8_aarch64():
    """ Check that AlterOplayout does not alter NHWC data layout. """
    from tvm import autotvm
    expected_workload_shape = (20, 42, 4, 16)

    # We use Int8Fallback to disable the fallback flag
    # and to test the new workload produced during the pass
    class Int8Fallback(autotvm.FallbackContext):
        def _query_inside(self, target, workload):
            key = (target, workload)
            if key in self.memory:
                return self.memory[key]
            cfg = autotvm.task.space.FallbackConfigEntity()
            cfg.is_fallback = False
            cfg.cost = 0
            self.memory[key] = cfg
            return cfg

        def update(self, target, workload, cfg):
            key = (str(target), workload)
            assert workload[2][1] == expected_workload_shape
            assert workload[0] == "conv2d_NHWC_quantized_without_transform.arm_cpu"
            self.memory[key] = cfg

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        from tvm import topi
        with tvm.target.create(
                "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
            with Int8Fallback():
                tmp = topi.nn.conv2d_alter_layout(attrs, inputs, tinfos,
                                                  out_type)
                return tmp

    # Check NHWC conversion.
    def before_nhwc_int8():
        x = relay.var("x", shape=(1, 56, 56, 73), dtype='int8')
        weight = relay.var('weight1', shape=(3, 3, 73, 79), dtype='int8')
        y = relay.nn.conv2d(x,
                            weight,
                            channels=79,
                            kernel_size=(3, 3),
                            data_layout='NHWC',
                            kernel_layout='HWIO',
                            out_dtype='int32')
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected_nhwc_int8():
        x = relay.var("x", shape=(1, 56, 56, 73), dtype='int8')
        weight = relay.var('weight1', shape=(3, 3, 73, 79), dtype='int8')
        tile_rows = 4
        tile_cols = 16
        weight_transformed = relay.nn.contrib_conv2d_gemm_weight_transform(
            weight, tile_rows, tile_cols)
        y = relay.nn.contrib_conv2d_gemm_without_weight_transform(
            x,
            weight_transformed,
            channels=79,
            kernel_size=(3, 3),
            data_layout='NHWC',
            kernel_layout='HWIO',
            out_dtype='int32')
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc_int8()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc_int8(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_batch_matmul_with_requantized_output():
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_batch_matmul):

        int8_requantized_output_params = make_int_configuration(
            requantize_output=True)
        qnn_batch_matmul_driver(int8_requantized_output_params)
def test_alter_layout_pad():
    """ Check NCHW, NHWC and corner case for pad layout conversion"""
    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    # Check NCHW conversion.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        ret = relay.nn.pad(y,
                           pad_width=((0, 0), (0, 0), (1, 1), (1, 1), (0, 0)))
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC')
        ret = relay.nn.pad(y, pad_width=((0, 0), (1, 1), (1, 1), (0, 0)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1')
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        ret = relay.nn.pad(y,
                           pad_width=((0, 0), (0, 0), (1, 1), (1, 1), (0, 0)))
        ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check that conversion does not happen when padding along split axis.
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        ret = relay.nn.pad(y, pad_width=((0, 0), (1, 1), (1, 1), (1, 1)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        ret = relay.layout_transform(y, "NCHW16c", "NCHW")
        ret = relay.nn.pad(ret, pad_width=((0, 0), (1, 1), (1, 1), (1, 1)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_dense_without_bias():
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):

        int32_output_without_bias_params = \
            make_int_configuration(use_bias=False)
        qnn_dense_driver(int32_output_without_bias_params)
def test_alter_layout_broadcast_scalar_op():
    """Test alternating the layout of a conv2d.
    The layout of broadcast operators and the weight should be changed accordingly.
    """
    def before():
        x = relay.var("x", shape=(1, 500, 500, 64))
        kernel = relay.var('kernel', shape=(3, 3, 64, 64), dtype='float32')
        bias = relay.var("bias", shape=(64, ))
        multiplier1 = relay.var('multiplier1', shape=(1, ), dtype='float32')
        multiplier2 = relay.var('multiplier2', shape=(1, 1), dtype='float32')

        y = relay.nn.conv2d(x,
                            kernel,
                            data_layout='NHWC',
                            kernel_layout="HWIO",
                            kernel_size=(3, 3))
        y = relay.add(bias, y)
        y = relay.nn.relu(y)

        y = relay.multiply(multiplier1, y)
        y = relay.multiply(y, multiplier2)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 500, 500, 64))
        kernel = relay.var('kernel', shape=(3, 3, 64, 64), dtype='float32')
        bias = relay.var("bias", shape=(64, ))
        multiplier1 = relay.var('multiplier1', shape=(1, ), dtype='float32')
        multiplier2 = relay.var('multiplier2', shape=(1, 1), dtype='float32')

        b = relay.expand_dims(bias, axis=0, num_newaxis=3)
        b = relay.layout_transform(b, "NHWC", "NCHW16c")

        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(y,
                            kernel,
                            data_layout='NCHW16c',
                            kernel_layout="HWIO",
                            kernel_size=(3, 3))

        y = relay.add(b, y)
        y = relay.nn.relu(y)

        y = relay.multiply(multiplier1, y)
        y = relay.multiply(y, multiplier2)
        y = relay.layout_transform(y, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(
            a, [transform.CanonicalizeOps(),
                transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())

    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_qnn_dense_with_requantized_output():
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):

        int8_requantized_output_with_bias_params = \
            make_int_configuration(use_bias=True, requantize_output=True)
        qnn_dense_driver(int8_requantized_output_with_bias_params)
def test_alter_layout_concatenate():
    """ NCHW, NHWC and corner case concatenate layout transform."""
    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        return relay.nn.conv2d(data, weight, **new_attrs)

    # NCHW layout transformation.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        ret = relay.concatenate([y, y1], axis=1)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NCHW16c')
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # NHWC layout transformation.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.nn.conv2d(x,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout='NHWC')
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NHWC')
        ret = relay.concatenate([y, y1], axis=3)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var('weight1')
        weight2 = relay.var('weight2')
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(y,
                            weight1,
                            channels=32,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            data_layout="NCHW16c")
        y1 = relay.nn.conv2d(y,
                             weight2,
                             channels=32,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             data_layout='NCHW16c')
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
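# A minimal numpy sketch (illustrative only) of why the NHWC concatenate above
# moves from axis=3 to axis=1 after layout alteration: once both conv2d outputs
# stay in NCHW16c, the outer channel dimension is axis 1. Variable names below
# are assumptions for illustration, not part of the test suite.
import numpy as np

_a = np.zeros((1, 56, 56, 32))  # NHWC output of the first conv2d
_b = np.zeros((1, 56, 56, 32))  # NHWC output of the second conv2d
_nhwc = np.concatenate([_a, _b], axis=3)  # channels concatenated on axis 3
_a16 = _a.transpose(0, 3, 1, 2).reshape(1, 2, 16, 56, 56).transpose(0, 1, 3, 4, 2)
_b16 = _b.transpose(0, 3, 1, 2).reshape(1, 2, 16, 56, 56).transpose(0, 1, 3, 4, 2)
_nchw16c = np.concatenate([_a16, _b16], axis=1)  # channel blocks on axis 1
assert _nhwc.shape == (1, 56, 56, 64) and _nchw16c.shape == (1, 4, 56, 56, 16)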
def test_per_channel_weight_scale():
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
        config = make_int_configuration(use_bias=True,
                                        requantize_output=True,
                                        per_channel=True)
        qnn_dense_driver(config)
def test_alter_layout():
    """Test alternating the layout of a conv2d.
    The layout of broadcast operators and the weight should be changed accordingly.
    """
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias")
        weight = relay.var("weight")
        y = relay.nn.conv2d(x,
                            weight,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.nn.bias_add(y, bias)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2))
        y = relay.cast(y, 'int32')
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs['data_layout'] = 'NCHW16c'
        new_attrs['kernel_layout'] = 'OIHW16i'
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64, ))
        weight = relay.var("weight", shape=(64, 64, 3, 3))

        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        w = relay.layout_transform(weight, "OIHW", "OIHW16i")
        y = relay.nn.conv2d(y,
                            w,
                            channels=64,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            kernel_layout="OIHW16i",
                            data_layout="NCHW16c")
        b = relay.expand_dims(bias, axis=1, num_newaxis=2)
        b = relay.expand_dims(b, axis=0, num_newaxis=1)
        b = relay.layout_transform(b, "NCHW", "NCHW16c")
        y = relay.add(y, b)

        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW16c")
        y = relay.cast(y, 'int32')
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(
            a, [transform.CanonicalizeOps(),
                transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
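# A minimal numpy sketch (illustrative only) of the "OIHW16i" kernel layout
# used above: the input-channel axis I of an OIHW kernel is split into I // 16
# outer blocks plus a 16-wide inner block, analogous to NCHW16c for the data.
# Variable names below are assumptions for illustration.
import numpy as np

_w = np.zeros((64, 64, 3, 3))  # OIHW kernel, as in expected()
_w16i = _w.reshape(64, 4, 16, 3, 3).transpose(0, 1, 3, 4, 2)  # OIHW -> OIHW16i
assert _w16i.shape == (64, 4, 3, 3, 16)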
def test_alter_layout_scalar_regression():
    """regression test where scalar fails"""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 16))
        bias = relay.var("bias", shape=(1, 1, 1, 16))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=16,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.add(y, bias)
        mean = relay.mean(y, axis=3, exclude=True)
        var = relay.variance(y, axis=3, exclude=True)
        gamma = relay.var("gamma")
        beta = relay.var("beta")
        y = relay.nn.batch_norm(y, gamma, beta, mean, var, axis=3)
        y = y[0]
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 16))
        bias = relay.var("bias", shape=(1, 1, 1, 16))
        x = relay.layout_transform(x, src_layout="NHWC", dst_layout="NCHW")
        x = relay.layout_transform(x, src_layout="NCHW", dst_layout="NCHW16c")
        weight = relay.layout_transform(weight, src_layout="HWIO", dst_layout="OIHW")
        y = relay.nn.conv2d(
            x, weight, channels=16, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        bias = relay.layout_transform(bias, src_layout="NHWC", dst_layout="NCHW")
        bias = relay.layout_transform(bias, src_layout="NCHW", dst_layout="NCHW16c")
        add = relay.add(y, bias)
        y = relay.layout_transform(add, src_layout="NCHW16c", dst_layout="NCHW")
        y = relay.layout_transform(y, src_layout="NCHW", dst_layout="NHWC")
        mean = relay.mean(y, axis=3, exclude=True)
        var = relay.variance(y, axis=3, exclude=True)
        denom = relay.const(1.0) / relay.sqrt(var + relay.const(1e-05))
        gamma = relay.var("gamma", shape=(16,))
        denom = denom * gamma
        denom_expand1 = relay.expand_dims(denom, axis=1, num_newaxis=2)
        denom_expand2 = relay.expand_dims(denom_expand1, axis=0)
        denom_nchwc16 = relay.layout_transform(
            denom_expand2, src_layout="NCHW", dst_layout="NCHW16c"
        )
        out = add * denom_nchwc16
        beta = relay.var("beta", shape=(16,))
        numerator = (-mean) * denom + beta
        numerator_expand1 = relay.expand_dims(numerator, axis=1, num_newaxis=2)
        numerator_expand2 = relay.expand_dims(numerator_expand1, axis=0)
        numerator_nchwc16 = relay.layout_transform(
            numerator_expand2, src_layout="NCHW", dst_layout="NCHW16c"
        )
        out = out + numerator_nchwc16
        out = relay.layout_transform(out, src_layout="NCHW16c", dst_layout="NCHW")
        y = relay.layout_transform(out, src_layout="NCHW", dst_layout="NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        desired_layouts = {"nn.conv2d": ["NCHW", "default"], "nn.batch_norm": ["NHWC", "default"]}
        a = run_opt_pass(
            a,
            [
                transform.InferType(),
                relay.transform.ConvertLayout(desired_layouts),
                transform.SimplifyInference(),
                transform.CanonicalizeOps(),
                transform.AlterOpLayout(),
            ],
        )
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
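# A minimal numpy sketch (illustrative only) checking that the hand-written
# SimplifyInference decomposition in expected() matches batch_norm:
#   denom = gamma / sqrt(var + eps);  out = x * denom + (-mean * denom + beta)
# All names below are assumptions for illustration.
import numpy as np

_rng = np.random.default_rng(0)
_x = _rng.normal(size=(8, 16)).astype("float32")
_gamma = _rng.normal(size=16).astype("float32")
_beta = _rng.normal(size=16).astype("float32")
_mean, _var, _eps = _x.mean(axis=0), _x.var(axis=0), 1e-5
_ref = (_x - _mean) / np.sqrt(_var + _eps) * _gamma + _beta
_denom = _gamma / np.sqrt(_var + _eps)
_out = _x * _denom + (-_mean * _denom + _beta)
assert np.allclose(_ref, _out, rtol=1e-5, atol=1e-5)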