Example #1
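These examples come from TVM's Relay ConvertLayout tests and omit their shared scaffolding. A minimal sketch of the assumed imports and the run_opt_pass helper, modeled on TVM's Relay test utilities:

import tvm
from tvm import relay
from tvm.relay import analysis, transform


def run_opt_pass(expr, passes):
    # Wrap the expression in an IRModule, run the given pass(es) at
    # opt_level=3, and return the transformed "main" function (or its
    # body when a bare expression was passed in).
    passes = passes if isinstance(passes, list) else [passes]
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential(passes)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
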
def test_deformable_conv_bias_pool_convert_layout():
    def before(N, CI, H, W, CO, KH, KW, layout):
        if layout == "NCHW":
            data_shape = (N, CI, H, W)
            weight_shape = (CO, CI, KH, KW)
            kernel_layout = "OIHW"
        else:
            data_shape = (N, H, W, CI)
            weight_shape = (KH, KW, CI, CO)
            kernel_layout = "HWIO"
        bias_shape = (CO,)

        data = relay.var("data", shape=data_shape, dtype="float32")
        offset = relay.var("offset")
        weight = relay.var("weight", shape=weight_shape, dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")

        y = relay.nn.deformable_conv2d(
            data,
            offset,
            weight,
            kernel_size=(KH, KW),
            channels=CO,
            data_layout=layout,
            kernel_layout=kernel_layout,
        )
        y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout):
        layout_map = {"src": {}, "dst": {}}
        if src_layout == "NCHW":
            nchw = layout_map["src"]
            nhwc = layout_map["dst"]
        else:
            nchw = layout_map["dst"]
            nhwc = layout_map["src"]

        nchw["data_layout"] = "NCHW"
        nchw["data_shape"] = (N, CI, H, W)
        nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
        nchw["weight_shape"] = (CO, CI, KH, KW)
        nchw["kernel_layout"] = "OIHW"

        nhwc["data_layout"] = "NHWC"
        nhwc["data_shape"] = (N, H, W, CI)
        nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
        nhwc["weight_shape"] = (KH, KW, CI, CO)
        nhwc["kernel_layout"] = "HWIO"

        bias_shape = (CO,)

        data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
        offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
        weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")

        data = relay.layout_transform(
            data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        offset = relay.layout_transform(
            offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        weight = relay.layout_transform(
            weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
        )
        y = relay.nn.deformable_conv2d(
            data,
            offset,
            weight,
            kernel_size=(KH, KW),
            channels=CO,
            data_layout=layout_map["dst"]["data_layout"],
            kernel_layout=layout_map["dst"]["kernel_layout"],
        )
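        # ConvertLayout rewrites bias_add as expand_dims + add: broadcast the
        # 1-D bias to 4-D in the source layout, then transform it into the
        # destination layout so it lines up with the conv output.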
        if layout_map["src"]["data_layout"] == "NHWC":
            bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        else:
            bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
            bias = relay.expand_dims(bias, axis=0)
        bias = relay.layout_transform(
            bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        y = relay.add(y, bias)
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout_map["dst"]["data_layout"])
        y = relay.cast(y, "int32")
        y = relay.layout_transform(
            y, layout_map["dst"]["data_layout"], layout_map["src"]["data_layout"]
        )
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # NHWC -> NCHW
    a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
    a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(
        expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW"), transform.InferType()
    )
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # NCHW -> NHWC
    a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
    a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NHWC", "default"]}))
    b = run_opt_pass(
        expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC"), transform.InferType()
    )
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
Example #2
def test_convert_with_config():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)

        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        y2 = relay.nn.conv2d(
            y,
            weight2,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y2 = relay.nn.relu(y2)

        out = relay.Function([x, weight, weight2], y2)
        return out

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))

        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        weight2 = relay.layout_transform(weight2, "HWIO", "HWOI")

        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NHWC", "HWNC")

        y2 = relay.nn.conv2d(
            y,
            weight2,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="HWNC",
            kernel_layout="HWOI",
        )
        y2 = relay.nn.relu(y2)

        y2 = relay.layout_transform(y2, "HWNC", "NHWC")
        output = relay.Function(relay.analysis.free_vars(y2), y2)
        return output

    a = before()
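    # skip_layers=[0] exempts the first conv2d from conversion: it stays in
    # NHWC/HWIO, while the second conv2d is converted to HWNC/HWOI.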
    layout_config = relay.transform.LayoutConfig(skip_layers=[0])
    with layout_config:
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["HWNC", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
Example #3
def test_different_ops_convert_layout():
    """Check convert layout correctly supports converting the layout of
    different ops in the same graph.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
        weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
        weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
        out = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        out = relay.cast(out, "int8")
        out = relay.qnn.op.conv2d(
            out,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        out = relay.cast(out, "float32")
        out = relay.nn.conv2d_transpose(
            out,
            weight3,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        out = relay.Function(analysis.free_vars(out), out)
        return out

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
        weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
        weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight1 = relay.layout_transform(weight1, "OHWI", "HWIO")
        out = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.cast(out, "int8")
        out = relay.layout_transform(out, "NHWC", "NCHW")
        weight2 = relay.layout_transform(weight2, "OHWI", "OIHW")
        out = relay.qnn.op.conv2d(
            out,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        out = relay.cast(out, "float32")
        out = relay.layout_transform(out, "NCHW", "NHWC")
        weight3 = relay.layout_transform(weight3, "OHWI", "HWIO")
        out = relay.nn.conv2d_transpose(
            out,
            weight3,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.layout_transform(out, "NHWC", "NCHW")
        out = relay.Function(analysis.free_vars(out), out)
        return out

    a = before()
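    # Each op type gets its own desired (data, kernel) layout; ConvertLayout
    # inserts layout_transform ops at the boundaries where consecutive ops
    # disagree (here NHWC <-> NCHW around the qnn.conv2d).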
    desired_layouts = {
        "nn.conv2d": ["NHWC", "HWIO"],
        "qnn.conv2d": ["NCHW", "OIHW"],
        "nn.conv2d_transpose": ["NHWC", "HWIO"],
    }
    a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
    b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
Example #4
def run_infer_type(expr):
    # Wrap the expression in an IRModule, run type inference, and return
    # the typed "main" function (or its body for a bare expression).
    mod = tvm.IRModule.from_expr(expr)
    mod = transform.InferType()(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body


def test_qnn_conv_add_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
        y = relay.qnn.op.conv2d(
            x,
            weight1,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y1 = relay.qnn.op.conv2d(
            y,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.cast(y, "int8")
        y1 = relay.cast(y1, "int8")
        ret = relay.qnn.op.add(
            y,
            y1,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
        )
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        y = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.qnn.op.conv2d(
            y,
            weight1,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y1 = relay.qnn.op.conv2d(
            y,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.cast(y, "int8")
        y1 = relay.cast(y1, "int8")
        ret = relay.qnn.op.add(
            y,
            y1,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
        )
        ret = relay.layout_transform(ret, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)


def test_qnn_conv_requantize_convert_layout():
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        y = relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.qnn.op.requantize(
            y,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            out_dtype="int32",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        y = relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.qnn.op.requantize(
            y,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            axis=1,
            out_dtype="int32",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
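
Outside the test suite, the pass is applied to a whole module in the same way. A minimal end-to-end sketch (assumed usage, mirroring the calls above and TVM's ConvertLayout documentation):

x = relay.var("x", shape=(1, 56, 56, 64))
w = relay.var("w", shape=(3, 3, 64, 64))
y = relay.nn.conv2d(
    x,
    w,
    channels=64,
    kernel_size=(3, 3),
    padding=(1, 1),
    data_layout="NHWC",
    kernel_layout="HWIO",
)
mod = tvm.IRModule.from_expr(relay.Function([x, w], y))
# InferType first, then ConvertLayout; the pass inserts layout_transform
# ops around the converted conv2d.
seq = tvm.transform.Sequential(
    [
        relay.transform.InferType(),
        relay.transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}),
    ]
)
with tvm.transform.PassContext(opt_level=3):
    mod = seq(mod)
print(mod)  # the conv2d now runs in NCHW/OIHW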