Example #1
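These examples omit their shared preamble. Assuming they come from TVM's Adreno texture test suite, a minimal sketch of the imports they rely on is shown below; build_run_compare and gpu_preprocess are helpers defined alongside the tests (not part of the public tvm API), so they are only referenced in a comment here.

import re

import numpy as np

import tvm
import tvm.relay.testing
from tvm import relay
from tvm.contrib import utils

# build_run_compare and gpu_preprocess come from the accompanying Adreno test
# utilities; import them from wherever they are defined in your checkout.

The examples below use these names directly.
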
def test_conv2d_yolov3_v2_nhwc_3c():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 13, 13, 1024)
    filter_shape = (1, 1, 1024, 255)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)

    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=255,
        kernel_size=(1, 1),
    )

    mod = relay.Function([A, B], conv)
    # mod, params = relay.testing.init.create_workload(func)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    initializer("weight", filter_data)
    params = {
        "weight": tvm.nd.array(filter_data),
    }

    build_run_compare(mod, params, {"data": input_shape}, dtype, target)
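
build_run_compare itself is not shown in these examples. As a rough, hypothetical CPU-only stand-in (the real helper targets a remote Adreno device and also accepts extras such as gpu_preprocess, static memory scopes and a stat_file), one could build the module, run it with the graph executor, and compare against a Relay VM reference roughly like this:

from tvm.contrib import graph_executor

def build_run_compare_cpu(mod, params, input_shapes, dtype, target="llvm"):
    # Hypothetical local sketch, not the helper used by the Adreno tests.
    inputs = {
        name: np.random.uniform(-1, 1, shape).astype(dtype)
        for name, shape in input_shapes.items()
    }
    ir_mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(ir_mod, target=target, params=params)
    dev = tvm.device(target, 0)
    rt = graph_executor.GraphModule(lib["default"](dev))
    for name, data in inputs.items():
        rt.set_input(name, data)
    rt.run()
    out = rt.get_output(0).numpy()
    # Reference result: evaluate the same function with the Relay VM on CPU,
    # passing both the generated inputs and the bound parameter tensors.
    args = [inputs.get(p.name_hint, params.get(p.name_hint)) for p in mod.params]
    ref = relay.create_executor(
        "vm", mod=ir_mod, device=tvm.cpu(), target="llvm"
    ).evaluate()(*args)
    np.testing.assert_allclose(out, ref.numpy(), rtol=1e-2, atol=1e-2)
    return lib.get_graph_json()

With such a helper, each example below reduces to constructing the Relay graph and handing it over together with the parameters and the input shapes.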
def test_depthwise_conv2d_bias_nchwc():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 64, 112, 112)
    filter_shape = (64, 1, 3, 3)
    bias_shape = (1, 64, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    # C = relay.nn.relu(A)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        groups=64,
        kernel_size=(3, 3),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    # mod, params = relay.testing.init.create_workload(func)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      gpu_preprocess)


def test_conv2d_winograd_conv():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 4, 3, 3)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    filter_shape3 = (8, 4, 3, 3)
    bias_shape3 = (8, )
    B3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    D = relay.nn.conv2d(A,
                        B3,
                        padding=[1, 1, 1, 1],
                        channels=8,
                        kernel_size=[3, 3],
                        out_dtype=dtype)

    filter_shape4 = (8, 8, 3, 3)
    bias_shape4 = (8, )
    B4 = relay.var("weight4", shape=filter_shape4, dtype=dtype)
    D = relay.nn.conv2d(D,
                        B4,
                        padding=[1, 1, 1, 1],
                        channels=8,
                        kernel_size=[3, 3],
                        out_dtype=dtype)
    mod = relay.Function([A, B3, B4], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    bias_data3 = np.zeros(bias_shape3).astype(dtype)
    filter_data4 = np.zeros(filter_shape4).astype(dtype)
    bias_data4 = np.zeros(bias_shape4).astype(dtype)
    initializer("weight", filter_data3)
    initializer("bias", bias_data3)
    initializer("weight", filter_data4)
    initializer("bias", bias_data4)
    params1 = {
        "weight3": tvm.nd.array(filter_data3),
        "weight4": tvm.nd.array(filter_data4),
    }

    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            '{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nchw_winograd_acc32.image2d", [["TENSOR", [1, 4, 3, 3], "float16"], ["TENSOR", [8, 4, 3, 3], "float16"], [1, 1], [1, 1, 1, 1], [1, 1], "float16"], {}], "config": {"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}\n'
        )
    graph = build_run_compare(mod,
                              params1, {"data": input_shape},
                              dtype,
                              target,
                              stat_file=stat_file)
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
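
The stat.log entry written above is an AutoTVM tuning record for the conv2d_nchw_winograd_acc32.image2d schedule; build_run_compare presumably applies it while compiling. A minimal sketch of how such a log is normally consumed (assuming the standard AutoTVM flow and a TVM build with the OpenCL codegen enabled, not the exact code of the helper):

from tvm import autotvm

# Apply the best records from the tuning log while compiling, so the recorded
# winograd schedule is selected for the matching conv2d workload instead of a
# fallback configuration.
with autotvm.apply_history_best(stat_file):
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(tvm.IRModule.from_expr(mod), target=target, params=params1)

The helper then runs this artifact on the device and, as above, returns the compiled graph for inspection.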
Example #4
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1_with_padding():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 129, 129, 144)
    filter_shape = (3, 3, 144, 1)
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2], )
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    # mod, params = relay.testing.init.create_workload(func)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
Example #5
def test_conv2d_resnet50_v2_nhwc_3c():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 224, 224, 3)
    filter_shape = (7, 7, 3, 64)
    bias_shape = (1, 1, 1, 64)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    # C = relay.nn.relu(A)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        kernel_size=(7, 7),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    # mod, params = relay.testing.init.create_workload(func)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
Example #6
def test_conv2d_inceptionv3_35_35_strides():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 35, 35, 48)
    filter_shape = (5, 5, 48, 64)
    bias_shape = (1, 1, 1, 64)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    # C = relay.nn.relu(A)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[2, 2, 2, 2],
        strides=[1, 1],
        out_dtype=dtype,
        channels=64,
        kernel_size=(5, 5),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      gpu_preprocess)
Example #7
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad_pass():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 32, 40, 40)
    filter_shape = (96, 32, 2, 2)
    bias_shape = (1, 96, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    # C = relay.nn.relu(A)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(2, 2),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      gpu_preprocess)
Example #8
def test_conv2d_4x4x4_16c16pad():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 256, 256, 32)
    filter_shape = (4, 4, 32, 4)
    bias_shape = (1, 1, 1, 4)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    # C = relay.nn.relu(A)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[3, 3, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=4,
        kernel_size=(4, 4),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
Example #9
def test_depthwise_conv2d_deeplabv3_4_35_35_576x3_3_576_1():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (4, 35, 35, 576)
    filter_shape = (3, 3, 576, 1)
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2], )
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
Example #10
def test_conv2d_deeplabv3_1_513_513_3x3_3_3_32():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 513, 513, 3)
    filter_shape = (3, 3, 3, 32)
    bias_shape = (filter_shape[-1], )
    kernel_size = (filter_shape[0], filter_shape[1])
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype=dtype,
        channels=filter_shape[-1],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.ones(filter_shape).astype(dtype)
    bias_data = np.ones(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target)


def test_conv2d_vgg16_winograd_4d():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 28, 28, 512)
    filter_shape = (3, 3, 512, 512)
    bias_shape = (1, 1, 1, 512)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[1, 1, 1, 1],
        channels=512,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            '{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nhwc_winograd_acc32.image2d", [["TENSOR", [1, 28, 28, 512], "float16"], ["TENSOR", [3, 3, 512, 512], "float16"], [1, 1], [1, 1, 1, 1], [1, 1], "float16"], {}], "config": {"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}\n'
        )
    graph = build_run_compare(mod,
                              params1, {"data": input_shape},
                              dtype,
                              target,
                              stat_file=stat_file)
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
Example #12
def test_conv2d_vgg16_winograd_4d():
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 28, 28, 512)
    filter_shape = (3, 3, 512, 512)
    bias_shape = (1, 1, 1, 512)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)

    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[1, 1, 1, 1],
        channels=512,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)

    mod = relay.Function([A, B, bias], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }

    graph = build_run_compare(mod, params1, {"data": input_shape}, dtype,
                              target)
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
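
The graph value returned by build_run_compare is presumably the graph executor JSON of the compiled module. Under that assumption (and again assuming an OpenCL-enabled TVM build), the same "winograd" check can be reproduced directly from a relay.build result:

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(tvm.IRModule.from_expr(mod), target=target, params=params1)
# The names of the fused kernels appear in the graph JSON; the Adreno strategy
# is expected to pick a winograd schedule for this 3x3 convolution even without
# a tuning log.
matches = re.findall("winograd", lib.get_graph_json())
assert len(matches) > 0

The next example switches from single convolutions to chains of them, where the interesting part is the memory scope of each intermediate tensor.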
def test_conv2d_different_lowering_same_op():
    """
    Use case for verifying the caching of compiled functions.
    The three back-to-back convolutions in this case should be compiled into
    three different entities and lowered differently, because they differ in
    the memory scopes of their input params and of their output.

                layout_transform (NCHW->NCHW4c)
                         |                      <- buffer
                      conv2d (1)                <- buffer as input tensor and texture as output
                         |                      <- texture
                      conv2d (2)                <- texture as input and texture as output
                         |                      <- texture
                      conv2d (3)                <- texture as input and buffer as output
                         |                      <- buffer
                    layout_transform (NCHW4c->NCHW)
    """
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 32, 40, 40)
    filter_shape1 = (32, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)

    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )

    conv2 = relay.nn.conv2d(
        conv1,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )

    conv3 = relay.nn.conv2d(
        conv2,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )

    mod = relay.Function([A, W1], conv3)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
    }

    static_memory_scope = [
        "global",
        "global.texture",
        "global.texture-weight",
        "global.texture",
        "global.texture",
        "",
        "",
    ]

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      static_memory_scope)
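
The NCHW->NCHW4c transform mentioned in the docstring packs the channel dimension into blocks of four so that each texture element holds four channels. A small NumPy illustration of that packing (independent of TVM's own layout_transform implementation):

# NCHW -> NCHW4c: (N, C, H, W) -> (N, C//4, H, W, 4)
n, c, h, w = 1, 32, 40, 40
x = np.random.uniform(-1, 1, (n, c, h, w)).astype("float16")
x_nchw4c = x.reshape(n, c // 4, 4, h, w).transpose(0, 1, 3, 4, 2)
assert x_nchw4c.shape == (n, c // 4, h, w, 4)
# Unpacking reverses the transpose and reshape and recovers the original tensor.
x_back = x_nchw4c.transpose(0, 1, 4, 2, 3).reshape(n, c, h, w)
assert np.array_equal(x, x_back)

The remaining tests reuse this packed layout while varying how many consumers each texture has.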
def test_branching_texture_params():
    """
    Verification of passing a texture to several consumers, and of the memory-scope
    markup of relay variables in primary functions + on_device.

                layout_transform (NCHW->NCHW4c)
                         |                      <- buffer
                      conv2d (0)                <- to get textures
             /           \           \          <- here should be textures and textures in params
          conv2d (1)    conv2d (2)    conv2d (3)
            \             /           |
                  add                 |         <- to keep a single output
                    \                /
                           add                  <- to keep a single output
                            |                   <- buffer
                    layout_transform (NCHW4c->NCHW)
    """
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 32, 40, 40)
    filter_shape0 = (32, 32, 1, 1)
    filter_shape1 = (32, 32, 2, 2)
    filter_shape2 = (32, 32, 1, 1)
    filter_shape3 = (32, 32, 2, 2)
    bias_shape1 = (1, 32, 1, 1)
    # bias_shape2 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W0 = relay.var("weight0", shape=filter_shape0, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)

    conv0 = relay.nn.conv2d(
        A,
        W0,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )

    conv1 = relay.nn.conv2d(
        conv0,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv1 = relay.op.add(conv1, B1)
    conv1 = relay.op.nn.relu(conv1)

    conv2 = relay.nn.conv2d(
        conv0,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )

    conv3 = relay.nn.conv2d(
        conv0,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 1, 1, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv3 = relay.op.nn.relu(conv3)
    res = relay.op.add(conv1, conv2)
    res = relay.op.add(res, conv3)

    mod = relay.Function([A, W0, W1, B1, W2, W3], res)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data0 = np.zeros(filter_shape0).astype(dtype)
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight0": tvm.nd.array(filter_data0),
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "weight3": tvm.nd.array(filter_data3),
    }

    static_memory_scope = [
        "global",
        "global.texture",
        "global.texture-weight",
        "global.texture",
        "global.texture-weight",
        "global.texture-weight",
        "global.texture-weight",
        "global.texture",
        "global.texture-weight",
        "global.texture",
        "",
        "",
    ]

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      static_memory_scope)


def test_concat():
    """
        layout_transform (NCHW->NCHW4c)
                  |                      <- buffer
                conv2d (1)               <- to get textures as output
               /         \
            conv2d (2)    conv2d (3)
                 \       /               <- concat does not support textures, so here we should have buffers
                concatenation
                     |                   <- buffer
               layout_transform (NCHW4c->NCHW)
    """
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 32, 40, 40)
    filter_shape1 = (96, 32, 2, 2)
    filter_shape2 = (32, 96, 2, 2)
    filter_shape3 = (5, 96, 2, 2)
    bias_shape1 = (1, 96, 1, 1)
    bias_shape2 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    B2 = relay.var("bias2", shape=bias_shape2, dtype=dtype)

    # C = relay.nn.relu(A)
    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(2, 2),
    )
    D = relay.op.add(conv1, B1)
    D = relay.op.nn.relu(D)

    conv2 = relay.nn.conv2d(
        D,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv2 = relay.op.add(conv2, B2)
    conv2 = relay.op.nn.relu(conv2)

    conv3 = relay.nn.conv2d(
        D,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=5,
        kernel_size=(2, 2),
    )

    t = relay.Tuple([conv2, conv3])
    c = relay.op.concatenate(t, axis=1)

    mod = relay.Function([A, W1, B1, W2, B2, W3], c)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    bias_data2 = np.zeros(bias_shape2).astype(dtype)
    initializer("weight", filter_data2)
    initializer("bias", bias_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "bias2": tvm.nd.array(bias_data2),
        "weight3": tvm.nd.array(filter_data3),
    }

    static_memory_scope = [
        "",
        "global",
        "global.texture-weight",
        "global.texture-weight",
        "global",
        "global.texture-weight",
        "global.texture-weight",
        "",
        "",
        "",
        "",
        "",
    ]

    # The detailed scope list above is overridden with an empty list, which
    # presumably disables the per-tensor memory scope check in build_run_compare.
    static_memory_scope = []

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      static_memory_scope)


def test_residual_block():
    """
    - a residual-style block followed by a convolution, so that a texture appears after the residual block
    - verification that a scalar operand is mapped to the global memory scope
        layout_transform (NCHW->NCHW4c)
                  |                      <- buffer
                conv2d (1)                  <- to get textures as output
               /         \
            conv2d (2)    |
                 \       /
                    add                     <- add should be fused into conv2d (2)
                multiply by scalar          <- the scalar input of the multiply maps to a buffer
                    relu
                     |                      <- texture in intermediate tensor
                  conv2d (3)
                   relu
                     |                      <- buffer
               layout_transform (NCHW4c->NCHW)
    """
    target = "opencl --device=adreno"
    dtype = "float16"

    input_shape = (1, 32, 40, 40)
    filter_shape1 = (32, 32, 2, 2)
    filter_shape2 = (32, 32, 1, 1)
    filter_shape3 = (32, 32, 2, 2)
    bias_shape1 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)

    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    D = relay.op.add(conv1, B1)
    D = relay.op.nn.relu(D)

    conv2 = relay.nn.conv2d(
        D,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    D = relay.op.add(conv2, D)
    D = D * relay.const(0.15, "float16")
    D = relay.op.nn.relu(D)

    conv3 = relay.nn.conv2d(
        D,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    D = relay.op.nn.relu(conv3)

    mod = relay.Function([A, W1, B1, W2, W3], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "weight3": tvm.nd.array(filter_data3),
    }

    static_memory_scope = [
        "global",
        "global.texture",
        "global.texture-weight",
        "global.texture-weight",
        "global.texture",
        "global.texture-weight",
        "global",
        "global.texture",
        "global.texture-weight",
        "",
        "",
    ]

    build_run_compare(mod, params1, {"data": input_shape}, dtype, target,
                      static_memory_scope)