Example #1
def test_maxpool_1d(data_shape_nwc, pool_size, strides, padding):
    """Test a subgraph with a single maxpool_1d operator."""
    ishape = data_shape_nwc

    input0 = relay.var("input", relay.TensorType(ishape, "int8"))
    out = relay.op.nn.max_pool1d(input0,
                                 pool_size,
                                 layout="NWC",
                                 strides=strides,
                                 padding=padding)

    mod = tvm.IRModule.from_expr(relay.Function([input0], out))
    inputs = {
        "input": np.random.randint(low=-128,
                                   high=127,
                                   size=ishape,
                                   dtype="int8")
    }
    output_list = generate_ref_data(mod, inputs)

    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        runner=AOT_CORSTONE300_RUNNER,
        interface_api="c",
        use_unpacked_api=True,
        target_opts={
            "-keys": "arm_cpu",
            "-mcpu": "cortex-m7",
        },
    )
Example #2
def test_conv2d_int8_tflite(ifm_shape, kernel_shape, strides, dilation,
                            padding, activation):
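    """Test a TFLite-generated int8 conv2d partitioned for CMSIS-NN against the TFLite reference output."""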
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    tflite_model, relay_mod, params = create_conv2d_tflite_relay_models(
        ifm_shape, kernel_shape, strides, dilation, padding, activation, dtype)

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params)

    # validate pattern matching
    assert_partitioned_function(relay_mod, cmsisnn_mod)

    # validate CMSIS-NN output against TFLite output
    input_map, output_map, output_tolerance = generate_ref_data_tflite(
        tflite_model)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=input_map,
            outputs=output_map,
            params=params,
            output_tolerance=output_tolerance,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #3
def test_avgpool_1d(data_shape_ncw, pool_size, strides, padding):
    """Test a subgraph with a single avgpool_1d operator."""

    ishape = data_shape_ncw

    input0 = relay.var("input", relay.TensorType(ishape, "int32"))
    out0 = relay.op.nn.avg_pool1d(input0, pool_size, layout="NCW", strides=strides, padding=padding)
    ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

    input1 = relay.var("input", relay.TensorType(ishape, "int16"))
    out1 = relay.op.nn.avg_pool1d(input1, pool_size, layout="NCW", strides=strides, padding=padding)
    mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

    input_data = np.random.randint(low=-10, high=10, size=ishape, dtype="int32")
    inputs = {"input": input_data}
    output_list = generate_ref_data(ref_mod, inputs)

    compile_and_run(
        AOTTestModel(
            module=mod, inputs={"input": input_data.astype(dtype="int16")}, outputs=output_list
        ),
        runner=AOT_CORSTONE300_RUNNER,
        interface_api="c",
        use_unpacked_api=True,
        target_opts={
            "-keys": "arm_cpu",
            "-mcpu": "cortex-m7",
        },
    )
Example #4
def test_op_int8(zero_point, scale):
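    """Test a single quantized int8 operator partitioned for CMSIS-NN against the CPU reference output."""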
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    shape = [1, 16, 16, 3]
    model = make_model(shape, dtype, dtype, zero_point, scale)
    orig_mod = make_module(model)

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    in_min, in_max = get_range_for_dtype_str(dtype)
    np.random.seed(0)
    input_data = np.random.randint(in_min,
                                   high=in_max,
                                   size=shape,
                                   dtype=dtype)
    inputs = {"in0": input_data}
    params = {}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(module=cmsisnn_mod,
                     inputs=inputs,
                     outputs=output_list,
                     params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #5
def test_dense(M, K, N):
    """Test a subgraph with a single dense operator."""
    ishape = (M, K)
    wshape = (N, K)

    input0 = relay.var("input", relay.TensorType(ishape, "int8"))
    dense_f = relay.op.nn.batch_flatten(input0)
    weight0 = relay.const(
        np.random.randint(low=-10, high=10, size=wshape, dtype="int8"))
    out = relay.op.nn.dense(dense_f, weight0, out_dtype="int32")

    mod = tvm.IRModule.from_expr(relay.Function([input0], out))
    inputs = {
        "input": np.random.randint(low=-128,
                                   high=127,
                                   size=ishape,
                                   dtype="int8")
    }
    output_list = generate_ref_data(mod, inputs)

    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        runner=AOT_CORSTONE300_RUNNER,
        interface_api="c",
        use_unpacked_api=True,
        target_opts={
            "-keys": "arm_cpu",
            "-mcpu": "cortex-m7",
        },
    )
Example #6
def test_cnn_small():
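    """Test the CNN-small keyword spotting model partitioned for CMSIS-NN against the CPU reference output."""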
    # download the model
    base_url = "https://github.com/ARM-software/ML-zoo/raw/master/models/keyword_spotting/cnn_small/tflite_int8"
    file_to_download = "cnn_s_quantized.tflite"
    model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_to_download)

    with open(model_file, "rb") as f:
        tflite_model_buf = f.read()

    input_shape = (1, 490)
    in_min, in_max = get_range_for_dtype_str("int8")
    input_data = np.random.randint(in_min, high=in_max, size=input_shape).astype(np.float32)

    orig_mod, params = convert_to_relay(tflite_model_buf, input_data, "input")
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate CMSIS-NN output against CPU output
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER
    inputs = {"input": input_data}
    params = {}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(module=cmsisnn_mod, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #7
def test_conv2d(data_shape_nhwc, kernel_size, num_filter, strides, padding,
                dilation, dtype):
    """Test a subgraph with a single conv2d operator."""
    ishape = data_shape_nhwc
    wshape = (*kernel_size, data_shape_nhwc[-1], num_filter)

    weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)

    input0 = relay.var("input", relay.TensorType(ishape, dtype))
    weight0 = relay.const(weight_data)
    out0 = relay.op.nn.conv2d(
        input0,
        weight0,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=(dilation, dilation),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="int32",
        out_layout="NHWC",
    )
    ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

    input1 = relay.var("input", relay.TensorType(ishape, dtype))
    weight1 = relay.const(np.moveaxis(weight_data, 2, -1))
    out1 = relay.op.nn.conv2d(
        input1,
        weight1,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=(dilation, dilation),
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype="int32",
        out_layout="NHWC",
    )
    mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

    inputs = {
        "input": np.random.randint(low=-128,
                                   high=127,
                                   size=ishape,
                                   dtype=dtype)
    }
    output_list = generate_ref_data(ref_mod, inputs)

    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        runner=AOT_CORSTONE300_RUNNER,
        interface_api="c",
        use_unpacked_api=True,
        target_opts={
            "-keys": "arm_cpu",
            "-mcpu": "cortex-m7",
        },
    )
Example #8
def test_op_int8(op, input_0_scale, input_0_zero_point, input_1_scale, input_1_zero_point):
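    """Test an int8 binary elementwise operator partitioned for CMSIS-NN against the CPU reference output."""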
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER

    dtype = "int8"
    shape = [1, 16, 16, 3]
    model = make_model(
        op,
        shape,
        dtype,
        dtype,
        input_0_scale,
        input_0_zero_point,
        input_1_scale,
        input_1_zero_point,
    )
    orig_mod = make_module(model)

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)

    # validate pattern matching
    attrs = [
        cmsisnn_mod[var.name_hint].attrs
        for var in cmsisnn_mod.get_global_vars()
        if cmsisnn_mod[var.name_hint].attrs
    ]
    assert any(attrs), "At least one function with external attributes was expected."

    compilers = [
        key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
    ]
    assert any(compilers), "Module does not contain function for cmsisnn target."

    assert count_num_calls(orig_mod) == count_num_calls(
        cmsisnn_mod
    ), "Number of calls changed during partitioning"

    # validate the output
    in_min, in_max = get_range_for_dtype_str(dtype)
    inputs = {
        "input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
        "input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #9
def test_op_int8(
    in_shape,
    pool_size,
    strides,
    padding,
    relu_type,
    pool_type,
    zero_point,
    scale,
):
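    """Test a single int8 pooling operator partitioned for CMSIS-NN against the CPU reference output."""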
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"

    model = make_model(
        pool_type,
        in_shape,
        pool_size,
        strides,
        padding,
        dtype,
        scale,
        zero_point,
        relu_type,
    )
    orig_mod = make_module(model)

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    in_min, in_max = get_range_for_dtype_str(dtype)
    np.random.seed(0)
    inputs = {
        "input":
        np.random.randint(in_min, high=in_max, size=in_shape, dtype="int8"),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=None,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #10
def test_constant_input_int8(op, input_0, input_1):
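    """Test an int8 binary operator with constant operand(s) partitioned for CMSIS-NN against the CPU reference output."""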
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    shape = [1, 16, 16, 3]
    input_0_scale = 0.256
    input_0_zero_point = 33
    input_1_scale = 0.128
    input_1_zero_point = -24
    model = make_model(
        op,
        input_0,
        input_1,
        input_0_scale,
        input_0_zero_point,
        input_1_scale,
        input_1_zero_point,
    )
    orig_mod = make_module(model)

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    in_min, in_max = get_range_for_dtype_str(dtype)
    inputs = {}
    if isinstance(input_0, tvm.relay.expr.Var):
        inputs.update({
            "input_0":
            np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)
        })
    if isinstance(input_1, tvm.relay.expr.Var):
        inputs.update({
            "input_1":
            np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)
        })
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #11
def test_op_int8(op, relu_type, input_0_scale, input_0_zero_point,
                 input_1_scale, input_1_zero_point):
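    """Test an int8 binary elementwise operator with an optional fused activation, partitioned for CMSIS-NN."""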
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    shape = [1, 16, 16, 3]
    model = make_model(
        op,
        generate_variable("input_0"),
        generate_variable("input_1"),
        input_0_scale,
        input_0_zero_point,
        input_1_scale,
        input_1_zero_point,
        relu_type,
    )
    orig_mod = make_module(model)

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    in_min, in_max = get_range_for_dtype_str(dtype)
    inputs = {
        "input_0": np.random.randint(in_min,
                                     high=in_max,
                                     size=shape,
                                     dtype=dtype),
        "input_1": np.random.randint(in_min,
                                     high=in_max,
                                     size=shape,
                                     dtype=dtype),
    }
    output_list = generate_ref_data(orig_mod["main"], inputs)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
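Example #12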
def check_aot_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
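    """Compile and run the module with the default AOT test runner (packed interface) and compare its output against `result`."""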
    # Late import to avoid breaking test with USE_MICRO=OFF.
    from aot.aot_test_utils import AOTTestModel, AOT_DEFAULT_RUNNER, compile_and_run

    interface_api = "packed"
    use_unpacked_api = False
    test_runner = AOT_DEFAULT_RUNNER
    compile_and_run(
        AOTTestModel(module=mod, inputs=map_inputs, outputs=[result]),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
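Example #13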
def test_empty_function():
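    """Compile and run a hand-written partitioned module whose CMSIS-NN external function contains only an add."""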
    ORIGINAL_MODEL = """
#[version = "0.0.5"]
def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
    add(%data, %data)
}
"""
    CMSISNN_MODEL = """
#[version = "0.0.5"]
def @tvmgen_default_cmsis_nn_main_1(%i1: Tensor[(16, 29), int8], Inline=1, Compiler="cmsis-nn", global_symbol="tvmgen_default_cmsis_nn_main_1", Primitive=1) -> Tensor[(16, 29), int8] {
  add(%i1, %i1)
}
def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
  %1 = @tvmgen_default_cmsis_nn_main_1(%data) /* ty=Tensor[(16, 29), int8] */;
  %1
}
"""
    orig_mod = tvm.parser.fromtext(ORIGINAL_MODEL)
    cmsisnn_mod = tvm.parser.fromtext(CMSISNN_MODEL)
    params = {}

    # validate the output
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    dtype = "int8"
    in_min, in_max = get_range_for_dtype_str(dtype)
    rng = np.random.default_rng(12345)
    inputs = {"data": rng.integers(in_min, high=in_max, size=(16, 29), dtype=dtype)}
    outputs = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=outputs,
            params=params,
            output_tolerance=0,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #14
def test_cnn_small():
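    """Test the CNN-small keyword spotting model (int8 input) partitioned for CMSIS-NN against the CPU reference output."""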
    # download the model
    base_url = "https://github.com/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8"
    file_to_download = "cnn_s_quantized.tflite"
    file_saved = "cnn_s_quantized_15Dec2021.tflite"
    model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_saved)

    with open(model_file, "rb") as f:
        tflite_model_buf = f.read()

    input_shape = (1, 490)
    dtype = "int8"
    in_min, in_max = get_range_for_dtype_str(dtype)
    rng = np.random.default_rng(12345)
    input_data = rng.integers(in_min, high=in_max, size=input_shape, dtype=dtype)

    orig_mod, params = convert_to_relay(tflite_model_buf, input_data, "input")
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate CMSIS-NN output against CPU output
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER
    inputs = {"input": input_data}
    params = {}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #15
def test_cnn_small():
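    """Test the CNN-small keyword spotting model partitioned for CMSIS-NN against the CPU reference output."""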
    # download the model
    base_url = "https://github.com/ARM-software/ML-zoo/raw/ee35139af86bdace5e502b09fe8b9da9cb1f06bb/models/keyword_spotting/cnn_small/tflite_int8"
    file_to_download = "cnn_s_quantized.tflite"
    model_file = download_testdata("{}/{}".format(base_url, file_to_download),
                                   file_to_download)

    with open(model_file, "rb") as f:
        tflite_model_buf = f.read()

    input_shape = (1, 490)
    rng = np.random.default_rng(12345)
    input_data = rng.random(input_shape, dtype=np.float32)

    orig_mod, params = convert_to_relay(tflite_model_buf, input_data, "input")
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate CMSIS-NN output against CPU output
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER
    inputs = {"input": input_data}
    params = {}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #16
def test_conv2d_int8_tflite(ifm_shape, kernel_shape, strides, dilation,
                            padding, activation):
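    """Test a TFLite-generated int8 conv2d partitioned for CMSIS-NN against the TFLite reference output."""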
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER
    dtype = "int8"

    from tvm.relay.testing.tflite import TFLiteModel

    tfl_model = TFLiteModel(dtype)
    conv2d_function = tfl_model.create_conv2d_single(kernel_shape, strides,
                                                     padding, dilation,
                                                     activation)
    tfl_model.create_tflite_model(conv2d_function, [ifm_shape])
    relay_mod, relay_params = tfl_model.convert_to_relay()

    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(relay_mod, relay_params)

    # validate pattern matching
    assert_partitioned_function(relay_mod, cmsisnn_mod)

    # validate CMSIS-NN output against TFLite output
    input_map, output_map, output_tolerance = tfl_model.generate_reference_data()
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=input_map,
            outputs=output_map,
            params=relay_params,
            output_tolerance=output_tolerance,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #17
def test_depthwise_int8(
    ifm_shape,
    kernel_size,
    padding,
    strides,
    dilation,
    enable_bias,
    relu_type,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    depth_multiplier,
):
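    """Test a quantized int8 depthwise conv2d partitioned for CMSIS-NN against the CPU reference output."""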
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER

    dtype = "int8"
    groups = 1
    weight_format = "HWIO"
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0
    in_min, in_max = get_range_for_dtype_str(dtype)

    # Depthwise case: override the generic conv2d settings above with per-channel groups and an HWOI kernel layout.
    groups = ifm_shape[3]
    weight_format = "HWOI"
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3], depth_multiplier)
    out_channels = ifm_shape[3] * depth_multiplier
    ks_len = len(kernel_scale)
    kernel_scale = [kernel_scale[i % ks_len] for i in range(out_channels)]

    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        dtype,
        dtype,
        dtype,
        True,
    )

    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        dtype,
        out_channels,
        weight_format,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    attrs = [
        cmsisnn_mod[var.name_hint].attrs
        for var in cmsisnn_mod.get_global_vars()
        if cmsisnn_mod[var.name_hint].attrs
    ]
    assert any(attrs), "At least one function with external attributes was expected."

    compilers = [
        key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
    ]
    assert any(compilers), "Module does not contain function for cmsis-nn target."

    assert count_num_calls(orig_mod) == count_num_calls(
        cmsisnn_mod
    ), "Number of calls changed during partitioning"

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #18
def test_op_int8(
    in_shape,
    enable_bias,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    relu_type,
):
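    """Test a quantized int8 fully connected operator partitioned for CMSIS-NN against the CPU reference output."""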
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_USMP_CORSTONE300_RUNNER

    dtype = "int8"
    kernel_zero_point = 0
    kernel_shape = [out_channels, in_shape[1]]
    conv2d_kernel_shape = (1, 1, kernel_shape[0], kernel_shape[1])
    in_min, in_max = get_range_for_dtype_str(dtype)

    output_scale, output_zero_point = get_conv2d_qnn_params(
        conv2d_kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        dtype,
    )

    model, params = make_model(
        in_shape,
        kernel_shape,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        output_zero_point,
        output_scale,
        dtype,
        dtype,
        out_channels,
        enable_bias,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {
        "input": rng.integers(in_min, high=in_max, size=in_shape, dtype=dtype)
    }
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #19
def test_op_int8(
    in_shape,
    enable_bias,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    relu_type,
):
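    """Test a quantized int8 fully connected operator partitioned for CMSIS-NN against the CPU reference output."""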
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER

    dtype = "int8"
    kernel_zero_point = 0
    kernel_shape = [out_channels, in_shape[1]]
    conv2d_kernel_shape = (1, 1, kernel_shape[0], kernel_shape[1])
    in_min, in_max = get_range_for_dtype_str(dtype)

    output_scale, output_zero_point = get_conv2d_qnn_params(
        conv2d_kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        dtype,
    )

    model, params = make_model(
        in_shape,
        kernel_shape,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        output_zero_point,
        output_scale,
        dtype,
        dtype,
        out_channels,
        enable_bias,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    attrs = [
        cmsisnn_mod[var.name_hint].attrs
        for var in cmsisnn_mod.get_global_vars()
        if cmsisnn_mod[var.name_hint].attrs
    ]
    assert any(attrs), "At least one function with external attributes was expected."

    compilers = [
        key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
    ]
    assert any(compilers), "Module does not contain function for cmsisnn target."

    assert count_num_calls(orig_mod) == count_num_calls(
        cmsisnn_mod
    ), "Number of calls changed during partitioning"

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {
        "input": rng.integers(in_min, high=in_max, size=in_shape, dtype=dtype)
    }
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #20
def test_depthwise_int8(
    ifm_shape,
    kernel_size,
    padding,
    strides,
    dilation,
    enable_bias,
    relu_type,
    input_zero_point,
    input_scale,
    kernel_scale,
    out_channels,
    depth_multiplier,
):
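    """Test a quantized int8 depthwise conv2d partitioned for CMSIS-NN against the CPU reference output."""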
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_CORSTONE300_RUNNER

    dtype = "int8"
    groups = 1
    weight_format = "HWIO"
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
    kernel_zero_point = 0
    in_min, in_max = get_range_for_dtype_str(dtype)

    # Depthwise case: override the generic conv2d settings above with per-channel groups and an HWOI kernel layout.
    groups = ifm_shape[3]
    weight_format = "HWOI"
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3], depth_multiplier)
    out_channels = ifm_shape[3] * depth_multiplier
    ks_len = len(kernel_scale)
    kernel_scale = [kernel_scale[i % ks_len] for i in range(out_channels)]

    output_scale, output_zero_point = get_conv2d_qnn_params(
        kernel_shape,
        input_scale,
        input_zero_point,
        kernel_scale,
        kernel_zero_point,
        dtype,
        dtype,
        dtype,
        True,
    )

    model, params = make_model(
        ifm_shape,
        kernel_shape,
        input_zero_point,
        input_scale,
        kernel_zero_point,
        kernel_scale,
        output_zero_point,
        output_scale,
        padding,
        strides,
        dilation,
        groups,
        dtype,
        dtype,
        out_channels,
        weight_format,
        enable_bias,
        relu_type,
    )
    orig_mod = make_module(model)
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)

    # validate pattern matching
    assert_partitioned_function(orig_mod, cmsisnn_mod)

    # validate the output
    rng = np.random.default_rng(12345)
    inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
    output_list = generate_ref_data(orig_mod["main"], inputs, params)
    compile_and_run(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            output_tolerance=1,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )