コード例 #1
0
    def test_workload_padding(
        self,
        target,
        input_shape,
        weight_shape,
        stride,
        padding,
        dilation,
        dtype,
        ref_data,
    ):
        """Check that the fallback schedule's tile_ow split covers the true output width.

        The product of the ``tile_ow`` split factors chosen by
        ``_fallback_schedule`` must equal the output width of the reference
        conv2d result in ``ref_data``.
        """
        data_np, weight_np, bias_np, conv_np = ref_data
        # conv_np is NCHW per the unpack below; only the width is asserted on.
        _, _, _, expected_width = conv_np.shape

        data_pl = te.placeholder(input_shape, name="A", dtype=dtype)
        weight_pl = te.placeholder(weight_shape, name="W", dtype=dtype)

        with tvm.target.Target(target):
            workload = _get_workload(
                data_pl, weight_pl, (stride, stride), padding, dilation, dtype
            )

            # The fallback schedule should only pick tile_ow candidates that
            # are factors of the (padded) output width.
            config = autotvm.get_config()
            _fallback_schedule(config, workload)
            tile_product = np.prod(config["tile_ow"].size)

        tvm.testing.assert_allclose(tile_product, expected_width)
コード例 #2
0
ファイル: test_topi_conv2d_nchw.py プロジェクト: NathanTP/tvm
    def verify_workload_padding():
        """Verify the fallback schedule's tile_ow factors reproduce the output width.

        Uses the enclosing scope's placeholders (A, W), conv parameters, and
        the reference output ``c_np``.
        """
        _, _, _, expected_width = get_const_tuple(c_np.shape)
        workload = _get_workload(A, W, (stride, stride), padding, dilation, dtype)

        # tile_ow candidates selected by the fallback must be factors of the
        # output width, so their product equals it exactly.
        config = autotvm.get_config()
        _fallback_schedule(config, workload)

        tvm.testing.assert_allclose(np.prod(config["tile_ow"].size), expected_width)
コード例 #3
0
    def verify_workload_padding():
        """Verify the int8 fallback schedule's tile_ow factors reproduce the output width.

        Uses the enclosing scope's placeholders (A, W), conv parameters,
        channel counts, and the reference output ``c_np``.
        """
        _, _, _, expected_width = get_const_tuple(c_np.shape)
        workload = _get_workload(A, W, (stride, stride), padding, dilation, dtype)

        # For testing functionality only: pick arbitrary int32_lanes and
        # num_int8_elements values that divide the channel counts, with no
        # regard for performance.
        lanes, int8_elems = num_filter, in_channel

        # tile_ow candidates selected by the fallback must be factors of the
        # output width, so their product equals it exactly.
        config = autotvm.get_config()
        fallback_schedule_cpu_common_int8(config, workload, lanes, int8_elems)

        tvm.testing.assert_allclose(np.prod(config["tile_ow"].size), expected_width)