Example #1
def test_any_concat():
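    # Apply elementwise ops to each input, then concatenate a dynamic-batch tensor with a static (1, 2) tensor along axis 0.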
    x = relay.var('x', shape=(relay.Any(), 2), dtype="float32")
    y = relay.var('y', shape=(1, 2), dtype="float32")
    xx = x - relay.expr.const(3.0)
    yy = y * relay.expr.const(5.0)
    z = relay.op.concatenate([xx, yy], axis=0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], z)
    x_np = np.random.uniform(size=(3, 2)).astype('float32')
    y_np = np.random.uniform(size=(1, 2)).astype('float32')
    ref = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
    check_result([x_np, y_np], mod, ref)
Example #2
def test_any_crop_and_resize():
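    # crop_and_resize with a dynamic number of boxes and box indices, checked in both NHWC and NCHW layouts.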
    verify_any_crop_and_resize(
        data_shape=(1, 234, 234, 256),
        boxes_shape=(relay.Any(), 4),
        box_indices_shape=(relay.Any(),),
        crop_size=(14, 14),
        layout="NHWC",
        static_boxes=(128, 4),
        static_box_indices_shape=(128,),
        ref_out_shape=(128, 14, 14, 256),
    )
    verify_any_crop_and_resize(
        data_shape=(1, 256, 234, 234),
        boxes_shape=(relay.Any(), 4),
        box_indices_shape=(relay.Any(),),
        crop_size=(14, 14),
        layout="NCHW",
        static_boxes=(128, 4),
        static_box_indices_shape=(128,),
        ref_out_shape=(128, 256, 14, 14),
    )
Example #3
def test_robust_imputer():
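    # Fit a RobustImputer with a constant fill strategy, then compile and run it with a dynamic batch dimension.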
    st_helper = SklearnTestHelper()
    data = np.array(
        [[4, 5, np.nan, 7], [0, np.nan, 2, 3], [8, 9, 10, 11], [np.inf, 13, 14, 15]],
        dtype=np.float32,
    )

    ri = RobustImputer(dtype=None, strategy="constant", fill_values=np.nan, mask_function=None)
    ri.fit(data)

    dshape = (relay.Any(), len(data[0]))
    _test_model_impl(st_helper, ri, dshape, data)
Example #4
def test_simple_imputer():
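    # SimpleImputer replaces NaNs with the column median; the compiled model keeps a dynamic batch dimension.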
    st_helper = SklearnTestHelper()
    data = np.array(
        [[4, 5, np.nan, 7], [0, np.nan, 2, 3], [8, 9, 10, 11], [np.nan, 13, 14, 15]],
        dtype=np.float32,
    )

    imp_mean = SimpleImputer(missing_values=np.nan, strategy="median")
    imp_mean.fit(data)

    dshape = (relay.Any(), len(data[0]))
    _test_model_impl(st_helper, imp_mean, dshape, data)
Example #5
def test_robust_missing_indicator():
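    # RobustMissingIndicator flags missing entries; the compiled model keeps a dynamic batch dimension.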
    st_helper = SklearnTestHelper()
    data = np.array(
        [[4, 5, np.nan, 7], [0, np.nan, 2, 3], [8, 9, 10, 11], [np.inf, 13, 14, 15]],
        dtype=np.float32,
    )

    rmi = RobustMissingIndicator()
    rmi.fit(data)

    dshape = (relay.Any(), len(data[0]))
    _test_model_impl(st_helper, rmi, dshape, data)
Example #6
def test_unique(target, dev):
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    [unique, _, _, num_unique] = relay.unique(x, is_sorted=True)
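    # Slice the padded unique output down to the dynamic num_unique length.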
    mod["main"] = relay.Function([x],
                                 relay.op.strided_slice(unique,
                                                        begin=[0],
                                                        end=num_unique))
    x_np = np.random.randint(0, high=10, size=(10, )).astype(dtype)
    res_np = np.unique(x_np)
    check_mod(target, dev, mod, x_np, res_np)
Example #7
def test_dynamic_channels():
    # Compile simulated quantize once but support either per-channel or scalar params.
    data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("float32")
    # Test scalar qnn params.
    scale_np = np.asarray([0.5]).astype("float32")
    zp_np = np.asarray([127]).astype("int32")
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
    quant_args = {"out_zero_point": zp_np[0], "out_scale": scale_np[0]}
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=0,
        out_dtype="uint8",
        in_data=data,
    )
    # Create variables with undefined shape and run with scalar inputs.
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
    zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
    dtype = relay.var("dtype", shape=[])
    vm = build_simulated_quantize(input_data, scale, zp, dtype, axis=0)
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    allclose_with_rounding(sim_q_out.asnumpy(), q_out)

    # Now get the per-channel quantize output and compare without recompiling.
    scale_np = np.array([0.5, 0.25]).astype("float32")
    zp_np = np.array([127, 123]).astype("int32")

    # Get the reference quantize output.
    quant_args = {"out_zero_point": zp_np, "out_scale": scale_np}
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=0,
        out_dtype="uint8",
        in_data=data,
    )
    # Run the simulated quantize without recompiling and confirm results match.
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    allclose_with_rounding(sim_q_out.asnumpy(), q_out)
Example #8
def test_any_conv2d_NCHWc():
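    # Dynamic N/H/W in the NCHW8c blocked layout; the second case uses dilation (2, 2), shrinking 224x224 to 222x222.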
    verify_any_conv2d_NCHWc(
        (relay.Any(), 8, relay.Any(), relay.Any(), 8),
        (8, 8, 3, 3, 8, 8),
        (1, 1),
        (1, 1),
        (1, 1),
        "NCHW8c",
        "OIHW8i8o",
        "NCHW8c",
        (1, 8, 224, 224, 8),
        (1, 8, 224, 224, 8),
    )
    verify_any_conv2d_NCHWc(
        (relay.Any(), 8, relay.Any(), relay.Any(), 8),
        (8, 8, 3, 3, 8, 8),
        (1, 1),
        (1, 1),
        (2, 2),
        "NCHW8c",
        "OIHW8i8o",
        "NCHW8c",
        (1, 8, 224, 224, 8),
        (1, 8, 222, 222, 8),
    )
Example #9
def test_dynamic_bcast():
    import numpy as np

    import tvm
    import tvm.testing
    from tvm import relay
    from tvm.relay import testing
    from tvm.relay import vm as rly_vm
    from tvm.runtime import vm as _vm

    def create_exec(f, target="llvm", params=None):
        if isinstance(f, relay.Expr):
            mod = tvm.IRModule()
            mod["main"] = f
            executable = rly_vm.compile(mod, target=target, params=params)
            return executable
        else:
            assert isinstance(f, tvm.IRModule), "expected mod as tvm.IRModule"
            executable = rly_vm.compile(f, target=target, params=params)
            return executable

    def get_serialized_output(mod,
                              *data,
                              params=None,
                              target="llvm",
                              ctx=tvm.cpu()):
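        # Compile, serialize with save(), reload with load_exec(), and run to exercise VM serialization with dynamic shapes.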
        exe = create_exec(mod, target, params=params)
        code, lib = exe.save()
        des_exec = _vm.Executable.load_exec(code, lib)
        des_vm = _vm.VirtualMachine(des_exec, ctx)
        result = des_vm.run(*data)
        print(result)
        return result

    dtype = "float32"
    x = relay.var("x", shape=(1, 2), dtype=dtype)
    y = relay.var("y", shape=(relay.Any(), 2), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.add(x, y))
    x_data = np.random.uniform(size=(1, 2)).astype(dtype)
    y_data = np.random.uniform(size=(4, 2)).astype(dtype)
    res_np = np.add(x_data, y_data)
    for target, ctx in testing.enabled_targets():
        res = get_serialized_output(mod,
                                    *(x_data, y_data),
                                    target=target,
                                    ctx=ctx)
        tvm.testing.assert_allclose(res.asnumpy(), res_np)
Example #10
def test_dyn_upsampling3d_infer_type_const():
    n, c, d, h, w = (
        te.size_var("n"),
        te.size_var("c"),
        te.size_var("d"),
        te.size_var("h"),
        te.size_var("w"),
    )

    data = relay.var("data", relay.TensorType((n, c, d, h, w), "int8"))
    scale_d = relay.Var("scale_d", relay.TensorType((), "float32"))
    scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))

    z = relay.nn.upsampling3d(data,
                              scale_d,
                              2.0,
                              scale_w,
                              layout="NCDHW",
                              method="trilinear")
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType(
        (n, c, relay.Any(), relay.Any(), relay.Any()), "int8")
Example #11
def test_pca():
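    # Wrap a fitted PCA, then a fitted TruncatedSVD, in RobustPCA and compile each with a dynamic batch dimension.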
    st_helper = SklearnTestHelper()
    pca = PCA(n_components=2)
    rpca = RobustPCA()
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]],
                    dtype=np.float32)
    pca.fit(data)
    rpca.robust_pca_ = pca
    dshape = (relay.Any(), len(data[0]))
    _test_model_impl(st_helper, rpca, dshape, data)

    tSVD = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
    data = sparse_random(100,
                         100,
                         density=0.01,
                         format="csr",
                         dtype="float32",
                         random_state=42).toarray()
    tSVD.fit(data)
    rpca.robust_pca_ = tSVD
    dshape = (relay.Any(), len(data[0]))
    _test_model_impl(st_helper, rpca, dshape, data)
Example #12
def test_any_concat():
    x = relay.var('x', shape=(relay.Any(), 2), dtype="float32")
    y = relay.var('y', shape=(1, 2), dtype="float32")
    z = relay.op.concatenate([x, y], axis=0)
    mod = relay.module.Module()
    mod["main"] = relay.Function([x, y], z)
    x_np = np.random.uniform(size=(3, 2)).astype('float32')
    y_np = np.random.uniform(size=(1, 2)).astype('float32')
    ref = np.concatenate([x_np, y_np], axis=0)
    for kind in ["debug", "vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()(x_np, y_np)
        tvm.testing.assert_allclose(result.asnumpy(), ref)
Example #13
def _test_quantile_transformer(shape, n_quantiles):
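    # Fit a QuantileTransformer on sorted normal samples and compile it with a dynamic batch dimension.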
    from sklearn.preprocessing import QuantileTransformer

    st_helper = SklearnTestHelper()

    rng = np.random.RandomState(0)
    data = np.sort(rng.normal(loc=0.5, scale=0.25, size=shape), axis=0)

    qt = QuantileTransformer(n_quantiles=n_quantiles, random_state=0)
    qt.fit_transform(data)

    dshape = (relay.Any(), len(data[0]))
    _test_model_impl(st_helper, qt, dshape, data.astype("float32"))
Example #14
def test_any_split():
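    # verify_any_split(any_shape, indices_or_sections, axis, static_shape, expected_section_shapes)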
    verify_any_split((relay.Any(), 4), 2, 1, (9, 4), [(9, 2), (9, 2)])
    verify_any_split((relay.Any(), relay.Any()), 2, 1, (9, 4), [(9, 2), (9, 2)])
    verify_any_split((relay.Any(), 12), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
    verify_any_split((relay.Any(), relay.Any()), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
Example #15
def test_mixed_input_type():
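    # Mix a nested tuple parameter with a plain tensor parameter; every tensor has a dynamic batch dimension.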
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    tensor_type = relay.TensorType(data_shape, dtype)
    tuple_type = relay.TupleType([tensor_type, tensor_type])
    data0 = relay.var("d0", type_annotation=relay.TupleType([tuple_type, tensor_type]))
    data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
    data_tuple = relay.expr.TupleWrapper(data0, 2)
    nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
    y = nested_data_tuple[1] * data_tuple[1] + data1
    mod["main"] = relay.Function([data0, data1], y)
    data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
    data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 4)
    check_result(
        [[[data_np0, data_np0], data_np0], data_np1],
        mod,
        ref_out_shape,
        assert_shape=True,
        only_vm=True,
    )
Example #16
    def verify_pad_default_fill(dshape, pad_width, dtype):
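        # nn.pad with a runtime pad_width tensor: every output dim infers to relay.Any(), and the fill value defaults to 0.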
        x = relay.var("x", relay.TensorType(dshape, dtype))
        ndim = len(dshape)
        pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), 'int64'))
        y = relay.nn.pad(x, pad_width_var)
        yy = run_infer_type(y)

        assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
        func = relay.Function([x, pad_width_var], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = np.pad(data, pad_width)
        pad_width = np.array(pad_width).astype('int64')

        verify_func(func, [data, pad_width], ref_res)
Example #17
def test_pushconstants():
    if not tvm.testing.device_enabled("vulkan"):
        return

    # Three 32-bit push constants: any_dim, stride, stride
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.sqrt(x))
    x_np = np.random.uniform(size=(10, )).astype(dtype)
    res_np = np.sqrt(x_np)

    check_mod(mod, x_np, res_np)

    # One 64-bit and one 32-bit constant
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.argsort(x))
    x_np = np.random.randint(0, high=10, size=(10, )).astype(dtype)
    res_np = np.argsort(x_np)

    check_mod(mod, x_np, res_np)
Example #18
def test_mixed_input_type():
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    tensor_type = relay.TensorType(data_shape, dtype)
    tuple_type = relay.TupleType([tensor_type, tensor_type])
    data0 = relay.var("d0",
                      type_annotation=relay.TupleType(
                          [tuple_type, tensor_type]))
    data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
    data_tuple = relay.expr.TupleWrapper(data0, 2)
    nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
    y = nested_data_tuple[1] * data_tuple[1] + data1
    mod["main"] = relay.Function([data0, data1], y)
    data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
    data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 4)
    for kind in ["vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()([[data_np0, data_np0], data_np0], data_np1)
        assert result.asnumpy().shape == ref_out_shape, \
            "Shape mismatch: expect %s but got %s." % (str(ref_out_shape), str(result.asnumpy().shape))
Example #19
def test_take_grad():
    data_dtype = relay.TensorType((3, 4, 5), "float64")
    data = relay.var("data", data_dtype)
    indices = relay.var("indices", relay.TensorType((relay.Any(),), "int32"))
    inputs = [_np_randn_from_type(data_dtype, scale=1e-5), np.array([1, 2], dtype="int32")]
    test_inputs = [inputs[0]]

    # take on axis
    fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=1))
    check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs)

    # take on flattened
    fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=None))
    check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs)
Example #20
def test_shape_func_nested_function():
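    # Partition nn.dense into an external "test2" composite function and lower it with dynamic shapes via the VM compiler.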
    @tvm.register_func("relay.ext.test2")
    def relay_ext_test(func):
        return None

    data_shape = (relay.Any(), 16)
    weight_shape = (relay.Any(), 16)

    dense = relay.nn.dense(relay.var("data", shape=data_shape),
                           relay.var("weight", shape=weight_shape))
    mod = tvm.IRModule.from_expr(dense)

    patterns = [("test.dense", is_op("nn.dense")(wildcard(), wildcard()))]
    passes = tvm.transform.Sequential([
        relay.transform.MergeComposite(patterns),
        relay.transform.AnnotateTarget(["test2"]),
        relay.transform.PartitionGraph(),
    ])

    mod = passes(mod)

    compiler = VMCompiler()
    compiler.lower(mod, "llvm")
Example #21
    def verify_zeros_ones(shape, dtype):
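        # zeros/ones built from a runtime shape tensor: the rank is known, but every output dim infers to relay.Any().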
        for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
            rank = len(shape)
            dyn_shape = relay.Var("shape",
                                  relay.ty.TensorType((rank, ), "int64"))
            y = op(dyn_shape, dtype)
            yy = run_infer_type(y)
            assert yy.checked_type == relay.ty.TensorType(
                (relay.Any(), ) * rank, dtype)

            func = relay.Function([dyn_shape], y)
            ref_res = ref(shape, dtype)
            verify_func(executor_kind, func, [np.array(shape).astype("int64")],
                        ref_res.astype("int64"))
Example #22
def test_tuple_get_item():
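    # Split along axis 1 and take field 0 of the resulting tuple; the batch dimension stays dynamic.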
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    indices_or_sections = 2
    axis = 1
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.split(data, indices_or_sections, axis)
    y = relay.expr.TupleGetItem(y.astuple(), 0)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 2)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
Example #23
def test_any_conv2d_transpose_nchw():
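    # verify_any_conv2d_transpose_nchw(data_shape, kernel_shape, strides, padding, dilation, groups,
    #                                  static_data_shape, ref_out_shape, output_padding); strides (2, 2) double 224 to 448.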
    verify_any_conv2d_transpose_nchw(
        (relay.Any(), 64, 224, 224),
        (64, 192, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        1,
        (2, 64, 224, 224),
        (2, 192, 224, 224),
        (0, 0),
    )
    verify_any_conv2d_transpose_nchw(
        (relay.Any(), 32, 224, 224),
        (32, 64, 3, 3),
        (2, 2),
        (1, 1),
        (1, 1),
        1,
        (1, 32, 224, 224),
        (1, 64, 448, 448),
        (1, 1),
    )
Example #24
def test_fuse_dynamic_squeeze_slice_take():
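    # Fuse squeeze -> strided_slice -> take over dynamic shapes and run the result on the VM.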
    input_data = [
        np.random.random([1, 2, 4]).astype("float32"),
        np.array([0]).astype("int64"),
    ]

    x = relay.var("p0107", shape=(relay.Any(), relay.Any(), 4), dtype="float32")
    take_val = relay.var("p166", shape=(relay.Any(),), dtype="int64")

    squeeze = relay.op.squeeze(x, axis=[0])
    strided_slice = relay.op.strided_slice(
        squeeze, begin=[0, 0], end=[15130, 9223372036854775807], strides=[1, 1]
    )
    take = relay.op.take(strided_slice, take_val, axis=0)

    mod = tvm.IRModule.from_expr(take)
    result = relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
        *input_data
    )

    np_result = np.squeeze(input_data[0][:, input_data[1][0], :], axis=0)

    assert np.allclose(result.numpy(), np_result)
Example #25
def test_dense_dynamic():
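    # Dense with a dynamic first dim on both operands; M, N, K and the verify helpers come from the enclosing test module.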
    data_shape = (relay.Any(), K)
    weight_shape = (relay.Any(), K)

    if has_cublas():
        # TVM native fp16 dense (without tensorcore), using fp16 accum, seems to have accuracy issues
        # Use cublas as a reference
        verify_dense(
            get_dense_with_shape(data_shape, weight_shape),
            M,
            N,
            K,
            ref_target="cuda -libs=cublas",
        )

    verify_dense(
        get_dense_with_shape(data_shape, weight_shape, out_dtype="float32"),
        M,
        N,
        K,
        atol=1e-4,
        rtol=1e-4,
    )
Example #26
def test_dynamic_dtype():
    # Compile simulated quantize once but support any type of quantization.
    data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("float32")
    # Test scalar float32 to uint8.
    scale_np = np.asarray([0.5]).astype("float32")
    zp_np = np.asarray([127]).astype("int32")
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
    quant_args = {"out_zero_point": zp_np[0], "out_scale": scale_np[0]}
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=-1,
        out_dtype="uint8",
        in_data=data,
    )
    # Create variables with undefined shape and run with scalar inputs.
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
    zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
    dtype = relay.var("dtype", shape=[])
    vm = build_simulated_quantize(input_data, scale, zp, dtype)
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_equal(sim_q_out.asnumpy(), q_out)

    # Now test float32 to int32 compilation.
    # Get the reference quantize output.
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=-1,
        out_dtype="int32",
        in_data=data,
    )
    # Run the simulated quantize without recompiling and confirm results match.
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int32"])
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_equal(sim_q_out.asnumpy(), q_out)
Example #27
def test_unique():
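    # Vulkan variant of test_unique: slice the padded unique output to the dynamic num_unique length.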
    if not tvm.testing.device_enabled("vulkan"):
        return

    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    [unique, _, num_unique] = relay.unique(x, is_sorted=True)
    mod["main"] = relay.Function([x],
                                 relay.op.strided_slice(unique,
                                                        begin=[0],
                                                        end=num_unique))
    x_np = np.random.randint(0, high=10, size=(10, )).astype(dtype)
    res_np = np.unique(x_np)
    check_mod(mod, x_np, res_np)
Example #28
def test_dynamic_bcast():
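    # Broadcast add where x has a dynamic first dim; get_serialized_output (see Example #9) runs the saved and reloaded VM.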
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), 2), dtype=dtype)
    y = relay.var("y", shape=(3, 2), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.add(x, y))
    x_data = np.random.uniform(size=(1, 2)).astype(dtype)
    y_data = np.random.uniform(size=(3, 2)).astype(dtype)
    res_np = np.add(x_data, y_data)
    for target, ctx in testing.enabled_targets():
        res = get_serialized_output(mod,
                                    *(x_data, y_data),
                                    target=target,
                                    ctx=ctx)
        tvm.testing.assert_allclose(res.asnumpy(), res_np)
Example #29
    def verify_pad(dshape, pad_width, pad_val, dtype):
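        # nn.pad with runtime pad widths and a runtime fill value: every output dim infers to relay.Any().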
        x = relay.var("x", relay.TensorType(dshape, dtype))
        ndim = len(dshape)
        pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
        pad_val_var = relay.var("pad_val_var", relay.TensorType((), dtype))
        y = relay.nn.pad(x, pad_width_var, pad_val_var)
        yy = run_infer_type(y)

        assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
        func = relay.Function([x, pad_width_var, pad_val_var], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = np.pad(data, pad_width, "constant", constant_values=(((pad_val,) * 2),) * ndim)
        pad_width = np.array(pad_width).astype("int64")

        verify_func(func, [data, pad_width, np.array(pad_val).astype(dtype)], ref_res)
Example #30
def test_vm_shape_of():
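    # context_analysis should place the vm.shape_of result on the CPU even though its input lives on the GPU.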
    if not tvm.testing.device_enabled("cuda") or not tvm.cuda(0).exist:
        return

    mod = tvm.IRModule()
    data_shape = (relay.Any(), )
    x = relay.var("x", shape=data_shape)
    y = relay.op.vm.shape_of(x)
    mod["main"] = relay.Function([x], y)
    ca = context_analysis(mod, tvm.cuda())
    main = mod["main"]

    cpu_dev = tvm.cpu().device_type
    gpu_dev = tvm.cuda().device_type
    assert main.params[0] in ca and ca[main.params[0]][0].value == gpu_dev
    assert main.body in ca and ca[main.body][0].value == cpu_dev