def check_vulkan(dtype, n):
    if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
        print("skip because vulkan is not enabled..")
        return
    A = tvm.placeholder((n,), name='A', dtype=dtype)
    ctx = tvm.vulkan(0)
    a_np = np.random.uniform(size=(n,)).astype(A.dtype)
    a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(a_np)
    b_np = a.asnumpy()
    tvm.testing.assert_allclose(a_np, b_np)
    tvm.testing.assert_allclose(a_np, a.asnumpy())
# `n`, `num_thread`, `bx`, and `tx` are defined in the enclosing test scope (not shown in this excerpt).
def worker():
    A = te.placeholder((n,), name="A", dtype="float32")
    B = te.placeholder((n,), name="B", dtype="float32")
    functions = [
        (
            lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
            lambda a, b: 2 * a + 3 * b,
        ),
        (lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
        (lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
    ]

    def build_f(f_ref):
        (C_f, ref) = f_ref
        C = C_f()
        s = te.create_schedule(C.op)
        xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
        s[C].bind(xo, bx)
        s[C].bind(xi, tx)
        fun = tvm.build(s, [A, B, C], "vulkan")
        return (fun, ref)

    # Build a random number of randomly chosen kernels, run each, then verify the results.
    fs = [
        build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
    ]
    dev = tvm.vulkan(0)
    a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
    b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
    cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
    for ((f, _), c) in zip(fs, cs):
        f(a, b, c)

    for ((_, ref), c) in zip(fs, cs):
        tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy()))
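The `worker` function above is never invoked in this excerpt. A minimal driver sketch, assuming `n`, `num_thread`, `bx`, and `tx` are bound as shown below and that a Vulkan device is available, could launch several workers concurrently:

import random
import threading

import numpy as np
import tvm
import tvm.testing
from tvm import te

n = 1024
num_thread = 64
bx = te.thread_axis("blockIdx.x")   # block binding used inside build_f
tx = te.thread_axis("threadIdx.x")  # thread binding used inside build_f

# Stress the Vulkan runtime with several threads building and running kernels.
threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()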
Example #3
def check_vulkan(dtype, n):
    A = te.placeholder((n,), name="A", dtype=dtype)
    dev = tvm.vulkan(0)
    a_np = np.random.uniform(size=(n,)).astype(A.dtype)
    a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np)
    b_np = a.numpy()
    tvm.testing.assert_allclose(a_np, b_np)
    tvm.testing.assert_allclose(a_np, a.numpy())
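A usage sketch for the copy check above; the dtypes and array length are illustrative rather than taken from the original test:

for dtype in ["float32", "int32"]:
    check_vulkan(dtype, 1024)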
Example #4
def enabled_ctx_list():
    ctx_list = [('cpu', tvm.cpu(0)), ('gpu', tvm.gpu(0)),
                ('cl', tvm.opencl(0)), ('metal', tvm.metal(0)),
                ('rocm', tvm.rocm(0)), ('vulkan', tvm.vulkan(0)),
                ('vpi', tvm.vpi(0))]
    for k, v in ctx_list:
        assert tvm.context(k, 0) == v
    ctx_list = [x[1] for x in ctx_list if x[1].exist]
    return ctx_list
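A small usage sketch for `enabled_ctx_list` (the shape and dtype are illustrative): allocate an array on each context that is actually available at runtime.

import numpy as np
import tvm

for ctx in enabled_ctx_list():
    # Only devices whose runtime is enabled and which exist are returned.
    arr = tvm.nd.array(np.zeros(8, dtype="float32"), ctx)
    print(ctx, arr.shape)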
def check_vulkan(dtype, n, lanes):
    # `num_thread`, `bx`, and `tx` are defined in the enclosing test scope (not shown in this excerpt).
    if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
        print("skip because vulkan is not enabled..")
        return
    A = tvm.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
    B = tvm.compute((n,),
                    lambda i: A[i] + tvm.const(1, A.dtype),
                    name='B')
    s = tvm.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
    s[B].bind(xo, bx)
    s[B].bind(xi, tx)
    fun = tvm.build(s, [A, B], "vulkan")
    ctx = tvm.vulkan(0)
    a = tvm.nd.empty((n,), A.dtype,
                     ctx).copyfrom(np.random.uniform(size=(n, lanes)))
    c = tvm.nd.empty((n,), B.dtype, ctx)
    fun(a, c)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
Example #6
def check_vulkan(dtype, n, lanes):
    # `num_thread`, `bx`, and `tx` are defined in the enclosing test scope (not shown in this excerpt).
    A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
    B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
    s[B].bind(xo, bx)
    s[B].bind(xi, tx)
    fun = tvm.build(s, [A, B], "vulkan")
    dev = tvm.vulkan(0)
    a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes)))
    c = tvm.nd.empty((n,), B.dtype, dev)
    fun(a, c)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
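A hedged setup-and-call sketch for the vectorized check above; the thread-axis bindings, split factor, and lane counts below are assumptions, not part of the original excerpt:

import numpy as np
import tvm
import tvm.testing
from tvm import te

num_thread = 8
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")

# Element-wise add over float32x2 and float32x4 vector lanes.
check_vulkan("float32", 64, 2)
check_vulkan("float32", 64, 4)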
Example #8
def requires_gpu(*args):
    """Mark a test as requiring a GPU to run.

    Tests with this mark will not be run unless a GPU is present.

    Parameters
    ----------
    f : function
        Function to mark
    """
    _requires_gpu = [
        pytest.mark.skipif(
            not tvm.cuda().exist and not tvm.rocm().exist
            and not tvm.opencl().exist and not tvm.metal().exist
            and not tvm.vulkan().exist,
            reason="No GPU present",
        ),
        *uses_gpu(),
    ]
    return _compose(args, _requires_gpu)
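A usage sketch, assuming this decorator is exposed as `tvm.testing.requires_gpu` as in TVM's testing utilities; the test body itself is illustrative:

import numpy as np
import tvm
import tvm.testing


@tvm.testing.requires_gpu
def test_gpu_roundtrip():
    # Skipped automatically when no GPU backend device is present.
    dev = tvm.cuda(0) if tvm.cuda().exist else tvm.vulkan(0)
    x = np.random.uniform(size=(16,)).astype("float32")
    a = tvm.nd.array(x, dev)
    tvm.testing.assert_allclose(a.numpy(), x)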
Example #9
    def test_scalar_params(num_int_params):
        n = te.var("n")
        scalars = [te.var("scale{}".format(i)) for i in range(num_int_params)]
        scalar_sum = scalars[0]
        for s in scalars[1:]:
            scalar_sum += s

        A = te.placeholder((n, ), name="A")
        B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")

        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], factor=64)
        s[B].bind(xo, bx)
        s[B].bind(xi, tx)
        f_add = tvm.build(s, scalars + [A, B], target)

        n = 1024
        scalars = [1 for _ in scalars]
        dev = tvm.vulkan(0)
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
        f_add(*scalars, a, b)

        tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy())
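`bx`, `tx`, and `target` in the nested test above come from its enclosing scope; a sketch of that setup plus a couple of invocations (the thread-axis bindings and parameter counts are assumptions):

from tvm import te

target = "vulkan"
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")

# Push different numbers of integer scalar arguments through the kernel.
test_scalar_params(1)
test_scalar_params(8)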
Example #10
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now, we would like to reproduce the same forward computation using TVM.
from tvm.contrib import graph_runtime

if target == 'llvm':
    ctx = tvm.cpu(0)
elif target == 'cuda':
    ctx = tvm.gpu(0)
elif target == 'opengl':
    ctx = tvm.opengl(0)
elif target == 'opencl':
    ctx = tvm.cl(0)
elif target == 'vulkan':
    ctx = tvm.vulkan(0)
elif target == 'metal':
    ctx = tvm.metal(0)
else:
    raise ValueError('No supported context type for %s' % target)

dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('data', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.asnumpy()[0])
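######################################################################
# Time the forward pass
# ---------------------
# A follow-up sketch (not part of the original tutorial excerpt): the graph
# module can be timed with the underlying module's time_evaluator.
ftimer = m.module.time_evaluator("run", ctx, number=10)
print("Mean inference time: %.2f ms" % (ftimer().mean * 1000))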