def test_dynamic_tensor():
    """Build and run a CSR element-wise kernel whose row count is a runtime var."""
    dtype = "float32"
    target = "llvm"
    dev = tvm.device(target, 0)
    # Symbolic dims: rows, cols, and nonzero count of the sparse input.
    nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
    assert A.stype == "csr"
    # Element-wise doubling over the CSR data array only.
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
    s = te.create_schedule(C.op)
    _nr, _nc = 3, 5
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
    a = tvmsp.array(a, dev)
    assert a.data.dtype == a.dtype
    # Instantiate the namedtuple rather than mutating its class attributes.
    # NOTE(review): the indices buffer reuses the data dtype/shape; only A.data
    # is passed to the built function below, so that bind is inert — confirm.
    CSRBuffer = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
    Ab = CSRBuffer(
        data=tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data"),
        indices=tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices"),
        indptr=None,
    )
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2.0, rtol=1e-5)
def test_dynamic_tensor():
    """Build and run a CSR element-wise kernel whose row count is a runtime var."""
    dtype = 'float32'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    # Symbolic dims: rows, cols, and nonzero count of the sparse input.
    nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n')
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype)
    assert A.stype == 'csr'
    # Element-wise doubling over the CSR data array only.
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    _nr, _nc = 3, 5
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - .6, 0.)
    a = tvmsp.array(a, ctx)
    assert a.data.dtype == a.dtype
    # Instantiate the namedtuple rather than mutating its class attributes.
    CSRBuffer = namedtuple('CSRBuffer', ['data', 'indices', 'indptr'])
    Ab = CSRBuffer(
        data=tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_data'),
        indices=tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_indices'),
        indptr=None,
    )
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    np.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
def test_dynamic_tensor():
    """Build and run a CSR element-wise kernel whose row count is a runtime var."""
    dtype = 'float32'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    # Symbolic dims: rows, cols, and nonzero count of the sparse input.
    nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n')
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype)
    assert A.stype == 'csr'
    # Element-wise doubling over the CSR data array only.
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    _nr, _nc = 3, 5
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - .6, 0.)
    a = tvmsp.array(a, ctx)
    assert a.data.dtype == a.dtype
    # Instantiate the namedtuple rather than mutating its class attributes.
    CSRBuffer = namedtuple('CSRBuffer', ['data', 'indices', 'indptr'])
    Ab = CSRBuffer(
        data=tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_data'),
        indices=tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_indices'),
        indptr=None,
    )
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
def test_sparse_array_tuple():
    """Construct a CSR array from a (data, indices, indptr) tuple and run a kernel."""
    dtype, itype = "float32", "int32"
    target = "llvm"
    dev = tvm.device(target, 0)
    # Symbolic dims: rows, cols, and nonzero count of the sparse input.
    nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
    assert A.stype == "csr"
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
    s = te.create_schedule(C.op)
    _nr, _nc = 3, 5
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
    # convert to sparse array tuple
    source_array = a
    ridx, cidx = np.nonzero(source_array)
    data = source_array[ridx, cidx]
    a_data = _nd.array(data, dev)
    # Column indices of the nonzeros; reuse cidx rather than recomputing
    # np.nonzero a second time.
    indices = cidx.astype(itype)
    a_indices = _nd.array(indices, dev)
    # indptr[i] = number of nonzeros in rows < i (CSR row-pointer array).
    indptr = [0] + np.apply_along_axis(np.count_nonzero, axis=1, arr=source_array).tolist()
    indptr = np.cumsum(np.array(indptr, itype)).astype(itype)
    a_indptr = _nd.array(indptr, dev)
    a_init = (a_data, a_indices, a_indptr)
    # construct tvm sparse array with tuple
    a = tvmsp.array(a_init, shape=source_array.shape, device=dev)
    assert a.data.dtype == a.dtype
    # Instantiate the namedtuple rather than mutating its class attributes.
    CSRBuffer = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
    Ab = CSRBuffer(
        data=tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data"),
        indices=tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices"),
        indptr=None,
    )
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2.0, rtol=1e-5)
def test_sparse_array_tuple():
    """Construct a CSR array from a (data, indices, indptr) tuple and run a kernel."""
    dtype, itype = 'float32', 'int32'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    # Symbolic dims: rows, cols, and nonzero count of the sparse input.
    nr, nc, n = te.size_var('nr'), te.size_var('nc'), te.size_var('n')
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype)
    assert A.stype == 'csr'
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = te.create_schedule(C.op)
    _nr, _nc = 3, 5
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - .6, 0.)
    # convert to sparse array tuple
    source_array = a
    ridx, cidx = np.nonzero(source_array)
    data = source_array[ridx, cidx]
    a_data = _nd.array(data, ctx)
    # Column indices of the nonzeros; reuse cidx rather than recomputing
    # np.nonzero a second time.
    indices = cidx.astype(itype)
    a_indices = _nd.array(indices, ctx)
    # indptr[i] = number of nonzeros in rows < i (CSR row-pointer array).
    indptr = [0] + np.apply_along_axis(np.count_nonzero, axis=1, arr=source_array).tolist()
    indptr = np.cumsum(np.array(indptr, itype)).astype(itype)
    a_indptr = _nd.array(indptr, ctx)
    a_init = (a_data, a_indices, a_indptr)
    # construct tvm sparse array with tuple
    a = tvmsp.array(a_init, shape=source_array.shape, ctx=ctx)
    assert a.data.dtype == a.dtype
    # Instantiate the namedtuple rather than mutating its class attributes.
    CSRBuffer = namedtuple('CSRBuffer', ['data', 'indices', 'indptr'])
    Ab = CSRBuffer(
        data=tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name='A_data'),
        indices=tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name='A_indices'),
        indptr=None,
    )
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
def check_device(device):
    """Compile the dense kernel for *device*, run it, and compare with numpy."""
    runtime_ctx = tvm.context(device, 0)
    if not runtime_ctx.exist:
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)
    # Upload the operands; b_np becomes the sparse operand.
    dense_a = tvm.nd.array(a_np, runtime_ctx)
    sparse_b = tvmsp.array(b_np, runtime_ctx)
    dense_c = tvm.nd.array(c_np, runtime_ctx)
    out_shape = get_const_tuple(D.shape)
    dense_d = tvm.nd.array(np.zeros(out_shape, dtype=dtype), runtime_ctx)
    func = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense")
    func(dense_a, sparse_b.data, sparse_b.indices, sparse_b.indptr, dense_c, dense_d)
    np.testing.assert_allclose(dense_d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
def check_device(device):
    """Compile the dense kernel for *device*, run it, and compare with numpy."""
    target_dev = tvm.device(device, 0)
    if not tvm.testing.device_enabled(device):
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)
    # Upload the operands; a_np becomes the sparse operand.
    sparse_a = tvmsp.array(a_np, target_dev)
    dense_b = tvm.nd.array(b_np, target_dev)
    dense_c = tvm.nd.array(c_np, target_dev)
    out_shape = get_const_tuple(D.shape)
    dense_d = tvm.nd.array(np.zeros(out_shape, dtype=dtype), target_dev)
    func = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense")
    func(sparse_a.data, sparse_a.indices, sparse_a.indptr, dense_b, dense_c, dense_d)
    tvm.testing.assert_allclose(dense_d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
def test_sparse_array_tuple():
    """Construct a CSR array from a (data, indices, indptr) tuple and run a kernel."""
    dtype, itype = 'float32', 'int32'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    # Symbolic dims: rows, cols, and nonzero count of the sparse input.
    nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n')
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype)
    assert A.stype == 'csr'
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    _nr, _nc = 3, 5
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - .6, 0.)
    # convert to sparse array tuple
    source_array = a
    ridx, cidx = np.nonzero(source_array)
    data = source_array[ridx, cidx]
    a_data = _nd.array(data, ctx)
    # Column indices of the nonzeros; reuse cidx rather than recomputing
    # np.nonzero a second time.
    indices = cidx.astype(itype)
    a_indices = _nd.array(indices, ctx)
    # indptr[i] = number of nonzeros in rows < i (CSR row-pointer array).
    indptr = [0] + np.apply_along_axis(np.count_nonzero, axis=1, arr=source_array).tolist()
    indptr = np.cumsum(np.array(indptr, itype)).astype(itype)
    a_indptr = _nd.array(indptr, ctx)
    a_init = (a_data, a_indices, a_indptr)
    # construct tvm sparse array with tuple
    a = tvmsp.array(a_init, shape=source_array.shape, ctx=ctx)
    assert a.data.dtype == a.dtype
    # Instantiate the namedtuple rather than mutating its class attributes.
    CSRBuffer = namedtuple('CSRBuffer', ['data', 'indices', 'indptr'])
    Ab = CSRBuffer(
        data=tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_data'),
        indices=tvm.decl_buffer(a.data.shape, a.data.dtype, name='A_indices'),
        indptr=None,
    )
    binds = {A.data: Ab.data, A.indices: Ab.indices}
    f = tvm.build(s, [nr, A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((_nr, _nc), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data.shape[0], a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
def test_static_tensor():
    """Double the data array of a CSR tensor whose concrete size is fixed."""
    dtype = "float32"
    target = "llvm"
    dev = tvm.device(target, 0)
    m = te.size_var("m")
    n = te.size_var("n")
    A = tvmsp.placeholder(shape=(m, n), name="A", dtype=dtype)
    assert A.stype == "csr"
    # Concrete size used for the actual data; rebinds the symbolic n above.
    n = 3
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(n, n)).astype(dtype) - 0.6, 0.0)
    a = tvmsp.array(a, dev)
    # Replace the data placeholder with one of the now-known static shape.
    A.data = te.placeholder(a.data.shape, dtype, name="A_data")
    Ab = tvm.tir.decl_buffer(a.data.shape, dtype, name="A_data")
    binds = {A.data: Ab}
    # Element-wise doubling over the CSR data array only.
    C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
    s = te.create_schedule(C.op)
    f = tvm.build(s, [A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((n, n), dtype), dev)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2.0, rtol=1e-5)
def test_static_tensor():
    """Double the data array of a CSR tensor whose concrete size is fixed."""
    dtype = 'float32'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    m = tvm.var('m')
    n = tvm.var('n')
    A = tvmsp.placeholder(shape=(m, n), name='A', dtype=dtype)
    assert A.stype == 'csr'
    # Concrete size used for the actual data; rebinds the symbolic n above.
    n = 3
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(n, n)).astype(dtype) - .6, 0.)
    a = tvmsp.array(a, ctx)
    # Replace the data placeholder with one of the now-known static shape.
    A.data = tvm.placeholder(a.data.shape, dtype, name='A_data')
    Ab = tvm.decl_buffer(a.data.shape, dtype, name='A_data')
    binds = {A.data: Ab}
    # Element-wise doubling over the CSR data array only.
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    f = tvm.build(s, [A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((n, n), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data, c.data)
    tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
def test_static_tensor():
    """Double the data array of a CSR tensor whose concrete size is fixed."""
    dtype = 'float32'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    m = tvm.var('m')
    n = tvm.var('n')
    A = tvmsp.placeholder(shape=(m, n), name='A', dtype=dtype)
    assert A.stype == 'csr'
    # Concrete size used for the actual data; rebinds the symbolic n above.
    n = 3
    # Random dense matrix thresholded so roughly 40% of entries stay nonzero.
    a = np.maximum(np.random.uniform(size=(n, n)).astype(dtype) - .6, 0.)
    a = tvmsp.array(a, ctx)
    # Replace the data placeholder with one of the now-known static shape.
    A.data = tvm.placeholder(a.data.shape, dtype, name='A_data')
    Ab = tvm.decl_buffer(a.data.shape, dtype, name='A_data')
    binds = {A.data: Ab}
    # Element-wise doubling over the CSR data array only.
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    f = tvm.build(s, [A.data, C], target, binds=binds)
    # Output shares the sparsity structure (indices/indptr) of the input.
    c = tvmsp.array(np.zeros((n, n), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data, c.data)
    np.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5)
def check_device(device):
    """Compile the csrmm kernel for *device*, run it, and compare with numpy."""
    runtime_ctx = tvm.context(device, 0)
    if not runtime_ctx.exist:
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)
    sparse_a = tvmsp.array(a_np, runtime_ctx)
    _nr, _nc, _n = sparse_a.shape[0], sparse_a.shape[1], sparse_a.data.shape[0]
    # CSR invariant: row-pointer array has rows + 1 entries.
    assert sparse_a.shape[0] == sparse_a.indptr.shape[0] - 1
    dense_b = tvm.nd.array(b_np, runtime_ctx)
    dense_c = tvm.nd.array(c_np, runtime_ctx)
    dense_d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), runtime_ctx)
    func = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
    func(_nr, sparse_a.data, sparse_a.indices, sparse_a.indptr, dense_b, dense_c, dense_d)
    np.testing.assert_allclose(dense_d.asnumpy(), d_np, rtol=1e-2, atol=1e-2)
def check_device(device):
    """Compile the csrmv kernel for *device*, run it, and compare with numpy."""
    target_dev = tvm.device(device, 0)
    if not tvm.testing.device_enabled(device):
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)
    sparse_a = tvmsp.array(a_np, target_dev)
    _nr, _nc, _n = sparse_a.shape[0], sparse_a.shape[1], sparse_a.data.shape[0]
    # CSR invariant: row-pointer array has rows + 1 entries.
    assert sparse_a.shape[0] == sparse_a.indptr.shape[0] - 1
    dense_b = tvm.nd.array(b_np, target_dev)
    dense_c = tvm.nd.array(c_np, target_dev)
    dense_d = tvm.nd.array(np.zeros((_nr, 1), dtype=dtype), target_dev)
    # The uploaded arrays must match the placeholder dtypes exactly.
    assert sparse_a.data.dtype == A.data.dtype
    assert sparse_a.indices.dtype == A.indices.dtype
    assert sparse_a.indptr.dtype == A.indptr.dtype
    func = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
    func(_nr, sparse_a.data, sparse_a.indices, sparse_a.indptr, dense_b, dense_c, dense_d)
    tvm.testing.assert_allclose(dense_d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)