def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias=True):
    """Build and run nnpack ``convolution_inference`` and compare with ``np_conv``.

    Parameters
    ----------
    target : str
        TVM build target.
    algorithm : nnpack.ConvolutionAlgorithm
        Algorithm selector forwarded to the nnpack extern op.
    with_bias : bool
        When False, no bias tensor is handed to the nnpack op.  The reference
        result still adds ``nc``, but ``nc`` is all zeros, so the comparison
        is unaffected either way.
    """
    # BUG FIX: the original probed "tvm.contrib.nnpack.fully_connected_inference"
    # (copy-pasted from the fully-connected test) even though this test calls
    # convolution_inference; probe the extern function we actually use.
    if not tvm.get_global_func("tvm.contrib.nnpack.convolution_inference", True):
        pytest.skip("extern function is not available")
    if not nnpack.is_available():
        pytest.skip("nnpack is not available")

    ctx = tvm.cpu(0)
    output = nnpack.convolution_inference(
        data,
        kernel,
        bias if with_bias else None,
        [PAD, PAD, PAD, PAD],
        [STRIDE, STRIDE],
        algorithm=algorithm,
    )
    s = te.create_schedule(output.op)

    # bias is always passed to tvm.build so the function signature is stable
    # regardless of with_bias; the op simply ignores it when bias was None.
    f = tvm.build(s, [data, kernel, bias, output], target)

    na = np.random.uniform(size=dshape).astype(data.dtype)
    nb = np.random.uniform(size=kshape).astype(kernel.dtype)
    nc = np.zeros(bshape, dtype=bias.dtype)
    ta = tvm.nd.array(na, ctx)
    tb = tvm.nd.array(nb, ctx)
    tc = tvm.nd.array(nc, ctx)
    td = tvm.nd.array(np.zeros(oshape, dtype=output.dtype), ctx)
    f(ta, tb, tc, td)
    # Reference: NCHW convolution plus the (all-zero) bias broadcast per channel.
    nd = np_conv(np.reshape(na, (BATCH, IC, IH, IW)), nb, PAD, STRIDE) + nc.reshape(
        1, bshape[0], 1, 1
    )
    tvm.testing.assert_allclose(td.asnumpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5)
def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias=True):
    """Run nnpack ``convolution_inference`` and check it against ``np_conv``.

    Skips (by returning early) when the target, the nnpack extern function,
    or nnpack itself is unavailable.
    """
    # Guard clauses: bail out when any prerequisite is missing.
    if not tvm.module.enabled(target):
        print("skip because %s is not enabled..." % target)
        return
    if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
        print("skip because extern function is not available")
        return
    if not nnpack.is_available():
        return

    device = tvm.cpu(0)
    conv = nnpack.convolution_inference(
        data,
        kernel,
        bias if with_bias else None,
        [PAD, PAD, PAD, PAD],
        [STRIDE, STRIDE],
        algorithm=algorithm)
    sched = tvm.create_schedule(conv.op)
    # bias is always part of the built signature, even when the op got None.
    func = tvm.build(sched, [data, kernel, bias, conv], target)

    host_data = np.random.uniform(size=dshape).astype(data.dtype)
    host_kernel = np.random.uniform(size=kshape).astype(kernel.dtype)
    host_bias = np.zeros(bshape, dtype=bias.dtype)
    dev_data = tvm.nd.array(host_data, device)
    dev_kernel = tvm.nd.array(host_kernel, device)
    dev_bias = tvm.nd.array(host_bias, device)
    dev_out = tvm.nd.array(np.zeros(oshape, dtype=conv.dtype), device)
    func(dev_data, dev_kernel, dev_bias, dev_out)

    # Reference: NCHW convolution plus the (all-zero) bias broadcast per channel.
    expected = np_conv(
        np.reshape(host_data, (BATCH, IC, IH, IW)), host_kernel, PAD, STRIDE
    ) + host_bias.reshape(1, bshape[0], 1, 1)
    tvm.testing.assert_allclose(
        dev_out.asnumpy(), expected.reshape(BATCH, IC, IH, IW), rtol=1e-5)
def test_convolution_inference():
    """End-to-end check of ``nnpack.convolution_inference`` against ``np_conv``.

    Builds a single-image (no batch dimension) NCHW convolution through the
    nnpack extern op and compares the result with the numpy reference.
    """
    # Removed unused `BATCH = 32`: the data has no batch dimension and the
    # reference path hard-codes batch 1.
    IH = 48
    IW = 48
    IC = 16
    OC = 16
    K = 3
    PAD = 1
    STRIDE = 1
    # Output spatial extent for a K x K kernel with symmetric padding.
    OH = (IH + 2 * PAD - K) + 1
    OW = (IW + 2 * PAD - K) + 1
    dshape = (IC, IH, IW)
    kshape = (OC, IC, K, K)
    bshape = (OC, )
    oshape = (OC, OH, OW)

    data = tvm.placeholder(dshape, name='data')
    kernel = tvm.placeholder(kshape, name='kernel')
    bias = tvm.placeholder(bshape, name='bias')
    output = nnpack.convolution_inference(
        data, kernel, bias, [PAD, PAD, PAD, PAD], [STRIDE, STRIDE])
    s = tvm.create_schedule(output.op)

    def verify(target="llvm"):
        # Skip when the target or the nnpack extern function is unavailable.
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func(
                "tvm.contrib.nnpack.fully_connected_inference", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [data, kernel, bias, output], target)
        na = np.random.uniform(size=dshape).astype(data.dtype)
        nb = np.random.uniform(size=kshape).astype(kernel.dtype)
        nc = np.zeros(bshape, dtype=bias.dtype)
        ta = tvm.nd.array(na, ctx)
        tb = tvm.nd.array(nb, ctx)
        tc = tvm.nd.array(nc, ctx)
        td = tvm.nd.array(np.zeros(oshape, dtype=output.dtype), ctx)
        f(ta, tb, tc, td)
        nd = np_conv(np.reshape(na, (1, IC, IH, IW)), nb, PAD, STRIDE)
        # BUG FIX: compare against the true output shape (OC, OH, OW) instead
        # of (IC, IH, IW), which only happened to work because IC == OC and
        # PAD/K/STRIDE make OH == IH, OW == IW for this parameter choice.
        np.testing.assert_allclose(td.asnumpy(), nd.reshape(OC, OH, OW), rtol=1e-5)
    verify()
def test_convolution_inference():
    """Exercise ``nnpack.convolution_inference`` end to end against ``np_conv``.

    Builds a single-image (no batch dimension) NCHW convolution through the
    nnpack extern op and checks it against the numpy reference.
    """
    # Removed unused `BATCH = 32`: the data has no batch dimension and the
    # reference path hard-codes batch 1.
    IH = 48
    IW = 48
    IC = 16
    OC = 16
    K = 3
    PAD = 1
    STRIDE = 1
    # Output spatial extent for a K x K kernel with symmetric padding.
    OH = (IH + 2 * PAD - K) + 1
    OW = (IW + 2 * PAD - K) + 1
    dshape = (IC, IH, IW)
    kshape = (OC, IC, K, K)
    bshape = (OC, )
    oshape = (OC, OH, OW)

    data = tvm.placeholder(dshape, name='data')
    kernel = tvm.placeholder(kshape, name='kernel')
    bias = tvm.placeholder(bshape, name='bias')
    output = nnpack.convolution_inference(
        data, kernel, bias, [PAD, PAD, PAD, PAD], [STRIDE, STRIDE])
    s = tvm.create_schedule(output.op)

    def verify(target="llvm"):
        # Skip when the target or the nnpack extern function is unavailable.
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
            # BUG FIX: typo in the skip message ("avalable" -> "available").
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [data, kernel, bias, output], target)
        na = np.random.uniform(size=dshape).astype(data.dtype)
        nb = np.random.uniform(size=kshape).astype(kernel.dtype)
        nc = np.zeros(bshape, dtype=bias.dtype)
        ta = tvm.nd.array(na, ctx)
        tb = tvm.nd.array(nb, ctx)
        tc = tvm.nd.array(nc, ctx)
        td = tvm.nd.array(np.zeros(oshape, dtype=output.dtype), ctx)
        f(ta, tb, tc, td)
        nd = np_conv(np.reshape(na, (1, IC, IH, IW)), nb, PAD, STRIDE)
        # NOTE(review): (IC, IH, IW) matches the output only because IC == OC
        # and the PAD/K/STRIDE choice keeps OH == IH, OW == IW.
        np.testing.assert_allclose(
            td.asnumpy(), nd.reshape(IC, IH, IW), rtol=1e-5)
    verify()