def l2norm_instance(data, eps, axis=None):
    """Perform L2 normalization on the input data

    For axis=None, y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))

    Parameters
    ----------
    data : tvm.Tensor
        4-D with NCHW or NHWC layout

    eps : float
        epsilon value

    axis : list of int
        axis over which the normalization is applied

    Returns
    -------
    output : tvm.Tensor
        4-D output with same shape
    """
    assert len(data.shape) == 4, "only support 4-dim l2norm"
    # Square the input, reduce over the given axes, and broadcast the sum
    # back to the input shape so the division is elementwise.
    dot_value = topi.cpp.pow(data, 2.0)
    sum_value = topi.sum(dot_value, axis=axis, keepdims=True)
    expand_sum = topi.broadcast_to(sum_value, data.shape)
    return topi.broadcast_div(
        data,
        topi.sqrt(tvm.compute(
            expand_sum.shape,
            lambda i, j, k, l: tvm.max(expand_sum[i, j, k, l], eps),
            tag='l2norm')))
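# A minimal usage sketch for l2norm_instance, assuming the same pre-0.7
# tvm/topi packages the function above uses; the shape, eps, and axis
# values are illustrative, not required by the API.
import tvm
import topi

data = tvm.placeholder((1, 3, 32, 32), name="data")
out = l2norm_instance(data, eps=1e-5, axis=[1])
s = tvm.create_schedule(out.op)
f = tvm.build(s, [data, out], "llvm")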
def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum"): # Build the logic and compile the function dat_dtype = "float32" A = tvm.placeholder(shape=in_shape, name="A", dtype=dat_dtype) A1 = topi.sqrt(topi.exp(A)) out_dtype = "float32" if type == "sum": B = topi.sum(A1, axis=axis, keepdims=keepdims) elif type == "max": B = topi.max(A1, axis=axis, keepdims=keepdims) elif type == "min": B = topi.min(A1, axis=axis, keepdims=keepdims) elif type == "argmax": B = topi.argmax(A1, axis=axis, keepdims=keepdims) out_dtype = "int32" elif type == "argmin": B = topi.argmin(A1, axis=axis, keepdims=keepdims) out_dtype = "int32" else: raise NotImplementedError def check_device(device): if not tvm.module.enabled(device): print("Skip because %s is not enabled" % device) return with tvm.target.create(device): s = topi.generic.schedule_reduce(B) ctx = tvm.context(device, 0) foo = tvm.build(s, [A, B], device, name="sum") # Test in_npy = np.random.uniform(size=in_shape).astype(np.float32) in_npy_map = np.sqrt(np.exp(in_npy)).astype(np.float32) if type == "sum": out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims) elif type == "max": out_npy = in_npy_map.max(axis=axis, keepdims=keepdims) elif type == "min": out_npy = in_npy_map.min(axis=axis, keepdims=keepdims) elif type == "argmax": out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims) elif type == "argmin": out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims) else: raise NotImplementedError data_tvm = tvm.nd.array(in_npy, ctx=ctx) out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype) for _ in range(1): foo(data_tvm, out_tvm) np.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3) check_device("opencl") check_device("cuda") check_device("metal") check_device("rocm")
def batch_norm(c, n, eps=1e-5): """batch normalization c : channels N : input width and height eps : small positive value to prevent divide 0 """ X = te.placeholder((c, n, n), name='X') Mean = te.placeholder((c, 1, 1), name='Mean') Var = te.placeholder((c, 1, 1), name='Var') Gamma = te.placeholder((c, 1, 1), name='Gamma') Beta = te.placeholder((c, 1, 1), name='Beta') C1 = X - Mean C2 = topi.sqrt(Var + eps) Y = C1 / C2 * Gamma + Beta return X, Mean, Var, Gamma, Beta, Y
def batch_norm(c, n, eps=1e-5): """batch normalization c : channels N : input width and height eps : small positive value to prevent divide 0 """ def bcast(A, B, op): """Ad-hoc broadcast calculation, broadcasting B to A A : te.Tensor B : either a te.Tensor or a scalar op :arithmetic operator in string """ # the shapes of two operands of broadcast should be identical # or B should be a scalar assert isinstance(B, float) or len(A.shape) == len(B.shape) if op == '+': #f = lambda x, y, z: A[x, y, z] + B[x, 0, 0] #C = te.compute(A.shape, f, name='C') C = A + B elif op == '-': C = A - B elif op == '*': C = A * B elif op == '/': C = A / B else: raise ValueError("op should be an arithmetic operator.") return C X = te.placeholder((c, n, n), name='X') Mean = te.placeholder((c, 1, 1), name='Mean') Var = te.placeholder((c, 1, 1), name='Var') Gamma = te.placeholder((c, 1, 1), name='Gamma') Beta = te.placeholder((c, 1, 1), name='Beta') C1 = bcast(X, Mean, '-') C2 = topi.sqrt(bcast(Var, eps, '+')) Y = C1 / C2 * Gamma + Beta return X, Mean, Var, Gamma, Beta, Y
def sqrt_compute(attrs, inputs, output_type, target):
    """Compute definition for sqrt: elementwise square root of the single input."""
    assert len(inputs) == 1
    return [topi.sqrt(inputs[0])]
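# Hypothetical registration sketch: this signature matches the pre-0.7 Relay
# FTVMCompute convention, where the compute would be attached to the op
# roughly as follows (the registration call is assumed, not from the source).
from tvm.relay.op import register_compute

register_compute("sqrt", sqrt_compute)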
def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum", dtype="float32"): # Build the logic and compile the function A = tvm.placeholder(shape=in_shape, name="A", dtype=dtype) A1 = topi.sqrt(topi.exp(A)) out_dtype = dtype if type == "sum": B = topi.sum(A1, axis=axis, keepdims=keepdims) elif type == "all": B = topi.all(A, axis=axis, keepdims=keepdims) elif type == "any": B = topi.any(A, axis=axis, keepdims=keepdims) elif type == "max": B = topi.max(A1, axis=axis, keepdims=keepdims) elif type == "min": B = topi.min(A1, axis=axis, keepdims=keepdims) elif type == "argmax": B = topi.argmax(A1, axis=axis, keepdims=keepdims) out_dtype = "int32" elif type == "argmin": B = topi.argmin(A1, axis=axis, keepdims=keepdims) out_dtype = "int32" else: raise NotImplementedError def check_device(device): ctx = tvm.context(device, 0) if not ctx.exist: print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) with tvm.target.create(device): s = topi.generic.schedule_reduce(B) foo = tvm.build(s, [A, B], device, name=type) # Test if dtype == 'bool': in_npy_map = in_npy = np.random.choice([True, False], size=in_shape) else: in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype) in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype) if type == "sum": out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims) elif type == "all" and dtype == 'bool': out_npy = in_npy_map.all(axis=axis, keepdims=keepdims) elif type == "any" and dtype == "bool": out_npy = in_npy_map.any(axis=axis, keepdims=keepdims) elif type == "max": out_npy = in_npy_map.max(axis=axis, keepdims=keepdims) elif type == "min": out_npy = in_npy_map.min(axis=axis, keepdims=keepdims) elif type == "argmax": out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims) elif type == "argmin": out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims) else: raise NotImplementedError data_tvm = tvm.nd.array(in_npy, ctx=ctx) out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype) for _ in range(1): foo(data_tvm, out_tvm) if type == "argmax" or type == "argmin": out_tvm_indices = out_tvm.asnumpy() if keepdims: out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis) if axis is None: out_tvm_val = in_npy_map.ravel()[out_tvm_indices] else: other_indices = tuple( np.indices(in_shape[0:axis] + in_shape[(axis + 1):])) sel_indices = other_indices[0:axis] + ( out_tvm_indices, ) + other_indices[axis:] out_tvm_val = in_npy_map[sel_indices] if type == "argmax": tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1E-3, 1E-3) elif type == "argmin": tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1E-3, 1E-3) else: tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3) for device in get_all_backend(): check_device(device)
def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum"): # Build the logic and compile the function dat_dtype = "float32" A = tvm.placeholder(shape=in_shape, name="A", dtype=dat_dtype) A1 = topi.sqrt(topi.exp(A)) out_dtype = "float32" if type == "sum": B = topi.sum(A1, axis=axis, keepdims=keepdims) elif type == "max": B = topi.max(A1, axis=axis, keepdims=keepdims) elif type == "min": B = topi.min(A1, axis=axis, keepdims=keepdims) elif type == "argmax": B = topi.argmax(A1, axis=axis, keepdims=keepdims) out_dtype = "int32" elif type == "argmin": B = topi.argmin(A1, axis=axis, keepdims=keepdims) out_dtype = "int32" else: raise NotImplementedError def check_device(device): ctx = tvm.context(device, 0) if not ctx.exist: print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) with tvm.target.create(device): s = topi.generic.schedule_reduce(B) foo = tvm.build(s, [A, B], device, name=type) # Test in_npy = np.random.uniform(size=in_shape).astype(np.float32) in_npy_map = np.sqrt(np.exp(in_npy)).astype(np.float32) if type == "sum": out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims) elif type == "max": out_npy = in_npy_map.max(axis=axis, keepdims=keepdims) elif type == "min": out_npy = in_npy_map.min(axis=axis, keepdims=keepdims) elif type == "argmax": out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims) elif type == "argmin": out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims) else: raise NotImplementedError data_tvm = tvm.nd.array(in_npy, ctx=ctx) out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype) for _ in range(1): foo(data_tvm, out_tvm) if type == "argmax" or type == "argmin": out_tvm_indices = out_tvm.asnumpy() if keepdims: out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis) if axis is None: out_tvm_val = in_npy_map.ravel()[out_tvm_indices] else: other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis+1):])) sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:] out_tvm_val = in_npy_map[sel_indices] if type == "argmax": np.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1E-3, 1E-3) elif type == "argmin": np.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1E-3, 1E-3) else: np.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3) for device in ["cuda", "opencl", "metal", "llvm", "rocm", "vulkan"]: check_device(device)