def test_gamma_op():
    """Check GammaRNG sample moments on the default handle and an explicit one."""
    set_global_seed(1024)
    k, theta = 2, 0.8
    # Gamma(k, theta): mean = k*theta, std = sqrt(k)*theta
    want_mean = k * theta
    want_std = np.sqrt(k) * theta

    def check(out, device_name):
        got = out.numpy()
        assert np.fabs(got.mean() - want_mean) < 1e-1
        assert np.fabs(np.sqrt(got.var()) - want_std) < 1e-1
        assert str(out.device) == device_name

    shape_t = F.full([8, 9, 11, 12], value=k, dtype="float32")
    scale_t = F.full([8, 9, 11, 12], value=theta, dtype="float32")
    op = GammaRNG(seed=get_global_rng_seed(), handle=0)
    (output,) = apply(op, shape_t, scale_t)
    check(output, str(CompNode("xpux")))

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    shape_t = F.full([8, 9, 11, 12], value=k, dtype="float32", device="xpu2")
    scale_t = F.full([8, 9, 11, 12], value=theta, dtype="float32", device="xpu2")
    op = GammaRNG(seed=seed, handle=h)
    (output,) = apply(op, shape_t, scale_t)
    delete_rng_handle(h)
    check(output, str(cn))
def test_beta_op():
    """Check BetaRNG sample moments on the default handle and an explicit one."""
    set_global_seed(1024)
    a, b = 2, 0.8
    # Beta(a, b): mean = a/(a+b), var = a*b / ((a+b)^2 * (a+b+1))
    want_mean = a / (a + b)
    want_std = np.sqrt(a * b / ((a + b) ** 2 * (a + b + 1)))

    def check(out, device_name):
        got = out.numpy()
        assert np.fabs(got.mean() - want_mean) < 1e-1
        assert np.fabs(np.sqrt(got.var()) - want_std) < 1e-1
        assert str(out.device) == device_name

    alpha = F.full([8, 9, 11, 12], value=a, dtype="float32")
    beta = F.full([8, 9, 11, 12], value=b, dtype="float32")
    (output,) = apply(BetaRNG(seed=get_global_rng_seed()), alpha, beta)
    check(output, str(CompNode("xpux")))

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    alpha = F.full([8, 9, 11, 12], value=a, dtype="float32", device=cn)
    beta = F.full([8, 9, 11, 12], value=b, dtype="float32", device=cn)
    (output,) = apply(BetaRNG(seed=seed, handle=h), alpha, beta)
    delete_rng_handle(h)
    check(output, str(cn))
def test_gaussian_op():
    """Check GaussianRNG moments and dtype on the default and an explicit handle."""
    # FIXME: remove this sync
    mge.core.set_option("async_level", 0)
    set_global_seed(1024)
    shape = Tensor((8, 9, 11, 12), dtype="int32")

    def check(out, device_name, mean, std):
        got = out.numpy()
        assert np.fabs(got.mean() - mean) < 1e-1
        assert np.fabs(np.sqrt(got.var()) - std) < 1e-1
        assert str(out.device) == device_name
        assert out.dtype == np.float32

    op = GaussianRNG(seed=get_global_rng_seed(), mean=1.0, std=3.0, dtype="float32")
    (output,) = apply(op, shape)
    check(output, str(CompNode("xpux")), 1.0, 3.0)

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    op = GaussianRNG(seed=seed, mean=3.0, std=1.0, dtype="float32", handle=h)
    (output,) = apply(op, shape)
    delete_rng_handle(h)
    check(output, str(cn), 3.0, 1.0)
def test_raw_tensor():
    """Apply a raw Elemwise MUL op twice on the same tensor and verify x*x."""
    from megengine.core.ops.builtin import Elemwise

    x = np.random.rand(10).astype("float32")
    xx = Tensor(x)
    # applied twice on purpose: exercises a repeated apply on identical inputs
    for _ in range(2):
        (yy,) = apply(Elemwise(Elemwise.Mode.MUL), xx, xx)
        np.testing.assert_allclose(x * x, yy.numpy())
def parse_quant_info(self, t: IRTensor):
    """Fake-quantize weight data in place, or record t's quantization params.

    When ``self.param_fake_quant`` is enabled and ``t`` carries weight data
    (``np_data`` is present), the data is run through a FakeQuant op and
    written back.  Otherwise the quantization metadata is stored into
    ``self.quant_params`` keyed by the tensor's name.
    """
    dt = np.dtype(t.q_dtype)
    v_max, v_min = None, None
    is_weight = t.np_data is not None
    # integer q-dtypes get their full representable range as qmin/qmax;
    # other dtypes leave both as None
    if np.issubdtype(dt, np.integer):
        info = np.iinfo(dt)
        v_min, v_max = info.min, info.max
    if self.param_fake_quant and is_weight:
        # NOTE(review): when t.scale is None this branch silently does
        # nothing — presumably an unquantized weight; confirm upstream.
        if t.scale is not None:
            inp = megengine.tensor(t.np_data)
            scale = megengine.tensor(t.scale)
            # a falsy zero_point (None or 0) collapses to 0.0
            zero_point = megengine.tensor(
                float(t.zero_point) if t.zero_point else 0.0
            )
            from megengine.core._imperative_rt.core2 import (  # pylint:disable=import-error
                apply,
            )
            from megengine.core.ops.builtin import FakeQuant

            fq = FakeQuant(qmin=v_min, qmax=v_max)
            t.np_data = apply(fq, inp, scale, zero_point)[0].numpy()
    else:
        self.quant_params[t.name] = {
            "dtype": str(dt),
            "qmin": str(v_min),
            "qmax": str(v_max),
            "scale": str(t.scale),
            "zero_point": str(t.zero_point),
            "is_weight": is_weight,
        }
def test_poisson_op():
    """Check PoissonRNG moments (mean = lam, var = lam) on both handles."""
    set_global_seed(1024)

    def check(out, device_name):
        got = out.numpy()
        assert np.fabs(got.mean() - 2.0) < 1e-1
        assert np.fabs(np.sqrt(got.var()) - np.sqrt(2.0)) < 1e-1
        assert str(out.device) == device_name

    lam = F.full([8, 9, 11, 12], value=2, dtype="float32")
    (output,) = apply(PoissonRNG(seed=get_global_rng_seed()), lam)
    check(output, str(CompNode("xpux")))

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    lam = F.full([8, 9, 11, 12], value=2, dtype="float32", device=cn)
    (output,) = apply(PoissonRNG(seed=seed, handle=h), lam)
    delete_rng_handle(h)
    check(output, str(cn))
def test_identity():
    """Identity op must pass the upstream gradient through unchanged."""
    inp_np = np.random.rand(10).astype("float32")
    inp = mge.Tensor(inp_np)
    grad_np = np.random.rand(*inp.shape).astype("float32")
    upstream = mge.Tensor(grad_np)
    grad = Grad().wrt(inp, callback=save_to(inp))
    (out,) = apply(Identity(), inp)
    grad(out, upstream)
    # d(identity)/dx is the identity map, so inp.grad == upstream gradient
    np.testing.assert_array_equal(inp.grad.numpy(), grad_np)
def test_uniform_op():
    """UniformRNG samples should average near 0.5 on both handles."""
    shape = tensor((8, 9, 11, 12), dtype="int32")

    def check(out, device_name):
        assert np.fabs(out.numpy().mean() - 0.5) < 1e-1
        assert str(out.device) == device_name

    (output,) = apply(UniformRNG(seed=get_global_rng_seed()), shape)
    check(output, str(CompNode("xpux")))

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    (output,) = apply(UniformRNG(seed=seed, handle=h), shape)
    delete_rng_handle(h)
    check(output, str(cn))
def test_gaussian_op():
    """Check GaussianRNG sample moments on the default and an explicit handle."""
    shape = (
        8,
        9,
        11,
        12,
    )
    shape = tensor(shape, dtype="int32")
    op = GaussianRNG(seed=get_global_rng_seed(), mean=1.0, std=3.0)
    (output,) = apply(op, shape)
    assert np.fabs(output.numpy().mean() - 1.0) < 1e-1
    # BUGFIX: the std assertion previously lacked np.fabs(), making it
    # vacuously true whenever the sample std undershot the target (the
    # sibling dtype-variant of this test already used np.fabs correctly).
    assert np.fabs(np.sqrt(output.numpy().var()) - 3.0) < 1e-1
    assert str(output.device) == str(CompNode("xpux"))

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    op = GaussianRNG(seed=seed, mean=3.0, std=1.0, handle=h)
    (output,) = apply(op, shape)
    delete_rng_handle(h)
    assert np.fabs(output.numpy().mean() - 3.0) < 1e-1
    assert np.fabs(np.sqrt(output.numpy().var()) - 1.0) < 1e-1
    assert str(output.device) == str(cn)
def test_permutation_op_dtype(dtype):
    """PermutationRNG should emit a genuine permutation with few fixed points."""

    def fixed_points(res, transform):
        # count positions where transform(res)[i] == i
        return sum(1 for i, v in enumerate(transform(res.numpy())) if i == v)

    # NOTE(review): relies on a free variable `n` from the surrounding
    # module/fixture scope — confirm it is defined there.
    shape = Tensor((n,), dtype="int32")

    op = PermutationRNG(seed=get_global_rng_seed(), dtype=dtype)
    (output,) = apply(op, shape)
    assert fixed_points(output, lambda x: x) < 500
    assert fixed_points(output, np.sort) == n  # sorted result is 0..n-1
    assert str(output.device) == str(CompNode("xpux"))
    assert output.dtype == dtype

    cn = CompNode("xpu2")
    seed = 233333
    h = new_rng_handle(cn, seed)
    op = PermutationRNG(seed=seed, handle=h, dtype=dtype)
    (output,) = apply(op, shape)
    delete_rng_handle(h)
    assert fixed_points(output, lambda x: x) < 500
    assert fixed_points(output, np.sort) == n
    assert str(output.device) == str(cn)
    assert output.dtype == dtype
def fwd(x, y):
    """Apply the enclosing-scope ``op`` to (x, y); return the first output."""
    outputs = apply(op, x, y)
    return outputs[0]
def elemwise(*args, mode):
    """Apply an Elemwise op of the given *mode* to *args*; returns all outputs."""
    from megengine.core.ops.builtin import Elemwise

    elem_op = Elemwise(mode)
    return apply(elem_op, *args)
def fwd(*tensors):
    """Apply the enclosing-scope ``op`` to *tensors*; return the first output."""
    results = apply(op, *tensors)
    return results[0]
def fwd(data):
    """Run the enclosing-scope ``op`` on ``data``; return the first output."""
    outputs = apply(op, data)
    return outputs[0]
def assert_equal(expect, real, **kwargs):
    """Apply an AssertEqual op (forwarding **kwargs) to the (expect, real) pair."""
    check_op = builtin.AssertEqual(**kwargs)
    (result,) = apply(check_op, expect, real)
    return result
def invoke_op(op, inputs_, cvt_inputs=canonize_inputs):
    """Canonize ``inputs_`` via ``cvt_inputs`` and apply ``op`` to the result."""
    cfg = megengine.core._imperative_rt.OperatorNodeConfig()
    canonical = cvt_inputs(inputs_, config=cfg)
    return apply(op, *canonical)
def forward(self, x):
    """Add 1 to ``x.shape`` via a raw Elemwise ADD op; returns apply's result."""
    shape_t = x.shape
    result = apply(builtin.Elemwise(mode="ADD"), shape_t, Tensor(1))
    return result
def f(*args):
    """Apply the enclosing-scope ``op``; expects exactly one output."""
    (only_output,) = apply(op, *args)
    return only_output
def forward(self, x, y):
    """Elementwise-add ``x`` and ``y`` through a raw Elemwise op."""
    add_op = Elemwise("ADD")
    outputs = apply(add_op, x, y)
    return outputs[0]