def forward(self, inps):
    """Gemm-style forward pass: ``alpha * matmul(A, B) [+ beta * C]``.

    ``inps`` carries the matmul operands and, optionally, a third bias
    operand; transpose flags and the alpha/beta scaling factors come
    from ``self.param``.
    """
    params = self.param
    result = F.matmul(inps[0], inps[1], params["transA"], params["transB"])
    # scale the product only when alpha actually differs from 1
    if params["alpha"] != 1.0:
        result = F.mul(result, params["alpha"])
    # a third input is the optional bias term, scaled by beta when needed
    if len(inps) == 3:
        bias = inps[2] if params["beta"] == 1.0 else F.mul(inps[2], params["beta"])
        result = F.add(result, bias)
    return result
def test_as_raw_tensor_from_int64():
    """An int64 ndarray converted to a float32 Tensor keeps values, dtype and device."""
    src = np.arange(6, dtype="int64").reshape(2, 3)
    converted = Tensor(src, dtype="float32", device="xpux")
    summed = F.add(converted, 1).numpy()
    assert converted.dtype == np.float32
    assert converted.device == "xpux"
    np.testing.assert_almost_equal(summed, src.astype("float32") + 1)
def test_add_input():
    """Attach an extra input node to a serialized graph and re-run it."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    serialized = io.BytesIO()
    fwd.dump(
        serialized,
        arg_names=["a", "b"],
        output_names="o",
        optimize_for_inference=False,
    )
    serialized.seek(0)

    net = Net.load(serialized)
    extra_inp = net.make_input_node((2,), np.int32, name="c")
    old_out = net.var_filter.name("o").as_unique()
    new_out = F.add(old_out, extra_inp)
    new_out.name = "o1"
    # swap the graph output: drop "o", expose "o1" instead
    net.remove_output(old_out)
    net.add_output(new_out)

    rewritten = io.BytesIO()
    net.dump(rewritten)
    rewritten.seek(0)

    runner = GraphInference(rewritten)
    results = runner.run(a, b, a)
    np.testing.assert_equal(results["o1"], ((a + b) * 2 + a).numpy())
def test_add_output():
    """Add a derived output to a dumped graph; both outputs must evaluate correctly."""
    a = Tensor([1.0, 2.0])
    b = Tensor([3.0, 4.0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    dumped = io.BytesIO()
    fwd.dump(
        dumped,
        arg_names=["a", "b"],
        output_names="o",
        optimize_for_inference=False,
    )
    dumped.seek(0)

    net = Net.load(dumped)
    in_a = net.var_filter.name("a").as_unique()
    in_b = net.var_filter.name("b").as_unique()
    # new output: sigmoid(a + b), registered alongside the original "o"
    derived = F.add(in_a, in_b)
    derived = F.sigmoid(derived)
    derived.name = "o1"
    net.add_output(derived)

    patched = io.BytesIO()
    net.dump(patched)
    patched.seek(0)

    runner = GraphInference(patched)
    outs = runner.run(a.numpy(), b.numpy())
    np.testing.assert_equal(outs["o"], ((a + b) * 2).numpy())
    np.testing.assert_equal(outs["o1"], (F.sigmoid((a + b))).numpy())
def test_replace_vars():
    """``cgtools.replace_vars`` swaps a const var for a computed var inside a graph."""
    graph = mgb_graph.Graph()
    graph.options.async_exec_level = 0b100
    device = "xpux"
    dtype = np.float32

    inp = mgb_graph.InputNode(device=device, dtype=dtype, graph=graph)
    const = graph.make_const(1.234)
    doubled = F.add(inp.outputs[0], inp.outputs[0])
    scaled = F.mul(doubled, const)
    total = F.add(scaled, inp.outputs[0])

    # replace the constant with (x + x): the result becomes (x+x)*(x+x) + x
    (replaced,) = cgtools.replace_vars([total._node], {const._node: doubled._node})
    out = mgb_graph.OutputNode(mgb_graph.VarNode(replaced))
    func = graph.compile(out.outputs[0])
    func.execute()

    value = make_dev_tensor(5.0, device=device)
    inp.set_value(value)
    # (5+5)*(5+5) + 5 == 105
    res = out.get_value().numpy()
    np.testing.assert_equal(res, np.array([105.0]))
def test_module_elemwise():
    """The Elemwise module must match the corresponding functional ops."""

    def run_module(method, *args):
        # instantiate the module per call and evaluate eagerly
        module = Elemwise(method)
        return module(*args).numpy()

    x = tensor(np.random.rand(100).astype("float32"))
    y = tensor(np.random.rand(100).astype("float32"))
    np.testing.assert_almost_equal(
        run_module("H_SWISH", x), F.hswish(x).numpy(), decimal=6
    )
    np.testing.assert_almost_equal(
        run_module("ADD", x, y), F.add(x, y).numpy(), decimal=6
    )
def forward(self, inps):
    """Return the elementwise sum of the first two inputs."""
    lhs = inps[0]
    rhs = inps[1]
    return F.add(lhs, rhs)
import megengine as mge import megengine.functional as F A = mge.tensor([[2., 4., 2.], [2., 4., 2.]]) B = mge.tensor([[1., 2., 1.], [1., 2., 1.]]) print(A + B) print(A - B) print(A * B) print(A / B) print(F.add(A, B)) print(F.sub(A, B)) print(F.mul(A, B)) print(F.div(A, B)) A = mge.tensor([[1., 2., 3.], [4., 5., 6.]]) print(A[1, :2]) A = mge.tensor([[1., 2., 3.], [4., 5., 6.]]) print(A.shape) A = A.reshape(3, 2) print(A.shape) x = mge.tensor([[1., 3., 5.],