def test_metadata():
    """Check the metadata attached by tracing.dump / Network.dump round-trips."""
    inp = Tensor(0)

    @trace(symbolic=True, capture_as_const=True)
    def fwd(x):
        return x * 2

    fwd(inp)
    buf = io.BytesIO()
    fwd.dump(buf, user_info="test", optimize_for_inference=False)
    buf.seek(0)
    graph = Net.load(buf)
    assert graph.metadata == {
        "user_info": "test",
        "graph_modified": False,  # False: tracing.dump
        "optimized_for_inference": False,
    }

    # Re-dump through Network.dump with a rich user_info payload.
    buf.seek(0)
    graph.dump(
        buf,
        user_info={"str": "x", "tensor": inp, "module": M.Module, "none": None},
        optimize_for_inference=True,
        enable_nchw4=True,
        enable_ioc16=True,
    )
    buf.seek(0)
    graph = Net.load(buf)
    assert graph.metadata == {
        "user_info": {"str": "x", "tensor": inp, "module": M.Module, "none": None},
        "graph_modified": True,  # True: Network.dump
        "optimized_for_inference": True,
        "enable_nchw4": True,
        "enable_ioc16": True,
    }

    # With metadata disabled, nothing is recorded at all.
    buf.seek(0)
    fwd.dump(buf, enable_metadata=False)
    buf.seek(0)
    graph = Net.load(buf)
    assert graph.metadata is None
def test_add_input():
    """Add a brand-new graph input and rewire the output to consume it."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)
    net = Net.load(model)

    # Introduce a third input "c" and add it onto the old output "o".
    new_inp = net.make_input_node((2,), np.int32, name="c")
    old_out = net.var_filter.name("o").as_unique()
    new_out = F.add(old_out, new_inp)
    new_out.name = "o1"
    net.remove_output(old_out)
    net.add_output(new_out)

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a, b, a)
    np.testing.assert_equal(out["o1"], ((a + b) * 2 + a).numpy())
def test_make_const():
    """Replace an input var with a freshly-made constant node."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)
    net = Net.load(model)

    # Freeze input "b" to the constant [0, 0]; only "a" remains an input.
    const_b = net.make_const(np.array([0.0, 0.0]), name="b")
    var_b = net.var_filter.name("b").as_unique()
    net.replace_vars({var_b: const_b})

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a)
    # (a + 0) * 2
    np.testing.assert_equal(out["o"], [2, 4])
def test_modify_params():
    """Overwrite the single captured parameter (the literal 2) with 3."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)
    net = Net.load(model)

    # The traced multiplier is the only param in the graph.
    multiplier = net.params_filter.as_unique()
    multiplier.set_value(3)

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a, b)
    # (a + b) * 3
    np.testing.assert_equal(out["o"], [12, 18])
def test_set_symbolic_shape():
    """Toggle symbolic-shape mode and observe what VarNode.shape yields."""
    a = Tensor([1.0, 2.0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a):
        return F.relu(a * 2)

    fwd(a)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a"], output_names=["o"], optimize_for_inference=False,
    )
    model.seek(0)
    net = Net.load(model)
    var_a = net.input_vars[0]

    # Symbolic mode: .shape is itself a VarNode in the graph.
    saved = set_symbolic_shape(True)
    assert isinstance(var_a.shape, VarNode)
    # Eager mode: .shape is the concrete (partial) shape.
    set_symbolic_shape(False)
    assert var_a.shape == var_a.partial_shape
    # Restore whatever the suite had before.
    set_symbolic_shape(saved)
def check_pygraph_dump(trace_func, inp_data, expect_results, max_err=None):
    """Dump ``trace_func``, reload through Network, re-dump, and run inference.

    Each output is compared with ``expect_results`` exactly, or to ``max_err``
    decimal places when given; dtypes must always match.
    """
    dumped = io.BytesIO()
    arg_names = ["arg_{}".format(i) for i in range(len(inp_data))]
    output_names = ["out_{}".format(i) for i in range(len(expect_results))]
    trace_func.dump(
        dumped,
        arg_names=arg_names,
        output_names=output_names,
        optimize_for_inference=False,
    )
    dumped.seek(0)

    # Round-trip once more through Network.dump to exercise pygraph dumping.
    net = Net.load(dumped)
    redumped = io.BytesIO()
    net.dump(redumped, optimize_for_inference=False)
    redumped.seek(0)

    graph = GraphInference(redumped)
    inp_dict = {name: value.numpy() for name, value in zip(arg_names, inp_data)}
    results = graph.run(inp_dict=inp_dict)
    for name, expected in zip(output_names, expect_results):
        actual = results[name]
        if max_err:
            # max_err is forwarded as the ``decimal`` argument.
            np.testing.assert_almost_equal(expected.numpy(), actual, max_err)
        else:
            np.testing.assert_equal(expected.numpy(), actual)
        assert expected.dtype == actual.dtype
def test_replace_var():
    """Swap the add opr's output var for relu(a * b) via replace_vars."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)
    net = Net.load(model)

    var_a = net.var_filter.name("a").as_unique()
    var_b = net.var_filter.name("b").as_unique()
    # Build the replacement expression relu(a * b).
    replacement = F.relu(F.mul(var_a, var_b))
    # The first opr consuming "a" is the (a + b) add; replace its output.
    consumers = list(net.opr_filter.has_input(var_a))
    net.replace_vars({consumers[0].outputs[0]: replacement})

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a, b)
    # relu(a * b) * 2 = [6, 16]
    np.testing.assert_equal(out["o"], [6, 16])
def test_utils_astensor1d(is_varnode):
    """astensor1d must accept literals, arrays, tensors and mixed lists."""
    network = Network() if is_varnode else None
    reference = make_tensor(0, network)

    # python list of scalars
    src = [1, 2, 3]
    for dtype in [None, "float32"]:
        out = astensor1d(src, reference, dtype=dtype)
        assert isinstance(out, type(reference))
        np.testing.assert_equal(out.numpy(), src)

    # numpy array: requested dtype must be honored
    arr = np.asarray([1, 2, 3], dtype="int32")
    for dtype in [None, "float32"]:
        out = astensor1d(arr, reference, dtype=dtype)
        assert isinstance(out, type(reference))
        np.testing.assert_equal(out.numpy(), arr.astype(dtype) if dtype else arr)

    # tensor input
    src_tensor = make_tensor([1, 2, 3], network)
    for dtype in [None, "float32"]:
        out = astensor1d(src_tensor, reference, dtype=dtype)
        assert isinstance(out, type(reference))
        np.testing.assert_equal(out.numpy(), src_tensor.numpy())

    # mixed list of scalars and tensors
    mixed = [1, make_tensor(2, network), 3]
    for dtype in [None, "float32"]:
        out = astensor1d(mixed, reference, dtype=dtype)
        assert isinstance(out, type(reference))
        np.testing.assert_equal(out.numpy(), [1, 2, 3])
def test_broadcast_auto_infer(is_varnode):
    """broadcast_to must infer None dims and reject illegal target shapes."""
    network = Network() if is_varnode else None

    data = np.random.random((1, 2, 3)).astype(np.float32)
    src = make_tensor(data, network)

    # None entries are inferred from the source shape.
    for shape in [
        (1, 2, 3),
        (1, None, 3),
    ]:
        out = F.broadcast_to(src, shape)
        np.testing.assert_equal(out.numpy(), data)

    # Negative sizes are rejected.
    with pytest.raises(ValueError):
        F.broadcast_to(src, (1, -1, 3))
    # None is not allowed in a newly prepended leading dim.
    with pytest.raises(ValueError):
        F.broadcast_to(src, (None, 1, 2, 3))

    # These forms are legal and must not raise.
    F.broadcast_to(src, (1, None, 2, 3))
    leading = tensor(2, dtype=np.int32)
    F.broadcast_to(src, (leading, None, 2, 3))
def test_copy_d2h(is_varnode):
    """Device-to-host copy."""
    network = Network() if is_varnode else None
    copy_test("gpu0", "cpu0", network=network)
def test_replace_opr():
    """Splice a relu(a - b) opr in place of the original add via replace_oprs."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)
    net = Net.load(model)

    var_a = net.var_filter.name("a").as_unique()
    var_b = net.var_filter.name("b").as_unique()
    # Build relu(a - b) and register its dependency oprs with the network.
    new_out = F.relu(F.sub(var_a, var_b))
    new_out = net.add_dep_oprs(new_out)
    # The sole opr consuming "a" is the (a + b) add.
    add_opr = net.opr_filter.has_input(var_a).as_unique()
    net.replace_oprs({add_opr: new_out[0].owner})

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a, b)
    # relu([-2, -2]) * 2 == [0, 0]
    np.testing.assert_equal(out["o"], [0, 0])
def test_split(is_varnode):
    """F.split by part count and by section list; invalid specs must raise.

    Fix: the original used the hand-rolled ``try: ...; assert False`` pattern
    (with an unused ``except ValueError as e`` binding); use ``pytest.raises``
    — already the convention elsewhere in this file — so an unexpected pass
    is reported as a missing ValueError rather than a bare AssertionError.
    """
    if is_varnode:
        network = Network()
    else:
        network = None

    data = np.random.random((2, 3, 4, 5)).astype(np.float32)
    inp = make_tensor(data, network)

    mge_out0 = F.split(inp, 2, axis=3)   # two equal parts along axis 3
    mge_out1 = F.split(inp, [3], axis=3)  # split at index 3 along axis 3
    np_out = np.split(data, [3, 5], axis=3)
    assert len(mge_out0) == 2
    assert len(mge_out1) == 2
    np.testing.assert_equal(mge_out0[0].numpy(), np_out[0])
    np.testing.assert_equal(mge_out1[0].numpy(), np_out[0])
    np.testing.assert_equal(mge_out0[1].numpy(), np_out[1])
    np.testing.assert_equal(mge_out1[1].numpy(), np_out[1])

    # Default axis 0 has size 2 and cannot be split into 4 parts.
    with pytest.raises(ValueError):
        F.split(inp, 4)

    # Invalid section list: must raise with the exact message below.
    with pytest.raises(ValueError) as exc_info:
        F.split(inp, [3, 3, 5], axis=3)
    assert str(exc_info.value) == "Invalid nsplits_or_secions: [3, 3, 5]"
def test_add_output():
    """Attach an extra output var to a loaded network and run both outputs."""
    a = Tensor([1.0, 2.0])
    b = Tensor([3.0, 4.0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)
    net = Net.load(model)

    var_a = net.var_filter.name("a").as_unique()
    var_b = net.var_filter.name("b").as_unique()
    # Second output: sigmoid(a + b), alongside the original "o".
    extra = F.sigmoid(F.add(var_a, var_b))
    extra.name = "o1"
    net.add_output(extra)

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a.numpy(), b.numpy())
    np.testing.assert_equal(out["o"], ((a + b) * 2).numpy())
    np.testing.assert_equal(out["o1"], (F.sigmoid((a + b))).numpy())
def test_indexing_error(test_varnode):
    """Illegal indexing patterns must raise IndexError / ValueError."""
    network = Network() if test_varnode else None

    base = np.arange(9).reshape(3, 3).astype(np.float32)
    index = np.array([1, 2])
    t = make_tensor(base, network)
    idx = make_tensor(index, network)

    # only one ellipsis is allowed
    with pytest.raises(IndexError):
        t[..., ...]
    # too many indices
    with pytest.raises(IndexError):
        t[idx, idx, idx]
    # shape mismatch
    with pytest.raises(ValueError):
        t[:] = idx

    if test_varnode:
        masked = t[t > 4]
        # does not support ellipsis when tensor's ndim is unknown
        with pytest.raises(IndexError):
            masked[...]
        mask = t > 4
        # does not support bool index with unknown shape when using ellipsis
        with pytest.raises(IndexError):
            masked[..., mask[mask]]
def test_copy_d2d(is_varnode):
    """Device-to-device copies: across cards and across streams."""
    network = Network() if is_varnode else None
    copy_test("gpu0", "gpu1", network=network)
    copy_test("gpu0:0", "gpu0:1", network=network)
def test_replace_var_in_different_network():
    """Feed one network's inputs from a var that lives in another network."""
    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    @trace(symbolic=True, capture_as_const=True)
    def fwd1(c, d):
        return c + d

    fwd(a, b)
    model = io.BytesIO()
    fwd.dump(
        model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    model.seek(0)

    fwd1(a, b)
    model1 = io.BytesIO()
    fwd1.dump(
        model1,
        arg_names=["c", "d"],
        output_names="o",
        optimize_for_inference=False,
    )
    model1.seek(0)

    net = Net.load(model)
    donor = Net.load(model1)

    var_a = net.var_filter.name("a").as_unique()
    var_b = net.var_filter.name("b").as_unique()
    # Both inputs of the first network now come from the donor's output c + d.
    donor_out = donor.var_filter.name("o").as_unique()
    net.replace_vars({var_a: donor_out, var_b: donor_out})

    out_buf = io.BytesIO()
    net.dump(out_buf)
    out_buf.seek(0)
    runner = GraphInference(out_buf)
    out = runner.run(a, b)
    # ((a + b) + (a + b)) * 2 = [16, 24]
    np.testing.assert_equal(out["o"], [16, 24])
def test_transpose(is_varnode):
    """The .T property must match numpy's transpose."""
    network = Network() if is_varnode else None
    data = np.random.rand(2, 5).astype("float32")
    t = make_tensor(data, network)
    np.testing.assert_almost_equal(t.T.numpy(), data.T)
def test_identity(is_varnode):
    """F.copy must produce a value identical to its input."""
    network = Network() if is_varnode else None
    src = make_tensor(np.random.random((5, 10)).astype(np.float32), network)
    dst = F.copy(src)
    np.testing.assert_equal(dst.numpy(), src)
def test_reset_batchsize():
    """reset_batch_size must rewrite the data provider's leading dim."""

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return F.exp(x)

    model = io.BytesIO()
    f(Tensor(np.random.random((3, 3, 224, 224))))
    f.dump(model, optimize_for_inference=False)
    model.seek(0)

    net = Net.load(model)
    net.reset_batch_size(1)  # shrink batch dim 3 -> 1
    modified = io.BytesIO()
    net.dump(modified, optimize_for_inference=False)
    modified.seek(0)

    reloaded = Net.load(modified)
    assert reloaded.data_providers_filter.as_unique().shape[0] == 1
def test_splice_network():
    """Splice net2's output into net1's first input and check var names."""
    x = F.ones((2,))
    y = F.ones((2,))

    @trace(symbolic=True, capture_as_const=True)
    def fun1(a, b):
        return (a + b) * 2

    @trace(symbolic=True, capture_as_const=True)
    def fun2(a):
        return a * 2 - 1

    buf = io.BytesIO()
    fun1(x, y)
    fun2(x)
    fun1.dump(
        buf,
        arg_names=["net1_i0", "net1_i1"],
        output_names=["net1_o0"],
        optimize_for_inference=False,
    )
    buf.seek(0)
    net1 = Net.load(buf)

    buf.seek(0)
    fun2.dump(
        buf,
        arg_names=["net2_i0"],
        output_names=["net2_o0"],
        optimize_for_inference=False,
    )
    buf.seek(0)
    net2 = Net.load(buf)

    # Pull net2's outputs into net1, then feed net1_i0 from net2_o0.
    net1.add_output(*net2.output_vars)
    old_inp = net1.var_filter.name("net1_i0").as_unique()
    new_src = net2.var_filter.name("net2_o0").as_unique()
    net1.replace_vars({old_inp: new_src})
    assert "net1_i0" not in [v.name for v in net1.all_vars]
    assert "net2_i0" in [v.name for v in net1.all_vars]

    # The splice must survive a dump/load round trip.
    buf.seek(0)
    net1.dump(buf, keep_var_name=2, optimize_for_inference=False)
    buf.seek(0)
    spliced = Net.load(buf)
    assert "net1_i0" not in [v.name for v in spliced.all_vars]
    assert "net2_i0" in [v.name for v in spliced.all_vars]
def test_modify_opr_name():
    """Opr renames must stack: the later prefix wraps the earlier one."""

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return F.exp(x)

    model = io.BytesIO()
    f(Tensor(np.random.random((3, 3, 224, 224))))
    f.dump(model, arg_names=["a"], optimize_for_inference=False)
    model.seek(0)

    net = Net.load(model)
    net.modify_opr_names("net")  # string form: prefix with "net."
    net.modify_opr_names(lambda name: "net1." + name)  # callable form
    modified = io.BytesIO()
    net.dump(modified, optimize_for_inference=False)
    modified.seek(0)

    reloaded = Net.load(modified)
    assert reloaded.data_providers_filter.as_unique().name == "net1.net.a"
def test_matmul(is_varnode):
    """The @ operator must agree with numpy matmul (to 6 decimals)."""
    network = Network() if is_varnode else None
    lhs = make_tensor(np.random.rand(5, 7).astype("float32"), network)
    rhs = make_tensor(np.random.rand(7, 10).astype("float32"), network)
    out = lhs @ rhs
    np.testing.assert_almost_equal(out.numpy(), lhs.numpy() @ rhs.numpy(), decimal=6)
def test_literal_arith(is_varnode):
    """Tensor-times-python-literal arithmetic must match numpy."""
    network = Network() if is_varnode else None
    data = np.random.rand(10).astype("float32")
    t = make_tensor(data, network)
    doubled = t * 2
    np.testing.assert_almost_equal(doubled.numpy(), data * 2)
def test_set_value(is_varnode):
    """Full-slice assignment (param[...] = v) must replace the contents."""
    network = Network() if is_varnode else None
    initial = np.random.random((2, 3)).astype(np.float32)
    param = make_tensor(initial, network)
    replacement = np.random.random((2, 3)).astype(np.float32)
    param[...] = replacement
    np.testing.assert_allclose(param.numpy(), replacement, atol=5e-6)
def test_arange(is_varnode):
    """F.arange against numpy: ascending, descending, and float-stepped."""
    network = Network() if is_varnode else None

    def ref_fn(start, end, step):
        return np.arange(start, end, step, dtype=np.float32)

    case_groups = [
        # ascending integer ranges
        [{"input": [1, 9, 1]}, {"input": [2, 10, 2]}],
        # descending (negative step)
        [{"input": [9, 1, -1]}, {"input": [10, 2, -2]}],
        # float bounds and steps
        [{"input": [9.3, 1.2, -0.5]}, {"input": [10.3, 2.1, -1.7]}],
    ]
    for cases in case_groups:
        opr_test(cases, F.arange, ref_fn=ref_fn, network=network)
def test_linspace(is_varnode):
    """F.linspace against numpy, including tensor-valued endpoints."""
    network = Network() if is_varnode else None

    def ref_fn(start, end, step):
        return np.linspace(start, end, step, dtype=np.float32)

    # Scalar endpoints, ascending then descending.
    for cases in [
        [{"input": [1, 9, 9]}, {"input": [3, 10, 8]}],
        [{"input": [9, 1, 9]}, {"input": [10, 3, 8]}],
    ]:
        opr_test(cases, F.linspace, ref_fn=ref_fn, network=network)

    # Tensor endpoints; both cases describe linspace(1, 9, 9).
    cases = [
        {"input": [1, make_tensor(9, network), 9]},
        {"input": [make_tensor(1, network), 9, make_tensor(9, network)]},
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(1, 9, 9, dtype=np.float32),
        network=network,
    )
def test_round(is_varnode):
    """F.round against np.round on two random 1-D inputs."""
    network = Network() if is_varnode else None
    cases = [
        {"input": np.random.random((15,)).astype(np.float32)},
        {"input": np.random.random((25,)).astype(np.float32)},
    ]
    opr_test(cases, F.round, ref_fn=np.round, network=network)
def test_expand_dims(is_varnode):
    """F.expand_dims against numpy for single axes and axis tuples."""
    network = Network() if is_varnode else None
    data = np.arange(6, dtype="float32").reshape(2, 3)
    t = make_tensor(data, network)
    # positive axis, negative axis, and mixed tuples
    for axis in [2, -3, (3, -4), (1, -4)]:
        expected = np.expand_dims(data, axis)
        actual = F.expand_dims(t, axis)
        np.testing.assert_equal(expected, actual.numpy())
def test_concat_device(is_varnode):
    """Concat of tensors on different devices lands on the requested device."""
    network = Network() if is_varnode else None
    part0 = make_tensor(np.random.random((3, 2, 2)).astype("float32"), network, "cpu0")
    part1 = make_tensor(np.random.random((2, 2, 2)).astype("float32"), network, "cpu1")
    out = F.concat([part0, part1], device="cpu0")
    # compare only the device type, ignoring any stream suffix
    assert str(out.device).split(":")[0] == "cpu0"
def test_condtake(is_varnode):
    """F.cond_take: values and flat indices where the mask is True."""
    network = Network() if is_varnode else None
    values = np.array([[1, 2, 3], [4, 5, 6]]).astype("float32")
    mask = np.array([[True, False, True], [False, True, True]])
    values_t = make_tensor(values, network)
    mask_t = make_tensor(mask, network)
    taken, indices = F.cond_take(mask_t, values_t)
    np.testing.assert_equal(taken.numpy(), values[mask])
    np.testing.assert_equal(indices.numpy(), np.where(mask.reshape(-1))[0])