Example #1
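These snippets exercise MegEngine's graph-editing utilities (Network, GraphInference) and are listed without their module-level imports. The block below is a sketch of the imports they appear to assume, reconstructed rather than taken from the original listing; Example #12 additionally relies on helpers from its own converter test module (TopologyNetwork, change_batch_and_dump, check_caffe_result, check_onnx_result) plus argparse, time, and collections.OrderedDict.

# Presumed imports for the examples below (reconstructed; not part of the original listing).
import io

import numpy as np

import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
from megengine import Tensor
from megengine.core.ops import builtin
from megengine.jit import trace
from megengine.utils.comp_graph_tools import GraphInference
from megengine.utils.network import Network as Net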
def test_replace_var():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    vara = graph.var_filter.name("a").as_unique()
    varb = graph.var_filter.name("b").as_unique()

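    # build a replacement subgraph relu(a * b) from the loaded graph's input vars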
    out = F.mul(vara, varb)
    out = F.relu(out)

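    # rewire the consumer of (a + b) to the new subgraph, so o = relu(a * b) * 2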
    opnode = list(graph.opr_filter.has_input(vara))
    repl_dict = {opnode[0].outputs[0]: out}
    graph.replace_vars(repl_dict)

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [6, 16])
Example #2
def check_pygraph_dump(trace_func, inp_data, expect_results, max_err=None):
    orig_model = io.BytesIO()
    inp_size = len(inp_data)
    out_size = len(expect_results)
    arg_names = ["arg_{}".format(i) for i in range(inp_size)]
    output_names = ["out_{}".format(i) for i in range(out_size)]
    trace_func.dump(
        orig_model,
        arg_names=arg_names,
        output_names=output_names,
        optimize_for_inference=False,
    )
    orig_model.seek(0)

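    # round-trip the traced model through Network and check inference results against expectations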
    net = Net.load(orig_model)
    file = io.BytesIO()
    net.dump(file, optimize_for_inference=False)
    file.seek(0)
    graph = GraphInference(file)

    inp_dict = {arg_names[i]: inp_data[i].numpy() for i in range(inp_size)}
    results = graph.run(inp_dict=inp_dict)

    for ind, tensor in enumerate(expect_results):
        if max_err:
            np.testing.assert_almost_equal(tensor.numpy(),
                                           results[output_names[ind]], max_err)
        else:
            np.testing.assert_equal(tensor.numpy(), results[output_names[ind]])
        assert tensor.dtype == results[output_names[ind]].dtype
Example #3
def test_add_input():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
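    # add a new graph input "c" and make o1 = o + c the sole output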
    inp_c = graph.make_input_node((2, ), np.int32, name="c")
    varo = graph.var_filter.name("o").as_unique()

    out = F.add(varo, inp_c)
    out.name = "o1"
    graph.remove_output(varo)
    graph.add_output(out)
    modified_model = io.BytesIO()

    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b, a)
    np.testing.assert_equal(out["o1"], ((a + b) * 2 + a).numpy())
Example #4
def test_make_const():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
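    # bake input "b" into a constant so that only "a" is fed at runtime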
    const_b = graph.make_const(np.array([0.0, 0.0]), name="b")
    varb = graph.var_filter.name("b").as_unique()

    repl_dict = {varb: const_b}
    graph.replace_vars(repl_dict)

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a)
    np.testing.assert_equal(out["o"], [2, 4])
Example #5
def test_modify_params():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
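    # the graph's only parameter is the constant multiplier 2; change it to 3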
    param_const = graph.params_filter.as_unique()
    param_const.set_value(3)

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [12, 18])
Example #6
def test_replace_opr():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    vara = graph.var_filter.name("a").as_unique()
    varb = graph.var_filter.name("b").as_unique()

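    # build relu(a - b) and substitute its producing opr for the original Add opr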
    out1 = F.sub(vara, varb)
    out1 = F.relu(out1)
    out1 = graph.add_dep_oprs(out1)
    orig_opr = graph.opr_filter.has_input(vara).as_unique()

    repl_dict = {orig_opr: out1[0].owner}
    graph.replace_oprs(repl_dict)
    modified_model1 = io.BytesIO()
    graph.dump(modified_model1)
    modified_model1.seek(0)

    load_graph = GraphInference(modified_model1)
    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [0, 0])
Example #7
def test_add_output():

    a = Tensor([1.0, 2.0])
    b = Tensor([3.0, 4.0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(
        orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    orig_model.seek(0)

    net = Net.load(orig_model)
    var_a = net.var_filter.name("a").as_unique()
    var_b = net.var_filter.name("b").as_unique()

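    # keep the original output "o" and register sigmoid(a + b) as an extra output "o1"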
    y = F.add(var_a, var_b)
    y = F.sigmoid(y)

    y.name = "o1"
    net.add_output(y)

    modified_model = io.BytesIO()
    net.dump(modified_model)
    modified_model.seek(0)

    g = GraphInference(modified_model)
    out = g.run(a.numpy(), b.numpy())

    np.testing.assert_equal(out["o"], ((a + b) * 2).numpy())
    np.testing.assert_equal(out["o1"], (F.sigmoid((a + b))).numpy())
Example #8
def test_replace_var_in_different_network():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    @trace(symbolic=True, capture_as_const=True)
    def fwd1(c, d):
        return c + d

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    fwd1(a, b)
    orig_model1 = io.BytesIO()
    fwd1.dump(
        orig_model1,
        arg_names=["c", "d"],
        output_names="o",
        optimize_for_inference=False,
    )
    orig_model1.seek(0)

    graph = Net.load(orig_model)
    graph1 = Net.load(orig_model1)
    vara = graph.var_filter.name("a").as_unique()
    varb = graph.var_filter.name("b").as_unique()
    varo = graph1.var_filter.name("o").as_unique()

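    # splice graph1's output (c + d) in place of both inputs of graph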
    graph.replace_vars({vara: varo, varb: varo})

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [16, 24])
Example #9
def test_assert_equal():
    g = G.Graph()
    inp1 = g.make_h2d(dtype=np.float32, device="xpux")
    inp2 = g.make_h2d(dtype=np.float32, device="xpux")
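    # AssertEqual raises at runtime if its two inputs differ by more than maxerr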
    op = builtin.AssertEqual(maxerr=1e-5)
    out = G.apply_normal_varnode(op, inp1._node, inp2._node)[0]
    g.compile(out)
    file = io.BytesIO()
    out_model = G.dump_graph([out])
    file.write(out_model[0])
    file.seek(0)
    net = Net.load(file)

    dump_file = io.BytesIO()
    net.dump(dump_file)
    dump_file.seek(0)
    g = GraphInference(dump_file)
    g.run(np.array([1.0, 2.0]), np.array([1.0, 2.0]))
Example #10
def test_add_remove_output():

    a = Tensor([1.0, 2.0])
    b = Tensor([3.0, 4.0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2, (a - b)

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(
        orig_model,
        arg_names=["a", "b"],
        output_names=["o1", "o2"],
        optimize_for_inference=False,
    )
    orig_model.seek(0)

    net = Net.load(orig_model)
    var_a = net.var_filter.name("a").as_unique()
    var_b = net.var_filter.name("b").as_unique()

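    # drop the original outputs and expose (a + b) * 3 and sigmoid(a + b) instead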
    y1 = (var_a + var_b) * 3
    y2 = F.sigmoid(var_a + var_b)

    net.remove_output(*net.output_vars)
    y1.name = "new_o1"
    y2.name = "new_o2"
    net.add_output(y1, y2)

    modified_model = io.BytesIO()
    net.dump(modified_model)
    modified_model.seek(0)

    g = GraphInference(modified_model)
    out = g.run(a.numpy(), b.numpy())

    np.testing.assert_equal(out["new_o1"], ((a + b) * 3).numpy())
    np.testing.assert_almost_equal(out["new_o2"], (F.sigmoid((a + b))).numpy())
Example #11
def test_dump_cond_take():

    a = Tensor([1.0, 2.0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a):
        return F.cond_take(a > 1, a)

    fwd(a)
    orig_model = io.BytesIO()
    fwd.dump(
        orig_model,
        arg_names=["a"],
        output_names=["o1", "o2"],
        optimize_for_inference=False,
    )
    orig_model.seek(0)

    net = Net.load(orig_model)
    var_a = net.input_vars[0]

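    # rebuild cond_take on the loaded input var and expose its value/index results as outputs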
    val, idx = F.cond_take(var_a > 1, var_a)

    net.remove_output(*net.output_vars)
    val.name = "value"
    idx.name = "index"
    net.add_output(val, idx)

    modified_model = io.BytesIO()
    net.dump(modified_model)
    modified_model.seek(0)

    g = GraphInference(modified_model)
    out = g.run(a.numpy())

    data = a.numpy()
    mask = a.numpy() > 1
    np.testing.assert_equal(out["index"], np.where(mask.reshape(-1))[0])
    np.testing.assert_equal(out["value"], data[mask])
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        type=str,
                        help="Input megengine dump model file")
    parser.add_argument("-c",
                        "--prototxt",
                        required=True,
                        type=str,
                        help="Output caffe .prototxt file")
    parser.add_argument(
        "-b",
        "--caffemodel",
        required=True,
        type=str,
        help="Output caffe .caffemodel file",
    )
    parser.add_argument("-o",
                        "--onnxoutput",
                        required=True,
                        type=str,
                        help="Output onnx .onnx file")
    parser.add_argument("--onnxopset",
                        default=8,
                        type=int,
                        help="Onnx opset version")
    parser.add_argument("--onnxgraph",
                        default="graph",
                        type=str,
                        help="Onnx graph name")

    parser.add_argument(
        "--end_point",
        default=None,
        type=str,
        help=
        "end_point is used to specify which part of the mge model should be converted",
    )

    args = parser.parse_args()
    outspec = None
    if args.end_point is not None:
        outspec = args.end_point.split(";")

    # change batchsize to 1
    input_file = io.BytesIO()
    change_batch_and_dump(args.input, input_file)
    input_file.seek(0)

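    # generate random inputs matching each input var's shape and dtype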
    inputs = {}
    net = TopologyNetwork(input_file, outspec=outspec)
    for var in net.input_vars:
        shape = list(var.shape)
        data = np.random.randint(0, high=255, size=shape, dtype=np.uint8)
        data = data.astype(var.dtype)
        inputs[var.name] = data

    # inference mge
    input_file.seek(0)
    inference = GraphInference(input_file)
    mge_outputs = inference.run(inp_dict=inputs)
    mge_results = OrderedDict()
    for var_name, value in mge_outputs.items():
        var_name = var_name.replace(":", "_")
        var_name = var_name.replace(".", "_")
        var_name = var_name.replace(",", "_")
        mge_results[var_name] = value

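    # cross-check the Caffe and ONNX conversions against the MegEngine outputs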
    check_caffe_result(net, inputs, mge_results, args.prototxt,
                       args.caffemodel)
    time.sleep(1)
    check_onnx_result(net, inputs, mge_results, args.onnxoutput,
                      args.onnxopset, args.onnxgraph)