Code Example #1
def test_byoc_microtvm(merge_compiler_regions):
    """This is a simple test to check BYOC capabilities of AOT - with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036"""
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOTTestRunner(pass_config={"tir.usmp.enable": True})

    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))

    # z0 = x + w0
    x_ = compiler_begin(x, "ccompiler")
    w0_ = compiler_begin(w0, "ccompiler")
    z0_ = relay.add(x_, w0_)
    z0 = compiler_end(z0_, "ccompiler")

    # z1 = z0 + w1
    z0__ = compiler_begin(z0, "ccompiler")
    w1_ = compiler_begin(w1, "ccompiler")
    z1_ = relay.add(z0__, w1_)
    z1 = compiler_end(z1_, "ccompiler")

    # z2 = z0 + z1
    z2 = relay.add(z0, z1)

    f = relay.Function([x, w0, w1], z2)
    mod = tvm.IRModule()
    mod["main"] = f

    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)

    mod = transform.PartitionGraph("mod_name")(mod)
    mod = transform.InferType()(mod)

    x_data = [("x", np.random.rand(10, 10).astype("float32"))]
    w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]

    map_inputs = OrderedDict(x_data + w_data)
    output_list = generate_ref_data(mod, map_inputs)

    compiled_test_mods = compile_models(
        AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
    )

    for compiled_model in compiled_test_mods:
        check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)

    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
Code Example #2
def test_byoc_microtvm(merge_compiler_regions):
    """
    This is a simple test to check BYOC capabilities of AOT
    with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036
    """
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER

    input_x = relay.var("x", shape=(10, 10))
    input_w0 = relay.var("w0", shape=(10, 10))
    input_w1 = relay.var("w1", shape=(10, 10))

    # z0 = x + w0
    marked_input_x = compiler_begin(input_x, "ccompiler")
    marked_input_w0 = compiler_begin(input_w0, "ccompiler")
    add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
    end_inner_add = compiler_end(add_x_and_w0, "ccompiler")

    # z1 = z0 + w1
    marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
    marked_w1 = compiler_begin(input_w1, "ccompiler")
    add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
    end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")

    # z2 = z0 + z1
    final_add = relay.add(end_inner_add, end_outer_add)

    relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
    mod = tvm.IRModule()
    mod["main"] = relay_func

    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)

    mod = transform.PartitionGraph("mod_name")(mod)
    mod = transform.InferType()(mod)

    x_data = [("x", np.random.rand(10, 10).astype("float32"))]
    w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32"))
              for i in range(2)]

    map_inputs = OrderedDict(x_data + w_data)
    output_list = generate_ref_data(mod, map_inputs)
    compile_and_run(
        AOTTestModel(name="my_mod",
                     module=mod,
                     inputs=map_inputs,
                     outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Code Example #3
def test_region_set_creator_diamond():
    data = relay.var('data', shape=(10, 10))
    cb_1 = compiler_begin(data, 'test_target')
    O_1 = relay.abs(cb_1)
    ce_1 = compiler_end(O_1, 'test_target')
    ce_2 = compiler_end(O_1, 'test_target')
    cb_2 = compiler_begin(ce_1, 'test_target')
    O_2 = relay.nn.relu(cb_2)
    ce_3 = compiler_end(O_2, 'test_target')
    cb_d = compiler_begin(ce_2, "default")
    X = relay.tanh(cb_d)
    ce_d = compiler_end(X, 'default')
    cb_3 = compiler_begin(ce_3, 'test_target')
    cb_4 = compiler_begin(ce_d, 'test_target')
    O_3 = relay.add(cb_3, cb_4)
    ce_4 = compiler_end(O_3, 'test_target')
    diamond = relay.Function([data], ce_4)

    region_set = relay.analysis.AnnotatedRegionSet(
        diamond, relay.op.get("annotation.compiler_begin"),
        relay.op.get("annotation.compiler_end"))
    assert len(region_set) == 4
    check_region(
        region_set,
        'test_target',
        [cb_1],
        [cb_1, O_1, ce_1, ce_2],
        [ce_1, ce_2],
    )
    check_region(
        region_set,
        'test_target',
        [cb_2],
        [cb_2, O_2, ce_3],
        [ce_3],
    )
    check_region(
        region_set,
        'default',
        [cb_d],
        [cb_d, X, ce_d],
        [ce_d],
    )
    check_region(
        region_set,
        'test_target',
        [cb_3, cb_4],
        [cb_3, cb_4, O_3, ce_4],
        [ce_4],
    )
Code Example #4
def test_byoc_microtvm(merge_compiler_regions):
    """This is a simple test to check BYOC capabilities of AOT - with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036"""
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER

    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))

    # z0 = x + w0
    x_ = compiler_begin(x, "ccompiler")
    w0_ = compiler_begin(w0, "ccompiler")
    z0_ = relay.add(x_, w0_)
    z0 = compiler_end(z0_, "ccompiler")

    # z1 = z0 + w1
    z0__ = compiler_begin(z0, "ccompiler")
    w1_ = compiler_begin(w1, "ccompiler")
    z1_ = relay.add(z0__, w1_)
    z1 = compiler_end(z1_, "ccompiler")

    # z2 = z0 + z1
    z2 = relay.add(z0, z1)

    f = relay.Function([x, w0, w1], z2)
    mod = tvm.IRModule()
    mod["main"] = f

    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)

    mod = transform.PartitionGraph("mod_name")(mod)
    mod = transform.InferType()(mod)

    x_data = [("x", np.random.rand(10, 10).astype("float32"))]
    w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32"))
              for i in range(2)]

    map_inputs = OrderedDict(x_data + w_data)
    output_list = generate_ref_data(mod, map_inputs)
    compile_and_run(
        AOTTestModel(name="my_mod",
                     module=mod,
                     inputs=map_inputs,
                     outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Code Example #5
    def visit_call(self, call):
        new_args = []
        for arg in call.args:
            ann = compiler_begin(self.visit(arg), "ccompiler")
            new_args.append(ann)
        new_call = relay.Call(call.op, new_args)
        return compiler_end(new_call, "ccompiler")
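This visitor wraps every call for the "ccompiler" backend, but it is shown without its surrounding class. Below is a minimal, hedged sketch of how such an annotator is typically wired up and run before graph partitioning; the WholeGraphAnnotator class name, the ExprMutator base class, and the annotate_and_partition driver are illustrative assumptions, not part of the snippet above.

# Hypothetical usage sketch (assumes a standard tvm.relay environment).
# WholeGraphAnnotator and annotate_and_partition are illustrative names.
from tvm import relay
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.op.annotation import compiler_begin, compiler_end


class WholeGraphAnnotator(ExprMutator):
    """Wrap every call in compiler_begin/compiler_end for the "ccompiler" backend."""

    def visit_call(self, call):
        new_args = [compiler_begin(self.visit(arg), "ccompiler") for arg in call.args]
        new_call = relay.Call(call.op, new_args)
        return compiler_end(new_call, "ccompiler")


def annotate_and_partition(mod):
    # Annotate the main function, then let PartitionGraph lift the annotated
    # regions into external functions handled by the "ccompiler" codegen.
    mod["main"] = WholeGraphAnnotator().visit(mod["main"])
    return relay.transform.PartitionGraph()(mod)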
Code Example #6
    def expected():
        data = relay.var('data', shape=(10, 10))
        cb_1 = compiler_begin(data, "test")
        O_1 = relay.abs(cb_1)
        ce_2 = compiler_end(O_1, "test")
        O_2 = relay.nn.relu(O_1)
        ce_3 = compiler_end(O_2, "test")

        X = relay.tanh(ce_2)

        cb_3 = compiler_begin(ce_3, "test")
        cb_4 = compiler_begin(X, "test")
        O_3 = relay.add(cb_3, cb_4)
        ce_4 = compiler_end(O_3, "test")

        func = relay.Function([data], ce_4)
        return func
Code Example #7
    def create_graph():
        data = relay.var('data', shape=(10, 10))

        cb_1 = compiler_begin(data, 'test_target')
        O_1 = relay.abs(cb_1)
        ce_2 = compiler_end(O_1, 'test_target')
        O_2 = relay.nn.relu(O_1)
        ce_3 = compiler_end(O_2, 'test_target')

        X = relay.tanh(ce_2)

        cb_3 = compiler_begin(ce_3, 'test_target')
        cb_4 = compiler_begin(X, 'test_target')
        O_3 = relay.add(cb_3, cb_4)
        ce_4 = compiler_end(O_3, 'test_target')

        func = relay.Function([data], ce_4)
        return func
Code Example #8
File: vitis_ai.py  Project: jiajuns/tvm
    def visit_tuple_getitem(self, op):
        """Add compiler_begin and compiler_end annotations to TupleGetItem"""
        if int(hash(op.tuple_value)) in annotator.relay_ids:
            tuple_value = compiler_begin(super().visit(op.tuple_value), annotator.compiler)
            return compiler_end(TupleGetItem(tuple_value, op.index), annotator.compiler)
        else:
            tuple_value = super().visit(op.tuple_value)
            return TupleGetItem(tuple_value, op.index)
Code Example #9
def test_region_set_creator_merged():
    data = relay.var("data", shape=(10, 10))
    cb_1 = compiler_begin(data, "test_target")
    O_1 = relay.abs(cb_1)
    ce_2 = compiler_end(O_1, "test_target")
    O_2 = relay.nn.relu(O_1)
    ce_3 = compiler_end(O_2, "test_target")
    cb_d = compiler_begin(ce_2, "default")
    X = relay.tanh(cb_d)
    ce_d = compiler_end(X, "default")
    cb_3 = compiler_begin(ce_3, "test_target")
    cb_4 = compiler_begin(ce_d, "test_target")
    O_3 = relay.add(cb_3, cb_4)
    O_4 = relay.add(cb_3, cb_4)
    O_5 = relay.Tuple([O_3, O_4])
    ce_4 = compiler_end(O_5, "test_target")
    merged = relay.Function([data], ce_4)

    region_set = relay.analysis.AnnotatedRegionSet(
        merged, relay.op.get("annotation.compiler_begin"),
        relay.op.get("annotation.compiler_end"))
    assert len(region_set) == 3
    check_region(
        region_set,
        "test_target",
        [cb_1],
        [cb_1, O_1, O_2, ce_2, ce_3],
        [ce_2, ce_3],
    )
    check_region(
        region_set,
        "default",
        [cb_d],
        [cb_d, X, ce_d],
        [ce_d],
    )
    check_region(
        region_set,
        "test_target",
        [cb_3, cb_4],
        [cb_3, cb_4, O_3, O_4, O_5, ce_4],
        [ce_4],
    )
Code Example #10
    def diamond_graph_fanouts():
        data = relay.var('data', shape=(10, 10))
        cb_1 = compiler_begin(data, "test")
        O_1 = relay.abs(cb_1)
        ce_1 = compiler_end(O_1, "test")
        ce_2 = compiler_end(O_1, "test")
        cb_2 = compiler_begin(ce_1, "test")
        O_2 = relay.nn.relu(cb_2)
        ce_3 = compiler_end(O_2, "test")

        X = relay.tanh(ce_2)

        cb_3 = compiler_begin(ce_3, "test")
        cb_4 = compiler_begin(X, "test")
        O_3 = relay.add(cb_3, cb_4)
        ce_4 = compiler_end(O_3, "test")

        diamond = relay.Function([data], ce_4)
        return diamond
Code Example #11
    def visit_call(self, call):
        op_name = call.op.name
        if op_name in annotator.op_list:
            new_args = []
            for arg in call.args:
                ann = compiler_begin(super().visit(arg), annotator.compiler)
                new_args.append(ann)
            new_call = relay.Call(call.op, new_args, call.attrs, call.type_args)
            return compiler_end(new_call, annotator.compiler)
        else:
            return super().visit_call(call)
Code Example #12
    def expected():
        in_1 = relay.var('in_1', shape=(10, 10), dtype='float32')
        in_2 = relay.var('in_2', shape=(10, 10), dtype='float32')
        in_3 = relay.var('in_3', shape=(10, 10), dtype='float32')
        in_4 = relay.var('in_4', shape=(10, 10), dtype='float32')
        in_5 = relay.var('in_5', shape=(10, 10), dtype='float32')
        in_6 = relay.var('in_6', shape=(10, 10), dtype='float32')
        in_7 = relay.var('in_7', shape=(10, 10), dtype='float32')
        in_8 = relay.var('in_8', shape=(10, 10), dtype='float32')
        in_9 = relay.var('in_9', shape=(10, 10), dtype='float32')
        in_10 = relay.var('in_10', shape=(10, 10), dtype='float32')

        begin0 = compiler_begin(in_1, "test")
        begin1 = compiler_begin(in_2, "test")
        begin2 = compiler_begin(in_3, "test")
        begin3 = compiler_begin(in_4, "test")
        node0 = relay.add(begin0, begin1)
        node1 = relay.add(begin2, begin3)
        node2 = relay.add(node0, node1)

        node3 = relay.subtract(in_5, in_6)
        node4 = relay.subtract(in_7, node3)

        begin4 = compiler_begin(node4, "test")
        begin5 = compiler_begin(in_9, "test")
        node5 = relay.add(node2, begin4)
        end1 = compiler_end(node5, "test")

        node6 = relay.subtract(in_8, end1)

        node7 = relay.add(begin5, node5)
        end2 = compiler_end(node7, "test")
        begin6 = compiler_begin(end2, "test")
        begin7 = compiler_begin(node6, "test")

        node8 = relay.add(begin7, begin6)

        begin8 = compiler_begin(in_10, "test")
        node9 = relay.add(begin8, node8)
        end3 = compiler_end(node9, "test")

        f = relay.Function(
            [in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10],
            end3)
        mod = tvm.IRModule.from_expr(f)
        return mod
Code Example #13
def test_load_params_with_constants_in_ext_codegen():
    # After binding params and partitioning, graph_module.get_params()
    # might contain parameters that are not graph runtime inputs but,
    # for example, constants in external functions.
    y_in = np.ones((1,)).astype("float32")
    params = {"y": y_in}
    mod = tvm.IRModule()
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1,))
    xcb = compiler_begin(x, "ccompiler")
    ycb = compiler_begin(y, "ccompiler")
    z = relay.add(xcb, ycb)
    zce = compiler_end(z, "ccompiler")
    mod["main"] = relay.Function([x, y], zce)
    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = transform.PartitionGraph()(mod)

    graph_module = relay.build(mod, target="llvm", params=params)
    lib = update_lib(graph_module.get_lib())
    rt_mod = tvm.contrib.graph_runtime.create(graph_module.get_json(), lib, tvm.cpu(0))
    rt_mod.load_params(runtime.save_param_dict(graph_module.get_params()))
Code Example #14
File: libtorch.py  Project: wenxcs/tvm
def torchop(script_fn, *params):
    """Insert an Operation executed in the PyTorch JIT

    The operation includes backend annotation

    Currently, only tensors are supported. The shape inference
    assumes that input shapes (and not values) determine output shapes."""
    return compiler_end(
        relay.op._make.torchop([compiler_begin(p, "torch") for p in params],
                               script_fn.save_to_buffer()),
        "torch",
    )
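torchop only constructs the annotated Relay call; a hedged usage sketch follows. It assumes PyTorch is installed and that TVM was built with the libtorch BYOC backend; the names script_fn, x, and y are illustrative.

# Hypothetical usage sketch (assumes PyTorch and a TVM build with the
# libtorch BYOC backend; names below are illustrative).
import torch
import tvm
from tvm import relay


@torch.jit.script
def script_fn(x, y):
    return x * 2.0 + y


x = relay.var("x", shape=(10, 10), dtype="float32")
y = relay.var("y", shape=(10, 10), dtype="float32")
# torchop wraps each input in compiler_begin(..., "torch") and the result in
# compiler_end(..., "torch"), so PartitionGraph can later offload the call.
out = torchop(script_fn, x, y)
mod = tvm.IRModule.from_expr(relay.Function([x, y], out))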
Code Example #15
    def create_graph():
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16,), "float32"))

        data_cb = compiler_begin(data, "test_target")
        weight_cb = compiler_begin(weight, "test_target")
        bn_gamma_cb = compiler_begin(bn_gamma, "test_target")
        bn_beta_cb = compiler_begin(bn_beta, "test_target")
        bn_mean_cb = compiler_begin(bn_mean, "test_target")
        bn_var_cb = compiler_begin(bn_var, "test_target")

        conv_o = relay.nn.conv2d(
            data=data_cb, weight=weight_cb, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )

        bn_o = relay.nn.batch_norm(conv_o, bn_gamma_cb, bn_beta_cb, bn_mean_cb, bn_var_cb)

        relu_o = relay.nn.relu(bn_o[0])
        relu_o_ce = compiler_end(relu_o, "test_target")

        bn_omean = bn_o[1]
        rebn_omean_ce = compiler_end(bn_omean, "test_target")
        bn_ovar = bn_o[2]
        bn_ovar_ce = compiler_end(bn_ovar, "test_target")

        dummy_mean_abs = relay.abs(rebn_omean_ce)
        dummy_ovar_abs = relay.abs(bn_ovar_ce)
        dummy_tuple = relay.Tuple((relu_o_ce, dummy_mean_abs, dummy_ovar_abs))

        func = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], dummy_tuple)
        return func
Code Example #16
    def visit_call(self, call):
        if call.op.name == "add":  # Annotate begin at args
            if self.in_compiler == 1:
                lhs = compiler_begin(super().visit(call.args[0]), "ccompiler")
                rhs = compiler_begin(super().visit(call.args[1]), "ccompiler")
                op = relay.add(lhs, rhs)
                self.in_compiler = 2
                return op
        elif call.op.name == "subtract":
            if self.in_compiler == 1:
                lhs = super().visit(call.args[0])
                rhs = super().visit(call.args[1])
                if isinstance(lhs, relay.expr.Var):
                    lhs = compiler_begin(lhs, "ccompiler")
                if isinstance(rhs, relay.expr.Var):
                    rhs = compiler_begin(rhs, "ccompiler")
                return relay.subtract(lhs, rhs)
        elif call.op.name == "multiply":  # Annotate end at output
            self.in_compiler = 1
            lhs = super().visit(call.args[0])
            rhs = super().visit(call.args[1])
            if isinstance(lhs, relay.expr.Var):
                lhs = compiler_begin(lhs, "ccompiler")
            if isinstance(rhs, relay.expr.Var):
                rhs = compiler_begin(rhs, "ccompiler")
            op = relay.multiply(lhs, rhs)
            if self.in_compiler == 2:
                op = compiler_end(op, "ccompiler")
            self.in_compiler = 0
            return op
        return super().visit_call(call)
Code Example #17
File: vitis_ai.py  Project: jiajuns/tvm
    def visit_call(self, call):
        """Add compiler_begin and compiler_end annotations to the Call expr"""
        if int(hash(call)) in annotator.relay_ids:
            new_args = []
            for arg in call.args:
                ann = compiler_begin(super().visit(arg), annotator.compiler)
                new_args.append(ann)
            new_call = relay.Call(call.op, new_args, call.attrs, call.type_args)
            return compiler_end(new_call, annotator.compiler)
        else:
            return super().visit_call(call)
Code Example #18
    def visit_call(self, call):
        curr_last = self.last_call
        self.last_call = False

        params = []
        for arg in call.args:
            param = super().visit(arg)
            if isinstance(param, relay.expr.Var):
                param = compiler_begin(param, self.compiler)
            params.append(param)

        new_call = relay.Call(call.op, params, call.attrs)
        if curr_last:
            new_call = compiler_end(new_call, self.compiler)
        return new_call
Code Example #19
File: vitis_ai.py  Project: jiajuns/tvm
    def visit_tuple(self, tup):
        """Add compiler_begin and compiler_end annotations to Tuple"""
        field_list = []
        cond = int(hash(tup))
        for field in tup.fields:
            if cond in annotator.relay_ids:
                field_list.append(compiler_begin(super().visit(field), annotator.compiler))
            else:
                field_list.append(super().visit(field))
        if cond in annotator.relay_ids:
            return compiler_end(Tuple(field_list), annotator.compiler)
        else:
            return Tuple(field_list)
Code Example #20
    def visit_call(self, call):

        if call.op.name == 'nn.global_avg_pool2d':
            self.compiler_open = True
        compiler_open = self.compiler_open

        params = []
        for arg in call.args:
            param = super().visit(arg)
            if call.op.name == 'nn.global_avg_pool2d':
                param = compiler_end(param, self.compiler)
            if compiler_open and isinstance(param, relay.expr.Var):
                param = compiler_begin(param, self.compiler)
            params.append(param)

        new_call = relay.Call(call.op, params, call.attrs)
        return new_call
Code Example #21
    def visit_constant(self, constant):
        new_constant = compiler_begin(constant, self.compiler)
        return new_constant
Code Example #22
def test_partition():
    in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
    in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
    in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
    in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
    in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
    in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
    in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
    in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
    in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
    in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")

    begin0 = compiler_begin(in_1, "onnx")
    begin1 = compiler_begin(in_2, "onnx")
    begin2 = compiler_begin(in_3, "onnx")
    begin3 = compiler_begin(in_4, "onnx")
    node0 = relay.add(begin0, begin1)
    node1 = relay.add(begin2, begin3)
    end0 = compiler_end(node0, "onnx")
    end1 = compiler_end(node1, "onnx")
    begin4 = compiler_begin(end0, "onnx")
    begin5 = compiler_begin(end1, "onnx")
    node2 = relay.add(begin4, begin5)
    end2 = compiler_end(node2, "onnx")

    dbegin0 = compiler_begin(in_5, "default")
    dbegin1 = compiler_begin(in_6, "default")
    node3 = relay.subtract(dbegin0, dbegin1)
    dbegin2 = compiler_begin(in_7, "default")
    dend1 = compiler_end(node3, "default")
    dbegin3 = compiler_begin(dend1, "default")
    node4 = relay.subtract(dbegin2, dbegin3)
    dend2 = compiler_end(node4, "default")

    begin6 = compiler_begin(end2, "onnx")
    begin7 = compiler_begin(dend2, "onnx")
    node5 = relay.add(begin6, begin7)
    end3 = compiler_end(node5, "onnx")
    end4 = compiler_end(node5, "onnx")
    dbegin4 = compiler_begin(in_8, "default")
    dbegin5 = compiler_begin(end3, "default")
    node6 = relay.subtract(dbegin4, dbegin5)
    begin8 = compiler_begin(in_9, "onnx")
    begin9 = compiler_begin(end4, "onnx")
    node7 = relay.multiply(begin8, begin9)
    end5 = compiler_end(node7, "onnx")

    dend3 = compiler_end(node6, "default")
    begin10 = compiler_begin(dend3, "onnx")
    begin11 = compiler_begin(end5, "onnx")
    node8 = relay.add(begin10, begin11)
    end6 = compiler_end(node8, "onnx")
    begin12 = compiler_begin(in_10, "onnx")
    begin13 = compiler_begin(end6, "onnx")
    node9 = relay.add(begin12, begin13)
    end7 = compiler_end(node9, "onnx")

    func = relay.Function(
        [in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end7)

    target = "llvm"
    mod = IRModule.from_expr(func)
    mod = transform.PartitionGraph()(mod)

    with tvm.transform.PassContext(opt_level=3, disabled_pass=["FuseOps"]):
        graph_json, mod1, params = relay.build(mod, target)

    assert mod1.type_key == "metadata"
    assert mod1.imported_modules[0].type_key == "llvm"
    assert mod1.imported_modules[0].get_source()
    assert mod1.imported_modules[1].type_key == "onnx"
    assert mod1.imported_modules[1].get_source()
Code Example #23
    def annotated():
        in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
        in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
        in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
        in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
        in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
        in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
        in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
        in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
        in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
        in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")

        begin0 = compiler_begin(in_1, "test")
        begin1 = compiler_begin(in_2, "test")
        begin2 = compiler_begin(in_3, "test")
        begin3 = compiler_begin(in_4, "test")
        node0 = relay.add(begin0, begin1)
        node1 = relay.add(begin2, begin3)
        end0 = compiler_end(node0, "test")
        end1 = compiler_end(node1, "test")
        begin4 = compiler_begin(end0, "test")
        begin5 = compiler_begin(end1, "test")
        node2 = relay.add(begin4, begin5)
        end2 = compiler_end(node2, "test")

        dbegin0 = compiler_begin(in_5, "default")
        dbegin1 = compiler_begin(in_6, "default")
        node3 = relay.subtract(dbegin0, dbegin1)
        dbegin2 = compiler_begin(in_7, "default")
        dend1 = compiler_end(node3, "default")
        dbegin3 = compiler_begin(dend1, "default")
        node4 = relay.subtract(dbegin2, dbegin3)
        dend2 = compiler_end(node4, "default")

        begin6 = compiler_begin(end2, "test")
        begin7 = compiler_begin(dend2, "test")
        node5 = relay.add(begin6, begin7)
        end3 = compiler_end(node5, "test")
        end4 = compiler_end(node5, "test")
        dbegin4 = compiler_begin(in_8, "default")
        dbegin5 = compiler_begin(end3, "default")
        node6 = relay.subtract(dbegin4, dbegin5)
        begin8 = compiler_begin(in_9, "test")
        begin9 = compiler_begin(end4, "test")
        node7 = relay.add(begin8, begin9)
        end5 = compiler_end(node7, "test")

        dend3 = compiler_end(node6, "default")
        begin10 = compiler_begin(dend3, "test")
        begin11 = compiler_begin(end5, "test")
        node8 = relay.add(begin10, begin11)
        end6 = compiler_end(node8, "test")
        begin12 = compiler_begin(in_10, "test")
        begin13 = compiler_begin(end6, "test")
        node9 = relay.add(begin12, begin13)
        end7 = compiler_end(node9, "test")

        f = relay.Function(
            [in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10],
            end7)
        mod = tvm.IRModule.from_expr(f)
        return mod