Example #1
def onnx_to_caffe2(onnx_model, output, init_net_output):
    onnx_model_proto = ModelProto()
    onnx_model_proto.ParseFromString(onnx_model.read())

    init_net, predict_net = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
    init_net_output.write(init_net.SerializeToString())
    output.write(predict_net.SerializeToString())
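A minimal invocation sketch for the helper above. It assumes `c2` is `Caffe2Backend` imported under that alias (as the later examples suggest) and uses placeholder file names; the function expects an already opened ONNX file plus two writable binary streams.

from onnx import ModelProto
from caffe2.python.onnx.backend import Caffe2Backend as c2

# Placeholder paths: the ONNX model is read, the two Caffe2 nets are written out.
with open("model.onnx", "rb") as onnx_file, \
        open("predict_net.pb", "wb") as predict_out, \
        open("init_net.pb", "wb") as init_out:
    onnx_to_caffe2(onnx_file, predict_out, init_out)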
Example #2
def convert_to_caffe2(net, net_type):
    # net_type is only used to name the exported model files.
    model_path = f"models/{net_type}.onnx"
    init_net_path = f"models/{net_type}_init_net.pb"
    init_net_txt_path = f"models/{net_type}_init_net.pbtxt"
    predict_net_path = f"models/{net_type}_predict_net.pb"
    predict_net_txt_path = f"models/{net_type}_predict_net.pbtxt"

    dummy_input = torch.randn(1, 3, 300, 300)
    torch.onnx.export(net, dummy_input, model_path, verbose=False, output_names=['scores', 'boxes'])

    model = onnx.load(model_path)
    init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)

    print(f"Save the model in binary format to the files {init_net_path} and {predict_net_path}.")

    with open(init_net_path, "wb") as fopen:
        fopen.write(init_net.SerializeToString())
    with open(predict_net_path, "wb") as fopen:
        fopen.write(predict_net.SerializeToString())

    print(f"Save the model in txt format to the files {init_net_txt_path} and {predict_net_txt_path}.")
    with open(init_net_txt_path, 'w') as f:
        f.write(str(init_net))

    with open(predict_net_txt_path, 'w') as f:
        f.write(str(predict_net))
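Before handing the exported graph to `onnx_graph_to_caffe2_net`, it can be worth validating it with the ONNX checker; a small sketch, assuming the same `models/` layout and a hypothetical net_type of "mb1-ssd".

import onnx

model = onnx.load("models/mb1-ssd.onnx")          # hypothetical net_type
onnx.checker.check_model(model)                   # raises if the graph is malformed
print(onnx.helper.printable_graph(model.graph))   # human-readable graph summary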
Example #3
def onnx_to_caffe2(onnx_model, output, init_net_output):
    onnx_model_proto = ModelProto()
    onnx_model_proto.ParseFromString(onnx_model.read())

    init_net, predict_net = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
    init_net_output.write(init_net.SerializeToString())
    output.write(predict_net.SerializeToString())
Example #4
def _export_via_onnx(model, inputs):
    # make sure all modules are in eval mode; the ONNX export may change the
    # training state of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)

    # Convert ONNX model to Caffe2 protobuf
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    return predict_net, init_net
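A usage sketch for the helper above. Because `_check_eval` asserts that no submodule is in training mode, the caller has to switch the model to eval mode first; the toy module and input shape below are placeholders.

import torch
import torch.nn as nn

toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())  # stand-in model
toy.eval()                                    # required by the _check_eval assertion
inputs = torch.randn(1, 3, 224, 224)
predict_net, init_net = _export_via_onnx(toy, inputs)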
Example #5
def create_caffe2_predictor(onnx_file_path):
    with open(onnx_file_path, 'rb') as onnx_model:
        onnx_model_proto = ModelProto()
        onnx_model_proto.ParseFromString(onnx_model.read())
        init_net, predict_net = c2.onnx_graph_to_caffe2_net(
            onnx_model_proto)
        predictor = workspace.Predictor(init_net, predict_net)
    return predictor
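A hedged sketch of running the returned predictor. The input blob name ('data') and shape are assumptions; they depend on the graph that was exported to ONNX.

import numpy as np

predictor = create_caffe2_predictor("model.onnx")           # placeholder path
img = np.random.rand(1, 3, 224, 224).astype(np.float32)     # placeholder input
results = predictor.run({'data': img})  # blob name is model-specific; older Caffe2 builds took a plain list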
Example #6
def main():
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.c2_prefix + '.init.pb', "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.c2_prefix + '.predict.pb', "wb") as f:
        f.write(caffe2_predict_str)
Example #7
def onnx2caffe():
    print("==> Exporting ONNX to Caffe2 format")
    onnx_model = onnx.load('weights/model.onnx')
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(
        onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open('weights/model.init.pb', "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open('weights/model.predict.pb', "wb") as f:
        f.write(caffe2_predict_str)
    print("==> Done")
Example #8
    def __call__(self, graph, folder):
        onnx_model = build_model(graph, ir_version=6, opset_version=9)
        if not onnx_model.graph.name:
            onnx_model.graph.name = 'Graph'

        init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(
            onnx_model)
        value_info = get_value_info(onnx_model)

        if not os.path.exists(folder):
            os.mkdir(folder)

        save_caffe2_model(folder, init_net, predict_net, value_info)
Example #9
def convert_onnx_to_caffe2_v2(onnx_path, init_net_path, predict_net_path):
    '''
    Equivalent command line: convert-onnx-to-caffe2 path/to/model.onnx --output predict_net.pb --init-net-output init_net.pb
    :param onnx_path:
    :param init_net_path:
    :param predict_net_path:
    :return:
    '''
    model = onnx.load(onnx_path)
    # onnx.checker.check_model(model)
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(model)
    with open(init_net_path, "wb") as f:
        f.write(init_net.SerializeToString())
    with open(predict_net_path, "wb") as f:
        f.write(predict_net.SerializeToString())
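For reference, an invocation mirroring the command line mentioned in the docstring, with placeholder paths.

convert_onnx_to_caffe2_v2("path/to/model.onnx",
                          init_net_path="init_net.pb",
                          predict_net_path="predict_net.pb")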
Example #10
    def eval_onnx_node(self, no_check_set):
        """Run a Caffe2 program using their ONNX backend.

        Prior to running the backend, use the Paddle scope to construct
        ONNX ops and prepare the inputs and output values based on ONNX
        compatibility.
        """
        # Convert inputs and outputs to ONNX tensors.
        # Use the Paddle fetch_list to prepare the outputs.
        inputs = [
            paddle_variable_to_onnx_tensor(v, self.block)
            for v in self.feed_map
        ]

        fetch_target_names = [
            fetch_target.name for fetch_target in self.fetch_list \
            if fetch_target.name not in no_check_set
        ]
        outputs = [
            paddle_variable_to_onnx_tensor(v, self.block)
            for v in fetch_target_names
        ]
        # Construct the ONNX model using paddle-onnx.
        onnx_node = ops.node_maker[self.op_type](operator=self.op,
                                                 block=self.block)

        node_list = list(onnx_node) if isinstance(onnx_node,
                                                  tuple) else [onnx_node]

        for node in node_list:
            check_node(node)
        #onnx_graph = make_graph(node_list, self.op_type, inputs, vars)
        onnx_graph = make_graph(node_list, self.op_type, inputs, outputs)
        onnx_model = make_model(onnx_graph, producer_name='unittest')

        # Expand input dictionary if there are tensor arrays
        input_map = {}
        for v in self.inputs:
            if isinstance(self.inputs[v], list):
                input_map.update(self.inputs[v])
            else:
                input_map[v] = self.inputs[v]

        # Run the Caffe2Backend with the ONNX model.
        rep = Caffe2Backend.prepare(onnx_model, device='CPU')
        in_vals = [input_map[input.name] for input in inputs]
        outs = rep.run(in_vals)
        return outs
Example #11
def export_caffe2_detection_model(model: torch.nn.Module,
                                  tensor_inputs: List[torch.Tensor]):
    """
    Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.

    Args:
        model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
        tensor_inputs: a list of tensors that caffe2 model takes as input.
    """
    model = copy.deepcopy(model)
    assert isinstance(model, torch.nn.Module)
    assert hasattr(model, "encode_additional_info")

    # Export via ONNX
    logger.info(
        "Exporting a {} model via ONNX ...".format(type(model).__name__) +
        " Some warnings from ONNX are expected and are usually not to worry about."
    )
    onnx_model = export_onnx_model(model, (tensor_inputs, ))
    # Convert ONNX model to Caffe2 protobuf
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
    table = tabulate(ops_table,
                     headers=["type", "input", "output"],
                     tablefmt="pipe")
    logger.info(
        "ONNX export Done. Exported predict_net (before optimizations):\n" +
        colored(table, "cyan"))

    # Apply protobuf optimization
    fuse_alias_placeholder(predict_net, init_net)
    if any(t.device.type != "cpu" for t in tensor_inputs):
        fuse_copy_between_cpu_and_gpu(predict_net)
        remove_dead_end_ops(init_net)
        _assign_device_option(predict_net, init_net, tensor_inputs)
    params, device_options = get_params_from_init_net(init_net)
    predict_net, params = remove_reshape_for_fc(predict_net, params)
    init_net = construct_init_net_from_params(params, device_options)
    group_norm_replace_aten_with_caffe2(predict_net)

    # Record necessary information for running the pb model in Detectron2 system.
    model.encode_additional_info(predict_net, init_net)

    logger.info("Operators used in predict_net: \n{}".format(
        _op_stats(predict_net)))
    logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))

    return predict_net, init_net
Example #12
def validate(args):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    [fluid_infer_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(args.fluid_model, exe)

    input_shapes = [
        fluid_infer_program.global_block().var(var_name).shape
        for var_name in feed_target_names
    ]
    input_shapes = [
        shape if shape[0] > 0 else (args.batch_size, ) + shape[1:]
        for shape in input_shapes
    ]

    # Generate dummy data as inputs
    inputs = [
        (args.b - args.a) * np.random.random(shape).astype("float32") + args.a
        for shape in input_shapes
    ]

    # Fluid inference 
    fluid_results = exe.run(fluid_infer_program,
                            feed=dict(zip(feed_target_names, inputs)),
                            fetch_list=fetch_targets)

    # Remove these prints some day
    print("Inference results for fluid model:")
    print(fluid_results)
    print('\n')

    # ONNX inference, using caffe2 as the backend
    onnx_model = load(args.onnx_model)
    rep = Caffe2Backend.prepare(onnx_model, device='CPU')
    onnx_results = rep.run(inputs)
    print("Inference results for ONNX model:")
    print(onnx_results)
    print('\n')

    for ref, hyp in zip(fluid_results, onnx_results):
        np.testing.assert_almost_equal(ref, hyp, decimal=args.expected_decimal)
    print("The exported model achieves {}-decimal precision.".format(
        args.expected_decimal))
Example #13
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
import argparse

parser = argparse.ArgumentParser(description="Convert ONNX models "
                                 "to Caffe2 models")

parser.add_argument("--onnx-model", required=True, help="The ONNX model")
parser.add_argument("--caffe2-init",
                    required=True,
                    help="The output file for the caffe2 model init file. ")
parser.add_argument("--caffe2-predict",
                    required=True,
                    help="The output file for the caffe2 model predict file. ")

if __name__ == "__main__":
    args = parser.parse_args()
    onnx_model = onnx.load(args.onnx_model)
    caffe2_init, caffe2_predict = \
        Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model, device="CUDA")
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.caffe2_init, "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.caffe2_predict, "wb") as f:
        f.write(caffe2_predict_str)
    with open("onnx-predict.pbtxt", "w") as f:
        f.write(str(caffe2_predict))
Example #14
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse

import onnx
from caffe2.python.onnx.backend import Caffe2Backend

parser = argparse.ArgumentParser(description="Convert ONNX models "
                                 "to Caffe2 models")

parser.add_argument("--onnx-model", required=True, help="The ONNX model")
parser.add_argument("--caffe2-init",
                    required=True,
                    help="The output file for the caffe2 model init file. ")
parser.add_argument("--caffe2-predict",
                    required=True,
                    help="The output file for the caffe2 model predict file. ")

if __name__ == "__main__":
    args = parser.parse_args()
    onnx_model = onnx.load(args.onnx_model)
    caffe2_init, caffe2_predict = \
        Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.caffe2_init, "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.caffe2_predict, "wb") as f:
        f.write(caffe2_predict_str)
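To sanity-check the files written above, they can be parsed back into NetDefs and executed with the Caffe2 workspace; a minimal sketch, assuming the graph has a single external input named 'data' with an NCHW image shape (both are assumptions that depend on the exported model).

import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace

init_net = caffe2_pb2.NetDef()
with open("caffe2_init.pb", "rb") as f:        # the file written as args.caffe2_init
    init_net.ParseFromString(f.read())
predict_net = caffe2_pb2.NetDef()
with open("caffe2_predict.pb", "rb") as f:     # the file written as args.caffe2_predict
    predict_net.ParseFromString(f.read())

workspace.RunNetOnce(init_net)                                       # materialize the weights
workspace.FeedBlob("data", np.zeros((1, 3, 224, 224), np.float32))   # assumed input blob/shape
workspace.CreateNet(predict_net)
workspace.RunNet(predict_net.name)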
Example #15
def PyTorchModule(helper,
                  model,
                  sample_arguments,
                  caffe2_inputs,
                  prefix_name=None):
    """
    Embed an ONNX-exportable PyTorch Model into a Caffe2 model being built.

    Arguments:
        helper (caffe2.python.core.ModelHelper): the model helper where
            this imported network should be inserted
        model (torch.nn.Module): the model to be exported
        sample_arguments (tuple of arguments): the inputs to
            the model, e.g., such that ``model(*args)`` is a valid
            invocation of the model.  Any non-Variable arguments will
            be hard-coded into the exported model; any Variable arguments
            will become inputs of the exported model, in the order they
            occur in args.  If args is a Variable, this is equivalent
            to having called it with a 1-ary tuple of that Variable.
            (Note: passing keyword arguments to the model is not currently
            supported.  Give us a shout if you need it.)
        caffe2_inputs (list of str or caffe2.python.core.BlobReference): the
           caffe2 Blobs that should be inputs to this network. Must be
           the same length as sample_arguments
        prefix_name: prefix name to add to each member of the blob, if None then
           a fresh prefix pytorch_input_N/ is used
    Returns:
        A tuple of caffe2.python.core.BlobReference objects referring to the
        model's outputs, or a single BlobReference when the model returns a single
        value.
    """
    if prefix_name is None:
        global _next_idx
        prefix_name = 'pytorch_import_' + str(_next_idx) + '/'
        _next_idx += 1

    # TODO: handle the case where model cannot be exported
    # and embed as a Python op in Caffe2
    f = io.BytesIO()
    torch.onnx.export(model, sample_arguments, f, export_params=True)
    onnx_model = onnx.load(io.BytesIO(f.getvalue()))
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    initialized = set([x.name for x in onnx_model.graph.initializer])
    uninitialized_inputs = {
        x.name: i
        for i, x in enumerate(onnx_model.graph.input)
        if x.name not in initialized
    }

    if (len(uninitialized_inputs) != len(caffe2_inputs)):
        raise ValueError('Expected {} inputs but found {}'.format(
            len(uninitialized_inputs), len(caffe2_inputs)))

    def remap_blob_name(name):
        if name in uninitialized_inputs:
            idx = uninitialized_inputs[name]
            return str(caffe2_inputs[idx])
        return prefix_name + name

    predict_net = Net(predict_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.net.AppendNet(predict_net)

    init_net = Net(init_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.param_init_net.AppendNet(init_net)

    results = tuple([
        BlobReference(remap_blob_name(x.name), helper.net)
        for x in onnx_model.graph.output
    ])
    return results
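A minimal sketch of embedding a PyTorch module into a Caffe2 model with the helper above, assuming a blob named 'embedded_input' already exists in the Caffe2 graph; the toy Linear module is a placeholder.

import torch
import torch.nn as nn
from caffe2.python import model_helper

helper = model_helper.ModelHelper(name="mixed_model")   # Caffe2 model being built
torch_part = nn.Linear(4, 2)                            # toy PyTorch module (placeholder)

outputs = PyTorchModule(helper,
                        torch_part,
                        (torch.randn(1, 4),),           # sample arguments used for export
                        ["embedded_input"])             # existing Caffe2 blob, assumed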
Example #16
import argparse

import onnx
from caffe2.python.onnx.backend import Caffe2Backend

parser = argparse.ArgumentParser(description="Convert ONNX models "
                                 "to Caffe2 models")

parser.add_argument("--onnx-model", required=True, help="The ONNX model")
parser.add_argument(
    "--caffe2-init",
    required=True,
    help="The output file for the caffe2 model init file. ",
)
parser.add_argument(
    "--caffe2-predict",
    required=True,
    help="The output file for the caffe2 model predict file. ",
)

if __name__ == "__main__":
    args = parser.parse_args()
    onnx_model = onnx.load(args.onnx_model)
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.caffe2_init, "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.caffe2_predict, "wb") as f:
        f.write(caffe2_predict_str)
init_net_path = f"models/{net_type}_init_net.pb"
init_net_txt_path = f"models/{net_type}_init_net.pbtxt"
predict_net_path = f"models/{net_type}_predict_net.pb"
predict_net_txt_path = f"models/{net_type}_predict_net.pbtxt"

dummy_input = torch.randn(1, 3, 160, 160)
torch.onnx.export(net,
                  dummy_input,
                  model_path,
                  verbose=False,
                  output_names=['embedding'])

print("export ok")
model = onnx.load(model_path)
print('load ok')
init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)

print(
    f"Save the model in binary format to the files {init_net_path} and {predict_net_path}."
)

with open(init_net_path, "wb") as fopen:
    fopen.write(init_net.SerializeToString())
with open(predict_net_path, "wb") as fopen:
    fopen.write(predict_net.SerializeToString())

print(
    f"Save the model in txt format to the files {init_net_txt_path} and {predict_net_txt_path}. "
)
with open(init_net_txt_path, 'w') as f:
    f.write(str(init_net))
Example #18
def debug_model(op_list, op_trackers, nms_outputs, args):

    return_numpy = not args.return_variable
    reader = None
    runner_type = "caffe2"
    runner = None
    if args.check_task == "image_classification":
        reader = image_classification_random_reader
    elif args.check_task == "image_detection_yolo":
        detection = YoloReader(args.image_path)
        reader = detection.reader
        runner_type = "onnxruntime"
    elif args.check_task == "image_detection_ssd":
        detection = SSDReader(args.image_path)
        reader = detection.reader
        runner_type = "onnxruntime"
    else:
        raise Exception(
            "Only the image_classification, image_detection_ssd and image_detection_yolo tasks are supported for now"
        )

    feed_var_name = args.name_prefix + "feed"
    fetch_var_name = args.name_prefix + "fetch"
    # start check the op test
    print("--------------------START CHECK TEST OPS!---------------------")
    for op_name in op_list:
        print("start check the op: %s" % (op_name))
        op_test_name = TEST + op_name + PY
        run_script = "python " + op_test_name
        return_code = os.system(run_script)
        if return_code != 0:
            raise Exception("The op %s test check failed!" % (op_name))
    print("----------------------CHECK TEST OPS OK!-----------------------")

    # Some tools (e.g. Tf2Onnx, Caffe2Onnx) only check the last layer's output;
    # here we check every layer's output to ensure the robustness of Paddle2Onnx.
    # start check the output of op
    print("--------------------START CHECK OPS OUTPUT!--------------------")
    # get the intermediate result of fluid_model & onnx model
    fluid_intermedidate_target_names = []
    op2out = dict()
    out2op = dict()
    for tracker in op_trackers:
        last_node = tracker.op_node[0]
        outputs = last_node.output
        op2out[tracker] = outputs
        for output in outputs:
            out2op[output] = tracker
        fluid_intermedidate_target_names.extend(outputs)

    #fluid_intermedidate_target_names = fluid_intermedidate_target_names[:]
    # load the paddle and onnx model
    # init the fluid executor

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    feed_target_names = None
    if len(args.fluid_model_name) != 0 and len(args.fluid_params_name) != 0:
        [fluid_infer_program, feed_target_names, fetch_targets
         ] = fluid.io.load_inference_model(args.fluid_model, exe,
                                           args.fluid_model_name,
                                           args.fluid_params_name)
    else:
        [fluid_infer_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(args.fluid_model, exe)
    fetch_target_names = [target.name for target in fetch_targets]
    fluid_intermedidate_target_names = []
    fluid_intermedidate_target_names.extend(fetch_target_names)
    # in this section, we set the variables we want to fetch
    global_block = fluid_infer_program.global_block()

    fetch_list = [global_block.var(name) for name in fluid_intermedidate_target_names\
                 if global_block.has_var(name)]
    fluid_intermedidate_target_names = [var.name for var in fetch_list]

    # load the onnx model and init the onnx executor
    onnx_model = load(args.onnx_model)
    # user define the fetch list
    onnx_model = onnx_user_define_fetch_list(onnx_model, global_block,
                                             fluid_intermedidate_target_names)
    user_define_fetch_list(fluid_infer_program,
                           fluid_intermedidate_target_names, fetch_var_name)
    if runner_type == "caffe2":
        from caffe2.python.onnx.backend import Caffe2Backend
        runner = Caffe2Backend.prepare(onnx_model, device='CPU')
    for inputs in reader(fluid_infer_program,\
        feed_target_names):
        fluid_results = exe.run(fluid_infer_program,
                                feed=dict(zip(feed_target_names, inputs)),
                                fetch_list=fetch_list,
                                feed_var_name=feed_var_name,
                                fetch_var_name=fetch_var_name,
                                return_numpy=return_numpy)
        print("the fluid results len:%d" % (len(fluid_results)))
        if runner is None:
            #save model to tests dir and run python script
            with open("tests/nms_test.onnx", 'wb') as f:
                f.write(onnx_model.SerializeToString())
            f.close()
            with open("tests/inputs_test.pkl", 'wb') as f:
                pickle.dump(dict(zip(feed_target_names, inputs)), f)
            f.close()
            ret = os.system("python tests/onnx_runtime.py %s %s" %
                            (False, False))
            with open("tests/outputs_test.pkl", "rb") as f:
                onnx_results = pickle.load(f)
            f.close()
        else:
            onnx_results = runner.run(inputs)
        print("the onnx_results len:%d" % (len(onnx_results)))
        err_ratio = compare_fluid_onnx_results(
            fluid_results, onnx_results, fluid_intermedidate_target_names,
            nms_outputs, return_numpy, args)
    if err_ratio > 0.01:
        raise Exception("The result between onnx and paddle has difference")
Example #19
ori_scores, ori_boxes = net(dummy_input)
scores, boxes = torch.onnx._export(net,
                                   dummy_input,
                                   model_path,
                                   verbose=True,
                                   output_names=['scores', 'boxes'])
np.testing.assert_almost_equal(ori_scores.data.cpu().numpy(),
                               scores.data.cpu().numpy(),
                               decimal=3)
np.testing.assert_almost_equal(ori_boxes.data.cpu().numpy(),
                               boxes.data.cpu().numpy(),
                               decimal=3)

model = onnx.load(model_path)
# init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)
prepared_backend = c2.prepare(model, device="CUDA")
W = {model.graph.input[0].name: dummy_input.data.cpu().numpy()}
c2_out = prepared_backend.run(W)[0]

c2_workspace = prepared_backend.workspace
c2_model = prepared_backend.predict_net

# Now import the caffe2 mobile exporter
from caffe2.python.predictor import mobile_exporter

# call the Export to get the predict_net, init_net. These nets are needed for running things on mobile
init_net, predict_net = mobile_exporter.Export(c2_workspace, c2_model,
                                               c2_model.external_input)

print(
    f"Save the model in binary format to the files {init_net_path} and {predict_net_path}.")
Example #20
            node.attribute.extend(newL)


def safe_mkdir(dirpath):
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)


output_dir = "convertedModel/"

safe_mkdir(output_dir)

keras_model = keras.models.load_model("../../kerasModel/malaria_model.h5")
onnx_model = onnxmltools.convert_keras(keras_model)
onnxmltools.utils.save_model(onnx_model, output_dir + "test_model.onnx")
onnxmltools.utils.save_text(onnx_model, output_dir + "test_model.json")
stripField(onnx_model)

init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

with open(output_dir + "init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())

with open(output_dir + "predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())

write_for_debug = False
if write_for_debug:
    with open(output_dir + "onnx-predict.pbtxt", "w") as f:
        f.write(str(predict_net))
Example #21
  def convert_model(self, request):
    self._dst_type = request.args.get('destination_type')
    if self._dst_type == 'caffe2':
      dst_predict_net = request.args.get('predict_net')
      dst_init_net = request.args.get('init_net')
      logger.warn(dst_init_net)
      logger.warn(dst_predict_net)
    else:
      destination_path = request.args.get('destination_path')
      logger.warn(destination_path)

    if self._dst_type == 'onnx':
      if self._src_type == 'caffe2':
        data_type = onnx.TensorProto.FLOAT
        # data_shape = (1, 3, 299, 299) if model is inceptionv3/4
        tensor_size_list = self.input_tensor_size.split(',')
        data_shape = tuple(map(int, tensor_size_list))
        print(data_shape)
        value_info = {
            'data': (data_type, data_shape)
        }
        predict_net = caffe2_pb2.NetDef()
        with open(self.src_predict_net, 'rb') as f:
          predict_net.ParseFromString(f.read())

        init_net = caffe2_pb2.NetDef()
        with open(self.src_init_net, 'rb') as f:
          init_net.ParseFromString(f.read())
        # if self._src_tb_graph._predict_net.name == '':
        #     self._src_tb_graph._predict_net.name = 'modelName'

        onnx_model = c2_onnx.caffe2_net_to_onnx_model(predict_net,
                                                        init_net,
                                                        value_info)
        with open(destination_path, 'wb') as f:
          f.write(onnx_model.SerializeToString())

        self._dst_tb_graph = onnx_util.OnnxGraph(destination_path, "onnx")

      elif self._src_type == 'torch':
        # TODO: choose input_net
        tensor_size_list = self.input_tensor_size.split(',')
        logger.warn(destination_path)
        x = torch.randn(tuple(map(int, tensor_size_list)))
        if self.model_file in ['inception_v3', 'googlenet']:
          model = globals().get(self.model_file)(pretrained=True, aux_logits=False ,transform_input=False)
        else:
          model = globals().get(self.model_file)(pretrained=True)

        torch.onnx.export(model, x, destination_path, verbose=True)
        self._dst_tb_graph = onnx_util.OnnxGraph(destination_path, "onnx")

    elif self._dst_type == 'caffe2':
      if self._src_type == 'onnx':
        onnx_model_proto = ModelProto()
        with open(self.model_file, "rb") as onnx_model_path:
          onnx_model_proto.ParseFromString(onnx_model_path.read())

        init_net_model, predict_net_model = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
        with open(dst_predict_net, 'wb') as f_pre:
          f_pre.write(predict_net_model.SerializeToString())
        with open(dst_init_net, 'wb') as f_init:
          f_init.write(init_net_model.SerializeToString())
        self._dst_tb_graph = c2graph_util.C2Graph(dst_predict_net, dst_init_net, "pb")

    logger.warn('Converting completed.')
    self._dst_tb_graph.ConvertNet()
    graph = self._dst_tb_graph.GetTBGraph()

    # count the number of nodes in the output model
    self.d_node_count = 0
    for node in graph.node:
      self.d_node_count += 1

    return http_util.Respond(request, str(graph), 'text/x-protobuf')