Example #1
def onnx_to_caffe2(onnx_model, output, init_net_output):
    onnx_model_proto = ModelProto()
    onnx_model_proto.ParseFromString(onnx_model.read())

    init_net, predict_net = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
    init_net_output.write(init_net.SerializeToString())
    output.write(predict_net.SerializeToString())
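A minimal usage sketch for the function above; all file names are hypothetical. Note the function expects an open ONNX model file plus two writable binary output files.

# Hypothetical usage; the file names are placeholders.
with open("model.onnx", "rb") as onnx_f, \
        open("predict_net.pb", "wb") as out_f, \
        open("init_net.pb", "wb") as init_f:
    onnx_to_caffe2(onnx_f, out_f, init_f)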
Example #2
def _export_via_onnx(model, inputs):
    # make sure all modules are in eval mode; ONNX export may change the training
    # state of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX optimization passes
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)

    # Convert ONNX model to Caffe2 protobuf
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    return predict_net, init_net
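A hedged usage sketch for _export_via_onnx; MyModel and the input shape are assumptions. The model must already be in eval mode, or the internal assertion fires.

# Hypothetical usage; MyModel and the input shape are placeholders.
model = MyModel().eval()
dummy_input = torch.randn(1, 3, 224, 224)
predict_net, init_net = _export_via_onnx(model, (dummy_input,))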
Example #3
def convert_to_caffe2(net, net_type):
    # net_type was undefined in the original snippet; it is now an explicit parameter.
    model_path = f"models/{net_type}.onnx"
    init_net_path = f"models/{net_type}_init_net.pb"
    init_net_txt_path = f"models/{net_type}_init_net.pbtxt"
    predict_net_path = f"models/{net_type}_predict_net.pb"
    predict_net_txt_path = f"models/{net_type}_predict_net.pbtxt"

    dummy_input = torch.randn(1, 3, 300, 300)
    torch.onnx.export(net, dummy_input, model_path, verbose=False, output_names=['scores', 'boxes'])

    model = onnx.load(model_path)
    init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)

    print(f"Saving the model in binary format to {init_net_path} and {predict_net_path}.")

    with open(init_net_path, "wb") as fopen:
        fopen.write(init_net.SerializeToString())
    with open(predict_net_path, "wb") as fopen:
        fopen.write(predict_net.SerializeToString())

    print(f"Saving the model in text format to {init_net_txt_path} and {predict_net_txt_path}.")
    with open(init_net_txt_path, 'w') as f:
        f.write(str(init_net))

    with open(predict_net_txt_path, 'w') as f:
        f.write(str(predict_net))
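With net_type now an explicit parameter, a call might look like this; the network object and type string are placeholders.

# Hypothetical call; `ssd_net` and the type string are placeholders.
convert_to_caffe2(ssd_net, "mb1-ssd")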
Example #4
def create_caffe2_predictor(onnx_file_path):
    with open(onnx_file_path, 'rb') as onnx_model:
        onnx_model_proto = ModelProto()
        onnx_model_proto.ParseFromString(onnx_model.read())
        init_net, predict_net = c2.onnx_graph_to_caffe2_net(
            onnx_model_proto)
        predictor = workspace.Predictor(init_net, predict_net)
    return predictor
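The returned workspace.Predictor can be fed a dict of input blobs, as in the Caffe2 tutorials; the path and the 'data' blob name below are assumptions that depend on the exported graph.

import numpy as np

# Hypothetical usage; the path and the input blob name are placeholders.
predictor = create_caffe2_predictor("model.onnx")
img = np.random.rand(1, 3, 224, 224).astype(np.float32)
results = predictor.run({'data': img})  # 'data' must match the graph's input name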
Example #5
def main():
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.c2_prefix + '.init.pb', "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.c2_prefix + '.predict.pb', "wb") as f:
        f.write(caffe2_predict_str)
Example #6
def onnx2caffe():
    print("==> Exporting ONNX to Caffe2 format")
    onnx_model = onnx.load('weights/model.onnx')
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(
        onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open('weights/model.init.pb', "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open('weights/model.predict.pb', "wb") as f:
        f.write(caffe2_predict_str)
    print("==> Done")
Example #7
    def __call__(self, graph, folder):
        onnx_model = build_model(graph, ir_version=6, opset_version=9)
        if not onnx_model.graph.name:
            onnx_model.graph.name = 'Graph'

        init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(
            onnx_model)
        value_info = get_value_info(onnx_model)

        if not os.path.exists(folder):
            os.mkdir(folder)

        save_caffe2_model(folder, init_net, predict_net, value_info)
Example #8
def convert_onnx_to_caffe2_v2(onnx_path, init_net_path, predict_net_path):
    '''
    Equivalent command line: convert-onnx-to-caffe2 path/to/model.onnx --output predict_net.pb --init-net-output init_net.pb
    :param onnx_path: path to the source ONNX model
    :param init_net_path: output path for the init net
    :param predict_net_path: output path for the predict net
    :return:
    '''
    model = onnx.load(onnx_path)
    # onnx.checker.check_model(model)
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(model)
    with open(init_net_path, "wb") as f:
        f.write(init_net.SerializeToString())
    with open(predict_net_path, "wb") as f:
        f.write(predict_net.SerializeToString())
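A one-line call example mirroring the docstring's command line; the paths are placeholders.

# Hypothetical call; all paths are placeholders.
convert_onnx_to_caffe2_v2("path/to/model.onnx", "init_net.pb", "predict_net.pb")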
Example #9
def export_caffe2_detection_model(model: torch.nn.Module,
                                  tensor_inputs: List[torch.Tensor]):
    """
    Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.

    Args:
        model: a caffe2-compatible version of a detectron2 model, defined in caffe2_modeling.py
        tensor_inputs: a list of tensors that the caffe2 model takes as input.
    """
    model = copy.deepcopy(model)
    assert isinstance(model, torch.nn.Module)
    assert hasattr(model, "encode_additional_info")

    # Export via ONNX
    logger.info(
        "Exporting a {} model via ONNX ...".format(type(model).__name__) +
        " Some warnings from ONNX are expected and are usually nothing to worry about."
    )
    onnx_model = export_onnx_model(model, (tensor_inputs, ))
    # Convert ONNX model to Caffe2 protobuf
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
    table = tabulate(ops_table,
                     headers=["type", "input", "output"],
                     tablefmt="pipe")
    logger.info(
        "ONNX export Done. Exported predict_net (before optimizations):\n" +
        colored(table, "cyan"))

    # Apply protobuf optimization
    fuse_alias_placeholder(predict_net, init_net)
    if any(t.device.type != "cpu" for t in tensor_inputs):
        fuse_copy_between_cpu_and_gpu(predict_net)
        remove_dead_end_ops(init_net)
        _assign_device_option(predict_net, init_net, tensor_inputs)
    params, device_options = get_params_from_init_net(init_net)
    predict_net, params = remove_reshape_for_fc(predict_net, params)
    init_net = construct_init_net_from_params(params, device_options)
    group_norm_replace_aten_with_caffe2(predict_net)

    # Record necessary information for running the pb model in Detectron2 system.
    model.encode_additional_info(predict_net, init_net)

    logger.info("Operators used in predict_net: \n{}".format(
        _op_stats(predict_net)))
    logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))

    return predict_net, init_net
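The returned nets can be persisted like the other examples on this page; a minimal follow-up sketch, with the model object and input tensor assumed to exist.

# Hypothetical follow-up: serialize the converted nets to disk.
predict_net, init_net = export_caffe2_detection_model(model, [tensor_image])
with open("model_init.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("model_predict.pb", "wb") as f:
    f.write(predict_net.SerializeToString())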
Example #10
model_path = f"models/{net_type}.onnx"  # assumed: the snippet is truncated here; restored to mirror convert_to_caffe2 above
init_net_path = f"models/{net_type}_init_net.pb"
init_net_txt_path = f"models/{net_type}_init_net.pbtxt"
predict_net_path = f"models/{net_type}_predict_net.pb"
predict_net_txt_path = f"models/{net_type}_predict_net.pbtxt"

dummy_input = torch.randn(1, 3, 160, 160)
torch.onnx.export(net,
                  dummy_input,
                  model_path,
                  verbose=False,
                  output_names=['embedding'])

print("export ok")
model = onnx.load(model_path)
print('load ok')
init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)

print(
    f"Saving the model in binary format to {init_net_path} and {predict_net_path}."
)

with open(init_net_path, "wb") as fopen:
    fopen.write(init_net.SerializeToString())
with open(predict_net_path, "wb") as fopen:
    fopen.write(predict_net.SerializeToString())

print(
    f"Saving the model in text format to {init_net_txt_path} and {predict_net_txt_path}."
)
with open(init_net_txt_path, 'w') as f:
    f.write(str(init_net))

# The original snippet is truncated here; the predict net is written the same way.
with open(predict_net_txt_path, 'w') as f:
    f.write(str(predict_net))
Example #11
            node.attribute.extend(newL)  # tail of the stripField helper; the snippet is truncated above


def safe_mkdir(dirpath):
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)


output_dir = "convertedModel/"

safe_mkdir(output_dir)

keras_model = keras.models.load_model("../../kerasModel/malaria_model.h5")
onnx_model = onnxmltools.convert_keras(keras_model)
onnxmltools.utils.save_model(onnx_model, output_dir + "test_model.onnx")
onnxmltools.utils.save_text(onnx_model, output_dir + "test_model.json")
stripField(onnx_model)

init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

with open(output_dir + "init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())

with open(output_dir + "predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())

write_for_debug = False
if write_for_debug:
    with open(output_dir + "onnx-predict.pbtxt", "w") as f:
        f.write(str(predict_net))
Example #12
  def convert_model(self, request):
    self._dst_type = request.args.get('destination_type')
    if self._dst_type == 'caffe2':
      dst_predict_net = request.args.get('predict_net')
      dst_init_net = request.args.get('init_net')
      logger.warning(dst_init_net)
      logger.warning(dst_predict_net)
    else:
      destination_path = request.args.get('destination_path')
      logger.warning(destination_path)

    if self._dst_type == 'onnx':
      if self._src_type == 'caffe2':
        data_type = onnx.TensorProto.FLOAT
        # data_shape = (1, 3, 299, 299) if model is inceptionv3/4
        tensor_size_list = self.input_tensor_size.split(',')
        data_shape = tuple(map(int, tensor_size_list))
        print(data_shape)
        value_info = {
            'data': (data_type, data_shape)
        }
        predict_net = caffe2_pb2.NetDef()
        with open(self.src_predict_net, 'rb') as f:
          predict_net.ParseFromString(f.read())

        init_net = caffe2_pb2.NetDef()
        with open(self.src_init_net, 'rb') as f:
          init_net.ParseFromString(f.read())
        # if self._src_tb_graph._predict_net.name == '':
        #     self._src_tb_graph._predict_net.name = 'modelName'

        onnx_model = c2_onnx.caffe2_net_to_onnx_model(
            predict_net, init_net, value_info)
        with open(destination_path, 'wb') as f:
          f.write(onnx_model.SerializeToString())

        self._dst_tb_graph = onnx_util.OnnxGraph(destination_path, "onnx")

      elif self._src_type == 'torch':
        # TODO: choose input_net
        tensor_size_list = self.input_tensor_size.split(',')
        logger.warning(destination_path)
        x = torch.randn(tuple(map(int, tensor_size_list)))
        if self.model_file in ['inception_v3', 'googlenet']:
          model = globals().get(self.model_file)(pretrained=True, aux_logits=False, transform_input=False)
        else:
          model = globals().get(self.model_file)(pretrained=True)

        torch.onnx.export(model, x, destination_path, verbose=True)
        self._dst_tb_graph = onnx_util.OnnxGraph(destination_path, "onnx")

    elif self._dst_type == 'caffe2':
      if self._src_type == 'onnx':
        onnx_model_proto = ModelProto()
        with open(self.model_file, "rb") as onnx_model_path:
          onnx_model_proto.ParseFromString(onnx_model_path.read())

        init_net_model, predict_net_model = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
        with open(dst_predict_net, 'wb') as f_pre:
          f_pre.write(predict_net_model.SerializeToString())
        with open(dst_init_net, 'wb') as f_init:
          f_init.write(init_net_model.SerializeToString())
        self._dst_tb_graph = c2graph_util.C2Graph(dst_predict_net, dst_init_net, "pb")

    logger.warning('Conversion completed.')
    self._dst_tb_graph.ConvertNet()
    graph = self._dst_tb_graph.GetTBGraph()

    # count the number of nodes in the output model
    self.d_node_count = len(graph.node)

    return http_util.Respond(request, str(graph), 'text/x-protobuf')
Example #13
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse

import onnx
from caffe2.python.onnx.backend import Caffe2Backend

parser = argparse.ArgumentParser(description="Convert ONNX models "
                                 "to Caffe2 models")

parser.add_argument("--onnx-model", required=True, help="The ONNX model")
parser.add_argument("--caffe2-init",
                    required=True,
                    help="The output file for the caffe2 model init file. ")
parser.add_argument("--caffe2-predict",
                    required=True,
                    help="The output file for the caffe2 model predict file. ")

if __name__ == "__main__":
    args = parser.parse_args()
    onnx_model = onnx.load(args.onnx_model)  # renamed to avoid shadowing the onnx module
    caffe2_init, caffe2_predict = \
        Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.caffe2_init, "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.caffe2_predict, "wb") as f:
        f.write(caffe2_predict_str)
Example #14
def PyTorchModule(helper,
                  model,
                  sample_arguments,
                  caffe2_inputs,
                  prefix_name=None):
    """
    Embed an ONNX-exportable PyTorch Model into a Caffe2 model being built.

    Arguments:
        helper (caffe2.python.core.ModelHelper): the model helper where
            this imported network should be inserted
        model (torch.nn.Module): the model to be exported
        sample_arguments (tuple of arguments): the inputs to
            the model, e.g., such that ``model(*args)`` is a valid
            invocation of the model.  Any non-Variable arguments will
            be hard-coded into the exported model; any Variable arguments
            will become inputs of the exported model, in the order they
            occur in sample_arguments.  If sample_arguments is a single
            Variable, this is equivalent to having called it with a 1-ary
            tuple of that Variable.
            (Note: passing keyword arguments to the model is not currently
            supported.  Give us a shout if you need it.)
        caffe2_inputs (list of str or caffe2.python.core.BlobReference): the
           caffe2 Blobs that should be inputs to this network. Must be
           the same length as sample_arguments
        prefix_name: prefix name to add to each member of the blob, if None then
           a fresh prefix pytorch_input_N/ is used
    Returns:
        A tuple of caffe2.python.core.BlobReference objects referring to the
        model's outputs.
    """
    if prefix_name is None:
        global _next_idx
        prefix_name = 'pytorch_import_' + str(_next_idx) + '/'
        _next_idx += 1

    # TODO: handle the case where model cannot be exported
    # and embed as a Python op in Caffe2
    f = io.BytesIO()
    torch.onnx.export(model, sample_arguments, f, export_params=True)
    onnx_model = onnx.load(io.BytesIO(f.getvalue()))
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    initialized = set([x.name for x in onnx_model.graph.initializer])
    uninitialized_inputs = {
        x.name: i
        for i, x in enumerate(onnx_model.graph.input)
        if x.name not in initialized
    }

    if len(uninitialized_inputs) != len(caffe2_inputs):
        raise ValueError('Expected {} inputs but found {}'.format(
            len(uninitialized_inputs), len(caffe2_inputs)))

    def remap_blob_name(name):
        if name in uninitialized_inputs:
            idx = uninitialized_inputs[name]
            return str(caffe2_inputs[idx])
        return prefix_name + name

    predict_net = Net(predict_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.net.AppendNet(predict_net)

    init_net = Net(init_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.param_init_net.AppendNet(init_net)

    results = tuple([
        BlobReference(remap_blob_name(x.name), helper.net)
        for x in onnx_model.graph.output
    ])
    return results
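A hedged embedding sketch for PyTorchModule; the host helper, module, shapes, and blob names are all assumptions. Passing prefix_name explicitly avoids relying on the module-level _next_idx counter.

# Hypothetical usage: embed a small PyTorch module into a Caffe2 model.
from caffe2.python import model_helper

helper = model_helper.ModelHelper(name="host_model")
torch_module = torch.nn.Linear(4, 2)
sample = torch.randn(1, 4)
outputs = PyTorchModule(helper, torch_module, (sample,), ["input_blob"],
                        prefix_name="embedded/")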
Example #15
import argparse

import onnx
from caffe2.python.onnx.backend import Caffe2Backend

parser = argparse.ArgumentParser(description="Convert ONNX models "
                                 "to Caffe2 models")

parser.add_argument("--onnx-model", required=True, help="The ONNX model")
parser.add_argument(
    "--caffe2-init",
    required=True,
    help="The output file for the caffe2 model init file. ",
)
parser.add_argument(
    "--caffe2-predict",
    required=True,
    help="The output file for the caffe2 model predict file. ",
)

if __name__ == "__main__":
    args = parser.parse_args()
    onnx_model = onnx.load(args.onnx_model)  # renamed to avoid shadowing the onnx module
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.caffe2_init, "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.caffe2_predict, "wb") as f:
        f.write(caffe2_predict_str)
Example #16
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
import argparse

parser = argparse.ArgumentParser(description="Convert ONNX models "
                                 "to Caffe2 models")

parser.add_argument("--onnx-model", required=True, help="The ONNX model")
parser.add_argument("--caffe2-init",
                    required=True,
                    help="The output file for the caffe2 model init file. ")
parser.add_argument("--caffe2-predict",
                    required=True,
                    help="The output file for the caffe2 model predict file. ")

if __name__ == "__main__":
    args = parser.parse_args()
    onnx_model = onnx.load(args.onnx_model)  # renamed to avoid shadowing the onnx module
    caffe2_init, caffe2_predict = \
        Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model, device="CUDA")
    caffe2_init_str = caffe2_init.SerializeToString()
    with open(args.caffe2_init, "wb") as f:
        f.write(caffe2_init_str)
    caffe2_predict_str = caffe2_predict.SerializeToString()
    with open(args.caffe2_predict, "wb") as f:
        f.write(caffe2_predict_str)
    with open("onnx-predict.pbtxt", "w") as f:
        f.write(str(caffe2_predict))
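The device argument above requests a CUDA-targeted conversion. For comparison, a minimal CPU variant, assuming the backend accepts the same device keyword and that onnx_model is loaded as in the script above:

# Hypothetical comparison; assumes the same device keyword with "CPU".
caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model, device="CPU")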