Example No. 1
def import_model(model_file):
    """Imports the ONNX model file, passed as a parameter, into MXNet symbol and parameters.
    Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/ONNX

    Parameters
    ----------
    model_file : str
        ONNX model file name

    Returns
    -------
    sym : :class:`~mxnet.symbol.Symbol`
        MXNet symbol object

    arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format

    aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
    """
    graph = GraphProto()

    try:
        import onnx
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    # loads model file and returns ONNX protobuf object
    model_proto = onnx.load(model_file)
    sym, arg_params, aux_params = graph.from_onnx(model_proto.graph)
    return sym, arg_params, aux_params
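
A minimal usage sketch for the function above, binding the imported symbol with the plain MXNet Module API; the file name, the input name 'input_0', and the input shape are assumptions (get_model_metadata in Example No. 3 reports the real ones):

import mxnet as mx

sym, arg_params, aux_params = import_model('model.onnx')  # hypothetical file
mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)
mod.bind(for_training=False, data_shapes=[('input_0', (1, 3, 224, 224))])
mod.set_params(arg_params=arg_params, aux_params=aux_params, allow_missing=True)
mod.forward(mx.io.DataBatch([mx.nd.ones((1, 3, 224, 224))]))
print(mod.get_outputs()[0].shape)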
Example No. 2

  def testImportFromPytorch(self):
    """
    Test importing a pre-trained PyTorch model into TensorFlow for inference
    """

    # Export trained pytorch model to onnx
    pt_model = PytorchMNIST(N)
    self._train_pytorch(pt_model)
    dummy_input = torch.autograd.Variable(torch.FloatTensor(1, 28 * 28))
    torch.onnx.export(pt_model, dummy_input,
                      os.path.join(self.tmpDir, "torch.onnx"),
                      input_names=["input"] + pt_model.state_dict().keys(),
                      output_names=["output"])

    _, expected = self._test_pytorch(pt_model)

    # Load ONNX model
    model = onnx.load(os.path.join(self.tmpDir, "torch.onnx"))
    # HACK: Convert model's input shape to dynamic, ignoring batch size
    model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = '?'
    onnx.checker.check_model(model)

    tf_model = onnx_tf.backend.prepare(model)
    # Load MNIST test data
    (_, _), (x_test, y_test) = keras.datasets.mnist.load_data()
    x_test = x_test.reshape(-1, 28 * 28) / 255.0
    x_test = (x_test - 0.1307) / 0.3081

    with self.test_session(config=CONFIG):
      # Compute accuracy
      output = tf_model.run(x_test)
      predictions = output[0].argmax(axis=1)
      accuracy = tf.reduce_mean(tf.cast(predictions == y_test, tf.float32))

      self.assertAlmostEqual(accuracy.eval(), expected, places=4)
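
The dim_param hack above generalizes to any input: each ONNX dimension holds either a fixed dim_value or a symbolic dim_param, so marking dimension 0 of every graph input makes the batch size dynamic. A small sketch (it assumes parameters are not listed among the graph inputs):

def make_batch_dynamic(model, symbol='batch'):
    # Mark dimension 0 of every graph input as symbolic.
    for graph_input in model.graph.input:
        graph_input.type.tensor_type.shape.dim[0].dim_param = symbol
    return model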
Example No. 3
def get_model_metadata(model_file):
    """
    Returns the name and shape information of input and output tensors of the given ONNX model file.

    Parameters
    ----------
    model_file : str
        ONNX model file name

    Returns
    -------
    model_metadata : dict
        A dictionary object mapping various metadata to its corresponding value.
        The dictionary will have the following template.
        {
            'input_tensor_data' : <list of tuples representing the shape of the input parameters>,
            'output_tensor_data' : <list of tuples representing the shape of the output
                                    of the model>
        }

    """
    graph = GraphProto()
    try:
        import onnx
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    model_proto = onnx.load(model_file)
    metadata = graph.get_graph_metadata(model_proto.graph)
    return metadata
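
A sketch of typical usage, assuming each 'input_tensor_data' entry is a (name, shape) pair as in the MXNet contrib API (the file name is hypothetical):

metadata = get_model_metadata('model.onnx')
data_names = [name for name, _ in metadata['input_tensor_data']]
data_shapes = metadata['input_tensor_data']  # already (name, shape) pairs
print(data_names, data_shapes)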
Example No. 4
def import_to_gluon(model_file, ctx):
    """
    Imports the ONNX model file, passed as a parameter, into a Gluon SymbolBlock object.

    Parameters
    ----------
    model_file : str
        ONNX model file name
    ctx : Context or list of Context
        Loads the model into one or many context(s).

    Returns
    -------
    sym_block : :class:`~mxnet.gluon.SymbolBlock`
        A SymbolBlock object representing the given model file.
    """
    graph = GraphProto()
    try:
        import onnx
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. Instructions to"
                          + " install - https://github.com/onnx/onnx#installation")
    model_proto = onnx.load(model_file)
    net = graph.graph_to_gluon(model_proto.graph, ctx)
    return net
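
A quick inference sketch with the returned SymbolBlock; the file name and input shape are assumptions:

import mxnet as mx

net = import_to_gluon('model.onnx', ctx=mx.cpu())
out = net(mx.nd.ones((1, 3, 224, 224)))
print(out.shape)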
Example No. 5
def check_model():  # type: () -> None
    parser = argparse.ArgumentParser('check-model')
    parser.add_argument('model_pb', type=argparse.FileType('rb'))
    args = parser.parse_args()

    model = load(args.model_pb)
    checker.check_model(model)
Example No. 6
 def test_export_to_buffer(self):
     model = Model()
     buffer = model.export_to_buffer()
     protobuf_model = onnx.load(BytesIO(buffer.getvalue()))
     self.assertEqual(4, len(protobuf_model.graph.input))  # 2 inputs + 2 params
     self.assertEqual(4, len(protobuf_model.graph.output))
     self.assertEqual("state:float_features", protobuf_model.graph.input[0].name)
     self.assertEqual("action:float_features", protobuf_model.graph.input[1].name)
Example No. 7
def main():
    xmodel = onnx.load(sys.argv[1])
    subgraphs = find_subgraphs(xmodel.graph)
    if len(sys.argv) == 2:
        for name, graph in subgraphs.items():
            print('name=%s node=%d' % (name, len(graph.node)))
    else:
        g = subgraphs[sys.argv[2]]
        m = onnx.helper.make_model(g)
        onnx.save(m, 'out.onnx')
Example No. 8
def compare_graph(onnx_file, nnvm_sym, ishape):
    onnx_graph = onnx.load(onnx_file)
    onnx_sym, params = nnvm.frontend.from_onnx(onnx_graph)
    g1 = nnvm.graph.create(onnx_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    ishapes = {'input_0': ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example No. 9
def convert_onnx(network, net_info, model_root, model_name, copy_params):
    model_file = model_root + '/' + model_name + '.onnx'

    onnx_model = optimize_onnx(onnx.load(model_file))
    onnx_graph = onnx_model.graph
    onnx_nodes = onnx_graph.node
    onnx_initializer = onnx_graph.initializer

    input_infos, output_names = parse_graph(onnx_graph)

    net_info['arg']['out_blob_v_s'] = output_names

    network.set_net_name(model_name)
    network.set_net_arg_dict(net_info['arg'])

    convert_input(net_info, network, input_infos)

    param_dict = {}
    for index, node in enumerate(onnx_nodes):
        op_type = node.op_type
        if op_type in ('Relu', 'PRelu', 'Sigmoid'):
            convert_activate(onnx_nodes, index, onnx_initializer, param_dict, network)
        elif op_type == 'BatchNormalization':
            convert_batch_norm(onnx_nodes, index, param_dict, network)
        elif op_type in ('Add', 'Sub', 'Mul', 'Div'):
            convert_binary(onnx_nodes, index, network)
        elif op_type == 'Concat':
            convert_concat(onnx_nodes, index, network)
        elif op_type == 'Gemm':
            convert_connected(onnx_nodes, index, onnx_initializer, param_dict, network)
        elif op_type == 'Conv':
            convert_conv(onnx_nodes, index, onnx_initializer, param_dict, network)
        elif op_type == 'ConvTranspose':
            convert_deconv(onnx_nodes, index, onnx_initializer, param_dict, network)
        elif op_type == 'Flatten':
            convert_flatten(onnx_nodes, index, network)
        elif op_type == 'Transpose':
            convert_permute(onnx_nodes, index, network)
        elif op_type in ('MaxPool', 'AveragePool', 'GlobalMaxPool', 'GlobalAveragePool'):
            convert_pooling(onnx_nodes, index, network)
        elif op_type == 'Reshape':
            convert_reshape(onnx_nodes, index, onnx_initializer, network)
        elif op_type == 'Upsample':
            convert_resize(onnx_nodes, index, onnx_initializer, network)
        elif op_type == 'Softmax':
            convert_softmax(onnx_nodes, index, network)
        elif op_type == 'Squeeze':
            convert_squeeze(onnx_nodes, index, network)
        else:
            print('Skipping ' + op_type + ', please check!')

    if copy_params:
        copy_weights(onnx_initializer, param_dict, network)
Example No. 10
    def prepare_zip_archive(cls, file, device='CPU', **kwargs):
        with zipfile.ZipFile(file, mode='r') as z:
            with z.open('__MODEL_PROTO', 'r') as f:
                model = onnx.load(f)
            blob_names = set(z.namelist()) - {'__MODEL_PROTO'}
            # TODO: make this more efficient
            raw_values_dict = {}
            for name in blob_names:
                with z.open(name, 'r') as blob_file:
                    raw_values_dict[name] = blob_file.read()

        return cls.prepare(model, device, raw_values_dict=raw_values_dict, **kwargs)
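
For reference, a hedged sketch of the writer side that would produce an archive this classmethod can consume (the '__MODEL_PROTO' entry name mirrors the reader above; the overall layout is an assumption):

import zipfile

def save_zip_archive(file, model, raw_values_dict):
    # One entry for the serialized ModelProto, plus one entry per external blob.
    with zipfile.ZipFile(file, mode='w') as z:
        z.writestr('__MODEL_PROTO', model.SerializeToString())
        for name, raw_value in raw_values_dict.items():
            z.writestr(name, raw_value)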
Example No. 11
 def buffer_to_caffe2_netdef(buffer):
     """Creates caffe2 NetDef from buffer object and returns pointer to
     input and output blobs and the NetDef."""
     protobuf_model = onnx.load(BytesIO(buffer.getvalue()))
     input_blob_name = protobuf_model.graph.input[0].name
     output_blob_name = protobuf_model.graph.output[0].name
     logger.info(
         "INPUT BLOB: " + input_blob_name + ". OUTPUT BLOB:" + output_blob_name
     )
     return (
         input_blob_name,
         output_blob_name,
         caffe2.python.onnx.backend.prepare(protobuf_model),
     )
Example No. 12
def test_save_no_junk(tmpdir):
    """ Test for an issue with save() not having O_TRUNC mode
    resulting in possible junk at the end of file if it already exists
    """
    try:
        import onnx
    except ImportError:
        pytest.skip('ONNX is not installed.')

    filename = os.path.join(str(tmpdir), R'no_junk.onnx')
    filename_new = os.path.join(str(tmpdir), R'no_junk_new.onnx')

    # Save a large model.
    g = C.ceil([1, 2, 3, 4, 5, 6, 7, 8] * 100)
    g.save(filename, format=C.ModelFormat.ONNX)

    # Save a smaller model using the same file name and a new file name.
    g = C.ceil([1, 2, 3, 4, 5, 6, 7] * 100)
    g.save(filename, format=C.ModelFormat.ONNX)
    g.save(filename_new, format=C.ModelFormat.ONNX)

    # Check the files can be loaded as standard ONNX files,
    # and both result in the same model.
    assert onnx.load(filename) == onnx.load(filename_new)
Example No. 13

def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    model = onnx.load(model_dir + '.onnx')
    onnx.checker.check_model(model)
    init_net, predict_net = onnx_caffe2.backend.Caffe2Backend.onnx_graph_to_caffe2_net(model.graph, device='CPU')
    onnx_caffe2.helper.save_caffe2_net(init_net, os.path.join(model_dir, 'init_net.pb'))
    onnx_caffe2.helper.save_caffe2_net(predict_net, os.path.join(model_dir, 'predict_net.pb'), output_txt=True)
    logging.info(model_dir)
Example No. 14
def onnx_input_output_names(onnx_filename):
    onnx_model = onnx.load(onnx_filename)
    initializer_names = set()
    for initializer in onnx_model.graph.initializer:
        initializer_names.add(initializer.name)

    input_names = []
    for input in onnx_model.graph.input:
        if input.name not in initializer_names:
            input_names.append(input.name)

    output_names = []
    for output in onnx_model.graph.output:
        output_names.append(output.name)

    return input_names, output_names
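
A usage sketch with onnxruntime; the file name and the all-ones dummy feed are assumptions, and symbolic dimensions are replaced with 1:

import numpy as np
import onnxruntime as ort

input_names, output_names = onnx_input_output_names('model.onnx')
sess = ort.InferenceSession('model.onnx')
feeds = {}
for meta in sess.get_inputs():
    shape = [d if isinstance(d, int) else 1 for d in meta.shape]
    feeds[meta.name] = np.ones(shape, dtype=np.float32)
outputs = sess.run(output_names, feeds)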
Example No. 15
    def _roundtrip(self, model_name):
        model_dir = Runner(c2)._prepare_model_data(
            namedtuple('dummy', ['model_name'])(model_name))

        pb_path = os.path.join(model_dir, 'model.pb')

        before_roundtrip = onnx.load(pb_path)

        with open(pb_path, 'rb') as pb:
            after_roundtrip = onnx.load_from_string(pb.read())

        assert onnx.helper.printable_graph(before_roundtrip.graph) \
            == onnx.helper.printable_graph(after_roundtrip.graph)

        with open(pb_path, 'rb') as pb:
            assert after_roundtrip.SerializeToString() == pb.read()
Example No. 16

def convert_onnx_to_model(onnx_input_path):
    """Converts the ONNX file to a TensorFlow representation and runs a test inference,
    following the official onnx-tf tutorial.

    Parameters
    -----------
    onnx_input_path : str
        The path where the ONNX file is saved.

    References
    -----------
    - `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__
    """
    model = onnx.load(onnx_input_path)
    tf_rep = prepare(model)
    # Image Path
    img = np.load("./assets/image.npz")
    output = tf_rep.run(img.reshape([1, 784]))
    print("The digit is classified as ", np.argmax(output))
Example No. 17
def main():
    from_dir = sys.argv[1]
    to_dir = sys.argv[2]

    os.makedirs(to_dir, exist_ok=True)

    xmodel = onnx.load(os.path.join(from_dir, 'model.onnx'))
    convert_model(xmodel)
    onnx.save(xmodel, os.path.join(to_dir, 'model.onnx'))

    for test_dir in glob.glob(os.path.join(from_dir, 'test_data_set_*')):
        dir_name = os.path.basename(test_dir)
        to_test_dir = os.path.join(to_dir, dir_name)
        os.makedirs(to_test_dir, exist_ok=True)

        for pb_filename in glob.glob(os.path.join(test_dir, '*.pb')):
            pb_name = os.path.basename(pb_filename)
            tensor = onnx.load_tensor(pb_filename)
            convert_tensor(tensor)
            onnx.save_tensor(tensor, os.path.join(to_test_dir, pb_name))
Example No. 18
 def _test_onnx_importer(self, model_name, data_input_index = 0):
     model_dir = _download_onnx_model(model_name)
     model_def = onnx.load(os.path.join(model_dir, 'model.onnx'))
     input_blob_dims = [int(x.dim_value) for x in model_def.graph.input[data_input_index].type.tensor_type.shape.dim]
     op_inputs = [x.name for x in model_def.graph.input]
     op_outputs = [x.name for x in model_def.graph.output]
     print("{}".format(op_inputs))
     data = np.random.randn(*input_blob_dims).astype(np.float32)
     Y_c2 = c2.run_model(model_def, {op_inputs[data_input_index]: data})
     op = convert_onnx_model_to_trt_op(model_def, verbosity=3)
     device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
     op.device_option.CopyFrom(device_option)
     Y_trt = None
     ws = Workspace()
     with core.DeviceScope(device_option):
         ws.FeedBlob(op_inputs[data_input_index], data)
         ws.RunOperatorsOnce([op])
         output_values = [ws.FetchBlob(name) for name in op_outputs]
         Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
     np.testing.assert_allclose(Y_c2, Y_trt, rtol=1e-3)
Example No. 19
    def _test(self, modelName):
        path = os.path.join(ONNX_MODEL_ZOO_ROOT, modelName, "model.onnx")
        try:
            o = onnx.load(path)
        except FileNotFoundError:
            self.skipTest("ONNX model is not found. " \
                          "You may want to download it by running scripts/onnx/download_onnx_model_zoo.py" \
                          ": {}".format(modelName))

        onnxInputLayer, = set(map(lambda x: x.name, o.graph.input)) - set(map(lambda x: x.name, o.graph.initializer))

        layers = lbann.onnx.o2l.onnxToLbannLayers(
            o,
            [DATA_LAYER_NAME, LABEL_LAYER_NAME],
            {DATA_LAYER_NAME: onnxInputLayer},
        )

        model = lbann_pb2.Model(layer = layers)
        pb = lbann_pb2.LbannPB(model=model)

        if SAVE_PROTOTEXT:
            with open(os.path.join(DUMP_DIR, "{}.prototext".format(modelName)), "w") as f:
                f.write(txtf.MessageToString(pb))
Example No. 20

def runBertTrainingTest(gradient_accumulation_steps,
                        use_mixed_precision,
                        allreduce_post_accumulation,
                        use_simple_model_desc=True,
                        use_internal_loss_scale=False):
    model_desc = bert_model_description()
    simple_model_desc = remove_extra_info(
        model_desc) if use_simple_model_desc else model_desc
    learning_rate_description = ort_trainer_learning_rate_description()
    device = torch.device("cuda", 0)

    torch.manual_seed(1)
    onnxruntime.set_seed(1)

    onnx_model = onnx.load(get_name("bert_toy_postprocessed.onnx"))

    loss_scaler = LossScaler("ort_test_input_loss_scalar",
                             True) if use_internal_loss_scale else None

    model = ORTTrainer(onnx_model,
                       None,
                       simple_model_desc,
                       "LambOptimizer",
                       map_optimizer_attributes,
                       learning_rate_description,
                       device,
                       postprocess_model=None,
                       gradient_accumulation_steps=gradient_accumulation_steps,
                       world_rank=0,
                       world_size=1,
                       loss_scaler=loss_scaler,
                       use_mixed_precision=use_mixed_precision,
                       allreduce_post_accumulation=allreduce_post_accumulation)

    if loss_scaler is None:
        loss_scaler = LossScaler(model.loss_scale_input_name, True)

    input_ids_batches = []
    segment_ids_batches = []
    input_mask_batches = []
    masked_lm_labels_batches = []
    next_sentence_labels_batches = []
    batch_size = 16
    num_batches = 8
    for batch in range(num_batches):
        input_ids_batches = [
            *input_ids_batches,
            generate_sample_batch(model_desc.inputs_[0], batch_size, device)
        ]
        segment_ids_batches = [
            *segment_ids_batches,
            generate_sample_batch(model_desc.inputs_[1], batch_size, device)
        ]
        input_mask_batches = [
            *input_mask_batches,
            generate_sample_batch(model_desc.inputs_[2], batch_size, device)
        ]
        masked_lm_labels_batches = [
            *masked_lm_labels_batches,
            generate_sample_batch(model_desc.inputs_[3], batch_size, device)
        ]
        next_sentence_labels_batches = [
            *next_sentence_labels_batches,
            generate_sample_batch(model_desc.inputs_[4], batch_size, device)
        ]

    lr_batch_list = [
        0.0000000e+00, 4.6012269e-07, 9.2024538e-07, 1.3803681e-06,
        1.8404908e-06, 2.3006135e-06, 2.7607362e-06, 3.2208588e-06,
        3.6809815e-06
    ]

    actual_losses = []
    actual_all_finites = []

    for batch_count in range(num_batches):
        input_ids = generate_sample_batch(model_desc.inputs_[0], batch_size,
                                          device)
        segment_ids = generate_sample_batch(model_desc.inputs_[1], batch_size,
                                            device)
        input_mask = generate_sample_batch(model_desc.inputs_[2], batch_size,
                                           device)
        masked_lm_labels = generate_sample_batch(model_desc.inputs_[3],
                                                 batch_size, device)
        next_sentence_labels = generate_sample_batch(model_desc.inputs_[4],
                                                     batch_size, device)
        lr = lr_batch_list[batch_count]

        learning_rate = torch.tensor([lr]).to(device)
        training_args = [
            input_ids, segment_ids, input_mask, masked_lm_labels,
            next_sentence_labels, learning_rate
        ]
        if use_mixed_precision:
            if not use_internal_loss_scale:
                loss_scale = torch.tensor([loss_scaler.loss_scale_]).to(device)
                training_args.append(loss_scale)
            actual_loss = model.train_step(*training_args)
            if isinstance(actual_loss, (list, tuple)):
                assert len(actual_loss) == 2
                actual_loss, actual_all_finite = actual_loss
                if not use_internal_loss_scale:
                    loss_scaler.update_loss_scale(actual_all_finite.item())
                    actual_all_finites = [
                        *actual_all_finites,
                        actual_all_finite.cpu().numpy().item(0)
                    ]

            actual_losses = [*actual_losses, actual_loss.cpu().numpy().item(0)]
        else:
            loss = model(*training_args)
            actual_losses = [*actual_losses, loss.cpu().numpy().item(0)]

        if batch_count == num_batches - 1:
            # Test the eval_step api with fetches at the end of the training.
            # If eval_step is called during training, it will affect the actual training loss (the training session is stateful).
            eval_loss = model.eval_step(input_ids,
                                        segment_ids,
                                        input_mask,
                                        masked_lm_labels,
                                        next_sentence_labels,
                                        fetches=['loss'])
            eval_loss = eval_loss.cpu().numpy().item(0)

    # If using internal loss scale, all_finites are handled internally too.
    if use_mixed_precision and not use_internal_loss_scale:
        return actual_losses, actual_all_finites, eval_loss
    else:
        return actual_losses, eval_loss
Example No. 21

# An example input you would normally provide to your model's forward() method.
input = torch.ones(1, 3, 256, 128)
raw_output = model(input)
print(raw_output.size())
torch.onnx.export(model,
                  input,
                  'osnet_x0_25_mcmt17.onnx',
                  verbose=False,
                  export_params=True)

print(
    "-------------------------check model---------------------------------------\n"
)

try:
    onnx_model = onnx.load("osnet_x0_25_mcmt17.onnx")
    onnx.checker.check_model(onnx_model)
    graph_output = onnx.helper.printable_graph(onnx_model.graph)
    with open("graph_output.txt", mode="w") as fout:
        fout.write(graph_output)
except Exception as exc:
    print("Something went wrong:", exc)

import onnxruntime
import numpy as np

ort_session = onnxruntime.InferenceSession("osnet_x0_25_mcmt17.onnx")


def to_numpy(tensor):
    return tensor.detach().cpu().numpy()
Example No. 22
 def __init__(self, onnx_model_path):
     self.model = onnx.load(onnx_model_path)
Example No. 23

model = Net()
model.eval()

# Convert from PyTorch to ONNX
input_names = ['input']
output_names = ['output']
dummy_input = torch.randn(1, 10, device='cpu')
torch.onnx.export(model,
                  dummy_input,
                  'onnx/linear.onnx',
                  verbose=False,
                  input_names=input_names,
                  output_names=output_names)

# Convert From ONNX to TF
onnx_model = onnx.load("onnx/linear.onnx")
tf_rep = prepare(onnx_model, strict=False)
tf_rep.export_graph("onnx/linear.pb")

# Test TF model on random input data.
input_tensor = np.random.random_sample((1, 10))
graph_def = tf.GraphDef()
graph_def.ParseFromString(open('onnx/linear.pb', 'rb').read())
tf.import_graph_def(graph_def, name='')

graph = tf.get_default_graph()

input_node = graph.get_tensor_by_name('input:0')
output_node = graph.get_tensor_by_name('output:0')

with tf.Session() as sess:
    output = sess.run(output_node, feed_dict={input_node: input_tensor})
    print(output)
Example No. 24
        args.task_name, )
    eval_dataset = load_and_cache_examples(args,
                                           args.task_name,
                                           tokenizer,
                                           evaluate=True)

    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, \
        batch_size=args.eval_batch_size)

    def eval_func(model):
        return evaluate_onnxrt(args, model, tokenizer, eval_dataloader)

    if args.benchmark:
        model = onnx.load(args.model_path)

        from lpot.data.datasets.dummy_dataset import DummyDataset
        from lpot.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader
        shapes, lows, highs = parse_dummy_input(model, args.benchmark_nums,
                                                args.max_seq_length)
        dummy_dataset = DummyDataset(shapes,
                                     low=lows,
                                     high=highs,
                                     dtype="int64")
        dummy_dataloader = ONNXRTDataLoader(dummy_dataset)

        print(
            '---------------------------------------------------------------')
        if args.accuracy_only:
            results = evaluate_onnxrt(args, model, tokenizer, eval_dataloader)
Example No. 25

                fontsize=8,
                textcoords='offset points',
                ha='right',
                va='bottom',
                bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))

###########################################
# Remove one layer at the end
# ---------------------------
#
# The last layer is often removed before the model is
# inserted in a pipeline. Let's see how to do that.
# First, we need the list of outputs for every node.

model_onnx = onnx.load(BytesIO(model_bytes))
outputs = []
for node in model_onnx.graph.node:
    print(node.name, node.output)
    outputs.extend(node.output)

#################################
# We select one of the last ones.

selected = outputs[-3]
print("selected", selected)

#################################
# And we tell *OnnxTransformer* to use that
# specific one and to flatten the output
# as the dimension is not a matrix.
Example No. 26
    def save(self, filename, x, metaData, save_model=False, export_model=False):

        """
        Saves the trained model to four files: a JSON file with the settings, a pickled pyTorch state dict
        file, and numpy files for the mean and variance of the inputs (used for input scaling).
        Also exports model to onnx if export_model is set to True. 
        Parameters
        ----------
        filename : str
            Path to the files. '_settings.json' and '_state_dict.pl' will be added.
        save_model : bool, optional
            If True, the whole model is saved in addition to the state dict. This is not necessary for loading it
            again with Estimator.load(), but can be useful for debugging, for instance to plot the computational graph.
        export_model : bool, optional
            If True, the whole model is exported to .onnx format to be loaded within a C++ envirnoment. 
        Returns
        -------
            None
        """

        logger.info("Saving model to %s", filename)

        if self.model is None:
            raise ValueError("No model -- train or load model before saving!")

        # Check paths
        create_missing_folders([os.path.dirname(filename)])

        # Save settings
        logger.debug("Saving settings to %s_settings.json", filename)

        settings = self._wrap_settings()
        with open(filename + "_settings.json", "w") as f:
            json.dump(settings, f)
        # Save scaling
        if self.x_scaling_stds is not None and self.x_scaling_means is not None:
            logger.debug("Saving input scaling information to %s_x_means.npy and %s_x_stds.npy", filename, filename)
            np.save(filename + "_x_means.npy", self.x_scaling_means)
            np.save(filename + "_x_stds.npy", self.x_scaling_stds)
            np.save(filename + "_x_mins.npy", self.x_scaling_mins)
            np.save(filename + "_x_maxs.npy", self.x_scaling_maxs)

        # Save state dict
        logger.debug("Saving state dictionary to %s_state_dict.pt", filename)
        torch.save(self.model.state_dict(), filename + "_state_dict.pt")
        
        # Save model
        if save_model:
            logger.debug("Saving model to %s_model.pt", filename)
            torch.save(self.model, filename + "_model.pt")

        # Export model to onnx
        if export_model:
            x = load_and_check(x)
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            dummy_input = torch.from_numpy(x[0].reshape(1, -1)).float().to(device)
            torch.onnx.export(self.model, dummy_input, filename + ".onnx", export_params=True,
                              input_names=['input'], output_names=['r_hat', 's_hat'], verbose=True)

        # Manipulate the saved ONNX model using the 'onnx' module directly
        #  Note: This is inefficient due to I/O reasons, however
        #        the torch.onnx interface seemingly has no options for this
        if export_model and os.path.isfile(filename+".onnx"):
            
            ####################################
            ##        ONNXRUNTIME
            ## Example using onnxruntime instead of onnx
            ## Keeping only for posterity for now
            ####################################
            ## Start the normal onnxruntime session using the model
            ## just saved
            #ort_session = ort.InferenceSession(filename+".onnx")
            ## Model Meta data
            #metaData = ort_session.get_modelmeta()
            ## Get the custom map
            #CustomMap = metaData.custom_metadata_map
            #print("Custom Meta-Data Map: {}".format(CustomMap))

            ## Define a new custom meta data map
            #CustomMap_new = {"Var1" : 200.0, 
            #                 "Var2" : 5.0,
            #                 "Var3" : 1000.0,
            #                 "Var4" : 400.0,
            #                 "Var5" : 6.0,
            #             }
            #
            ## Load new custom map into mode
            #metaData.custom_metadata_map = CustomMap_new

            # It seems the ONNX model cannot be saved from an onnxruntime InferenceSession
            #   -> Makes sense, given InferenceSession is designed to access and infer, not
            #      to act as a data/model editor session.
            #ort_session.SaveModelMetadata() # Believe that this does not work
            ####################################
            ####################################
            
            ####################################
            ##        ONNX
            ####################################
            # Define a new custom meta data map
            #CustomMap_new = {"Var1" : 200.0, 
            #                 "Var2" : 5.0,
            #                 "Var3" : 1000.0,
            #                 "Var4" : 400.0,
            #                 "Var5" : 6.0,
            #             }
            # Load model
            model = onnx.load(filename+".onnx")
            # Get Meta Data
            for index, (cust_key, cust_var) in enumerate(metaData.items()):
                meta = model.metadata_props.add()
                meta.key = cust_key
                meta.value = str(cust_var)
                # Check value
                logger.info(" New meta data: %s ", model.metadata_props[index])

                
            # Save model
            onnx.save(model, filename+"_new"+".onnx")
            
            # Start the normal onnxruntime session using the model
            # just saved to check that the model was saved with the correct 
            # metadata
            ort_session = ort.InferenceSession(filename+"_new"+".onnx")
            # Model Meta data
            metaData = ort_session.get_modelmeta()
            # Print Metadata
            CustomMap = metaData.custom_metadata_map
            logger.info(" Custom Meta-Data Map: %s",CustomMap)
            # Need to close the ort session for comleteness (C-style)
            ####################################
            ###################################
            

        # Tar model if training is done on GPU
        if torch.cuda.is_available():
            tar = tarfile.open("models_out.tar.gz", "w:gz")
            for name in [filename+".onnx", filename + "_x_stds.npy", filename + "_x_means.npy",  filename + "_x_mins.npy",  filename + "_x_maxs.npy", filename + "_settings.json",  filename + "_state_dict.pt"]:
                tar.add(name)
            tar.close()
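
As a side note to the metadata round-trip above, the custom entries can also be read back directly with onnx, without an onnxruntime session (a minimal sketch; the file name is hypothetical):

import onnx

saved = onnx.load("model_new.onnx")
for prop in saved.metadata_props:
    print(prop.key, prop.value)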
Example No. 27
import onnx
import sys

filename = "squeezenet.onnx"
if len(sys.argv) == 2:
    filename = sys.argv[1]

print('Loading ONNX model %s' % filename)
# Load the ONNX model
model = onnx.load(filename)

# Check that the IR is well formed
onnx.checker.check_model(model)

# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
Example No. 28
# Load pretrained ONNX model
# ---------------------------------------------
# The example super-resolution model used here is exactly the same model as in the ONNX tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# We skip the PyTorch model construction part and download the saved ONNX model instead.
model_url = ''.join([
    'https://gist.github.com/zhreshold/',
    'bcda4716699ac97ea44f791c24310193/raw/',
    '93672b029103648953c4e5ad3ac3aadf346a4cdc/', 'super_resolution_0.2.onnx'
])
#download(model_url, 'super_resolution.onnx', True)

download(model_url, 'super_resolution.onnx')

# now you have super_resolution.onnx on disk
onnx_model = onnx.load('super_resolution.onnx')
# we can load the graph as NNVM compatible model
sym, params = nnvm.frontend.from_onnx(onnx_model)

######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
download(img_url, 'cat.png')
img = Image.open('cat.png').resize((224, 224))
img_ycbcr = img.convert("YCbCr")  # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
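
######################################################################
# From here, a sketch of compiling and running the imported graph with
# the (long-deprecated) nnvm/TVM stack; the input name depends on the
# ONNX graph, so 'input_0' and the 'llvm' target are assumptions.
import nnvm.compiler
import tvm
from tvm.contrib import graph_runtime

input_name = 'input_0'  # assumption: inspect sym to find the real input name
graph, lib, params = nnvm.compiler.build(sym, 'llvm', {input_name: x.shape}, params=params)
module = graph_runtime.create(graph, lib, tvm.cpu())
module.set_input(input_name, tvm.nd.array(x.astype('float32')))
module.set_input(**params)
module.run()
out_y = module.get_output(0).asnumpy()
print(out_y.shape)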
Example No. 29
import sys

import onnx
from onnx.shape_inference import infer_shapes

onnxFileName = str(sys.argv[1])
om = onnx.load(onnxFileName)

# Pin the input to a fixed NCHW shape.
om.graph.input[0].type.tensor_type.shape.dim[0].dim_value = 1
om.graph.input[0].type.tensor_type.shape.dim[1].dim_value = 3
om.graph.input[0].type.tensor_type.shape.dim[2].dim_value = 540
om.graph.input[0].type.tensor_type.shape.dim[3].dim_value = 960

# Drop stale shape annotations so they can be re-inferred below.
v = om.graph.value_info
while len(v) > 0:
    v.pop()

ots = om.graph.output
h = 17
for i in range(3):  # NOTE: adjust this loop to your own model's outputs
    ots[i * 4 + 0].type.tensor_type.shape.dim[2].dim_value = h
    ots[i * 4 + 1].type.tensor_type.shape.dim[2].dim_value = h
    ots[i * 4 + 2].type.tensor_type.shape.dim[2].dim_value = h
    ots[i * 4 + 3].type.tensor_type.shape.dim[2].dim_value = h
    h *= 2

om = infer_shapes(om)
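
# One gap worth noting: the script infers shapes but never writes the result
# back to disk. A one-line completion (the output file name is an assumption):
onnx.save(om, onnxFileName.replace('.onnx', '_fixed.onnx'))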
Example No. 30
def load_model():
    return onnx.load('../assets/emotion_ferplus/model.onnx')
Example No. 31
# By providing the model URL, file name, and model type through the module, TVM will download
# the model and save it to disk. For the instance of an ONNX model, you can
# then load it into memory using the ONNX runtime.
#
# .. admonition:: Working with Other Model Formats
#
#   TVM supports many popular model formats. A list can be found in the
#   :ref:`Compile Deep Learning Models <tutorial-frontend>` section of the TVM
#   Documentation.

model_url = ("https://github.com/onnx/models/raw/main/"
             "vision/classification/resnet/model/"
             "resnet50-v2-7.onnx")

model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx")
onnx_model = onnx.load(model_path)

# Seed numpy's RNG to get consistent results
np.random.seed(0)

################################################################################
# Downloading, Preprocessing, and Loading the Test Image
# ------------------------------------------------------
#
# Each model is particular when it comes to expected tensor shapes, formats and
# data types. For this reason, most models require some pre and
# post-processing, to ensure the input is valid and to interpret the output.
# TVMC has adopted NumPy's ``.npz`` format for both input and output data.
#
# As input for this tutorial, we will use the image of a cat, but you can feel
# free to substitute this image for any of your choosing.
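
################################################################################
# For instance, the usual ResNet50 preprocessing can be sketched as follows;
# the ImageNet mean/std, the 224x224 input size, and the file names are
# assumptions carried over from the standard recipe.
from PIL import Image
import numpy as np

img = Image.open('cat.png').convert('RGB').resize((224, 224))
data = np.asarray(img).astype('float32').transpose((2, 0, 1)) / 255.0  # HWC -> CHW, [0, 1]
mean = np.array([0.485, 0.456, 0.406], dtype='float32').reshape(3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype='float32').reshape(3, 1, 1)
data = ((data - mean) / std)[np.newaxis, :]  # NCHW batch of 1
np.savez('imagenet_cat.npz', data=data)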
Example No. 32
def onnx2tensorrt(onnx_file,
                  trt_file,
                  input_shape,
                  verify=False,
                  workspace_size=1):
    """Create tensorrt engine from onnx model.

    Args:
        onnx_file (str): Filename of the input ONNX model file.
        trt_file (str): Filename of the output TensorRT engine file.
        input_shape (list[int]): Input shape of the model,
            e.g. [1, 3, 224, 224].
        verify (bool, optional): Whether to verify the converted model.
            Defaults to False.
        workspace_size (int, optional): Maximum GPU workspace size in GiB.
            Defaults to 1.
    """
    import onnx
    from mmcv.tensorrt import TRTWraper, onnx2trt, save_trt_engine

    onnx_model = onnx.load(onnx_file)
    # create TensorRT engine and wrapper
    opt_shape_dict = {'input': [input_shape, input_shape, input_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_model,
        opt_shape_dict,
        fp16_mode=False,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')

    if verify:
        import torch
        import onnxruntime as ort

        input_img = torch.randn(*input_shape)
        input_img_cpu = input_img.detach().cpu().numpy()
        input_img_cuda = input_img.cuda()

        # Get results from ONNXRuntime
        session_options = ort.SessionOptions()
        sess = ort.InferenceSession(onnx_file, session_options)

        # get input and output names
        input_names = [_.name for _ in sess.get_inputs()]
        output_names = [_.name for _ in sess.get_outputs()]

        onnx_outputs = sess.run(None, {
            input_names[0]: input_img_cpu,
        })

        # Get results from TensorRT
        trt_model = TRTWraper(trt_file, input_names, output_names)
        with torch.no_grad():
            trt_outputs = trt_model({input_names[0]: input_img_cuda})
        trt_outputs = [
            trt_outputs[_].detach().cpu().numpy() for _ in output_names
        ]

        # Compare results
        np.testing.assert_allclose(
            onnx_outputs[0], trt_outputs[0], rtol=1e-05, atol=1e-05)
        print('The numerical values are the same ' +
              'between ONNXRuntime and TensorRT')
Example No. 33
File: read.py Project: Daiver/jff
import onnx
import sys

# Load the ONNX model
#model = onnx.load("alexnet.proto")
modelName = "torch_linear.proto"
if len(sys.argv) > 1:
    modelName = sys.argv[1]
model = onnx.load(modelName)

onnx.checker.check_model(model)
print(onnx.helper.printable_graph(model.graph))
Example No. 34
import sys
import timeit

import numpy as np
import onnx
import ngraph as ng
from ngraph_onnx.onnx_importer.importer import import_onnx_model

model = onnx.load(sys.argv[1])

ng_func = import_onnx_model(model)
#print(ng_model)

picture = np.ones([1, 3, 224, 224], dtype=np.float32)

runtime = ng.runtime(backend_name='CPU')
#runtime = ng.runtime(backend_name='GPU')
resnet = runtime.computation(ng_func)
#print(resnet)

def run():
  resnet(picture)

n = 100

print(timeit.timeit('run()', globals=globals(), number=n) / n * 1000, 'msec')
Example No. 35
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import onnx

onnx_protobuf = onnx.load('/path/to/model/cntk_ResNet20_CIFAR10/model.onnx')

# Convert a serialized ONNX model to an ngraph model
from ngraph_onnx.onnx_importer.importer import import_onnx_model
ng_model = import_onnx_model(onnx_protobuf)[0]


# Using an ngraph runtime (CPU backend), create a callable computation
import ngraph as ng
runtime = ng.runtime(backend_name='CPU')
resnet = runtime.computation(ng_model['output'], *ng_model['inputs'])

# Load or create an image
import numpy as np
picture = np.ones([1, 3, 32, 32])
Example No. 36
def pytorch2onnx(model,
                 mm_inputs,
                 opset_version=11,
                 show=False,
                 output_file='tmp.onnx',
                 verify=False,
                 dynamic_export=False):
    """Export Pytorch model to ONNX model and verify the outputs are same
    between Pytorch and ONNX.

    Args:
        model (nn.Module): Pytorch model we want to export.
        mm_inputs (dict): Contain the input tensors and img_metas information.
        opset_version (int): The onnx op version. Default: 11.
        show (bool): Whether print the computation graph. Default: False.
        output_file (string): The path to where we store the output ONNX model.
            Default: `tmp.onnx`.
        verify (bool): Whether compare the outputs between Pytorch and ONNX.
            Default: False.
        dynamic_export (bool): Whether to export ONNX with dynamic axis.
            Default: False.
    """
    model.cpu().eval()
    test_mode = model.test_cfg.mode

    if isinstance(model.decode_head, nn.ModuleList):
        num_classes = model.decode_head[-1].num_classes
    else:
        num_classes = model.decode_head.num_classes

    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')

    img_list = [img[None, :] for img in imgs]
    img_meta_list = [[img_meta] for img_meta in img_metas]
    # update img_meta
    img_list, img_meta_list = _update_input_img(img_list, img_meta_list)

    # replace original forward function
    origin_forward = model.forward
    model.forward = partial(model.forward,
                            img_metas=img_meta_list,
                            return_loss=False,
                            rescale=True)
    dynamic_axes = None
    if dynamic_export:
        if test_mode == 'slide':
            dynamic_axes = {'input': {0: 'batch'}, 'output': {1: 'batch'}}
        else:
            dynamic_axes = {
                'input': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                },
                'output': {
                    1: 'batch',
                    2: 'height',
                    3: 'width'
                }
            }

    register_extra_symbolics(opset_version)
    with torch.no_grad():
        torch.onnx.export(model, (img_list, ),
                          output_file,
                          input_names=['input'],
                          output_names=['output'],
                          export_params=True,
                          keep_initializers_as_inputs=False,
                          verbose=show,
                          opset_version=opset_version,
                          dynamic_axes=dynamic_axes)
        print(f'Successfully exported ONNX model: {output_file}')
    model.forward = origin_forward

    if verify:
        # check by onnx
        import onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)

        if dynamic_export and test_mode == 'whole':
            # scale image for dynamic shape test
            img_list = [resize(_, scale_factor=1.5) for _ in img_list]
            # concatenate flipped images for a batch test
            flip_img_list = [_.flip(-1) for _ in img_list]
            img_list = [
                torch.cat((ori_img, flip_img), 0)
                for ori_img, flip_img in zip(img_list, flip_img_list)
            ]

            # update img_meta
            img_list, img_meta_list = _update_input_img(
                img_list, img_meta_list, test_mode == 'whole')

        # check the numerical value
        # get pytorch output
        with torch.no_grad():
            pytorch_result = model(img_list, img_meta_list, return_loss=False)
            pytorch_result = np.stack(pytorch_result, 0)

        # get onnx output
        input_all = [node.name for node in onnx_model.graph.input]
        input_initializer = [
            node.name for node in onnx_model.graph.initializer
        ]
        net_feed_input = list(set(input_all) - set(input_initializer))
        assert (len(net_feed_input) == 1)
        sess = rt.InferenceSession(output_file)
        onnx_result = sess.run(
            None, {net_feed_input[0]: img_list[0].detach().numpy()})[0][0]
        # show segmentation results
        if show:
            import cv2
            import os.path as osp
            img = img_meta_list[0][0]['filename']
            if not osp.exists(img):
                img = imgs[0][:3, ...].permute(1, 2, 0) * 255
                img = img.detach().numpy().astype(np.uint8)
                ori_shape = img.shape[:2]
            else:
                ori_shape = LoadImage()({'img': img})['ori_shape']

            # resize onnx_result to ori_shape
            onnx_result_ = cv2.resize(onnx_result[0].astype(np.uint8),
                                      (ori_shape[1], ori_shape[0]))
            show_result_pyplot(model,
                               img, (onnx_result_, ),
                               palette=model.PALETTE,
                               block=False,
                               title='ONNXRuntime',
                               opacity=0.5)

            # resize pytorch_result to ori_shape
            pytorch_result_ = cv2.resize(pytorch_result[0].astype(np.uint8),
                                         (ori_shape[1], ori_shape[0]))
            show_result_pyplot(model,
                               img, (pytorch_result_, ),
                               title='PyTorch',
                               palette=model.PALETTE,
                               opacity=0.5)
        # compare results
        np.testing.assert_allclose(
            pytorch_result.astype(np.float32) / num_classes,
            onnx_result.astype(np.float32) / num_classes,
            rtol=1e-5,
            atol=1e-5,
            err_msg='The outputs are different between PyTorch and ONNX')
        print('The outputs are the same between PyTorch and ONNX')
Example No. 37
    type=str,
    help='Path to text file containing the names of all possible categories.')
parser.add_argument('--image_path', type=str, help='Path to image file.')
parser.add_argument('--image_list', type=str, help='Path to image list.')
args = parser.parse_args()

IMAGE_FILENAME = args.image_path  # '/home/meerkat/git/SpeciesClassification/PyTorchClassification/elephant.jpg'
MODEL_FILENAME = args.frozen_graph  # '/ai4edevfs/models/iNat/iNat_all_extended/demosite-model-ensemble-resnext-inceptionV4-560-81.0/iNat_all_extended_ensemble_resnext_inceptionV4_560_81.9_model.onnx'
CLASSLIST_FILENAME = args.classlist  # '/ai4edevfs/models/iNat/iNat_all_extended/demosite-model-ensemble-resnext-inceptionV4-560-81.0/iNat_all_extended_ensemble_resnext_inceptionV4_560_81.9_classes.txt'

# Target mean / std; should match the values used at training time
MODEL_IMAGE_SIZE = 560
MODEL_RESIZE_SIZE = 640
OVERSIZE_FACTOR = 1.3

model = onnx.load(MODEL_FILENAME)

if args.image_list:
    with open(args.image_list, 'rt') as fi:
        all_images = fi.read().splitlines()
else:
    all_images = [IMAGE_FILENAME]

for IMAGE_FILENAME in all_images:

    if not os.path.isfile(IMAGE_FILENAME):
        print("Did not find " + IMAGE_FILENAME)
        continue

    #%% Load and prepare image (using cv2)
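
    # The snippet cuts off here; a hedged sketch of the cv2 preparation step it
    # announces, using the constants above (center-crop after resize; the
    # ImageNet-style normalization values are assumptions, and cv2/numpy are
    # assumed to be imported earlier in the file as cv2/np):
    img = cv2.cvtColor(cv2.imread(IMAGE_FILENAME), cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (MODEL_RESIZE_SIZE, MODEL_RESIZE_SIZE))
    offset = (MODEL_RESIZE_SIZE - MODEL_IMAGE_SIZE) // 2
    img = img[offset:offset + MODEL_IMAGE_SIZE, offset:offset + MODEL_IMAGE_SIZE]
    img = img.astype(np.float32).transpose((2, 0, 1)) / 255.0  # HWC -> CHW
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
    batch = ((img - mean) / std)[np.newaxis, :]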
Example No. 38
def detect(save_img=False):
    imgsz = (
        320, 192
    ) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
    out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
    webcam = source == '0' or source.startswith('rtsp') or source.startswith(
        'http') or source.endswith('.txt')

    # Initialize
    device = torch_utils.select_device(
        device='cpu' if ONNX_EXPORT else opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder

    # Initialize model
    model = Darknet(opt.cfg, imgsz)

    # Load weights
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        model.load_state_dict(
            torch.load(weights, map_location=device)['model'])
    else:  # darknet format
        load_darknet_weights(model, weights)

    # Second-stage classifier
    classify = False
    if classify:
        modelc = torch_utils.load_classifier(name='resnet101',
                                             n=2)  # initialize
        modelc.load_state_dict(
            torch.load('weights/resnet101.pt',
                       map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Eval mode
    model.to(device).eval()

    # Fuse Conv2d + BatchNorm2d layers
    # model.fuse()

    # Export mode
    if ONNX_EXPORT:
        model.fuse()
        img = torch.zeros((1, 3) + imgsz)  # (1, 3, 320, 192)
        f = opt.weights.replace(opt.weights.split('.')[-1],
                                'onnx')  # *.onnx filename
        torch.onnx.export(model,
                          img,
                          f,
                          verbose=False,
                          opset_version=11,
                          input_names=['images'],
                          output_names=['classes', 'boxes'])

        # Validate exported model
        import onnx
        model = onnx.load(f)  # Load the ONNX model
        onnx.checker.check_model(model)  # Check that the IR is well formed
        print(onnx.helper.printable_graph(
            model.graph))  # Print a human readable representation of the graph
        return

    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = load_classes(opt.names)
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img.float()) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        t2 = torch_utils.time_synchronized()

        # to float
        if half:
            pred = pred.float()

        # Apply NMS
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   multi_label=False,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections for image i
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1,
                                          0]]  #  normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from imgsz to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in det:
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                                gn).view(-1).tolist()  # normalized xywh
                        with open(save_path[:save_path.rfind('.')] + '.txt',
                                  'a') as file:
                            file.write(('%g ' * 5 + '\n') %
                                       (cls, *xywh))  # label format

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy,
                                     im0,
                                     label=label,
                                     color=colors[int(cls)])

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*opt.fourcc),
                            fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))
Example No. 39
def pytorch2onnx(model: nn.Module,
                 model_type: str,
                 img_path: str,
                 verbose: bool = False,
                 show: bool = False,
                 opset_version: int = 11,
                 output_file: str = 'tmp.onnx',
                 verify: bool = False,
                 dynamic_export: bool = False,
                 device_id: int = 0):
    """Export Pytorch model to ONNX model and verify the outputs are same
    between Pytorch and ONNX.

    Args:
        model (nn.Module): Pytorch model we want to export.
        model_type (str): Model type, detection or recognition model.
        img_path (str): We need to use this input to execute the model.
        opset_version (int): The onnx op version. Default: 11.
        verbose (bool): Whether print the computation graph. Default: False.
        show (bool): Whether visialize final results. Default: False.
        output_file (string): The path to where we store the output ONNX model.
            Default: `tmp.onnx`.
        verify (bool): Whether compare the outputs between Pytorch and ONNX.
            Default: False.
        dynamic_export (bool): Whether apply dynamic export.
            Default: False.
        device_id (id): Device id to place model and data.
            Default: 0
    """
    device = torch.device(type='cuda', index=device_id)
    model.to(device).eval()
    _convert_batchnorm(model)

    # prepare inputs
    mm_inputs = _prepare_data(cfg=model.cfg, imgs=img_path)
    imgs = mm_inputs.pop('img')
    img_metas = mm_inputs.pop('img_metas')

    if isinstance(imgs, list):
        imgs = imgs[0]

    img_list = [img[None, :].to(device) for img in imgs]
    # update img_meta
    img_list, img_metas = _update_input_img(img_list, img_metas)

    origin_forward = model.forward
    if model_type == 'det':
        model.forward = partial(model.simple_test,
                                img_metas=img_metas,
                                rescale=True)
    else:
        model.forward = partial(model.forward,
                                img_metas=img_metas,
                                return_loss=False,
                                rescale=True)

    # PyTorch 1.3 has some buggy ONNX symbolic functions; work around them
    # by registering replacement ops for the affected operators
    register_extra_symbolics(opset_version)
    dynamic_axes = None
    if dynamic_export and model_type == 'det':
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'height',
                3: 'width'
            },
            'output': {
                0: 'batch',
                2: 'height',
                3: 'width'
            }
        }
    elif dynamic_export and model_type == 'recog':
        dynamic_axes = {
            'input': {
                0: 'batch',
                3: 'width'
            },
            'output': {
                0: 'batch',
                3: 'width'
            }
        }
    with torch.no_grad():
        torch.onnx.export(model, (img_list[0], ),
                          output_file,
                          input_names=['input'],
                          output_names=['output'],
                          export_params=True,
                          keep_initializers_as_inputs=False,
                          verbose=verbose,
                          opset_version=opset_version,
                          dynamic_axes=dynamic_axes)
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # check by onnx
        import onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)

        scale_factor = (0.5, 0.5) if model_type == 'det' else (1, 0.5)
        if dynamic_export:
            # scale image for dynamic shape test
            img_list = [
                nn.functional.interpolate(_, scale_factor=scale_factor)
                for _ in img_list
            ]

            # update img_meta
            img_list, img_metas = _update_input_img(img_list, img_metas)

        # check the numerical value
        # get pytorch output
        with torch.no_grad():
            model.forward = origin_forward
            pytorch_out = model.simple_test(img_list[0],
                                            img_metas[0],
                                            rescale=True)

        # get onnx output
        if model_type == 'det':
            onnx_model = ONNXRuntimeDetector(output_file, model.cfg, device_id)
        else:
            onnx_model = ONNXRuntimeRecognizer(output_file, model.cfg,
                                               device_id)
        onnx_out = onnx_model.simple_test(img_list[0],
                                          img_metas[0],
                                          rescale=True)

        # compare results
        same_diff = 'same'
        if model_type == 'recog':
            for onnx_result, pytorch_result in zip(onnx_out, pytorch_out):
                if (onnx_result['text'] != pytorch_result['text']
                        or not np.allclose(np.array(onnx_result['score']),
                                           np.array(pytorch_result['score']),
                                           rtol=1e-4,
                                           atol=1e-4)):
                    same_diff = 'different'
                    break
        else:
            for onnx_result, pytorch_result in zip(
                    onnx_out[0]['boundary_result'],
                    pytorch_out[0]['boundary_result']):
                if not np.allclose(np.array(onnx_result),
                                   np.array(pytorch_result),
                                   rtol=1e-4,
                                   atol=1e-4):
                    same_diff = 'different'
                    break
        print('The outputs are {} between PyTorch and ONNX'.format(same_diff))

        if show:
            onnx_img = onnx_model.show_result(img_path,
                                              onnx_out[0],
                                              out_file='onnx.jpg',
                                              show=False)
            pytorch_img = model.show_result(img_path,
                                            pytorch_out[0],
                                            out_file='pytorch.jpg',
                                            show=False)
            if onnx_img is None:
                onnx_img = cv2.imread(img_path)
            if pytorch_img is None:
                pytorch_img = cv2.imread(img_path)

            cv2.imshow('PyTorch', pytorch_img)
            cv2.imshow('ONNXRuntime', onnx_img)
            cv2.waitKey()
    return
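# A minimal verification sketch using onnxruntime directly instead of the
# ONNXRuntimeDetector/ONNXRuntimeRecognizer wrappers above; the file name
# 'tmp.onnx' and the 1x3x224x224 input shape are assumptions.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('tmp.onnx')
dummy = np.random.randn(1, 3, 224, 224).astype(np.float32)  # assumed shape
onnx_out = sess.run(['output'], {'input': dummy})[0]
# the corresponding PyTorch output could then be compared with the same
# tolerances as used above, e.g.:
# np.testing.assert_allclose(onnx_out, pytorch_out, rtol=1e-4, atol=1e-4)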
Esempio n. 40
0
        import urllib.request
        urllib.request.urlretrieve(url, path)

######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model as in
# the ONNX tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# We skip the PyTorch model construction part and download the saved ONNX model.
model_url = ''.join(['https://gist.github.com/zhreshold/',
                     'bcda4716699ac97ea44f791c24310193/raw/',
                     '93672b029103648953c4e5ad3ac3aadf346a4cdc/',
                     'super_resolution_0.2.onnx'])
download(model_url, 'super_resolution.onnx', True)
# now you have super_resolution.onnx on disk
onnx_graph = onnx.load('super_resolution.onnx')
# we can load the graph as NNVM compatible model
sym, params = nnvm.frontend.from_onnx(onnx_graph)

######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
download(img_url, 'cat.png')
img = Image.open('cat.png').resize((224, 224))
img_ycbcr = img.convert("YCbCr")  # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
Esempio n. 41
0
import onnx
from onnxsim import simplify
input_path = '/data/ai/yolov5/pet2kyolov5n.onnx'
output_path = '/data/ai/yolov5/pet2kyolov5n_simpy.onnx'

onnx_model = onnx.load(input_path)  # load onnx model
model_simp, check = simplify(onnx_model)
assert check, "Simplified ONNX model could not be validated"
onnx.save(model_simp, output_path)
print('finished simplifying onnx')
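# Optional numerical check (a sketch): run the original and simplified models
# on the same random input with onnxruntime and compare the outputs; the input
# name 'images' and the 1x3x640x640 shape are assumptions about this model.
import numpy as np
import onnxruntime as ort

x = np.random.randn(1, 3, 640, 640).astype(np.float32)
ref = ort.InferenceSession(input_path).run(None, {'images': x})
simp = ort.InferenceSession(output_path).run(None, {'images': x})
for a, b in zip(ref, simp):
    np.testing.assert_allclose(a, b, rtol=1e-3, atol=1e-5)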
Esempio n. 42
0
import numpy as np
import onnx
import onnxruntime

from tvm import relay
import tvm

onnx_model = onnx.load('05_dense_b.onnx')

input_name1 = 'X'
input_shape1 = (1, 1)
shape_dict = {input_name1: input_shape1}

mod, params = relay.frontend.from_onnx(model=onnx_model, shape=shape_dict)

# Compilation
opt_level = 3
target = 'llvm'
with relay.build_config(opt_level=opt_level):
    graph, lib, params = relay.build_module.build(mod, target, params=params)

# dump the generated LLVM code to a file for inspection
with open("05_dense_b.ll", "w") as out_file:
    out_file.write(lib.get_source())
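# A sketch of actually executing the compiled module, using the older TVM
# graph_runtime API that matches relay.build_module.build above (newer TVM
# versions use tvm.contrib.graph_executor instead).
import numpy as np
from tvm.contrib import graph_runtime

ctx = tvm.cpu(0)
module = graph_runtime.create(graph, lib, ctx)
module.set_input(**params)
module.set_input(input_name1, np.random.randn(*input_shape1).astype(np.float32))
module.run()
print(module.get_output(0).asnumpy())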
Esempio n. 43
0
                                                  bias=bias,
                                                  dilation=dilation,
                                                  groups=groups)
                                model.eval()

                                input_np = np.random.uniform(
                                    0, 1, (1, 3, dimension, 224, 224))  # NCDHW
                                input_var = Variable(
                                    torch.FloatTensor(input_np))

                                torch.onnx.export(model,
                                                  input_var,
                                                  "_tmpnet.onnx",
                                                  verbose=True,
                                                  input_names=['test_in'],
                                                  output_names=['test_out'])

                                onnx_model = onnx.load('_tmpnet.onnx')
                                k_model = onnx_to_keras(
                                    onnx_model, ['test_in'])

                                error = check_torch_keras_error(
                                    model, k_model, input_np)

                                print('Error:', error)

                            if max_error < error:
                                max_error = error

    print('Max error: {0}'.format(max_error))
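# The fragment above sits inside nested loops over conv hyper-parameters; a
# minimal sketch of the kind of model it assumes, matching the NCDHW input
# shape (1, 3, dimension, 224, 224). All values here are assumptions.
import torch.nn as nn

model = nn.Conv3d(in_channels=3, out_channels=8, kernel_size=3,
                  bias=True, dilation=1, groups=1)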
Esempio n. 44
0
            input_names=['images'],
            dynamic_axes={
                'images': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                },  # size(1,3,640,640)
                'output': {
                    0: 'batch',
                    2: 'y',
                    3: 'x'
                }
            } if opt.dynamic else None)

        # Checks
        model_onnx = onnx.load(f)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model
        # print(onnx.helper.printable_graph(model_onnx.graph))  # print

        # Simplify
        if opt.simplify:
            try:
                check_requirements(['onnx-simplifier'])
                import onnxsim

                print(
                    f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...'
                )
                model_onnx, check = onnxsim.simplify(
                    model_onnx,
                    dynamic_input_shape=opt.dynamic,
Esempio n. 45
0
# can be expressed using Gemm alternatively
node = oh.make_node('MatMul', ["pdf","cdfmat"], ["cdf"])

# ArgMax(Clip(cdf + (1 - rand), max=1))
# node_rand_sub = oh.make_node('Sub', ["one","r"], ["rr"])
add_node = oh.make_node('Add', ["cdf", "r"], ["temp"], broadcast=1)
clip_node = oh.make_node('Clip', ["temp"], ["cdf_clipped"], min=0.0, max=1.0)
topk_node = oh.make_node('ArgMax', ["cdf_clipped"], ["chosen_action"], axis=1, keepdims=0)

graph = oh.make_graph([one, one_int, pdf_temp, pdf_temp_2,
                       max_score, normalized_scores, # top_action_1, # top_action_2,
                       # one_hot_top_action_int, one_hot_top_action_float, 
                       one_hot_top_action,
                       exploit_prob, exploit_top_action,
                       pdf, # node_rand_sub, 
                       node, node_rand, add_node, clip_node, topk_node], 
                    'compute_graph', input_tensors, output_tensors, initializer_tensors)
model = oh.make_model(graph, producer_name='explore')

f = open("model1.onnx", "wb")
f.write(model.SerializeToString())
f.close()

tf_model = onnx.load('model1.onnx')

tf_rep = prepare(tf_model)

sample = tf_rep.run(np.asarray([[.3,.3,.4]]))
print(sample)
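# For context, a self-contained miniature of the onnx.helper pattern above:
# a single Add node assembled into a checked model (names are illustrative).
import onnx
import onnx.helper as oh
from onnx import TensorProto

a = oh.make_tensor_value_info('a', TensorProto.FLOAT, [1, 3])
b = oh.make_tensor_value_info('b', TensorProto.FLOAT, [1, 3])
c = oh.make_tensor_value_info('c', TensorProto.FLOAT, [1, 3])
add_node = oh.make_node('Add', ['a', 'b'], ['c'])
tiny_graph = oh.make_graph([add_node], 'tiny_graph', [a, b], [c])
tiny_model = oh.make_model(tiny_graph, producer_name='explore-mini')
onnx.checker.check_model(tiny_model)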
Esempio n. 46
0
import onnx

# Load the ONNX model
model = onnx.load('./output/hopenet.pb')

# Check that the IR is well formed
onnx.checker.check_model(model)

# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
Esempio n. 47
0

if len(sys.argv) != 4:
    raise RuntimeError('Usage: %s input_dir output_dir ratio' % sys.argv[0])


input_dir = sys.argv[1]
output_dir = sys.argv[2]
ratio = float(sys.argv[3])

os.makedirs(output_dir, exist_ok=True)
shutil.rmtree(os.path.join(output_dir, 'test_data_set_0'), ignore_errors=True)
shutil.copytree(os.path.join(input_dir, 'test_data_set_0'),
                os.path.join(output_dir, 'test_data_set_0'))

model = onnx.load(os.path.join(input_dir, 'model.onnx'))
initializer_names = {i.name for i in model.graph.initializer}

inputs = []
for input in model.graph.input:
    if input.name not in initializer_names:
        inputs.append(input)

assert len(inputs) == 1

input = inputs[0]
old_shape = []
new_shape = []
for i in range(len(input.type.tensor_type.shape.dim)):
    nd = input.type.tensor_type.shape.dim[i].dim_value
    old_shape.append(nd)
Esempio n. 48
0
def simplify(model: Union[str, onnx.ModelProto],
             inputs: Sequence[Dict[str, np.ndarray]] = None,
             output_file: str = None,
             perform_optimization: bool = True,
             skip_fuse_bn: bool = False,
             skip_shape_inference: bool = True,
             input_shapes: Dict[str, Sequence[int]] = None,
             skipped_optimizers: Sequence[str] = None) -> onnx.ModelProto:
    """Simplify and optimize an onnx model.

    For detection and segmentation models, it is strongly suggested to
    provide multiple input images for verification.

    Arguments:
        model (str or onnx.ModelProto): path of the model or a loaded model
            object.
        inputs (optional, Sequence[Dict[str, np.ndarray]]): inputs of the model.
        output_file (optional, str): output file to save the simplified model.
        perform_optimization (optional, bool): whether to perform optimization.
        skip_fuse_bn (optional, bool): whether to skip fusing bn layers.
        skip_shape_inference (optional, bool): whether to skip shape inference.
        input_shapes (optional, Dict[str, Sequence[int]]):
            the shapes of the model inputs.
        skipped_optimizers (optional, Sequence[str]):
            the names of the optimizers to be skipped.

    Returns:
        onnx.ModelProto: simplified and optimized onnx model.

    Example:
        >>> import onnx
        >>> import numpy as np
        >>>
        >>> from mmcv.onnx import simplify
        >>>
        >>> dummy_input = np.random.randn(1, 3, 224, 224).astype(np.float32)
        >>> input = {'input':dummy_input}
        >>> input_file = 'sample.onnx'
        >>> output_file = 'slim.onnx'
        >>> model = simplify(input_file, [input], output_file)
    """
    if input_shapes is None:
        input_shapes = {}
    if isinstance(model, str):
        model = onnx.load(model)
    # rename op with numeric name for issue
    # https://github.com/onnx/onnx/issues/2613
    model = add_suffix2name(model)
    onnx.checker.check_model(model)
    model_ori = copy.deepcopy(model)
    numel_node_ori = len(model_ori.graph.node)
    if not skip_shape_inference:
        model = onnx.shape_inference.infer_shapes(model)

    input_shapes = check_and_update_input_shapes(model, input_shapes)

    if perform_optimization:
        model = optimize(model, skip_fuse_bn, skipped_optimizers)

    const_nodes = get_constant_nodes(model)
    feed_inputs = None if inputs is None else inputs[0]
    res = forward_for_node_outputs(model,
                                   const_nodes,
                                   input_shapes=input_shapes,
                                   inputs=feed_inputs)
    const_nodes = clean_constant_nodes(const_nodes, res)
    model = eliminate_const_nodes(model, const_nodes, res)
    onnx.checker.check_model(model)

    if perform_optimization:
        model = optimize(model, skip_fuse_bn, skipped_optimizers)

    check_ok = check(model_ori,
                     model,
                     input_shapes=input_shapes,
                     inputs=inputs)

    assert check_ok, 'Check failed for the simplified model!'
    numel_node_slim = len(model.graph.node)
    print(f'Number of nodes: {numel_node_ori} -> {numel_node_slim}')

    if output_file is not None:
        save_dir, _ = os.path.split(output_file)
        if save_dir:
            os.makedirs(save_dir, exist_ok=True)
        onnx.save(model, output_file)
    return model
Esempio n. 49
0
    # the sequence. the second is just the most recent hidden state
    # (compare the last slice of "out" with "hidden" below, they are the same)
    # The reason for this is that:
    # "out" will give you access to all hidden states in the sequence
    # "hidden" will allow you to continue the sequence and backpropagate,
    # by passing it as an argument to the lstm at a later time
    # Add the extra 2nd dimension
    out, hidden = lstm(TEST_INPUTS, (TEST_INITIAL_H, TEST_INITIAL_C))
    #out, hidden = lstm(TEST_INPUTS, (TEST_INITIAL_H_2, TEST_INITIAL_C_2))
    print(out)
    print(hidden)

    torch.onnx.export(lstm, (TEST_INPUTS, (TEST_INITIAL_H, TEST_INITIAL_C)),
                      "lstm.onnx",
                      verbose=False)

else:
    # read the model from lstm.onnx with onnx-tensorflow
    model = onnx.load("lstm.onnx")

    tf_rep = prepare(model)

    import tensorflow as tf

    print(
        tf_rep.run({
            "0": TEST_INPUTS,
            "1": TEST_INITIAL_H,
            "2": TEST_INITIAL_C
        }))
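# The snippet above assumes an lstm module and test tensors defined earlier; a
# minimal sketch of compatible definitions (all sizes here are assumptions):
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=3, hidden_size=3)  # assumed sizes
TEST_INPUTS = torch.randn(5, 1, 3)           # (seq_len, batch, input_size)
TEST_INITIAL_H = torch.randn(1, 1, 3)        # (num_layers, batch, hidden_size)
TEST_INITIAL_C = torch.randn(1, 1, 3)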
Esempio n. 50
0
def convertAndSave(path, outputPath):
    o = onnx.load(path)
    # the graph's sole non-initializer input is the actual data input
    onnxInputLayer, = set(map(lambda x: x.name, o.graph.input)) - set(map(lambda x: x.name, o.graph.initializer))

    # Parse layers
    layers = lbann.onnx.o2l.onnxToLbannLayers(
        o,
        [dataLayerName, labelLayerName],
        {dataLayerName: onnxInputLayer},
    )

    # Add a softmax layer
    outputLayerName = layers[-1].name
    probLayerName = outputLayerName
    # layers.append(lbann_pb2.Layer(name=probLayerName,
    #                               parents=lbann.onnx.util.list2LbannList([outputLayerName]),
    #                               data_layout="data_parallel",
    #                               softmax=lbann_pb2.Softmax()))

    # Add metric layers
    for name, dic in [(crossEntropyLayerName, {"cross_entropy": lbann_pb2.CrossEntropy()}),
                      (top1AccuracyLayerName, {"categorical_accuracy": lbann_pb2.CategoricalAccuracy()}),
                      (top5AccuracyLayerName, {"top_k_categorical_accuracy": lbann_pb2.TopKCategoricalAccuracy(k=5)})]:
        layers.append(lbann_pb2.Layer(name=name,
                                      parents=lbann.onnx.util.list2LbannList([probLayerName, labelLayerName]),
                                      data_layout="data_parallel",
                                      **dic))

    # Define an objective function
    objective = lbann_pb2.ObjectiveFunction(
        layer_term = [lbann_pb2.LayerTerm(layer=crossEntropyLayerName)],
        l2_weight_regularization = [lbann_pb2.L2WeightRegularization(scale_factor=1e-4)]
    )

    # Add metrics
    metrics = []
    for name, layer, unit in [("categorical accuracy", top1AccuracyLayerName, "%"),
                              ("top-5 categorical accuracy", top5AccuracyLayerName, "%")]:
        metrics.append(lbann_pb2.Metric(layer_metric=lbann_pb2.LayerMetric(name=name,
                                                                           layer=layer,
                                                                           unit=unit)))

    # Add callbacks
    callbacks = []
    for dic in [{"print": lbann_pb2.CallbackPrint()},
                {"timer": lbann_pb2.CallbackTimer()},
                {"imcomm": lbann_pb2.CallbackImComm(intermodel_comm_method="normal", all_optimizers=True)}]:
        callbacks.append(lbann_pb2.Callback(**dic))

    model = lbann_pb2.Model(
        data_layout = "data_parallel",
        mini_batch_size = 256,
        block_size = 256,
        num_epochs = 10,
        num_parallel_readers = 0,
        procs_per_model = 0,

        objective_function = objective,
        metric = metrics,
        callback = callbacks,
        layer = layers
    )

    with open(outputPath, "w") as f:
        f.write(txtf.MessageToString(lbann_pb2.LbannPB(model=model)))
Esempio n. 51
0
def detect(save_txt=False, save_img=False):
    # (320, 192) or (416, 256) or (608, 352) for (height, width)
    # img_size = (320, 192) if ONNX_EXPORT else opt.img_size
    img_size = (352, 608)
    out, source, weights, half, view_img = opt.output, opt.source, opt.weights, opt.half, opt.view_img
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')

    # Initialize
    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder

    # Initialize model
    model = Darknet(opt.cfg, img_size)

    # Load weights
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        model.load_state_dict(torch.load(weights, map_location=device)['model'])
    else:  # darknet format
        _ = load_darknet_weights(model, weights)

    # Second-stage classifier
    classify = False
    if classify:
        modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Fuse Conv2d + BatchNorm2d layers
    # model.fuse()

    # Eval mode
    model.to(device).eval()

    # Export mode
    if ONNX_EXPORT:
        img = torch.zeros((1, 3) + img_size)  # e.g. (1, 3, 352, 608)
        torch.onnx.export(model, img, 'weights/export.onnx', verbose=True, opset_version=11)

        # Validate exported model
        import onnx
        model = onnx.load('weights/export.onnx')  # Load the ONNX model
        onnx.checker.check_model(model)  # Check that the IR is well formed
        print(onnx.helper.printable_graph(model.graph))  # Print a human readable representation of the graph
        return

    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=img_size, half=half)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=img_size, half=half)

    # Get names and colors
    names = load_classes(opt.names)
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    timeStr = None
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        t = time.time()

        match = re.match(".*T([^\\.]*)", path)
        if match is not None:
            groups = match.groups()
            if len(groups) > 0:
                timeStr = groups[0].replace("-", ":")

        # Get detections
        img = torch.from_numpy(img).to(device)
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        pred = model(img)[0]

        if opt.half:
            pred = pred.float()

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.nms_thres)

        # Apply
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i]
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            people = {}
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in det:
                    if save_txt:  # Write to file
                        with open(save_path + '.txt', 'a') as file:
                            file.write(('%g ' * 6 + '\n') % (*xyxy, cls, conf))

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])

                    if names[int(cls)] == "person":
                        for threshold in range(90, 0, -10):
                            if conf.item() * 100 > threshold:
                                people[str(threshold)] = people.get(str(threshold), 0) + 1

            line = f"{timeStr},"
            for threshold in range(90, 0, -10):
                line += f"{people.get(str(threshold), 0)},"
            file.write(f"{line}\n")
            file.flush()
            print('%sDone. (%.3fs)' % (s, time.time() - t))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % (os.getcwd() + os.sep + out))
        if platform == 'darwin':  # macOS
            os.system('open ' + out + ' ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))
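# A small sketch of the timestamp extraction used above, on an assumed
# ISO-like file name such as '2020-01-02T10-11-12.jpg':
import re

match = re.match(".*T([^\\.]*)", "2020-01-02T10-11-12.jpg")
if match is not None:
    print(match.groups()[0].replace("-", ":"))  # 10:11:12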
Esempio n. 52
0
    def convert_onnx_model(model_path, onnx_file, model_name):
        """
        Util to convert onnx model to MXNet model
        :param model_name:
        :param model_path:
        :param onnx_file:
        :return:
        """
        try:
            import mxnet as mx
            from mxnet.contrib import onnx as onnx_mxnet
        except ImportError:
            raise ModelArchiverError(
                "MXNet package is not installed. Run command: pip install mxnet to install it."
            )

        try:
            import onnx
        except ImportError:
            raise ModelArchiverError(
                "Onnx package is not installed. Run command: pip install onnx to install it."
            )

        symbol_file = '%s-symbol.json' % model_name
        params_file = '%s-0000.params' % model_name
        signature_file = 'signature.json'
        # Find input symbol name and shape
        try:
            model_proto = onnx.load(os.path.join(model_path, onnx_file))
        except Exception:
            logging.error(
                "Failed to load the %s model. Verify if the model file is valid",
                onnx_file)
            raise

        graph = model_proto.graph
        _params = set()
        for tensor_vals in graph.initializer:
            _params.add(tensor_vals.name)

        input_data = []
        for graph_input in graph.input:
            shape = []
            if graph_input.name not in _params:
                for val in graph_input.type.tensor_type.shape.dim:
                    shape.append(val.dim_value)
                input_data.append((graph_input.name, tuple(shape)))

        try:
            sym, arg_params, aux_params = onnx_mxnet.import_model(
                os.path.join(model_path, onnx_file))
            # UNION of argument and auxiliary parameters
            params = dict(arg_params, **aux_params)
        except Exception:
            logging.error(
                "Failed to import the %s ONNX file into MXNet. Verify if the model file is valid",
                onnx_file)
            raise

        try:
            # rewrite input data_name correctly
            with open(os.path.join(model_path, signature_file), 'r') as f:
                data = json.loads(f.read())
                data['inputs'][0]['data_name'] = input_data[0][0]
                data['inputs'][0]['data_shape'] = [
                    int(i) for i in input_data[0][1]
                ]
            with open(os.path.join(model_path, signature_file), 'w') as f:
                f.write(json.dumps(data, indent=2))

            with open(os.path.join(model_path, symbol_file), 'w') as f:
                f.write(sym.tojson())
        except Exception:
            logging.error(
                "Failed to write the signature or symbol files for %s model",
                onnx_file)
            raise

        save_dict = {('arg:%s' % k): v.as_in_context(mx.cpu())
                     for k, v in params.items()}
        mx.nd.save(os.path.join(model_path, params_file), save_dict)
        return symbol_file, params_file
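# A hedged usage sketch; the paths are assumptions, and in the source this
# function is defined inside a class, so it would be accessed accordingly.
symbol_file, params_file = convert_onnx_model(
    model_path='/models/squeezenet',   # assumed directory with signature.json
    onnx_file='squeezenet.onnx',       # assumed ONNX file inside it
    model_name='squeezenet')
print(symbol_file, params_file)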
Esempio n. 53
0
import sys
import onnx
import onnx.shape_inference

if len(sys.argv) != 2:
    raise RuntimeError('Usage: %s input.onnx' % sys.argv[0])

model = onnx.load(sys.argv[1])
model = onnx.shape_inference.infer_shapes(model)

value_infos = {}
for vi in model.graph.input:
    value_infos[vi.name] = vi
for vi in model.graph.output:
    value_infos[vi.name] = vi
for vi in model.graph.value_info:
    value_infos[vi.name] = vi

total_kflops = 0

for node in model.graph.node:
    if node.op_type != 'Conv':
        continue
    if (node.input[0] in value_infos and
        node.input[1] in value_infos and
        node.output[0] in value_infos):
        in_shape = value_infos[node.input[0]].type.tensor_type.shape
        w_shape = value_infos[node.input[1]].type.tensor_type.shape
        out_shape = value_infos[node.output[0]].type.tensor_type.shape

        kvs = [
Esempio n. 54
0
def create_test_dir(model_path,
                    root_path,
                    test_name,
                    name_input_map=None,
                    symbolic_dim_values_map=None,
                    name_output_map=None):
    """
    Create a test directory that can be used with onnx_test_runner or onnxruntime_perf_test.
    Generates random input data for any missing inputs.
    Saves output from running the model if name_output_map is not provided.

    :param model_path: Path to the onnx model file to use.
    :param root_path: Root path to create the test directory in.
    :param test_name: Name for test. Will be added to the root_path to create the test directory name.
    :param name_input_map: Map of input names to numpy ndarray data for each input.
    :param symbolic_dim_values_map: Map of symbolic dimension names to values to use for the input data if creating
                                    using random data.
    :param name_output_map: Optional map of output names to numpy ndarray expected output data.
                            If not provided, the model will be run with the input to generate output data to save.
    :return: None
    """

    model_path = os.path.abspath(model_path)
    root_path = os.path.abspath(root_path)
    test_dir = os.path.join(root_path, test_name)
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)

    # add to existing test data sets if present
    test_num = 0
    while True:
        test_data_dir = os.path.join(test_dir,
                                     "test_data_set_" + str(test_num))
        if not os.path.exists(test_data_dir):
            os.mkdir(test_data_dir)
            break

        test_num += 1

    model_filename = os.path.split(model_path)[-1]
    test_model_filename = os.path.join(test_dir, model_filename)
    shutil.copy(model_path, test_model_filename)

    model = onnx.load(model_path)
    model_inputs = model.graph.input
    model_outputs = model.graph.output

    def save_data(prefix, name_data_map, model_info):
        idx = 0
        for name, data in name_data_map.items():
            if isinstance(data, dict):
                # ignore. map<T1, T2> from traditional ML ops
                pass
            elif isinstance(data, list):
                # ignore. vector<map<T1,T2>> from traditional ML ops. e.g. ZipMap output
                pass
            else:
                np_type = _get_numpy_type(model_info, name)
                tensor = numpy_helper.from_array(data.astype(np_type), name)
                filename = os.path.join(test_data_dir,
                                        "{}_{}.pb".format(prefix, idx))
                with open(filename, "wb") as f:
                    f.write(tensor.SerializeToString())

            idx += 1

    if not name_input_map:
        name_input_map = {}

    if not symbolic_dim_values_map:
        symbolic_dim_values_map = {}
    initializer_set = set()
    for initializer in model.graph.initializer:
        initializer_set.add(initializer.name)
    _create_missing_input_data(model_inputs, name_input_map,
                               symbolic_dim_values_map, initializer_set)
    save_data("input", name_input_map, model_inputs)

    # save expected output data if provided. run model to create if not.
    if not name_output_map:
        output_names = [o.name for o in model_outputs]
        sess = ort.InferenceSession(test_model_filename)
        outputs = sess.run(output_names, name_input_map)
        name_output_map = {}
        for name, data in zip(output_names, outputs):
            name_output_map[name] = data

    save_data("output", name_output_map, model_outputs)
Esempio n. 55
0
import tvm.relay as relay
from tvm.contrib.download import download_testdata

######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model as in
# the ONNX tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# We skip the PyTorch model construction part and download the saved ONNX model.
model_url = ''.join(['https://gist.github.com/zhreshold/',
                     'bcda4716699ac97ea44f791c24310193/raw/',
                     '93672b029103648953c4e5ad3ac3aadf346a4cdc/',
                     'super_resolution_0.2.onnx'])
model_path = download_testdata(model_url, 'super_resolution.onnx', module='onnx')
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)

######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_path = download_testdata(img_url, 'cat.png', module='data')
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr")  # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]

######################################################################
# Compile the model with relay
Esempio n. 56
0
import onnxruntime
from onnxsim import simplify
import argparse
import os
from pprint import pprint

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--height", type=int, required=True)
parser.add_argument("--width", type=int, required=True)
args = parser.parse_args()

H = args.height
W = args.width
MODEL = args.model
model = onnx.load(f'{MODEL}')

onnx_session = onnxruntime.InferenceSession(f'{MODEL}')
inputs = {}

for i in onnx_session.get_inputs():
    inputs[i.name] = [i.shape[0], i.shape[1], H, W]

print('@@@@@@@@@@@@@@@@@@@@@ inputs')
pprint(inputs)

model_simp, check = simplify(model, input_shapes=inputs)
# join the first three underscore-separated parts of the file name
basename_without_ext = ''.join(
    os.path.splitext(os.path.basename(MODEL))[0].split('_')[:3])
Esempio n. 57
0
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        model.fuse()  # only for ONNX
        torch.onnx.export(
            model,
            img,
            f,
            verbose=False,
            opset_version=12,
            input_names=['images'],
            output_names=['classes', 'boxes'] if y is None else ['output'])

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model
        print(onnx.helper.printable_graph(
            onnx_model.graph))  # print a human readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' %
              ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        model = ct.convert(ts,
Esempio n. 58
0
def detect(data_dict,
           device='',
           checkpoint_path='checkpoint.pt',
           img_size=320,
           out='output',
           cfg_path='cfg/yolov3-custom.cfg',
           half=True,
           view_img=False,
           save_txt=False,
           save_img=True,
           conf_thres=0.3,
           nms_thres=0.5):
    # (320, 192), (416, 256) or (608, 352) for (height, width)
    img_size = (320, 192) if ONNX_EXPORT else img_size
    this_path = os.path.join(os.getcwd(), 'zoo/yolov3')
    config_path = os.path.join(this_path, cfg_path)
    source = data_dict['predict_on']
    # Initialize
    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder

    # Initialize model
    model = Darknet(config_path, img_size)

    # Load checkpoint_path
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # Eval mode
    model.to(device).eval()

    # Export mode
    if ONNX_EXPORT:
        img = torch.zeros((1, 3) + img_size)  # (1, 3, 320, 192)
        torch.onnx.export(model,
                          img,
                          'checkpoint_path/export.onnx',
                          verbose=False,
                          opset_version=11)

        # Validate exported model
        import onnx
        model = onnx.load('checkpoint_path/export.onnx')  # Load the ONNX model
        onnx.checker.check_model(model)  # Check that the IR is well formed
        print(onnx.helper.printable_graph(
            model.graph))  # Print a human readable representation of the graph
        return

    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Set Dataloader
    vid_path, vid_writer = None, None
    dataset = LoadImages(source, img_size=img_size, half=half)

    # Get classes and colors
    classes = load_classes(data_dict['names'])
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(len(classes))]

    # Run inference
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        t = time.time()

        # Get detections
        img = torch.from_numpy(img).to(device)
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        pred = model(img)[0]

        if half:
            pred = pred.float()

        # Apply NMS
        pred = non_max_suppression(pred, conf_thres, nms_thres)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, classes[int(c)])  # add to string

                # Write results
                for *xyxy, conf, _, cls in det:
                    if save_txt:  # Write to file
                        with open(save_path + '.txt', 'a') as file:
                            file.write(('%g ' * 6 + '\n') % (*xyxy, cls, conf))

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (classes[int(cls)], conf)
                        plot_one_box(xyxy,
                                     im0,
                                     label=label,
                                     color=colors[int(cls)])

            print('%sDone. (%.3fs)' % (s, time.time() - t))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)

    if save_txt or save_img:
        print('Results saved to %s' % out)
        if platform == 'darwin':  # macOS
            os.system('open ' + out + ' ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))