    def testRunModelProto(self):
        name = datasets.get_example("logreg_iris.onnx")
        model = load(name)

        rep = backend.prepare(model)
        x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
        res = rep.run(x)
        output_expected = np.array([0, 0, 0], dtype=np.float32)
        np.testing.assert_allclose(output_expected,
                                   res[0],
                                   rtol=1e-05,
                                   atol=1e-08)
        output_expected = [{
            0: 0.950599730014801,
            1: 0.027834169566631317,
            2: 0.02156602405011654
        }, {
            0: 0.9974970817565918,
            1: 5.6299926654901356e-05,
            2: 0.0024466661270707846
        }, {
            0: 0.9997311234474182,
            1: 1.1918064757310276e-07,
            2: 0.00026869276189245284
        }]
        self.assertEqual(output_expected, res[1])
Example #2
    def testRunModelProtoApi(self):
        name = datasets.get_example("logreg_iris.onnx")
        model = load(name)

        inputs = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                          dtype=np.float32)
        outputs = ort_backend.run_model(model, inputs)

        output_expected = np.array([0, 0, 0], dtype=np.float32)
        np.testing.assert_allclose(output_expected,
                                   outputs[0],
                                   rtol=1e-05,
                                   atol=1e-08)
        output_expected = [{
            0: 0.950599730014801,
            1: 0.027834169566631317,
            2: 0.02156602405011654
        }, {
            0: 0.9974970817565918,
            1: 5.6299926654901356e-05,
            2: 0.0024466661270707846
        }, {
            0: 0.9997311234474182,
            1: 1.1918064757310276e-07,
            2: 0.00026869276189245284
        }]

        check_list_of_map_to_float(self, output_expected, outputs[1])
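    # Note: check_list_of_map_to_float is a helper defined elsewhere in the
    # test suite. A minimal sketch of what such a helper might look like
    # (an assumption, not the real implementation):
    #
    #     def check_list_of_map_to_float(testcase, expected, actual, rtol=1e-05):
    #         testcase.assertEqual(len(expected), len(actual))
    #         for exp_map, act_map in zip(expected, actual):
    #             testcase.assertEqual(sorted(exp_map), sorted(act_map))
    #             for key in exp_map:
    #                 np.testing.assert_allclose(act_map[key], exp_map[key],
    #                                            rtol=rtol)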
Example #3
def example_test_with_ort():  # type: () -> None
    import onnx
    import numpy  # type: ignore
    import onnxruntime as rt  # type: ignore
    from onnxruntime.datasets import get_example  # type: ignore
    import numpy.random  # type: ignore

    # get a sample model shipped with onnxruntime
    example1 = get_example("sigmoid.onnx")

    # test ONNX functions
    model = onnx.load(example1)
    onnx.checker.check_model(model)
    onnx.checker.check_model(model, True)
    inferred_model = onnx.shape_inference.infer_shapes(model, True)
    temp_filename = "temp.onnx"
    onnx.save(inferred_model, temp_filename)

    # test ONNXRuntime functions
    sess = rt.InferenceSession(temp_filename)
    input_name = sess.get_inputs()[0].name
    output_name = sess.get_outputs()[0].name
    x = numpy.random.random((3, 4, 5))
    x = x.astype(numpy.float32)

    sess.run([output_name], {input_name: x})
Example #4
import onnxruntime
from onnxruntime.datasets import get_example


def testonnx(onnxmodelpath, dummy_input):
    example_model = get_example(onnxmodelpath)
    session = onnxruntime.InferenceSession(example_model)
    # get the name of the first input of the model
    input_name = session.get_inputs()[0].name
    print('Input Name:', input_name)
    result = session.run([], {input_name: dummy_input.data.numpy()})

    return result
Example #5

def compare(args):
    # Load torch model
    onnx_path = os.path.join(os.getcwd(), 'checkpoints/conv_tasnet_v2.onnx')
    model = ConvTasNet.load_model(args.model_path)
    model.eval()

    # input data
    dummy_input = torch.rand(1, 88200)

    # Forward
    estimate_source = model(dummy_input)  # [B, C, T]

    # load onnx model
    example_model = get_example(onnx_path)
    session = onnxruntime.InferenceSession(example_model)
    session.get_modelmeta()
    input_name = session.get_inputs()[0].name
    onnx_out = session.run(None, {input_name: to_numpy(dummy_input)})

    estimate_source = to_numpy(estimate_source).tolist()
    print("estimate_source", estimate_source)
    print("onnx_out", onnx_out)
Example #6
def torch2onnx(args, model, dummy_input):
    input_names = ['input']  # names of the model's inputs
    output_names = ['output']  # names of the model's outputs
    # note: torch.onnx._export is a legacy private API; unlike the public
    # torch.onnx.export, it also returns the traced model's output.
    torch_out = torch.onnx._export(model,
                                   dummy_input,
                                   os.path.join(args.save_model_path,
                                                "pnet.onnx"),
                                   verbose=True,
                                   input_names=input_names,
                                   output_names=output_names)
    # test onnx model
    example_model = get_example(os.path.join(args.save_model_path,
                                             "pnet.onnx"))
    session = onnxruntime.InferenceSession(example_model)
    # get the name of the first input of the model
    input_name = session.get_inputs()[0].name
    print('Input Name:', input_name)
    result = session.run([], {input_name: dummy_input.data.numpy()})
    # np.testing.assert_almost_equal(
    #     torch_out.data.cpu().numpy(), result[0], decimal=3)
    print("the result is {}".format(result))
Example #7
    def get_name(self, name):
        return get_example(name)
Example #8

#!/usr/bin/env python3

import onnxruntime
import numpy as np
from onnxruntime.datasets import get_example

example_model = get_example("sigmoid.onnx")
sess = onnxruntime.InferenceSession(example_model)

print(sess.get_providers())
sess.set_providers(['CUDAExecutionProvider'])
# sess.set_providers(['CPUExecutionProvider'])

# identify our input name and shape
input_name = sess.get_inputs()[0].name
print("Input name  :", input_name)
input_shape = sess.get_inputs()[0].shape
print("Input shape :", input_shape)
input_type = sess.get_inputs()[0].type
print("Input type  :", input_type, "\n")

# identify our output name and shape
output_name = sess.get_outputs()[0].name
print("Output name  :", output_name)
output_shape = sess.get_outputs()[0].shape
print("Output shape :", output_shape)
output_type = sess.get_outputs()[0].type
print("Output type  :", output_type, "\n")

# pass in some input and compute our predictions
# (cast to float32, which is what sigmoid.onnx expects)
x = np.random.random(input_shape).astype(np.float32)
result = sess.run([output_name], {input_name: x})
print(result)
Example #9
"""
Profile the execution of a simple model
=======================================

*ONNX Runtime* can profile the execution of the model.
This example shows how to interpret the results.
"""

import onnxruntime as rt
import numpy
from onnxruntime.datasets import get_example

#########################
# Let's load a very simple model and compute some prediction.

example1 = get_example("mul_1.pb")
sess = rt.InferenceSession(example1)
input_name = sess.get_inputs()[0].name

x = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=numpy.float32)
res = sess.run(None, {input_name: x})
print(res)

#########################
# We need to enable profiling
# before running the predictions.

options = rt.SessionOptions()
options.enable_profiling = True
sess_profile = rt.InferenceSession(example1, options)
input_name = sess_profile.get_inputs()[0].name
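# A minimal continuation sketch (an assumption; the original snippet is
# truncated here): run once with profiling on, then end_profiling()
# writes a chrome-trace JSON file whose events can be inspected.
import json
res = sess_profile.run(None, {input_name: x})
prof_file = sess_profile.end_profiling()
print(prof_file)
with open(prof_file, "r") as f:
    events = json.load(f)
print(events[:2])  # each event records a name, duration, timestamps, ...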
Example #10
"""
Let's use the API to compute the prediction
of a simple logistic regression model.
"""
import numpy as np
from onnxruntime import datasets
from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
import onnxruntime.backend as backend
from onnx import load

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
from onnxruntime import get_device
device = get_device()

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, device)
x = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    label, proba = rep.run(x)
    print("label={}".format(label))
    print("probabilities={}".format(proba))
except (RuntimeError, InvalidArgument) as e:
    print(e)

########################################
# The backend can also directly load the model
# without using *onnx*.
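# A short sketch of that direct path (hedged; backend.prepare also accepts
# a model file path, so the onnx.load step can be skipped):
rep2 = backend.prepare(name, device)
label2, proba2 = rep2.run(x)
print("label={}".format(label2))
print("probabilities={}".format(proba2))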
Example #11

def change_ir_version(filename, ir_version=6):
    "onnxruntime==1.2.0 does not support opset <= 7 and ir_version > 6"
    with open(filename, "rb") as f:
        model = onnx.load(f)
    model.ir_version = ir_version
    if model.opset_import[0].version <= 7:
        model.opset_import[0].version = 11
    return model


#########################
# Let's load a very simple model and compute some prediction.

example1 = get_example("mul_1.onnx")
onnx_model = change_ir_version(example1)
onnx_model_str = onnx_model.SerializeToString()
sess = rt.InferenceSession(onnx_model_str)
input_name = sess.get_inputs()[0].name

x = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=numpy.float32)
res = sess.run(None, {input_name: x})
print(res)

#########################
# We need to enable profiling
# before running the predictions.

options = rt.SessionOptions()
options.enable_profiling = True
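# A minimal continuation sketch (an assumption; the snippet is truncated
# here): open a profiled session on the patched model and run it once.
sess_profile = rt.InferenceSession(onnx_model_str, options)
res = sess_profile.run(None, {input_name: x})
prof_file = sess_profile.end_profiling()
print(prof_file)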
Example #12

"""
Load and predict with ONNX Runtime and a very simple model
==========================================================

This example demonstrates how to load a model and compute
the output for an input vector. It also shows how to
retrieve the definition of its inputs and outputs.
"""

import onnxruntime as rt
import numpy
from onnxruntime.datasets import get_example

#########################
# Let's load a very simple model.
# The model is available on github `onnx...test_sigmoid <https://github.com/onnx/onnx/tree/master/onnx/backend/test/data/node/test_sigmoid>`_.

example1 = get_example("sigmoid.onnx")
sess = rt.InferenceSession(example1)

#########################
# Let's see the input name and shape.

input_name = sess.get_inputs()[0].name
print("input name", input_name)
input_shape = sess.get_inputs()[0].shape
print("input shape", input_shape)
input_type = sess.get_inputs()[0].type
print("input type", input_type)

#########################
# Let's see the output name and shape.
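# (The snippet is truncated here; mirroring the input introspection above:)
output_name = sess.get_outputs()[0].name
print("output name", output_name)
output_shape = sess.get_outputs()[0].shape
print("output shape", output_shape)
output_type = sess.get_outputs()[0].type
print("output type", output_type)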
Example #13
# Licensed under the MIT License.
"""
Metadata
========

The ONNX format contains metadata describing how the
model was produced. It is useful when the model
is deployed to production, to keep track of which
instance was used at a specific time.
Let's see how to do that with a simple
logistic regression model trained with
*scikit-learn* and converted with *onnxmltools*.
"""

from onnxruntime.datasets import get_example
example = get_example("logreg_iris.onnx")

import onnx
model = onnx.load(example)

print("doc_string={}".format(model.doc_string))
print("domain={}".format(model.domain))
print("ir_version={}".format(model.ir_version))
print("metadata_props={}".format(model.metadata_props))
print("model_version={}".format(model.model_version))
print("producer_name={}".format(model.producer_name))
print("producer_version={}".format(model.producer_version))

#############################
# With *ONNX Runtime*:
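# A short sketch (an assumption; the snippet is truncated here): the same
# metadata is exposed at runtime through the session's ModelMetadata.
from onnxruntime import InferenceSession
sess = InferenceSession(example)
meta = sess.get_modelmeta()
print("custom_metadata_map={}".format(meta.custom_metadata_map))
print("description={}".format(meta.description))
print("domain={}".format(meta.domain))
print("graph_name={}".format(meta.graph_name))
print("producer_name={}".format(meta.producer_name))
print("version={}".format(meta.version))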
Example #14
model.eval()

x = torch.randn(1, 3, 224, 224).cuda()
export_onnx_file = "models/model_pruned_0.5.onnx"
torch.onnx.export(model,
                  x,
                  export_onnx_file,
                  verbose=True,
                  input_names=["x"],
                  output_names=["y"])


def to_numpy(tensor):
    # detach first when the tensor still tracks gradients
    if tensor.requires_grad:
        return tensor.detach().cpu().numpy()
    return tensor.cpu().numpy()


dummy_input = torch.randn(1, 3, 224, 224).cuda()
model.eval()
with torch.no_grad():
    torch_out = model(dummy_input)
print(torch_out)

example_model = get_example(
    '/home/xywang/code/pruning/Torch-Pruning/resnet50_catdog/models/model_pruned_0.5.onnx'
)
sess = onnxruntime.InferenceSession(example_model)
onnx_out = sess.run(None, {sess.get_inputs()[0].name: to_numpy(dummy_input)})
print(onnx_out)

np.testing.assert_almost_equal(to_numpy(torch_out), onnx_out[0], decimal=3)
Example #15
import os
import time

import numpy as np
import onnxruntime
from onnxruntime.datasets import get_example

# get the directory containing this script
dirname = os.path.dirname(__file__)
# print(dirname)

# build the path to the exported model file
filename = os.path.join(dirname, 'output/model.onnx')

# print out the full path to the file
print(filename)

# get the example
example_model = get_example(filename)
sess = onnxruntime.InferenceSession(example_model)

input_name = sess.get_inputs()[0].name
input_shape = sess.get_inputs()[0].shape
input_type = sess.get_inputs()[0].type

print(input_name, input_shape, input_type)

output_name = sess.get_outputs()[0].name
output_shape = sess.get_outputs()[0].shape
output_type = sess.get_outputs()[0].type

print(output_name, output_shape, output_type)

# (assumes the model has a fixed numeric input shape and float32 inputs)
x = np.random.random(input_shape).astype(np.float32)
result = sess.run([output_name], {input_name: x})
print(result)