Example #1
    def test_dim_param_converted_to_int_shape(self):
        model = ONNX_MODELS["dim_param"]
        with OnnxrtRunner(SessionFromOnnxBytes(model.loader)) as runner:
            input_meta = runner.get_input_metadata()
            # In Polygraphy, we only use None to indicate a dynamic input dimension - not strings.
            for name, (dtype, shape) in input_meta.items():
                for dim in shape:
                    assert dim is None or isinstance(dim, int)
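The snippet below is a minimal follow-up sketch, not part of the test above: it shows one way to turn the input metadata into a concrete feed_dict for runner.infer. Substituting 1 for every dynamic (None) dimension, reusing the "dim_param" model, and assuming ONNX_MODELS and numpy (np) are available as in the surrounding examples are all illustrative assumptions.

# Illustrative sketch: build a feed_dict for a model with dynamic dimensions by
# replacing every None entry in the shape with an arbitrary concrete value (1 here).
with OnnxrtRunner(SessionFromOnnxBytes(ONNX_MODELS["dim_param"].loader)) as runner:
    feed_dict = {}
    for name, (dtype, shape) in runner.get_input_metadata().items():
        concrete_shape = [dim if isinstance(dim, int) else 1 for dim in shape]
        feed_dict[name] = np.ones(concrete_shape, dtype=dtype)
    outputs = runner.infer(feed_dict=feed_dict)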
Example #2
    def test_postprocess(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        run_results = Comparator.run([OnnxrtRunner(SessionFromOnnxBytes(onnx_loader))], use_subprocess=True)
        # Output shape is (1, 1, 2, 2)
        postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
        for runner_name, results in postprocessed.items():
            for result in results:
                for name, output in result.items():
                    assert output.shape == (1, 1, 2, 1)
Example #3
    def test_multirun_outputs_are_different(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnxBytes(onnx_loader))
        run_results = Comparator.run([runner], data_loader=DataLoader(iterations=2))

        iteration0 = run_results[runner.name][0]
        iteration1 = run_results[runner.name][1]
        for name in iteration0.keys():
            assert np.any(iteration0[name] != iteration1[name])
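For context, the outputs differ because the default DataLoader generates a fresh random feed_dict for each iteration. The sketch below combines the patterns from Examples #3 and #5 to inspect the generated inputs directly; it is illustrative only and assumes ONNX_MODELS, np, and the imports used in the surrounding examples.

# Illustrative sketch: look at the data the default DataLoader produces per iteration.
data_loader = DataLoader(iterations=2)
with OnnxrtRunner(SessionFromOnnxBytes(ONNX_MODELS["identity"].loader)) as runner:
    data_loader.input_metadata = runner.get_input_metadata()
    first, second = data_loader[0], data_loader[1]
    # Each indexed feed_dict is generated independently, so the values differ across iterations.
    assert any(np.any(first[name] != second[name]) for name in first)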
Example #4
    def test_list_as_data_loader(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnxBytes(onnx_loader), name="onnx_runner")

        data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
        run_results = Comparator.run([runner], data_loader=data)
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        for actual, expected in zip(iter_results, data):
            assert np.all(actual['y'] == expected['x'])
Example #5
            def fallback_shape_inference(onnx_model):
                from polygraphy.backend.onnx import BytesFromOnnx, ModifyOnnx
                from polygraphy.backend.onnxrt import (OnnxrtRunner,
                                                       SessionFromOnnxBytes)

                load_model = ModifyOnnx(onnx_model, outputs=constants.MARK_ALL)
                with OnnxrtRunner(SessionFromOnnxBytes(BytesFromOnnx(load_model))) as runner:
                    data_loader = self.makers[DataLoaderArgs].get_data_loader()
                    data_loader.input_metadata = runner.get_input_metadata()
                    outputs = runner.infer(feed_dict=data_loader[0])

                    meta = TensorMetadata()
                    for name, output in outputs.items():
                        meta.add(name, output.dtype, output.shape)
                    return meta
Example #6
    def test_multiple_runners(self):
        load_tf = TF_MODELS["identity"].loader
        build_tf_session = SessionFromGraph(load_tf)
        load_serialized_onnx = BytesFromOnnx(OnnxFromTfGraph(load_tf))
        build_onnxrt_session = SessionFromOnnxBytes(load_serialized_onnx)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))

        runners = [
            TfRunner(build_tf_session),
            OnnxrtRunner(build_onnxrt_session),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        compare_func = CompareFunc.basic_compare_func(check_shapes=version(trt.__version__) >= version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1 # Default number of iterations
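Beyond check_shapes, basic_compare_func can also be configured with comparison tolerances. The sketch below is illustrative only: the rtol/atol keyword names are assumptions not shown in the examples above, and the two ONNX-Runtime runners of the same identity model simply give the comparison something to check.

# Illustrative sketch: compare two ONNX-Runtime runs of the same model with explicit
# tolerances. The rtol/atol keyword arguments are assumed here; drop or rename them
# if your Polygraphy version exposes different parameters.
runners = [
    OnnxrtRunner(SessionFromOnnxBytes(ONNX_MODELS["identity"].loader), name="onnxrt_0"),
    OnnxrtRunner(SessionFromOnnxBytes(ONNX_MODELS["identity"].loader), name="onnxrt_1"),
]
run_results = Comparator.run(runners)
compare_func = CompareFunc.basic_compare_func(rtol=1e-5, atol=1e-5)
assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))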
Example #7
    def test_warmup_runs(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnxBytes(onnx_loader))
        run_results = Comparator.run([runner], warm_up=2)
        assert len(run_results[runner.name]) == 1
Example #8
    def test_session_from_onnx_bytes(self):
        from polygraphy.backend.onnxrt import SessionFromOnnxBytes

        SessionFromOnnxBytes(None)
Example #9
"""
This script runs an identity model with ONNX-Runtime and TensorRT,
then compares outputs.
"""
from polygraphy.backend.trt import NetworkFromOnnxBytes, EngineFromNetwork, TrtRunner
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnxBytes
from polygraphy.backend.common import BytesFromPath
from polygraphy.comparator import Comparator

import os

# Create loaders for both ONNX Runtime and TensorRT
MODEL = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
                     "models", "identity.onnx")

load_serialized_onnx = BytesFromPath(MODEL)
build_onnxrt_session = SessionFromOnnxBytes(load_serialized_onnx)
build_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))

# Create runners
runners = [
    TrtRunner(build_engine),
    OnnxrtRunner(build_onnxrt_session),
]

# Finally, run and compare the results.
run_results = Comparator.run(runners)
assert bool(Comparator.compare_accuracy(run_results))
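A short follow-up sketch (not part of the original script): the per-runner indexing from Examples #3 and #4 can be used to pull individual outputs out of run_results for inspection.

# Illustrative follow-up: print each runner's outputs from the first (and only) iteration.
for runner in runners:
    first_iteration = run_results[runner.name][0]
    for output_name, output_array in first_iteration.items():
        print(f"{runner.name}: {output_name} has shape {output_array.shape}")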
Example #10
    def test_shape_output(self):
        model = ONNX_MODELS["reshape"]
        with OnnxrtRunner(SessionFromOnnxBytes(model.loader)) as runner:
            model.check_runner(runner)
Example #11
    def test_basic(self):
        model = ONNX_MODELS["identity"]
        with OnnxrtRunner(SessionFromOnnxBytes(model.loader)) as runner:
            assert runner.is_active
            model.check_runner(runner)
        assert not runner.is_active
Example #12
def test_infer_raises_if_runner_inactive():
    runner = OnnxrtRunner(SessionFromOnnxBytes(ONNX_MODELS["identity"].loader))
    feed_dict = {"x": np.ones((1, 1, 2, 2), dtype=np.float32)}

    with pytest.raises(PolygraphyException):
        runner.infer(feed_dict)
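As a counterpart to the failing call above, here is a minimal sketch of the correct usage, reusing the context-manager pattern from Example #11: an active runner accepts the same feed_dict without raising.

# Illustrative counterpart: activate the runner before calling infer.
with OnnxrtRunner(SessionFromOnnxBytes(ONNX_MODELS["identity"].loader)) as runner:
    outputs = runner.infer(feed_dict={"x": np.ones((1, 1, 2, 2), dtype=np.float32)})
    assert np.all(outputs["y"] == 1)  # The identity model returns its input unchanged (see Example #4).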