Example #1
    def test_errors_do_not_hang(self):
        # Should error because the interface is not implemented correctly.
        class FakeRunner(object):
            def __init__(self):
                self.name = "fake"

        runners = [FakeRunner()]
        with pytest.raises(PolygraphyException):
            Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)
Example #2
    def test_segfault_does_not_hang(self):
        def raise_called_process_error():
            class FakeSegfault(sp.CalledProcessError):
                pass

            raise FakeSegfault(-11, ["simulate", "segfault"])  # -11 is the return code for SIGSEGV

        runners = [TrtRunner(EngineFromNetwork(raise_called_process_error))]
        with pytest.raises(PolygraphyException):
            Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)
Example #3
def main():
    # The OnnxrtRunner requires an ONNX-RT session.
    # We can use the SessionFromOnnx lazy loader to construct one easily:
    build_onnxrt_session = SessionFromOnnx("identity.onnx")

    # The TrtRunner requires a TensorRT engine.
    # To create one from the ONNX model, we can chain a couple lazy loaders together:
    build_engine = EngineFromNetwork(NetworkFromOnnxPath("identity.onnx"))

    runners = [
        TrtRunner(build_engine),
        OnnxrtRunner(build_onnxrt_session),
    ]

    # `Comparator.run()` will run each runner separately using synthetic input data and
    #   return a `RunResults` instance. See `polygraphy/comparator/struct.py` for details.
    #
    # TIP: To use custom input data, you can set the `data_loader` parameter in `Comparator.run()`
    #   to a generator or iterable that yields `Dict[str, np.ndarray]`.
    run_results = Comparator.run(runners)

    # `Comparator.compare_accuracy()` checks that outputs match between runners.
    #
    # TIP: The `compare_func` parameter can be used to control how outputs are compared (see API reference for details).
    #   The default comparison function is created by `CompareFunc.simple()`, but we can construct it
    #   explicitly if we want to change the default parameters, such as tolerance.
    assert bool(Comparator.compare_accuracy(run_results, compare_func=CompareFunc.simple(atol=1e-8)))

    # We can use the `RunResults.save()` method to save the inference results to a JSON file.
    # This can be useful if you want to generate and compare results separately.
    run_results.save("inference_results.json")
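As the TIP above notes, `Comparator.run()` accepts a custom `data_loader`. A minimal sketch of feeding fixed inputs instead of synthetic data (the input name "x" and its shape are assumptions for the identity model, following the other examples on this page):

import numpy as np

# Any iterable or generator of feed dicts (Dict[str, np.ndarray]) can serve as a data loader.
# Here we feed the same fixed input for two iterations.
custom_data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
run_results = Comparator.run(runners, data_loader=custom_data)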
Example #4
    def check_network(self, suffix):
        """
        Checks whether the provided network is accurate compared to golden values.

        Returns:
            OrderedDict[str, OutputCompareResult]:
                    A mapping of output names to an object describing whether they matched, and what the
                    required tolerances were.
        """
        from polygraphy.comparator import Comparator, CompareFunc, DataLoader
        from polygraphy.backend.trt import EngineFromNetwork, TrtRunner, ModifyNetwork, SaveEngine

        with G_LOGGER.verbosity(severity=G_LOGGER.severity if self.args.show_output else G_LOGGER.CRITICAL):
            data_loader = tool_util.get_data_loader(self.args)

            self.args.strict_types = True  # HACK: Override strict types so things actually run in the right precision.
            config = tool_util.get_trt_config_loader(self.args, data_loader)(self.builder, self.network)

            suffix = "-{:}-{:}".format(suffix, self.precision)
            engine_path = misc.insert_suffix(self.args.save_engine, suffix)

            self.builder, self.network, self.parser = ModifyNetwork(
                (self.builder, self.network, self.parser), outputs=self.args.trt_outputs
            )()

            engine_loader = SaveEngine(
                EngineFromNetwork((self.builder, self.network, self.parser), config), path=engine_path
            )

            runners = [TrtRunner(engine_loader)]

            results = Comparator.run(runners, data_loader=data_loader)
            if self.args.validate:
                Comparator.validate(results)
            results.update(self.golden)

            compare_func = CompareFunc.basic_compare_func(
                atol=self.args.atol, rtol=self.args.rtol, check_shapes=not self.args.no_shape_check
            )
            accuracy_result = Comparator.compare_accuracy(results, compare_func=compare_func)

        tolerances = list(accuracy_result.values())[0][0]  # First iteration of the first runner pair
        for name, req_tol in tolerances.items():
            if bool(req_tol):
                G_LOGGER.success("PASSED | Output: {:} | Required Tolerances: {:}".format(name, req_tol))
            else:
                G_LOGGER.error("FAILED | Output: {:} | Required Tolerances: {:}".format(name, req_tol))
        return accuracy_result
Example #5
    def test_postprocess(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx(onnx_loader))], use_subprocess=True)
        # Output shape is (1, 1, 2, 2)
        postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
        for _, results in postprocessed.items():
            for result in results:
                for _, output in result.items():
                    assert output.shape == (1, 1, 2, 1)
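`PostprocessFunc.topk_func()` is only a convenience factory; `Comparator.postprocess()` accepts any callable of the same shape. A hedged sketch of a custom post-processing function (the helper below is hypothetical, not part of the Polygraphy API), assuming the callable receives one iteration's mapping of output names to arrays and returns it:

import numpy as np

def cast_outputs_to_float32(iter_result):
    # Assumption: `iter_result` maps output names to np.ndarrays for a single iteration.
    for name in iter_result.keys():
        iter_result[name] = iter_result[name].astype(np.float32)
    return iter_result

postprocessed = Comparator.postprocess(run_results, postprocess_func=cast_outputs_to_float32)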
Example #6
    def test_multirun_outputs_are_different(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(onnx_loader)))
        run_results = Comparator.run([runner], data_loader=DataLoader(iterations=2))

        iteration0 = run_results[runner.name][0]
        iteration1 = run_results[runner.name][1]
        for name in iteration0.keys():
            assert np.any(iteration0[name] != iteration1[name])
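The default `DataLoader` generates fresh synthetic inputs for every iteration, which is why the two iterations above differ. A small sketch, assuming `DataLoader`'s `seed` parameter, to make the generated inputs reproducible across runs:

# Fixing the seed makes the synthetic inputs reproducible from run to run,
# though each iteration within a run still receives different data.
data_loader = DataLoader(seed=1, iterations=2)
run_results = Comparator.run([runner], data_loader=data_loader)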
Example #7
    def test_list_as_data_loader(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")

        data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
        run_results = Comparator.run([runner], data_loader=data)
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        for actual, expected in zip(iter_results, data):
            assert np.all(actual["y"] == expected["x"])
Example #8
    def test_generator_as_data_loader(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnxBytes(onnx_loader), name="onnx_runner")

        def data():
            for feed_dict in [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2:
                yield feed_dict

        run_results = Comparator.run([runner], data_loader=data())
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        # NOTE: `Comparator.run()` consumed the original generator, so we create a fresh one here.
        for actual, expected in zip(iter_results, data()):
            assert np.all(actual["y"] == expected["x"])
Example #9
    def test_dim_param_trt_onnxrt(self):
        load_onnx_bytes = ONNX_MODELS["dim_param"].loader
        build_onnxrt_session = SessionFromOnnx(load_onnx_bytes)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_onnx_bytes))

        runners = [
            OnnxrtRunner(build_onnxrt_session),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations
Example #10
    def test_multiple_runners(self):
        load_tf = TF_MODELS["identity"].loader
        build_tf_session = SessionFromGraph(load_tf)
        load_serialized_onnx = BytesFromOnnx(OnnxFromTfGraph(load_tf))
        build_onnxrt_session = SessionFromOnnxBytes(load_serialized_onnx)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))

        runners = [
            TfRunner(build_tf_session),
            OnnxrtRunner(build_onnxrt_session),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        compare_func = CompareFunc.basic_compare_func(check_shapes=version(trt.__version__) >= version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1 # Default number of iterations
Example #11
def main():
    # The OnnxrtRunner requires an ONNX-RT session.
    # We can use the SessionFromOnnx lazy loader to construct one easily:
    build_onnxrt_session = SessionFromOnnx("identity.onnx")

    # The TrtRunner requires a TensorRT engine.
    # To create one from the ONNX model, we can chain a couple lazy loaders together:
    build_engine = EngineFromNetwork(NetworkFromOnnxPath("identity.onnx"))

    runners = [
        TrtRunner(build_engine),
        OnnxrtRunner(build_onnxrt_session),
    ]

    # `Comparator.run()` will run each runner separately using synthetic input data and return a `RunResults` instance.
    # See `polygraphy/comparator/struct.py` for details.
    run_results = Comparator.run(runners)

    # `Comparator.compare_accuracy()` checks that outputs match between runners.
    assert bool(Comparator.compare_accuracy(run_results))

    # We can use the `RunResults.save()` method to save the inference results to a JSON file.
    # This can be useful if you want to generate and compare results separately.
    run_results.save("inference_results.json")
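Because the results are saved to JSON above, a separate process can reload and compare them later without rerunning inference. A minimal sketch, assuming `RunResults.load()` as the counterpart of `save()`:

from polygraphy.comparator import Comparator, RunResults

# Reload previously saved results and compare them without rerunning the models.
loaded_results = RunResults.load("inference_results.json")
assert bool(Comparator.compare_accuracy(loaded_results))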
Example #12
    def test_warmup_runs(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnxBytes(onnx_loader))
        run_results = Comparator.run([runner], warm_up=2)
        # Warm-up runs are not recorded, so only the single real iteration appears in the results.
        assert len(run_results[runner.name]) == 1
Example #13
"""
This script runs an identity model with ONNX-Runtime and TensorRT,
then compares outputs.
"""
from polygraphy.backend.trt import NetworkFromOnnxBytes, EngineFromNetwork, TrtRunner
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnxBytes
from polygraphy.backend.common import BytesFromPath
from polygraphy.comparator import Comparator

import os

# Create loaders for both ONNX Runtime and TensorRT
MODEL = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
                     "models", "identity.onnx")

load_serialized_onnx = BytesFromPath(MODEL)
build_onnxrt_session = SessionFromOnnxBytes(load_serialized_onnx)
build_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))

# Create runners
runners = [
    TrtRunner(build_engine),
    OnnxrtRunner(build_onnxrt_session),
]

# Finally, run and compare the results.
run_results = Comparator.run(runners)
assert bool(Comparator.compare_accuracy(run_results))

# (Separate snippet; its opening was truncated in the source. The tensor name "x"
#  below is an assumption, and earlier imports/definitions are omitted.)
profiles = [
    Profile().add("x",
                  min=[1, 1, 28, 28],
                  opt=[4, 1, 28, 28],
                  max=[16, 1, 28, 28])
]
create_trt_config = CreateTrtConfig(max_workspace_size=1000000000, profiles=profiles)
build_engine = EngineFromNetwork(parse_network_from_onnx, config=create_trt_config)
save_engine = SaveEngine(build_engine, path="model-FP32.plan")

# Runners
runners = [
    OnnxrtRunner(build_onnxrt_session),
    TrtRunner(save_engine),
]

# Runner Execution
results = Comparator.run(runners, data_loader=data_loader)

success = True
# Accuracy Comparison
compare_func = CompareFunc.simple(rtol={"": 0.001}, atol={"": 0.001})
success &= bool(Comparator.compare_accuracy(results, compare_func=compare_func))

# Report Results
cmd_run = " ".join(sys.argv)
if not success:
    # NOTE: G_LOGGER.critical() raises an exception, so the script stops here on failure.
    G_LOGGER.critical("FAILED | Command: {}".format(cmd_run))
G_LOGGER.finish("PASSED | Command: {}".format(cmd_run))