Example #1
    def test_per_output_tol_fallback(self, mode):
        OUT0_NAME = "output0"
        OUT1_NAME = "output1"
        OUT_VALS = np.ones((4, 4))

        iter_result0 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS + 1,
            OUT1_NAME: OUT_VALS
        })
        iter_result1 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS,
            OUT1_NAME: OUT_VALS + 1
        })

        acc = CompareFunc.simple()(iter_result0, iter_result1)
        assert not acc[OUT0_NAME]
        assert not acc[OUT1_NAME]

        # Do not specify a tolerance for OUT0_NAME - it should still fail under the default (fallback) tolerance
        tols = {
            OUT1_NAME: 1.0,
        }

        if mode == "abs":
            compare_func = CompareFunc.simple(atol=tols)
        else:
            compare_func = CompareFunc.simple(rtol=tols)

        acc = compare_func(iter_result0, iter_result1)
        assert not acc[OUT0_NAME]
        assert acc[OUT1_NAME]
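The comparison result behaves like an ordered mapping from output name to a truthy per-output entry, so a blanket check over all outputs can be written directly; a minimal sketch:

acc = compare_func(iter_result0, iter_result1)
assert all(bool(passed) for passed in acc.values())  # True only if every output matched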
Example #2
    def test_per_output_tol(self, mode):
        OUT0_NAME = "output0"
        OUT1_NAME = "output1"
        OUT_VALS = np.ones((4, 4))

        iter_result0 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS,
            OUT1_NAME: OUT_VALS
        })
        iter_result1 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS,
            OUT1_NAME: OUT_VALS + 1
        })

        # With default tolerances, output1 of the second result does not match.
        compare_func = CompareFunc.simple()
        acc = compare_func(iter_result0, iter_result1)
        assert acc[OUT0_NAME]
        assert not acc[OUT1_NAME]

        # But with custom tolerances, it should pass.
        tols = {
            OUT0_NAME: 0.0,
            OUT1_NAME: 1.0,
        }

        if mode == "abs":
            compare_func = CompareFunc.simple(atol=tols)
        else:
            compare_func = CompareFunc.simple(rtol=tols)

        acc = compare_func(iter_result0, iter_result1)
        assert acc[OUT0_NAME]
        assert acc[OUT1_NAME]
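The per-output maps for atol and rtol can also be combined in a single comparison function; a minimal sketch (output names follow the test above, tolerance values are illustrative):

compare_func = CompareFunc.simple(
    atol={"output0": 0.0, "output1": 1.0},  # absolute tolerance per output
    rtol={"output1": 0.05},                 # outputs without an entry use the default rtol
)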
Example #3
    def test_invalid_error_stat(self):
        res0 = IterationResult(
            outputs={"output": np.array([0, 1, 2, 3], dtype=np.float32)})
        res1 = IterationResult(
            outputs={
                "output": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32)
            })

        with pytest.raises(PolygraphyException, match="Invalid choice"):
            CompareFunc.simple(check_error_stat="invalid-stat")(res0, res1)
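For reference, the exception matched here can be imported from Polygraphy's exception module (assuming the current package layout), so calling code can catch it explicitly:

from polygraphy.exception import PolygraphyException

try:
    CompareFunc.simple(check_error_stat="invalid-stat")(res0, res1)
except PolygraphyException as err:
    print(err)  # message begins with "Invalid choice"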
Example #4
    def test_atol_rtol_either_pass(self, check_error_stat):
        # If either rtol or atol alone is sufficient, the compare_func should pass
        res0 = IterationResult(
            outputs={"output": np.array([1, 2], dtype=np.float32)})
        res1 = IterationResult(
            outputs={"output": np.array((1.25, 2.5), dtype=np.float32)})

        assert not CompareFunc.simple(check_error_stat=check_error_stat)(
            res0, res1)["output"]

        assert CompareFunc.simple(check_error_stat=check_error_stat,
                                  rtol=0.25)(res0, res1)["output"]
        assert CompareFunc.simple(check_error_stat=check_error_stat,
                                  atol=0.5)(res0, res1)["output"]
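The numbers work out as follows (per the comment above, an element passes when either its absolute or its relative difference is within tolerance; reldiff is taken relative to res0):

# absdiff = |res1 - res0|    = [0.25, 0.50]
# reldiff = absdiff / |res0| = [0.25, 0.25]
# defaults alone:  neither criterion holds for any element -> fail
# rtol=0.25 alone: both reldiffs <= 0.25                   -> pass
# atol=0.5  alone: both absdiffs <= 0.5                    -> pass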
Example #5
    def test_atol_rtol_combined_pass(self):
        # We should also be able to mix them - i.e. rtol might be enough for some elements, atol for others.
        # If together they cover the entire output range, the comparison should pass.
        res0 = IterationResult(
            outputs={"output": np.array([0, 1, 2, 3], dtype=np.float32)})
        res1 = IterationResult(
            outputs={
                "output": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32)
            })

        assert not CompareFunc.simple()(res0, res1)["output"]

        assert not CompareFunc.simple(atol=0.3)(res0, res1)["output"]
        assert not CompareFunc.simple(rtol=0.25)(res0, res1)["output"]

        assert CompareFunc.simple(atol=0.3, rtol=0.25)(res0, res1)["output"]
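A per-element view of why only the combination passes (same either-criterion assumption as in the previous example; reldiff is relative to res0):

# elem  absdiff  reldiff          atol=0.3  rtol=0.25
# 0     0.15     inf (res0 == 0)  pass      fail
# 1     0.25     0.25             pass      pass
# 2     0.50     0.25             fail      pass
# 3     0.75     0.25             fail      pass
# atol alone misses elements 2-3, rtol alone misses element 0,
# but together every element satisfies at least one criterion.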
Example #6
# Imports as in Polygraphy's comparing-frameworks API example:
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
from polygraphy.comparator import Comparator, CompareFunc


def main():
    # The OnnxrtRunner requires an ONNX-RT session.
    # We can use the SessionFromOnnx lazy loader to construct one easily:
    build_onnxrt_session = SessionFromOnnx("identity.onnx")

    # The TrtRunner requires a TensorRT engine.
    # To create one from the ONNX model, we can chain a couple of lazy loaders together:
    build_engine = EngineFromNetwork(NetworkFromOnnxPath("identity.onnx"))

    runners = [
        TrtRunner(build_engine),
        OnnxrtRunner(build_onnxrt_session),
    ]

    # `Comparator.run()` will run each runner separately using synthetic input data and
    #   return a `RunResults` instance. See `polygraphy/comparator/struct.py` for details.
    #
    # TIP: To use custom input data, you can set the `data_loader` parameter in `Comparator.run()`
    #   to a generator or iterable that yields `Dict[str, np.ndarray]`.
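    #   For instance, a minimal custom loader might look like this (the input name
    #   "x" is assumed for the identity model; `np` is NumPy):
    #
    #       def custom_data():
    #           for _ in range(5):
    #               yield {"x": np.ones((1, 1, 2, 2), dtype=np.float32)}
    #
    #       run_results = Comparator.run(runners, data_loader=custom_data())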
    run_results = Comparator.run(runners)

    # `Comparator.compare_accuracy()` checks that outputs match between runners.
    #
    # TIP: The `compare_func` parameter can be used to control how outputs are compared (see API reference for details).
    #   The default comparison function is created by `CompareFunc.simple()`, but we can construct it
    #   explicitly if we want to change the default parameters, such as tolerance.
    assert bool(
        Comparator.compare_accuracy(
            run_results, compare_func=CompareFunc.simple(atol=1e-8)))

    # We can use the `RunResults.save()` method to save the inference results to a JSON file.
    # This can be useful if you want to generate and compare results separately.
    run_results.save("inference_results.json")
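    # Since the results were just saved, a separate process could reload and
    # re-compare them; a minimal sketch (`RunResults` is importable from
    # `polygraphy.comparator`):
    #
    #     from polygraphy.comparator import RunResults
    #     loaded = RunResults.load("inference_results.json")
    #     assert bool(Comparator.compare_accuracy(loaded))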
Example #7
    def test_can_compare_bool(self):
        # np.bool was removed in NumPy 1.24; the built-in bool works across versions.
        iter_result0 = IterationResult(
            outputs={"output": np.zeros((4, 4), dtype=bool)})
        iter_result1 = IterationResult(
            outputs={"output": np.ones((4, 4), dtype=bool)})

        compare_func = CompareFunc.simple()
        acc = compare_func(iter_result0, iter_result1)

        assert not acc["output"]
Example #8
    def test_default_tol_in_map(self, mode):
        # "" can be used to indicate a global tolerance
        OUT0_NAME = "output0"
        OUT_VALS = np.ones((4, 4))

        iter_result0 = IterationResult(outputs={OUT0_NAME: OUT_VALS})
        iter_result1 = IterationResult(outputs={OUT0_NAME: OUT_VALS + 1})

        tols = {
            "": 1.0,
        }

        if mode == "abs":
            compare_func = CompareFunc.simple(atol=tols)
        else:
            compare_func = CompareFunc.simple(rtol=tols)

        acc = compare_func(iter_result0, iter_result1)
        assert acc[OUT0_NAME]
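The "" key composes with named entries, giving a global fallback plus per-output overrides; a minimal sketch (output names are illustrative):

tols = {
    "": 1e-5,        # fallback for any output not listed explicitly
    "output0": 1.0,  # override for one specific output
}
compare_func = CompareFunc.simple(atol=tols)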
Example #9
    def test_per_output_error_stat(self, check_error_stat):
        # output0 will only pass when using check_error_stat=mean
        res0 = IterationResult(
            outputs={
                "output0": np.array([0, 1, 2, 3], dtype=np.float32),
                "output1": np.array([0, 1, 2, 3], dtype=np.float32),
            })
        res1 = IterationResult(
            outputs={
                "output0": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32),
                "output1": np.array((0, 1, 2, 3), dtype=np.float32),
            })

        atol = 0.4125
        assert not CompareFunc.simple(atol=atol)(res0, res1)["output0"]

        assert CompareFunc.simple(check_error_stat=check_error_stat,
                                  atol=atol)(res0, res1)["output0"]
        assert CompareFunc.simple(check_error_stat=check_error_stat,
                                  atol=atol)(res0, res1)["output1"]
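The atol here is not arbitrary: it is exactly the mean absolute error of output0, which is why the mean-based check passes while stricter statistics do not (a worked check):

# output0 absdiffs = [0.15, 0.25, 0.5, 0.75]
# mean = 1.65 / 4 = 0.4125 <= atol -> passes under check_error_stat="mean"
# max  = 0.75             >  atol -> fails under "max" (and the element-wise default)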
Example #10
    def test_non_matching_outputs(self, shape):
        iter_result0 = IterationResult(
            outputs={"output": np.zeros(shape, dtype=np.float32)})
        iter_result1 = IterationResult(
            outputs={"output": np.ones(shape, dtype=np.float32)})

        compare_func = CompareFunc.simple()

        with G_LOGGER.verbosity(G_LOGGER.ULTRA_VERBOSE):
            acc = compare_func(iter_result0, iter_result1)

        assert util.is_empty_shape(shape) or not acc["output"]  # an empty tensor (e.g. shape (0, 4)) has no elements to mismatch
Example #11
    def test_dim_param_trt_onnxrt(self):
        load_onnx_bytes = ONNX_MODELS["dim_param"].loader
        build_onnxrt_session = SessionFromOnnx(load_onnx_bytes)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_onnx_bytes))

        runners = [
            OnnxrtRunner(build_onnxrt_session),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations
Example #12
    def test_check_error_stat(self, func, check_error_stat):
        iter_result0 = IterationResult(
            outputs={"output": func((100, ), dtype=np.float32)})
        iter_result1 = IterationResult(
            outputs={"output": func((100, ), dtype=np.float32)})

        iter_result0["output"][0] += 100

        # Even though the max diff is 100, atol=1 should cause this to pass when checking
        # against the mean error (mean diff = 100 / 100 = 1), but not the max or element-wise error.
        compare_func = CompareFunc.simple(check_error_stat=check_error_stat,
                                          atol=1)

        if check_error_stat in ["max", "elemwise"]:
            assert not compare_func(iter_result0, iter_result1)["output"]
        else:
            assert compare_func(iter_result0, iter_result1)["output"]
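For reference, check_error_stat is believed to accept the values "elemwise" (the default), "max", "mean", and "median" (cf. the "Invalid choice" test above); the arithmetic behind this test is simple:

# One element out of 100 is perturbed by 100; the rest match exactly, so:
# mean absolute error = 100 / 100 = 1 <= atol=1 -> passes under "mean"
# max  absolute error = 100          >  atol=1 -> fails under "max" and "elemwise"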
Example #13
    def test_multiple_runners(self):
        load_tf = TF_MODELS["identity"].loader
        build_tf_session = SessionFromGraph(load_tf)
        onnx_model = OnnxFromTfGraph(load_tf)
        load_serialized_onnx = BytesFromOnnx(onnx_model)
        build_onnxrt_session = SessionFromOnnx(load_serialized_onnx)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))
        gs_graph = GsFromOnnx(onnx_model)

        runners = [
            TfRunner(build_tf_session),
            OnnxrtRunner(build_onnxrt_session),
            PluginRefRunner(gs_graph),
            TrtRunner(load_engine),
        ]

        run_results = Comparator.run(runners)
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations
Example #14
# NOTE: The opening of this generated script was truncated. The profile definition
# below is reconstructed; the input tensor name "x" is assumed, as are the loaders
# referenced further down (parse_network_from_onnx, build_onnxrt_session, data_loader).
profiles = [
    Profile().add("x",
                  min=[1, 1, 28, 28],
                  opt=[4, 1, 28, 28],
                  max=[16, 1, 28, 28])
]
create_trt_config = CreateTrtConfig(max_workspace_size=1000000000,
                                    profiles=profiles)
build_engine = EngineFromNetwork(parse_network_from_onnx,
                                 config=create_trt_config)
save_engine = SaveEngine(build_engine, path='model-FP32.plan')

# Runners
runners = [
    OnnxrtRunner(build_onnxrt_session),
    TrtRunner(save_engine),
]

# Runner Execution
results = Comparator.run(runners, data_loader=data_loader)

success = True
# Accuracy Comparison
compare_func = CompareFunc.simple(rtol={'': 0.001}, atol={'': 0.001})
success &= bool(Comparator.compare_accuracy(results,
                                            compare_func=compare_func))

# Report Results
cmd_run = ' '.join(sys.argv)
if not success:
    # G_LOGGER.critical() logs the message and raises, so on failure the
    # PASSED line below is never reached.
    G_LOGGER.critical("FAILED | Command: {}".format(cmd_run))
G_LOGGER.finish("PASSED | Command: {}".format(cmd_run))