Example No. 1
    def test_per_output_tol_fallback(self, mode):
        OUT0_NAME = "output0"
        OUT1_NAME = "output1"
        OUT_VALS = np.ones((4, 4))

        iter_result0 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS + 1,
            OUT1_NAME: OUT_VALS
        })
        iter_result1 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS,
            OUT1_NAME: OUT_VALS + 1
        })

        acc = CompareFunc.simple()(iter_result0, iter_result1)
        assert not acc[OUT0_NAME]
        assert not acc[OUT1_NAME]

        # No tolerance is specified for OUT0_NAME, so it should still fail with the fallback (default) tolerance.
        tols = {
            OUT1_NAME: 1.0,
        }

        if mode == "abs":
            compare_func = CompareFunc.simple(atol=tols)
        else:
            compare_func = CompareFunc.simple(rtol=tols)

        acc = compare_func(iter_result0, iter_result1)
        assert not acc[OUT0_NAME]
        assert acc[OUT1_NAME]
Example No. 2
    def test_per_output_tol(self, mode):
        OUT0_NAME = "output0"
        OUT1_NAME = "output1"
        OUT_VALS = np.ones((4, 4))

        iter_result0 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS,
            OUT1_NAME: OUT_VALS
        })
        iter_result1 = IterationResult(outputs={
            OUT0_NAME: OUT_VALS,
            OUT1_NAME: OUT_VALS + 1
        })

        # With default tolerances, out1 is wrong for the second result.
        compare_func = CompareFunc.simple()
        acc = compare_func(iter_result0, iter_result1)
        assert acc[OUT0_NAME]
        assert not acc[OUT1_NAME]

        # But with custom tolerances, it should pass.
        tols = {
            OUT0_NAME: 0.0,
            OUT1_NAME: 1.0,
        }

        if mode == "abs":
            compare_func = CompareFunc.simple(atol=tols)
        else:
            compare_func = CompareFunc.simple(rtol=tols)

        acc = compare_func(iter_result0, iter_result1)
        assert acc[OUT0_NAME]
        assert acc[OUT1_NAME]
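The two tests above exercise per-output tolerance maps: a dict mapping output names to tolerances, passed as atol or rtol. As a rough usage sketch (the output names and values below are illustrative, not taken from the tests; the "" default key is demonstrated in Example No. 11 further down):

import numpy as np
from polygraphy.comparator import CompareFunc, IterationResult

# Illustrative outputs - names and values are made up for this sketch.
golden = IterationResult(outputs={"scores": np.ones((4, 4)), "boxes": np.ones((4, 4))})
candidate = IterationResult(outputs={"scores": np.ones((4, 4)) + 0.5, "boxes": np.ones((4, 4))})

# Per-output absolute tolerances; "" acts as the fallback for outputs not listed explicitly.
compare_func = CompareFunc.simple(atol={"scores": 1.0, "": 1e-5})
accuracy = compare_func(golden, candidate)
assert accuracy["scores"] and accuracy["boxes"]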
Example No. 3
    def test_can_compare_bool(self):
        iter_result0 = IterationResult(outputs={"output": np.zeros((4, 4), dtype=np.bool)})
        iter_result1 = IterationResult(outputs={"output": np.ones((4, 4), dtype=np.bool)})

        compare_func = CompareFunc.basic_compare_func()
        acc = compare_func(iter_result0, iter_result1)

        assert not acc["output"]
Example No. 4
    def test_atol_rtol_either_pass(self, check_error_stat):
        # If either rtol or atol alone is sufficient, the compare_func should pass.
        res0 = IterationResult(outputs={"output": np.array([1, 2], dtype=np.float32)})
        res1 = IterationResult(outputs={"output": np.array((1.25, 2.5), dtype=np.float32)})

        assert not CompareFunc.basic_compare_func(check_error_stat=check_error_stat)(res0, res1)["output"]

        assert CompareFunc.basic_compare_func(check_error_stat=check_error_stat, rtol=0.25)(res0, res1)["output"]
        assert CompareFunc.basic_compare_func(check_error_stat=check_error_stat, atol=0.5)(res0, res1)["output"]
Example No. 5
    def test_invalid_error_stat(self):
        res0 = IterationResult(
            outputs={"output": np.array([0, 1, 2, 3], dtype=np.float32)})
        res1 = IterationResult(
            outputs={
                "output": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32)
            })

        with pytest.raises(PolygraphyException, match="Invalid choice"):
            CompareFunc.simple(check_error_stat="invalid-stat")(res0, res1)
Example No. 6
    def test_non_matching_outputs(self, shape):
        iter_result0 = IterationResult(outputs={"output": np.zeros(shape, dtype=np.float32)})
        iter_result1 = IterationResult(outputs={"output": np.ones(shape, dtype=np.float32)})

        compare_func = CompareFunc.basic_compare_func()

        with G_LOGGER.verbosity(G_LOGGER.ULTRA_VERBOSE):
            acc = compare_func(iter_result0, iter_result1)

        assert util.is_empty_shape(shape) or not acc["output"]
Example No. 7
    def test_atol_rtol_combined_pass(self):
        # We should also be able to mix them - i.e. rtol might be enough for some elements, atol for others.
        # If together they cover the entire output, the comparison should pass.
        res0 = IterationResult(outputs={"output": np.array([0, 1, 2, 3], dtype=np.float32)})
        res1 = IterationResult(outputs={"output": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32)})

        assert not CompareFunc.basic_compare_func()(res0, res1)["output"]

        assert not CompareFunc.basic_compare_func(atol=0.3)(res0, res1)["output"]
        assert not CompareFunc.basic_compare_func(rtol=0.25)(res0, res1)["output"]

        assert CompareFunc.basic_compare_func(atol=0.3, rtol=0.25)(res0, res1)["output"]
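A worked breakdown of why the combination passes: each element only needs to satisfy one of the two tolerances, which is the behaviour this test exercises. This is a standalone numpy sketch, not Polygraphy's exact implementation (in particular, how the relative error is normalized may differ):

import numpy as np

ref = np.array([0, 1, 2, 3], dtype=np.float32)
out = np.array([0.15, 1.25, 2.5, 3.75], dtype=np.float32)

abs_diff = np.abs(out - ref)                          # [0.15, 0.25, 0.5, 0.75]
rel_diff = abs_diff / np.maximum(np.abs(out), 1e-12)  # [1.0, 0.2, 0.2, 0.2]

# atol=0.3 covers the first two elements; rtol=0.25 covers the last three.
passes = (abs_diff <= 0.3) | (rel_diff <= 0.25)
assert passes.all()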
Example No. 8
    def test_check_error_stat(self, func, check_error_stat):
        iter_result0 = IterationResult(outputs={"output": func((100, ), dtype=np.float32)})
        iter_result1 = IterationResult(outputs={"output": func((100, ), dtype=np.float32)})

        iter_result0["output"][0] += 100

        # Even though the max diff is 100, atol=1 should cause this to pass when we check
        # against the mean error, but fail when we check the max or element-wise error.
        compare_func = CompareFunc.basic_compare_func(check_error_stat=check_error_stat, atol=1)

        if check_error_stat in ["max", "elemwise"]:
            assert not compare_func(iter_result0, iter_result1)["output"]
        else:
            assert compare_func(iter_result0, iter_result1)["output"]
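A minimal sketch of the same idea with explicit statistic choices. The names "max", "mean", and "elemwise" are the ones referenced by the tests in this listing; treat the exact set of valid choices as an assumption:

import numpy as np
from polygraphy.comparator import CompareFunc, IterationResult

res0 = IterationResult(outputs={"output": np.zeros((100,), dtype=np.float32)})
res1 = IterationResult(outputs={"output": np.zeros((100,), dtype=np.float32)})
res0["output"][0] += 100  # single large outlier; the mean error is only 1.0

# "max" compares the worst-case error against atol, so the outlier makes it fail;
# "mean" compares the average error, which stays well below atol=2.
assert not CompareFunc.simple(check_error_stat="max", atol=2)(res0, res1)["output"]
assert CompareFunc.simple(check_error_stat="mean", atol=2)(res0, res1)["output"]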
Example No. 9
def make_iter_result():
    return IterationResult(runtime=4.5,
                           runner_name="test",
                           outputs={
                               "out0": np.random.random_sample((1, 2, 1)),
                               "out1": np.ones((1, 2), dtype=np.float32),
                           })
Example No. 10
    def test_per_output_error_stat(self, check_error_stat):
        # output0 will only pass when using check_error_stat=mean
        res0 = IterationResult(outputs={
            "output0": np.array([0, 1, 2, 3], dtype=np.float32),
            "output1": np.array([0, 1, 2, 3], dtype=np.float32),
        })
        res1 = IterationResult(outputs={
            "output0": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32),
            "output1": np.array((0, 1, 2, 3), dtype=np.float32),
        })

        atol = 0.4125
        assert not CompareFunc.basic_compare_func(atol=atol)(res0, res1)["output0"]

        assert CompareFunc.basic_compare_func(check_error_stat=check_error_stat, atol=atol)(res0, res1)["output0"]
        assert CompareFunc.basic_compare_func(check_error_stat=check_error_stat, atol=atol)(res0, res1)["output1"]
Example No. 11
    def test_default_tol_in_map(self, mode):
        # "" can be used to indicate a global tolerance
        OUT0_NAME = "output0"
        OUT_VALS = np.ones((4, 4))

        iter_result0 = IterationResult(outputs={OUT0_NAME: OUT_VALS})
        iter_result1 = IterationResult(outputs={OUT0_NAME: OUT_VALS + 1})

        tols = {
            "": 1.0,
        }

        if mode == "abs":
            compare_func = CompareFunc.simple(atol=tols)
        else:
            compare_func = CompareFunc.simple(rtol=tols)

        acc = compare_func(iter_result0, iter_result1)
        assert acc[OUT0_NAME]
Example No. 12
    def test_setitem(self, run_results):
        def check_results(results, is_none=False):
            for iter_res in results["runner1"]:
                if is_none:
                    assert not iter_res
                    assert iter_res.runner_name == ""
                else:
                    assert iter_res
                    assert iter_res.runner_name

        check_results(run_results)

        iter_results = [IterationResult(outputs=None, runner_name=None)]
        run_results["runner1"] = iter_results

        check_results(run_results, is_none=True)
Example No. 13
    def fallback_inference(self, onnx_model):
        """
        Run inference with ONNX-Runtime.

        This can be used to retrieve values/shapes/data types for all
        tensors in the model when other shape inference approaches fail.

        Args:
            onnx_model (onnx.ModelProto):
                    The ONNX model in which to infer shapes.

        Returns:
            (OrderedDict[str, np.ndarray], TensorMetadata):
                    1. Mapping of values for all tensors in the model, including inputs.
                        Values are loaded lazily when first accessed so as to save memory.
                    2. Metadata for every tensor in the model.
        """
        from polygraphy.comparator import IterationResult

        with G_LOGGER.verbosity(G_LOGGER.severity + 10):
            load_model = onnx_backend.ModifyOutputs(onnx_model,
                                                    outputs=constants.MARK_ALL,
                                                    copy=True)
            with onnxrt_backend.OnnxrtRunner(
                    onnxrt_backend.SessionFromOnnx(
                        onnx_backend.BytesFromOnnx(load_model))) as runner:
                # We want to set input_metadata only - not user_input_metadata, so that user_input_metadata
                # will be populated by the --model-inputs argument.
                data_loader = self.data_loader_args.get_data_loader()
                data_loader.input_metadata = runner.get_input_metadata()
                feed_dict = data_loader[0]

                with G_LOGGER.verbosity(G_LOGGER.severity - 10):
                    G_LOGGER.info(
                        "Running fallback shape inference using input metadata:\n{:}"
                        .format(TensorMetadata.from_feed_dict(feed_dict)))

                outputs = runner.infer(feed_dict)
                # We include the inputs here so that we have values for all tensors in the model.
                outputs.update(feed_dict)
                # Use IterationResult here since it can handle very large tensors by saving to disk.
                # Layerwise outputs might otherwise take up too much memory.
                return IterationResult(outputs), TensorMetadata.from_feed_dict(
                    outputs)
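A hedged sketch of consuming the returned pair. The `onnx_args` object below is hypothetical shorthand for whatever argument group exposes fallback_inference(); it is not defined in this listing:

# Hypothetical wiring: `onnx_args` and `onnx_model` are assumed to exist already.
values, meta = onnx_args.fallback_inference(onnx_model)

# `meta` describes every tensor in the model; `values` holds the corresponding arrays
# (inputs included), loaded lazily on first access per the docstring above.
for name in meta:
    arr = values[name]
    print(name, arr.shape, arr.dtype)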
Example No. 14
    def test_validate_nan(self):
        run_results = RunResults()
        run_results["fake-runner"] = [
            IterationResult(outputs={"x": np.array(np.nan)})
        ]
        assert not Comparator.validate(run_results)
Example No. 15
    def test_validate_inf(self):
        run_results = RunResults()
        run_results["fake-runner"] = [
            IterationResult(outputs={"x": np.array(np.inf)})
        ]
        assert not Comparator.validate(run_results, check_finite=True)
Example No. 16
    def test_basic(self):
        arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        func = PostprocessFunc.topk_func(k=3)
        top_k = func(IterationResult({"x": arr}))
        assert np.all(top_k["x"] == [4, 3, 2])
Example No. 17
    def test_k_can_exceed_array_len(self):
        arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        func = PostprocessFunc.topk_func(k=10)
        top_k = func(IterationResult({"x": arr}))
        assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
Example No. 18
    def test_per_output_top_k(self):
        arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        func = PostprocessFunc.topk_func(k={"": 10, "y": 2})
        top_k = func(IterationResult({"x": arr, "y": arr}))
        assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
        assert np.all(top_k["y"] == [4, 3])
Example No. 19
    def test_setitem_out_of_bounds(self, run_results):
        iter_results = [IterationResult(outputs=None, runner_name="new")]
        run_results["runner2"] = iter_results

        assert len(run_results) == 3
        assert run_results["runner2"][0].runner_name == "new"
Example No. 20
def make_iter_results(runner_name):
    return [
        IterationResult(outputs={"dummy_out": np.zeros((4, 4))},
                        runner_name=runner_name)
    ] * 2
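The helper above produces per-runner iteration results that can be collected into a RunResults and compared. A minimal end-to-end sketch, assuming Comparator.compare_accuracy is available (it is not shown elsewhere in this listing):

import numpy as np
from polygraphy.comparator import Comparator, CompareFunc, IterationResult, RunResults

def make_iter_results(runner_name):
    return [
        IterationResult(outputs={"dummy_out": np.zeros((4, 4))},
                        runner_name=runner_name)
    ] * 2

run_results = RunResults()
run_results["runner0"] = make_iter_results("runner0")
run_results["runner1"] = make_iter_results("runner1")

# Identical outputs, so every per-iteration comparison should pass.
accuracy = Comparator.compare_accuracy(run_results, compare_func=CompareFunc.simple())
assert bool(accuracy)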