Code example #1
0
class TestTrtLegacy(object):
    """Sanity checks for the legacy TRT runner CLI path."""

    def test_trt_legacy_uff(self):
        # UFF route: feed a frozen TF identity model to the legacy runner.
        args = [TF_MODELS["identity"].path, "--trt-legacy"]
        run_polygraphy_run(args)

    @pytest.mark.skipif(version(trt.__version__) >= version("7.0"),
                        reason="Unsupported in TRT 7.0 and later")
    def test_trt_legacy_onnx(self):
        # The ONNX route through the legacy runner only exists before TRT 7.0.
        args = [ONNX_MODELS["identity"].path, "--trt-legacy"]
        run_polygraphy_run(args)
Code example #2
0
def test_polygraphy_inspect_model_trt_sanity(run_inspect_model, model):
    """Smoke-test `inspect model --display-as=trt` for each parametrized model."""
    import tensorrt as trt

    trt_ver = version(trt.__version__)
    if model == "tensor_attr" and trt_ver < version("7.2"):
        pytest.skip("Models with constant outputs were not supported before 7.2")
    if model == "scan" and trt_ver < version("7.0"):
        pytest.skip("Scan was not supported until 7.0")

    run_inspect_model([ONNX_MODELS[model].path, "--display-as=trt"])
Code example #3
0
File: test_loader.py — Project: celidos/TensorRT_study
 def test_defaults(self, identity_builder_network):
     """CreateConfig with no arguments should yield the documented defaults."""
     builder, network = identity_builder_network
     config = CreateConfig()(builder, network)
     assert config.max_workspace_size == 1 << 24
     # The TF32 flag only exists on newer TRT builds.
     if version(trt.__version__) > version("7.1.0.0"):
         assert not config.get_flag(trt.BuilderFlag.TF32)
     # Reduced-precision modes must be off by default.
     for flag in (trt.BuilderFlag.FP16, trt.BuilderFlag.INT8):
         assert not config.get_flag(flag)
     assert config.num_optimization_profiles == 1
     assert config.int8_calibrator is None
Code example #4
0
File: test_loader.py — Project: celidos/TensorRT_study
class TestModifyNetwork(object):
    """Tests for ModifyNetwork's output marking/unmarking behavior."""

    def test_mark_layerwise(self, identity_identity_network):
        # Consistency fix: every other test in this class invokes the loader
        # via func.invoke(...); the original built the loader and called it
        # manually here.
        builder, network, parser = func.invoke(
            ModifyNetwork(identity_identity_network,
                          outputs=constants.MARK_ALL))
        with builder, network, parser:
            # MARK_ALL must mark every output of every layer as a network output.
            for layer in network:
                for index in range(layer.num_outputs):
                    assert layer.get_output(index).is_network_output

    def test_mark_custom_outputs(self, identity_identity_network):
        builder, network, parser = func.invoke(
            ModifyNetwork(identity_identity_network,
                          outputs=["identity_out_0"]))
        with builder, network, parser:
            # Only the explicitly requested tensor remains an output.
            assert network.num_outputs == 1
            assert network.get_output(0).name == "identity_out_0"

    def test_exclude_outputs_with_mark_layerwise(self,
                                                 identity_identity_network):
        builder, network, parser = func.invoke(
            ModifyNetwork(identity_identity_network,
                          outputs=constants.MARK_ALL,
                          exclude_outputs=["identity_out_2"]))
        with builder, network, parser:
            # MARK_ALL minus the excluded tensor leaves a single output.
            assert network.num_outputs == 1
            assert network.get_output(0).name == "identity_out_0"

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_mark_shape_outputs(self, reshape_network):
        builder, network, parser = func.invoke(
            ModifyNetwork(reshape_network,
                          outputs=["output", "reduce_prod_out_gs_2"]))
        with builder, network, parser:
            assert network.num_outputs == 2
            # The marked shape tensor should surface as a shape output.
            assert network.get_output(0).name == "reduce_prod_out_gs_2"
            assert network.get_output(0).is_shape_tensor

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_unmark_shape_outputs(self, reshape_network):
        builder, network, parser = func.invoke(
            ModifyNetwork(reshape_network,
                          outputs=constants.MARK_ALL,
                          exclude_outputs=["reduce_prod_out_gs_2"]))
        with builder, network, parser:
            assert network.num_outputs == 1
Code example #5
0
class TestOther(object):
    """Miscellaneous end-to-end checks for the `run` tool."""

    def test_0_iterations(self):
        # Zero inference iterations must still be accepted.
        args = [ONNX_MODELS["identity"].path, "--onnxrt", "--iterations=0"]
        run_polygraphy_run(args)

    def test_custom_tolerance(self):
        args = [ONNX_MODELS["identity"].path, "--onnxrt", "--iterations=0"]
        args += ["--atol=1.0", "--rtol=1.0"]
        run_polygraphy_run(args)

    def test_top_k(self):
        args = [ONNX_MODELS["identity"].path, "--onnxrt", "--top-k=5"]
        run_polygraphy_run(args)

    def test_save_load_outputs(self):
        """Saved results can be reloaded, copied, and compared."""
        model_path = ONNX_MODELS["identity"].path
        with tempfile.NamedTemporaryFile() as results0, \
                tempfile.NamedTemporaryFile() as results1:
            run_polygraphy_run(
                [model_path, "--onnxrt", "--save-results", results0.name])
            # Copy
            run_polygraphy_run(["--load-results", results0.name,
                                "--save-results", results1.name])
            run_polygraphy_run([model_path, "--onnxrt", "--load-results",
                                results0.name, results1.name])
            # Should work even with no runners specified
            run_polygraphy_run(
                [model_path, "--load-results", results0.name, results1.name])
            # Should work with only one file
            run_polygraphy_run([model_path, "--load-results", results0.name])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_runner_coexistence(self):
        # TF, ONNX-RT, and TRT runners active in a single invocation.
        run_polygraphy_run([TF_MODELS["identity"].path, "--model-type=frozen",
                            "--tf", "--onnxrt", "--trt"])
Code example #6
0
class TestTrtRunner(object):
    """Basic lifecycle and shape-handling tests for TrtRunner."""

    def test_can_name_runner(self):
        runner = TrtRunner(None, name="runner")
        assert runner.name == "runner"

    def test_basic(self):
        model = ONNX_MODELS["identity"]
        engine_loader = EngineFromNetwork(NetworkFromOnnxBytes(model.loader))
        with TrtRunner(engine_loader) as runner:
            assert runner.is_active
            model.check_runner(runner)
        # Leaving the context must deactivate the runner.
        assert not runner.is_active

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_multiple_profiles(self):
        model = ONNX_MODELS["dynamic_identity"]
        shapes = [(1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)]
        profiles = [
            Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
            Profile().add("X", *shapes),
        ]
        engine_loader = EngineFromNetwork(
            NetworkFromOnnxBytes(model.loader),
            CreateConfig(profiles=profiles))
        with TrtRunner(engine_loader) as runner:
            # Exercise the second (non-default) optimization profile.
            runner.context.active_optimization_profile = 1
            for shape in shapes:
                model.check_runner(runner, {"X": shape})

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_empty_tensor_with_dynamic_input_shape_tensor(self):
        # Zero-volume shapes driven through a dynamic input shape tensor.
        model = ONNX_MODELS["empty_tensor_expand"]
        shapes = [(1, 2, 0, 3, 0), (2, 2, 0, 3, 0), (4, 2, 0, 3, 0)]
        engine_loader = EngineFromNetwork(
            NetworkFromOnnxBytes(model.loader),
            CreateConfig(profiles=[Profile().add("new_shape", *shapes)]))
        with TrtRunner(engine_loader) as runner:
            for shape in shapes:
                model.check_runner(runner, {"new_shape": shape})
Code example #7
0
    def test_multiple_runners(self):
        """Run the same identity model through TF, ONNX-RT, and TRT and compare."""
        load_tf = TF_MODELS["identity"].loader
        load_serialized_onnx = BytesFromOnnx(OnnxFromTfGraph(load_tf))

        runners = [
            TfRunner(SessionFromGraph(load_tf)),
            OnnxrtRunner(SessionFromOnnxBytes(load_serialized_onnx)),
            TrtRunner(
                EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))),
        ]

        run_results = Comparator.run(runners)
        # Shape checking is only enabled on TRT >= 7.0.
        compare_func = CompareFunc.basic_compare_func(
            check_shapes=version(trt.__version__) >= version("7.0"))
        accuracy = Comparator.compare_accuracy(run_results,
                                               compare_func=compare_func)
        assert bool(accuracy)
        # Default number of iterations
        assert len(list(run_results.values())[0]) == 1
Code example #8
0
File: test_loader.py — Project: celidos/TensorRT_study
 def test_tf32(self, identity_builder_network, flag):
     """CreateConfig(tf32=...) must propagate into the TF32 builder flag."""
     builder, network = identity_builder_network
     config = CreateConfig(tf32=flag)(builder, network)
     # The TF32 flag only exists on TRT builds newer than 7.1.0.0.
     if version(trt.__version__) > version("7.1.0.0"):
         assert config.get_flag(trt.BuilderFlag.TF32) == flag
Code example #9
0
File: test_loader.py — Project: celidos/TensorRT_study
        builder, network, parser = func.invoke(
            NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader))
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert not network.has_explicit_precision

    def test_loader_explicit_precision(self):
        """explicit_precision=True must produce an explicit-precision network."""
        loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader,
                                      explicit_precision=True)
        builder, network, parser = func.invoke(loader)
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert network.has_explicit_precision


@pytest.mark.skipif(version(trt.__version__) < version("7.1.0.0"),
                    reason="API was added in TRT 7.1")
class TestNetworkFromOnnxPath(object):
    """Tests for the path-based ONNX network loader (API added in TRT 7.1)."""

    def test_loader(self):
        builder, network, parser = func.invoke(
            NetworkFromOnnxPath(ONNX_MODELS["identity"].path))
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert not network.has_explicit_precision

    def test_loader_explicit_precision(self):
        builder, network, parser = func.invoke(
            NetworkFromOnnxPath(ONNX_MODELS["identity"].path,
                                explicit_precision=True))
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            # Previously missing: verify the flag actually took effect, matching
            # the bytes-based loader test for the same option.
            assert network.has_explicit_precision
Code example #10
0
            os.remove(artifact)


    def __str__(self):
        """Display this example as its path relative to the examples root."""
        relative = os.path.relpath(self.path, EXAMPLES_ROOT)
        return relative


# Python-API examples to smoke-test. `artifact_names` lists files an example
# is expected to write (and which should be cleaned up after the run).
API_EXAMPLES = [
    Example(["api", "01_comparing_frameworks"]),
    Example(["api", "02_using_real_data"]),
    Example(["api", "03_interoperating_with_tensorrt"]),
    Example(["api", "04_int8_calibration_in_tensorrt"], artifact_names=["identity-calib.cache"]),
    Example(["api", "05_using_tensorrt_network_api"]),
]

@pytest.mark.skipif(version(trt.__version__) < version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("example", API_EXAMPLES, ids=lambda case: str(case))
def test_api_examples(example):
    """Execute every command of an API example inside its managed context."""
    with example as commands:
        for cmd in commands:
            example.run(cmd)


# CLI examples to smoke-test, grouped by tool; `artifact_names` lists files
# each example is expected to produce.
CLI_EXAMPLES = [
    # Run
    Example(["cli", "run", "01_comparing_frameworks"]),
    Example(["cli", "run", "02_comparing_across_runs"], artifact_names=["system_a_results.pkl"]),
    Example(["cli", "run", "03_generating_a_comparison_script"], artifact_names=["compare_trt_onnxrt.py"]),
    # Surgeon
    Example(["cli", "surgeon", "01_isolating_subgraphs"], artifact_names=["subgraph.onnx"]),
]
Code example #11
0
class TestTrt(object):
    """End-to-end CLI checks for the TRT runner in `polygraphy run`.

    Method bodies are literal command-line argument vectors passed to
    `run_polygraphy_run`; each test asserts only that the invocation succeeds
    unless noted otherwise.
    """

    def test_trt(self):
        run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt"])

    def test_trt_plugins(self):
        # Loads a TRT plugin library alongside the runner.
        run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--trt", "--plugins",
            "libnvinfer_plugin.so"
        ])

    def test_trt_custom_outputs(self):
        run_polygraphy_run([
            ONNX_MODELS["identity_identity"].path, "--trt", "--trt-outputs",
            "identity_out_0"
        ])

    def test_trt_layerwise_outputs(self):
        # "mark all" should surface every layer's output in the saved results.
        with tempfile.NamedTemporaryFile() as outfile0:
            run_polygraphy_run([
                ONNX_MODELS["identity_identity"].path, "--trt",
                "--trt-outputs", "mark", "all", "--save-results", outfile0.name
            ])
            results = misc.pickle_load(outfile0.name)
            # Exactly one runner with one iteration is expected in the results.
            [result] = list(results.values())[0]
            assert len(result) == 2
            assert "identity_out_0" in result
            assert "identity_out_2" in result

    def test_trt_exclude_outputs_with_layerwise(self):
        # Excluding an output from "mark all" should remove it from the results.
        with tempfile.NamedTemporaryFile() as outfile0:
            run_polygraphy_run([
                ONNX_MODELS["identity_identity"].path, "--trt",
                "--trt-outputs", "mark", "all", "--trt-exclude-outputs",
                "identity_out_2", "--save-results", outfile0.name
            ])
            results = misc.pickle_load(outfile0.name)
            [result] = list(results.values())[0]
            assert len(result) == 1
            assert "identity_out_0" in result

    @pytest.mark.skipif(version(trt.__version__) < version("7.1.0.0"),
                        reason="API was added in TRT 7.1")
    def test_trt_onnx_ext(self):
        run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--ext"])

    def test_trt_int8(self):
        run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--int8"])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_trt_input_shape(self):
        # Pins a concrete shape for the dynamic input.
        run_polygraphy_run([
            ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt",
            "--input-shapes", "X,1x2x4x4"
        ])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_trt_dynamic_input_shape(self):
        # -1 leaves one dimension dynamic at runtime.
        run_polygraphy_run([
            ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt",
            "--input-shapes", "X,1x2x-1x4"
        ])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_trt_explicit_profile(self):
        run_polygraphy_run([
            ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt",
            "--input-shapes", "X,1x2x1x1", "--trt-min-shapes", "X,1x2x1x1",
            "--trt-opt-shapes", "X,1x2x1x1", "--trt-max-shapes", "X,1x2x1x1"
        ])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_trt_explicit_profile_implicit_runtime_shape(self):
        # No --input-shapes: runtime shape should come from the profile.
        run_polygraphy_run([
            ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt",
            "--trt-min-shapes", "X,1x2x1x1", "--trt-opt-shapes", "X,1x2x1x1",
            "--trt-max-shapes", "X,1x2x1x1"
        ])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_trt_explicit_profile_opt_runtime_shapes_differ(self):
        # Runtime shape (2x2) lies inside the profile but differs from opt (3x3).
        run_polygraphy_run([
            ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt",
            "--input-shapes", "X,1x2x2x2", "--trt-min-shapes", "X,1x2x1x1",
            "--trt-opt-shapes", "X,1x2x3x3", "--trt-max-shapes", "X,1x2x4x4"
        ])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_trt_multiple_profiles(self):
        # Repeating the min/opt/max flags defines a second optimization profile.
        run_polygraphy_run([
            ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt",
            "--trt-min-shapes", "X,1x2x1x1", "--trt-opt-shapes", "X,1x2x1x1",
            "--trt-max-shapes", "X,1x2x1x1", "--trt-min-shapes", "X,1x2x4x4",
            "--trt-opt-shapes", "X,1x2x4x4", "--trt-max-shapes", "X,1x2x4x4"
        ])

    def test_trt_int8_calibration_cache(self):
        # INT8 calibration should write a non-empty cache file.
        with tempfile.NamedTemporaryFile() as outpath:
            run_polygraphy_run([
                ONNX_MODELS["identity"].path, "--trt", "--int8",
                "--calibration-cache", outpath.name
            ])
            check_file_non_empty(outpath.name)

    def test_trt_save_load_engine(self):
        # A saved engine must be reloadable with --model-type=engine.
        with tempfile.NamedTemporaryFile() as outpath:
            run_polygraphy_run([
                ONNX_MODELS["identity"].path, "--trt", "--save-engine",
                outpath.name
            ])
            check_file_non_empty(outpath.name)
            run_polygraphy_run(["--trt", outpath.name, "--model-type=engine"])
Code example #12
0
class TestOther(object):
    """Miscellaneous end-to-end checks for `polygraphy run`.

    Bodies are literal CLI argument vectors; where two runners are needed for
    a comparison, the same runner flag is passed twice.
    """

    def test_0_iterations(self):
        run_polygraphy_run(
            [ONNX_MODELS["identity"].path, "--onnxrt", "--iterations=0"])

    def test_subprocess_sanity(self):
        run_polygraphy_run(
            [ONNX_MODELS["identity"].path, "--onnxrt", "--use-subprocess"])

    def test_custom_tolerance(self):
        # --onnxrt twice: presumably spawns two ONNX-RT runners so the
        # comparison path is exercised — verify against the tool's CLI docs.
        run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--onnxrt", "--onnxrt",
            "--iterations=0", "--atol=1.0", "--rtol=1.0"
        ])

    def test_custom_per_output_tolerance(self):
        # Per-output tolerances: "name,value" pairs plus a bare default value.
        run_polygraphy_run([
            ONNX_MODELS["identity_identity"].path, "--onnxrt", "--onnxrt",
            "--onnx-outputs", "mark", "all", "--atol", "identity_out_0,1.0",
            "identity_out_2,3.0", "0.5", "--rtol", "identity_out_0,1.0",
            "identity_out_2,3.0", "0.5"
        ])

    def test_top_k(self):
        run_polygraphy_run(
            [ONNX_MODELS["identity"].path, "--onnxrt", "--top-k=5"])

    def test_save_load_outputs(self, tmp_path):
        """Saved result files can be reloaded and compared in all combinations."""
        OUTFILE0 = os.path.join(tmp_path, "outputs0.pkl")
        OUTFILE1 = os.path.join(tmp_path, "outputs1.pkl")
        run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--onnxrt", "--save-results",
            OUTFILE0
        ])
        run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--onnxrt", "--save-results",
            OUTFILE1
        ])

        status = run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--onnxrt", "--load-results",
            OUTFILE0, OUTFILE1
        ])
        assert "Difference is within tolerance" in status.stdout + status.stderr  # Make sure it actually compared stuff.

        # Should work with only one file
        status = run_polygraphy_run(
            [ONNX_MODELS["identity"].path, "--load-results", OUTFILE0])
        assert "Difference is within tolerance" not in status.stdout + status.stderr  # Make sure it DIDN'T compare stuff.

        # Should work even with no runners specified
        status = run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--load-results", OUTFILE0, OUTFILE1
        ])
        assert "Difference is within tolerance" in status.stdout + status.stderr  # Make sure it actually compared stuff.

        # Should work even when comparing a single runner to itself.
        status = run_polygraphy_run([
            ONNX_MODELS["identity"].path, "--load-results", OUTFILE0, OUTFILE0
        ])
        assert "Difference is within tolerance" in status.stdout + status.stderr  # Make sure it actually compared stuff.

    def test_save_load_inputs(self):
        """Saved input data can be reloaded, copied, and reused."""
        with tempfile.NamedTemporaryFile(
        ) as infile0, tempfile.NamedTemporaryFile() as infile1:
            run_polygraphy_run([
                ONNX_MODELS["identity"].path, "--onnxrt", "--save-input-data",
                infile0.name
            ])
            run_polygraphy_run([
                ONNX_MODELS["identity"].path, "--onnxrt", "--load-input-data",
                infile0.name, "--save-input-data", infile1.name
            ])  # Copy
            run_polygraphy_run([
                ONNX_MODELS["identity"].path, "--onnxrt", "--load-input-data",
                infile0.name, infile1.name
            ])

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_runner_coexistence(self):
        run_polygraphy_run([
            TF_MODELS["identity"].path, "--model-type=frozen", "--tf",
            "--onnxrt", "--trt"
        ])
Code example #13
0
File: test_loader.py — Project: leo-XUKANG/TensorRT-1
class TestOnnxNetworkLoader(object):
    """Tests for the bytes-based ONNX network loader."""

    def test_loader(self):
        loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)
        builder, network, parser = loader()
        with builder, network, parser:
            # Default parse: explicit batch, no explicit precision.
            assert not network.has_implicit_batch_dimension
            assert not network.has_explicit_precision

    def test_loader_explicit_precision(self):
        loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader,
                                      explicit_precision=True)
        builder, network, parser = loader()
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert network.has_explicit_precision


@pytest.mark.skipif(version(trt.__version__) < version("7.1.0.0"), reason="API was added in TRT 7.1")
class TestNetworkFromOnnxPath(object):
    """Tests for the path-based ONNX network loader (API added in TRT 7.1)."""

    def test_loader(self):
        loader = NetworkFromOnnxPath(ONNX_MODELS["identity"].path)
        builder, network, parser = loader()
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert not network.has_explicit_precision

    def test_loader_explicit_precision(self):
        loader = NetworkFromOnnxPath(ONNX_MODELS["identity"].path,
                                     explicit_precision=True)
        builder, network, parser = loader()
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert network.has_explicit_precision

Code example #14
0
#
# INSPECT RESULTS
#

def test_polygraphy_inspect_results_sanity():
    """`inspect results` should accept a results file produced by `run`."""
    with tempfile.NamedTemporaryFile() as results_file:
        run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-results", results_file.name])
        run_polygraphy_inspect(["results", results_file.name])


#
# PRECISION
#

@pytest.mark.skipif(version(trt.__version__) < version("7.0"), reason="Unsupported for TRT 6")
def test_polygraphy_precision_bisect_sanity():
    """Bisect-mode precision debugging against golden ONNX-RT results."""
    with tempfile.NamedTemporaryFile() as golden:
        run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-results", golden.name])
        run_polygraphy_precision(["bisect", ONNX_MODELS["identity"].path, "--golden", golden.name, "--int8"])


@pytest.mark.skipif(version(trt.__version__) < version("7.0"), reason="Unsupported for TRT 6")
def test_polygraphy_precision_linear_sanity():
    """Linear-mode precision debugging against golden ONNX-RT results."""
    with tempfile.NamedTemporaryFile() as golden:
        run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-results", golden.name])
        run_polygraphy_precision(["linear", ONNX_MODELS["identity"].path, "--golden", golden.name, "--int8"])


@pytest.mark.skipif(version(trt.__version__) < version("7.0"), reason="Unsupported for TRT 6")
def test_polygraphy_precision_worst_first_sanity():
Code example #15
0
File: test_runner.py — Project: celidos/TensorRT_study
class TestTrtRunner(object):
    """Lifecycle, threading, and profile-handling tests for TrtRunner."""

    def test_can_name_runner(self):
        NAME = "runner"
        runner = TrtRunner(None, name=NAME)
        assert runner.name == NAME

    def test_basic(self):
        model = ONNX_MODELS["identity"]
        network_loader = NetworkFromOnnxBytes(model.loader)
        with TrtRunner(EngineFromNetwork(network_loader)) as runner:
            assert runner.is_active
            model.check_runner(runner)
        # Exiting the context must deactivate the runner.
        assert not runner.is_active

    def test_context(self):
        # The runner also accepts an execution-context factory directly.
        model = ONNX_MODELS["identity"]
        engine = func.invoke(
            EngineFromNetwork(NetworkFromOnnxBytes(model.loader)))
        with engine, TrtRunner(engine.create_execution_context) as runner:
            model.check_runner(runner)

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_shape_output(self):
        model = ONNX_MODELS["reshape"]
        engine = func.invoke(
            EngineFromNetwork(NetworkFromOnnxBytes(model.loader)))
        with engine, TrtRunner(engine.create_execution_context) as runner:
            model.check_runner(runner)

    def test_multithreaded_runners_from_engine(self):
        # Two runners sharing one engine must work from separate threads.
        model = ONNX_MODELS["identity"]
        engine = func.invoke(
            EngineFromNetwork(NetworkFromOnnxBytes(model.loader)))

        with engine, TrtRunner(engine) as runner0, TrtRunner(
                engine) as runner1:
            t1 = threading.Thread(target=model.check_runner, args=(runner0, ))
            t2 = threading.Thread(target=model.check_runner, args=(runner1, ))
            t1.start()
            t2.start()
            # Bug fix: the original joined t2 twice and never joined t1, so the
            # `with` block could tear down runner0 while its thread was still
            # running (and t1 failures could go unnoticed).
            t1.join()
            t2.join()

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_multiple_profiles(self):
        model = ONNX_MODELS["dynamic_identity"]
        shapes = [(1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)]
        network_loader = NetworkFromOnnxBytes(model.loader)
        profiles = [
            Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
            Profile().add("X", *shapes),
        ]
        config_loader = CreateConfig(profiles=profiles)
        with TrtRunner(EngineFromNetwork(network_loader,
                                         config_loader)) as runner:
            # Select the second profile; the version gate mirrors the TRT API
            # change around 7.3 (direct assignment vs. async selection).
            if misc.version(trt.__version__) < misc.version("7.3"):
                runner.context.active_optimization_profile = 1
            else:
                runner.context.set_optimization_profile_async(
                    1, runner.stream.address())
            for shape in shapes:
                model.check_runner(runner, {"X": shape})

    @pytest.mark.skipif(version(trt.__version__) < version("7.0"),
                        reason="Unsupported for TRT 6")
    def test_empty_tensor_with_dynamic_input_shape_tensor(self):
        # Zero-volume shapes driven through a dynamic input shape tensor.
        model = ONNX_MODELS["empty_tensor_expand"]
        shapes = [(1, 2, 0, 3, 0), (2, 2, 0, 3, 0), (4, 2, 0, 3, 0)]
        network_loader = NetworkFromOnnxBytes(model.loader)
        profiles = [Profile().add("new_shape", *shapes)]
        config_loader = CreateConfig(profiles=profiles)

        with TrtRunner(EngineFromNetwork(network_loader,
                                         config_loader)) as runner:
            for shape in shapes:
                model.check_runner(runner, {"new_shape": shape})