Example #1
 def ShouldRunTest(self, run_params):
   """Whether to run the test."""
   # Disable the test in fp16 mode since multiple matmul and add ops together
   # can cause overflow.
   return ((run_params.precision_mode != "FP16") and
           not (trt_test.IsQuantizationMode(run_params.precision_mode) and
                not run_params.use_calibration))
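A note on the run_params object these predicates consult: in the TF-TRT integration test harness it is a lightweight record of the current test configuration. A minimal sketch, assuming the illustrative field set below (not the harness's exact definition):

 import collections

 # Hypothetical stand-in for the harness's run-parameters record; the field
 # names mirror the attributes used in the examples in this listing.
 RunParams = collections.namedtuple("RunParams", [
     "precision_mode",   # "FP32", "FP16" or "INT8"
     "dynamic_engine",   # build engines at execution time instead of conversion time
     "use_calibration",  # whether INT8 calibration is performed
     "use_optimizer",    # whether conversion goes through the Grappler optimizer pass
 ])

 params = RunParams(precision_mode="FP16", dynamic_engine=True,
                    use_calibration=False, use_optimizer=False)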
Example #2
 def ShouldRunTest(self, run_params):
     if trt_convert.get_linked_tensorrt_version()[0] < 5:
         return False
     # Only test static engine mode, with or without calibration.
     return (trt_test.IsQuantizationMode(run_params.precision_mode)
             and not run_params.use_optimizer
             and not run_params.dynamic_engine)
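The version gate above uses trt_convert.get_linked_tensorrt_version(), which reports the TensorRT version TensorFlow was linked against as a (major, minor, patch) tuple. A quick standalone check, assuming the newer tensorflow.python.compiler.tensorrt import path (the contrib-era path used in these tests differs):

 from tensorflow.python.compiler.tensorrt import trt_convert

 major, minor, patch = trt_convert.get_linked_tensorrt_version()
 print("Linked TensorRT version: %d.%d.%d" % (major, minor, patch))
 if major < 5:
     print("Static INT8 engine tests would be skipped on this build.")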
Example #3
 def ExpectedEnginesToBuild(self, run_params):
     """Return the expected engines to build."""
     # In dynamic engine mode the engines are built at execution time, not at
     # conversion time, so build errors occur later. Here three of the engines
     # will fail to build, but the corresponding engine ops are still created.
     # TODO(aaroey, jjsjann123): fix this.
     if (run_params.dynamic_engine and
             not trt_test.IsQuantizationMode(run_params.precision_mode)):
         return self._ValidEngines() + self._InvalidEngines()
     return self._ValidEngines()
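The _ValidEngines() and _InvalidEngines() helpers are defined elsewhere in the test class; a minimal sketch of what such helpers might return, with purely illustrative engine names:

 def _ValidEngines(self):
     # Engine ops whose subgraphs are expected to build successfully.
     return ["TRTEngineOp_0"]

 def _InvalidEngines(self):
     # Engine ops expected to fail at build time in dynamic engine mode,
     # although the engine ops themselves are still created in the graph.
     return ["TRTEngineOp_1", "TRTEngineOp_2", "TRTEngineOp_3"]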
Example #4
 def ShouldRunTest(self, run_params):
     """Whether to run the test."""
     # TODO(aaroey): Trt library will fail like:
     #
     # ../builder/cudnnBuilder2.cpp:685:
     # virtual std::vector<nvinfer1::query::Ports<
     #     nvinfer1::query::TensorRequirements>>
     # nvinfer1::builder::Node::getSupportedFormats(
     #     const nvinfer1::query::Ports<nvinfer1::query::AbstractTensor>&,
     #     const nvinfer1::cudnn::HardwareContext&,
     #     nvinfer1::builder::Format::Type,
     #     const nvinfer1::builder::FormatTypeHack&) const:
     # Assertion `sf' failed.
     #
     # To reproduce, run:
     # bazel test -c opt --copt=-mavx \
     #   --test_arg=BatchMatMulTest.testTfTrt_ToolConversion_INT8_DynamicEngine \
     #   tensorflow/contrib/tensorrt:batch_matmul_test
     #
     # Investigate and fix it.
     return not trt_test.IsQuantizationMode(run_params.precision_mode)
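Several of these predicates call trt_test.IsQuantizationMode(); in the integration test base it is essentially a check that the run uses quantized precision, roughly:

 def IsQuantizationMode(mode):
     # True when the run uses quantized (INT8) precision.
     return mode == "INT8"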
Example #5
 def ShouldRunTest(self, run_params):
     # Only test FP32/FP16 mode.
     return not trt_test.IsQuantizationMode(run_params.precision_mode)
Example #6
 def ShouldRunTest(self, run_params):
     """Whether to run the test."""
     # TODO(aaroey): TRT 4.0 forbids conversion for tensors with rank <3 in int8
     # mode, which is a bug. Re-enable this when the TRT library is fixed.
     return not trt_test.IsQuantizationMode(run_params.precision_mode)
Example #7
 def ShouldRunTest(self, run_params):
     return (run_params.dynamic_engine
             and not trt_test.IsQuantizationMode(run_params.precision_mode))
Example #8
 def ShouldRunTest(self, run_params):
     """Whether to run the test."""
     return (not trt_test.IsQuantizationMode(run_params.precision_mode)
             and not run_params.dynamic_engine)
Example #9
 def ExpectedEnginesToBuild(self, run_params):
     """Return the expected engines to build."""
     if (run_params.dynamic_engine and
             not trt_test.IsQuantizationMode(run_params.precision_mode)):
         return ["TRTEngineOp_0", "TRTEngineOp_1"]
     return ["TRTEngineOp_1"]