Example #1
def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  compiled_version = get_linked_tensorrt_version()
  loaded_version = get_loaded_tensorrt_version()
  tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
  tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
  version_mismatch = False
  if loaded_version[0] < compiled_version[0]:
    tf_logging.error(
        "TensorRT version mismatch. Tensorflow was compiled against " +
        "TensorRT %s but library loaded from environment is TensorRT %s" %
        (".".join([str(x) for x in compiled_version]),
         ".".join([str(x) for x in loaded_version])) +
        ". Please make sure that correct version of TensorRT " +
        "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
    raise RuntimeError("Incompatible TensorRT library version")
  for i in zip(loaded_version, compiled_version):
    if i[0] != i[1]:
      tf_logging.warn("TensorRT mismatch. Compiled against version " +
                      "%s, but loaded %s. Things may not work" %
                      (".".join([str(x) for x in compiled_version]),
                       ".".join([str(x) for x in loaded_version])))
      version_mismatch = True
      break
  if not version_mismatch:
    tf_logging.info("Running against TensorRT version %s" %
                    ".".join([str(x) for x in loaded_version]))
Example #2
def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  compiled_version = wrap_py_utils.get_linked_tensorrt_version()
  loaded_version = wrap_py_utils.get_loaded_tensorrt_version()
  tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
  tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
  version_mismatch = False
  if loaded_version[0] < compiled_version[0]:
    tf_logging.error(
        "TensorRT version mismatch. Tensorflow was compiled against " +
        "TensorRT %s but library loaded from environment is TensorRT %s" %
        (".".join([str(x) for x in compiled_version]),
         ".".join([str(x) for x in loaded_version])) +
        ". Please make sure that correct version of TensorRT " +
        "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
    raise RuntimeError("Incompatible TensorRT library version")
  for i in zip(loaded_version, compiled_version):
    if i[0] != i[1]:
      tf_logging.warn("TensorRT mismatch. Compiled against version " +
                      "%s, but loaded %s. Things may not work" %
                      (".".join([str(x) for x in compiled_version]),
                       ".".join([str(x) for x in loaded_version])))
      version_mismatch = True
      break
  if not version_mismatch:
    tf_logging.info("Running against TensorRT version %s" %
                    ".".join([str(x) for x in loaded_version]))
Example #3
 def ShouldRunTest(self, run_params):
     if get_linked_tensorrt_version()[0] < 5:
         return False
     # Only test static engine mode, with or without calibration.
     return (trt_test.IsQuantizationMode(run_params.precision_mode)
             and not run_params.convert_online
             and not run_params.dynamic_engine)
  def testEval(self):
    if not is_tensorrt_enabled():
      return
    model_dir = test.test_src_dir_path('python/compiler/tensorrt/test/testdata')

    accuracy_tf_native = self._Run(
        is_training=False,
        use_trt=False,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_native: %f', accuracy_tf_native)
    self.assertAllClose(0.9662, accuracy_tf_native, rtol=3e-3, atol=3e-3)

    if get_linked_tensorrt_version()[0] < 5:
      return

    accuracy_tf_trt = self._Run(
        is_training=False,
        use_trt=True,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
    self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
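
The assertions above allow a small band around the reference accuracies rather than requiring an exact match. A quick sketch of that band, assuming assertAllClose follows numpy.allclose semantics (|actual - expected| <= atol + rtol * |expected|):

import numpy as np

# Band for the native-TF assertion above: 0.003 + 0.003 * 0.9662 ~= 0.0059.
expected = 0.9662
print(np.allclose(0.9690, expected, rtol=3e-3, atol=3e-3))  # True: inside the band
print(np.allclose(0.9730, expected, rtol=3e-3, atol=3e-3))  # False: outside the band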
Example #6
    def ShouldRunTest(self, run_params):
        # There is no CombinedNonMaxSuppression op for GPU at the moment, so
        # calibration will fail.
        # TODO(@mconley @jdekhtiar): remove skipped test when fixed
        return False

        # TODO(laigd): fix this.
        if trt_test.IsQuantizationMode(run_params.precision_mode):
            return False

        # Only run for TRT 5.1 and above.
        ver = get_linked_tensorrt_version()
        return ver[0] > 5 or (ver[0] == 5 and ver[1] >= 1)
def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  linked_version = wrap_py_utils.get_linked_tensorrt_version()
  loaded_version = wrap_py_utils.get_loaded_tensorrt_version()
  assert isinstance(linked_version, tuple)
  assert isinstance(loaded_version, tuple)
  assert len(linked_version) == 3
  assert len(loaded_version) == 3
  tf_logging.info("Linked TensorRT version: %s" % str(linked_version))
  tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
  if loaded_version < linked_version:
    tf_logging.error(
        "Loaded TensorRT %s but linked TensorFlow against TensorRT %s. " %
        (".".join([str(x) for x in loaded_version]),
         ".".join([str(x) for x in linked_version])) +
        "TensorRT does not support forward compatibility. " +
        "It is also required to use the same major version of TensorRT " +
        "during compilation and runtime.")
    raise RuntimeError("Incompatible TensorRT versions")
  if loaded_version[0] > linked_version[0]:
    tf_logging.error(
        "Loaded TensorRT %s but linked TensorFlow against TensorRT %s. " %
        (".".join([str(x) for x in loaded_version]),
         ".".join([str(x) for x in linked_version])) +
        "It is required to use the same major version " +
        "of TensorRT during compilation and runtime.")
    raise RuntimeError("Incompatible TensorRT major version")
  if loaded_version != linked_version:
    tf_logging.info(
        "Loaded TensorRT %s and linked TensorFlow against TensorRT %s. " %
        (".".join([str(x) for x in loaded_version]),
         ".".join([str(x) for x in linked_version])) +
        "This is supported because TensorRT " +
        " minor/patch upgrades are backward compatible")
 def ShouldRunTest(self, run_params):
     # Only run for TRT 6 and above.
     ver = get_linked_tensorrt_version()
     return ver[0] >= 6 and (not run_params.use_calibration)
Example #9
 def ShouldRunTest(self, run_params):
     # Only run for TRT 5.1 and above.
     ver = get_linked_tensorrt_version()
     return ver[0] > 5 or (ver[0] == 5 and ver[1] >= 1)
Example #10
    def __init__(self,
                 input_saved_model_dir=None,
                 input_saved_model_tags=None,
                 input_saved_model_signature_key=None,
                 input_graph_def=None,
                 nodes_blacklist=None,
                 session_config=None,
                 max_batch_size=1,
                 max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
                 precision_mode=TrtPrecisionMode.FP32,
                 minimum_segment_size=3,
                 is_dynamic_op=False,
                 maximum_cached_engines=1,
                 cached_engine_batches=None,
                 use_calibration=True,
                 use_function_backup=True):
        """Initialize the converter.

    Args:
      input_saved_model_dir: the directory to load the SavedModel containing
        the input graph to transform. Used only when input_graph_def is None.
      input_saved_model_tags: list of tags to load the SavedModel.
      input_saved_model_signature_key: the key of the signature to optimize the
        graph for.
      input_graph_def: a GraphDef object containing a model to be transformed.
        If set to None, the graph will be read from the SavedModel loaded from
        input_saved_model_dir.
      nodes_blacklist: list of node names to prevent the converter from
        touching. Only used when input_graph_def is not None.
      session_config: the ConfigProto used to create a Session. It's also used
        as a template to create a TRT-enabled ConfigProto for conversion. If not
        specified, a default ConfigProto will be used.
      max_batch_size: max size for the input batch.
      max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
        engine can use at execution time. This corresponds to the
        'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
      precision_mode: one of TrtPrecisionMode.supported_precision_modes().
      minimum_segment_size: the minimum number of nodes required for a subgraph
        to be replaced by TRTEngineOp.
      is_dynamic_op: whether to generate dynamic TRT ops which will build the
        TRT network and engine at run time.
      maximum_cached_engines: max number of cached TRT engines in dynamic TRT
        ops. If the number of cached engines is already at max but none of them
        can serve the input, the TRTEngineOp will fall back to run the TF
        function based on which the TRTEngineOp is created.
      cached_engine_batches: a list of batch sizes used to create cached
        engines, only used when is_dynamic_op is True. The length of the list
        should be <= maximum_cached_engines, and the dynamic TRT op will use
        this list to determine the batch sizes of the cached engines, instead of
        making the decision on the fly. This is useful when we know the most
        common batch size(s) the application is going to generate.
      use_calibration: this argument is ignored if precision_mode is not INT8.
        If set to True, a calibration graph will be created to calibrate the
        missing ranges. The calibration graph must be converted to an inference
        graph by running calibration with calibrate(). If set to False,
        quantization nodes will be expected for every tensor in the graph
        (excluding those which will be fused). If a range is missing, an error
        will occur. Please note that accuracy may be negatively affected if
        there is a mismatch between which tensors TRT quantizes and which
        tensors were trained with fake quantization.
      use_function_backup: if set to True, it will create a FunctionDef for each
        subgraph that is converted to TRT op, and if TRT ops fail to execute at
        runtime, it'll invoke that function as a fallback.

    Raises:
      ValueError: if the combination of the parameters is invalid.
      RuntimeError: if the TensorRT library version is incompatible.
    """
        super(TrtGraphConverter, self).__init__(
            input_saved_model_dir=input_saved_model_dir,
            input_saved_model_tags=input_saved_model_tags,
            input_saved_model_signature_key=input_saved_model_signature_key,
            input_graph_def=input_graph_def,
            nodes_blacklist=nodes_blacklist,
            session_config=session_config)

        # TODO(laigd): move all the validations below to
        # get_tensorrt_rewriter_config().
        # Check compatibility of TensorRT version.
        compiled_version = get_linked_tensorrt_version()
        loaded_version = get_loaded_tensorrt_version()
        tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
        tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
        version_mismatch = False
        if loaded_version[0] < compiled_version[0]:
            tf_logging.error(
                "TensorRT version mismatch. Tensorflow was compiled against " +
                "TensorRT %s but library loaded from environment is TensorRT %s"
                % (".".join([str(x) for x in compiled_version]),
                   ".".join([str(x) for x in loaded_version])) +
                ". Please make sure that correct version of TensorRT " +
                "is available in the system and added to ldconfig or LD_LIBRARY_PATH"
            )
            raise RuntimeError("Incompatible TensorRT library version")
        for i in zip(loaded_version, compiled_version):
            if i[0] != i[1]:
                tf_logging.warn(
                    "TensorRT mismatch. Compiled against version " +
                    "%s, but loaded %s. Things may not work" %
                    (".".join([str(x) for x in compiled_version]),
                     ".".join([str(x) for x in loaded_version])))
                version_mismatch = True
                break
        if not version_mismatch:
            tf_logging.info("Running against TensorRT version %s" %
                            ".".join([str(x) for x in loaded_version]))

        # Check input arguments.
        supported_precision_modes = TrtPrecisionMode.supported_precision_modes()
        if precision_mode not in supported_precision_modes:
            raise ValueError(
                ("precision mode '{}' is not supported."
                 "It should be one of {}").format(precision_mode,
                                                  supported_precision_modes))

        if cached_engine_batches:
            if not isinstance(cached_engine_batches, list):
                raise TypeError("cached_engine_batches should be a list.")
            if len(cached_engine_batches) > maximum_cached_engines:
                raise ValueError(
                    "cached_engine_batches should not contain more than "
                    "maximum_cached_engines items.")

        self._need_calibration = (precision_mode == TrtPrecisionMode.INT8
                                  and use_calibration)
        self._use_function_backup = use_function_backup

        # TODO(laigd): consider providing a mechanism to remove the fallback path
        # after calibration is done.
        if self._need_calibration and not use_function_backup:
            raise ValueError(
                "Calibration requires enabling fallback to TF function execution."
            )

        # TODO(laigd):
        # - Get rid of is_dynamic_op option, it should always be True, and it should
        #   accept N shapes as input.
        # - Verify in int8 mode that maximum_cached_engines and
        #   cached_engine_batches are set appropriately.
        # - If it fails to build the int8 engine it should return error.
        self._max_batch_size = max_batch_size
        self._max_workspace_size_bytes = max_workspace_size_bytes
        self._precision_mode = precision_mode
        self._minimum_segment_size = minimum_segment_size
        self._is_dynamic_op = is_dynamic_op
        self._maximum_cached_engines = maximum_cached_engines
        self._cached_engine_batches = cached_engine_batches
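
The constructor above runs the version check inline and then validates its arguments; the parameters are documented in the docstring. As a rough usage sketch, hedged because only the constructor is shown here: the import path and the convert()/save() calls are assumptions based on the TF 1.x TF-TRT API, and the SavedModel paths are hypothetical.

from tensorflow.python.compiler.tensorrt import trt_convert

# Hypothetical paths; convert() and save() assumed from the TF 1.x API.
converter = trt_convert.TrtGraphConverter(
    input_saved_model_dir="/tmp/my_saved_model",
    max_batch_size=8,
    precision_mode=trt_convert.TrtPrecisionMode.FP16,
    minimum_segment_size=3,
    is_dynamic_op=True,
    maximum_cached_engines=1)
converted_graph_def = converter.convert()  # rewrites eligible subgraphs to TRTEngineOps
converter.save("/tmp/my_saved_model_trt")  # writes the converted SavedModel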
Example #11
 def ShouldRunTest(self, run_params):
   if get_linked_tensorrt_version()[0] < 5:
     return False
   # Test static/dynamic engine with/without calibration.
   return (trt_test.IsQuantizationMode(run_params.precision_mode) and
           not run_params.use_optimizer)
Example #12
  def __init__(self,
               input_saved_model_dir=None,
               input_saved_model_tags=None,
               input_saved_model_signature_key=None,
               input_graph_def=None,
               nodes_blacklist=None,
               session_config=None,
               max_batch_size=1,
               max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
               precision_mode=TrtPrecisionMode.FP32,
               minimum_segment_size=3,
               is_dynamic_op=False,
               maximum_cached_engines=1,
               cached_engine_batches=None,
               use_calibration=True,
               use_function_backup=True):
    """Initialize the converter.

    Args:
      input_saved_model_dir: the directory to load the SavedModel containing
        the input graph to transform. Used only when input_graph_def is None.
      input_saved_model_tags: list of tags to load the SavedModel.
      input_saved_model_signature_key: the key of the signature to optimize the
        graph for.
      input_graph_def: a GraphDef object containing a model to be transformed.
        If set to None, the graph will be read from the SavedModel loaded from
        input_saved_model_dir.
      nodes_blacklist: list of node names to prevent the converter from
        touching. Only used when input_graph_def is not None.
      session_config: the ConfigProto used to create a Session. It's also used
        as a template to create a TRT-enabled ConfigProto for conversion. If not
        specified, a default ConfigProto will be used.
      max_batch_size: max size for the input batch.
      max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
        engine can use at execution time. This corresponds to the
        'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
      precision_mode: one of TrtPrecisionMode.supported_precision_modes().
      minimum_segment_size: the minimum number of nodes required for a subgraph
        to be replaced by TRTEngineOp.
      is_dynamic_op: whether to generate dynamic TRT ops which will build the
        TRT network and engine at run time.
      maximum_cached_engines: max number of cached TRT engines in dynamic TRT
        ops. If the number of cached engines is already at max but none of them
        can serve the input, the TRTEngineOp will fall back to run the TF
        function based on which the TRTEngineOp is created.
      cached_engine_batches: a list of batch sizes used to create cached
        engines, only used when is_dynamic_op is True. The length of the list
        should be <= maximum_cached_engines, and the dynamic TRT op will use
        this list to determine the batch sizes of the cached engines, instead of
        making the decision on the fly. This is useful when we know the most
        common batch size(s) the application is going to generate.
      use_calibration: this argument is ignored if precision_mode is not INT8.
        If set to True, a calibration graph will be created to calibrate the
        missing ranges. The calibration graph must be converted to an inference
        graph by running calibration with calibrate(). If set to False,
        quantization nodes will be expected for every tensor in the graph
        (excluding those which will be fused). If a range is missing, an error
        will occur. Please note that accuracy may be negatively affected if
        there is a mismatch between which tensors TRT quantizes and which
        tensors were trained with fake quantization.
      use_function_backup: if set to True, it will create a FunctionDef for each
        subgraph that is converted to TRT op, and if TRT ops fail to execute at
        runtime, it'll invoke that function as a fallback.

    Raises:
      ValueError: if the combination of the parameters is invalid.
      RuntimeError: if the TensorRT library version is incompatible.
    """
    super(TrtGraphConverter, self).__init__(
        input_saved_model_dir=input_saved_model_dir,
        input_saved_model_tags=input_saved_model_tags,
        input_saved_model_signature_key=input_saved_model_signature_key,
        input_graph_def=input_graph_def,
        nodes_blacklist=nodes_blacklist,
        session_config=session_config)

    # TODO(laigd): move all the validations below to
    # get_tensorrt_rewriter_config().
    # Check compatibility of TensorRT version.
    compiled_version = get_linked_tensorrt_version()
    loaded_version = get_loaded_tensorrt_version()
    tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
    tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
    version_mismatch = False
    if loaded_version[0] < compiled_version[0]:
      tf_logging.error(
          "TensorRT version mismatch. Tensorflow was compiled against " +
          "TensorRT %s but library loaded from environment is TensorRT %s" %
          (".".join([str(x) for x in compiled_version]),
           ".".join([str(x) for x in loaded_version])) +
          ". Please make sure that correct version of TensorRT " +
          "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
      raise RuntimeError("Incompatible TensorRT library version")
    for i in zip(loaded_version, compiled_version):
      if i[0] != i[1]:
        tf_logging.warn("TensorRT mismatch. Compiled against version " +
                        "%s, but loaded %s. Things may not work" %
                        (".".join([str(x) for x in compiled_version]),
                         ".".join([str(x) for x in loaded_version])))
        version_mismatch = True
        break
    if not version_mismatch:
      tf_logging.info("Running against TensorRT version %s" %
                      ".".join([str(x) for x in loaded_version]))

    # Check input arguments.
    supported_precision_modes = TrtPrecisionMode.supported_precision_modes()
    if precision_mode not in supported_precision_modes:
      raise ValueError(("precision mode '{}' is not supported."
                        "It should be one of {}").format(
                            precision_mode, supported_precision_modes))

    if cached_engine_batches:
      if not isinstance(cached_engine_batches, list):
        raise TypeError("cached_engine_batches should be a list.")
      if len(cached_engine_batches) > maximum_cached_engines:
        raise ValueError("cached_engine_batches should not contain more than "
                         "maximum_cached_engines items.")

    self._need_calibration = (
        precision_mode == TrtPrecisionMode.INT8 and use_calibration)
    self._use_function_backup = use_function_backup

    # TODO(laigd): consider providing a mechanism to remove the fallback path
    # after calibration is done.
    if self._need_calibration and not use_function_backup:
      raise ValueError(
          "Calibration requires enabling fallback to TF function execution.")

    # TODO(laigd):
    # - Get rid of is_dynamic_op option, it should always be True, and it should
    #   accept N shapes as input.
    # - Verify in int8 mode that maximum_cached_engines and
    #   cached_engine_batches are set appropriately.
    # - If it fails to build the int8 engine it should return error.
    self._max_batch_size = max_batch_size
    self._max_workspace_size_bytes = max_workspace_size_bytes
    self._precision_mode = precision_mode
    self._minimum_segment_size = minimum_segment_size
    self._is_dynamic_op = is_dynamic_op
    self._maximum_cached_engines = maximum_cached_engines
    self._cached_engine_batches = cached_engine_batches
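
The tail of the constructor enforces two argument constraints: INT8 with use_calibration=True requires the TF function fallback, and cached_engine_batches must be a list no longer than maximum_cached_engines. A standalone sketch of just that validation (validate_trt_args is a hypothetical helper, not part of TensorFlow):

INT8 = "INT8"

def validate_trt_args(precision_mode, use_calibration, use_function_backup,
                      cached_engine_batches, maximum_cached_engines):
  # Mirrors the checks at the end of __init__ above.
  need_calibration = precision_mode == INT8 and use_calibration
  if need_calibration and not use_function_backup:
    raise ValueError(
        "Calibration requires enabling fallback to TF function execution.")
  if cached_engine_batches:
    if not isinstance(cached_engine_batches, list):
      raise TypeError("cached_engine_batches should be a list.")
    if len(cached_engine_batches) > maximum_cached_engines:
      raise ValueError("cached_engine_batches should not contain more than "
                       "maximum_cached_engines items.")
  return need_calibration

print(validate_trt_args(INT8, True, True, [1, 8], 2))  # True: INT8 calibration enabled
# validate_trt_args(INT8, True, False, None, 1)        # ValueError: needs fallback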
Example #13
 def ShouldRunTest(self, run_params):
     if get_linked_tensorrt_version()[0] < 5:
         return False
     # Test static/dynamic engine with/without calibration.
     return (trt_test.IsQuantizationMode(run_params.precision_mode)
             and not run_params.use_optimizer)