Example #1
    def _export_tflite(self,
                       tflite_filepath,
                       quantization_config=None,
                       with_metadata=True):
        """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization.
      with_metadata: Whether the output tflite model contains metadata. If True,
        Exports metadata in json file as well.
    """
        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(self.model, batch_size=1)
        model_util.export_tflite(self.model, tflite_filepath,
                                 quantization_config, self._gen_dataset,
                                 self.model_spec.convert_from_saved_model_tf2)
        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(self.model, batch_size=None)

        if with_metadata:
            with tempfile.TemporaryDirectory() as temp_dir:
                tf.compat.v1.logging.info(
                    'Vocab file is inside the TFLite model with metadata.')
                vocab_filepath = os.path.join(temp_dir, 'vocab.txt')
                self.model_spec.save_vocab(vocab_filepath)
                model_info = _get_model_info(self.model_spec, vocab_filepath)
                export_dir = os.path.dirname(tflite_filepath)
                populator = metadata_writer.MetadataPopulatorForBertQuestionAndAnswer(
                    tflite_filepath, export_dir, model_info)
                populator.populate()
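The export above brackets the conversion with a batch-size toggle: TFLite prefers a static batch dimension, while retraining needs it left as None. Below is a minimal sketch of what a helper like `model_util.set_batch_size` could do for a Keras model; the implementation is an assumption for illustration, not the actual Model Maker internals.

import tensorflow as tf

def set_batch_size(model, batch_size):
    # Sketch (assumed implementation): pin the batch dimension on every model
    # input, or relax it again with batch_size=None, so the converter sees
    # the static shape it expects.
    for model_input in model.inputs:
        shape = model_input.shape.as_list()
        model_input.set_shape([batch_size] + shape[1:])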
Example #2
    def export_tflite(self, model, tflite_filepath, quantization_config=None):
        """Converts the retrained model to tflite format and saves it.

    This method overrides the default `CustomModel._export_tflite` method, and
    include the pre-processing in the exported TFLite library since support
    library can't handle audio tasks yet.

    Args:
      model: An instance of the keras classification model to be exported.
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization.
    """
        combined = tf.keras.Sequential()
        combined.add(self._preprocess_model)
        combined.add(model)
        # Build the model.
        combined.build([None, self.expected_waveform_len])

        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(model, batch_size=1)

        model_util.export_tflite(combined,
                                 tflite_filepath,
                                 quantization_config,
                                 supported_ops=(tf.lite.OpsSet.TFLITE_BUILTINS,
                                                tf.lite.OpsSet.SELECT_TF_OPS))

        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(model, batch_size=None)
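Because the combined model bundles audio pre-processing ops that have no TFLite builtin kernels, the call above passes SELECT_TF_OPS so the converter can fall back to TensorFlow ops. A rough sketch of how such an `export_tflite` helper could drive the stock TF2 converter follows; the `quantization_config` hook is an assumption for illustration, not the Model Maker helper itself.

import tensorflow as tf

def export_tflite(model, tflite_filepath, quantization_config=None,
                  supported_ops=(tf.lite.OpsSet.TFLITE_BUILTINS,)):
    # Sketch using the standard TF2 converter API.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.target_spec.supported_ops = list(supported_ops)
    if quantization_config is not None:
        # Assumed hook: let the config set optimizations, representative
        # dataset, and io types on the converter.
        converter = quantization_config.get_converter_with_quantization(
            converter)
    tflite_model = converter.convert()
    with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
        f.write(tflite_model)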
Example #3
    def export_tflite(self,
                      model,
                      tflite_filepath,
                      with_metadata=True,
                      export_metadata_json_file=True,
                      index_to_label=None):
        """Converts the retrained model to tflite format and saves it.

    This method overrides the default `CustomModel._export_tflite` method, and
    include the pre-processing in the exported TFLite library since support
    library can't handle audio tasks yet.

    Args:
      model: An instance of the keras classification model to be exported.
      tflite_filepath: File path to save tflite model.
      with_metadata: Whether the output tflite model contains metadata.
      export_metadata_json_file: Whether to export metadata in json file. If
        True, export the metadata in the same directory as tflite model.Used
        only if `with_metadata` is True.
      index_to_label: A list that map from index to label class name.
    """
        del with_metadata, export_metadata_json_file, index_to_label
        combined = self.create_serving_model(model)

        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(model, batch_size=1)

        model_util.export_tflite(combined,
                                 tflite_filepath,
                                 quantization_config=None,
                                 supported_ops=(tf.lite.OpsSet.TFLITE_BUILTINS,
                                                tf.lite.OpsSet.SELECT_TF_OPS))

        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(model, batch_size=None)
Example #4
    def _export_tflite(self,
                       tflite_filepath,
                       quantized=False,
                       quantization_steps=None,
                       representative_data=None,
                       inference_input_type=tf.float32,
                       inference_output_type=tf.float32):
        """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filepath: File path to save tflite model.
      quantized: boolean, if True, save quantized model.
      quantization_steps: Number of post-training quantization calibration steps
        to run. Used only if `quantized` is True.
      representative_data: Representative data used for post-training
        quantization. Used only if `quantized` is True.
      inference_input_type: Target data type of real-number input arrays. Allows
        for a different type for input arrays. Defaults to tf.float32. Must be
        be `{tf.float32, tf.uint8, tf.int8}`
      inference_output_type: Target data type of real-number output arrays.
        Allows for a different type for output arrays. Defaults to tf.float32.
        Must be `{tf.float32, tf.uint8, tf.int8}`
    """
        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(self.model, batch_size=1)
        super(QuestionAnswer,
              self)._export_tflite(tflite_filepath, quantized,
                                   quantization_steps, representative_data,
                                   inference_input_type, inference_output_type)
        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(self.model, batch_size=None)
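For reference, here is roughly how the arguments above map onto the stock TFLiteConverter flags during post-training quantization. This is an illustrative sketch assuming `representative_data` yields `(input, label)` batches; it is not the actual superclass implementation.

import tensorflow as tf

def convert_quantized(model, quantization_steps, representative_data,
                      inference_input_type, inference_output_type):
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]

    def representative_dataset():
        # Run a fixed number of calibration batches through the converter.
        for i, (data, _) in enumerate(representative_data):
            if i >= quantization_steps:
                break
            yield [tf.cast(data, tf.float32)]

    converter.representative_dataset = representative_dataset
    converter.inference_input_type = inference_input_type
    converter.inference_output_type = inference_output_type
    return converter.convert()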
Example #5
    def _export_tflite(self,
                       tflite_filepath,
                       quantization_config='default',
                       with_metadata=True,
                       export_metadata_json_file=False):
        """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization. If
        'default', sets the `quantization_config` by default according to
        `self.model_spec`. If None, exports the float tflite model without
        quantization.
      with_metadata: Whether the output tflite model contains metadata.
      export_metadata_json_file: Whether to export metadata in json file. If
        True, export the metadata in the same directory as tflite model.Used
        only if `with_metadata` is True.
    """
        if quantization_config == 'default':
            quantization_config = (
                self.model_spec.get_default_quantization_config())

        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(self.model, batch_size=1)
        model_util.export_tflite(self.model, tflite_filepath,
                                 quantization_config,
                                 self.model_spec.convert_from_saved_model_tf2)
        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(self.model, batch_size=None)

        if with_metadata:
            with tempfile.TemporaryDirectory() as temp_dir:
                tf.compat.v1.logging.info(
                    'Vocab file and label file are inside the '
                    'TFLite model with metadata.')
                vocab_filepath = os.path.join(temp_dir, 'vocab.txt')
                self.model_spec.save_vocab(vocab_filepath)
                label_filepath = os.path.join(temp_dir, 'labels.txt')
                self._export_labels(label_filepath)

                export_dir = os.path.dirname(tflite_filepath)
                if isinstance(self.model_spec, ms.BertClassifierModelSpec):
                    model_info = _get_bert_model_info(self.model_spec,
                                                      vocab_filepath,
                                                      label_filepath)
                    populator = bert_metadata_writer.MetadataPopulatorForBertTextClassifier(
                        tflite_filepath, export_dir, model_info)
                elif isinstance(self.model_spec, ms.AverageWordVecModelSpec):
                    model_info = _get_model_info(self.model_spec.name)
                    populator = metadata_writer.MetadataPopulatorForTextClassifier(
                        tflite_filepath, export_dir, model_info,
                        label_filepath, vocab_filepath)
                else:
                    raise ValueError(
                        'Model specification is not supported for writing '
                        'metadata into TFLite. Please set '
                        '`with_metadata=False` or write the metadata '
                        'yourself.')
                populator.populate(export_metadata_json_file)
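A hypothetical call site for the exporter above; the trained `model` instance and the output path are illustrative, not from the source.

# Export a float model plus bundled vocab/labels metadata, and also write
# the metadata as a json file next to the .tflite file.
model._export_tflite(
    'export_dir/model.tflite',
    quantization_config=None,
    with_metadata=True,
    export_metadata_json_file=True)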
Example #6
    def _export_tflite(self, tflite_filepath, quantization_config=None):
        """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization.
    """
        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(self.model, batch_size=1)
        model_util.export_tflite(self.model, tflite_filepath,
                                 quantization_config, self._gen_dataset,
                                 self.model_spec.convert_from_saved_model_tf2)
        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(self.model, batch_size=None)
Example #7
  def _export_tflite(self, tflite_filepath, quantization_config=None):
    """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization.
    """
    # TODO(b/151761399): Removes these lines.
    if hasattr(self.model_spec, 'uri') and 'mobilebert' in self.model_spec.uri:
      raise ValueError('Couldn\'t convert MobileBert to TFLite for now.')

    # Sets batch size from None to 1 when converting to tflite.
    model_util.set_batch_size(self.model, batch_size=1)
    model_util.export_tflite(self.model, tflite_filepath, quantization_config,
                             self._gen_dataset,
                             self.model_spec.convert_from_saved_model_tf2)
    # Sets batch size back to None to support retraining later.
    model_util.set_batch_size(self.model, batch_size=None)
Example #8
    def export_tflite(self,
                      model,
                      tflite_filepath,
                      with_metadata=True,
                      export_metadata_json_file=True,
                      index_to_label=None,
                      quantization_config=None):
        """Converts the retrained model to tflite format and saves it.

    This method overrides the default `CustomModel._export_tflite` method, and
    include the pre-processing in the exported TFLite library since support
    library can't handle audio tasks yet.

    Args:
      model: An instance of the keras classification model to be exported.
      tflite_filepath: File path to save tflite model.
      with_metadata: Whether the output tflite model contains metadata.
      export_metadata_json_file: Whether to export metadata in json file. If
        True, export the metadata in the same directory as tflite model.Used
        only if `with_metadata` is True.
      index_to_label: A list that map from index to label class name.
      quantization_config: Configuration for post-training quantization.
    """
        combined = self.create_serving_model(model)

        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(model, batch_size=1)

        model_util.export_tflite(combined,
                                 tflite_filepath,
                                 quantization_config=quantization_config)

        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(model, batch_size=None)

        if with_metadata:
            if not ENABLE_METADATA:
                print(
                    'Writing metadata is not supported in the installed '
                    'tflite-support version. Please use tflite-support >= 0.2.*')
            else:
                self._export_metadata(tflite_filepath, index_to_label,
                                      export_metadata_json_file)
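A quick sanity check after export: load the file with the TFLite interpreter and confirm the serving model kept its fixed batch dimension. The path is illustrative.

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='export_dir/model.tflite')
interpreter.allocate_tensors()
# Expect a leading batch dimension of 1, e.g. [1, expected_waveform_len].
print(interpreter.get_input_details()[0]['shape'])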
Example #9
    def _export_tflite(self,
                       tflite_filepath,
                       quantization_config='default',
                       with_metadata=True,
                       export_metadata_json_file=False):
        """Converts the retrained model to tflite format and saves it.

    Args:
      tflite_filepath: File path to save tflite model.
      quantization_config: Configuration for post-training quantization. If
        'default', sets the `quantization_config` by default according to
        `self.model_spec`. If None, exports the float tflite model without
        quantization.
      with_metadata: Whether the output tflite model contains metadata.
      export_metadata_json_file: Whether to export metadata in json file. If
        True, export the metadata in the same directory as tflite model.Used
        only if `with_metadata` is True.
    """
        if quantization_config == 'default':
            quantization_config = (
                self.model_spec.get_default_quantization_config())

        # Sets batch size from None to 1 when converting to tflite.
        model_util.set_batch_size(self.model, batch_size=1)
        model_util.export_tflite(self.model, tflite_filepath,
                                 quantization_config,
                                 self.model_spec.convert_from_saved_model_tf2)
        # Sets batch size back to None to support retraining later.
        model_util.set_batch_size(self.model, batch_size=None)

        if with_metadata:
            with tempfile.TemporaryDirectory() as temp_dir:
                tf.compat.v1.logging.info(
                    'Vocab file is inside the TFLite model with metadata.')
                vocab_filepath = os.path.join(temp_dir, 'vocab.txt')
                self.model_spec.save_vocab(vocab_filepath)
                model_info = _get_model_info(self.model_spec, vocab_filepath)
                export_dir = os.path.dirname(tflite_filepath)
                populator = metadata_writer.MetadataPopulatorForBertQuestionAndAnswer(
                    tflite_filepath, export_dir, model_info)
                populator.populate(export_metadata_json_file)
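To verify that the populator actually embedded the metadata and its associated files, the tflite-support metadata displayer can read them back; the model path here is illustrative.

from tflite_support import metadata

displayer = metadata.MetadataDisplayer.with_model_file('export_dir/model.tflite')
print(displayer.get_metadata_json())
# Should list the packed associated files, e.g. vocab.txt.
print(displayer.get_packed_associated_file_list())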