def testTrainedMnistSavedModel(self):
    """Briefly train an MNIST classifier, export a SavedModel, and freeze it."""
    # Build the classifier; channels-last is the layout TFLite expects.
    mnist_classifier = estimator.Estimator(
        model_fn=model_fn,
        params={
            "data_format": "channels_last"  # tflite format
        })

    # A couple of steps on dummy data is enough for this test.
    mnist_classifier.train(input_fn=dummy_input_fn, steps=2)

    # Serving receiver that accepts raw 28x28 float images.
    image_tensor = array_ops.placeholder(dtypes.float32, [None, 28, 28])
    serving_input_fn = estimator.export.build_raw_serving_input_receiver_fn({
        "image": image_tensor,
    })

    # Export the trained model as a SavedModel.
    export_base_dir = os.path.join(self.get_temp_dir(), "mnist_savedmodel")
    mnist_classifier.export_savedmodel(export_base_dir, serving_input_fn)

    # export_savedmodel writes into a timestamped subdirectory; locate it.
    timestamp_dir = os.listdir(export_base_dir)[0]
    exported_model_dir = os.path.join(export_base_dir, timestamp_dir)

    # TODO(zhixianyan): no need to limit output_arrays to `Softmax'
    # once b/74205001 fixed and argmax implemented in tflite.
    result = convert_saved_model.freeze_saved_model(
        saved_model_dir=exported_model_dir,
        input_arrays=None,
        input_shapes=None,
        output_arrays=["Softmax"],
        tag_set=set([tag_constants.SERVING]),
        signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    self.assertTrue(result)
  def testTrainedMnistSavedModel(self):
    """Test mnist SavedModel, trained with dummy data and small steps."""
    # Build classifier; "channels_last" is the data layout TFLite expects.
    classifier = estimator.Estimator(
        model_fn=model_fn,
        params={
            "data_format": "channels_last"  # tflite format
        })

    # Train briefly and build a serving receiver for raw 28x28 float images.
    classifier.train(input_fn=dummy_input_fn, steps=2)
    image = array_ops.placeholder(dtypes.float32, [None, 28, 28])
    pred_input_fn = estimator.export.build_raw_serving_input_receiver_fn({
        "image": image,
    })

    # Export SavedModel
    saved_model_dir = os.path.join(self.get_temp_dir(), "mnist_savedmodel")
    classifier.export_savedmodel(saved_model_dir, pred_input_fn)

    # export_savedmodel writes into a timestamped subdirectory; locate it.
    saved_model_name = os.listdir(saved_model_dir)[0]
    saved_model_final_dir = os.path.join(saved_model_dir, saved_model_name)

    # TODO(zhixianyan): no need to limit output_arrays to `Softmax'
    # once b/74205001 fixed and argmax implemented in tflite.
    # Fix: pass the serving tag explicitly. freeze_saved_model does not
    # appear to default a None tag_set (callers elsewhere in this file
    # replace None with the serving tag before calling it), so the prior
    # tag_set=None would fail to identify a MetaGraphDef.
    result = convert_saved_model.freeze_saved_model(
        saved_model_dir=saved_model_final_dir,
        input_arrays=None,
        input_shapes=None,
        output_arrays=["Softmax"],
        tag_set=set([tag_constants.SERVING]),
        signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    self.assertTrue(result)
# Example 3
  def from_saved_model(
      cls,
      saved_model_dir,
      input_arrays=None,
      input_shapes=None,
      output_arrays=None,
      tag_set=None,
      signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
    """Creates a TocoConverter class from a SavedModel.

    Args:
      saved_model_dir: SavedModel directory to convert.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default
        set([tag_constants.SERVING]))
      signature_key: Key identifying SignatureDef containing inputs and outputs.
        (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    Returns:
      TocoConverter class.
    """
    # A None tag_set falls back to the standard serving tag.
    if tag_set is None:
      tag_set = set([tag_constants.SERVING])

    # freeze_saved_model returns (graph_def, input_tensors, output_tensors).
    result = freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
                                output_arrays, tag_set, signature_key)
    return cls(
        graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
 def _convertSavedModel(self,
                        saved_model_dir,
                        input_arrays=None,
                        input_shapes=None,
                        output_arrays=None,
                        tag_set=None,
                        signature_key=None):
   """Freezes a SavedModel and returns its GraphDef and I/O tensors.

   Args:
     saved_model_dir: SavedModel directory to convert.
     input_arrays: List of input tensors, or None to use the SignatureDef's.
     input_shapes: Dict mapping input tensor names to shapes, or None.
     output_arrays: List of output tensors, or None to use the SignatureDef's.
     tag_set: Set of MetaGraphDef tags; defaults to the serving tag.
     signature_key: SignatureDef key; defaults to the serving signature key.

   Returns:
     Tuple of (graph_def, input_tensors, output_tensors).
   """
   # Fix: default None tag_set/signature_key to the standard serving values
   # (consistent with the other _convertSavedModel helpers in this file)
   # instead of forwarding None to freeze_saved_model.
   if tag_set is None:
     tag_set = set([tag_constants.SERVING])
   if signature_key is None:
     signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
   graph_def, in_tensors, out_tensors = convert_saved_model.freeze_saved_model(
       saved_model_dir=saved_model_dir,
       input_arrays=input_arrays,
       input_shapes=input_shapes,
       output_arrays=output_arrays,
       tag_set=tag_set,
       signature_key=signature_key)
   return graph_def, in_tensors, out_tensors
 def _convertSavedModel(self,
                        saved_model_dir,
                        input_arrays=None,
                        input_shapes=None,
                        output_arrays=None,
                        tag_set=None,
                        signature_key=None):
   """Helper: freeze `saved_model_dir` into (graph_def, inputs, outputs)."""
   # Substitute the standard serving defaults for unspecified arguments.
   effective_tags = set([tag_constants.SERVING]) if tag_set is None else tag_set
   if signature_key is None:
     effective_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
   else:
     effective_key = signature_key
   frozen_graph, input_tensors, output_tensors = (
       convert_saved_model.freeze_saved_model(
           saved_model_dir=saved_model_dir,
           input_arrays=input_arrays,
           input_shapes=input_shapes,
           output_arrays=output_arrays,
           tag_set=effective_tags,
           signature_key=effective_key))
   return frozen_graph, input_tensors, output_tensors
 def _convertSavedModel(self,
                        saved_model_dir,
                        input_arrays=None,
                        input_shapes=None,
                        output_arrays=None,
                        tag_set=None,
                        signature_key=None):
   """Freeze a SavedModel; returns (graph_def, input_tensors, output_tensors).

   None values for `tag_set`/`signature_key` fall back to the conventional
   serving tag and serving signature key.
   """
   if tag_set is None:
     tag_set = set([tag_constants.SERVING])
   if signature_key is None:
     signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
   graph_def, in_tensors, out_tensors = convert_saved_model.freeze_saved_model(
       saved_model_dir=saved_model_dir,
       input_arrays=input_arrays,
       input_shapes=input_shapes,
       output_arrays=output_arrays,
       tag_set=tag_set,
       signature_key=signature_key)
   return graph_def, in_tensors, out_tensors
# Example 7
    def from_saved_model(cls,
                         saved_model_dir,
                         input_arrays=None,
                         input_shapes=None,
                         output_arrays=None,
                         tag_set=None,
                         signature_key=None):
        """Creates a TocoConverter class from a SavedModel.

        Args:
          saved_model_dir: SavedModel directory to convert.
          input_arrays: List of input tensors to freeze graph with. Uses
            input arrays from SignatureDef when none are provided.
            (default None)
          input_shapes: Dict of strings representing input tensor names to
            list of integers representing input shapes (e.g.,
            {"foo" : [1, 16, 16, 3]}). Automatically determined when input
            shapes is None (e.g., {"foo" : None}). (default None)
          output_arrays: List of output tensors to freeze graph with. Uses
            output arrays from SignatureDef when none are provided.
            (default None)
          tag_set: Set of tags identifying the MetaGraphDef within the
            SavedModel to analyze. All tags in the tag set must be present.
            (default set("serve"))
          signature_key: Key identifying SignatureDef containing inputs and
            outputs. (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)

        Returns:
          TocoConverter class.
        """
        # Apply the conventional serving defaults when not supplied.
        if tag_set is None:
            tag_set = set([tag_constants.SERVING])
        if signature_key is None:
            signature_key = (
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

        # freeze_saved_model -> (graph_def, input_tensors, output_tensors).
        frozen = freeze_saved_model(saved_model_dir, input_arrays,
                                    input_shapes, output_arrays, tag_set,
                                    signature_key)
        return cls(graph_def=frozen[0],
                   input_tensors=frozen[1],
                   output_tensors=frozen[2])