Example #1
def testBasic(self):
  in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32)
  out_tensor = in_tensor + in_tensor
  sess = session.Session()
  # Try running on a valid graph.
  result = lite.toco_convert(sess.graph_def, [in_tensor], [out_tensor])
  self.assertTrue(result)
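These test snippets are lifted from TensorFlow's own test suite and omit their imports. A minimal sketch of the TF 1.x internal imports they assume (module paths taken from the 1.x contrib tree; verify them against your TensorFlow checkout):

# Imports assumed by the test snippets above and below (TF 1.x internal
# module paths; treat these as an assumption for your TensorFlow version).
from tensorflow.contrib.lite.python import lite
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops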
Example #2
def testQuantization(self):
  in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                    dtype=dtypes.float32)
  out_tensor = array_ops.fake_quant_with_min_max_args(in_tensor + in_tensor,
                                                      min=0., max=1.)
  sess = session.Session()
  result = lite.toco_convert(sess.graph_def, [in_tensor], [out_tensor],
                             inference_type=lite.QUANTIZED_UINT8,
                             quantized_input_stats=[(0., 1.)])
  self.assertTrue(result)
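In this TF 1.x API, quantized_input_stats supplies one (mean, std_dev) pair per input tensor, and real values are recovered from uint8 values as (quantized - mean) / std_dev; the (0., 1.) pair above therefore treats the uint8 input values as already real-valued. A small worked sketch of choosing stats for an input constrained to [0, 1], under that assumed formula:

# Worked example: map uint8 [0, 255] onto the real range [0, 1] using
# real_value = (quantized_value - mean) / std_dev (assumed semantics of
# quantized_input_stats in the TF 1.x converter; verify for your version).
mean, std_dev = 0., 255.
assert (0 - mean) / std_dev == 0.0    # uint8 0   -> real 0.0
assert (255 - mean) / std_dev == 1.0  # uint8 255 -> real 1.0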
Example #3
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print(
        '\nTest set accuracy:         {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))

    print("\n====== classifier model_dir, latest_checkpoint ===========")
    print(classifier.model_dir)
    print(classifier.latest_checkpoint())
    debug = False

    with tf.Session() as sess:
        # First let's load meta graph and restore weights
        latest_checkpoint_path = classifier.latest_checkpoint()
        saver = tf.train.import_meta_graph(latest_checkpoint_path + '.meta')
        saver.restore(sess, latest_checkpoint_path)

        # Get the input and output tensors needed for toco.
        # These were determined based on the debugging info printed / saved below.
        input_tensor = sess.graph.get_tensor_by_name(
            "dnn/input_from_feature_columns/input_layer/concat:0")
        input_tensor.set_shape([1, 4])
        out_tensor = sess.graph.get_tensor_by_name("dnn/logits/BiasAdd:0")
        out_tensor.set_shape([1, 3])

        # Pass the output node name we are interested in.
        # Based on the debugging info printed / saved below, pulled out the
        # name of the node for the logits (before the softmax is applied).
        frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, output_node_names=["dnn/logits/BiasAdd"])

        if debug:
            print("\nORIGINAL GRAPH DEF Ops ===========================================")
            ops = sess.graph.get_operations()
            for op in ops:
                if "BiasAdd" in op.name or "input_layer" in op.name:
                    print([op.name, op.values()])
            # save original graphdef to text file
            with open("estimator_graph.pbtxt", "w") as fp:
                fp.write(str(sess.graph_def))
            print("\nFROZEN GRAPH DEF Nodes ===========================================")
            for node in frozen_graph_def.node:
                print(node.name)
            # save frozen graph def to text file
            with open("estimator_frozen_graph.pbtxt", "w") as fp:
                fp.write(str(frozen_graph_def))

    tflite_model = lite.toco_convert(frozen_graph_def, [input_tensor],
                                     [out_tensor])
    open("estimator_model.tflite", "wb").write(tflite_model)
Example #4
def convert(saved_model_dir,
            output_tflite=None,
            output_arrays=None,
            tag_set=None,
            signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
            batch_size=1):
  """Convert a savedmodel to tflite flatbuffer.

  Args:
    saved_model_dir: Saved model directory to convert.
    output_tflite: File path to write result flatbuffer.
    output_arrays: List of output tensor names, the default value is None, which
      means conversion keeps all output tensors. This is also used to filter
      tensors that are from Op currently not supported in tflite, e.g., Argmax).
    tag_set: This is the set of tags to get meta_graph_def in saved_model.
    signature_key: This is the signature key to extract inputs, outputs.
    batch_size: If input tensor shape has None at first dimension,
      e.g. (None,224,224,3), replace None with batch_size.

  Returns:
    The converted data. For example if tflite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    ValueError: If tag_set does not indicate any meta_graph_def in saved_model,
      or signature_key is not in relevant meta_graph_def,
      or input shape has None beyond 1st dimension, e.g., (1,None, None, 3),
      or given output_arrays are not valid causing empty outputs.
  """
  if tag_set is None:
    tag_set = set([tag_constants.SERVING])

  meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
  signature_def = get_signature_def(meta_graph, signature_key)
  inputs, outputs = get_inputs_outputs(signature_def)

  graph = ops.Graph()
  with session.Session(graph=graph) as sess:

    loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)

    in_tensors = [graph.get_tensor_by_name(input_) for input_ in inputs]

    # Users can pass output_arrays to filter the output tensors used for
    # conversion. If output_arrays is None, all output tensors are kept. In
    # the future, we may use the list of TFLite-supported ops to detect
    # custom ops and automatically filter the output arrays.
    # TODO(zhixianyan): Use tflite supported Op list to filter outputs.
    if output_arrays is not None:
      output_arrays = output_arrays.split(",")
      out_tensors = [
          graph.get_tensor_by_name(output)
          for output in outputs
          if output.split(":")[0] in output_arrays
      ]
    else:
      out_tensors = [graph.get_tensor_by_name(output) for output in outputs]

    output_names = [node.split(":")[0] for node in outputs]

    if not out_tensors:
      raise ValueError(
          "No valid output tensors for '{}', possible values are '{}'".format(
              output_arrays, output_names))

    frozen_graph_def = tf_graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), output_names)

    # Toco requires fully defined tensor shapes. For an input tensor with
    # None in its shape, e.g., (None, 224, 224, 3), we replace the first None
    # with the given batch size. Shapes with additional Nones, e.g.,
    # (None, None, None, 3), could in principle be replaced and converted as
    # well, but that requires further investigation.
    # TODO(zhixianyan): Add supports for input tensor with more None in shape.
    for i in range(len(in_tensors)):
      shape = in_tensors[i].get_shape().as_list()
      if shape[0] is None:
        shape[0] = batch_size
      if None in shape[1:]:
        raise ValueError(
            "Only None at the 1st (batch) dimension is supported, but tensor "
            "'{}' has shape '{}' with None at another dimension.".format(
                inputs[i], shape))
      in_tensors[i].set_shape(shape)

    result = lite.toco_convert(frozen_graph_def, in_tensors, out_tensors)

    if output_tflite is not None:
      with gfile.Open(output_tflite, "wb") as f:
        f.write(result)
      logging.info("Successfully converted to: %s", output_tflite)

    return result
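A usage sketch for the function above; the SavedModel directory, output path, and output array name are hypothetical placeholders:

# Sketch: convert a SavedModel, keeping only the "logits" output (all paths
# and names here are hypothetical placeholders).
tflite_bytes = convert(
    saved_model_dir="/tmp/my_saved_model",  # hypothetical export directory
    output_tflite="/tmp/my_model.tflite",
    output_arrays="logits",                 # comma-separated node names
    batch_size=1)
print("flatbuffer size:", len(tflite_bytes))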