Example #1
    def testFlexWithDoubleOp(self):
        # Create a graph that has one double op.
        saved_model_dir = os.path.join(self.get_temp_dir(), 'model2')
        with ops.Graph().as_default():
            with session.Session() as sess:
                in_tensor = array_ops.placeholder(shape=[1, 4],
                                                  dtype=dtypes.int32,
                                                  name='input')
                out_tensor = double_op.double(in_tensor)
                inputs = {'x': in_tensor}
                outputs = {'z': out_tensor}
                saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

        converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
        converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
        converter.target_spec.experimental_select_user_tf_ops = ['Double']
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)
        self.assertIn('FlexDouble',
                      tflite_test_util.get_ops_list(tflite_model))

        # Check the model works with TensorFlow ops.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.int32)
        interpreter.set_tensor(input_details[0]['index'], test_input)
        interpreter.invoke()

        output_details = interpreter.get_output_details()
        expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.int32)
        output_data = interpreter.get_tensor(output_details[0]['index'])
        self.assertTrue((expected_output == output_data).all())
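Example #1 uses TensorFlow's internal test modules (lite, array_ops, session). Roughly the same Flex-conversion flow can be sketched with the public tf.lite API; the SavedModel path and the input shape below are placeholders, assuming a model like the one above:

import numpy as np
import tensorflow as tf

# Convert a SavedModel whose graph contains ops without TFLite kernels by
# allowing Select TF (Flex) ops to be embedded next to the builtins.
saved_model_dir = '/tmp/saved_model'  # placeholder path
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # regular TFLite kernels
    tf.lite.OpsSet.SELECT_TF_OPS,    # fall back to TF kernels at runtime
]
tflite_model = converter.convert()

# Run the converted model; the interpreter bundled with the TensorFlow pip
# package resolves Flex ops automatically.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[0]['index'],
                       np.array([[1, 2, 3, 4]], dtype=np.int32))
interpreter.invoke()
output_details = interpreter.get_output_details()
print(interpreter.get_tensor(output_details[0]['index']))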
Example #2
    def _createSavedModelWithCustomOp(self):
        custom_opdefs_str = (
            'name: \'CustomAdd\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
            'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
            '\'Output\' type: DT_FLOAT}')

        # Create a graph that has one add op.
        new_graph = graph_pb2.GraphDef()
        with ops.Graph().as_default():
            with session.Session() as sess:
                in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                  dtype=dtypes.float32,
                                                  name='input')
                out_tensor = in_tensor + in_tensor
                inputs = {'x': in_tensor}
                outputs = {'z': out_tensor}

                new_graph.CopyFrom(sess.graph_def)

        # Rename the Add op to CustomAdd.
        for node in new_graph.node:
            if node.op.startswith('Add'):
                node.op = 'CustomAdd'
                del node.attr['T']

        # Register the custom op defs so the modified graph can be imported.
        register_custom_opdefs([custom_opdefs_str])

        # Store saved model.
        saved_model_dir = self._getFilepath('model')
        with ops.Graph().as_default():
            with session.Session() as sess:
                import_graph_def(new_graph, name='')
                saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
        return (saved_model_dir, custom_opdefs_str)
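The helper above registers a custom op def and saves a graph whose CustomAdd op has no TFLite kernel. With the public API such a model is typically converted by marking unknown ops as custom ops; a minimal sketch, with the directory path as a placeholder:

import tensorflow as tf

# Sketch: convert a SavedModel that contains an op with no TFLite kernel.
# allow_custom_ops embeds the unknown op in the flatbuffer; a matching
# custom kernel must be registered with the interpreter at inference time.
saved_model_dir = '/tmp/custom_op_model'  # placeholder path
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.allow_custom_ops = True
tflite_model = converter.convert()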
Example #3
  def _createSimpleSavedModel(self, shape):
    """Create a simple SavedModel on the fly."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
    with session.Session() as sess:
      in_tensor = array_ops.placeholder(shape=shape, dtype=dtypes.float32)
      out_tensor = in_tensor + in_tensor
      inputs = {"x": in_tensor}
      outputs = {"y": out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
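Helpers like _createSimpleSavedModel only build the SavedModel; in the surrounding tests the returned directory is then handed to the converter. A minimal sketch of that follow-up step with the public API (paths are placeholders):

import tensorflow as tf

# Sketch: convert the SavedModel produced by the helper and write the
# resulting flatbuffer to disk.
saved_model_dir = '/tmp/simple_savedmodel'  # placeholder path
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open('/tmp/model.tflite', 'wb') as f:
  f.write(tflite_model)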
Example #4
  def _createSimpleSavedModel(self, shape):
    """Create a simple SavedModel on the fly."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
    with session.Session() as sess:
      in_tensor = array_ops.placeholder(shape=shape, dtype=dtypes.float32)
      out_tensor = in_tensor + in_tensor
      inputs = {"x": in_tensor}
      outputs = {"y": out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
Example #5
  def test_need_flex_ops(self):

    def create_graph_with_custom_add(opname='CustomAdd'):
      custom_opdefs_str = (
          'name: \'' + opname +
          '\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
          'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
          '\'Output\' type: DT_FLOAT}')

      # Create a graph that has one add op.
      new_graph = graph_pb2.GraphDef()
      with ops.Graph().as_default():
        with session.Session() as sess:
          in_tensor = array_ops.placeholder(
              shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
          out_tensor = in_tensor + in_tensor
          inputs = {'x': in_tensor}
          outputs = {'z': out_tensor}

          new_graph.CopyFrom(sess.graph_def)

      # Rename the Add op to the given opname.
      for node in new_graph.node:
        if node.op.startswith('Add'):
          node.op = opname
          del node.attr['T']

      # Register the custom op defs so the modified graph can be imported.
      register_custom_opdefs([custom_opdefs_str])

      return (new_graph, inputs, outputs)

    new_graph, inputs, outputs = create_graph_with_custom_add()

    # Import to load the custom opdef.
    saved_model_dir = os.path.join(self.get_temp_dir(), 'model')
    with ops.Graph().as_default():
      with session.Session() as sess:
        import_graph_def(new_graph, name='')
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    self.convert_and_check_location_info(
        converter,
        converter_error_data_pb2.ConverterErrorData.NAMELOC,
        expected_sources='add')
    exported_error = metrics._gauge_conversion_errors.get_cell(
        'CONVERT_TF_TO_TFLITE_MODEL', 'CONVERT_SAVED_MODEL', 'tf.CustomAdd',
        'ERROR_NEEDS_CUSTOM_OPS').value()
    self.assertEqual(
        exported_error,
        "\'tf.CustomAdd\' op is neither a custom op nor a flex op\n"
        "Error code: ERROR_NEEDS_CUSTOM_OPS"
    )
Example #6
  def testFloat(self):
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with session.Session().as_default() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
      in_tensor_2 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
      out_tensor = in_tensor_1 + in_tensor_2

      inputs = {'x': in_tensor_1, 'y': in_tensor_2}
      outputs = {'z': out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    model_coverage.test_saved_model(saved_model_dir)
Example #7
  def testSavedModel(self):
    saved_model_dir = self._getFilepath('model')
    with ops.Graph().as_default():
      with session.Session() as sess:
        in_tensor = array_ops.placeholder(
            shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
        out_tensor = in_tensor + in_tensor
        inputs = {'x': in_tensor}
        outputs = {'z': out_tensor}
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

    flags_str = '--saved_model_dir={}'.format(saved_model_dir)
    self._run(flags_str, should_succeed=True)
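The --saved_model_dir flag exercised by this test maps onto the tflite_convert command-line tool shipped with the TensorFlow pip package; a sketch of an equivalent invocation (paths are placeholders):

import subprocess

# Sketch: convert a SavedModel from the command line via tflite_convert.
subprocess.run([
    'tflite_convert',
    '--saved_model_dir=/tmp/model',
    '--output_file=/tmp/model.tflite',
], check=True)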
Example #8
  def _createSavedModelTwoInputArrays(self, shape):
    """Create a simple SavedModel."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
    with session.Session() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name="inputB")
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name="inputA")
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {"x": in_tensor_1, "y": in_tensor_2}
      outputs = {"z": out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
Example #9
  def _createSavedModel(self, shape):
    """Create a simple SavedModel."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with session.Session() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputB')
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputA')
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {'x': in_tensor_1, 'y': in_tensor_2}
      outputs = {'z': out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
Example #10
  def testFloat(self):
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with session.Session().as_default() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
      in_tensor_2 = array_ops.placeholder(
          shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
      out_tensor = in_tensor_1 + in_tensor_2

      inputs = {'x': in_tensor_1, 'y': in_tensor_2}
      outputs = {'z': out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    model_coverage.test_saved_model(saved_model_dir)
Example #11
  def _createSavedModelTwoInputArrays(self, shape):
    """Create a simple SavedModel."""
    saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
    with session.Session() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name="inputB")
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name="inputA")
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {"x": in_tensor_1, "y": in_tensor_2}
      outputs = {"z": out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
Example #12
  def _createSavedModel(self, shape):
    """Create a simple SavedModel."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with session.Session() as sess:
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputB')
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputA')
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {'x': in_tensor_1, 'y': in_tensor_2}
      outputs = {'z': out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
Example #13
  def _createV1SavedModel(self, shape):
    """Create a simple SavedModel."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with tf.Graph().as_default():
      with tf.compat.v1.Session() as sess:
        in_tensor_1 = tf.compat.v1.placeholder(
            shape=shape, dtype=tf.float32, name='inputB')
        in_tensor_2 = tf.compat.v1.placeholder(
            shape=shape, dtype=tf.float32, name='inputA')
        variable_node = tf.Variable(1.0, name='variable_node')
        out_tensor = in_tensor_1 + in_tensor_2 * variable_node
        inputs = {'x': in_tensor_1, 'y': in_tensor_2}
        outputs = {'z': out_tensor}
        sess.run(tf.compat.v1.variables_initializer([variable_node]))
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir
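This helper builds a TF1-style SavedModel that contains a variable; such models go through the v1 converter, which freezes variables into constants during conversion. A minimal sketch, with the directory path as a placeholder:

import tensorflow as tf

# Sketch: convert a TF1 SavedModel (with variables) to TFLite.
saved_model_dir = '/tmp/simple_savedmodel'  # placeholder path
converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()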
Example #14
  def testFlexWithCustomOp(self):
    new_graph, inputs, outputs = self._createGraphWithCustomOp(
        opname='CustomAdd4')

    # Import to load the custom opdef.
    saved_model_dir = os.path.join(self.get_temp_dir(), 'model')
    with ops.Graph().as_default():
      with session.Session() as sess:
        import_graph_def(new_graph, name='')
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
    converter.target_spec.experimental_select_user_tf_ops = ['CustomAdd4']
    tflite_model = converter.convert()

    self.assertIn('FlexCustomAdd4', tflite_test_util.get_ops_list(tflite_model))