Example No. 1
    def test_convert_saved_model_skip_op_check(self):
        self.create_unsupported_saved_model()
        print(
            glob.glob(
                os.path.join(self._tmp_dir, SESSION_BUNDLE_MODEL_DIR, '*')))

        tf_saved_model_conversion.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            'Softmax',
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            skip_op_check=True)

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'shape': [2, 2, 2],
                'name': 'Softmax',
                'dtype': 'float32'
            }]
        }]
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example No. 2
    def test_convert_saved_model_strip_debug_ops(self):
        self.create_saved_model_with_debug_ops()

        tf_saved_model_conversion.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            'add',
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            strip_debug_ops=True)

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'dtype': 'float32',
                'name': 'add',
                'shape': [2, 2]
            }]
        }]
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
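The create_saved_model_with_debug_ops helper called above is not part of the excerpt. A minimal sketch of what it could look like, assuming the TF 1.x API and tf.check_numerics as the debug op (all names and values are illustrative, not the real helper):

import tensorflow as tf

def create_saved_model_with_debug_ops(save_dir):
    # Wire a debug op (CheckNumerics) into a tiny graph whose output op is
    # named 'add', matching the weights manifest checked in the test above.
    graph = tf.Graph()
    with graph.as_default():
        x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
        checked = tf.check_numerics(x, 'found bad values')  # op to strip
        y = tf.add(checked, x, name='add')
        with tf.Session(graph=graph) as sess:
            tf.saved_model.simple_save(
                sess, save_dir, inputs={'x': x}, outputs={'y': y})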
Example No. 3
    def test_convert_saved_model(self):
        self.create_saved_model()
        print(
            glob.glob(
                os.path.join(self._tmp_dir, SESSION_BUNDLE_MODEL_DIR, '*')))

        tf_saved_model_conversion.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR), 'Softmax',
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        weights = [{
            'paths': ['group1-shard1of1'],
            'weights': [{
                'shape': [2, 2],
                'name': 'Softmax',
                'dtype': 'float32'
            }]
        }]
        # Load the saved weights as a JSON string.
        weights_manifest = open(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                         'weights_manifest.json'), 'rt')
        output_json = json.load(weights_manifest)
        weights_manifest.close()
        self.assertEqual(output_json, weights)

        # Check the content of the output directory.
        self.assertTrue(
            glob.glob(
                os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                             'tensorflowjs_model.pb')))
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
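Likewise, the create_saved_model helper the three tests above rely on is not shown. A minimal sketch under the same assumptions (TF 1.x API, illustrative names), producing an output op named 'Softmax' as the tests expect:

import tensorflow as tf

def create_saved_model(save_dir):
    # Build a tiny graph ending in an op named 'Softmax' and export it as a
    # TF 1.x SavedModel that convert_tf_saved_model can consume.
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
        w = tf.Variable(tf.random_uniform([2, 2]), name='w')
        y = tf.nn.softmax(tf.matmul(x, w), name='Softmax')
        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            tf.saved_model.simple_save(
                sess, save_dir, inputs={'x': x}, outputs={'y': y})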
Example No. 4
def main():
    parser = argparse.ArgumentParser('TensorFlow.js model converters.')
    parser.add_argument(
        'input_path',
        type=str,
        help='Path to the input file or directory. For input format "keras", '
        'an HDF5 (.h5) file is expected. For input format "tensorflow", '
        'a SavedModel or session bundle directory is expected.')
    parser.add_argument(
        '--input_format',
        type=str,
        required=True,
        choices=set(['keras', 'tf_saved_model', 'tf_session_bundle']),
        help='Input format. '
        'For "keras", the input path can be one of the two following formats:\n'
        '  - A topology+weights combined HDF5 (e.g., generated with'
        '    `keras.models.save_model()` method).\n'
        '  - A weights-only HDF5 (e.g., generated with Keras Model\'s '
        '    `save_weights()` method). \n'
        'For "tensorflow", a SavedModel or session bundle model is expected.')
    parser.add_argument(
        '--output_node_names',
        type=str,
        help='The names of the output nodes, separated by commas. E.g., '
        '"logits,activations". Applicable only if input format is '
        '"tf_saved_model" or "tf_session_bundle".')
    parser.add_argument(
        '--saved_model_tags',
        type=str,
        default='serve',
        help='Tags of the MetaGraphDef to load, in comma separated string '
        'format. Defaults to "serve". Applicable only if input format is '
        '"tf_saved_model".')
    parser.add_argument('output_dir',
                        type=str,
                        help='Path for all output artifacts.')

    FLAGS = parser.parse_args()

    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if FLAGS.input_format == 'keras':
        if FLAGS.output_node_names:
            raise ValueError(
                'The --output_node_names flag is applicable only to input format '
                '"tensorflow", but the current input format is "keras".')

        dispatch_pykeras_conversion(FLAGS.input_path,
                                    output_dir=FLAGS.output_dir)
    elif FLAGS.input_format == 'tf_saved_model':
        tf_saved_model_conversion.convert_tf_saved_model(
            FLAGS.input_path,
            FLAGS.output_node_names,
            FLAGS.output_dir,
            saved_model_tags=FLAGS.saved_model_tags)
    elif FLAGS.input_format == 'tf_session_bundle':
        tf_saved_model_conversion.convert_tf_session_bundle(
            FLAGS.input_path, FLAGS.output_node_names, FLAGS.output_dir)
    else:
        raise ValueError('Invalid input format: \'%s\'' % FLAGS.input_format)
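For reference, the tf_saved_model branch of this CLI boils down to a direct call like the one below. The paths and node names are placeholders, and the import assumes the converters package layout these examples appear to use:

from tensorflowjs.converters import tf_saved_model_conversion

tf_saved_model_conversion.convert_tf_saved_model(
    '/tmp/my_saved_model',     # input_path: SavedModel directory
    'logits,activations',      # output_node_names, comma separated
    '/tmp/tfjs_out',           # output_dir
    saved_model_tags='serve')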
Example No. 5
  def test_optimizer_add_unsupported_op(self):
    self.create_saved_model()
    print(glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR, '*')))
    with self.assertRaisesRegexp(  # pylint: disable=deprecated-method
        ValueError, r'^Unsupported Ops'):
      node = tf.test.mock.Mock(op='unknown')
      graph = tf.test.mock.Mock(node=[node])
      with tf.test.mock.patch.object(tf_optimizer,
                                     'OptimizeGraph',
                                     return_value=graph):
        tf_saved_model_conversion.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR), 'Softmax',
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))
Example No. 6
  def test_convert_saved_model_strip_debug_ops(self):
    self.create_saved_model_with_debug_ops()

    tf_saved_model_conversion.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        'add',
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        strip_debug_ops=True)

    # Check the content of the output directory.
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                         'tensorflowjs_model.pb')))
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example No. 7
def convert_to_tfjs(
    savedmodel_dir,
    output_dir,
    tags,
    signature,
    inputs,
    outputs,
    quantization_dtype,
    skip_op_check,
    strip_debug_ops
  ):
  with tf.Graph().as_default() as graph, tf.Session(graph=graph) as sess:
    meta_graph = tf.saved_model.loader.load(sess, tags, savedmodel_dir)
    meta = meta_graph.signature_def[signature]

    output_node_names = [
      meta.outputs[key].name for key in outputs
    ]

    # Look up the input/output tensors named in the signature (for logging below).
    input_tensors = [
      graph.get_tensor_by_name(meta.inputs[key].name) for key in inputs
    ]
    output_tensors = [
      graph.get_tensor_by_name(name) for name in output_node_names
    ]
    print('input tensors:')
    for i, t in enumerate(input_tensors):
      print('  {}: "{}" {}'.format(i, t.name, t.shape.as_list()))
    print('output tensors:')
    for i, t in enumerate(output_tensors):
      print('  {}: "{}" {}'.format(i, t.name, t.shape.as_list()))

    tf_saved_model_conversion.convert_tf_saved_model(
      savedmodel_dir,
      output_node_names=','.join([n.split(':')[0] for n in output_node_names]),
      output_dir=output_dir,
      saved_model_tags=','.join(tags),
      quantization_dtype=quantization_dtype,
      skip_op_check=skip_op_check,
      strip_debug_ops=strip_debug_ops)
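A call to convert_to_tfjs might look like the following; every value is a placeholder, and 'image' / 'scores' stand in for keys that must exist in the chosen signature's inputs and outputs maps:

convert_to_tfjs(
    savedmodel_dir='/tmp/my_saved_model',
    output_dir='/tmp/tfjs_out',
    tags=['serve'],               # MetaGraphDef tags to load
    signature='serving_default',  # signature_def key
    inputs=['image'],             # keys into meta.inputs
    outputs=['scores'],           # keys into meta.outputs
    quantization_dtype=None,      # no weight quantization
    skip_op_check=False,
    strip_debug_ops=True)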
Example No. 8
def main():
    parser = argparse.ArgumentParser('TensorFlow.js model converters.')
    parser.add_argument(
        'input_path',
        nargs='?',
        type=str,
        help='Path to the input file or directory. For input format "keras", '
        'an HDF5 (.h5) file is expected. For input format "tensorflow", '
        'a SavedModel directory, session bundle directory, frozen model file, '
        'or TF-Hub module is expected.')
    parser.add_argument('output_path',
                        nargs='?',
                        type=str,
                        help='Path for all output artifacts.')
    parser.add_argument(
        '--input_format',
        type=str,
        required=False,
        default='tf_saved_model',
        choices=set([
            'keras', 'keras_saved_model', 'tf_saved_model',
            'tf_session_bundle', 'tf_frozen_model', 'tf_hub', 'tensorflowjs'
        ]),
        help='Input format. '
        'For "keras", the input path can be one of the two following formats:\n'
        '  - A topology+weights combined HDF5 (e.g., generated with'
        '    `keras.models.save_model()` method).\n'
        '  - A weights-only HDF5 (e.g., generated with Keras Model\'s '
        '    `save_weights()` method). \n'
        'For "keras_saved_model", the input_path must point to a subfolder '
        'under the saved model folder that is passed as the argument '
        'to tf.contrib.save_model.save_keras_model(). '
        'The subfolder is generated automatically by tensorflow when '
        'saving tf.keras model in the SavedModel format. It is usually named '
        'as a Unix epoch time (e.g., 1542212752).\n'
        'For "tf" formats, a SavedModel, frozen model, session bundle model, '
        'or TF-Hub module is expected.')
    parser.add_argument('--output_format',
                        type=str,
                        required=False,
                        choices=set(['keras', 'tensorflowjs']),
                        default='tensorflowjs',
                        help='Output format. Default: tensorflowjs.')
    parser.add_argument(
        '--output_node_names',
        type=str,
        help='The names of the output nodes, separated by commas. E.g., '
        '"logits,activations". Applicable only if input format is '
        '"tf_saved_model" or "tf_session_bundle".')
    parser.add_argument(
        '--signature_name',
        type=str,
        help='Signature of the TF-Hub module to load. Applicable only if input'
        ' format is "tf_hub".')
    parser.add_argument(
        '--saved_model_tags',
        type=str,
        default='serve',
        help='Tags of the MetaGraphDef to load, in comma separated string '
        'format. Defaults to "serve". Applicable only if input format is '
        '"tf_saved_model".')
    parser.add_argument(
        '--quantization_bytes',
        type=int,
        choices=set(quantization.QUANTIZATION_BYTES_TO_DTYPES.keys()),
        help='How many bytes to optionally quantize/compress the weights to. 1- '
        'and 2-byte quantization is supported. The default (unquantized) size is '
        '4 bytes.')
    parser.add_argument(
        '--split_weights_by_layer',
        action='store_true',
        help='Applicable to keras input_format only: Whether the weights from '
        'different layers are to be stored in separate weight groups, '
        'corresponding to separate binary weight files. Default: False.')
    parser.add_argument(
        '--version',
        '-v',
        dest='show_version',
        action='store_true',
        help='Show versions of tensorflowjs and its dependencies')
    parser.add_argument(
        '--skip_op_check',
        type=bool,
        default=False,
        help='Skip op validation for TensorFlow model conversion.')
    parser.add_argument(
        '--strip_debug_ops',
        type=bool,
        default=True,
        help='Strip debug ops (Print, Assert, CheckNumerics) from graph.')

    FLAGS = parser.parse_args()

    if FLAGS.show_version:
        print('\ntensorflowjs %s\n' % version.version)
        print('Dependency versions:')
        print('  keras %s' % keras.__version__)
        print('  tensorflow %s' % tf.__version__)
        return

    if FLAGS.input_path is None:
        raise ValueError('Error: The input_path argument must be set. '
                         'Run with --help flag for usage information.')

    quantization_dtype = (
        quantization.QUANTIZATION_BYTES_TO_DTYPES[FLAGS.quantization_bytes]
        if FLAGS.quantization_bytes else None)

    if (FLAGS.output_node_names and FLAGS.input_format
            not in ('tf_saved_model', 'tf_session_bundle', 'tf_frozen_model')):
        raise ValueError(
            'The --output_node_names flag is applicable only to input formats '
            '"tf_saved_model", "tf_session_bundle" and "tf_frozen_model", '
            'but the current input format is "%s".' % FLAGS.input_format)

    if FLAGS.signature_name and FLAGS.input_format != 'tf_hub':
        raise ValueError(
            'The --signature_name is applicable only to "tf_hub" input format, '
            'but the current input format is "%s".' % FLAGS.input_format)

    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if FLAGS.input_format == 'keras' and FLAGS.output_format == 'tensorflowjs':
        dispatch_keras_h5_to_tensorflowjs_conversion(
            FLAGS.input_path,
            output_dir=FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=FLAGS.split_weights_by_layer)
    elif (FLAGS.input_format == 'keras_saved_model'
          and FLAGS.output_format == 'tensorflowjs'):
        dispatch_keras_saved_model_to_tensorflowjs_conversion(
            FLAGS.input_path,
            FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=FLAGS.split_weights_by_layer)
    elif (FLAGS.input_format == 'tf_saved_model'
          and FLAGS.output_format == 'tensorflowjs'):
        tf_saved_model_conversion.convert_tf_saved_model(
            FLAGS.input_path,
            FLAGS.output_node_names,
            FLAGS.output_path,
            saved_model_tags=FLAGS.saved_model_tags,
            quantization_dtype=quantization_dtype,
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)

    elif (FLAGS.input_format == 'tf_session_bundle'
          and FLAGS.output_format == 'tensorflowjs'):
        tf_saved_model_conversion.convert_tf_session_bundle(
            FLAGS.input_path,
            FLAGS.output_node_names,
            FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)

    elif (FLAGS.input_format == 'tf_frozen_model'
          and FLAGS.output_format == 'tensorflowjs'):
        tf_saved_model_conversion.convert_tf_frozen_model(
            FLAGS.input_path,
            FLAGS.output_node_names,
            FLAGS.output_path,
            quantization_dtype=quantization_dtype,
            skip_op_check=FLAGS.skip_op_check,
            strip_debug_ops=FLAGS.strip_debug_ops)

    elif (FLAGS.input_format == 'tf_hub'
          and FLAGS.output_format == 'tensorflowjs'):
        if FLAGS.signature_name:
            tf_saved_model_conversion.convert_tf_hub_module(
                FLAGS.input_path,
                FLAGS.output_path,
                FLAGS.signature_name,
                skip_op_check=FLAGS.skip_op_check,
                strip_debug_ops=FLAGS.strip_debug_ops)
        else:
            tf_saved_model_conversion.convert_tf_hub_module(
                FLAGS.input_path,
                FLAGS.output_path,
                skip_op_check=FLAGS.skip_op_check,
                strip_debug_ops=FLAGS.strip_debug_ops)

    elif (FLAGS.input_format == 'tensorflowjs'
          and FLAGS.output_format == 'keras'):
        dispatch_tensorflowjs_to_keras_h5_conversion(FLAGS.input_path,
                                                     FLAGS.output_path)

    else:
        raise ValueError(
            'Unsupported input_format - output_format pair: %s - %s' %
            (FLAGS.input_format, FLAGS.output_format))
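The quantization table these CLIs consume maps a byte width to a numpy dtype. A sketch consistent with how it is used here (the real quantization module may differ):

import numpy as np

# --quantization_bytes 1 or 2 selects the target dtype; leaving the flag
# unset keeps the weights as 4-byte float32.
QUANTIZATION_BYTES_TO_DTYPES = {1: np.uint8, 2: np.uint16}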
Example No. 9
def main():
  parser = argparse.ArgumentParser('TensorFlow.js model converters.')
  parser.add_argument(
      'input_path',
      nargs='?',
      type=str,
      help='Path to the input file or directory. For input format "keras", '
      'an HDF5 (.h5) file is expected. For input format "tensorflow", '
      'a SavedModel directory, session bundle directory, frozen model file, '
      'or TF-Hub module is expected.')
  parser.add_argument(
      'output_path', nargs='?', type=str, help='Path for all output artifacts.')
  parser.add_argument(
      '--input_format',
      type=str,
      required=False,
      default='tf_saved_model',
      choices=set(['keras', 'tf_saved_model', 'tf_session_bundle',
                   'tf_frozen_model', 'tf_hub', 'tensorflowjs']),
      help='Input format. '
      'For "keras", the input path can be one of the two following formats:\n'
      '  - A topology+weights combined HDF5 (e.g., generated with'
      '    `keras.models.save_model()` method).\n'
      '  - A weights-only HDF5 (e.g., generated with Keras Model\'s '
      '    `save_weights()` method). \n'
      'For "tf" formats, a SavedModel, frozen model, session bundle model, '
      'or TF-Hub module is expected.')
  parser.add_argument(
      '--output_format',
      type=str,
      required=False,
      choices=set(['keras', 'tensorflowjs']),
      default='tensorflowjs',
      help='Output format. Default: tensorflowjs.')
  parser.add_argument(
      '--output_node_names',
      type=str,
      help='The names of the output nodes, separated by commas. E.g., '
      '"logits,activations". Applicable only if input format is '
      '"tf_saved_model" or "tf_session_bundle".')
  parser.add_argument(
      '--signature_name',
      type=str,
      help='Signature of the TF-Hub module to load. Applicable only if input'
      ' format is "tf_hub".')
  parser.add_argument(
      '--saved_model_tags',
      type=str,
      default='serve',
      help='Tags of the MetaGraphDef to load, in comma separated string '
      'format. Defaults to "serve". Applicable only if input format is '
      '"tf_saved_model".')
  parser.add_argument(
      '--quantization_bytes',
      type=int,
      choices=set(quantization.QUANTIZATION_BYTES_TO_DTYPES.keys()),
      help='How many bytes to optionally quantize/compress the weights to. 1- '
      'and 2-byte quantization is supported. The default (unquantized) size is '
      '4 bytes.')
  parser.add_argument(
      '--version',
      '-v',
      dest='show_version',
      action='store_true',
      help='Show versions of tensorflowjs and its dependencies')
  parser.add_argument(
      '--skip_op_check',
      type=bool,
      default=False,
      help='Skip op validation for TensorFlow model conversion.')
  parser.add_argument(
      '--strip_debug_ops',
      type=bool,
      default=True,
      help='Strip debug ops (Print, Assert, CheckNumerics) from graph.')

  FLAGS = parser.parse_args()

  if FLAGS.show_version:
    print('\ntensorflowjs %s\n' % version.version)
    print('Dependency versions:')
    print('  keras %s' % keras.__version__)
    print('  tensorflow %s' % tf.__version__)
    return

  if FLAGS.input_path is None:
    raise ValueError(
        'Error: The input_path argument must be set. '
        'Run with --help flag for usage information.')

  quantization_dtype = (
      quantization.QUANTIZATION_BYTES_TO_DTYPES[FLAGS.quantization_bytes]
      if FLAGS.quantization_bytes else None)

  if (FLAGS.output_node_names and
      FLAGS.input_format not in
      ('tf_saved_model', 'tf_session_bundle', 'tf_frozen_model')):
    raise ValueError(
        'The --output_node_names flag is applicable only to input formats '
        '"tf_saved_model", "tf_session_bundle" and "tf_frozen_model", '
        'but the current input format is "%s".' % FLAGS.input_format)

  if FLAGS.signature_name and FLAGS.input_format != 'tf_hub':
    raise ValueError(
        'The --signature_name is applicable only to "tf_hub" input format, '
        'but the current input format is "%s".' % FLAGS.input_format)

  # TODO(cais, piyu): More conversion logic can be added as additional
  #   branches below.
  if FLAGS.input_format == 'keras' and FLAGS.output_format == 'tensorflowjs':
    dispatch_keras_h5_to_tensorflowjs_conversion(
        FLAGS.input_path, output_dir=FLAGS.output_path,
        quantization_dtype=quantization_dtype)

  elif (FLAGS.input_format == 'tf_saved_model' and
        FLAGS.output_format == 'tensorflowjs'):
    tf_saved_model_conversion.convert_tf_saved_model(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, saved_model_tags=FLAGS.saved_model_tags,
        quantization_dtype=quantization_dtype,
        skip_op_check=FLAGS.skip_op_check,
        strip_debug_ops=FLAGS.strip_debug_ops)

  elif (FLAGS.input_format == 'tf_session_bundle' and
        FLAGS.output_format == 'tensorflowjs'):
    tf_saved_model_conversion.convert_tf_session_bundle(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, quantization_dtype=quantization_dtype,
        skip_op_check=FLAGS.skip_op_check,
        strip_debug_ops=FLAGS.strip_debug_ops)

  elif (FLAGS.input_format == 'tf_frozen_model' and
        FLAGS.output_format == 'tensorflowjs'):
    tf_saved_model_conversion.convert_tf_frozen_model(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, quantization_dtype=quantization_dtype,
        skip_op_check=FLAGS.skip_op_check,
        strip_debug_ops=FLAGS.strip_debug_ops)

  elif (FLAGS.input_format == 'tf_hub' and
        FLAGS.output_format == 'tensorflowjs'):
    if FLAGS.signature_name:
      tf_saved_model_conversion.convert_tf_hub_module(
          FLAGS.input_path, FLAGS.output_path, FLAGS.signature_name,
          skip_op_check=FLAGS.skip_op_check,
          strip_debug_ops=FLAGS.strip_debug_ops)
    else:
      tf_saved_model_conversion.convert_tf_hub_module(
          FLAGS.input_path,
          FLAGS.output_path,
          skip_op_check=FLAGS.skip_op_check,
          strip_debug_ops=FLAGS.strip_debug_ops)

  elif (FLAGS.input_format == 'tensorflowjs' and
        FLAGS.output_format == 'keras'):
    dispatch_tensorflowjs_to_keras_h5_conversion(FLAGS.input_path,
                                                 FLAGS.output_path)

  else:
    raise ValueError(
        'Unsupported input_format - output_format pair: %s - %s' %
        (FLAGS.input_format, FLAGS.output_format))
Example No. 10
def main():
  parser = argparse.ArgumentParser('TensorFlow.js model converters.')
  parser.add_argument(
      'input_path',
      nargs='?',
      type=str,
      help='Path to the input file or directory. For input format "keras", '
      'an HDF5 (.h5) file is expected. For input format "tensorflow", '
      'a SavedModel directory, session bundle directory, frozen model file, '
      'or TF-Hub module is expected.')
  parser.add_argument(
      'output_path', nargs='?', type=str, help='Path for all output artifacts.')
  parser.add_argument(
      '--input_format',
      type=str,
      required=False,
      default='tf_saved_model',
      choices=set(['keras', 'tf_saved_model', 'tf_session_bundle',
                   'tf_frozen_model', 'tf_hub', 'tensorflowjs']),
      help='Input format. '
      'For "keras", the input path can be one of the two following formats:\n'
      '  - A topology+weights combined HDF5 (e.g., generated with'
      '    `keras.models.save_model()` method).\n'
      '  - A weights-only HDF5 (e.g., generated with Keras Model\'s '
      '    `save_weights()` method). \n'
      'For "tf" formats, a SavedModel, frozen model, session bundle model, '
      'or TF-Hub module is expected.')
  parser.add_argument(
      '--output_format',
      type=str,
      required=False,
      choices=set(['keras', 'tensorflowjs']),
      default='tensorflowjs',
      help='Output format. Default: tensorflowjs.')
  parser.add_argument(
      '--output_node_names',
      type=str,
      help='The names of the output nodes, separated by commas. E.g., '
      '"logits,activations". Applicable only if input format is '
      '"tf_saved_model" or "tf_session_bundle".')
  parser.add_argument(
      '--signature_name',
      type=str,
      help='Signature of the TF-Hub module to load. Applicable only if input'
      ' format is "tf_hub".')
  parser.add_argument(
      '--saved_model_tags',
      type=str,
      default='serve',
      help='Tags of the MetaGraphDef to load, in comma separated string '
      'format. Defaults to "serve". Applicable only if input format is '
      '"tf_saved_model".')
  parser.add_argument(
      '--quantization_bytes',
      type=int,
      choices=set(quantization.QUANTIZATION_BYTES_TO_DTYPES.keys()),
      help='How many bytes to optionally quantize/compress the weights to. 1- '
      'and 2-byte quantization is supported. The default (unquantized) size is '
      '4 bytes.')
  parser.add_argument(
      '--version',
      '-v',
      dest='show_version',
      action='store_true',
      help='Show versions of tensorflowjs and its dependencies')

  FLAGS = parser.parse_args()

  if FLAGS.show_version:
    print('\ntensorflowjs %s\n' % version.version)
    print('Dependency versions:')
    print('  keras %s' % keras.__version__)
    print('  tensorflow %s' % tf.__version__)
    return

  quantization_dtype = (
      quantization.QUANTIZATION_BYTES_TO_DTYPES[FLAGS.quantization_bytes]
      if FLAGS.quantization_bytes else None)

  if (FLAGS.output_node_names and
      FLAGS.input_format not in
      ('tf_saved_model', 'tf_session_bundle', 'tf_frozen_model')):
    raise ValueError(
        'The --output_node_names flag is applicable only to input formats '
        '"tf_saved_model", "tf_session_bundle" and "tf_frozen_model", '
        'but the current input format is "%s".' % FLAGS.input_format)

  if FLAGS.signature_name and FLAGS.input_format != 'tf_hub':
    raise ValueError(
        'The --signature_name is applicable only to "tf_hub" input format, '
        'but the current input format is "%s".' % FLAGS.input_format)

  # TODO(cais, piyu): More conversion logic can be added as additional
  #   branches below.
  if FLAGS.input_format == 'keras' and FLAGS.output_format == 'tensorflowjs':
    dispatch_keras_h5_to_tensorflowjs_conversion(
        FLAGS.input_path, output_dir=FLAGS.output_path,
        quantization_dtype=quantization_dtype)

  elif (FLAGS.input_format == 'tf_saved_model' and
        FLAGS.output_format == 'tensorflowjs'):
    tf_saved_model_conversion.convert_tf_saved_model(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, saved_model_tags=FLAGS.saved_model_tags,
        quantization_dtype=quantization_dtype)

  elif (FLAGS.input_format == 'tf_session_bundle' and
        FLAGS.output_format == 'tensorflowjs'):
    tf_saved_model_conversion.convert_tf_session_bundle(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, quantization_dtype=quantization_dtype)

  elif (FLAGS.input_format == 'tf_frozen_model' and
        FLAGS.output_format == 'tensorflowjs'):
    tf_saved_model_conversion.convert_tf_frozen_model(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, quantization_dtype=quantization_dtype)

  elif (FLAGS.input_format == 'tf_hub' and
        FLAGS.output_format == 'tensorflowjs'):
    if FLAGS.signature_name:
      tf_saved_model_conversion.convert_tf_hub_module(
          FLAGS.input_path, FLAGS.output_path, FLAGS.signature_name)
    else:
      tf_saved_model_conversion.convert_tf_hub_module(FLAGS.input_path,
                                                      FLAGS.output_path)

  elif (FLAGS.input_format == 'tensorflowjs' and
        FLAGS.output_format == 'keras'):
    dispatch_tensorflowjs_to_keras_h5_conversion(FLAGS.input_path,
                                                 FLAGS.output_path)

  else:
    raise ValueError(
        'Unsupported input_format - output_format pair: %s - %s' %
        (FLAGS.input_format, FLAGS.output_format))
Example No. 11
def main():
  FLAGS = setup_arugments()
  if FLAGS.show_version:
    print('\ntensorflowjs %s\n' % version.version)
    print('Dependency versions:')
    print('  keras %s' % keras.__version__)
    print('  tensorflow %s' % tf.__version__)
    return

  if FLAGS.input_path is None:
    raise ValueError(
        'Error: The input_path argument must be set. '
        'Run with --help flag for usage information.')

  input_format, output_format = _standardize_input_output_formats(
      FLAGS.input_format, FLAGS.output_format)

  quantization_dtype = (
      quantization.QUANTIZATION_BYTES_TO_DTYPES[FLAGS.quantization_bytes]
      if FLAGS.quantization_bytes else None)

  if (FLAGS.output_node_names and
      input_format not in
      ('tf_saved_model', 'tf_session_bundle', 'tf_frozen_model')):
    raise ValueError(
        'The --output_node_names flag is applicable only to input formats '
        '"tf_saved_model", "tf_session_bundle" and "tf_frozen_model", '
        'but the current input format is "%s".' % FLAGS.input_format)

  if FLAGS.signature_name and input_format != 'tf_hub':
    raise ValueError(
        'The --signature_name is applicable only to "tf_hub" input format, '
        'but the current input format is "%s".' % input_format)

  # TODO(cais, piyu): More conversion logic can be added as additional
  #   branches below.
  if input_format == 'keras' and output_format == 'tfjs_layers_model':
    dispatch_keras_h5_to_tensorflowjs_conversion(
        FLAGS.input_path, output_dir=FLAGS.output_path,
        quantization_dtype=quantization_dtype,
        split_weights_by_layer=FLAGS.split_weights_by_layer)
  elif (input_format == 'keras_saved_model' and
        output_format == 'tfjs_layers_model'):
    dispatch_keras_saved_model_to_tensorflowjs_conversion(
        FLAGS.input_path, FLAGS.output_path,
        quantization_dtype=quantization_dtype,
        split_weights_by_layer=FLAGS.split_weights_by_layer)
  elif (input_format == 'tf_saved_model' and
        output_format == 'tfjs_graph_model'):
    tf_saved_model_conversion.convert_tf_saved_model(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, saved_model_tags=FLAGS.saved_model_tags,
        quantization_dtype=quantization_dtype,
        skip_op_check=FLAGS.skip_op_check,
        strip_debug_ops=FLAGS.strip_debug_ops)
  elif (input_format == 'tf_session_bundle' and
        output_format == 'tfjs_graph_model'):
    tf_saved_model_conversion.convert_tf_session_bundle(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, quantization_dtype=quantization_dtype,
        skip_op_check=FLAGS.skip_op_check,
        strip_debug_ops=FLAGS.strip_debug_ops)
  elif (input_format == 'tf_frozen_model' and
        output_format == 'tfjs_graph_model'):
    tf_saved_model_conversion.convert_tf_frozen_model(
        FLAGS.input_path, FLAGS.output_node_names,
        FLAGS.output_path, quantization_dtype=quantization_dtype,
        skip_op_check=FLAGS.skip_op_check,
        strip_debug_ops=FLAGS.strip_debug_ops)
  elif (input_format == 'tf_hub' and
        output_format == 'tfjs_graph_model'):
    if FLAGS.signature_name:
      tf_saved_model_conversion.convert_tf_hub_module(
          FLAGS.input_path, FLAGS.output_path, FLAGS.signature_name,
          skip_op_check=FLAGS.skip_op_check,
          strip_debug_ops=FLAGS.strip_debug_ops)
    else:
      tf_saved_model_conversion.convert_tf_hub_module(
          FLAGS.input_path,
          FLAGS.output_path,
          skip_op_check=FLAGS.skip_op_check,
          strip_debug_ops=FLAGS.strip_debug_ops)
  elif (input_format == 'tfjs_layers_model' and
        output_format == 'keras'):
    dispatch_tensorflowjs_to_keras_h5_conversion(FLAGS.input_path,
                                                 FLAGS.output_path)

  else:
    raise ValueError(
        'Unsupported input_format - output_format pair: %s - %s' %
        (input_format, output_format))
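The _standardize_input_output_formats helper Example No. 11 calls is not shown. A hypothetical sketch, inferred only from the dispatch branches above: it rewrites the legacy 'tensorflowjs' output format of Examples No. 8-10 into the newer tfjs_layers_model / tfjs_graph_model names.

def _standardize_input_output_formats(input_format, output_format):
  # Inferred behaviour, not the real source: map the legacy output format
  # name onto the names the dispatch branches above test against.
  if output_format == 'tensorflowjs':
    if input_format in ('keras', 'keras_saved_model'):
      output_format = 'tfjs_layers_model'
    elif input_format in ('tf_saved_model', 'tf_session_bundle',
                          'tf_frozen_model', 'tf_hub'):
      output_format = 'tfjs_graph_model'
  return input_format, output_format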