Example #1
    def test_optimizer_add_unsupported_op(self):
        self._create_unsupported_saved_model()
        with self.assertRaisesRegexp(  # pylint: disable=deprecated-method
                ValueError, r'^Unsupported Ops'):
            tf_saved_model_conversion_v2.convert_tf_saved_model(
                os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
                os.path.join(self._tmp_dir, SAVED_MODEL_DIR))
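The _create_unsupported_saved_model fixture is not shown on this page. A minimal sketch of what it might look like, assuming the offending op is tf.linalg.diag (which lowers to MatrixDiag, matching the weight names in Example #5) and that self._tmp_dir and SAVED_MODEL_DIR come from the test harness:

import os
import tensorflow as tf

def _create_unsupported_saved_model(self):
    # Hypothetical fixture: save a module whose traced function contains
    # an op the converter does not support (MatrixDiag via tf.linalg.diag).
    root = tf.Module()
    root.w = tf.Variable(tf.random.uniform([2, 2]))

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def exported(x):
        y = tf.matmul(root.w, root.w)
        return tf.linalg.diag(y) * x  # MatrixDiag is the unsupported op

    root.f = exported
    tf.saved_model.save(root, os.path.join(self._tmp_dir, SAVED_MODEL_DIR))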
Example #2
  def test_convert_saved_model_v1(self):
    self._create_saved_model_v1()

    input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    output_dir = os.path.join(input_dir, 'js')
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        input_dir,
        output_dir
    )

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js')
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    weights_manifest = model_json['weightsManifest']
    self.assertCountEqual(weights_manifest[0]['paths'],
                          ['group1-shard1of1.bin'])
    self.assertIn('weights', weights_manifest[0])

    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
Example #3
    def test_convert_saved_model_strip_debug_ops(self):
        self._create_saved_model_with_debug_ops()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            strip_debug_ops=True)

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        self.assertIsNot(model_json['modelTopology']['versions'], None)
        signature = model_json['userDefinedMetadata']['signature']
        self.assertIsNot(signature, None)
        self.assertIsNot(signature['inputs'], None)
        self.assertIsNot(signature['outputs'], None)

        weights_manifest = model_json['weightsManifest']
        self.assertCountEqual(weights_manifest[0]['paths'],
                              ['group1-shard1of1.bin'])
        self.assertIn('weights', weights_manifest[0])
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #4
    def test_convert_saved_model_v1(self):
        self._create_saved_model_v1()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'dtype': 'float32',
                'name': 'w',
                'shape': [2, 2]
            }]
        }]

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)
        # Check meta-data in the artifact JSON.
        self.assertEqual(model_json['format'], 'graph-model')
        self.assertEqual(model_json['convertedBy'],
                         'TensorFlow.js Converter v%s' % version.version)
        self.assertEqual(model_json['generatedBy'], tf.__version__)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #5
    def test_convert_saved_model_skip_op_check(self):
        self._create_unsupported_saved_model()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            skip_op_check=True)

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'dtype': 'float32',
                'name': 'statefulpartitionedcall_args_1',
                'shape': [2, 2]
            }, {
                'dtype': 'float32',
                'name': 'StatefulPartitionedCall/MatrixDiag',
                'shape': [2, 2, 2]
            }]
        }]
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #6
    def test_convert_saved_model_sharded(self):
        self._create_saved_model()
        model_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)

        # Do initial conversion without sharding.
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            model_path, tfjs_path)
        weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin'))

        # Get size of weights in bytes after graph optimizations.
        optimized_total_weight = sum(
            [os.path.getsize(f) for f in weight_files])

        # Due to the shard size, there ought to be 2 shards after conversion.
        weight_shard_size_bytes = int(optimized_total_weight * 0.8)

        tfjs_path = os.path.join(self._tmp_dir, 'sharded_model')
        # Convert Saved Model again with shard argument set.
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            model_path,
            tfjs_path,
            weight_shard_size_bytes=weight_shard_size_bytes)

        weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin')))
        self.assertEqual(len(weight_files), 2)
        weight_file_sizes = [os.path.getsize(f) for f in weight_files]

        self.assertEqual(sum(weight_file_sizes), optimized_total_weight)
        self.assertLess(weight_file_sizes[1], weight_file_sizes[0])
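Setting the shard limit to 80% of the total weight size forces exactly two files: the first holds the full 0.8 fraction and the second the remaining 0.2. A quick check of the arithmetic with illustrative numbers:

import math

total_bytes = 1000                    # stand-in for optimized_total_weight
shard_bytes = int(total_bytes * 0.8)  # 800-byte cap per weight file
assert math.ceil(total_bytes / shard_bytes) == 2  # 800 B + 200 B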
Example #7
    def test_convert_saved_model_v1_with_hashtable(self):
        self._create_saved_model_v1_with_hashtable()

        input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        output_dir = os.path.join(input_dir, 'js')
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            input_dir, output_dir)

        expected_weights_manifest = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'dtype': 'float32',
                'name': 'w',
                'shape': [2, 2]
            }, {
                'dtype': 'string',
                'name': 'Const',
                'shape': [1]
            }, {
                'dtype': 'int32',
                'name': 'Const_1',
                'shape': [1]
            }]
        }]

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js')
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        self.assertIsNot(model_json['modelTopology']['versions'], None)
        signature = model_json['signature']
        self.assertIsNot(signature, None)
        self.assertIsNot(signature['inputs'], None)
        self.assertIsNot(signature['outputs'], None)
        self.assertTrue(model_json['modelInitializer'])

        for node in model_json['modelTopology']['node']:
            if node['name'] == 'ToFloat' and node['op'] == 'Placeholder':
                self.assertEqual(
                    node['attr']['shape'], {
                        'shape': {
                            'dim': [{
                                'size': '-1'
                            }, {
                                'size': '2'
                            }, {
                                'size': '2'
                            }]
                        }
                    })

        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, expected_weights_manifest)
        # Check meta-data in the artifact JSON.
        self.assertEqual(model_json['format'], 'graph-model')
        self.assertEqual(model_json['convertedBy'],
                         'TensorFlow.js Converter v%s' % version.version)
        self.assertEqual(model_json['generatedBy'], tf.__version__)
        self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
Example #8
    def test_convert_saved_model(self):
        self._create_saved_model()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'dtype': 'float32',
                'name': 'StatefulPartitionedCall/mul',
                'shape': []
            }]
        }]

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        weights_manifest = model_json['weightsManifest']
        self.assertEqual(len(weights_manifest), len(weights))
        if sys.version_info[0] < 3:
            self.assertItemsEqual(weights_manifest[0]['paths'],
                                  weights[0]['paths'])
            self.assertItemsEqual(weights_manifest[0]['weights'],
                                  weights[0]['weights'])
        else:
            self.assertCountEqual(weights_manifest[0]['paths'],
                                  weights[0]['paths'])
            self.assertCountEqual(weights_manifest[0]['weights'],
                                  weights[0]['weights'])
Example #9
    def test_convert_saved_model_strip_debug_ops(self):
        self._create_saved_model_with_debug_ops()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            strip_debug_ops=True)

        weights = [{
            'paths': ['group1-shard1of1.bin'],
            'weights': [{
                'dtype': 'float32',
                'name': 'add',
                'shape': [2, 2]
            }]
        }]
        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        weights_manifest = model_json['weightsManifest']
        self.assertEqual(weights_manifest, weights)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #10
    def test_convert_saved_model_with_unfusable_prelu(self):
        self._create_saved_model_with_unfusable_prelu()
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        self.assertIsNot(model_json['modelTopology']['versions'], None)
        signature = model_json['userDefinedMetadata']['signature']
        self.assertIsNot(signature, None)
        self.assertIsNot(signature['inputs'], None)
        self.assertIsNot(signature['outputs'], None)

        nodes = model_json['modelTopology']['node']

        prelu_op = None
        for node in nodes:
            if node['op'] == 'Prelu':
                prelu_op = node
                break

        self.assertTrue(prelu_op)

        # Check meta-data in the artifact JSON.
        self.assertEqual(model_json['format'], 'graph-model')
        self.assertEqual(model_json['convertedBy'],
                         'TensorFlow.js Converter v%s' % version.version)
        self.assertEqual(model_json['generatedBy'], tf.__version__)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #11
    def test_convert_saved_model_with_control_flow(self):
        self._create_saved_model_with_control_flow()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        self.assertIsNot(model_json['modelTopology']['versions'], None)
        signature = model_json['userDefinedMetadata']['signature']
        self.assertIsNot(signature, None)
        self.assertIsNot(signature['inputs'], None)
        self.assertIsNot(signature['outputs'], None)

        weights_manifest = model_json['weightsManifest']
        self.assertCountEqual(weights_manifest[0]['paths'],
                              ['group1-shard1of1.bin'])
        self.assertIn('weights', weights_manifest[0])

        # Check meta-data in the artifact JSON.
        self.assertEqual(model_json['format'], 'graph-model')
        self.assertEqual(model_json['convertedBy'],
                         'TensorFlow.js Converter v%s' % version.version)
        self.assertEqual(model_json['generatedBy'], tf.__version__)
        self.assertTrue(
            glob.glob(os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                   'group*-*')))
Example #12
def export(path: str) -> None:
    torch_path = f"{path}.pth"
    onnx_path = f"{path}.onnx"
    tensorflow_path = f"{path}.pb"
    tensorflowjs_path = path

    model = LeNet5(10)
    model.load_state_dict(torch.load(torch_path))
    model = model.eval().cpu()

    torch.onnx.export(
        model,
        torch.zeros((1, 1, 28, 28)).float(),
        onnx_path,
        input_names=["img"],
        output_names=["pred"],
        do_constant_folding=True,
        export_params=True,
        opset_version=10,
        verbose=True,
    )

    model = onnx.load(onnx_path)
    prepare(model).export_graph(tensorflow_path)

    convert_tf_saved_model(tensorflow_path, tensorflowjs_path)
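A hypothetical invocation, assuming lenet5.pth sits next to the script; despite the .pb suffix, recent onnx-tf versions write a SavedModel directory, which is what convert_tf_saved_model expects:

export("lenet5")  # reads lenet5.pth; leaves model.json + weight shards under ./lenet5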
Example #13
def dispatch_keras_h5_to_tfjs_graph_model_conversion(
    h5_path, output_dir=None,
    quantization_dtype_map=None,
    skip_op_check=False,
    strip_debug_ops=False,
    use_structured_outputs_names=False,
    weight_shard_size_bytes=1024 * 1024 * 4,
    control_flow_v2=False,
    experiments=False,
    metadata=None):
  """
  Convert a keras HDF5-format model to tfjs GraphModel artifacts.

  Args:
    h5_path: Path to the HDF5-format file that contains the model saved from
      keras or tf.keras.
    output_dir: The destination to which the tfjs GraphModel artifacts will be
      written.
    quantization_dtype_map: A mapping from dtype (`uint8`, `uint16`, `float16`)
      to weights. The weight mapping supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to allow unsupported debug ops.
    use_structured_outputs_names: Bool whether output of graph model will follow
      the structured_outputs format.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """

  if not os.path.exists(h5_path):
    raise ValueError('Nonexistent path to HDF5 file: %s' % h5_path)
  if os.path.isdir(h5_path):
    raise ValueError(
        'Expected path to point to an HDF5 file, but it points to a '
        'directory: %s' % h5_path)

  temp_savedmodel_dir = tempfile.mktemp(suffix='.savedmodel')
  model = tf.keras.models.load_model(h5_path, compile=False)
  model.save(temp_savedmodel_dir, include_optimizer=False, save_format='tf')

  # NOTE(cais): This cannot use `tf.compat.v1` because
  #   `convert_tf_saved_model()` works only in v2.
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      temp_savedmodel_dir, output_dir,
      signature_def='serving_default',
      saved_model_tags='serve',
      quantization_dtype_map=quantization_dtype_map,
      skip_op_check=skip_op_check,
      strip_debug_ops=strip_debug_ops,
      use_structured_outputs_names=use_structured_outputs_names,
      weight_shard_size_bytes=weight_shard_size_bytes,
      control_flow_v2=control_flow_v2,
      experiments=experiments,
      metadata=metadata)

  # Clean up the temporary SavedModel directory.
  shutil.rmtree(temp_savedmodel_dir)
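A hedged usage sketch with hypothetical paths; the helper round-trips the HDF5 file through a temporary SavedModel before delegating to convert_tf_saved_model:

dispatch_keras_h5_to_tfjs_graph_model_conversion(
    '/tmp/my_model.h5',                   # hypothetical HDF5 input
    output_dir='/tmp/my_model_tfjs',      # model.json + shards land here
    weight_shard_size_bytes=1024 * 1024)  # 1 MB shards instead of the 4 MB default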
Example #14
def save_tfjs_from_torch(artifacts_dir, example_input, model, model_file_name):
    model_file_path = join(artifacts_dir, model_file_name)
    os.makedirs(model_file_path, exist_ok=True)
    torch.onnx.export(model.cpu(), example_input,
                      join(model_file_path, 'model.onnx'),
                      export_params=True, opset_version=11)
    model_onnx = onnx.load(join(model_file_path, 'model.onnx'))
    model_tf = prepare(model_onnx)
    model_tf.export_graph(join(model_file_path, 'model'))
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        join(model_file_path, 'model'), model_file_path, skip_op_check=True)
    rmtree(join(model_file_path, 'model'))
    os.remove(join(model_file_path, 'model.onnx'))
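A hypothetical call with a self-contained model; the intermediate model.onnx and SavedModel are removed, leaving only the TF.js artifacts in ./artifacts/tiny_cnn:

import torch

model = torch.nn.Sequential(torch.nn.Conv2d(1, 8, 3), torch.nn.ReLU()).eval()
save_tfjs_from_torch('./artifacts', torch.randn(1, 1, 28, 28), model, 'tiny_cnn')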
Example #15
    def test_convert_saved_model_with_frozen_file(self):
        self._create_saved_model()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            frozen_graph_dir=os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        frozen_file_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR,
                                        'model.json.frozen')
        # Check model.json.frozen exist.
        self.assertTrue(glob.glob(frozen_file_path))
Example #16
    def test_convert_saved_model_structured_outputs_false(self):
        self._create_saved_model_with_structured_outputs()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR))

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertIs(model_json.get('userDefinedMetadata'), None)
Example #17
  def test_convert_saved_model_with_prelu(self):
    self._create_saved_model_with_prelu()
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    self.assertIsNot(model_json['modelTopology']['versions'], None)
    signature = model_json['userDefinedMetadata']['signature']
    self.assertIsNot(signature, None)
    self.assertIsNot(signature['inputs'], None)
    self.assertIsNot(signature['outputs'], None)

    nodes = model_json['modelTopology']['node']

    prelu_op = None
    fused_op = None
    depthwise_fused_op = None
    for node in nodes:
      if node['op'] == 'Prelu':
        prelu_op = node
      if node['op'] == '_FusedConv2D':
        fused_op = node
      if node['op'] == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
        depthwise_fused_op = node
    self.assertIs(prelu_op, None)
    self.assertIsNot(fused_op, None)
    self.assertIsNot(depthwise_fused_op, None)

    fused_ops = list(map(base64.b64decode,
                         fused_op['attr']['fused_ops']['list']['s']))
    self.assertEqual(fused_ops, [b'BiasAdd', b'Prelu'])
    self.assertEqual(fused_op['attr']['num_args']['i'], '2')
    depthwise_fused_ops = list(
        map(base64.b64decode,
            depthwise_fused_op['attr']['fused_ops']['list']['s']))
    self.assertEqual(depthwise_fused_ops, [b'BiasAdd', b'Prelu'])
    self.assertEqual(depthwise_fused_op['attr']['num_args']['i'], '2')
    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
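The fused_ops attribute is compared via base64.b64decode because GraphDef byte-string attributes are base64-encoded when the graph is serialized to JSON; for instance:

import base64

assert base64.b64decode('Qmlhc0FkZA==') == b'BiasAdd'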
Example #18
def make_tfjs_export(tflite_saved_model_dir, export_dir):
    if os.path.exists(export_dir):
        log('TF-JS export already exists in {}, skipping TF-JS export'.format(
            export_dir))
        return

    # Make a TF-JS model from the TF-Lite SavedModel export.
    log('Making TF-JS model ...')
    os.makedirs(export_dir)
    tfjs_saved_model_converter.convert_tf_saved_model(tflite_saved_model_dir,
                                                      export_dir)
    log('Done')
Example #19
  def test_convert_saved_model_with_control_flow(self):
    self._create_saved_model_with_control_flow()

    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )

    weights = [{
        'paths': ['group1-shard1of1.bin'],
        'weights': [{'dtype': 'int32', 'shape': [],
                     'name': 'StatefulPartitionedCall/while/loop_counter'},
                    {'dtype': 'int32', 'shape': [],
                     'name': 'StatefulPartitionedCall/while/maximum_iterations'
                    },
                    {'dtype': 'int32', 'shape': [],
                     'name': 'StatefulPartitionedCall/while/cond/_3/mod/y'},
                    {'dtype': 'int32', 'shape': [],
                     'name': 'StatefulPartitionedCall/while/cond/_3/Equal/y'},
                    {'dtype': 'int32', 'shape': [],
                     'name': 'StatefulPartitionedCall/while/body/_4/add_1/y'},
                    {'name': 'StatefulPartitionedCall/add/y',
                     'dtype': 'int32', 'shape': []}]}]

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    weights_manifest = model_json['weightsManifest']
    self.assertEqual(len(weights_manifest), len(weights))
    if sys.version_info[0] < 3:
      self.assertItemsEqual(weights_manifest[0]['paths'],
                            weights[0]['paths'])
      self.assertItemsEqual(weights_manifest[0]['weights'],
                            weights[0]['weights'])
    else:
      self.assertCountEqual(weights_manifest[0]['paths'],
                            weights[0]['paths'])
      self.assertCountEqual(weights_manifest[0]['weights'],
                            weights[0]['weights'])

    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example #20
def dispatch_keras_h5_to_tfjs_graph_model_conversion(
        h5_path,
        output_dir=None,
        quantization_dtype=None,
        skip_op_check=False,
        strip_debug_ops=False,
        weight_shard_size_bytes=1024 * 1024 * 4,
        control_flow_v2=False):
    """
  Convert a keras HDF5-format model to tfjs GraphModel artifacts.

  Args:
    h5_path: Path to the HDF5-format file that contains the model saved from
      keras or tf.keras.
    output_dir: The destination to which the tfjs GraphModel artifacts will be
      written.
    quantization_dtype: The quantized data type to store the weights in
      (Default: `None`).
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to allow unsupported debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
  """

    if not os.path.exists(h5_path):
        raise ValueError('Nonexistent path to HDF5 file: %s' % h5_path)
    if os.path.isdir(h5_path):
        raise ValueError(
            'Expected path to point to an HDF5 file, but it points to a '
            'directory: %s' % h5_path)

    temp_savedmodel_dir = tempfile.mktemp(suffix='.savedmodel')
    model = tf.keras.models.load_model(h5_path, compile=False)
    model.save(temp_savedmodel_dir, include_optimizer=False, save_format='tf')

    # NOTE(cais): This cannot use `tf.compat.v1` because
    #   `convert_tf_saved_model()` works only in v2.
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        temp_savedmodel_dir,
        output_dir,
        signature_def='serving_default',
        saved_model_tags='serve',
        quantization_dtype=quantization_dtype,
        skip_op_check=skip_op_check,
        strip_debug_ops=strip_debug_ops,
        weight_shard_size_bytes=weight_shard_size_bytes,
        control_flow_v2=control_flow_v2)

    # Clean up the temporary SavedModel directory.
    shutil.rmtree(temp_savedmodel_dir)
Example #21
  def test_convert_saved_model_with_control_flow_v2(self):
    self._create_saved_model_with_control_flow()

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        tfjs_path, tfjs_path, control_flow_v2=True
    )

    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    self.assertIsNot(model_json['modelTopology']['versions'], None)
    signature = model_json['signature']
    self.assertIsNot(signature, None)
    self.assertIsNot(signature['inputs'], None)
    self.assertIsNot(signature['outputs'], None)

    weights_manifest = model_json['weightsManifest']
    self.assertCountEqual(weights_manifest[0]['paths'],
                          ['group1-shard1of1.bin'])
    self.assertIn('weights', weights_manifest[0])

    add_y_weight = None
    for weight in weights_manifest[0]['weights']:
      if 'add/y' in weight['name']:
        add_y_weight = weight

    self.assertIsNot(add_y_weight, None)
    self.assertFalse(add_y_weight['name'].startswith('add/y'))

    nodes = model_json['modelTopology']['node']

    while_op = None
    for node in nodes:
      self.assertNotIn('Merge', node['op'])
      self.assertNotIn('Switch', node['op'])
      if node['op'] == 'StatelessWhile':
        while_op = node
    self.assertIsNot(while_op, None)
    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example #22
    def test_convert_saved_model_with_metadata(self):
        self._create_saved_model()

        metadata_json = {'a': 1}

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            metadata={'key': metadata_json})

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertEqual(metadata_json,
                         model_json['userDefinedMetadata']['key'])
Example #23
  def test_convert_saved_model_with_fused_depthwise_conv2d(self):
    self._create_saved_model_with_fusable_depthwise_conv2d()
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    self.assertIsNot(model_json['modelTopology']['versions'], None)
    signature = model_json['userDefinedMetadata']['signature']
    self.assertIsNot(signature, None)
    self.assertIsNot(signature['inputs'], None)
    self.assertIsNot(signature['outputs'], None)

    nodes = model_json['modelTopology']['node']

    fusedOp = None
    for node in nodes:
      self.assertNotIn('BatchNorm', node['op'])
      self.assertNotIn('Relu', node['op'])
      self.assertNotIn('BiasAdd', node['op'])
      if node['op'] == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
        fusedOp = node
    self.assertIsNot(fusedOp, None)
    self.assertIsNot(fusedOp['attr']['dilations'], None)
    self.assertIsNot(fusedOp['attr']['strides'], None)
    self.assertEqual(
        base64.b64decode(fusedOp['attr']['fused_ops']['list']['s'][0]),
        b'BiasAdd')
    self.assertEqual(
        base64.b64decode(fusedOp['attr']['fused_ops']['list']['s'][1]),
        b'Relu')

    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example #24
  def test_convert_saved_model(self):
    self._create_saved_model()

    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    weights_manifest = model_json['weightsManifest']
    self.assertCountEqual(weights_manifest[0]['paths'],
                          ['group1-shard1of1.bin'])
    self.assertIn('weights', weights_manifest[0])
Example #25
def dispatch_keras_h5_to_tfjs_graph_model_conversion(h5_path,
                                                     output_dir=None,
                                                     quantization_dtype=None,
                                                     skip_op_check=False,
                                                     strip_debug_ops=False):
    """
  Convert a keras HDF5-format model to tfjs GraphModel artifacts.

  Args:
    h5_path: Path to the HDF5-format file that contains the model saved from
      keras or tf.keras.
    output_dir: The destination to which the tfjs GraphModel artifacts will be
      written.
    quantization_dtype: The quantized data type to store the weights in
      (Default: `None`).
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to allow unsupported debug ops.
  """

    if not os.path.exists(h5_path):
        raise ValueError('Nonexistent path to HDF5 file: %s' % h5_path)
    if os.path.isdir(h5_path):
        raise ValueError(
            'Expected path to point to an HDF5 file, but it points to a '
            'directory: %s' % h5_path)

    temp_savedmodel_dir = tempfile.mktemp(suffix='.savedmodel')
    model = keras.models.load_model(h5_path)
    keras.experimental.export_saved_model(model,
                                          temp_savedmodel_dir,
                                          serving_only=True)

    # NOTE(cais): This cannot use `tf.compat.v1` because
    #   `convert_tf_saved_model()` works only in v2.
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        temp_savedmodel_dir,
        output_dir,
        signature_def='serving_default',
        saved_model_tags='serve',
        quantization_dtype=quantization_dtype,
        skip_op_check=skip_op_check,
        strip_debug_ops=strip_debug_ops)

    # Clean up the temporary SavedModel directory.
    shutil.rmtree(temp_savedmodel_dir)
Example #26
def save_tfjs(model, combined_model_name):
    combined_model_name_dir = f'{tmpdir}/tfjs-models/{combined_model_name}'
    os.makedirs(combined_model_name_dir, exist_ok=True)
    example_input = torch.randn(1, 1, 176, requires_grad=False)
    torch.onnx.export(model.cpu(),
                      example_input,
                      f'{combined_model_name_dir}/model.onnx',
                      export_params=True,
                      opset_version=11)
    onnx_model = onnx.load(f'{combined_model_name_dir}/model.onnx')
    tf_model = prepare(onnx_model)
    tf_model.export_graph(f'{combined_model_name_dir}/model')
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        f'{combined_model_name_dir}/model',
        combined_model_name_dir,
        skip_op_check=True)
    rmtree(f'{combined_model_name_dir}/model')
    os.remove(f'{combined_model_name_dir}/model.onnx')
Example #27
  def test_convert_saved_model_skip_op_check(self):
    self._create_unsupported_saved_model()

    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR), skip_op_check=True
    )

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    self.assertIsNot(model_json['modelTopology']['versions'], None)
    weights_manifest = model_json['weightsManifest']
    self.assertCountEqual(weights_manifest[0]['paths'],
                          ['group1-shard1of1.bin'])
    self.assertIn('weights', weights_manifest[0])
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example #28
  def test_convert_saved_model_with_fused_conv2d(self):
    self._create_saved_model_with_fusable_conv2d()
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )

    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    self.assertIsNot(model_json['modelTopology']['versions'], None)
    nodes = model_json['modelTopology']['node']

    fusedOp = None
    for node in nodes:
      self.assertNotIn('BatchNorm', node['op'])
      self.assertNotIn('Relu', node['op'])
      self.assertNotIn('BiasAdd', node['op'])
      if node['op'] == '_FusedConv2D':
        fusedOp = node
    self.assertIsNot(fusedOp, None)
    self.assertEqual(
        base64.b64decode(fusedOp['attr']['fused_ops']['list']['s'][0]),
        b'BiasAdd')
    self.assertEqual(
        base64.b64decode(fusedOp['attr']['fused_ops']['list']['s'][1]),
        b'Relu')

    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
Example #29
    def test_convert_saved_model_structured_outputs_true(self):
        self._create_saved_model_with_structured_outputs()

        tf_saved_model_conversion_v2.convert_tf_saved_model(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
            use_structured_outputs_names=True)

        tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
        # Check model.json and weights manifest.
        with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
            model_json = json.load(f)
        self.assertTrue(model_json['modelTopology'])
        self.assertIsNot(model_json['modelTopology']['versions'], None)
        signature = model_json['signature']
        self.assertIsNot(signature, None)
        self.assertIsNot(signature['inputs'], None)
        self.assertIsNot(signature['outputs'], None)

        self.assertEqual(
            ["a", "b", "c"],
            model_json['userDefinedMetadata']['structuredOutputKeys'])
Example #30
def convert(arguments):
    args = get_arg_parser().parse_args(arguments)
    if args.show_version:
        print('\ntensorflowjs %s\n' % version.version)
        print('Dependency versions:')
        print('  keras %s' % keras.__version__)
        print('  tensorflow %s' % tf.__version__)
        return

    if not args.input_path:
        raise ValueError(
            'Missing input_path argument. For usage, use the --help flag.')
    if not args.output_path:
        raise ValueError(
            'Missing output_path argument. For usage, use the --help flag.')

    weight_shard_size_bytes = 1024 * 1024 * 4
    if args.weight_shard_size_bytes:
        if args.output_format != common.TFJS_LAYERS_MODEL:
            raise ValueError(
                'The --weight_shard_size_bytes flag is only supported under '
                'output_format=tfjs_layers_model.')
        weight_shard_size_bytes = args.weight_shard_size_bytes

    input_format, output_format = _standardize_input_output_formats(
        args.input_format, args.output_format)

    quantization_dtype = (
        quantization.QUANTIZATION_BYTES_TO_DTYPES[args.quantization_bytes]
        if args.quantization_bytes else None)

    if (args.signature_name and input_format
            not in (common.TF_SAVED_MODEL, common.TF_HUB_MODEL)):
        raise ValueError(
            'The --signature_name flag is applicable only to "tf_saved_model" and '
            '"tf_hub" input format, but the current input format is '
            '"%s".' % input_format)

    # TODO(cais, piyu): More conversion logic can be added as additional
    #   branches below.
    if (input_format == common.KERAS_MODEL
            and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_h5_to_tfjs_layers_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=args.split_weights_by_layer)
    elif (input_format == common.KERAS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_keras_h5_to_tfjs_graph_model_conversion(
            args.input_path,
            output_dir=args.output_path,
            quantization_dtype=quantization_dtype,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    elif (input_format == common.KERAS_SAVED_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_keras_saved_model_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype=quantization_dtype,
            split_weights_by_layer=args.split_weights_by_layer)
    elif (input_format == common.TF_SAVED_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_saved_model(
            args.input_path,
            args.output_path,
            signature_def=args.signature_name,
            saved_model_tags=args.saved_model_tags,
            quantization_dtype=quantization_dtype,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    elif (input_format == common.TF_HUB_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        tf_saved_model_conversion_v2.convert_tf_hub_module(
            args.input_path,
            args.output_path,
            args.signature_name,
            args.saved_model_tags,
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_MODEL):
        dispatch_tensorflowjs_to_keras_h5_conversion(args.input_path,
                                                     args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.KERAS_SAVED_MODEL):
        dispatch_tensorflowjs_to_keras_saved_model_conversion(
            args.input_path, args.output_path)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_LAYERS_MODEL):
        dispatch_tensorflowjs_to_tensorflowjs_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype=_parse_quantization_bytes(
                args.quantization_bytes),
            weight_shard_size_bytes=weight_shard_size_bytes)
    elif (input_format == common.TFJS_LAYERS_MODEL
          and output_format == common.TFJS_GRAPH_MODEL):
        dispatch_tfjs_layers_model_to_tfjs_graph_conversion(
            args.input_path,
            args.output_path,
            quantization_dtype=_parse_quantization_bytes(
                args.quantization_bytes),
            skip_op_check=args.skip_op_check,
            strip_debug_ops=args.strip_debug_ops)
    else:
        raise ValueError(
            'Unsupported input_format - output_format pair: %s - %s' %
            (input_format, output_format))
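For the tf_saved_model to tfjs_graph_model branch above, a hypothetical programmatic invocation (flag names inferred from the args accessed in this function; input_path and output_path assumed to be positionals of get_arg_parser):

convert([
    '--input_format=tf_saved_model',
    '--output_format=tfjs_graph_model',
    '--signature_name=serving_default',
    '/path/to/saved_model',  # hypothetical input_path
    '/path/to/web_model',    # hypothetical output_path
])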