Example #1
def write_artifacts(topology,
                    weights,
                    output_graph,
                    tf_version,
                    quantization_dtype=None):
  """Serialize model topology and weights into a single JSON artifact.

  When `topology` is falsy (e.g., `None`), only the weights are emitted.

  Args:
    topology: tf.GraphDef TensorFlow GraphDef proto object, representing
      the model topology.
    weights: an array of weight groups (as defined in tfjs write_weights).
    output_graph: the output file name to hold all the contents.
    tf_version: TensorFlow version of the input graph.
    quantization_dtype: an optional numpy dtype to quantize weights to for
      compression; only np.uint8 and np.uint16 are supported.
  """
  model_json = {
      common.FORMAT_KEY: common.TFJS_GRAPH_MODEL_FORMAT,
      # TODO(piyu): Add tensorflow version below by using `meta_info_def`.
      common.GENERATED_BY_KEY: tf_version,
      common.CONVERTED_BY_KEY: common.get_converted_by(),
  }
  model_json[common.ARTIFACT_MODEL_TOPOLOGY_KEY] = topology if topology else None

  # Weight binaries land next to the model JSON; the manifest is embedded
  # in the JSON instead of written as a separate file.
  manifest = write_weights.write_weights(
      weights,
      os.path.dirname(output_graph),
      write_manifest=False,
      quantization_dtype=quantization_dtype)
  assert isinstance(manifest, list)
  model_json[common.ARTIFACT_WEIGHTS_MANIFEST_KEY] = manifest

  with open(output_graph, 'wt') as out_file:
    json.dump(model_json, out_file)
def write_artifacts(topology,
                    weights,
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype_map=None,
                    weight_shard_size_bytes=1024 * 1024 * 4,
                    initializer_graph_def=None,
                    metadata=None):
    """Serialize graph topology, signature, and weights into output_graph.

    If `topology` is falsy (e.g., `None`), only weights are emitted.

    Args:
      topology: tf.GraphDef TensorFlow GraphDef proto object, representing
        the model topology.
      weights: an array of weight groups (as defined in tfjs write_weights).
      output_graph: the output file name to hold all the contents.
      tf_version: TensorFlow version of the input graph.
      signature_def: the SignatureDef of the inference graph.
      quantization_dtype_map: a mapping from dtype (`uint8`, `uint16`,
        `float16`) to weights names; the weight mapping supports wildcard
        substitution.
      weight_shard_size_bytes: shard size (in bytes) of the weight files;
        the size of each weight file will be <= this value.
      initializer_graph_def: tf.GraphDef proto object for initializer graph.
      metadata: user defined metadata map.
    """
    model_json = {
        common.FORMAT_KEY: common.TFJS_GRAPH_MODEL_FORMAT,
        # TODO(piyu): Add tensorflow version below by using `meta_info_def`.
        common.GENERATED_BY_KEY: tf_version,
        common.CONVERTED_BY_KEY: common.get_converted_by(),
        common.SIGNATURE_KEY: MessageToDict(signature_def),
    }
    model_json[common.ARTIFACT_MODEL_TOPOLOGY_KEY] = topology if topology else None

    if metadata:
        model_json[common.USER_DEFINED_METADATA_KEY] = metadata

    # Record the initializer graph only when it actually contains nodes.
    if initializer_graph_def and initializer_graph_def.node:
        model_json[common.ARTIFACT_MODEL_INITIALIZER] = MessageToDict(
            initializer_graph_def)

    manifest = write_weights.write_weights(
        weights,
        os.path.dirname(output_graph),
        write_manifest=False,
        quantization_dtype_map=quantization_dtype_map,
        shard_size_bytes=weight_shard_size_bytes)
    assert isinstance(manifest, list)
    model_json[common.ARTIFACT_WEIGHTS_MANIFEST_KEY] = manifest

    # GFile supports non-local filesystems (e.g., GCS) transparently.
    with tf.io.gfile.GFile(output_graph, 'w') as out_file:
        json.dump(model_json, out_file)
Example #3
def write_artifacts(topology,
                    weights,
                    output_dir,
                    quantization_dtype_map=None,
                    weight_shard_size_bytes=1024 * 1024 * 4,
                    metadata=None):
    """Writes weights and topology to the output_dir.

    If `topology` is Falsy (e.g., `None`), only emit weights to output_dir.

    Args:
      topology: a JSON dictionary, representing the Keras config.
      weights: an array of weight groups (as defined in tfjs write_weights).
      output_dir: the directory to hold all the contents.
      quantization_dtype_map: (Optional) A mapping from dtype
        (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
        supports wildcard substitution.
      weight_shard_size_bytes: Shard size (in bytes) of the weight files.
        The size of each weight file will be <= this value.
      metadata: User defined metadata map.

    Raises:
      ValueError: if `weight_shard_size_bytes` is not a positive integer,
        or `output_dir` already exists as a regular file.
    """
    # We write the topology after since write_weights makes no promises about
    # preserving directory contents.
    if not (isinstance(weight_shard_size_bytes, int)
            and weight_shard_size_bytes > 0):
        raise ValueError(
            'Expected weight_shard_size_bytes to be a positive integer, '
            'but got %s' % weight_shard_size_bytes)

    if os.path.isfile(output_dir):
        # Bug fix: the original used "%d" on a string path, which raised a
        # TypeError at format time instead of the intended ValueError.
        raise ValueError(
            'Path "%s" already exists as a file (not a directory).' %
            output_dir)

    model_json = {
        common.FORMAT_KEY: common.TFJS_LAYERS_MODEL_FORMAT,
        common.GENERATED_BY_KEY: _get_generated_by(topology),
        common.CONVERTED_BY_KEY: common.get_converted_by()
    }

    if metadata:
        model_json[common.USER_DEFINED_METADATA_KEY] = metadata

    model_json[common.ARTIFACT_MODEL_TOPOLOGY_KEY] = topology or None
    weights_manifest = write_weights.write_weights(
        weights,
        output_dir,
        write_manifest=False,
        quantization_dtype_map=quantization_dtype_map,
        shard_size_bytes=weight_shard_size_bytes)
    assert isinstance(weights_manifest, list)
    model_json[common.ARTIFACT_WEIGHTS_MANIFEST_KEY] = weights_manifest

    model_json_path = os.path.join(output_dir,
                                   common.ARTIFACT_MODEL_JSON_FILE_NAME)
    with open(model_json_path, 'wt') as f:
        json.dump(model_json, f)
def write_artifacts(topology,
                    weights,
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype=None,
                    weight_shard_size_bytes=1024 * 1024 * 4):
    """Serialize graph topology and weights into output_graph.

    If `topology` is falsy (e.g., `None`), only weights are emitted.

    Args:
      topology: tf.GraphDef TensorFlow GraphDef proto object, representing
        the model topology.
      weights: an array of weight groups (as defined in tfjs write_weights).
      output_graph: the output file name to hold all the contents.
      tf_version: TensorFlow version of the input graph.
      signature_def: the SignatureDef of the inference graph.
      quantization_dtype: an optional numpy dtype to quantize weights to for
        compression; only np.uint8 and np.uint16 are supported.
      weight_shard_size_bytes: shard size (in bytes) of the weight files;
        the size of each weight file will be <= this value.
    """
    # The signature is stored under the user-defined-metadata key in this
    # format revision.
    model_json = {
        common.FORMAT_KEY: common.TFJS_GRAPH_MODEL_FORMAT,
        # TODO(piyu): Add tensorflow version below by using `meta_info_def`.
        common.GENERATED_BY_KEY: tf_version,
        common.CONVERTED_BY_KEY: common.get_converted_by(),
        common.USER_DEFINED_METADATA_KEY: {
            common.SIGNATURE_KEY: MessageToDict(signature_def)
        },
    }
    model_json[common.ARTIFACT_MODEL_TOPOLOGY_KEY] = topology if topology else None

    manifest = write_weights.write_weights(
        weights,
        os.path.dirname(output_graph),
        write_manifest=False,
        quantization_dtype=quantization_dtype,
        shard_size_bytes=weight_shard_size_bytes)
    assert isinstance(manifest, list)
    model_json[common.ARTIFACT_WEIGHTS_MANIFEST_KEY] = manifest

    # GFile supports non-local filesystems (e.g., GCS) transparently.
    with tf.io.gfile.GFile(output_graph, 'w') as out_file:
        json.dump(model_json, out_file)
Example #5
def write_artifacts(topology, weights, output_dir, quantization_dtype=None):
    """Writes weights and topology to the output_dir.

    If `topology` is Falsy (e.g., `None`), only emit weights to output_dir.

    Args:
      topology: a JSON dictionary, representing the Keras config.
      weights: an array of weight groups (as defined in tfjs write_weights).
      output_dir: the directory to hold all the contents.
      quantization_dtype: An optional numpy dtype to quantize weights to for
        compression. Only np.uint8 and np.uint16 are supported.

    Raises:
      ValueError: if `output_dir` already exists as a regular file.
    """
    # TODO(cais, nielsene): This method should allow optional arguments of
    #   `write_weights.write_weights` (e.g., shard size) and forward them.
    # We write the topology after since write_weights makes no promises about
    # preserving directory contents.
    if os.path.isfile(output_dir):
        # Bug fix: the original used "%d" on a string path, which raised a
        # TypeError at format time instead of the intended ValueError.
        raise ValueError(
            'Path "%s" already exists as a file (not a directory).' %
            output_dir)

    model_json = {
        common.FORMAT_KEY: common.TFJS_LAYERS_MODEL_FORMAT,
        common.GENERATED_BY_KEY: _get_generated_by(topology),
        common.CONVERTED_BY_KEY: common.get_converted_by(),
    }

    model_json[common.ARTIFACT_MODEL_TOPOLOGY_KEY] = topology or None
    weights_manifest = write_weights.write_weights(
        weights,
        output_dir,
        write_manifest=False,
        quantization_dtype=quantization_dtype)
    assert isinstance(weights_manifest, list)
    model_json[common.ARTIFACT_WEIGHTS_MANIFEST_KEY] = weights_manifest

    model_json_path = os.path.join(output_dir,
                                   common.ARTIFACT_MODEL_JSON_FILE_NAME)
    with open(model_json_path, 'wt') as f:
        json.dump(model_json, f)