Example No. 1
0
def extract_weights(graph_def,
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype_map=None,
                    weight_shard_size_bytes=1024 * 1024 * 4,
                    initializer_graph_def=None,
                    metadata=None):
  """Takes a Python GraphDef object and extracts the weights.

  Constant nodes are pulled out of `graph_def` (including nodes inside
  library functions and the optional initializer graph) and written,
  together with the graph topology, via `write_artifacts`.

  Args:
    graph_def: tf.GraphDef TensorFlow GraphDef proto object, which represents
      the model topology.
    output_graph: path to write the output graph/weights artifacts to.
    tf_version: Tensorflow version of the input graph.
    signature_def: the SignatureDef of the inference graph.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution. Only np.uint8 and np.uint16 are
      supported for quantization compression.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    initializer_graph_def: tf.GraphDef proto object for initializer graph.
    metadata: User defined metadata map.
  """
  # Constants in the main graph.
  global_manifest = extract_const_nodes(graph_def.node)

  # Constants inside library functions: rename them first (scoped by the
  # function's signature name) so constant names stay unique across
  # functions, then splice the renamed nodes back before extraction.
  function_manifests = []
  for func in graph_def.library.function:
    nodes = graph_rewrite_util.rename_constants(
        func.node_def, func.signature.name)
    del func.node_def[:]
    func.node_def.extend(nodes)
    function_manifests += extract_const_nodes(func.node_def)

  # Constants in the (optional) initializer graph.
  initializer_manifests = []
  if initializer_graph_def:
    initializer_manifests = extract_const_nodes(initializer_graph_def.node)

  print('Writing weight file ' + output_graph + '...')

  # All manifests are combined into a single weight group (hence the
  # single-element outer list).
  write_artifacts(MessageToDict(graph_def),
                  [global_manifest +
                   function_manifests +
                   initializer_manifests],
                  output_graph,
                  tf_version, signature_def,
                  quantization_dtype_map=quantization_dtype_map,
                  weight_shard_size_bytes=weight_shard_size_bytes,
                  initializer_graph_def=initializer_graph_def,
                  metadata=metadata)
def extract_weights(graph_def,
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype=None,
                    weight_shard_size_bytes=1024 * 1024 * 4):
    """Extract the weights from a Python GraphDef object.

    Constant nodes from the main graph and from every library function are
    collected into a single weight group and written, together with the
    graph topology, via `write_artifacts`.

    Args:
      graph_def: tf.GraphDef TensorFlow GraphDef proto object, which
        represents the model topology.
      output_graph: path to write the output graph/weights artifacts to.
      tf_version: Tensorflow version of the input graph.
      signature_def: the SignatureDef of the inference graph.
      quantization_dtype: An optional numpy dtype to quantize weights to for
        compression. Only np.uint8 and np.uint16 are supported.
      weight_shard_size_bytes: Shard size (in bytes) of the weight files.
        The size of each weight file will be <= this value.
    """
    # Start with the constants defined at the top level of the graph.
    manifests = extract_const_nodes(graph_def.node)

    # Harvest constants from each library function as well. Constants are
    # renamed (scoped by the function's signature name) so names remain
    # unique, and the renamed nodes are spliced back into the function
    # before extraction.
    for library_func in graph_def.library.function:
        renamed = graph_rewrite_util.rename_constants(
            library_func.node_def, library_func.signature.name)
        del library_func.node_def[:]
        library_func.node_def.extend(renamed)
        manifests = manifests + extract_const_nodes(library_func.node_def)

    print('Writing weight file ' + output_graph + '...')

    # All manifests form a single weight group (single-element outer list).
    write_artifacts(MessageToDict(graph_def),
                    [manifests],
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype=quantization_dtype,
                    weight_shard_size_bytes=weight_shard_size_bytes)