Code Example #1
File: decent_q_test.py Project: Xilinx/Vitis-AI
    def testQuantizeTrain(self):
        input_meta_name = "input_meta.meta"
        with ops.Graph().as_default():
            self._build_graph(is_freezed=False)
            graph_def = ops.get_default_graph().as_graph_def()
            input_meta_path = os.path.join(self.get_temp_dir(),
                                           input_meta_name)
            saver_lib.export_meta_graph(filename=input_meta_path)
            original_graph_node = [node.name for node in graph_def.node]

        meta_graph_def = MetaGraphDef()
        meta_graph_def = self._parse_def_from_file(meta_graph_def,
                                                   input_meta_path)
        q_config, _ = self._compose_config()
        decent_q.quantize_train(meta_graph_def, q_config)

        output_meta_graph_def = MetaGraphDef()
        output_meta_graph_path = os.path.join(
            self.get_temp_dir(), "quantize_train/quantize_train.ckpt.meta")
        output_meta_graph_def = self._parse_def_from_file(
            output_meta_graph_def, output_meta_graph_path)
        quantize_train_graph_def = output_meta_graph_def.graph_def
        for node in quantize_train_graph_def.node:
            if node.name not in original_graph_node:
                self.assertEqual(node.op, "FixNeuron")
Code Example #2
  def testNoVariables(self):
    test_dir = _TestDir("no_variables")
    filename = os.path.join(test_dir, "metafile")

    input_feed_value = -10  # Arbitrary input value for feed_dict.

    orig_graph = tf.Graph()
    with self.test_session(graph=orig_graph) as sess:
      # Create a minimal graph with zero variables.
      input_tensor = tf.placeholder(tf.float32, shape=[], name="input")
      offset = tf.constant(42, dtype=tf.float32, name="offset")
      output_tensor = tf.add(input_tensor, offset, name="add_offset")

      # Add input and output tensors to graph collections.
      tf.add_to_collection("input_tensor", input_tensor)
      tf.add_to_collection("output_tensor", output_tensor)

      output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
      self.assertEqual(output_value, 32)

      # Generates MetaGraphDef.
      #
      # Note that this is calling the saver *module-level* export_meta_graph and
      # not the Saver.export_meta_graph instance-level method.
      meta_graph_def = saver_module.export_meta_graph(
          filename=filename,
          graph_def=tf.get_default_graph().as_graph_def(add_shapes=True),
          collection_list=["input_tensor", "output_tensor"],
          saver_def=None,
      )

    # Create a clean graph and import the MetaGraphDef nodes.
    new_graph = tf.Graph()
    with self.test_session(graph=new_graph) as sess:
      # Import the previously exported meta graph.
      saver_instance = saver_module.import_meta_graph(filename)
      # The saver instance should be None since there are no graph variables
      # to be restored in this case.
      self.assertIsNone(saver_instance)

      # Re-exports the current graph state for comparison to the original.
      new_meta_graph_def = saver_module.export_meta_graph(filename + "_new")
      self.assertProtoEquals(meta_graph_def, new_meta_graph_def)

      # Ensures that we can still get a reference to our graph collections.
      new_input_tensor = tf.get_collection("input_tensor")[0]
      new_output_tensor = tf.get_collection("output_tensor")[0]
      # Verifies that the new graph computes the same result as the original.
      new_output_value = sess.run(
          new_output_tensor, {new_input_tensor: input_feed_value})
      self.assertEqual(new_output_value, output_value)
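For reference, here is a minimal self-contained sketch of the same no-variables round trip using the public `tf.compat.v1` names (`tf.train.export_meta_graph` / `tf.train.import_meta_graph`); the temporary path and tensor names are illustrative, not taken from the test above.

import os
import tempfile

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

filename = os.path.join(tempfile.mkdtemp(), "metafile")

orig_graph = tf.Graph()
with orig_graph.as_default():
    # A graph with zero variables, mirroring the test above.
    x = tf.placeholder(tf.float32, shape=[], name="input")
    y = tf.add(x, tf.constant(42.0), name="add_offset")
    tf.add_to_collection("output_tensor", y)
    tf.train.export_meta_graph(filename=filename)

new_graph = tf.Graph()
with new_graph.as_default():
    # There are no variables to restore, so no Saver is created.
    assert tf.train.import_meta_graph(filename) is None
    restored = tf.get_collection("output_tensor")[0]
    with tf.Session() as sess:
        print(sess.run(restored, {"input:0": -10.0}))  # -10 + 42 == 32.0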
Code Example #3
File: saver_test.py Project: 2er0/tensorflow
  def testNoVariables(self):
    test_dir = _TestDir("no_variables")
    filename = os.path.join(test_dir, "metafile")

    input_feed_value = -10  # Arbitrary input value for feed_dict.

    orig_graph = tf.Graph()
    with self.test_session(graph=orig_graph) as sess:
      # Create a minimal graph with zero variables.
      input_tensor = tf.placeholder(tf.float32, shape=[], name="input")
      offset = tf.constant(42, dtype=tf.float32, name="offset")
      output_tensor = tf.add(input_tensor, offset, name="add_offset")

      # Add input and output tensors to graph collections.
      tf.add_to_collection("input_tensor", input_tensor)
      tf.add_to_collection("output_tensor", output_tensor)

      output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
      self.assertEqual(output_value, 32)

      # Generates MetaGraphDef.
      #
      # Note that this is calling the saver *module-level* export_meta_graph and
      # not the Saver.export_meta_graph instance-level method.
      meta_graph_def = saver_module.export_meta_graph(
          filename=filename,
          graph_def=tf.get_default_graph().as_graph_def(),
          collection_list=["input_tensor", "output_tensor"],
          saver_def=None,
      )

    # Create a clean graph and import the MetaGraphDef nodes.
    new_graph = tf.Graph()
    with self.test_session(graph=new_graph) as sess:
      # Import the previously exported meta graph.
      saver_instance = saver_module.import_meta_graph(filename)
      # The saver instance should be None since there are no graph variables
      # to be restored in this case.
      self.assertIsNone(saver_instance)

      # Re-exports the current graph state for comparison to the original.
      new_meta_graph_def = saver_module.export_meta_graph(filename + "_new")
      self.assertProtoEquals(meta_graph_def, new_meta_graph_def)

      # Ensures that we can still get a reference to our graph collections.
      new_input_tensor = tf.get_collection("input_tensor")[0]
      new_output_tensor = tf.get_collection("output_tensor")[0]
      # Verifies that the new graph computes the same result as the original.
      new_output_value = sess.run(
          new_output_tensor, {new_input_tensor: input_feed_value})
      self.assertEqual(new_output_value, output_value)
Code Example #4
    def do_transformation(self):
        convert = False
        for node in self.model.node:
            if 'Conv' in node.op and \
               'data_format' in node.attr and \
               node.attr['data_format'].s == b'NCHW':
                convert = True
                break
        if convert:
            assert tf.version.VERSION >= '2.4.0', \
                'layout convert is only supported by tensorflow 2.4.0 and above'

            g = tf.Graph()
            with g.as_default():  # pylint: disable=not-context-manager
                tf.compat.v1.import_graph_def(self.model, name='')
                meta_graph = saver_lib.export_meta_graph(graph_def=self.model,
                                                         graph=g,
                                                         clear_devices=True)
                fetch_collection = meta_graph_pb2.CollectionDef()
                for fetch in self.outputs:
                    fetch_collection.node_list.value.append(fetch)  # pylint: disable=no-member
                meta_graph.collection_def["train_op"].CopyFrom(  # pylint: disable=no-member
                    fetch_collection)  # pylint: disable=no-member
            config = config_pb2.ConfigProto()
            convert = rewriter_config_pb2.RewriterConfig.NCHW_TO_NHWC  # pylint: disable=no-member
            config.graph_options.rewrite_options.CopyFrom(  # pylint: disable=no-member
                rewriter_config_pb2.RewriterConfig(
                    cpu_layout_conversion=convert))
            optimized_graph = tf_optimizer.OptimizeGraph(config, meta_graph)
            return optimized_graph
        else:
            return self.model
Code Example #5
def _run_inline_graph_optimization(func):
  """Apply function inline optimization to the graph.

  Returns the GraphDef after Grappler's function inlining optimization is
  applied. This optimization does not work on models with control flow.

  Args:
    func: ConcreteFunction.

  Returns:
    GraphDef
  """
  meta_graph = export_meta_graph(
      graph_def=func.graph.as_graph_def(), graph=func.graph)

  # Add a collection 'train_op' so that Grappler knows the outputs.
  fetch_collection = meta_graph_pb2.CollectionDef()
  for array in func.inputs + func.outputs:
    fetch_collection.node_list.value.append(array.name)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  # Initialize RewriterConfig with everything disabled except function inlining.
  config = config_pb2.ConfigProto()
  rewrite_options = config.graph_options.rewrite_options
  rewrite_options.optimizers.append("function")
  return tf_optimizer.OptimizeGraph(config, meta_graph)
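A hedged usage sketch for the helper above (not from the original source): assuming `_run_inline_graph_optimization` is in scope together with the imports the snippet relies on, a ConcreteFunction can be inlined like this; `add_one` is an illustrative function.

import tensorflow as tf

@tf.function
def add_one(x):
    return x + 1.0

concrete = add_one.get_concrete_function(tf.TensorSpec([], tf.float32))
# The returned GraphDef has the function call inlined into plain ops.
inlined_graph_def = _run_inline_graph_optimization(concrete)
print([node.op for node in inlined_graph_def.node])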
Code Example #6
    def do_transformation(self):
        try:
            g = tf.Graph()
            with g.as_default():
                tf.compat.v1.import_graph_def(self.model, name='')
                meta_graph = saver.export_meta_graph(graph_def=self.model,
                                                     graph=g,
                                                     clear_devices=True)
                fetch_collection = meta_graph_pb2.CollectionDef()
                for fetch in self.outputs:
                    fetch_collection.node_list.value.append(fetch)
                meta_graph.collection_def["train_op"].CopyFrom(
                    fetch_collection)
                config = config_pb2.ConfigProto()
                rewriter_config = config.graph_options.rewrite_options
                rewriter_config.optimizers.append('pruning')
                rewriter_config.optimizers.append('dependency')
                rewriter_config.optimizers.append('debug_stripper')
                rewriter_config.optimizers.append('loop')
                rewriter_config.min_graph_nodes = -1

                optimized_graph = tf_optimizer.OptimizeGraph(
                    config, meta_graph)

            return optimized_graph
        except Exception as e:
            self.logger.warning("Failed to run grappler pass due to {}".format(
                str(e)))
            return self.model
Code Example #7
  def testGradient(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest('GPU required')

    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 200, 200, 3], seed=0)
    y = conv_layers.conv2d(x, 32, [3, 3])
    z = conv_layers.conv2d(y, 32, [3, 3])
    optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
    loss = math_ops.reduce_mean(z)
    train_op = optimizer.minimize(loss)
    graph = ops.get_default_graph()
    graph.add_to_collection('train_op', train_op)
    meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())

    rewrite_options = rewriter_config_pb2.RewriterConfig(
        optimize_tensor_layout=True)
    optimized_graph = tf_optimizer.OptimizeGraph(rewrite_options, meta_graph)

    found = 0
    for node in optimized_graph.node:
      if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
        found += 1
        self.assertEqual(node.attr['data_format'].s, b'NCHW')
    self.assertEqual(found, 5)
Code Example #8
    def _convert_saved_model_v2(self):
        """Convert the input SavedModel in 2.0 format."""
        assert context.executing_eagerly()

        self._saved_model = load.load(self._input_saved_model_dir,
                                      self._input_saved_model_tags)
        func = self._saved_model.signatures[
            self._input_saved_model_signature_key]
        frozen_func = convert_to_constants.convert_variables_to_constants_v2(
            func)
        self._grappler_meta_graph_def = saver.export_meta_graph(
            graph_def=frozen_func.graph.as_graph_def(),
            graph=frozen_func.graph)

        # Add a collection 'train_op' so that Grappler knows the outputs.
        fetch_collection = meta_graph_pb2.CollectionDef()
        for array in frozen_func.inputs + frozen_func.outputs:
            fetch_collection.node_list.value.append(array.name)
        self._grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
            fetch_collection)

        # Run TRT optimizer in Grappler to convert the graph.
        self._run_conversion()
        self._converted_func = wrap_function.function_from_graph_def(
            self._converted_graph_def,
            [tensor.name for tensor in frozen_func.inputs],
            [tensor.name for tensor in frozen_func.outputs])
Code Example #9
def main(_):
    if FLAGS.metagraphdef:
        with gfile.GFile(FLAGS.metagraphdef) as meta_file:
            metagraph = meta_graph_pb2.MetaGraphDef()
            metagraph.ParseFromString(meta_file.read())
    else:
        with gfile.GFile(FLAGS.graphdef) as graph_file:
            graph_def = graph_pb2.GraphDef()
            if FLAGS.graphdef.endswith(".pbtxt"):
                text_format.Merge(graph_file.read(), graph_def)
            else:
                graph_def.ParseFromString(graph_file.read())
            importer.import_graph_def(graph_def, name="")
            graph = ops.get_default_graph()
            fetch = graph.get_operation_by_name(FLAGS.fetch)
            graph.add_to_collection("train_op", fetch)
            metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def(),
                                                graph=graph)

    if FLAGS.rewriter_config is not None:
        rewriter_config = rewriter_config_pb2.RewriterConfig()
        text_format.Merge(FLAGS.rewriter_config, rewriter_config)
        optimized_graph = tf_optimizer.OptimizeGraph(rewriter_config,
                                                     metagraph)
        metagraph.graph_def.CopyFrom(optimized_graph)

    report = cost_analyzer.GenerateCostReport(metagraph, FLAGS.per_node_report)
    print(report)
Code Example #10
def _run_tf_optimizer(config: ConfigProto, graph: tf.Graph,
                      signature_def: SignatureDef) -> GraphDef:
    """Run the TF optimizer ("grappler") on a graph"""
    graph_def = graph.as_graph_def()
    meta_graph = export_meta_graph(graph_def=graph_def, graph=graph)
    meta_graph.signature_def['not_used_key'].CopyFrom(signature_def)
    return tf_optimizer.OptimizeGraph(config, meta_graph)
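A hedged driver for `_run_tf_optimizer` (an assumption about the caller, not part of the source): the SignatureDef is built with the `tf.compat.v1.saved_model` utilities so Grappler knows which tensors are feeds and fetches.

import tensorflow.compat.v1 as tf
from tensorflow.core.protobuf.config_pb2 import ConfigProto

tf.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, [None], name="x")
    y = tf.identity(x * 2.0, name="y")
    # The signature's inputs/outputs stand in for feeds and fetches.
    signature = tf.saved_model.build_signature_def(
        inputs={"x": tf.saved_model.build_tensor_info(x)},
        outputs={"y": tf.saved_model.build_tensor_info(y)})

optimized_graph_def = _run_tf_optimizer(ConfigProto(), graph, signature)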
Code Example #11
    def testGradient(self):
        if not test.is_gpu_available(cuda_only=True):
            self.skipTest('GPU required')

        random_seed.set_random_seed(0)
        x = random_ops.truncated_normal([1, 200, 200, 3], seed=0)
        y = conv_layers.conv2d(x, 32, [3, 3])
        z = conv_layers.conv2d(y, 32, [3, 3])
        optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
        loss = math_ops.reduce_mean(z)
        train_op = optimizer.minimize(loss)
        graph = ops.get_default_graph()
        graph.add_to_collection('train_op', train_op)
        meta_graph = saver.export_meta_graph(graph_def=graph.as_graph_def())

        rewrite_options = rewriter_config_pb2.RewriterConfig(
            optimize_tensor_layout=True)
        optimized_graph = tf_optimizer.OptimizeGraph(rewrite_options,
                                                     meta_graph)

        found = 0
        for node in optimized_graph.node:
            if node.op in [
                    'Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput'
            ]:
                found += 1
                self.assertEqual(node.attr['data_format'].s, b'NCHW')
        self.assertEqual(found, 5)
Code Example #12
def main(_):
  if FLAGS.metagraphdef:
    with gfile.GFile(FLAGS.metagraphdef) as meta_file:
      metagraph = meta_graph_pb2.MetaGraphDef()
      metagraph.ParseFromString(meta_file.read())
  else:
    with gfile.GFile(FLAGS.graphdef) as graph_file:
      graph_def = graph_pb2.GraphDef()
      if FLAGS.graphdef.endswith(".pbtxt"):
        text_format.Merge(graph_file.read(), graph_def)
      else:
        graph_def.ParseFromString(graph_file.read())
      importer.import_graph_def(graph_def, name="")
      graph = ops.get_default_graph()
      fetch = graph.get_operation_by_name(FLAGS.fetch)
      graph.add_to_collection("train_op", fetch)
      metagraph = saver.export_meta_graph(
          graph_def=graph.as_graph_def(), graph=graph)

  if FLAGS.rewriter_config is not None:
    rewriter_config = rewriter_config_pb2.RewriterConfig()
    text_format.Merge(FLAGS.rewriter_config, rewriter_config)
    optimized_graph = tf_optimizer.OptimizeGraph(rewriter_config, metagraph)
    metagraph.graph_def.CopyFrom(optimized_graph)

  report = cost_analyzer.GenerateCostReport(metagraph, FLAGS.per_node_report)
  print(report)
Code Example #13
def optimize_graph(func,
                   output_graph,
                   tf_version,
                   quantization_dtype=None,
                   skip_op_check=False,
                   strip_debug_ops=False,
                   graph=None):
  """Takes a Python Graph object and optimizes the graph.

  Args:
    func: ConcreteFunction TensorFlow function def.
    output_graph: The location of the output graph.
    tf_version: Tensorflow version of the input graph.
    quantization_dtype: An optional numpy dtype to quantize weights to for
      compression. Only np.uint8 and np.uint16 are supported.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    graph: Optional tf.Graph object to optimize; defaults to the graph of
      `func`.
  """
  if graph is None:
    graph = func.graph
  graph_def = graph.as_graph_def()
  unsupported = validate(graph_def.node, skip_op_check,
                         strip_debug_ops)
  if unsupported:
    raise ValueError('Unsupported Ops in the model before optimization\n' +
                     ', '.join(unsupported))

  config = config_pb2.ConfigProto()
  rewriter_config = config.graph_options.rewrite_options
  rewriter_config.optimizers[:] = [
      'pruning', 'constfold', 'arithmetic', 'dependency', 'pruning', 'remap',
      'constfold', 'arithmetic', 'dependency'
  ]
  if strip_debug_ops:
    rewriter_config.optimizers.insert(0, 'debug_stripper')
  meta_graph = export_meta_graph(
      graph_def=graph_def, graph=graph)

  # Add a collection 'train_op' so that Grappler knows the outputs.
  fetch_collection = meta_graph_pb2.CollectionDef()
  if func is not None:
    for array in func.inputs + func.outputs:
      fetch_collection.node_list.value.append(array.name)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  optimized_graph = tf_optimizer.OptimizeGraph(
      config, meta_graph, cluster=get_cluster())

  unsupported = validate(optimized_graph.node, skip_op_check,
                         strip_debug_ops)

  if unsupported:
    raise ValueError('Unsupported Ops in the model after optimization\n' +
                     ', '.join(unsupported))

  extract_weights(
      optimized_graph, output_graph, tf_version, quantization_dtype,
      skip_op_check)
  return optimized_graph
Code Example #14
    def __init__(self, keras_model_path, inputshape, in_nodes, dest_nodes):
        if LooseVersion(tensorflow.__version__) < LooseVersion('1.8.0'):
            raise ImportError(
                'Your TensorFlow version %s is outdated. '
                'MMdnn requires tensorflow>=1.8.0' % tensorflow.__version__)

        super(TensorflowParser2, self).__init__()
        self.weight_loaded = True

        import tensorflow as tf
        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
        model = tf.keras.models.load_model(keras_model_path, compile=False)
        full_model = tf.function(lambda x: model(x))
        full_model = full_model.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
        frozen_func = convert_variables_to_constants_v2(full_model)
        frozen_func.graph.as_graph_def()
        g = frozen_func.graph

        from tensorflow.python.client.session import Session
        from tensorflow.python.training.saver import export_meta_graph
        with Session(graph=g) as sess:
            tempdir = tempfile.mkdtemp()
            meta_graph_def = export_meta_graph(filename=os.path.join(tempdir, 'my-model.meta'))
            model = meta_graph_def.graph_def
            shutil.rmtree(tempdir)

        self.tf_graph = TensorflowGraph(model)
        self.tf_graph.build()
Code Example #15
def get_metagraph():
    """Constructs and returns a MetaGraphDef from the input file."""
    if FLAGS.metagraphdef:
        with gfile.GFile(FLAGS.metagraphdef) as meta_file:
            metagraph = meta_graph_pb2.MetaGraphDef()
            if FLAGS.metagraphdef.endswith(".pbtxt"):
                text_format.Merge(meta_file.read(), metagraph)
            else:
                metagraph.ParseFromString(meta_file.read())
        if FLAGS.fetch is not None:
            fetch_collection = meta_graph_pb2.CollectionDef()
            for fetch in FLAGS.fetch.split(","):
                fetch_collection.node_list.value.append(fetch)
            metagraph.collection_def["train_op"].CopyFrom(fetch_collection)
    else:
        with gfile.GFile(FLAGS.graphdef) as graph_file:
            graph_def = graph_pb2.GraphDef()
            if FLAGS.graphdef.endswith(".pbtxt"):
                text_format.Merge(graph_file.read(), graph_def)
            else:
                graph_def.ParseFromString(graph_file.read())
            importer.import_graph_def(graph_def, name="")
            graph = ops.get_default_graph()
            for fetch in FLAGS.fetch.split(","):
                fetch_op = graph.get_operation_by_name(fetch)
                graph.add_to_collection("train_op", fetch_op)
            metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def(),
                                                graph=graph)
    return metagraph
Code Example #16
def get_metagraph():
  """Constructs and returns a MetaGraphDef from the input file."""
  if FLAGS.metagraphdef:
    with gfile.GFile(FLAGS.metagraphdef) as meta_file:
      metagraph = meta_graph_pb2.MetaGraphDef()
      if FLAGS.metagraphdef.endswith(".pbtxt"):
        text_format.Merge(meta_file.read(), metagraph)
      else:
        metagraph.ParseFromString(meta_file.read())
    if FLAGS.fetch is not None:
      fetch_collection = meta_graph_pb2.CollectionDef()
      for fetch in FLAGS.fetch.split(","):
        fetch_collection.node_list.value.append(fetch)
      metagraph.collection_def["train_op"].CopyFrom(fetch_collection)
  else:
    with gfile.GFile(FLAGS.graphdef) as graph_file:
      graph_def = graph_pb2.GraphDef()
      if FLAGS.graphdef.endswith(".pbtxt"):
        text_format.Merge(graph_file.read(), graph_def)
      else:
        graph_def.ParseFromString(graph_file.read())
      importer.import_graph_def(graph_def, name="")
      graph = ops.get_default_graph()
      for fetch in FLAGS.fetch.split(","):
        fetch_op = graph.get_operation_by_name(fetch)
        graph.add_to_collection("train_op", fetch_op)
      metagraph = saver.export_meta_graph(
          graph_def=graph.as_graph_def(), graph=graph)
  return metagraph
Code Example #17
File: loader.py Project: celidos/TensorRT_study
    def constfold(self, graphdef, output_names):
        from tensorflow.core.protobuf import (config_pb2, meta_graph_pb2,
                                              rewriter_config_pb2)
        from tensorflow.python.framework import importer, ops
        from tensorflow.python.grappler import tf_optimizer
        from tensorflow.python.training import saver

        graph = ops.Graph()
        with graph.as_default():
            output_collection = meta_graph_pb2.CollectionDef()
            output_list = output_collection.node_list.value
            for output in output_names:
                output_list.append(output.encode("utf-8"))

            importer.import_graph_def(graphdef, name="")
            metagraph = saver.export_meta_graph(
                graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
            metagraph.collection_def["train_op"].CopyFrom(output_collection)

        rewriter_config = rewriter_config_pb2.RewriterConfig()
        rewriter_config.optimizers.extend(["constfold"])
        rewriter_config.meta_optimizer_iterations = (
            rewriter_config_pb2.RewriterConfig.ONE)

        session_config = config_pb2.ConfigProto()
        session_config.graph_options.rewrite_options.CopyFrom(rewriter_config)
        return tf_optimizer.OptimizeGraph(session_config,
                                          metagraph,
                                          graph_id=b"graph")
Code Example #19
  def testGraphFromMetaGraphBecomesAvailable(self):
    """Test accumulator by writing values and then reading them."""

    directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
    if gfile.IsDirectory(directory):
      gfile.DeleteRecursively(directory)
    gfile.MkDir(directory)

    writer = writer_lib.FileWriter(directory, max_queue=100)

    with ops.Graph().as_default() as graph:
      _ = constant_op.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)

    writer.flush()

    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.GRAPH: True,
        ea.META_GRAPH: True,
    })
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
    self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
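The same flow with public API names, as a sketch (TF 1.x-style graph mode assumed; the log directory is illustrative):

import tempfile

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logdir = tempfile.mkdtemp()

graph = tf.Graph()
with graph.as_default():
    tf.constant([2.0, 1.0], name="values")

# Write the exported MetaGraphDef into a TensorBoard event file.
writer = tf.summary.FileWriter(logdir, max_queue=100)
writer.add_meta_graph(
    tf.train.export_meta_graph(graph_def=graph.as_graph_def(add_shapes=True)))
writer.flush()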
Code Example #20
    def do_transformation(self):
        try:
            g = tf.Graph()
            with g.as_default():
                tf.compat.v1.import_graph_def(self.model, name='')
                meta_graph = saver.export_meta_graph(graph_def=self.model,
                                                     graph=g,
                                                     clear_devices=True)
                fetch_collection = meta_graph_pb2.CollectionDef()
                for fetch in self.outputs:
                    fetch_collection.node_list.value.append(fetch)
                meta_graph.collection_def["train_op"].CopyFrom(
                    fetch_collection)
                config = config_pb2.ConfigProto()
                rewriter_config = config.graph_options.rewrite_options
                for optimizer in self.generic_optimizer:
                    if optimizer in self.opt_cfg and self.opt_cfg[optimizer]:
                        rewriter_config.optimizers.append(optimizer)

                if tf.version.VERSION >= '2.3.0':
                    for optimizer in self.tf_2_optimizer:
                        if optimizer in self.opt_cfg and self.opt_cfg[
                                optimizer]:
                            rewriter_config.optimizers.append(optimizer)

                rewriter_config.min_graph_nodes = -1

                optimized_graph = tf_optimizer.OptimizeGraph(
                    config, meta_graph)

            return optimized_graph
        except Exception as e:
            self.logger.warning("Failed to run grappler pass due to {}".format(
                str(e)))
            return self.model
Code Example #21
    def testGraphFromMetaGraphBecomesAvailable(self):
        """Test accumulator by writing values and then reading them."""

        directory = os.path.join(self.get_temp_dir(),
                                 'metagraph_test_values_dir')
        if gfile.IsDirectory(directory):
            gfile.DeleteRecursively(directory)
        gfile.MkDir(directory)

        writer = writer_lib.FileWriter(directory, max_queue=100)

        with ops.Graph().as_default() as graph:
            _ = constant_op.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        meta_graph_def = saver.export_meta_graph(graph_def=graph.as_graph_def(
            add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(acc.Tags(), {
            ea.GRAPH: True,
            ea.META_GRAPH: True,
        })
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Code Example #22
  def _ExportAndImportGraph(self, graph):
    """Export and import graph into a new graph."""
    meta_graph = saver_lib.export_meta_graph(
        graph=graph, collection_list=graph.get_all_collection_keys())
    graph_copy = ops.Graph()
    with graph_copy.as_default():
      _ = saver_lib.import_meta_graph(meta_graph)
    return graph_copy
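An equivalent standalone sketch with public API names (assumed, not from the test): exporting with `collection_list` and re-importing yields a structural copy of the graph, including its collections.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

g = tf.Graph()
with g.as_default():
    v = tf.Variable(1.0, name="v")
    tf.add_to_collection("my_collection", v)

meta_graph = tf.train.export_meta_graph(
    graph=g, collection_list=g.get_all_collection_keys())
graph_copy = tf.Graph()
with graph_copy.as_default():
    tf.train.import_meta_graph(meta_graph)

# Raises KeyError if the variable's op was not copied into graph_copy.
graph_copy.get_tensor_by_name("v:0")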
Code Example #23
def _run_grappler(config, graph_def, graph, signature_def):
    meta_graph = export_meta_graph(graph_def=graph_def, graph=graph)

    meta_graph.signature_def["not_used_key"].CopyFrom(signature_def)

    return tf_optimizer.OptimizeGraph(config,
                                      meta_graph,
                                      cluster=get_cluster())
Code Example #24
  def _CopyGraph(self, graph):
    """Return a copy of graph."""
    meta_graph = saver_lib.export_meta_graph(
        graph=graph, collection_list=graph.get_all_collection_keys())
    graph_copy = ops.Graph()
    with graph_copy.as_default():
      _ = saver_lib.import_meta_graph(meta_graph)
    return graph_copy
Code Example #25
def _run_inline_graph_optimization(func, lower_control_flow):
    """Apply function inline optimization to the graph.

  Returns the GraphDef after Grappler's function inlining optimization is
  applied. This optimization does not work on models with control flow.

  Args:
    func: ConcreteFunction.
    lower_control_flow: Boolean indicating whether or not to lower control flow
      ops such as If and While. (default True)

  Returns:
    GraphDef
  """
    graph_def = func.graph.as_graph_def()
    if not lower_control_flow:
        graph_def = disable_lower_using_switch_merge(graph_def)

    # In some cases, a secondary implementation of the function (e.g. for GPU) is
    # written to the "api_implements" attribute. (e.g. `tf.keras.layers.LSTM` in
    # TF2 produces a CuDNN-based RNN for GPU).
    # This function is supposed to inline all function calls, but "api_implements"
    # prevents this from happening. Removing the attribute solves the problem.
    # To learn more about "api_implements", see:
    #   tensorflow/core/grappler/optimizers/implementation_selector.h
    for function in graph_def.library.function:
        if "api_implements" in function.attr:
            del function.attr["api_implements"]

    meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)

    # Clear the initializer_name for the variables collections, since they are
    # not needed after being saved to a SavedModel.
    for name in [
            "variables", "model_variables", "trainable_variables",
            "local_variables"
    ]:
        raw_list = []
        for raw in meta_graph.collection_def["variables"].bytes_list.value:
            variable = variable_pb2.VariableDef()
            variable.ParseFromString(raw)
            variable.ClearField("initializer_name")
            raw_list.append(variable.SerializeToString())
        meta_graph.collection_def[name].bytes_list.value[:] = raw_list

    # Add a collection 'train_op' so that Grappler knows the outputs.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in func.inputs + func.outputs:
        fetch_collection.node_list.value.append(array.name)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

    # Initialize RewriterConfig with everything disabled except function inlining.
    config = config_pb2.ConfigProto()
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.min_graph_nodes = -1  # do not skip small graphs
    rewrite_options.optimizers.append("function")
    return tf_optimizer.OptimizeGraph(config, meta_graph)
Code Example #26
  def testMetagraph(self):
    with ops.Graph().as_default():
      with variable_scope.variable_scope("foo", use_resource=True):
        a = variable_scope.get_variable("a", initializer=10.0)

      momentum.MomentumOptimizer(
          learning_rate=0.001, momentum=0.1).minimize(
              a,
              colocate_gradients_with_ops=True,
              global_step=training_util.get_or_create_global_step())

      graph = ops.get_default_graph()
      meta_graph_def = saver.export_meta_graph(graph=graph)

    with ops.Graph().as_default():
      saver.import_meta_graph(meta_graph_def, import_scope="")
      meta_graph_two = saver.export_meta_graph(graph=graph)
    self.assertEqual(meta_graph_def, meta_graph_two)
Code Example #28
File: trt_convert.py Project: aritratony/tensorflow
  def _convert_graph_def(self):
    """Convert the input GraphDef."""
    graph = ops.Graph()
    with graph.as_default():
      importer.import_graph_def(self._input_graph_def, name="")
    self._grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
    self._add_nodes_blacklist()

    self._run_conversion()
Code Example #30
def optimize_graph(graph,
                   output_node_names,
                   output_graph,
                   tf_version,
                   quantization_dtype=None,
                   skip_op_check=False,
                   strip_debug_ops=False):
    """Takes a Python Graph object and optimizes the graph.

  Args:
    graph: The frozen graph to optimize.
    output_node_names: List of output node names.
    output_graph: The location of the output graph.
    tf_version: Tensorflow version of the input graph.
    quantization_dtype: An optional numpy dtype to quantize weights to for
      compression. Only np.uint8 and np.uint16 are supported.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
  """

    # Add a collection 'train_op' so that Grappler knows the outputs.
    for output in output_node_names:
        graph.add_to_collection('train_op',
                                graph.get_operation_by_name(output))

    graph_def = graph.as_graph_def()
    unsupported = validate(graph_def.node, skip_op_check, strip_debug_ops)
    if unsupported:
        raise ValueError('Unsupported Ops in the model before optimization\n' +
                         ', '.join(unsupported))

    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.optimizers[:] = [
        'pruning', 'constfold', 'arithmetic', 'dependency', 'pruning', 'remap',
        'constfold', 'arithmetic', 'dependency'
    ]
    if strip_debug_ops:
        rewriter_config.optimizers.insert(0, 'debug_stripper')
    meta_graph = export_meta_graph(graph_def=graph_def, graph=graph)

    optimized_graph = tf_optimizer.OptimizeGraph(config,
                                                 meta_graph,
                                                 cluster=get_cluster())

    unsupported = validate(optimized_graph.node, skip_op_check,
                           strip_debug_ops)

    if unsupported:
        raise ValueError('Unsupported Ops in the model after optimization\n' +
                         ', '.join(unsupported))

    extract_weights(optimized_graph, output_graph, tf_version,
                    quantization_dtype)
    return optimized_graph
Code Example #31
  def setUp(self):
    self.base_path = os.path.join(test.get_temp_dir(), "no_vars")
    if not os.path.exists(self.base_path):
      os.mkdir(self.base_path)

    # Create a simple graph with a variable, then convert variables to
    # constants and export the graph.
    with ops.Graph().as_default() as g:
      x = array_ops.placeholder(dtypes.float32, name="x")
      w = variables.Variable(3.0)
      y = math_ops.subtract(w * x, 7.0, name="y")  # pylint: disable=unused-variable
      ops.add_to_collection("meta", "this is meta")

      with self.session(graph=g) as session:
        variables.global_variables_initializer().run()
        new_graph_def = graph_util.convert_variables_to_constants(
            session, g.as_graph_def(), ["y"])

      filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
      saver.export_meta_graph(
          filename, graph_def=new_graph_def, collection_list=["meta"])
Code Example #32
    def _convert_saved_model_v2(self):
        """Convert the input SavedModel in 2.0 format."""
        self._saved_model = load.load(self._input_saved_model_dir,
                                      self._input_saved_model_tags)
        func = self._saved_model.signatures[
            self._input_saved_model_signature_key]
        frozen_func = convert_to_constants.convert_variables_to_constants_v2(
            func)
        self._grappler_meta_graph_def = saver.export_meta_graph(
            graph_def=frozen_func.graph.as_graph_def(),
            graph=frozen_func.graph)

        # Add a collection 'train_op' so that Grappler knows the outputs.
        fetch_collection = meta_graph_pb2.CollectionDef()
        for array in func.inputs + func.outputs:
            fetch_collection.node_list.value.append(array.name)
        self._grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
            fetch_collection)

        # Run TRT optimizer in Grappler to convert the graph.
        self._run_conversion()

        def _get_tensor(graph, tensors):
            new_tensors = []
            for tensor in tensors:
                new_tensor = graph.get_tensor_by_name(tensor.name)
                new_tensor.set_shape(tensor.shape)
                new_tensors.append(new_tensor)
            return new_tensors

        # TODO(laigd): do we need to use different name e.g. "trt_func_graph"?
        converted_graph = func_graph.FuncGraph(func.graph.name)
        with converted_graph.as_default():
            importer.import_graph_def(self._converted_graph_def, name="")

        converted_graph.inputs = _get_tensor(converted_graph,
                                             func.graph.inputs)
        converted_graph.outputs = _get_tensor(converted_graph,
                                              func.graph.outputs)
        converted_graph.structured_outputs = func.graph.structured_outputs
        converted_graph.structured_input_signature = (
            func.graph.structured_input_signature)

        # pylint: disable=protected-access
        # TODO(laigd): should we set up the signature as well?
        self._converted_func = function.ConcreteFunction(converted_graph,
                                                         attrs=None,
                                                         signature=None)
        self._converted_func.add_to_graph()
        self._converted_func._arg_keywords = func._arg_keywords
        self._converted_func._num_positional_args = func._num_positional_args
        self._converted_func._captured_inputs = func._captured_inputs
        self._converted_func.graph.variables = func.graph.variables
Code Example #33
File: decent_q_test.py Project: Xilinx/Vitis-AI
    def testQuantizeEval(self):
        input_meta_name = "original_meta.meta"
        input_meta_path = os.path.join(self.get_temp_dir(), input_meta_name)
        with ops.Graph().as_default():
            self._build_graph(is_freezed=False)
            graph_def = ops.get_default_graph().as_graph_def()
            saver_lib.export_meta_graph(filename=input_meta_path)

        original_meta_graph_def = MetaGraphDef()
        original_meta_graph_def = self._parse_def_from_file(
            original_meta_graph_def, input_meta_path)
        q_config, _ = self._compose_config()
        decent_q.quantize_train(original_meta_graph_def, q_config)

        quant_train_meta_graph_def = MetaGraphDef()
        quant_train_meta_graph_path = os.path.join(
            self.get_temp_dir(), "quantize_train/quantize_train.ckpt.meta")
        quant_train_meta_graph_def = self._parse_def_from_file(
            quant_train_meta_graph_def, quant_train_meta_graph_path)
        with session.Session() as sess:
            new_saver = saver_lib.import_meta_graph(quant_train_meta_graph_def)

            relu = sess.graph.get_tensor_by_name("relu/aquant:0")
            input_fn = self._mock_input_fn("input:0", [1, 4, 4, 3])
            init = variables.global_variables_initializer()
            sess.run(init)
            relu_val = sess.run([relu], feed_dict=input_fn(1))
        decent_q.quantize_evaluate(quant_train_meta_graph_def, q_config)
        quant_eval_meta_graph_def = MetaGraphDef()
        quant_eval_meta_graph_path = os.path.join(
            self.get_temp_dir(), "quantize_eval/quantize_eval.ckpt.meta")
        quant_eval_meta_graph_def = self._parse_def_from_file(
            quant_eval_meta_graph_def, quant_eval_meta_graph_path)
        eval_quant_pos = [
            node.attr["quantize_pos"].i
            for node in quant_eval_meta_graph_def.graph_def.node
            if node.op == "FixNeuron"
        ]
        self.assertAllEqual([8, 7, 6, 4], eval_quant_pos)
Code Example #34
def _simple_metagraph(depthwise=False):
  random_seed.set_random_seed(0)
  x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
  conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
  y = conv(x, 32, [3, 3])
  z = conv(y, 32, [3, 3])
  optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
  loss = math_ops.reduce_mean(z)
  train_op = optimizer.minimize(loss)
  graph = ops.get_default_graph()
  graph.add_to_collection('train_op', train_op)
  meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
  return meta_graph
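A hedged sketch of feeding this metagraph to Grappler's layout optimizer, mirroring the tests that surround this helper. It assumes graph mode (e.g. after tf.compat.v1.disable_eager_execution()) and the same internal imports the snippet uses; the config values are illustrative.

from tensorflow.core.protobuf import config_pb2, rewriter_config_pb2
from tensorflow.python.grappler import tf_optimizer

meta_graph = _simple_metagraph()
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
    rewriter_config_pb2.RewriterConfig(
        layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
        min_graph_nodes=-1))
optimized_graph_def = tf_optimizer.OptimizeGraph(config, meta_graph)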
Code Example #36
File: graph_util_test.py Project: paolodedios/keras
    def _inline_functions(self, graph_def, arrays):
        meta_graph = export_meta_graph(graph_def=graph_def)
        fetch_collection = meta_graph_pb2.CollectionDef()
        for name in arrays:
            fetch_collection.node_list.value.append(name)
        meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

        # Initialize RewriterConfig with everything disabled except function
        # inlining.
        config = tf.compat.v1.ConfigProto()
        rewrite_options = config.graph_options.rewrite_options
        rewrite_options.optimizers.append("function")
        return tf_optimizer.OptimizeGraph(config, meta_graph)
Code Example #37
  def test_meta_graph_transform(self):

    with ops.Graph().as_default():
      with tf_session.Session(''):
        a = array_ops.placeholder(dtypes.int64, [1], name='a')
        b = array_ops.placeholder(dtypes.int64, [1], name='b')
        c = array_ops.placeholder(dtypes.int64, [1], name='c')
        _ = a * b
        _ = b * c
        base_meta_graph_def = saver.export_meta_graph()

    with ops.Graph().as_default():
      with tf_session.Session(''):
        a = array_ops.placeholder(dtypes.int64, [1], name='a')
        b = array_ops.placeholder(dtypes.int64, [1], name='b')
        _ = a * b
        meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
        meta_info_def.tags.append('tag_ab')

        expected_meta_graph_def = saver.export_meta_graph(
            meta_info_def=meta_info_def)
        # Graph rewriter clears versions field, so we expect that.
        expected_meta_graph_def.graph_def.ClearField('versions')
        # Graph rewriter adds an empty library field, so we expect that.
        expected_meta_graph_def.graph_def.library.CopyFrom(
            function_pb2.FunctionDefLibrary())

    input_names = ['a', 'b']
    output_names = ['mul:0']
    transforms = ['strip_unused_nodes']
    tags = ['tag_ab']
    transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
        base_meta_graph_def, input_names, output_names, transforms, tags)

    self.assertEqual(expected_meta_graph_def, transformed_meta_graph_def)
Code Example #38
    def test_meta_graph_transform(self):

        with ops.Graph().as_default():
            with tf_session.Session(''):
                a = array_ops.placeholder(dtypes.int64, [1], name='a')
                b = array_ops.placeholder(dtypes.int64, [1], name='b')
                c = array_ops.placeholder(dtypes.int64, [1], name='c')
                _ = a * b
                _ = b * c
                base_meta_graph_def = saver.export_meta_graph()

        with ops.Graph().as_default():
            with tf_session.Session(''):
                a = array_ops.placeholder(dtypes.int64, [1], name='a')
                b = array_ops.placeholder(dtypes.int64, [1], name='b')
                _ = a * b
                meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
                meta_info_def.tags.append('tag_ab')

                expected_meta_graph_def = saver.export_meta_graph(
                    meta_info_def=meta_info_def)
                # Graph rewriter clears versions field, so we expect that.
                expected_meta_graph_def.graph_def.ClearField('versions')
                # Graph rewriter adds an empty library field, so we expect that.
                expected_meta_graph_def.graph_def.library.CopyFrom(
                    function_pb2.FunctionDefLibrary())

        input_names = ['a', 'b']
        output_names = ['mul:0']
        transforms = ['strip_unused_nodes']
        tags = ['tag_ab']
        transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
            base_meta_graph_def, input_names, output_names, transforms, tags)

        self.assertEqual(expected_meta_graph_def, transformed_meta_graph_def)
Code Example #39
File: trt_convert.py Project: perfmjs/tensorflow
  def _convert_saved_model_v2(self):
    """Convert the input SavedModel in 2.0 format."""
    self._saved_model = load.load(self._input_saved_model_dir,
                                  self._input_saved_model_tags)
    func = self._saved_model.signatures[self._input_saved_model_signature_key]
    frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
    self._grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph)

    # Add a collection 'train_op' so that Grappler knows the outputs.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in func.inputs + func.outputs:
      fetch_collection.node_list.value.append(array.name)
    self._grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
        fetch_collection)

    # Run TRT optimizer in Grappler to convert the graph.
    self._run_conversion()

    def _get_tensor(graph, tensors):
      new_tensors = []
      for tensor in tensors:
        new_tensor = graph.get_tensor_by_name(tensor.name)
        new_tensor.set_shape(tensor.shape)
        new_tensors.append(new_tensor)
      return new_tensors

    # TODO(laigd): do we need to use different name e.g. "trt_func_graph"?
    converted_graph = func_graph.FuncGraph(func.graph.name)
    with converted_graph.as_default():
      importer.import_graph_def(self._converted_graph_def, name="")

    converted_graph.inputs = _get_tensor(converted_graph, func.graph.inputs)
    converted_graph.outputs = _get_tensor(converted_graph, func.graph.outputs)
    converted_graph.structured_outputs = func.graph.structured_outputs
    converted_graph.structured_input_signature = (
        func.graph.structured_input_signature)

    # pylint: disable=protected-access
    # TODO(laigd): should we set up the signature as well?
    self._converted_func = function.ConcreteFunction(
        converted_graph, attrs=None, signature=None)
    self._converted_func.add_to_graph()
    self._converted_func._arg_keywords = func._arg_keywords
    self._converted_func._num_positional_args = func._num_positional_args
    self._converted_func._captured_inputs = func._captured_inputs
    self._converted_func.graph.variables = func.graph.variables
Code Example #40
def _run_inline_graph_optimization(func, lower_control_flow):
    """Apply function inline optimization to the graph.

  Returns the GraphDef after Grappler's function inlining optimization is
  applied. This optimization does not work on models with control flow.

  Args:
    func: ConcreteFunction.
    lower_control_flow: Boolean indicating whether or not to lower control flow
      ops such as If and While. (default True)

  Returns:
    GraphDef
  """
    graph_def = func.graph.as_graph_def()
    if not lower_control_flow:
        graph_def = disable_lower_using_switch_merge(graph_def)
    meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)

    # Clear the initializer_name for the variables collections, since they are
    # not needed after being saved to a SavedModel.
    for name in [
            "variables", "model_variables", "trainable_variables",
            "local_variables"
    ]:
        raw_list = []
        for raw in meta_graph.collection_def[name].bytes_list.value:
            variable = variable_pb2.VariableDef()
            variable.ParseFromString(raw)
            variable.ClearField("initializer_name")
            raw_list.append(variable.SerializeToString())
        meta_graph.collection_def[name].bytes_list.value[:] = raw_list

    # Add a collection 'train_op' so that Grappler knows the outputs.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in func.inputs + func.outputs:
        fetch_collection.node_list.value.append(array.name)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

    # Initialize RewriterConfig with everything disabled except function inlining.
    config = config_pb2.ConfigProto()
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.min_graph_nodes = -1  # do not skip small graphs
    rewrite_options.optimizers.append("function")
    return tf_optimizer.OptimizeGraph(config, meta_graph)
Code Example #41
    def convert(self):
        """Convert the input SavedModel in 2.0 format.

    Returns:
      The TF-TRT converted Function.
    """
        assert not self._converted
        self._saved_model = load.load(self._input_saved_model_dir,
                                      self._input_saved_model_tags)
        func = self._saved_model.signatures[
            self._input_saved_model_signature_key]
        frozen_func = convert_to_constants.convert_variables_to_constants_v2(
            func)
        grappler_meta_graph_def = saver.export_meta_graph(
            graph_def=frozen_func.graph.as_graph_def(),
            graph=frozen_func.graph)

        # Add a collection 'train_op' so that Grappler knows the outputs.
        fetch_collection = meta_graph_pb2.CollectionDef()
        for array in frozen_func.inputs + frozen_func.outputs:
            fetch_collection.node_list.value.append(array.name)
        grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
            fetch_collection)

        # Run TRT optimizer in Grappler to convert the graph.
        self._converted_graph_def = self._run_conversion(
            grappler_meta_graph_def)
        self._converted_func = wrap_function.function_from_graph_def(
            self._converted_graph_def,
            [tensor.name for tensor in frozen_func.inputs],
            [tensor.name for tensor in frozen_func.outputs])
        # Reconstruct the output signatures using the ones from original model.
        self._converted_func.graph.structured_outputs = nest.pack_sequence_as(
            func.graph.structured_outputs,
            self._converted_func.graph.structured_outputs)

        self._converted = True

        # Wrap the converted ConcreteFunction in a Function so it can accept numpy
        # arrays as input.
        @def_function.function
        def wrapper_func(*args, **kwargs):
            return self._converted_func(*args, **kwargs)

        return wrapper_func
Code Example #42
File: test_util.py Project: AbhinavJain13/tensorflow
def grappler_optimize(graph, fetches=None, rewriter_config=None):
  """Tries to optimize the provided graph using grappler.

  Args:
    graph: A `tf.Graph` instance containing the graph to optimize.
    fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
      Grappler uses the 'train_op' collection to look for fetches, so if not
      provided this collection should be non-empty.
    rewriter_config: An optional `tf.RewriterConfig` to use when rewriting the
      graph.

  Returns:
    A `tf.GraphDef` containing the rewritten graph.
  """
  if rewriter_config is None:
    rewriter_config = rewriter_config_pb2.RewriterConfig()
  if fetches is not None:
    for fetch in fetches:
      graph.add_to_collection('train_op', fetch)
  metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def())
  return tf_optimizer.OptimizeGraph(rewriter_config, metagraph)
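A hedged usage sketch for `grappler_optimize` (the constants and the constfold pass are illustrative; it assumes graph mode and the TF version this test targets, where OptimizeGraph accepts a RewriterConfig):

import tensorflow.compat.v1 as tf
from tensorflow.core.protobuf import rewriter_config_pb2

tf.disable_eager_execution()

g = tf.Graph()
with g.as_default():
    a = tf.constant(2.0)
    b = tf.constant(3.0)
    out = tf.add(a, b, name="out")

config = rewriter_config_pb2.RewriterConfig()
config.optimizers.append("constfold")
# Passing `out` as a fetch keeps it from being pruned away.
optimized_graph_def = grappler_optimize(g, fetches=[out], rewriter_config=config)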
Code Example #43
File: trt_convert.py Project: kylin9872/tensorflow
  def _convert_graph_def(self):
    """Convert the input GraphDef."""
    graph = ops.Graph()
    with graph.as_default():
      importer.import_graph_def(self._input_graph_def, name="")
    self._grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
    if self._nodes_blacklist:
      output_collection = meta_graph_pb2.CollectionDef()
      output_list = output_collection.node_list.value
      for i in self._nodes_blacklist:
        if isinstance(i, ops.Tensor):
          output_list.append(_to_bytes(i.name))
        else:
          output_list.append(_to_bytes(i))
      # TODO(laigd): use another key as the self._nodes_blacklist are really
      # not train_op.
      self._grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
          output_collection)

    self._run_conversion()
Code Example #44
File: cond_v2_test.py Project: clsung/tensorflow
  def testGradientOfDeserializedCond(self):
    with ops.Graph().as_default():
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      x = constant_op.constant(3.0, name="x")
      ops.add_to_collection("x", x)

      def true_fn():
        return math_ops.pow(x, 3)

      def false_fn():
        return x

      ops.add_to_collection("pred", pred)
      cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
      for c in cond:
        ops.add_to_collection("cond", c)
      meta_graph = saver.export_meta_graph()

    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        saver.import_meta_graph(meta_graph)
        x = ops.get_collection("x")[0]
        pred = ops.get_collection("pred")[0]
        cond = ops.get_collection("cond")
        cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
        cond_grad_grad = gradients_impl.gradients(
            cond_grad, [x], name="cond_grad_grad")
        # d[x^3]/dx = 3x^2
        true_val = sess.run(cond_grad, {pred: True})
        self.assertEqual(true_val, [27.0])
        # d[x]/dx = 1
        false_val = sess.run(cond_grad, {pred: False})
        self.assertEqual(false_val, [1.0])

        true_val = sess.run(cond_grad_grad, {pred: True})
        # d2[x^3]/dx2 = 6x
        self.assertEqual(true_val, [18.0])
        false_val = sess.run(cond_grad_grad, {pred: False})
        # d2[x]/dx2 = 0
        self.assertEqual(false_val, [0.0])
Code example #45
  def _test_convert_variables_with_functions(self, inline_functions):
    """Freezes a graph with functions."""

    @function.Defun(dtypes.float32)
    def plus_one(x):
      return x + 1.0

    with ops.Graph().as_default():
      variable_node = variables.Variable(1.0, name="variable_node")
      _ = variables.Variable(1.0, name="unused_variable_node")
      defun_node = plus_one(variable_node)
      _ = math_ops_lib.multiply(defun_node, 2.0, name="output_node")

      with session.Session() as sess:
        self.evaluate(variables.variables_initializer([variable_node]))
        variable_graph_def = sess.graph.as_graph_def()

        if inline_functions:
          # Run Grappler to create the VarOpHandle --> Placeholder -->
          # ResourceVariable pattern.
          meta_graph = export_meta_graph(graph_def=variable_graph_def)
          fetch_collection = meta_graph_pb2.CollectionDef()
          for name in ["variable_node", "output_node"]:
            fetch_collection.node_list.value.append(name)
          meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

          # Initialize RewriterConfig with everything disabled except function
          # inlining.
          config = config_pb2.ConfigProto()
          rewrite_options = config.graph_options.rewrite_options
          rewrite_options.optimizers.append("function")
          variable_graph_def = tf_optimizer.OptimizeGraph(config, meta_graph)

        constant_graph_def = graph_util.convert_variables_to_constants(
            sess, variable_graph_def, ["output_node"])

    # Ensure there are no variables after freezing.
    for node in constant_graph_def.node:
      self.assertNotIn(
          node.op, ["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
Code example #46
def grappler_optimize(graph, fetches=None, config_proto=None):
  """Tries to optimize the provided graph using grappler.

  Args:
    graph: A `tf.Graph` instance containing the graph to optimize.
    fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
      Grappler uses the 'train_op' collection to look for fetches, so if not
      provided this collection should be non-empty.
    config_proto: An optional `tf.ConfigProto` to use when rewriting the
      graph.

  Returns:
    A `tf.GraphDef` containing the rewritten graph.
  """
  if config_proto is None:
    config_proto = config_pb2.ConfigProto()
    config_proto.graph_options.rewrite_options.min_graph_nodes = -1
  if fetches is not None:
    for fetch in fetches:
      graph.add_to_collection('train_op', fetch)
  metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def())
  return tf_optimizer.OptimizeGraph(config_proto, metagraph)
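The min_graph_nodes line is what distinguishes this variant from example #42: Grappler normally skips graphs below a small node-count threshold, so tiny test graphs would come back untouched. A sketch of just that config knob (standard TF protobuf API, nothing specific to this utility):

from tensorflow.core.protobuf import config_pb2

config = config_pb2.ConfigProto()
# -1 disables the small-graph threshold so even a two-node graph is rewritten.
config.graph_options.rewrite_options.min_graph_nodes = -1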
Code example #47
def get_metagraph():
  """Constructs and returns a MetaGraphDef from the input file."""
  with gfile.GFile(FLAGS.input) as input_file:
    input_data = input_file.read()
    try:
      saved_model = saved_model_pb2.SavedModel()
      text_format.Merge(input_data, saved_model)
      meta_graph = saved_model.meta_graphs[0]
    except text_format.ParseError:
      try:
        saved_model.ParseFromString(input_data)
        meta_graph = saved_model.meta_graphs[0]
      except message.DecodeError:
        try:
          meta_graph = meta_graph_pb2.MetaGraphDef()
          text_format.Merge(input_data, meta_graph)
        except text_format.ParseError:
          try:
            meta_graph.ParseFromString(input_data)
          except message.DecodeError:
            try:
              graph_def = graph_pb2.GraphDef()
              text_format.Merge(input_data, graph_def)
            except text_format.ParseError:
              try:
                graph_def.ParseFromString(input_data)
              except message.DecodeError:
                raise ValueError("Invalid input file.")
            importer.import_graph_def(graph_def, name="")
            graph = ops.get_default_graph()
            meta_graph = saver.export_meta_graph(
                graph_def=graph.as_graph_def(), graph=graph)
  if FLAGS.fetch is not None:
    fetch_collection = meta_graph_pb2.CollectionDef()
    for fetch in FLAGS.fetch.split(","):
      fetch_collection.node_list.value.append(fetch)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
  return meta_graph
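The cascade above sniffs formats in a fixed order: SavedModel (text, then binary), MetaGraphDef (text, then binary), and finally GraphDef. A flatter sketch of the same idea; try_parse is a hypothetical helper, not part of the tool, and it assumes data was read the same way as above:

from google.protobuf import message
from google.protobuf import text_format


def try_parse(proto, data):
  """Returns True if data parses into proto as a text or binary proto."""
  try:
    text_format.Merge(data, proto)
    return True
  except text_format.ParseError:
    pass
  try:
    proto.ParseFromString(data)
    return True
  except (message.DecodeError, TypeError):
    # TypeError covers str being passed where binary parsing expects bytes.
    return False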
Code example #48
File: trt_convert.py Project: aritratony/tensorflow
  def convert(self):
    """Convert the input SavedModel in 2.0 format.

    Returns:
      The TF-TRT converted Function.
    """
    assert not self._converted
    self._saved_model = load.load(self._input_saved_model_dir,
                                  self._input_saved_model_tags)
    func = self._saved_model.signatures[self._input_saved_model_signature_key]
    frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
    grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph)

    # Add a collection 'train_op' so that Grappler knows the outputs.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in frozen_func.inputs + frozen_func.outputs:
      fetch_collection.node_list.value.append(array.name)
    grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
        fetch_collection)

    # Run TRT optimizer in Grappler to convert the graph.
    self._converted_graph_def = self._run_conversion(grappler_meta_graph_def)
    self._converted_func = wrap_function.function_from_graph_def(
        self._converted_graph_def,
        [tensor.name for tensor in frozen_func.inputs],
        [tensor.name for tensor in frozen_func.outputs])

    self._converted = True

    # Wrap the converted ConcreteFunction in a Function so it can accept numpy
    # arrays as input.
    @def_function.function
    def wrapper_func(*args, **kwargs):
      return self._converted_func(*args, **kwargs)

    return wrapper_func
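For orientation, a hypothetical end-to-end use of the converter class this method belongs to; the class name TrtGraphConverterV2, the import path, and the directories are assumptions based on the public TF-TRT API of this era:

from tensorflow.python.compiler.tensorrt import trt_convert as trt

converter = trt.TrtGraphConverterV2(input_saved_model_dir="/tmp/saved_model")
converted_func = converter.convert()    # returns the wrapper_func above
converter.save("/tmp/trt_saved_model")  # re-export as a SavedModel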
Code example #49
    def testGraphFromMetaGraphBecomesAvailable(self):
        """Test accumulator by writing values and then reading them."""

        directory = os.path.join(self.get_temp_dir(), "metagraph_test_values_dir")
        if gfile.IsDirectory(directory):
            gfile.DeleteRecursively(directory)
        gfile.MkDir(directory)

        writer = tf.train.SummaryWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        meta_graph_def = saver.export_meta_graph(graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(),
            {
                ea.IMAGES: [],
                ea.AUDIO: [],
                ea.SCALARS: [],
                ea.HISTOGRAMS: [],
                ea.COMPRESSED_HISTOGRAMS: [],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: [],
            },
        )
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Code example #50
  def _convert_saved_model_v2(self):
    """Convert the input SavedModel in 2.0 format."""
    assert context.executing_eagerly()

    self._saved_model = load.load(self._input_saved_model_dir,
                                  self._input_saved_model_tags)
    func = self._saved_model.signatures[self._input_saved_model_signature_key]
    frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
    self._grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph)

    # Add a collection 'train_op' so that Grappler knows the outputs.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in frozen_func.inputs + frozen_func.outputs:
      fetch_collection.node_list.value.append(array.name)
    self._grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
        fetch_collection)

    # Run TRT optimizer in Grappler to convert the graph.
    self._run_conversion()
    self._converted_func = wrap_function.function_from_graph_def(
        self._converted_graph_def,
        [tensor.name for tensor in frozen_func.inputs],
        [tensor.name for tensor in frozen_func.outputs])
Code example #51
File: trt_convert.py Project: zhaoyongke/tensorflow
def create_inference_graph(input_graph_def,
                           outputs,
                           max_batch_size=1,
                           max_workspace_size_bytes=2 << 20,
                           precision_mode=TrtPrecisionMode.FP32,
                           minimum_segment_size=3,
                           is_dynamic_op=False,
                           maximum_cached_engines=1,
                           cached_engine_batch_sizes=None,
                           input_saved_model_dir=None,
                           input_saved_model_tags=None,
                           output_saved_model_dir=None,
                           session_config=None):
  """Python wrapper for the TRT transformation.

  Args:
    input_graph_def: a GraphDef object containing a model to be transformed. If
      set to None, the graph will be read from the SavedModel loaded from
      input_saved_model_dir.
    outputs: list of tensors or node names for the model outputs. Only used when
      input_graph_def is not None.
    max_batch_size: max size for the input batch.
    max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
      engine can use at execution time. This corresponds to the 'workspaceSize'
      parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
    precision_mode: one of TrtPrecisionMode.supported_precision_modes().
    minimum_segment_size: the minimum number of nodes required for a subgraph to
      be replaced by TRTEngineOp.
    is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT
      network and engine at run time.
    maximum_cached_engines: max number of cached TRT engines in dynamic TRT ops.
      If the number of cached engines is already at max but none of them can
      serve the input, the TRTEngineOp will fall back to run the TF function
      based on which the TRTEngineOp is created.
    cached_engine_batch_sizes: a list of batch sizes used to create cached
      engines, only used when is_dynamic_op is True. The length of the list
      should be smaller than maximum_cached_engines, and the dynamic TRT op will
      use this list to determine the batch sizes of the cached engines, instead
      of making the decision on the fly. This is useful when we know the most
      common batch size(s) the application is going to generate.
    input_saved_model_dir: the directory to load the SavedModel which contains
      the input graph to transform. Used only when input_graph_def is None.
    input_saved_model_tags: list of tags to load the SavedModel.
    output_saved_model_dir: if not None, construct a SavedModel using the
      returned GraphDef and save it to the specified directory. This option only
      works when the input graph is loaded from a SavedModel, i.e. when
      input_saved_model_dir is specified and input_graph_def is None.
    session_config: the ConfigProto used to create a Session. It's also used as
      a template to create a TRT-enabled ConfigProto for conversion. If not
      specified, a default ConfigProto will be used.

  Returns:
    A GraphDef transformed from input_graph_def (or the SavedModel graph def
    loaded from input_saved_model_dir, if input_graph_def is not present), where
    all TRT compatible subgraphs are replaced with TRTEngineOps, and a TF
    function is added for each of the subgraphs.

    If is_dynamic_op is True, each TRTEngineOp will contain a serialized
    subgraph GraphDef, which will be converted to a TRT engine at execution time
    and the TRT engine will be cached for future usage. A new TRT engine will be
    created whenever none of the cached engines match the input shapes. If
    it fails to execute the TRT engine or the number of cached engines reaches
    maximum_cached_engines, the op will fall back to call the corresponding TF
    function.

    If is_dynamic_op is False, each TRTEngineOp will contain a serialized TRT
    engine created from the corresponding subgraph. No more engines will be
    created on the fly, and the op will fall back to call the corresponding TF
    function when it fails to execute the engine.

  Raises:
    ValueError: if the combination of the parameters is invalid.
    RuntimeError: if the TensorRT library version is incompatible.
  """
  compiled_version = get_linked_tensorrt_version()
  loaded_version = get_loaded_tensorrt_version()
  version_mismatch = False
  if loaded_version[0] < compiled_version[0]:
    tf_logging.error(
        "TensorRT version mismatch. Tensorflow was compiled against " +
        "TensorRT %s but library loaded from environment is TensorRT %s" %
        (".".join([str(x) for x in compiled_version]),
         ".".join([str(x) for x in loaded_version])) +
        ". Please make sure that correct version of TensorRT " +
        "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
    raise RuntimeError("Incompatible TensorRT library version")
  for i in zip(loaded_version, compiled_version):
    if i[0] != i[1]:
      tf_logging.warn("TensorRT mismatch. Compiled against version " +
                      "%s, but loaded %s. Things may not work" %
                      (".".join([str(x) for x in compiled_version]),
                       ".".join([str(x) for x in loaded_version])))
      version_mismatch = True
      break
  if not version_mismatch:
    tf_logging.info("Running against TensorRT version %s" % ".".join(
        [str(x) for x in loaded_version]))

  if session_config is None:
    session_config = config_pb2.ConfigProto()

  if input_saved_model_tags is None:
    input_saved_model_tags = [tag_constants.SERVING]
  saved_model_loader = None
  grappler_meta_graph_def = None

  if input_graph_def is None:
    # Read from SavedModel and freeze the graph if necessary.
    if input_saved_model_dir is None:
      raise ValueError("input_graph_def and input_saved_model_dir cannot be "
                       "both None")
    with ops.Graph().as_default():
      with session.Session(config=session_config) as sess:
        saved_model_loader = loader_impl.SavedModelLoader(input_saved_model_dir)
        input_meta_graph_def = saved_model_loader.load(sess,
                                                       input_saved_model_tags)
        output_node_names = set()

        def _gather_names(tensor_info):
          """Get the node names from a TensorInfo."""
          return set(
              [tensor_info[key].name.split(":")[0] for key in tensor_info])

        # Get input and outputs from all SignatureDef.
        for key in input_meta_graph_def.signature_def:
          signature_def = input_meta_graph_def.signature_def[key]
          output_node_names.update(_gather_names(signature_def.inputs))
          output_node_names.update(_gather_names(signature_def.outputs))

        # Freeze the variables in the SavedModel graph and copy the frozen
        # graph over.
        frozen_graph_def = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(add_shapes=True),
            list(output_node_names))
        grappler_meta_graph_def = meta_graph_pb2.MetaGraphDef()
        grappler_meta_graph_def.graph_def.CopyFrom(frozen_graph_def)

        # Copy the collections that are not variables.
        for key in input_meta_graph_def.collection_def:
          # TODO(laigd): currently we use the collection key to filter out
          # collections that depend on variable ops, but this may miss some
          # other user-defined collections. A better way would be to use
          # CollectionDef::NodeList for the filtering.
          if key not in [
              "variables", "local_variables", "model_variables",
              "trainable_variables", "train_op", "table_initializer"
          ]:
            grappler_meta_graph_def.collection_def[key].CopyFrom(
                input_meta_graph_def.collection_def[key])

        # Copy other information.
        grappler_meta_graph_def.meta_info_def.CopyFrom(
            input_meta_graph_def.meta_info_def)
        for key in input_meta_graph_def.signature_def:
          grappler_meta_graph_def.signature_def[key].CopyFrom(
              input_meta_graph_def.signature_def[key])
        # TODO(laigd): maybe add back AssetFileDef.
  else:
    if output_saved_model_dir is not None:
      raise ValueError("output_saved_model_dir cannot be set when "
                       "input_graph_def is set")
    # Create MetaGraphDef from input graph.
    graph = ops.Graph()
    with graph.as_default():
      importer.import_graph_def(input_graph_def, name="")
    grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
    if outputs:
      output_collection = meta_graph_pb2.CollectionDef()
      output_list = output_collection.node_list.value
      for i in outputs:
        if isinstance(i, ops.Tensor):
          output_list.append(_to_bytes(i.name))
        else:
          output_list.append(_to_bytes(i))
      # TODO(laigd): use another key as the outputs are really not train_op.
      grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
          output_collection)

  # Create TRT-enabled ConfigProto.
  session_config_with_trt = config_pb2.ConfigProto()
  session_config_with_trt.CopyFrom(session_config)
  rewriter_config = None
  if (session_config_with_trt.HasField("graph_options") and
      session_config_with_trt.graph_options.HasField("rewrite_options")):
    rewriter_config = session_config_with_trt.graph_options.rewrite_options
  rewriter_config_with_trt = get_tensorrt_rewriter_config(
      rewriter_config, max_batch_size, max_workspace_size_bytes, precision_mode,
      minimum_segment_size, is_dynamic_op, maximum_cached_engines,
      cached_engine_batch_sizes)
  session_config_with_trt.graph_options.rewrite_options.CopyFrom(
      rewriter_config_with_trt)

  # Run Grappler.
  transformed_graph_def = tf_optimizer.OptimizeGraph(
      session_config_with_trt, grappler_meta_graph_def, graph_id=b"tf_graph")

  # Optionally write the transformed graphdef as SavedModel.
  if output_saved_model_dir is not None:
    saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir)
    with ops.Graph().as_default():
      importer.import_graph_def(transformed_graph_def, name="")
      # We don't use TRT here.
      with session.Session(config=session_config) as sess:
        saved_model_builder.add_meta_graph_and_variables(
            sess,
            input_saved_model_tags,
            signature_def_map=grappler_meta_graph_def.signature_def)
    # Ignore other meta graphs from the input SavedModel.
    saved_model_builder.save()

  return transformed_graph_def
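A minimal call sketch for this wrapper, assuming a frozen GraphDef is already in hand; the output node name and sizes are illustrative:

trt_graph_def = create_inference_graph(
    input_graph_def=frozen_graph_def,
    outputs=["logits"],
    max_batch_size=8,
    max_workspace_size_bytes=1 << 30,
    precision_mode=TrtPrecisionMode.FP16,
    is_dynamic_op=True)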
Code example #52
  def testScalarsRealistically(self):
    """Test accumulator by writing values and then reading them."""

    def FakeScalarSummary(tag, value):
      value = tf.Summary.Value(tag=tag, simple_value=value)
      summary = tf.Summary(value=[value])
      return summary

    directory = os.path.join(self.get_temp_dir(), 'values_dir')
    if gfile.IsDirectory(directory):
      gfile.DeleteRecursively(directory)
    gfile.MkDir(directory)

    writer = tf.train.SummaryWriter(directory, max_queue=100)

    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    writer.add_graph(graph)
    meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)

    run_metadata = tf.RunMetadata()
    device_stats = run_metadata.step_stats.dev_stats.add()
    device_stats.device = 'test device'
    writer.add_run_metadata(run_metadata, 'test run')

    # Write a bunch of events using the writer.
    for i in xrange(30):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()

    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: [],
            ea.AUDIO: [],
            ea.SCALARS: ['id', 'sq'],
            ea.HISTOGRAMS: [],
            ea.COMPRESSED_HISTOGRAMS: [],
            ea.GRAPH: True,
            ea.META_GRAPH: True,
            ea.RUN_METADATA: ['test run']
        })
    id_events = acc.Scalars('id')
    sq_events = acc.Scalars('sq')
    self.assertEqual(30, len(id_events))
    self.assertEqual(30, len(sq_events))
    for i in xrange(30):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, id_events[i].value)
      self.assertEqual(i * i, sq_events[i].value)

    # Write a few more events to test incremental reloading
    for i in xrange(30, 40):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()

    # Verify we can now see all of the data
    acc.Reload()
    id_events = acc.Scalars('id')
    sq_events = acc.Scalars('sq')
    self.assertEqual(40, len(id_events))
    self.assertEqual(40, len(sq_events))
    for i in xrange(40):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, id_events[i].value)
      self.assertEqual(i * i, sq_events[i].value)
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
    self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Code example #53
File: trt_convert.py Project: ZhangXinNan/tensorflow
def create_inference_graph(input_graph_def,
                           outputs,
                           max_batch_size=1,
                           max_workspace_size_bytes=2 << 20,
                           precision_mode="FP32",
                           minimum_segment_size=3,
                           is_dynamic_op=False,
                           maximum_cached_engines=1,
                           cached_engine_batches=None):
  """Python wrapper for the TRT transformation.

  Args:
    input_graph_def: GraphDef object containing a model to be transformed.
    outputs: list of tensors or node names for the model outputs.
    max_batch_size: max size for the input batch
    max_workspace_size_bytes: parameter to control memory allocation (in bytes)
    precision_mode: one of 'FP32', 'FP16' and 'INT8'
    minimum_segment_size: the minimum number of nodes required for a subgraph to
      be replaced by TRTEngineOp.
    is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT
      network and engine at run time.
    maximum_cached_engines: max number of cached TRT engines in dynamic TRT ops.
    cached_engine_batches: batch sizes used to pre-create cached engines.

  Returns:
    New GraphDef with TRTEngineOps placed in graph replacing subgraphs.

  Raises:
    ValueError: if the provided precision mode is invalid.
    RuntimeError: if the returned status message is malformed.
  """
  supported_precision_modes = {"FP32": 0, "FP16": 1, "INT8": 2}
  if precision_mode.upper() not in supported_precision_modes:
    raise ValueError(("precision mode '{}' is not supported."
                      "It should be one of {}").format(
                          precision_mode, "{'FP32', 'FP16', 'INT8'}"))
  mode = supported_precision_modes[precision_mode.upper()]
  compiled_version = get_linked_tensorrt_version()
  loaded_version = get_loaded_tensorrt_version()
  version_mismatch = False
  if loaded_version[0] < compiled_version[0]:
    tf_logging.error(
        "TensorRT version mismatch. Tensorflow was compiled against " +
        "TensorRT %s but library loaded from environment is TensorRT %s" %
        (".".join([str(x) for x in compiled_version]),
         ".".join([str(x) for x in loaded_version])) +
        ". Please make sure that correct version of TensorRT " +
        "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
    raise RuntimeError("Incompatible TensorRT library version")
  for i in zip(loaded_version, compiled_version):
    if i[0] != i[1]:
      tf_logging.warn("TensorRT mismatch. Compiled against version " +
                      "%s, but loaded %s. Things may not work" %
                      (".".join([str(x) for x in compiled_version]),
                       ".".join([str(x) for x in loaded_version])))
      version_mismatch = True
      break
  if not version_mismatch:
    tf_logging.info("Running against TensorRT version %s" % ".".join(
        [str(x) for x in loaded_version]))

  def py2bytes(inp):
    return inp

  def py3bytes(inp):
    return inp.encode("utf-8", errors="surrogateescape")

  def py2string(inp):
    return inp

  def py3string(inp):
    return inp.decode("utf-8")

  if _six.PY2:
    to_bytes = py2bytes
    to_string = py2string
  else:
    to_bytes = py3bytes
    to_string = py3string

  # Create MetaGraphDef
  graph = ops.Graph()
  with graph.as_default():
    importer.import_graph_def(input_graph_def, name="")
  meta_graph = saver.export_meta_graph(
      graph_def=graph.as_graph_def(), graph=graph)
  if outputs:
    output_collection = meta_graph_pb2.CollectionDef()
    output_list = output_collection.node_list.value
    for i in outputs:
      if isinstance(i, ops.Tensor):
        output_list.append(to_bytes(i.name))
      else:
        output_list.append(to_bytes(i))
    meta_graph.collection_def["train_op"].CopyFrom(output_collection)

  # Create RewriterConfig.
  rewriter_cfg = rewriter_config_pb2.RewriterConfig()
  rewriter_cfg.optimizers.extend(["constfold", "layout"])
  optimizer = rewriter_cfg.custom_optimizers.add()
  optimizer.name = "TensorRTOptimizer"
  optimizer.parameter_map["minimum_segment_size"].i = minimum_segment_size
  optimizer.parameter_map["max_batch_size"].i = max_batch_size
  optimizer.parameter_map["is_dynamic_op"].b = is_dynamic_op
  optimizer.parameter_map[
      "max_workspace_size_bytes"].i = max_workspace_size_bytes
  optimizer.parameter_map["precision_mode"].s = to_bytes(precision_mode)
  optimizer.parameter_map["maximum_cached_engines"].i = maximum_cached_engines
  if cached_engine_batches:
    if not isinstance(cached_engine_batches, list):
      raise TypeError("cached_engine_batches should be a list.")
    optimizer.parameter_map["cached_engine_batches"].list.i.extend(
        cached_engine_batches)

  return tf_optimizer.OptimizeGraph(
      rewriter_cfg, meta_graph, graph_id=b"tf_graph")
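A usage sketch for this older variant, which takes precision_mode as a plain string rather than a TrtPrecisionMode value; frozen_graph_def and the output name are illustrative:

trt_graph_def = create_inference_graph(
    input_graph_def=frozen_graph_def,
    outputs=["logits"],
    max_batch_size=4,
    precision_mode="FP16")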