Example #1
 def write_graph(self):
     """Saves current graph."""
     if self._checkpoint_dir is not None and self._is_chief:
         summary_writer = summary_writer_cache.SummaryWriterCache.get(
             self._checkpoint_dir)
         training_util.write_graph(
             self._graph.as_graph_def(add_shapes=True),
             self._checkpoint_dir, "graph.pbtxt")
         summary_writer.add_graph(self._graph)
         summary_writer.add_session_log(SessionLog(status=SessionLog.START), self._init_step)
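For reference, `training_util.write_graph` is exposed publicly as `tf.train.write_graph`, and the `graph.pbtxt` it emits is an ASCII `GraphDef` proto. A minimal sketch of reading one back, assuming TF 1.x-style APIs and an illustrative `/tmp/ckpt` path:

import tensorflow as tf
from google.protobuf import text_format

# Parse the ASCII GraphDef written by write_graph above.
with tf.io.gfile.GFile('/tmp/ckpt/graph.pbtxt', 'r') as f:  # illustrative path
    graph_def = tf.compat.v1.GraphDef()
    text_format.Merge(f.read(), graph_def)

# Rebuild the ops in a fresh graph.
with tf.Graph().as_default():
    tf.import_graph_def(graph_def, name='')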
Example #2
 def _write_graph(self):
     """Writes graph_def to `logdir` and adds it to summary if applicable."""
     assert self._is_chief
     if self._logdir:
         training_util.write_graph(self._graph.as_graph_def(), self._logdir, "graph.pbtxt")
     if self._summary_writer:
         self._summary_writer.add_graph(self._graph)
Example #3
    def before_run(self, run_context):
        """ Dumps graphs and loads checkpoint if there exits.

        Called before each call to run().

        Args:
            run_context: A `SessionRunContext` object.

        Returns: A `SessionRunArgs` object containing global_step.
        """
        # We write the graph and saver_def at the first call of before_run.
        # We cannot do this in begin, since we let other hooks change the graph
        # and add variables in begin. The graph is finalized after all begin calls.
        if self._is_chief and self._first_call:
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir,
                "graph.pbtxt")
            # dump model details "model_analysis.txt"
            dump_model_analysis(self._checkpoint_dir)  # dump model configs
            graph = ops.get_default_graph()
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=self._saver.saver_def)
            if self._summary_writer is not None:
                self._summary_writer.add_graph(graph)
                self._summary_writer.add_meta_graph(meta_graph_def)
            tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
        self._first_call = False
        return tf.train.SessionRunArgs(self._global_step)
Example #4
def export_meta_graph(filename=None, meta_info_def=None, graph_def=None,
                      saver_def=None, collection_list=None):
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    meta_info_def: `MetaInfoDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    saver_def: `SaverDef` protocol buffer.
    collection_list: List of string keys to collect.

  Returns:
    A `MetaGraphDef` proto.
  """
  meta_graph_def = _as_meta_graph_def(meta_info_def=meta_info_def,
                                      graph_def=graph_def,
                                      saver_def=saver_def,
                                      collection_list=collection_list)
  if filename:
    training_util.write_graph(meta_graph_def, os.path.dirname(filename),
                              os.path.basename(filename))
  return meta_graph_def
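A hedged usage sketch of the public counterpart, `tf.compat.v1.train.export_meta_graph`; the graph contents and the `/tmp/model.meta` filename are illustrative:

import tensorflow as tf

with tf.Graph().as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=[None, 4], name='x')
    _ = tf.identity(x, name='y')
    # Returns the MetaGraphDef proto and, since filename is set, writes it too.
    meta_graph_def = tf.compat.v1.train.export_meta_graph(
        filename='/tmp/model.meta')

with tf.Graph().as_default():
    # Recreate the exported graph later, e.g. to resume training or run inference.
    tf.compat.v1.train.import_meta_graph('/tmp/model.meta')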
Example #5
    def testQATFrozenGraphDefInt8(self):
        with ops.Graph().as_default():
            in_tensor_1 = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                dtype=dtypes.float32,
                                                name='inputA')
            in_tensor_2 = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                dtype=dtypes.float32,
                                                name='inputB')
            _ = array_ops.fake_quant_with_min_max_args(
                in_tensor_1 + in_tensor_2,
                min=0.,
                max=1.,
                name='output',
                num_bits=16
            )  # INT8 inference type works for 16-bit fake quant.
            sess = session.Session()

        # Write graph to file.
        graph_def_file = self._getFilepath('model.pb')
        write_graph(sess.graph_def, '', graph_def_file, False)
        sess.close()

        flags_str = ('--inference_type=INT8 --std_dev_values=128,128 '
                     '--mean_values=128,128 '
                     '--graph_def_file={0} --input_arrays={1},{2} '
                     '--output_arrays={3}'.format(graph_def_file, 'inputA',
                                                  'inputB', 'output'))
        self._run(flags_str, should_succeed=True)
        os.remove(graph_def_file)
Example #6
    def testFloatWithShapesArray(self):
        in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                          dtype=dtypes.float32)
        _ = in_tensor + in_tensor
        sess = session.Session()

        # Write graph to file.
        graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
        write_graph(sess.graph_def, '', graph_def_file, False)
        sess.close()

        # Convert model and ensure model is not None.
        converter = lite.TocoConverter.from_frozen_graph(
            graph_def_file, ['Placeholder'], ['add'],
            input_shapes={'Placeholder': [1, 16, 16, 3]})
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
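The test stops at `allocate_tensors()`; actually executing the converted model follows the usual Interpreter pattern. A minimal sketch, assuming `tflite_model` from a conversion like the one above and illustrative input data:

import numpy as np

interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a dummy tensor, run the graph, and read the result (add = x + x here).
data = np.ones([1, 16, 16, 3], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], data)
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]['index'])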
Example #7
    def testQATFrozenGraphDefUInt8(self):
        with ops.Graph().as_default():
            in_tensor_1 = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                dtype=dtypes.float32,
                                                name='inputA')
            in_tensor_2 = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                dtype=dtypes.float32,
                                                name='inputB')
            _ = array_ops.fake_quant_with_min_max_args(in_tensor_1 +
                                                       in_tensor_2,
                                                       min=0.,
                                                       max=1.,
                                                       name='output')
            sess = session.Session()

        # Write graph to file.
        graph_def_file = self._getFilepath('model.pb')
        write_graph(sess.graph_def, '', graph_def_file, False)
        sess.close()

        # Define converter flags
        flags_str = ('--std_dev_values=128,128 --mean_values=128,128 '
                     '--graph_def_file={0} --input_arrays={1} '
                     '--output_arrays={2}'.format(graph_def_file,
                                                  'inputA,inputB', 'output'))

        # Set inference_type UINT8 and (default) inference_input_type UINT8
        flags_str_1 = flags_str + ' --inference_type=UINT8'
        self._run(flags_str_1, should_succeed=True)

        # Set inference_type UINT8 and inference_input_type FLOAT
        flags_str_2 = flags_str_1 + ' --inference_input_type=FLOAT'
        self._run(flags_str_2, should_succeed=True)

        os.remove(graph_def_file)
Example #8
  def testFloatWithShapesArray(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, ['Placeholder'], ['add'],
        input_shapes={'Placeholder': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
Example #9
  def testPbtxt(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
    write_graph(sess.graph_def, '', graph_def_file, True)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
                                                       ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
Example #10
    def before_run(self, run_context):
        """ Dumps graphs and loads checkpoint if there exits.

        Called before each call to run().

        Args:
            run_context: A `SessionRunContext` object.

        Returns: A `SessionRunArgs` object containing global_step.
        """
        # We write the graph and saver_def at the first call of before_run.
        # We cannot do this in begin, since we let other hooks change the graph
        # and add variables in begin. The graph is finalized after all begin calls.
        if self._is_chief and self._first_call:
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")
            # dump model details "model_analysis.txt"
            dump_model_analysis(self._checkpoint_dir)  # dump model configs
            graph = ops.get_default_graph()
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=self._saver.saver_def)
            if self._summary_writer is not None:
                self._summary_writer.add_graph(graph)
                self._summary_writer.add_meta_graph(meta_graph_def)
            tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
        self._first_call = False
        return tf.train.SessionRunArgs(self._global_step)
Example #11
    def testPbtxt(self):
        in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                          dtype=dtypes.float32)
        _ = in_tensor + in_tensor
        sess = session.Session()

        # Write graph to file.
        graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
        write_graph(sess.graph_def, '', graph_def_file, True)

        # Convert model and ensure model is not None.
        converter = lite.TocoConverter.from_frozen_graph(
            graph_def_file, ['Placeholder'], ['add'])
        tflite_model = converter.convert()
        self.assertTrue(tflite_model)

        # Check values from converted model.
        interpreter = Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        self.assertEqual(1, len(input_details))
        self.assertEqual('Placeholder', input_details[0]['name'])
        self.assertEqual(np.float32, input_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
        self.assertEqual((0., 0.), input_details[0]['quantization'])

        output_details = interpreter.get_output_details()
        self.assertEqual(1, len(output_details))
        self.assertEqual('add', output_details[0]['name'])
        self.assertEqual(np.float32, output_details[0]['dtype'])
        self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
        self.assertEqual((0., 0.), output_details[0]['quantization'])
Example #12
 def _write_graph(self):
     """Writes graph_def to `logdir` and adds it to summary if applicable."""
     assert self._is_chief
     if self._logdir:
         training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                                   self._logdir, "graph.pbtxt")
     if self._summary_writer and not self._graph_added_to_summary:
         self._summary_writer.add_graph(self._graph)
         self._graph_added_to_summary = True
Example #13
 def _write_graph(self):
     """Writes graph_def to `logdir` and adds it to summary if applicable."""
     assert self._is_chief
     if self._logdir:
         training_util.write_graph(self._graph.as_graph_def(), self._logdir,
                                   "graph.pbtxt")
     if self._summary_writer:
         self._summary_writer.add_graph(self._graph)
Example #14
 def _write_graph(self):
   """Writes graph_def to `logdir` and adds it to summary if applicable."""
   assert self._is_chief
   if self._logdir:
     training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                               self._logdir, "graph.pbtxt")
   if self._summary_writer and not self._graph_added_to_summary:
     self._summary_writer.add_graph(self._graph)
     self._graph_added_to_summary = True
Example #15
 def write_graph(self):
     """Saves current graph."""
     if self._checkpoint_dir is not None and self._is_chief:
         summary_writer = summary_writer_cache.SummaryWriterCache.get(
             self._checkpoint_dir)
         training_util.write_graph(
             self._graph.as_graph_def(add_shapes=True),
             self._checkpoint_dir, 'graph.pbtxt')
         summary_writer.add_graph(self._graph)
Example #16
    def before_run(self, run_context):  # pylint: disable=unused-argument
        if self._last_saved_time is None:
            # Write graph in the first call.
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")
            self._summary_writer.add_graph(ops.get_default_graph())

        return SessionRunArgs(self._global_step_tensor)
Example #17
  def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._last_saved_time is None:
      # Write graph in the first call.
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      self._summary_writer.add_graph(ops.get_default_graph())

    return SessionRunArgs(self._global_step_tensor)
Example #18
 def write_graph(self):
   """Saves current graph."""
   if self._checkpoint_dir is not None and self._is_chief:
     summary_writer = summary_writer_cache.SummaryWriterCache.get(
         self._checkpoint_dir)
     training_util.write_graph(
         self._graph.as_graph_def(add_shapes=True),
         self._checkpoint_dir,
         'graph.pbtxt')
     summary_writer.add_graph(self._graph)
Example #19
  def before_run(self, run_context):
    """Essentially a copy of before_run as defined in the base class, except we
    don't add the default graph or any meta-graph data to the SummaryWriter."""
    if self._timer.last_triggered_step() is None:
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._get_saver().saver_def if self._get_saver() else None

    return SessionRunArgs(self._global_step_tensor)
Example #20
 def write_graph(self):
   """Saves current graph."""
   if self._checkpoint_dir is not None and self._is_chief:
     summary_writer = summary_writer_cache.SummaryWriterCache.get(
         self._checkpoint_dir)
     training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                               self._checkpoint_dir, 'graph.pbtxt')
     summary_writer.add_graph(self._graph)
     summary_writer.add_session_log(SessionLog(status=SessionLog.START),
                                    self._init_step)
Example #21
    def before_run(self, run_context):  # pylint: disable=unused-argument
        if self._timer.last_triggered_step() is None:
            # Write graph in the first call.
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")
            saver_def = self._saver.saver_def if self._saver else None
            graph = ops.get_default_graph()
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=saver_def)
            self._summary_writer.add_graph(graph)
            self._summary_writer.add_meta_graph(meta_graph_def)

        return SessionRunArgs(self._global_step_tensor)
Example #22
  def testFrozenGraphDefNonPlaceholder(self):
    with ops.Graph().as_default():
      in_tensor = random_ops.random_normal(shape=[1, 16, 16, 3], name='random')
      _ = in_tensor + in_tensor
      sess = session.Session()

    # Write graph to file.
    graph_def_file = self._getFilepath('model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    flags_str = ('--graph_def_file={0} --input_arrays={1} '
                 '--output_arrays={2}'.format(graph_def_file, 'random', 'add'))
    self._run(flags_str, should_succeed=True)
    os.remove(graph_def_file)
Example #23
  def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._timer.last_triggered_step() is None:
      # Write graph in the first call.
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._saver.saver_def if self._saver else None
      graph = ops.get_default_graph()
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph.as_graph_def(add_shapes=True),
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)

    return SessionRunArgs(self._global_step_tensor)
Example #24
 def before_run(self, run_context):
     if self._timer.last_triggered_step() is None:
         # We write the graph and saver_def at the first call of before_run.
         # We cannot do this in begin, since we let other hooks change the graph
         # and add variables in begin. The graph is finalized after all begin calls.
         training_util.write_graph(
             ops.get_default_graph().as_graph_def(add_shapes=True),
             self._checkpoint_dir, "graph.pbtxt")
         graph = ops.get_default_graph()
         meta_graph_def = meta_graph.create_meta_graph_def(
             graph_def=graph.as_graph_def(add_shapes=True),
             saver_def=self._saver.saver_def)
         self._summary_writer.add_graph(graph)
         self._summary_writer.add_meta_graph(meta_graph_def)
     requests = {"global_steps": self._global_step_tensor}
     return SessionRunArgs(requests)
Example #25
    def before_run(self, run_context):  # pylint: disable=unused-argument
        if self._timer.last_triggered_step() is None:
            # We write the graph and saver_def at the first call of before_run.
            # We cannot do this in begin, since we let other hooks change the graph
            # and add variables in begin. The graph is finalized after all begin calls.
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")
            saver_def = (self._get_saver().saver_def
                         if self._get_saver() else None)
            graph = ops.get_default_graph()
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=saver_def)

        return SessionRunArgs(self._global_step_tensor)
Example #26
def run_training():
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=[None, 784])
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        keep_prob = tf.placeholder(tf.float32)

        y_conv = inference(x, keep_prob)
        cross_entropy = loss(y_conv, y_)
        train_step = training(cross_entropy)
        accuracy = validation(y_conv, y_)

        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()

        sess = tf.InteractiveSession()
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()

        training_util.write_graph(sess.graph.as_graph_def(add_shapes=True),
                                  FLAGS.train_dir, "graph.pbtxt")

        for step in range(2000):
            batch = mnist.train.next_batch(50)
            feed_dict = {x: batch[0], y_: batch[1], keep_prob: 0.5}
            _, loss_value = sess.run([train_step, cross_entropy],
                                     feed_dict=feed_dict)

            if step % 500 == 0:
                saver.save(sess, os.path.join(FLAGS.train_dir, "model.ckpt"),
                           step)
                train_accuracy = accuracy.eval(feed_dict=feed_dict)
                print("step %d, loss_value %.2f,training accuracy %g" %
                      (step, loss_value, train_accuracy))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
        # Test
        print("test accuracy %g" % accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            keep_prob: 1.0
        }))
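The checkpoints that `run_training` writes every 500 steps can be reloaded with the same `Saver`; a brief sketch, assuming the identical graph has been rebuilt first:

# Restore the most recent checkpoint written to FLAGS.train_dir.
ckpt_path = tf.train.latest_checkpoint(FLAGS.train_dir)
if ckpt_path:
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, ckpt_path)  # repopulates all saved variables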
Example #27
 def after_create_session(self, session, coord):
     global_step = session.run(self._global_step_tensor)
     # We write the graph and saver_def at the first call of before_run.
     # We cannot do this in begin, since we let other hooks change the graph
     # and add variables in begin. The graph is finalized after all begin calls.
     training_util.write_graph(
         ops.get_default_graph().as_graph_def(add_shapes=True),
         self._checkpoint_dir, "graph.pbtxt")
     saver_def = self._get_saver().saver_def if self._get_saver() else None
     graph = ops.get_default_graph()
     meta_graph_def = meta_graph.create_meta_graph_def(
         graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
     self._summary_writer.add_graph(graph)
     self._summary_writer.add_meta_graph(meta_graph_def)
     # The checkpoint saved here is the state at step "global_step".
     self._save(session, global_step)
     self._timer.update_last_triggered_step(global_step)
Example #28
 def after_create_session(self, session, coord):
   global_step = session.run(self._global_step_tensor)
   # We write the graph and saver_def at the first call of before_run.
   # We cannot do this in begin, since we let other hooks change the graph
   # and add variables in begin. The graph is finalized after all begin calls.
   training_util.write_graph(
       ops.get_default_graph().as_graph_def(add_shapes=True),
       self._checkpoint_dir, "graph.pbtxt")
   saver_def = self._get_saver().saver_def if self._get_saver() else None
   graph = ops.get_default_graph()
   meta_graph_def = meta_graph.create_meta_graph_def(
       graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
   self._summary_writer.add_graph(graph)
   self._summary_writer.add_meta_graph(meta_graph_def)
   # The checkpoint saved here is the state at step "global_step".
   self._save(session, global_step)
   self._timer.update_last_triggered_step(global_step)
Example #29
  def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._timer.last_triggered_step() is None:
      # We write the graph and saver_def at the first call of before_run.
      # We cannot do this in begin, since we let other hooks change the graph
      # and add variables in begin. The graph is finalized after all begin calls.
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._get_saver().saver_def if self._get_saver() else None
      graph = ops.get_default_graph()
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph.as_graph_def(add_shapes=True),
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)

    return SessionRunArgs(self._global_step_tensor)
Example #30
    def testFrozenGraphDefWithLegacyConverter(self):
        with ops.Graph().as_default():
            in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                              dtype=dtypes.float32)
            _ = in_tensor + in_tensor
            sess = session.Session()

        # Write graph to file.
        graph_def_file = self._getFilepath('model.pb')
        write_graph(sess.graph_def, '', graph_def_file, False)
        sess.close()

        flags_str = (
            '--graph_def_file={0} --input_arrays={1} '
            '--output_arrays={2} --experimental_new_converter=false'.format(
                graph_def_file, 'Placeholder', 'add'))
        self._run(flags_str, should_succeed=True)
        os.remove(graph_def_file)
Example #31
  def testFreezeGraph(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + var
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)

    # Ensure the graph with variables cannot be converted.
    with self.assertRaises(ValueError) as error:
      lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
                                           ['add'])
    self.assertEqual('Please freeze the graph using freeze_graph.py',
                     str(error.exception))
Example #32
  def testFreezeGraph(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + var
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)

    # Ensure the graph with variables cannot be converted.
    with self.assertRaises(ValueError) as error:
      lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
                                           ['add'])
    self.assertEqual('Please freeze the graph using freeze_graph.py',
                     str(error.exception))
Example #33
    def before_run(self, run_context):
        """ Dumps graphs and loads checkpoint if there exits.

        Called before each call to run().

        Args:
            run_context: A `SessionRunContext` object.

        Returns: A `SessionRunArgs` object containing global_step.
        """
        # We write the graph and saver_def at the first call of before_run.
        # We cannot do this in begin, since we let other hooks change the graph
        # and add variables in begin. The graph is finalized after all begin calls.
        if self._is_chief and self._first_call:
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")
            # dump model details "model_analysis.txt"
            dump_model_analysis(self._checkpoint_dir)  # dump model configs
            graph = ops.get_default_graph()
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=self._saver.saver_def)
            if self._summary_writer is not None:
                self._summary_writer.add_graph(graph)
                self._summary_writer.add_meta_graph(meta_graph_def)
            tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
        checkpoint_path = saver_lib.latest_checkpoint(self._checkpoint_dir)
        if self._first_call:
            if checkpoint_path:
                # reloading model
                self._saver.restore(run_context.session, checkpoint_path)
                gs = run_context.session.run(self._global_step)
                tf.logging.info(
                    "CheckpointSaverHook (before_run): reloading models and reset global_step={}"
                    .format(gs))
                StepTimer.reset_init_triggered_step(gs)
            elif self._reload_var_ops:
                tf.logging.info(
                    "Assign all variables with pretrained variables.")
                run_context.session.run(self._reload_var_ops)
        self._first_call = False
        self._timer.register_before_run()
        return tf.train.SessionRunArgs(self._global_step)
Example #34
  def testFloatTocoConverter(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
                                                     ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
Example #35
  def testFloatTocoConverter(self):
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
                                                     ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
Example #36
 def after_create_session(self, session, coord):
     del coord
     # Ensure summary writer resource has been initialized.
     session.run(summary_ops_v2.summary_writer_initializer_op())
     global_step = session.run(self._global_step_tensor)
     # Write graph and saver_def once graph is finalized, which isn't true yet
     # in begin() since later hooks can still change the graph.
     training_util.write_graph(
         ops.get_default_graph().as_graph_def(add_shapes=True),
         self._checkpoint_dir, "graph.pbtxt")
     saver_def = self._get_saver().saver_def if self._get_saver() else None
     graph = ops.get_default_graph()
     meta_graph_def = meta_graph.create_meta_graph_def(
         graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
     with ops.default_session(session):
         self._summary_writer.add_graph(graph)
         self._summary_writer.add_meta_graph(meta_graph_def)
     # The checkpoint saved here is the state at step "global_step".
     self._save(session, global_step)
     self._timer.update_last_triggered_step(global_step)
Example #37
 def _write_graph_fn(self):
   training_util.write_graph(
       ops.get_default_graph().as_graph_def(add_shapes=True),
       self._checkpoint_dir, "graph.pbtxt")
Example #38
 def _saveFrozenGraph(self, sess):
   graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
   write_graph(sess.graph_def, '', graph_def_file, False)
   return graph_def_file
Example #39
 def _saveFrozenGraph(self, sess):
   graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
   write_graph(sess.graph_def, '', graph_def_file, False)
   return graph_def_file
Example #40
def export_scoped_meta_graph(filename=None,
                             graph_def=None,
                             graph=None,
                             export_scope=None,
                             as_text=False,
                             unbound_inputs_col_name="unbound_inputs",
                             clear_devices=False,
                             **kwargs):
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    graph: The `Graph` to import into. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported. graph_def and export_scope cannot both be specified.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    unbound_inputs_col_name: Optional `string`. If provided, a string collection
      with the given name will be added to the returned `MetaGraphDef`,
      containing the names of tensors that must be remapped when importing the
      `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      before exporting the graph.
    **kwargs: Optional keyword arguments, including meta_info_def,
      saver_def, collection_list.

  Returns:
    A `MetaGraphDef` proto and dictionary of `Variables` in the exported
    name scope.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
  """
  graph = graph or ops.get_default_graph()
  unbound_inputs = []
  if export_scope or clear_devices:
    if graph_def:
      new_graph_def = graph_pb2.GraphDef()
      new_graph_def.versions.CopyFrom(graph_def.versions)
      for node_def in graph_def.node:
        if _should_include_node(node_def.name, export_scope):
          new_node_def = _node_def(node_def, export_scope, unbound_inputs,
                                   clear_devices=clear_devices)
          new_graph_def.node.extend([new_node_def])
      graph_def = new_graph_def
    else:
      # Only do this complicated work if we want to remove a name scope.
      graph_def = graph_pb2.GraphDef()
      # pylint: disable=protected-access
      graph_def.versions.CopyFrom(graph.graph_def_versions)
      bytesize = 0
      for key in sorted(graph._nodes_by_id):
        if _should_include_node(graph._nodes_by_id[key].name, export_scope):
          value = graph._nodes_by_id[key]
      # pylint: enable=protected-access
          node_def = _node_def(value.node_def, export_scope, unbound_inputs,
                               clear_devices=clear_devices)
          graph_def.node.extend([node_def])
          if value.outputs:
            assert "_output_shapes" not in graph_def.node[-1].attr
            graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
                output.get_shape().as_proto() for output in value.outputs])
          bytesize += value.node_def.ByteSize()
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
    # It's possible that not all the inputs are in the export_scope.
    # If we would like such information included in the exported meta_graph,
    # add them to a special unbound_inputs collection.
    if unbound_inputs_col_name:
      # Clears the unbound_inputs collections.
      graph.clear_collection(unbound_inputs_col_name)
      for k in unbound_inputs:
        graph.add_to_collection(unbound_inputs_col_name, k)

  var_list = {}
  variables = graph.get_collection(ops.GraphKeys.VARIABLES,
                                   scope=export_scope)
  for v in variables:
    if _should_include_node(v, export_scope):
      var_list[ops.strip_name_scope(v.name, export_scope)] = v

  scoped_meta_graph_def = create_meta_graph_def(
      graph_def=graph_def,
      graph=graph,
      export_scope=export_scope,
      **kwargs)

  if filename:
    training_util.write_graph(
        scoped_meta_graph_def,
        os.path.dirname(filename),
        os.path.basename(filename),
        as_text=as_text)

  return scoped_meta_graph_def, var_list
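A hedged usage sketch of `export_scoped_meta_graph`; the `model` scope name and the output filename are illustrative, not taken from the original code:

from tensorflow.python.framework import meta_graph

# Assumes the default graph contains ops built under the 'model/' name scope.
scoped_meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
    filename='/tmp/scoped.meta',  # illustrative path
    export_scope='model',         # this prefix is stripped from node names
    as_text=True)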
Example #41
def export_scoped_meta_graph(filename=None,
                             graph_def=None,
                             graph=None,
                             export_scope=None,
                             as_text=False,
                             unbound_inputs_col_name="unbound_inputs",
                             **kwargs):
    """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    graph: The `Graph` to import into. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported. graph_def and export_scope cannot both be specified.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    unbound_inputs_col_name: Optional `string`. If provided, a string collection
      with the given name will be added to the returned `MetaGraphDef`,
      containing the names of tensors that must be remapped when importing the
      `MetaGraphDef`.
    **kwargs: Optional keyword arguments, including meta_info_def,
      saver_def, collection_list.

  Returns:
    A `MetaGraphDef` proto and dictionary of `Variables` in the exported
    name scope.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
  """
    graph = graph or ops.get_default_graph()
    if graph_def and export_scope:
        raise ValueError("graph_def and export_scope cannot both "
                         "be specified.")

    if graph_def is None and export_scope:
        unbound_inputs = []
        # Only do this complicated work if we want to remove a name scope.
        graph_def = graph_pb2.GraphDef()
        # pylint: disable=protected-access
        graph_def.versions.CopyFrom(graph._graph_def_versions)
        bytesize = 0
        for key in sorted(graph._nodes_by_name):
            if _should_include_node(key, export_scope):
                value = graph._nodes_by_name[key]
                # pylint: enable=protected-access
                graph_def.node.extend(
                    [_node_def(value.node_def, export_scope, unbound_inputs)])
                if value.outputs:
                    assert "_output_shapes" not in graph_def.node[-1].attr
                    graph_def.node[-1].attr[
                        "_output_shapes"].list.shape.extend([
                            output.get_shape().as_proto()
                            for output in value.outputs
                        ])
                bytesize += value.node_def.ByteSize()
                if bytesize >= (1 << 31) or bytesize < 0:
                    raise ValueError("GraphDef cannot be larger than 2GB.")

        # It's possible that not all the inputs are in the export_scope.
        # If we would like such information included in the exported meta_graph,
        # add them to a special unbound_inputs collection.
        if unbound_inputs_col_name:
            # Clears the unbound_inputs collections.
            graph.clear_collection(unbound_inputs_col_name)
            for k in unbound_inputs:
                graph.add_to_collection(unbound_inputs_col_name, k)

    var_list = {}
    variables = graph.get_collection(ops.GraphKeys.VARIABLES,
                                     scope=export_scope)
    for v in variables:
        if _should_include_node(v, export_scope):
            var_list[ops.strip_name_scope(v.name, export_scope)] = v

    scoped_meta_graph_def = create_meta_graph_def(graph_def=graph_def,
                                                  graph=graph,
                                                  export_scope=export_scope,
                                                  **kwargs)

    if filename:
        training_util.write_graph(scoped_meta_graph_def,
                                  os.path.dirname(filename),
                                  os.path.basename(filename),
                                  as_text=as_text)

    return scoped_meta_graph_def, var_list
Example #42
 def _write_graph_fn(self):
     training_util.write_graph(
         ops.get_default_graph().as_graph_def(add_shapes=True),
         self._checkpoint_dir, "graph.pbtxt")