Example 1
  def testSummaryGlobalStep(self):
    step = training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t2').as_default(), summary_ops.always_record_summaries():

      summary_ops.scalar('scalar', 2.0, step=step)

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'scalar')
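
The snippets on this page are test methods that call summary_ops.scalar together with a file writer. For orientation, roughly the same write with the public tf.summary API of TF 2.x would look like the sketch below; the logdir path and variable names are placeholders, not part of the original tests:

import tensorflow as tf

logdir = '/tmp/summaries'  # placeholder path
writer = tf.summary.create_file_writer(logdir, max_queue=0)
step = tf.Variable(0, dtype=tf.int64)  # plays the role of the global step

with writer.as_default():
  tf.summary.scalar('scalar', 2.0, step=step)
writer.flush()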
Example 2
 def testMaxQueue(self):
   logs = tempfile.mkdtemp()
   with summary_ops.create_file_writer(
       logs, max_queue=2, flush_millis=999999,
       name='lol').as_default(), summary_ops.always_record_summaries():
     get_total = lambda: len(summary_test_util.events_from_logdir(logs))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=1)
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=2)
     self.assertEqual(3, get_total())
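
The two tests above read events back through the test helper summary_test_util.events_from_logdir. Outside a TensorFlow test environment, a rough stand-in can be written with the public summary_iterator; the helper below is hypothetical and assumes the log directory holds exactly one event file:

import glob
import os

import tensorflow as tf

def events_from_logdir(logdir):
  # Hypothetical stand-in for summary_test_util.events_from_logdir:
  # returns every tf.Event record found in the single event file in logdir.
  files = glob.glob(os.path.join(logdir, '*'))
  assert len(files) == 1, 'expected exactly one event file'
  return list(tf.compat.v1.train.summary_iterator(files[0]))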
Example 3
 def testSummaryOps(self):
   training_util.get_or_create_global_step()
   logdir = tempfile.mkdtemp()
   summary_ops.create_summary_file_writer(logdir, max_queue=0, name='t0')
   summary_ops.always_record_summaries()
   summary_ops.generic('tensor', 1, '')
   summary_ops.scalar('scalar', 2.0)
   summary_ops.histogram('histogram', [1.0])
   summary_ops.image('image', [[[[1.0]]]])
   summary_ops.audio('audio', [[1.0]], 1.0, 1)
   # The working condition of the ops is tested in the C++ test so we just
   # test here that we're calling them correctly.
   self.assertTrue(gfile.Exists(logdir))
Example 4
    def testScalarSummary(self):
        """Test record_summaries_every_n_global_steps and all_summaries()."""
        with ops.Graph().as_default(), self.test_session() as sess:
            global_step = training_util.get_or_create_global_step()
            global_step.initializer.run()
            with ops.device('/cpu:0'):
                step_increment = state_ops.assign_add(global_step, 1)
            sess.run(step_increment)  # Increment global step from 0 to 1

            logdir = tempfile.mkdtemp()
            with summary_ops.create_file_writer(logdir, max_queue=0,
                                                name='t2').as_default():
                with summary_ops.record_summaries_every_n_global_steps(2):
                    summary_ops.initialize()
                    summary_op = summary_ops.scalar('my_scalar', 2.0)

                    # Neither of these should produce a summary because
                    # global_step is 1 and "1 % 2 != 0"
                    sess.run(summary_ops.all_summary_ops())
                    sess.run(summary_op)
                    events = summary_test_util.events_from_logdir(logdir)
                    self.assertEqual(len(events), 1)

                    # Increment global step from 1 to 2 and check that the summary
                    # is now written
                    sess.run(step_increment)
                    sess.run(summary_ops.all_summary_ops())
                    events = summary_test_util.events_from_logdir(logdir)
                    self.assertEqual(len(events), 2)
                    self.assertEqual(events[1].summary.value[0].tag,
                                     'my_scalar')
Example 5
  def testSummaryName(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(logdir, max_queue=0, name='t2')
    summary_ops.always_record_summaries()

    summary_ops.scalar('scalar', 2.0)

    self.assertTrue(gfile.Exists(logdir))
    files = gfile.ListDirectory(logdir)
    self.assertEqual(len(files), 1)
    records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
    self.assertEqual(len(records), 2)
    event = event_pb2.Event()
    event.ParseFromString(records[1])
    self.assertEqual(event.summary.value[0].tag, 'scalar')
Example 6
  def testScalarSummary(self):
    """Test record_summaries_every_n_global_steps and all_summaries()."""
    with ops.Graph().as_default(), self.test_session() as sess:
      global_step = training_util.get_or_create_global_step()
      global_step.initializer.run()
      with ops.device('/cpu:0'):
        step_increment = state_ops.assign_add(global_step, 1)
      sess.run(step_increment)  # Increment global step from 0 to 1

      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(logdir, max_queue=0,
                                          name='t2').as_default():
        with summary_ops.record_summaries_every_n_global_steps(2):
          summary_ops.initialize()
          summary_op = summary_ops.scalar('my_scalar', 2.0)

          # Neither of these should produce a summary because
          # global_step is 1 and "1 % 2 != 0"
          sess.run(summary_ops.all_summary_ops())
          sess.run(summary_op)
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 1)

          # Increment global step from 1 to 2 and check that the summary
          # is now written
          sess.run(step_increment)
          sess.run(summary_ops.all_summary_ops())
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 2)
          self.assertEqual(events[1].summary.value[0].tag, 'my_scalar')
Example 7
 def write():
   summary_ops.scalar('scalar', 2.0)
Example 8
 def body(unused_pred):
     summary_ops.scalar('scalar', 2.0)
     return constant_op.constant(False)
Example 9
 def f():
     summary_ops.scalar('scalar', 2.0)
     return constant_op.constant(True)
Example 10
 def f():
   summary_ops.scalar('scalar', 2.0)
   return constant_op.constant(True)
Example 11
 def body(unused_pred):
   summary_ops.scalar('scalar', 2.0)
   return constant_op.constant(False)
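
The short write(), body(), and f() fragments above are function bodies that the enclosing tests presumably wrap in defuns or control-flow constructs (the surrounding test code is not shown here). As a point of comparison, emitting a scalar from inside a traced function with the public TF 2.x API follows roughly this shape; the writer, logdir, and function name are illustrative:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/summaries')  # placeholder logdir

@tf.function
def write_scalar(step):
  with writer.as_default():
    tf.summary.scalar('scalar', 2.0, step=step)  # one event per call

for i in tf.range(3, dtype=tf.int64):
  write_scalar(i)
writer.flush()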
Example 12
 def result(self):
   t = self.numer / self.denom
   summary_ops.scalar(name=self.name, tensor=t)
   return t
Example 13
 def result(self):
     t = self.numer / self.denom
     summary_ops.scalar(name=self.name, tensor=t)
     return t
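
The result() snippets appear to come from a metric-style class that tracks a numerator and denominator and logs their ratio as a scalar when the result is read. A hypothetical minimal class built around that pattern could look like the sketch below; the class name and update() method are assumptions, and summary_ops is assumed to be imported and used under an active default writer as in the tests above:

class RunningMean(object):
  # Hypothetical metric illustrating the result() pattern above.
  # Assumes `summary_ops` is imported as in the surrounding tests and that
  # a default writer with recording enabled is active when result() is called.

  def __init__(self, name='mean'):
    self.name = name
    self.numer = 0.0
    self.denom = 0.0

  def update(self, value):
    # Accumulate a running sum and count.
    self.numer += value
    self.denom += 1.0

  def result(self):
    t = self.numer / self.denom
    summary_ops.scalar(name=self.name, tensor=t)  # same call as in the examples
    return t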