Example #1
0
 def testInvalidDirectory(self):
     """A writer pointed at a nonexistent directory must fail fast."""
     bad_path = '/tmp/apath/that/doesnt/exist'
     self.assertFalse(gfile.Exists(bad_path))
     with self.assertRaises(errors.NotFoundError):
         summary_ops.create_summary_file_writer(
             bad_path, max_queue=0, name='t0')
Example #2
0
 def testSummaryOps(self):
   """Smoke-test every summary op's Python binding with minimal inputs."""
   training_util.get_or_create_global_step()
   log_dir = tempfile.mkdtemp()
   summary_ops.create_summary_file_writer(log_dir, max_queue=0, name='t0')
   summary_ops.always_record_summaries()
   # The working condition of the ops is tested in the C++ test so we just
   # test here that we're calling them correctly.
   summary_ops.generic('tensor', 1, '')
   summary_ops.scalar('scalar', 2.0)
   summary_ops.histogram('histogram', [1.0])
   summary_ops.image('image', [[[[1.0]]]])
   summary_ops.audio('audio', [[1.0]], 1.0, 1)
   self.assertTrue(gfile.Exists(log_dir))
 def testSummaryOps(self):
   """Invoke one summary op of each kind to validate the call signatures."""
   training_util.get_or_create_global_step()
   workdir = tempfile.mkdtemp()
   summary_ops.create_summary_file_writer(workdir, max_queue=0, name='t0')
   summary_ops.always_record_summaries()
   summary_ops.generic('tensor', 1, '')
   summary_ops.scalar('scalar', 2.0)
   summary_ops.histogram('histogram', [1.0])
   summary_ops.image('image', [[[[1.0]]]])
   summary_ops.audio('audio', [[1.0]], 1.0, 1)
   # The working condition of the ops is tested in the C++ test so we just
   # test here that we're calling them correctly.
   self.assertTrue(gfile.Exists(workdir))
  def testSummaryName(self):
    """The tag stored in the event file must match the summary op's name."""
    training_util.get_or_create_global_step()
    event_dir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(event_dir, max_queue=0, name='t2')
    summary_ops.always_record_summaries()

    summary_ops.scalar('scalar', 2.0)

    self.assertTrue(gfile.Exists(event_dir))
    event_files = gfile.ListDirectory(event_dir)
    self.assertEqual(len(event_files), 1)
    record_path = os.path.join(event_dir, event_files[0])
    records = list(tf_record.tf_record_iterator(record_path))
    # Note: First tf.Event is always file_version; records[1] is the scalar.
    self.assertEqual(len(records), 2)
    event = event_pb2.Event()
    event.ParseFromString(records[1])
    self.assertEqual(event.summary.value[0].tag, 'scalar')
Example #5
0
  def testSummaryName(self):
    """Writing a scalar summary records it under the tag 'scalar'."""
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(logdir, max_queue=0, name='t2')
    summary_ops.always_record_summaries()

    summary_ops.scalar('scalar', 2.0)

    self.assertTrue(gfile.Exists(logdir))
    files = gfile.ListDirectory(logdir)
    self.assertEqual(len(files), 1)
    records = list(
        tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
    self.assertEqual(len(records), 2)
    parsed = event_pb2.Event()
    parsed.ParseFromString(records[1])
    self.assertEqual(parsed.summary.value[0].tag, 'scalar')
Example #6
0
    def testSummaryGlobalStep(self):
        """A scalar written with an explicit global_step lands in the log."""
        step_tensor = training_util.get_or_create_global_step()
        out_dir = tempfile.mkdtemp()
        writer = summary_ops.create_summary_file_writer(
            out_dir, max_queue=0, name='t2')
        with writer.as_default(), summary_ops.always_record_summaries():

            summary_ops.scalar('scalar', 2.0, global_step=step_tensor)

            events = summary_test_util.events_from_logdir(out_dir)
            # events[0] is the file_version record; events[1] is our scalar.
            self.assertEqual(len(events), 2)
            self.assertEqual(events[1].summary.value[0].tag, 'scalar')
  def testSummaryGlobalStep(self):
    """scalar() accepts an explicit step tensor and still records the tag."""
    global_step = training_util.get_or_create_global_step()
    out_dir = tempfile.mkdtemp()
    writer = summary_ops.create_summary_file_writer(
        out_dir, max_queue=0, name='t2')
    with writer.as_default(), summary_ops.always_record_summaries():

      summary_ops.scalar('scalar', 2.0, step=global_step)

      events = summary_test_util.events_from_logdir(out_dir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'scalar')
 def testMaxQueue(self):
   """Events stay buffered until max_queue overflows, then all flush."""
   logdir = tempfile.mkdtemp()
   writer = summary_ops.create_summary_file_writer(
       logdir, max_queue=2, flush_millis=999999, name='lol')
   with writer.as_default(), summary_ops.always_record_summaries():

     def get_total():
       return len(summary_test_util.events_from_logdir(logdir))

     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=1)
     # Queue (size 2) not yet full: the scalar is still buffered.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=2)
     # Second scalar overflows the queue, forcing a flush of all events.
     self.assertEqual(3, get_total())
Example #9
0
  def testWriteSummaries(self):
    """Mean.result() should emit its value as a summary as a side effect."""
    mean = metrics.Mean()
    mean([1, 10, 100])
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    writer = summary_ops.create_summary_file_writer(
        logdir, max_queue=0, name="t0")
    with writer.as_default(), summary_ops.always_record_summaries():
      mean.result()  # As a side-effect will write summaries.

    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 2)
    # mean([1, 10, 100]) == 37.0
    self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
Example #10
0
    def testDefunSummarys(self):
        """Summary ops inside a defun-compiled function still write events."""
        training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        writer = summary_ops.create_summary_file_writer(
            logdir, max_queue=0, name='t1')
        with writer.as_default(), summary_ops.always_record_summaries():

            @function.defun
            def record_scalar():
                summary_ops.scalar('scalar', 2.0)

            record_scalar()
            events = summary_test_util.events_from_logdir(logdir)
            self.assertEqual(len(events), 2)
            self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
  def testDefunSummarys(self):
    """A scalar written from within function.defun reaches the event file."""
    training_util.get_or_create_global_step()
    output_dir = tempfile.mkdtemp()
    with summary_ops.create_summary_file_writer(
        output_dir, max_queue=0,
        name='t1').as_default(), summary_ops.always_record_summaries():

      @function.defun
      def emit():
        summary_ops.scalar('scalar', 2.0)

      emit()
      events = summary_test_util.events_from_logdir(output_dir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
Example #12
0
    def testWriteSummaries(self):
        """Metric results written under a default writer are serialized."""
        mean_metric = metrics.Mean()
        mean_metric([1, 10, 100])
        training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        with summary_ops.create_summary_file_writer(
                logdir, max_queue=0,
                name="t0").as_default(), summary_ops.always_record_summaries():
            mean_metric.result()  # As a side-effect will write summaries.

        self.assertTrue(gfile.Exists(logdir))
        event_files = gfile.ListDirectory(logdir)
        self.assertEqual(len(event_files), 1)
        record_path = os.path.join(logdir, event_files[0])
        records = list(tf_record.tf_record_iterator(record_path))
        self.assertEqual(len(records), 2)
        event = event_pb2.Event()
        event.ParseFromString(records[1])
        # mean([1, 10, 100]) == 37.0
        self.assertEqual(event.summary.value[0].simple_value, 37.0)
  def testWriteSummaries(self):
    """Mean.result() writes a simple_value event equal to the mean (37.0)."""
    m = metrics.Mean()
    m([1, 10, 100])
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    writer = summary_ops.create_summary_file_writer(
        logdir, max_queue=0, name="t0")
    with writer.as_default(), summary_ops.always_record_summaries():
      m.result()  # As a side-effect will write summaries.

    self.assertTrue(gfile.Exists(logdir))
    files = gfile.ListDirectory(logdir)
    self.assertEqual(len(files), 1)
    records = list(
        tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
    self.assertEqual(len(records), 2)
    parsed_event = event_pb2.Event()
    parsed_event.ParseFromString(records[1])
    self.assertEqual(parsed_event.summary.value[0].simple_value, 37.0)
  def testSummaryGraphModeCond(self):
    """Summaries emitted inside a graph-mode cond get a 'cond/' tag prefix."""
    with ops.Graph().as_default(), self.test_session():
      training_util.get_or_create_global_step()
      logdir = tempfile.mkdtemp()
      with summary_ops.create_summary_file_writer(
          logdir, max_queue=0,
          name='t2').as_default(), summary_ops.always_record_summaries():
        # Summary writer ops and the global step must be initialized before
        # anything is evaluated in graph mode.
        summary_ops.initialize()
        training_util.get_or_create_global_step().initializer.run()

        def true_branch():
          summary_ops.scalar('scalar', 2.0)
          return constant_op.constant(True)

        take_branch = array_ops.placeholder(dtypes.bool)
        result = control_flow_ops.cond(
            take_branch, true_branch, lambda: constant_op.constant(False))
        result.eval(feed_dict={take_branch: True})

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      # The scalar's tag is namespaced by the cond construct.
      self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
  def testDefunSummarys(self):
    """Events from a defun-wrapped summary op appear in the record file."""
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    writer = summary_ops.create_summary_file_writer(
        logdir, max_queue=0, name='t1')
    with writer.as_default(), summary_ops.always_record_summaries():

      @function.defun
      def write():
        summary_ops.scalar('scalar', 2.0)

      write()

      self.assertTrue(gfile.Exists(logdir))
      event_files = gfile.ListDirectory(logdir)
      self.assertEqual(len(event_files), 1)
      records = list(
          tf_record.tf_record_iterator(
              os.path.join(logdir, event_files[0])))
      # Note: First tf.Event is always file_version; records[1] is the scalar.
      self.assertEqual(len(records), 2)
      event = event_pb2.Event()
      event.ParseFromString(records[1])
      self.assertEqual(event.summary.value[0].simple_value, 2.0)
Example #16
0
 def f():
     """Collect all metric results while a summary writer is active."""
     writer = summary_ops.create_summary_file_writer(summary_logdir)
     with writer.as_default(), summary_ops.always_record_summaries():
         return self._all_metric_results()
Example #17
0
 def f():
   """Return all metric results, recording summaries to summary_logdir."""
   default_writer = summary_ops.create_summary_file_writer(
       summary_logdir).as_default()
   with default_writer, summary_ops.always_record_summaries():
     return self._all_metric_results()
Example #18
0
 def testInvalidDirectory(self):
   """create_summary_file_writer must raise NotFoundError for missing dirs."""
   missing = '/tmp/apath/that/doesnt/exist'
   self.assertFalse(gfile.Exists(missing))
   with self.assertRaises(errors.NotFoundError):
     summary_ops.create_summary_file_writer(
         missing, max_queue=0, name='t0')