  def testSummaryOps(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(logdir, max_queue=0, name='t0')
    summary_ops.always_record_summaries()
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
    # The working condition of the ops is tested in the C++ test so we just
    # test here that we're calling them correctly.
    self.assertTrue(gfile.Exists(logdir))
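The test above calls the internal summary_ops module directly. As a minimal sketch, assuming eager execution, the same writes through the public TF 2.x tf.summary API look roughly like this; the temporary log directory mirrors the test and nothing here is asserted by the original code:

import tempfile
import tensorflow as tf

logdir = tempfile.mkdtemp()
writer = tf.summary.create_file_writer(logdir, max_queue=0)
with writer.as_default():
  tf.summary.scalar('scalar', 2.0, step=0)
  tf.summary.histogram('histogram', [1.0], step=0)
  tf.summary.image('image', [[[[1.0]]]], step=0)  # shape [batch, height, width, channels]
writer.flush()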
Example #2
    def testIntegerSummaries(self):
        step = training_util.create_global_step()
        writer = self.create_db_writer()

        def adder(x, y):
            state_ops.assign_add(step, 1)
            summary_ops.generic('x', x)
            summary_ops.generic('y', y)
            sum_ = x + y
            summary_ops.generic('sum', sum_)
            return sum_

        with summary_ops.always_record_summaries():
            with writer.as_default():
                self.assertEqual(5, adder(int64(2), int64(3)).numpy())

        six.assertCountEqual(
            self, [1, 1, 1],
            get_all(self.db,
                    'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
        six.assertCountEqual(self, ['x', 'y', 'sum'],
                             get_all(self.db, 'SELECT tag_name FROM Tags'))
        x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"')
        y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"')
        sum_id = get_one(self.db,
                         'SELECT tag_id FROM Tags WHERE tag_name = "sum"')

        with summary_ops.always_record_summaries():
            with writer.as_default():
                self.assertEqual(9, adder(int64(4), int64(5)).numpy())

        six.assertCountEqual(
            self, [1, 1, 1, 2, 2, 2],
            get_all(self.db,
                    'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
        six.assertCountEqual(self, [x_id, y_id, sum_id],
                             get_all(self.db, 'SELECT tag_id FROM Tags'))
        self.assertEqual(2, get_tensor(self.db, x_id, 1))
        self.assertEqual(3, get_tensor(self.db, y_id, 1))
        self.assertEqual(5, get_tensor(self.db, sum_id, 1))
        self.assertEqual(4, get_tensor(self.db, x_id, 2))
        self.assertEqual(5, get_tensor(self.db, y_id, 2))
        self.assertEqual(9, get_tensor(self.db, sum_id, 2))
        six.assertCountEqual(
            self, ['experiment'],
            get_all(self.db, 'SELECT experiment_name FROM Experiments'))
        six.assertCountEqual(self, ['run'],
                             get_all(self.db, 'SELECT run_name FROM Runs'))
        six.assertCountEqual(self, ['user'],
                             get_all(self.db, 'SELECT user_name FROM Users'))
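get_all and get_one above are small helpers from the test module that run a SQL query against the SQLite database behind the db writer. A hypothetical stand-in for them, only to show that plain sqlite3 is enough to inspect the Tags and Tensors tables the assertions query; the database path is illustrative:

import sqlite3

def get_all(db, query):
  # db is assumed to be an open sqlite3.Connection; returns the first
  # column of every row, matching how the assertions above use the result.
  return [row[0] for row in db.execute(query)]

def get_one(db, query):
  return db.execute(query).fetchone()[0]

db = sqlite3.connect('/tmp/tensorboard.sqlite')  # a database previously populated by a db writer
print(get_all(db, 'SELECT tag_name FROM Tags'))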
Example #3
  def testSummaryName(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(logdir, max_queue=0, name='t2')
    summary_ops.always_record_summaries()

    summary_ops.scalar('scalar', 2.0)

    self.assertTrue(gfile.Exists(logdir))
    files = gfile.ListDirectory(logdir)
    self.assertEqual(len(files), 1)
    records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
    self.assertEqual(len(records), 2)
    event = event_pb2.Event()
    event.ParseFromString(records[1])
    self.assertEqual(event.summary.value[0].tag, 'scalar')
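The tf_record/event_pb2 plumbing above can also be replaced by the public summary_iterator when reading events back. A sketch, assuming a TF installation and reusing the logdir written above:

import os
import tensorflow as tf

path = os.path.join(logdir, os.listdir(logdir)[0])
for event in tf.compat.v1.train.summary_iterator(path):
  for value in event.summary.value:
    print(event.step, value.tag)  # the second record carries the 'scalar' tag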
Example #4
  def testGraphSummary(self):
    training_util.get_or_create_global_step()
    name = 'hi'
    graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
    with summary_ops.always_record_summaries():
      with self.create_db_writer().as_default():
        summary_ops.graph(graph)
    six.assertCountEqual(self, [name],
                         get_all(self.db, 'SELECT node_name FROM Nodes'))
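graph_pb2 and node_def_pb2 are TensorFlow's generated proto modules; the same one-node GraphDef can be built through the public compat.v1 surface, sketched here for reference:

import tensorflow as tf

graph = tf.compat.v1.GraphDef()
graph.node.add(name='hi')  # equivalent to GraphDef(node=(NodeDef(name='hi'),))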
Example #5
  def testIntegerSummaries(self):
    step = training_util.create_global_step()

    def adder(x, y):
      state_ops.assign_add(step, 1)
      summary_ops.generic('x', x)
      summary_ops.generic('y', y)
      sum_ = x + y
      summary_ops.generic('sum', sum_)
      return sum_

    with summary_ops.always_record_summaries():
      with self.create_db_writer().as_default():
        self.assertEqual(5, adder(int64(2), int64(3)).numpy())

    six.assertCountEqual(self, [1, 1, 1],
                         get_all(self.db, 'SELECT step FROM Tensors'))
    six.assertCountEqual(self, ['x', 'y', 'sum'],
                         get_all(self.db, 'SELECT tag_name FROM Tags'))
    x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"')
    y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"')
    sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"')

    with summary_ops.always_record_summaries():
      with self.create_db_writer().as_default():
        self.assertEqual(9, adder(int64(4), int64(5)).numpy())

    six.assertCountEqual(self, [1, 1, 1, 2, 2, 2],
                         get_all(self.db, 'SELECT step FROM Tensors'))
    six.assertCountEqual(self, [x_id, y_id, sum_id],
                         get_all(self.db, 'SELECT tag_id FROM Tags'))
    self.assertEqual(2, get_tensor(self.db, x_id, 1))
    self.assertEqual(3, get_tensor(self.db, y_id, 1))
    self.assertEqual(5, get_tensor(self.db, sum_id, 1))
    self.assertEqual(4, get_tensor(self.db, x_id, 2))
    self.assertEqual(5, get_tensor(self.db, y_id, 2))
    self.assertEqual(9, get_tensor(self.db, sum_id, 2))
    six.assertCountEqual(
        self, ['experiment'],
        get_all(self.db, 'SELECT experiment_name FROM Experiments'))
    six.assertCountEqual(self, ['run'],
                         get_all(self.db, 'SELECT run_name FROM Runs'))
    six.assertCountEqual(self, ['user'],
                         get_all(self.db, 'SELECT user_name FROM Users'))
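create_db_writer is a helper defined elsewhere in the test class; judging by the Experiments, Runs, and Users rows asserted above, it presumably wraps summary_ops.create_db_writer with fixed names. A hedged sketch of such a helper, where the db_path attribute and keyword arguments are assumptions rather than code taken from the original test:

def create_db_writer(self):
  # Hypothetical: points the summary system at a SQLite file and tags rows
  # with the experiment/run/user names the assertions above look for.
  return summary_ops.create_db_writer(
      self.db_path,
      experiment_name='experiment',
      run_name='run',
      user_name='user')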
Example #6
  def testSummaryGlobalStep(self):
    step = training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t2').as_default(), summary_ops.always_record_summaries():

      summary_ops.scalar('scalar', 2.0, step=step)

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'scalar')
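Passing the global step tensor explicitly, as above, is one option; the public TF 2.x API can also bind a default step once so later calls omit it. A minimal sketch, assuming eager execution and an illustrative log path:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')
tf.summary.experimental.set_step(1)
with writer.as_default():
  tf.summary.scalar('scalar', 2.0)  # picks up the default step bound above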
Example #7
  def testMaxQueue(self):
    logs = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logs, max_queue=2, flush_millis=999999,
        name='lol').as_default(), summary_ops.always_record_summaries():
      get_total = lambda: len(summary_test_util.events_from_logdir(logs))
      # Note: First tf.Event is always file_version.
      self.assertEqual(1, get_total())
      summary_ops.scalar('scalar', 2.0, step=1)
      self.assertEqual(1, get_total())
      summary_ops.scalar('scalar', 2.0, step=2)
      self.assertEqual(3, get_total())
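With a large flush_millis and max_queue=2, events sit in the in-memory queue until it fills, which is why the count above jumps from 1 to 3 only after the second scalar. When that buffering is not wanted, the public writer can be flushed explicitly; a sketch, assuming the TF 2.x API and an illustrative path:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs', max_queue=100, flush_millis=999999)
with writer.as_default():
  tf.summary.scalar('scalar', 2.0, step=1)
writer.flush()  # write pending events now instead of waiting for the queue or timer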
Example #8
    def testWriteSummaries(self):
        m = metrics.Mean()
        m([1, 10, 100])
        training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        with summary_ops.create_file_writer(
                logdir, max_queue=0,
                name="t0").as_default(), summary_ops.always_record_summaries():
            m.result()  # As a side-effect will write summaries.

        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 2)
        self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
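The 37.0 checked above is simply the arithmetic mean of [1, 10, 100]. The same metric is available through the public tf.keras.metrics API, sketched here for reference:

import tensorflow as tf

m = tf.keras.metrics.Mean()
m.update_state([1, 10, 100])
print(float(m.result()))  # 37.0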
Example #9
  def testDefunSummarys(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t1').as_default(), summary_ops.always_record_summaries():

      @function.defun
      def write():
        summary_ops.scalar('scalar', 2.0)

      write()
      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
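function.defun is the predecessor of tf.function; the same write-inside-a-compiled-function pattern with the public API looks roughly like this (a sketch only, with an illustrative log path):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')

@tf.function
def write(step):
  with writer.as_default():
    tf.summary.scalar('scalar', 2.0, step=step)

write(tf.constant(0, dtype=tf.int64))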
Example #10
  def testWriteSummaries(self):
    m = metrics.Mean()
    m([1, 10, 100])
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_summary_file_writer(
        logdir, max_queue=0,
        name="t0").as_default(), summary_ops.always_record_summaries():
      m.result()  # As a side-effect will write summaries.

    self.assertTrue(gfile.Exists(logdir))
    files = gfile.ListDirectory(logdir)
    self.assertEqual(len(files), 1)
    records = list(
        tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
    self.assertEqual(len(records), 2)
    event = event_pb2.Event()
    event.ParseFromString(records[1])
    self.assertEqual(event.summary.value[0].simple_value, 37.0)
Example #11
  def testSummaryGraphModeCond(self):
    with ops.Graph().as_default(), self.test_session():
      training_util.get_or_create_global_step()
      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(
          logdir, max_queue=0,
          name='t2').as_default(), summary_ops.always_record_summaries():
        summary_ops.initialize()
        training_util.get_or_create_global_step().initializer.run()
        def f():
          summary_ops.scalar('scalar', 2.0)
          return constant_op.constant(True)
        pred = array_ops.placeholder(dtypes.bool)
        x = control_flow_ops.cond(pred, f,
                                  lambda: constant_op.constant(False))
        x.eval(feed_dict={pred: True})

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
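The point of this test is that a summary emitted inside a cond branch picks up the control-flow name scope, hence the 'cond/scalar' tag. A rough TF 2.x analogue, assuming tf.function builds the cond into a graph; the resulting tag prefix is not asserted here:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')  # illustrative path

@tf.function
def maybe_write(pred):
  with writer.as_default():
    return tf.cond(pred,
                   lambda: tf.summary.scalar('scalar', 2.0, step=1),
                   lambda: tf.constant(False))

maybe_write(tf.constant(True))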
Example #12
  def testDefunSummarys(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_summary_file_writer(
        logdir, max_queue=0,
        name='t1').as_default(), summary_ops.always_record_summaries():

      @function.defun
      def write():
        summary_ops.scalar('scalar', 2.0)

      write()

      self.assertTrue(gfile.Exists(logdir))
      files = gfile.ListDirectory(logdir)
      self.assertEqual(len(files), 1)
      records = list(
          tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
      self.assertEqual(len(records), 2)
      event = event_pb2.Event()
      event.ParseFromString(records[1])
      self.assertEqual(event.summary.value[0].simple_value, 2.0)
Example #13
  def testShouldRecordSummary(self):
    self.assertFalse(summary_ops.should_record_summaries())
    with summary_ops.always_record_summaries():
      self.assertTrue(summary_ops.should_record_summaries())
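always_record_summaries flips the recording condition that should_record_summaries reads back. In the public TF 2.x API the same switch is tf.summary.record_if; a minimal sketch with an illustrative path:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')
with writer.as_default():
  with tf.summary.record_if(False):
    tf.summary.scalar('skipped', 1.0, step=0)   # condition is False, nothing is written
  with tf.summary.record_if(True):
    tf.summary.scalar('recorded', 1.0, step=0)  # written as usual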
Example #14
  def f():
    # Returns all metric results; running under this writer context means any
    # summaries they emit are recorded to summary_logdir.
    with summary_ops.create_file_writer(
        summary_logdir).as_default(), summary_ops.always_record_summaries():
      return self._all_metric_results()