Code example #1
 def testWriterInitAndClose(self):
     logdir = self.get_temp_dir()
     with summary_ops.always_record_summaries():
         writer = summary_ops.create_file_writer(logdir,
                                                 max_queue=100,
                                                 flush_millis=1000000)
         with writer.as_default():
             summary_ops.scalar('one', 1.0, step=1)
     with self.cached_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         get_total = lambda: len(
             summary_test_util.events_from_logdir(logdir))
         self.assertEqual(1, get_total())  # file_version Event
         # Running init() again while writer is open has no effect
         sess.run(writer.init())
         self.assertEqual(1, get_total())
         sess.run(summary_ops.all_summary_ops())
         self.assertEqual(1, get_total())
         # Running close() should do an implicit flush
         sess.run(writer.close())
         self.assertEqual(2, get_total())
         # Running init() on a closed writer should start a new file
         time.sleep(1.1)  # Ensure filename has a different timestamp
         sess.run(writer.init())
         sess.run(summary_ops.all_summary_ops())
         sess.run(writer.close())
         files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
         self.assertEqual(2, len(files))
         self.assertEqual(2,
                          len(summary_test_util.events_from_file(files[1])))
Code example #2
 def testFlushFunction(self):
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir,
                                             max_queue=999999,
                                             flush_millis=999999)
     with writer.as_default(), summary_ops.always_record_summaries():
         summary_ops.scalar('scalar', 2.0, step=1)
         flush_op = summary_ops.flush()
     with self.cached_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         get_total = lambda: len(
             summary_test_util.events_from_logdir(logdir))
         # Note: First tf.Event is always file_version.
         self.assertEqual(1, get_total())
         sess.run(summary_ops.all_summary_ops())
         self.assertEqual(1, get_total())
         sess.run(flush_op)
         self.assertEqual(2, get_total())
         # Test "writer" parameter
         sess.run(summary_ops.all_summary_ops())
         sess.run(summary_ops.flush(writer=writer))
         self.assertEqual(3, get_total())
         sess.run(summary_ops.all_summary_ops())
         sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
         self.assertEqual(4, get_total())
Code example #3
 def testWriterInitAndClose(self):
   logdir = self.get_temp_dir()
   get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     self.assertEqual(1, get_total())  # file_version Event
     # Calling init() again while writer is open has no effect
     writer.init()
     self.assertEqual(1, get_total())
     try:
       # Not using .as_default() to avoid implicit flush when exiting
       writer.set_as_default()
       summary_ops.scalar('one', 1.0, step=1)
       self.assertEqual(1, get_total())
       # Calling .close() should do an implicit flush
       writer.close()
       self.assertEqual(2, get_total())
       # Calling init() on a closed writer should start a new file
       time.sleep(1.1)  # Ensure filename has a different timestamp
       writer.init()
       files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
       self.assertEqual(2, len(files))
       get_total = lambda: len(summary_test_util.events_from_file(files[1]))
       self.assertEqual(1, get_total())  # file_version Event
       summary_ops.scalar('two', 2.0, step=2)
       writer.close()
       self.assertEqual(2, get_total())
     finally:
       # Clean up by resetting default writer
       summary_ops.create_file_writer(None).set_as_default()
Code example #4
 def testWriterInitAndClose(self):
   logdir = self.get_temp_dir()
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     with writer.as_default():
       summary_ops.scalar('one', 1.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     self.assertEqual(1, get_total())  # file_version Event
     # Running init() again while writer is open has no effect
     sess.run(writer.init())
     self.assertEqual(1, get_total())
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     # Running close() should do an implicit flush
     sess.run(writer.close())
     self.assertEqual(2, get_total())
     # Running init() on a closed writer should start a new file
     time.sleep(1.1)  # Ensure filename has a different timestamp
     sess.run(writer.init())
     sess.run(summary_ops.all_summary_ops())
     sess.run(writer.close())
     files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
     self.assertEqual(2, len(files))
     self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
Code example #5
File: summary_ops_test.py Project: flavz27/master_PA
 def testWriterInitAndClose(self):
     logdir = self.get_temp_dir()
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     with summary_ops.always_record_summaries():
         writer = summary_ops.create_file_writer(logdir,
                                                 max_queue=100,
                                                 flush_millis=1000000)
         self.assertEqual(1, get_total())  # file_version Event
         # Calling init() again while writer is open has no effect
         writer.init()
         self.assertEqual(1, get_total())
         try:
             # Not using .as_default() to avoid implicit flush when exiting
             writer.set_as_default()
             summary_ops.scalar('one', 1.0, step=1)
             self.assertEqual(1, get_total())
             # Calling .close() should do an implicit flush
             writer.close()
             self.assertEqual(2, get_total())
             # Calling init() on a closed writer should start a new file
             time.sleep(1.1)  # Ensure filename has a different timestamp
             writer.init()
             files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
             self.assertEqual(2, len(files))
             get_total = lambda: len(
                 summary_test_util.events_from_file(files[1]))
             self.assertEqual(1, get_total())  # file_version Event
             summary_ops.scalar('two', 2.0, step=2)
             writer.close()
             self.assertEqual(2, get_total())
         finally:
             # Clean up by resetting default writer
             summary_ops.create_file_writer(None).set_as_default()
Code example #6
 def testSummaryName(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir, max_queue=0)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     sess.run(summary_ops.all_summary_ops())
   events = summary_test_util.events_from_logdir(logdir)
   self.assertEqual(2, len(events))
   self.assertEqual('scalar', events[1].summary.value[0].tag)
Code example #7
File: summary_ops_test.py Project: flavz27/master_PA
 def testEagerMemory(self):
     training_util.get_or_create_global_step()
     logdir = self.get_temp_dir()
     with summary_ops.create_file_writer(
             logdir, max_queue=0,
             name='t0').as_default(), summary_ops.always_record_summaries():
         summary_ops.generic('tensor', 1, '')
         summary_ops.scalar('scalar', 2.0)
         summary_ops.histogram('histogram', [1.0])
         summary_ops.image('image', [[[[1.0]]]])
         summary_ops.audio('audio', [[1.0]], 1.0, 1)
Code example #8
 def testSummaryName(self):
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir, max_queue=0)
     with writer.as_default(), summary_ops.always_record_summaries():
         summary_ops.scalar('scalar', 2.0, step=1)
     with self.cached_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         sess.run(summary_ops.all_summary_ops())
     events = summary_test_util.events_from_logdir(logdir)
     self.assertEqual(2, len(events))
     self.assertEqual('scalar', events[1].summary.value[0].tag)
Code example #9
 def testEagerMemory(self):
   training_util.get_or_create_global_step()
   logdir = self.get_temp_dir()
   with summary_ops.create_file_writer(
       logdir, max_queue=0,
       name='t0').as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, '')
     summary_ops.scalar('scalar', 2.0)
     summary_ops.histogram('histogram', [1.0])
     summary_ops.image('image', [[[[1.0]]]])
     summary_ops.audio('audio', [[1.0]], 1.0, 1)
Code example #10
File: summary_ops_test.py Project: flavz27/master_PA
 def testDbURIOpen(self):
     tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite')
     tmpdb_uri = six.moves.urllib_parse.urljoin('file:', tmpdb_path)
     tmpdb_writer = summary_ops.create_db_writer(tmpdb_uri, 'experimentA',
                                                 'run1', 'user1')
     with summary_ops.always_record_summaries():
         with tmpdb_writer.as_default():
             summary_ops.scalar('t1', 2.0)
     tmpdb = sqlite3.connect(tmpdb_path)
     num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"')
     self.assertEqual(num, 1)
     tmpdb.close()
Code example #11
 def testDbURIOpen(self):
   tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite')
   tmpdb_uri = six.moves.urllib_parse.urljoin('file:', tmpdb_path)
   tmpdb_writer = summary_ops.create_db_writer(tmpdb_uri, 'experimentA',
                                               'run1', 'user1')
   with summary_ops.always_record_summaries():
     with tmpdb_writer.as_default():
       summary_ops.scalar('t1', 2.0)
   tmpdb = sqlite3.connect(tmpdb_path)
   num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"')
   self.assertEqual(num, 1)
   tmpdb.close()
Code example #12
  def testSummaryGlobalStep(self):
    step = training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t2').as_default(), summary_ops.always_record_summaries():

      summary_ops.scalar('scalar', 2.0, step=step)

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'scalar')
Code example #13
File: summary_ops_test.py Project: flavz27/master_PA
    def testSummaryGlobalStep(self):
        step = training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        with summary_ops.create_file_writer(
                logdir, max_queue=0,
                name='t2').as_default(), summary_ops.always_record_summaries():

            summary_ops.scalar('scalar', 2.0, step=step)

            events = summary_test_util.events_from_logdir(logdir)
            self.assertEqual(len(events), 2)
            self.assertEqual(events[1].summary.value[0].tag, 'scalar')
Code example #14
 def testMaxQueue(self):
   logs = tempfile.mkdtemp()
   with summary_ops.create_file_writer(
       logs, max_queue=1, flush_millis=999999,
       name='lol').as_default(), summary_ops.always_record_summaries():
     get_total = lambda: len(summary_test_util.events_from_logdir(logs))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=1)
     self.assertEqual(1, get_total())
     # Should flush after second summary since max_queue = 1
     summary_ops.scalar('scalar', 2.0, step=2)
     self.assertEqual(3, get_total())
Code example #15
File: summary_ops_test.py Project: flavz27/master_PA
 def testMaxQueue(self):
     logs = tempfile.mkdtemp()
     with summary_ops.create_file_writer(
              logs, max_queue=1, flush_millis=999999,
              name='lol').as_default(), summary_ops.always_record_summaries():
         get_total = lambda: len(summary_test_util.events_from_logdir(logs))
         # Note: First tf.compat.v1.Event is always file_version.
         self.assertEqual(1, get_total())
         summary_ops.scalar('scalar', 2.0, step=1)
         self.assertEqual(1, get_total())
         # Should flush after second summary since max_queue = 1
         summary_ops.scalar('scalar', 2.0, step=2)
         self.assertEqual(3, get_total())
Code example #16
 def testSummaryOps(self):
   training_util.get_or_create_global_step()
   logdir = tempfile.mkdtemp()
   with summary_ops.create_file_writer(
       logdir, max_queue=0,
       name='t0').as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, '')
     summary_ops.scalar('scalar', 2.0)
     summary_ops.histogram('histogram', [1.0])
     summary_ops.image('image', [[[[1.0]]]])
     summary_ops.audio('audio', [[1.0]], 1.0, 1)
     # The working condition of the ops is tested in the C++ test so we just
     # test here that we're calling them correctly.
     self.assertTrue(gfile.Exists(logdir))
Code example #17
 def testSummaryGlobalStep(self):
   training_util.get_or_create_global_step()
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir, max_queue=0)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0)
   with self.cached_session() as sess:
     sess.run(variables.global_variables_initializer())
     sess.run(summary_ops.summary_writer_initializer_op())
     step, _ = sess.run(
         [training_util.get_global_step(), summary_ops.all_summary_ops()])
   events = summary_test_util.events_from_logdir(logdir)
   self.assertEqual(2, len(events))
   self.assertEqual(step, events[1].step)
Code example #18
File: summary_ops_test.py Project: flavz27/master_PA
 def testSummaryOps(self):
     training_util.get_or_create_global_step()
     logdir = tempfile.mkdtemp()
     with summary_ops.create_file_writer(
             logdir, max_queue=0,
             name='t0').as_default(), summary_ops.always_record_summaries():
         summary_ops.generic('tensor', 1, '')
         summary_ops.scalar('scalar', 2.0)
         summary_ops.histogram('histogram', [1.0])
         summary_ops.image('image', [[[[1.0]]]])
         summary_ops.audio('audio', [[1.0]], 1.0, 1)
         # The working condition of the ops is tested in the C++ test so we just
         # test here that we're calling them correctly.
         self.assertTrue(gfile.Exists(logdir))
Code example #19
 def testWriterFlush(self):
   logdir = self.get_temp_dir()
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     with writer.as_default():
       summary_ops.scalar('one', 1.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     self.assertEqual(1, get_total())  # file_version Event
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     sess.run(writer.flush())
     self.assertEqual(2, get_total())
Code example #20
 def testSummaryOps(self):
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir, max_queue=0)
     with writer.as_default(), summary_ops.always_record_summaries():
         summary_ops.generic('tensor', 1, step=1)
         summary_ops.scalar('scalar', 2.0, step=1)
         summary_ops.histogram('histogram', [1.0], step=1)
         summary_ops.image('image', [[[[1.0]]]], step=1)
         summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
     with self.cached_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         sess.run(summary_ops.all_summary_ops())
     # The working condition of the ops is tested in the C++ test so we just
     # test here that we're calling them correctly.
     self.assertTrue(gfile.Exists(logdir))
Code example #21
 def testWriterFlush(self):
   logdir = self.get_temp_dir()
   get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     self.assertEqual(1, get_total())  # file_version Event
     with writer.as_default():
       summary_ops.scalar('one', 1.0, step=1)
       self.assertEqual(1, get_total())
       writer.flush()
       self.assertEqual(2, get_total())
       summary_ops.scalar('two', 2.0, step=2)
     # Exiting the "as_default()" should do an implicit flush of the "two" tag
     self.assertEqual(3, get_total())
Code example #22
 def testSummaryOps(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir, max_queue=0)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, step=1)
     summary_ops.scalar('scalar', 2.0, step=1)
     summary_ops.histogram('histogram', [1.0], step=1)
     summary_ops.image('image', [[[[1.0]]]], step=1)
     summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     sess.run(summary_ops.all_summary_ops())
   # The working condition of the ops is tested in the C++ test so we just
   # test here that we're calling them correctly.
   self.assertTrue(gfile.Exists(logdir))
Code example #23
 def testMaxQueue(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(
       logdir, max_queue=1, flush_millis=999999)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     # Should flush after second summary since max_queue = 1
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(3, get_total())
Code example #24
File: summary_ops_test.py Project: flavz27/master_PA
 def testWriterFlush(self):
     logdir = self.get_temp_dir()
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     with summary_ops.always_record_summaries():
         writer = summary_ops.create_file_writer(logdir,
                                                 max_queue=100,
                                                 flush_millis=1000000)
         self.assertEqual(1, get_total())  # file_version Event
         with writer.as_default():
             summary_ops.scalar('one', 1.0, step=1)
             self.assertEqual(1, get_total())
             writer.flush()
             self.assertEqual(2, get_total())
             summary_ops.scalar('two', 2.0, step=2)
         # Exiting the "as_default()" should do an implicit flush of the "two" tag
         self.assertEqual(3, get_total())
Code example #25
  def testScalarSummaryNameScope(self):
    """Test record_summaries_every_n_global_steps and all_summaries()."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      global_step = training_util.get_or_create_global_step()
      global_step.initializer.run()
      with ops.device('/cpu:0'):
        step_increment = state_ops.assign_add(global_step, 1)
      sess.run(step_increment)  # Increment global step from 0 to 1

      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(logdir, max_queue=0,
                                          name='t2').as_default():
        with summary_ops.record_summaries_every_n_global_steps(2):
          summary_ops.initialize()
          with ops.name_scope('scope'):
            summary_op = summary_ops.scalar('my_scalar', 2.0)

          # Neither of these should produce a summary because
          # global_step is 1 and "1 % 2 != 0"
          sess.run(summary_ops.all_summary_ops())
          sess.run(summary_op)
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 1)

          # Increment global step from 1 to 2 and check that the summary
          # is now written
          sess.run(step_increment)
          sess.run(summary_ops.all_summary_ops())
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 2)
          self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')
Code example #26
 def testMaxQueue(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(
       logdir, max_queue=1, flush_millis=999999)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     # Note: First tf.compat.v1.Event is always file_version.
     self.assertEqual(1, get_total())
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     # Should flush after second summary since max_queue = 1
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(3, get_total())
Code example #27
    def testScalarSummaryNameScope(self):
        """Test record_summaries_every_n_global_steps and all_summaries()."""
        with ops.Graph().as_default(), self.cached_session() as sess:
            global_step = training_util.get_or_create_global_step()
            global_step.initializer.run()
            with ops.device('/cpu:0'):
                step_increment = state_ops.assign_add(global_step, 1)
            sess.run(step_increment)  # Increment global step from 0 to 1

            logdir = tempfile.mkdtemp()
            with summary_ops.create_file_writer(logdir, max_queue=0,
                                                name='t2').as_default():
                with summary_ops.record_summaries_every_n_global_steps(2):
                    summary_ops.initialize()
                    with ops.name_scope('scope'):
                        summary_op = summary_ops.scalar('my_scalar', 2.0)

                    # Neither of these should produce a summary because
                    # global_step is 1 and "1 % 2 != 0"
                    sess.run(summary_ops.all_summary_ops())
                    sess.run(summary_op)
                    events = summary_test_util.events_from_logdir(logdir)
                    self.assertEqual(len(events), 1)

                    # Increment global step from 1 to 2 and check that the summary
                    # is now written
                    sess.run(step_increment)
                    sess.run(summary_ops.all_summary_ops())
                    events = summary_test_util.events_from_logdir(logdir)
                    self.assertEqual(len(events), 2)
                    self.assertEqual(events[1].summary.value[0].tag,
                                     'scope/my_scalar')
Code example #28
 def testFlushFunction(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(
       logdir, max_queue=999999, flush_millis=999999)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0, step=1)
     flush_op = summary_ops.flush()
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     sess.run(flush_op)
     self.assertEqual(2, get_total())
     # Test "writer" parameter
     sess.run(summary_ops.all_summary_ops())
     sess.run(summary_ops.flush(writer=writer))
     self.assertEqual(3, get_total())
     sess.run(summary_ops.all_summary_ops())
     sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
     self.assertEqual(4, get_total())
Code example #29
def main():
    batch_size = 16

    dataset = get_dataset(batch_size,
                          x_train,
                          y_train,
                          distorted_image_fn,
                          shuffle=True,
                          repeat=True)
    model = ClassificationNet(st_out_dim=IMG_SHAPE, num_class=NUM_CLASS)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    global_step = tf.train.get_or_create_global_step()

    for i, (image, rotated_image, angle, label) in enumerate(dataset, start=1):
        global_step.assign_add(1)
        with summary.record_summaries_every_n_global_steps(10):
            loss, prediction, transformed, theta = model.train(
                optimizer, rotated_image, label)
            acc = model.accuracy(prediction, label)

            # test
            if i % 500 == 0:
                total_acc = 0
                dataset_test = get_dataset(
                    1000, x_test, y_test,
                    distorted_image_test_fn).make_one_shot_iterator()
                split = 10000 // 1000
                for _ in range(split):
                    (image_test, rotated_image_test, angle_test,
                     label_test) = dataset_test.get_next()
                    logits_test, transformed_test, theta_test = model(
                        rotated_image_test)
                    prediction_test = tf.nn.softmax(logits_test)
                    acc_test = model.accuracy(prediction_test,
                                              label_test).numpy()
                    total_acc += acc_test
                print(total_acc / split)

                summary.scalar('accuracy/test', total_acc / split)

            if i % 10 == 0:
                print("step: {}, loss: {}, accuracy: {}".format(
                    int(global_step), float(loss), float(acc)))
                summary.scalar('loss', loss)
                summary.scalar('accuracy/training', acc)
                # summary images
                origin_images = image_utils.image_gallery(image.numpy(),
                                                          columns=4,
                                                          expand_dim=True)
                rotated_images = image_utils.image_gallery(
                    rotated_image.numpy(), columns=4, expand_dim=True)
                transformed_images = image_utils.image_gallery(
                    transformed.numpy(), columns=4, expand_dim=True)
                summary.image('image/original', origin_images)
                summary.image('image/rotated', rotated_images)
                summary.image('image/transformed', transformed_images)
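
Note: the main() loop above calls summary.scalar and summary.image without showing the writer setup that tf.contrib.summary requires, so as excerpted nothing would be written. A minimal sketch of the setup it appears to assume (TF 1.x eager mode; the logdir path and flush interval are illustrative assumptions, not from the source):

import tensorflow as tf
from tensorflow.contrib import summary  # TF 1.x contrib summary API

tf.enable_eager_execution()

# Illustrative logdir; any writable path works.
writer = summary.create_file_writer('./logs/stn', flush_millis=10000)
writer.set_as_default()  # summary.scalar()/summary.image() now target this writer

main()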
Code example #30
 def testFlushFunction(self):
   logs = tempfile.mkdtemp()
   writer = summary_ops.create_file_writer(
       logs, max_queue=999999, flush_millis=999999, name='lol')
   with writer.as_default(), summary_ops.always_record_summaries():
     get_total = lambda: len(summary_test_util.events_from_logdir(logs))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=1)
     summary_ops.scalar('scalar', 2.0, step=2)
     self.assertEqual(1, get_total())
     summary_ops.flush()
     self.assertEqual(3, get_total())
     # Test "writer" parameter
     summary_ops.scalar('scalar', 2.0, step=3)
     summary_ops.flush(writer=writer)
     self.assertEqual(4, get_total())
     summary_ops.scalar('scalar', 2.0, step=4)
     summary_ops.flush(writer=writer._resource)  # pylint:disable=protected-access
     self.assertEqual(5, get_total())
Code example #31
File: summary_ops_test.py Project: flavz27/master_PA
 def testFlushFunction(self):
     logs = tempfile.mkdtemp()
     writer = summary_ops.create_file_writer(logs,
                                             max_queue=999999,
                                             flush_millis=999999,
                                             name='lol')
     with writer.as_default(), summary_ops.always_record_summaries():
         get_total = lambda: len(summary_test_util.events_from_logdir(logs))
         # Note: First tf.compat.v1.Event is always file_version.
         self.assertEqual(1, get_total())
         summary_ops.scalar('scalar', 2.0, step=1)
         summary_ops.scalar('scalar', 2.0, step=2)
         self.assertEqual(1, get_total())
         summary_ops.flush()
         self.assertEqual(3, get_total())
         # Test "writer" parameter
         summary_ops.scalar('scalar', 2.0, step=3)
         summary_ops.flush(writer=writer)
         self.assertEqual(4, get_total())
         summary_ops.scalar('scalar', 2.0, step=4)
         summary_ops.flush(writer=writer._resource)  # pylint:disable=protected-access
         self.assertEqual(5, get_total())
Code example #32
    def testSharedName(self):
        logdir = self.get_temp_dir()
        with summary_ops.always_record_summaries():
            # Create with default shared name (should match logdir)
            writer1 = summary_ops.create_file_writer(logdir)
            with writer1.as_default():
                summary_ops.scalar('one', 1.0, step=1)
            # Create with explicit logdir shared name (should be same resource/file)
            shared_name = 'logdir:' + logdir
            writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
            with writer2.as_default():
                summary_ops.scalar('two', 2.0, step=2)
            # Create with different shared name (should be separate resource/file)
            writer3 = summary_ops.create_file_writer(logdir, name='other')
            with writer3.as_default():
                summary_ops.scalar('three', 3.0, step=3)

        with self.cached_session() as sess:
            # Run init ops across writers sequentially to avoid race condition.
            # TODO(nickfelt): fix race condition in resource manager lookup or create
            sess.run(writer1.init())
            sess.run(writer2.init())
            time.sleep(1.1)  # Ensure filename has a different timestamp
            sess.run(writer3.init())
            sess.run(summary_ops.all_summary_ops())
            sess.run([writer1.flush(), writer2.flush(), writer3.flush()])

        event_files = iter(
            sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

        # First file has tags "one" and "two"
        events = summary_test_util.events_from_file(next(event_files))
        self.assertEqual('brain.Event:2', events[0].file_version)
        tags = [e.summary.value[0].tag for e in events[1:]]
        self.assertItemsEqual(['one', 'two'], tags)

        # Second file has tag "three"
        events = summary_test_util.events_from_file(next(event_files))
        self.assertEqual('brain.Event:2', events[0].file_version)
        tags = [e.summary.value[0].tag for e in events[1:]]
        self.assertItemsEqual(['three'], tags)

        # No more files
        self.assertRaises(StopIteration, lambda: next(event_files))
Code example #33
  def testSharedName(self):
    logdir = self.get_temp_dir()
    with summary_ops.always_record_summaries():
      # Create with default shared name (should match logdir)
      writer1 = summary_ops.create_file_writer(logdir)
      with writer1.as_default():
        summary_ops.scalar('one', 1.0, step=1)
      # Create with explicit logdir shared name (should be same resource/file)
      shared_name = 'logdir:' + logdir
      writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
      with writer2.as_default():
        summary_ops.scalar('two', 2.0, step=2)
      # Create with different shared name (should be separate resource/file)
      writer3 = summary_ops.create_file_writer(logdir, name='other')
      with writer3.as_default():
        summary_ops.scalar('three', 3.0, step=3)

    with self.cached_session() as sess:
      # Run init ops across writers sequentially to avoid race condition.
      # TODO(nickfelt): fix race condition in resource manager lookup or create
      sess.run(writer1.init())
      sess.run(writer2.init())
      time.sleep(1.1)  # Ensure filename has a different timestamp
      sess.run(writer3.init())
      sess.run(summary_ops.all_summary_ops())
      sess.run([writer1.flush(), writer2.flush(), writer3.flush()])

    event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

    # First file has tags "one" and "two"
    events = summary_test_util.events_from_file(next(event_files))
    self.assertEqual('brain.Event:2', events[0].file_version)
    tags = [e.summary.value[0].tag for e in events[1:]]
    self.assertItemsEqual(['one', 'two'], tags)

    # Second file has tag "three"
    events = summary_test_util.events_from_file(next(event_files))
    self.assertEqual('brain.Event:2', events[0].file_version)
    tags = [e.summary.value[0].tag for e in events[1:]]
    self.assertItemsEqual(['three'], tags)

    # No more files
    self.assertRaises(StopIteration, lambda: next(event_files))
Code example #34
File: summary_ops_test.py Project: flavz27/master_PA
    def testSharedName(self):
        logdir = self.get_temp_dir()
        with summary_ops.always_record_summaries():
            # Create with default shared name (should match logdir)
            writer1 = summary_ops.create_file_writer(logdir)
            with writer1.as_default():
                summary_ops.scalar('one', 1.0, step=1)
                summary_ops.flush()
            # Create with explicit logdir shared name (should be same resource/file)
            shared_name = 'logdir:' + logdir
            writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
            with writer2.as_default():
                summary_ops.scalar('two', 2.0, step=2)
                summary_ops.flush()
            # Create with different shared name (should be separate resource/file)
            time.sleep(1.1)  # Ensure filename has a different timestamp
            writer3 = summary_ops.create_file_writer(logdir, name='other')
            with writer3.as_default():
                summary_ops.scalar('three', 3.0, step=3)
                summary_ops.flush()

        event_files = iter(
            sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

        # First file has tags "one" and "two"
        events = iter(summary_test_util.events_from_file(next(event_files)))
        self.assertEqual('brain.Event:2', next(events).file_version)
        self.assertEqual('one', next(events).summary.value[0].tag)
        self.assertEqual('two', next(events).summary.value[0].tag)
        self.assertRaises(StopIteration, lambda: next(events))

        # Second file has tag "three"
        events = iter(summary_test_util.events_from_file(next(event_files)))
        self.assertEqual('brain.Event:2', next(events).file_version)
        self.assertEqual('three', next(events).summary.value[0].tag)
        self.assertRaises(StopIteration, lambda: next(events))

        # No more files
        self.assertRaises(StopIteration, lambda: next(event_files))
Code example #35
  def testSharedName(self):
    logdir = self.get_temp_dir()
    with summary_ops.always_record_summaries():
      # Create with default shared name (should match logdir)
      writer1 = summary_ops.create_file_writer(logdir)
      with writer1.as_default():
        summary_ops.scalar('one', 1.0, step=1)
        summary_ops.flush()
      # Create with explicit logdir shared name (should be same resource/file)
      shared_name = 'logdir:' + logdir
      writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
      with writer2.as_default():
        summary_ops.scalar('two', 2.0, step=2)
        summary_ops.flush()
      # Create with different shared name (should be separate resource/file)
      time.sleep(1.1)  # Ensure filename has a different timestamp
      writer3 = summary_ops.create_file_writer(logdir, name='other')
      with writer3.as_default():
        summary_ops.scalar('three', 3.0, step=3)
        summary_ops.flush()

    event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

    # First file has tags "one" and "two"
    events = iter(summary_test_util.events_from_file(next(event_files)))
    self.assertEqual('brain.Event:2', next(events).file_version)
    self.assertEqual('one', next(events).summary.value[0].tag)
    self.assertEqual('two', next(events).summary.value[0].tag)
    self.assertRaises(StopIteration, lambda: next(events))

    # Second file has tag "three"
    events = iter(summary_test_util.events_from_file(next(event_files)))
    self.assertEqual('brain.Event:2', next(events).file_version)
    self.assertEqual('three', next(events).summary.value[0].tag)
    self.assertRaises(StopIteration, lambda: next(events))

    # No more files
    self.assertRaises(StopIteration, lambda: next(event_files))
Code example #36
 def body(unused_pred):
     summary_ops.scalar('scalar', 2.0)
     return constant_op.constant(False)
Code example #37
 def f():
     summary_ops.scalar('scalar', 2.0)
     return constant_op.constant(True)
Code example #38
 def f():
   summary_ops.scalar('scalar', 2.0)
   return constant_op.constant(True)
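
Fragments like body() and f() above are excerpted without their enclosing test, so they only make sense as loop or branch bodies driven under a default writer. A minimal sketch of plausible wiring, assuming summary_ops is tensorflow.python.ops.summary_ops_v2 and a default writer with an active recording condition is already set; the predicate and loop variable are illustrative:

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops

# Drives body() once in a while_loop: the loop variable starts True and
# body() returns False, which ends the loop after one iteration.
control_flow_ops.while_loop(lambda pred: pred, body,
                            [constant_op.constant(True)])

# Drives f() as the true branch of a cond; the false branch is a stand-in.
control_flow_ops.cond(constant_op.constant(True), f,
                      lambda: constant_op.constant(False))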
Code example #39
def train_one_epoch(dataset, base_model, optimizer, preprocessing_type,
                    logging_every_n_steps, summary_every_n_steps, saver,
                    save_every_n_steps, save_path):
    idx = 0

    for image, gt_bboxes, gt_labels in tqdm(dataset):
        # BGR input: Keras application pre-trained models expect BGR
        # channel order.

        # Convert ymin, xmin, ymax, xmax -> xmin, ymin, xmax, ymax.
        gt_bboxes = tf.squeeze(gt_bboxes, axis=0)
        channels = tf.split(gt_bboxes, 4, axis=1)
        gt_bboxes = tf.concat(
            [channels[1], channels[0], channels[3], channels[2]], axis=1)

        # set labels to int32
        gt_labels = tf.to_int32(tf.squeeze(gt_labels, axis=0))

        # train one step
        with tf.GradientTape() as tape:
            rpn_cls_loss, rpn_reg_loss, roi_cls_loss, roi_reg_loss = base_model(
                (image, gt_bboxes, gt_labels), True)
            l2_loss = tf.add_n(base_model.losses)
            total_loss = rpn_cls_loss + rpn_reg_loss + roi_cls_loss + roi_reg_loss + l2_loss
            train_step(base_model, total_loss, tape, optimizer)

        # summary
        if idx % summary_every_n_steps == 0:
            summary.scalar("l2_loss", l2_loss)
            summary.scalar("rpn_cls_loss", rpn_cls_loss)
            summary.scalar("rpn_reg_loss", rpn_reg_loss)
            summary.scalar("roi_cls_loss", roi_cls_loss)
            summary.scalar("roi_reg_loss", roi_reg_loss)
            summary.scalar("total_loss", total_loss)

            pred_bboxes, pred_labels, pred_scores = base_model(image, False)

            if pred_bboxes is not None:
                selected_idx = tf.where(
                    pred_scores >= CONFIG['show_image_score_threshold'])[:, 0]
                if tf.size(selected_idx) != 0:
                    # show gt
                    gt_channels = tf.split(gt_bboxes, 4, axis=1)
                    show_gt_bboxes = tf.concat(
                        [gt_channels[1], gt_channels[0], gt_channels[3],
                         gt_channels[2]], axis=1)
                    gt_image = show_one_image(
                        tf.squeeze(image, axis=0).numpy(),
                        show_gt_bboxes.numpy(),
                        gt_labels.numpy(),
                        preprocessing_type=preprocessing_type,
                        caffe_pixel_means=CONFIG['bgr_pixel_means'],
                        enable_matplotlib=False)
                    tf.contrib.summary.image("gt_image",
                                             tf.expand_dims(gt_image, axis=0))

                    # show pred
                    pred_bboxes = tf.gather(pred_bboxes, selected_idx)
                    pred_labels = tf.gather(pred_labels, selected_idx)
                    channels = tf.split(pred_bboxes,
                                        num_or_size_splits=4,
                                        axis=1)
                    show_pred_bboxes = tf.concat(
                        [channels[1], channels[0], channels[3], channels[2]],
                        axis=1)
                    pred_image = show_one_image(
                        tf.squeeze(image, axis=0).numpy(),
                        show_pred_bboxes.numpy(),
                        pred_labels.numpy(),
                        preprocessing_type=preprocessing_type,
                        caffe_pixel_means=CONFIG['bgr_pixel_means'],
                        enable_matplotlib=False)
                    tf.contrib.summary.image(
                        "pred_image", tf.expand_dims(pred_image, axis=0))

        # logging
        if idx % logging_every_n_steps == 0:
            if isinstance(optimizer, tf.train.AdamOptimizer):
                show_lr = optimizer._lr()
            else:
                show_lr = optimizer._learning_rate()
            logging_format = 'steps %d, lr is %.5f, loss: %.4f, %.4f, %.4f, %.4f, %.4f, %.4f'
            tf_logging.info(logging_format %
                            (idx + 1, show_lr, rpn_cls_loss, rpn_reg_loss,
                             roi_cls_loss, roi_reg_loss, l2_loss, total_loss))

        # saving
        if saver is not None and save_path is not None and idx % save_every_n_steps == 0 and idx != 0:
            saver.save(os.path.join(save_path, 'model.ckpt'),
                       global_step=tf.train.get_or_create_global_step())

        idx += 1
Code example #40
File: summary_ops_test.py Project: flavz27/master_PA
 def write():
     summary_ops.scalar('scalar', 2.0)
Code example #41
File: summary_ops_test.py Project: flavz27/master_PA
 def run_step():
     summary_ops.scalar('scalar', i, step=step)
     step.assign_add(1)
Code example #42
 def write():
   summary_ops.scalar('scalar', 2.0)
Code example #43
 def body(unused_pred):
   summary_ops.scalar('scalar', 2.0)
   return constant_op.constant(False)
Code example #44
 def run_step():
   summary_ops.scalar('scalar', i, step=step)
   step.assign_add(1)
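
Similarly, write() and run_step() reference step and i from scopes that were not captured. A minimal sketch of an enclosing harness, with the writer, step variable, and loop bound as illustrative assumptions (summary_ops aliased to tf.contrib.summary, which re-exports the same ops):

import tensorflow as tf
from tensorflow.contrib import summary as summary_ops

tf.enable_eager_execution()
writer = summary_ops.create_file_writer('./logs/run_step', max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
  step = tf.Variable(0, dtype=tf.int64)
  for i in range(3):
    # Same body as run_step() above: log the value, then advance the step.
    summary_ops.scalar('scalar', i, step=step)
    step.assign_add(1)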