def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    # Calling init() again while writer is open has no effect
    writer.init()
    self.assertEqual(1, get_total())
    try:
      # Not using .as_default() to avoid implicit flush when exiting
      writer.set_as_default()
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      # Calling .close() should do an implicit flush
      writer.close()
      self.assertEqual(2, get_total())
      # Calling init() on a closed writer should start a new file
      time.sleep(1.1)  # Ensure filename has a different timestamp
      writer.init()
      files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
      self.assertEqual(2, len(files))
      get_total = lambda: len(summary_test_util.events_from_file(files[1]))
      self.assertEqual(1, get_total())  # file_version Event
      summary_ops.scalar('two', 2.0, step=2)
      writer.close()
      self.assertEqual(2, get_total())
    finally:
      # Clean up by resetting default writer
      summary_ops.create_file_writer(None).set_as_default()
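# The helpers summary_test_util.events_from_logdir / events_from_file used
# throughout these tests are not defined in this file. Below is a minimal
# sketch of what they would need to do, reconstructed from how the tests call
# them; the implementation (and its use of tf.compat.v1.train.summary_iterator)
# is an assumption for illustration, not the actual test utility.
import glob
import os

import tensorflow as tf


def events_from_file(filepath):
  # Returns every tf.Event proto recorded in a single events file.
  return list(tf.compat.v1.train.summary_iterator(filepath))


def events_from_logdir(logdir):
  # Concatenates events from all *tfevents* files under logdir, in filename
  # order; filenames embed a timestamp, which is why these tests sleep for
  # 1.1 seconds before re-initializing a writer to force a distinct new file.
  events = []
  for path in sorted(glob.glob(os.path.join(logdir, '*tfevents*'))):
    events.extend(events_from_file(path))
  return events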
def testScalarSummaryNameScope(self):
  """Test record_summaries_every_n_global_steps and all_summaries()."""
  with ops.Graph().as_default(), self.cached_session() as sess:
    global_step = training_util.get_or_create_global_step()
    global_step.initializer.run()
    with ops.device('/cpu:0'):
      step_increment = state_ops.assign_add(global_step, 1)
    sess.run(step_increment)  # Increment global step from 0 to 1

    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(logdir, max_queue=0,
                                        name='t2').as_default():
      with summary_ops.record_summaries_every_n_global_steps(2):
        summary_ops.initialize()
        with ops.name_scope('scope'):
          summary_op = summary_ops.scalar('my_scalar', 2.0)

        # Neither of these should produce a summary because
        # global_step is 1 and "1 % 2 != 0"
        sess.run(summary_ops.all_summary_ops())
        sess.run(summary_op)
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 1)

        # Increment global step from 1 to 2 and check that the summary
        # is now written
        sess.run(step_increment)
        sess.run(summary_ops.all_summary_ops())
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 2)
        self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')
def train(training_dataset, preprocessing_type, base_model, optimizer,
          summary_dir, ckpt_dir, logging_every_n_steps, summary_every_n_steps,
          save_every_n_steps, restore_ckpt_file_path):
    # Get the pretrained model variables.
    variables = base_model.variables + [tf.train.get_or_create_global_step()]
    saver = model_saver.Saver(variables)
    if restore_ckpt_file_path is not None:
        saver.restore(restore_ckpt_file_path)
    if tf.train.latest_checkpoint(ckpt_dir) is not None:
        saver.restore(tf.train.latest_checkpoint(ckpt_dir))

    # writer = tf.summary.FileWriter(train_dir, flush_secs=100)
    writer = summary.create_file_writer(summary_dir, flush_millis=100000)

    for i in range(configs['epoches']):
        tf.compat.v1.logging.info('epoch %d starting...' % (i + 1))
        start_time = time.time()
        with writer.as_default(), summary.always_record_summaries():
            train_one_epoch(dataset=training_dataset,
                            base_model=base_model,
                            optimizer=optimizer,
                            preprocessing_type=preprocessing_type,
                            logging_every_n_steps=logging_every_n_steps,
                            summary_every_n_steps=summary_every_n_steps,
                            save_path=ckpt_dir,
                            saver=saver,
                            save_every_n_steps=save_every_n_steps)
        tf.set_random_seed(1)
        end_time = time.time()
        tf.compat.v1.logging.info(
            'epoch %d training finished, costing %d seconds...' %
            (i + 1, end_time - start_time))
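# A hypothetical invocation of train(), for illustration only: the dataset
# builder, model object, and paths below are assumptions, not part of the
# original code. Note the restore order above: restore_ckpt_file_path (e.g. a
# pretrained backbone) is applied first, then any newer checkpoint found in
# ckpt_dir overwrites it, so a resumed run wins over the pretrained init.
#
# train(training_dataset=build_training_dataset(),   # assumed helper
#       preprocessing_type='caffe',
#       base_model=base_model,
#       optimizer=tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.9),
#       summary_dir='logs/summaries',
#       ckpt_dir='logs/ckpt',
#       logging_every_n_steps=100,
#       summary_every_n_steps=100,
#       save_every_n_steps=1000,
#       restore_ckpt_file_path=None)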
def testFlushFunction(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(
      logdir, max_queue=999999, flush_millis=999999)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
    flush_op = summary_ops.flush()
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    sess.run(flush_op)
    self.assertEqual(2, get_total())
    # Test "writer" parameter
    sess.run(summary_ops.all_summary_ops())
    sess.run(summary_ops.flush(writer=writer))
    self.assertEqual(3, get_total())
    sess.run(summary_ops.all_summary_ops())
    sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
    self.assertEqual(4, get_total())
def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    # Running init() again while writer is open has no effect
    sess.run(writer.init())
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Running close() should do an implicit flush
    sess.run(writer.close())
    self.assertEqual(2, get_total())
    # Running init() on a closed writer should start a new file
    time.sleep(1.1)  # Ensure filename has a different timestamp
    sess.run(writer.init())
    sess.run(summary_ops.all_summary_ops())
    sess.run(writer.close())
    files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
    self.assertEqual(2, len(files))
    self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
def testSharedName(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    # Create with default shared name (should match logdir)
    writer1 = summary_ops.create_file_writer(logdir)
    with writer1.as_default():
      summary_ops.scalar('one', 1.0, step=1)
    # Create with explicit logdir shared name (should be same resource/file)
    shared_name = 'logdir:' + logdir
    writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
    with writer2.as_default():
      summary_ops.scalar('two', 2.0, step=2)
    # Create with different shared name (should be separate resource/file)
    writer3 = summary_ops.create_file_writer(logdir, name='other')
    with writer3.as_default():
      summary_ops.scalar('three', 3.0, step=3)

  with self.cached_session() as sess:
    # Run init ops across writers sequentially to avoid race condition.
    # TODO(nickfelt): fix race condition in resource manager lookup or create
    sess.run(writer1.init())
    sess.run(writer2.init())
    time.sleep(1.1)  # Ensure filename has a different timestamp
    sess.run(writer3.init())
    sess.run(summary_ops.all_summary_ops())
    sess.run([writer1.flush(), writer2.flush(), writer3.flush()])

  event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

  # First file has tags "one" and "two"
  events = summary_test_util.events_from_file(next(event_files))
  self.assertEqual('brain.Event:2', events[0].file_version)
  tags = [e.summary.value[0].tag for e in events[1:]]
  self.assertItemsEqual(['one', 'two'], tags)

  # Second file has tag "three"
  events = summary_test_util.events_from_file(next(event_files))
  self.assertEqual('brain.Event:2', events[0].file_version)
  tags = [e.summary.value[0].tag for e in events[1:]]
  self.assertItemsEqual(['three'], tags)

  # No more files
  self.assertRaises(StopIteration, lambda: next(event_files))
def testSummaryName(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    sess.run(summary_ops.all_summary_ops())
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual('scalar', events[1].summary.value[0].tag)
def testEagerMemory(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
def testSummaryGlobalStep(self):
  step = training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t2').as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=step)

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].tag, 'scalar')
def testSharedName(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    # Create with default shared name (should match logdir)
    writer1 = summary_ops.create_file_writer(logdir)
    with writer1.as_default():
      summary_ops.scalar('one', 1.0, step=1)
      summary_ops.flush()
    # Create with explicit logdir shared name (should be same resource/file)
    shared_name = 'logdir:' + logdir
    writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
    with writer2.as_default():
      summary_ops.scalar('two', 2.0, step=2)
      summary_ops.flush()
    # Create with different shared name (should be separate resource/file)
    time.sleep(1.1)  # Ensure filename has a different timestamp
    writer3 = summary_ops.create_file_writer(logdir, name='other')
    with writer3.as_default():
      summary_ops.scalar('three', 3.0, step=3)
      summary_ops.flush()

  event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

  # First file has tags "one" and "two"
  events = iter(summary_test_util.events_from_file(next(event_files)))
  self.assertEqual('brain.Event:2', next(events).file_version)
  self.assertEqual('one', next(events).summary.value[0].tag)
  self.assertEqual('two', next(events).summary.value[0].tag)
  self.assertRaises(StopIteration, lambda: next(events))

  # Second file has tag "three"
  events = iter(summary_test_util.events_from_file(next(event_files)))
  self.assertEqual('brain.Event:2', next(events).file_version)
  self.assertEqual('three', next(events).summary.value[0].tag)
  self.assertRaises(StopIteration, lambda: next(events))

  # No more files
  self.assertRaises(StopIteration, lambda: next(event_files))
def testMaxQueue(self):
  logs = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logs, max_queue=1, flush_millis=999999,
      name='lol').as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.compat.v1.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    self.assertEqual(1, get_total())
    # Should flush after second summary since max_queue = 1
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(3, get_total())
def testSummaryGlobalStep(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0)
  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(summary_ops.summary_writer_initializer_op())
    step, _ = sess.run(
        [training_util.get_global_step(), summary_ops.all_summary_ops()])
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual(step, events[1].step)
def testSummaryOps(self):
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
    # The working condition of the ops is tested in the C++ test so we just
    # test here that we're calling them correctly.
    self.assertTrue(gfile.Exists(logdir))
def testSummaryOps(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, step=1)
    summary_ops.scalar('scalar', 2.0, step=1)
    summary_ops.histogram('histogram', [1.0], step=1)
    summary_ops.image('image', [[[[1.0]]]], step=1)
    summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    sess.run(summary_ops.all_summary_ops())
  # The working condition of the ops is tested in the C++ test so we just
  # test here that we're calling them correctly.
  self.assertTrue(gfile.Exists(logdir))
def testWriterFlush(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      writer.flush()
      self.assertEqual(2, get_total())
      summary_ops.scalar('two', 2.0, step=2)
    # Exiting the "as_default()" should do an implicit flush of the "two" tag
    self.assertEqual(3, get_total())
def testDefunSummarys(self):
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t1').as_default(), summary_ops.always_record_summaries():

    @function.defun
    def write():
      summary_ops.scalar('scalar', 2.0)

    write()

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
def testWriterFlush(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    sess.run(writer.flush())
    self.assertEqual(2, get_total())
def testMaxQueue(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(
      logdir, max_queue=1, flush_millis=999999)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)
  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Should flush after second summary since max_queue = 1
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(3, get_total())
def testRecordEveryNGlobalSteps(self):
  step = training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()

  def run_step():
    summary_ops.scalar('scalar', i, step=step)
    step.assign_add(1)

  with summary_ops.create_file_writer(logdir).as_default(
  ), summary_ops.record_summaries_every_n_global_steps(2, step):
    for i in range(10):
      run_step()
    # And another 10 steps as a graph function.
    run_step_fn = function.defun(run_step)
    for i in range(10):
      run_step_fn()

  events = summary_test_util.events_from_logdir(logdir)
  # Summaries fire on even global steps (0, 2, ..., 18): 10 summary events
  # plus the initial file_version event makes 11.
  self.assertEqual(len(events), 11)
def testSummaryGraphModeCond(self):
  with ops.Graph().as_default(), self.cached_session():
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t2').as_default(), summary_ops.always_record_summaries():
      summary_ops.initialize()
      training_util.get_or_create_global_step().initializer.run()

      def f():
        summary_ops.scalar('scalar', 2.0)
        return constant_op.constant(True)

      pred = array_ops.placeholder(dtypes.bool)
      x = control_flow_ops.cond(pred, f, lambda: constant_op.constant(False))
      x.eval(feed_dict={pred: True})

    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
def testFlushFunction(self):
  logs = tempfile.mkdtemp()
  writer = summary_ops.create_file_writer(
      logs, max_queue=999999, flush_millis=999999, name='lol')
  with writer.as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(1, get_total())
    summary_ops.flush()
    self.assertEqual(3, get_total())
    # Test "writer" parameter
    summary_ops.scalar('scalar', 2.0, step=3)
    summary_ops.flush(writer=writer)
    self.assertEqual(4, get_total())
    summary_ops.scalar('scalar', 2.0, step=4)
    summary_ops.flush(writer=writer._resource)  # pylint:disable=protected-access
    self.assertEqual(5, get_total())
    was not `None`, that operation also increments `global_step`.
    """
    return optimizer.minimize(
        loss=lambda: loss_func(model, inputs, labels, **kwargs),
        global_step=tf.train.get_or_create_global_step())


train_accuracy = tfe.metrics.Accuracy()

epochs = 1000
logdir = '../logs/iris-eager'
acc = 0.0
_loss = 0.

# File writer for tensorboard visualization.
writer = summary.create_file_writer(logdir=logdir)

with writer.as_default():
    with summary.always_record_summaries():
        print('\n{}'.format(75 * '-'))
        print('Training started: {:%c}\n'.format(datetime.now()))
        start = time.time()

        for epoch in range(epochs):
            # Loop through each batch of data.
            for X_batch, y_batch in tfe.Iterator(train_data):
                # Run the training step.
                train_step(model, optimizer, loss, X_batch, y_batch)

            # Estimate loss on the last batch of the epoch.
            _loss = loss(model, X_batch, y_batch)
            tf.summary.scalar('loss', _loss)
tf.enable_eager_execution(conf)

SEED = 1234
tf.set_random_seed(SEED)
NUM_CLASS = 10
IMG_SHAPE = [28, 28]

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(
    '/data/ajy/datasets/MNIST/mnist.npz')
# scale to (0, 1), shape is (28, 28, 1)
x_train, x_test = [(np.expand_dims(x / 255., axis=-1)).astype(np.float32)
                   for x in [x_train, x_test]]
y_train, y_test = [tf.one_hot(i, depth=NUM_CLASS) for i in [y_train, y_test]]

# summary
writer = summary.create_file_writer('./logs/STN')
writer.set_as_default()


def get_dataset(batch_size, x, y, map_fn, shuffle=False, repeat=False):
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    if repeat:
        dataset = dataset.repeat()
    if shuffle:
        dataset = dataset.shuffle(batch_size * 10)
    dataset = dataset.map(
        map_fn, num_parallel_calls=2).batch(batch_size).prefetch(1)
    return dataset


def distorted_image_fn(image, label):
dis_dropout_keep_prob = 0.75
dis_l2_reg_lambda = 0.2
dis_batch_size = 64

#########################################################################################
# Basic Training Parameters
#########################################################################################
TOTAL_BATCH = 200
base_path = "/home/panxie/Documents/TextGan/seqgan/"
positive_file = base_path + 'save/real_data.txt'
negative_file = base_path + 'save/generator_sample.txt'
eval_file = base_path + 'save/eval_file.txt'
generated_num = 100
vocab_size = 5000

summary_writer = summary.create_file_writer(base_path + '/tmp/summaries')
summary_writer.set_as_default()
global_step = tf.train.get_or_create_global_step()


def main():
    random.seed(SEED)
    np.random.seed(SEED)
    assert START_TOKEN == 0

    #########################################################################################
    # Generator, oracle (target-LSTM), discriminator model.
    #########################################################################################
    # tf.logging.info("loading generator, discriminator, oracle model.")
    generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM,
                          SEQ_LENGTH, START_TOKEN)