Code Example #1
 def testWriterInitAndClose(self):
   logdir = self.get_temp_dir()
   get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     self.assertEqual(1, get_total())  # file_version Event
     # Calling init() again while writer is open has no effect
     writer.init()
     self.assertEqual(1, get_total())
     try:
       # Not using .as_default() to avoid implicit flush when exiting
       writer.set_as_default()
       summary_ops.scalar('one', 1.0, step=1)
       self.assertEqual(1, get_total())
       # Calling .close() should do an implicit flush
       writer.close()
       self.assertEqual(2, get_total())
       # Calling init() on a closed writer should start a new file
       time.sleep(1.1)  # Ensure filename has a different timestamp
       writer.init()
       files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
       self.assertEqual(2, len(files))
       get_total = lambda: len(summary_test_util.events_from_file(files[1]))
       self.assertEqual(1, get_total())  # file_version Event
       summary_ops.scalar('two', 2.0, step=2)
       writer.close()
       self.assertEqual(2, get_total())
     finally:
       # Clean up by resetting default writer
       summary_ops.create_file_writer(None).set_as_default()
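Note: the init()/close() semantics exercised above carry over to the public TF2 summary API. A minimal eager-mode sketch (an illustration only, not the test's own code; the logdir path is a placeholder):

import tensorflow as tf

logdir = '/tmp/demo_logs'  # placeholder path
writer = tf.summary.create_file_writer(logdir, max_queue=100, flush_millis=1000000)
with writer.as_default():
  tf.summary.scalar('one', 1.0, step=1)
writer.close()  # as in the test, close() flushes any pending events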
Code Example #2
    def testSummaryInCond(self, take_true_branch):
        strategy = get_tpu_strategy()

        def host_computation(x):
            scalar_summary_v2.scalar("x", x, step=0)
            return x * 2.0

        @def_function.function
        def step(take_true_branch):
            def computation(x):
                x = x + 1.0
                if x < 5.0:
                    y = tpu.outside_compilation(host_computation, x)
                    y = tpu.outside_compilation(host_computation, x)
                    x = y
                return x + 1.0

            if take_true_branch:
                return strategy.run(computation, args=(2.0, ))
            else:
                return strategy.run(computation, args=(10.0, ))

        summary_writer = summary.create_file_writer(
            os.path.join(os.getenv("TEST_TMPDIR", "/tmp")),
            flush_millis=10000)

        output_value = 12.
        if take_true_branch:
            output_value = 7.
        with summary_writer.as_default(), summary.always_record_summaries():
            self.assertAllEqual(
                strategy.experimental_local_results(step(take_true_branch)),
                constant_op.constant(output_value,
                                     shape=(strategy.num_replicas_in_sync)))
Code Example #3
 def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
   logdir = self.get_temp_dir()
   try:
     with context.graph_mode():
       writer = summary_ops.create_file_writer(logdir)
       mystep = variables.Variable(0, dtype=dtypes.int64)
       summary_ops.set_step(mystep)
       with writer.as_default():
         write_op = summary_ops.write('tag', 1.0)
       first_assign_op = mystep.assign_add(1)
       second_assign_op = mystep.assign(10)
       with self.cached_session() as sess:
         sess.run(writer.init())
         sess.run(mystep.initializer)
         sess.run(write_op)
         sess.run(first_assign_op)
         sess.run(write_op)
         sess.run(second_assign_op)
         sess.run(write_op)
         sess.run(writer.flush())
     events = events_from_logdir(logdir)
     self.assertEqual(4, len(events))
     self.assertEqual(0, events[1].step)
     self.assertEqual(1, events[2].step)
     self.assertEqual(10, events[3].step)
   finally:
     # Reset to default state for other tests.
     summary_ops.set_step(None)
Code Example #4
File: summary_ops_test.py Project: yuucyf/tensorflow
    def testWrite_usingDefaultStepVariable_fromFunction(self):
        logdir = self.get_temp_dir()
        try:
            with context.eager_mode():
                writer = summary_ops.create_file_writer(logdir)

                @def_function.function
                def f():
                    with writer.as_default():
                        summary_ops.write('tag', 1.0)

                mystep = variables.Variable(0, dtype=dtypes.int64)
                summary_ops.set_step(mystep)
                f()
                mystep.assign_add(1)
                f()
                mystep.assign(10)
                f()
            events = events_from_logdir(logdir)
            self.assertEqual(4, len(events))
            self.assertEqual(0, events[1].step)
            self.assertEqual(1, events[2].step)
            self.assertEqual(10, events[3].step)
        finally:
            # Reset to default state for other tests.
            summary_ops.set_step(None)
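The default-step machinery used by these tests is exposed publicly as tf.summary.experimental.set_step. A hedged sketch of the same variable-backed pattern with the TF2 API (logdir path is a placeholder):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/demo_logs')  # placeholder path
step = tf.Variable(0, dtype=tf.int64)
tf.summary.experimental.set_step(step)  # a variable is read at call time

@tf.function
def log_scalar():
  with writer.as_default():
    tf.summary.scalar('tag', 1.0)  # step defaults to the variable above

log_scalar()        # recorded at step 0
step.assign_add(1)
log_scalar()        # recorded at step 1, without retracing
tf.summary.experimental.set_step(None)  # reset, as the tests do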
Code Example #5
    def testScalarSummaryNameScope(self):
        """Test record_summaries_every_n_global_steps and all_summaries()."""
        with ops.Graph().as_default(), self.test_session() as sess:
            global_step = training_util.get_or_create_global_step()
            global_step.initializer.run()
            with ops.device('/cpu:0'):
                step_increment = state_ops.assign_add(global_step, 1)
            sess.run(step_increment)  # Increment global step from 0 to 1

            logdir = tempfile.mkdtemp()
            with summary_ops.create_file_writer(logdir, max_queue=0,
                                                name='t2').as_default():
                with summary_ops.record_summaries_every_n_global_steps(2):
                    summary_ops.initialize()
                    with ops.name_scope('scope'):
                        summary_op = summary_ops.scalar('my_scalar', 2.0)

                    # Neither of these should produce a summary because
                    # global_step is 1 and "1 % 2 != 0"
                    sess.run(summary_ops.all_summary_ops())
                    sess.run(summary_op)
                    events = summary_test_util.events_from_logdir(logdir)
                    self.assertEqual(len(events), 1)

                    # Increment global step from 1 to 2 and check that the summary
                    # is now written
                    sess.run(step_increment)
                    sess.run(summary_ops.all_summary_ops())
                    events = summary_test_util.events_from_logdir(logdir)
                    self.assertEqual(len(events), 2)
                    self.assertEqual(events[1].summary.value[0].tag,
                                     'scope/my_scalar')
Code Example #6
    def testSummaryInWhile(self):
        strategy = get_tpu_strategy()

        def host_computation(x):
            scalar_summary_v2.scalar("x", x, step=0)
            return x * 2.0

        @def_function.function
        def step():
            def computation(x):
                n = 0
                while n < 3:
                    x = x + 1.0
                    y = tpu.outside_compilation(host_computation, x)
                    y = tpu.outside_compilation(host_computation, x)
                    x = y
                    n = n + 1
                return y + 1.0

            return strategy.run(computation, args=(2.0, ))

        summary_writer = summary.create_file_writer(
            os.path.join(os.getenv("TEST_TMPDIR", "/tmp")),
            flush_millis=10000)
        with summary_writer.as_default(), summary.always_record_summaries():
            self.assertAllEqual(
                strategy.experimental_local_results(step()),
                constant_op.constant(31.,
                                     shape=(strategy.num_replicas_in_sync)))
Code Example #7
    def construct(self, args):
        with self.session.graph.as_default():
            # Inputs
            self.images = tf.placeholder(tf.float32,
                                         [None, MNIST.H, MNIST.W, MNIST.C],
                                         name="images")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")

            # Computation
            hidden = tf.keras.layers.Flatten()(self.images)
            # TODO: Add `args.layers` number of hidden layers with size `args.hidden_layer`,
            # using activation from `args.activation`, allowing "none", "relu", "tanh", "sigmoid".
            # Store the results back to `hidden` variable.
            output_layer = tf.keras.layers.Dense(MNIST.LABELS)(hidden)
            self.predictions = tf.argmax(output_layer, axis=1)

            # Training
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                self.labels, output_layer, from_logits=True)
            global_step = tf.train.create_global_step()
            self.training = tf.train.AdamOptimizer().minimize(
                loss, global_step=global_step, name="training")

            # Summaries
            accuracy = tf.math.reduce_mean(
                tf.cast(tf.equal(self.labels, self.predictions), tf.float32))
            confusion_matrix = tf.reshape(
                tf.confusion_matrix(self.labels,
                                    self.predictions,
                                    weights=tf.not_equal(
                                        self.labels, self.predictions),
                                    dtype=tf.float32),
                [1, MNIST.LABELS, MNIST.LABELS, 1])

            summary_writer = tf_summary.create_file_writer(
                args.logdir, flush_millis=10 * 1000)
            self.summaries = {}
            with summary_writer.as_default(), \
                    tf_summary.record_summaries_every_n_global_steps(100):
                self.summaries["train"] = [
                    tf_summary.scalar("train/loss", loss),
                    tf_summary.scalar("train/accuracy", accuracy)
                ]
            with summary_writer.as_default(), \
                    tf_summary.always_record_summaries():
                for dataset in ["dev", "test"]:
                    self.summaries[dataset] = [
                        tf_summary.scalar(dataset + "/accuracy", accuracy),
                        tf_summary.image(dataset + "/confusion_matrix",
                                         confusion_matrix)
                    ]
                    with tf.control_dependencies(self.summaries[dataset]):
                        self.summaries[dataset].append(summary_writer.flush())

            # Initialize variables
            self.session.run(tf.global_variables_initializer())
            with summary_writer.as_default():
                tf_summary.initialize(session=self.session,
                                      graph=self.session.graph)
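The TODO above is a course exercise; one possible completion, consistent with the comment (assuming args.layers, args.hidden_layer, and args.activation behave exactly as described there):

# Hypothetical completion of the TODO; "none" maps to a linear layer.
activations = {"none": None, "relu": tf.nn.relu,
               "tanh": tf.nn.tanh, "sigmoid": tf.nn.sigmoid}
for _ in range(args.layers):
    hidden = tf.keras.layers.Dense(
        args.hidden_layer, activation=activations[args.activation])(hidden)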
Code Example #8
 def testFlushFunction(self):
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir,
                                             max_queue=999999,
                                             flush_millis=999999)
     with writer.as_default(), summary_ops.always_record_summaries():
         summary_ops.scalar('scalar', 2.0, step=1)
         flush_op = summary_ops.flush()
     with self.test_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         get_total = lambda: len(
             summary_test_util.events_from_logdir(logdir))
         # Note: First tf.Event is always file_version.
         self.assertEqual(1, get_total())
         sess.run(summary_ops.all_summary_ops())
         self.assertEqual(1, get_total())
         sess.run(flush_op)
         self.assertEqual(2, get_total())
         # Test "writer" parameter
         sess.run(summary_ops.all_summary_ops())
         sess.run(summary_ops.flush(writer=writer))
         self.assertEqual(3, get_total())
         sess.run(summary_ops.all_summary_ops())
         sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
         self.assertEqual(4, get_total())
Code Example #9
 def testWriterInitAndClose(self):
   logdir = self.get_temp_dir()
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     with writer.as_default():
       summary_ops.scalar('one', 1.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     self.assertEqual(1, get_total())  # file_version Event
     # Running init() again while writer is open has no effect
     sess.run(writer.init())
     self.assertEqual(1, get_total())
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     # Running close() should do an implicit flush
     sess.run(writer.close())
     self.assertEqual(2, get_total())
     # Running init() on a closed writer should start a new file
     time.sleep(1.1)  # Ensure filename has a different timestamp
     sess.run(writer.init())
     sess.run(summary_ops.all_summary_ops())
     sess.run(writer.close())
     files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
     self.assertEqual(2, len(files))
     self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
Code Example #10
  def _test_summary_for_replica_zero_only(self, d):
    logdir = tempfile.mkdtemp()

    def run_fn():
      """Function executed for each replica."""
      with summary_writer.as_default():
        replica_id = ds_context.get_replica_context().replica_id_in_sync_group
        return summary_ops.write("a", replica_id)

    with self.cached_session() as sess, d.scope(), \
        summary_ops.always_record_summaries():
      # We need global_step because the summary-writing op *always* takes
      # global_step as input, even when we always or never record summaries.
      global_step = training_util.get_or_create_global_step()
      if not context.executing_eagerly():
        # When executing eagerly, variables are initialized immediately after
        # creation, and their initializers will be None.
        global_step.initializer.run()
      summary_ops.set_step(0)
      summary_writer = summary_ops.create_file_writer(logdir)
      output = d.extended.call_for_each_replica(run_fn)
      unwrapped = d.unwrap(output)
      if not context.executing_eagerly():
        sess.run(summary_writer.init())
        sess.run(unwrapped)
        sess.run(summary_writer.close())

      events = _events_from_logdir(self, logdir)
      # There will be 2 entries: 1 summary file header entry, and 1 entry
      # written by replica 0.
      self.assertLen(events, 2)
      self.assertEqual(events[1].summary.value[0].tag, "a")
      self.assertEqual(events[1].summary.value[0].simple_value, 0.0)
Code Example #11
  def testScalarSummaryNameScope(self):
    """Test record_summaries_every_n_global_steps and all_summaries()."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      global_step = training_util.get_or_create_global_step()
      global_step.initializer.run()
      with ops.device('/cpu:0'):
        step_increment = state_ops.assign_add(global_step, 1)
      sess.run(step_increment)  # Increment global step from 0 to 1

      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(logdir, max_queue=0,
                                          name='t2').as_default():
        with summary_ops.record_summaries_every_n_global_steps(2):
          summary_ops.initialize()
          with ops.name_scope('scope'):
            summary_op = summary_ops.scalar('my_scalar', 2.0)

          # Neither of these should produce a summary because
          # global_step is 1 and "1 % 2 != 0"
          sess.run(summary_ops.all_summary_ops())
          sess.run(summary_op)
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 1)

          # Increment global step from 1 to 2 and check that the summary
          # is now written
          sess.run(step_increment)
          sess.run(summary_ops.all_summary_ops())
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 2)
          self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')
Code Example #12
 def testWriterInitAndClose(self):
     logdir = self.get_temp_dir()
     with summary_ops.always_record_summaries():
         writer = summary_ops.create_file_writer(logdir,
                                                 max_queue=100,
                                                 flush_millis=1000000)
         with writer.as_default():
             summary_ops.scalar('one', 1.0, step=1)
     with self.test_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         get_total = lambda: len(
             summary_test_util.events_from_logdir(logdir))
         self.assertEqual(1, get_total())  # file_version Event
         # Running init() again while writer is open has no effect
         sess.run(writer.init())
         self.assertEqual(1, get_total())
         sess.run(summary_ops.all_summary_ops())
         self.assertEqual(1, get_total())
         # Running close() should do an implicit flush
         sess.run(writer.close())
         self.assertEqual(2, get_total())
         # Running init() on a closed writer should start a new file
         time.sleep(1.1)  # Ensure filename has a different timestamp
         sess.run(writer.init())
         sess.run(summary_ops.all_summary_ops())
         sess.run(writer.close())
         files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
         self.assertEqual(2, len(files))
         self.assertEqual(2,
                          len(summary_test_util.events_from_file(files[1])))
Code Example #13
    def _host_call_fn(**kwargs):
      """Training host call.

      Creates summaries for training metrics.

      Args:
        **kwargs: Dict of {str: Tensor}, with `Tensor` of shape `[batch]`. Must
          contain key "global_step" with value of current global_step Tensor.

      Returns:
        List of summary ops to run on the CPU host.
      """

      from tensorflow.python.ops import summary_ops_v2  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top

      gs = tf.cast(kwargs.pop("global_step")[0], dtype=tf.int64)
      for i, summary in enumerate(current_iteration.summaries):
        with summary_ops_v2.create_file_writer(summary.logdir).as_default():
          with summary_ops_v2.record_summaries_every_n_global_steps(
              n=self.config.save_summary_steps, global_step=gs):
            for j, summary_fn in enumerate(summary_fns[i]):
              tensor = kwargs["summary_{}_{}".format(i, j)]
              summary_fn(tensor, step=gs)
        summary.clear_summary_tuples()
      return tf.compat.v1.summary.all_v2_summary_ops()
Code Example #14
    def testSummaryControlFlowIfWithAutoOutsideCompilation(
            self, take_true_branch):
        strategy = get_tpu_strategy()

        @def_function.function
        def step():
            def computation(x):
                x = x + 1.0
                if x < 5:
                    scalar_summary_v2.scalar("x", x, step=0)
                    x = x * 2.0
                return x + 1.0

            if take_true_branch:
                return strategy.run(computation, args=(2.0, ))
            else:
                return strategy.run(computation, args=(10.0, ))

        logdir = tempfile.mkdtemp()
        summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
        output_value = 12.
        if take_true_branch:
            output_value = 7.
        with summary_writer.as_default(), summary.always_record_summaries():
            self.assertAllEqual(
                strategy.experimental_local_results(step()),
                constant_op.constant(output_value,
                                     shape=(strategy.num_replicas_in_sync)))
        if take_true_branch:
            events = _events_from_logdir(self, logdir)
            # There will be 2 entries: 1 summary file header entry, and 1 entry
            # written by host.
            self.assertLen(events, 2)
            self.assertEqual(events[1].summary.value[0].tag, "cond/x")
Code Example #15
    def testHistogramSummaryWithAutoOutsideCompilation(self):
        strategy = get_tpu_strategy()

        def host_computation(x):
            histogram_summary_v2.histogram("x", x, step=0)
            return x * 2.0

        @def_function.function
        def step():
            def computation(x):
                x = x + 1.0
                y = host_computation(x)
                return y + 1.0

            return strategy.run(computation, args=(2.0, ))

        logdir = tempfile.mkdtemp()
        summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
        with summary_writer.as_default(), summary.always_record_summaries():
            self.assertAllEqual(
                strategy.experimental_local_results(step()),
                constant_op.constant(7.,
                                     shape=(strategy.num_replicas_in_sync)))
        events = _events_from_logdir(self, logdir)
        # There will be 2 entries: 1 summary file header entry, and 1 entry
        # written by host.
        self.assertLen(events, 2)
        self.assertEqual(events[1].summary.value[0].tag, "x")
Code Example #16
 def f():
   with summary_ops.create_file_writer(logdir).as_default():
     # Use assertAllEqual instead of assertTrue since it works in a defun.
     self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
     with summary_ops.record_if(True):
       self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
     with summary_ops.record_if(False):
       self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
Code Example #17
 def testWrite_ndarray(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     with summary_ops.create_file_writer(logdir).as_default():
       summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
   events = events_from_logdir(logdir)
   value = events[1].summary.value[0]
   self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
Code Example #18
 def f():
     with summary_ops.create_file_writer(logdir).as_default():
         with summary_ops.record_if(record_fn):
             return [
                 summary_ops.write('tag', 1, step=step),
                 summary_ops.write('tag', 1, step=step),
                 summary_ops.write('tag', 1, step=step)
             ]
Code Example #19
File: callbacks.py Project: StephenOman/tensorflow
 def _init_writer(self):
   """Sets file writer."""
   if context.executing_eagerly():
     self.writer = summary_ops_v2.create_file_writer(self.log_dir)
   elif self.write_graph:
     self.writer = tf_summary.FileWriter(self.log_dir, K.get_session().graph)
   else:
     self.writer = tf_summary.FileWriter(self.log_dir)
Code Example #20
 def testWrite_stringTensor(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     with summary_ops.create_file_writer(logdir).as_default():
       summary_ops.write('tag', [b'foo', b'bar'], step=12)
   events = events_from_logdir(logdir)
   value = events[1].summary.value[0]
   self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
Code Example #21
 def f(tag_prefix):
   with summary_ops.create_file_writer(logdir).as_default():
     default_output = summary_ops.write(tag_prefix + '_default', 1, step=0)
     with summary_ops.always_record_summaries():
       on_output = summary_ops.write(tag_prefix + '_on', 1, step=0)
     with summary_ops.never_record_summaries():
       off_output = summary_ops.write(tag_prefix + '_off', 1, step=0)
     return [default_output, on_output, off_output]
Code Example #22
 def run_metadata_graphs(self, *args, **kwargs):
   assert context.executing_eagerly()
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir)
   with writer.as_default():
     summary_ops.run_metadata_graphs(*args, **kwargs)
   writer.close()
   events = events_from_logdir(logdir)
   return events[1]
Code Example #23
 def exec_summary_op(self, summary_op_fn):
   assert context.executing_eagerly()
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir)
   with writer.as_default():
     summary_op_fn()
   writer.close()
   events = events_from_logdir(logdir)
   return events[1]
Code Example #24
 def testSummaryFileWritersInvalidInput(self):
     # Test case for GitHub issue 46909
     logdir = self.get_temp_dir()
     with session.Session() as sess:
         with self.assertRaises(errors_impl.InvalidArgumentError):
             writer = summary_ops_v2.create_file_writer(logdir=logdir,
                                                        flush_millis=[1, 2])
             sess.run(writer.init())
             sess.run(writer.flush())
Code Example #25
    def testSharedName(self):
        logdir = self.get_temp_dir()
        with summary_ops.always_record_summaries():
            # Create with default shared name (should match logdir)
            writer1 = summary_ops.create_file_writer(logdir)
            with writer1.as_default():
                summary_ops.scalar('one', 1.0, step=1)
            # Create with explicit logdir shared name (should be same resource/file)
            shared_name = 'logdir:' + logdir
            writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
            with writer2.as_default():
                summary_ops.scalar('two', 2.0, step=2)
            # Create with different shared name (should be separate resource/file)
            writer3 = summary_ops.create_file_writer(logdir, name='other')
            with writer3.as_default():
                summary_ops.scalar('three', 3.0, step=3)

        with self.cached_session() as sess:
            # Run init ops across writers sequentially to avoid race condition.
            # TODO(nickfelt): fix race condition in resource manager lookup or create
            sess.run(writer1.init())
            sess.run(writer2.init())
            time.sleep(1.1)  # Ensure filename has a different timestamp
            sess.run(writer3.init())
            sess.run(summary_ops.all_summary_ops())
            sess.run([writer1.flush(), writer2.flush(), writer3.flush()])

        event_files = iter(
            sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

        # First file has tags "one" and "two"
        events = summary_test_util.events_from_file(next(event_files))
        self.assertEqual('brain.Event:2', events[0].file_version)
        tags = [e.summary.value[0].tag for e in events[1:]]
        self.assertItemsEqual(['one', 'two'], tags)

        # Second file has tag "three"
        events = summary_test_util.events_from_file(next(event_files))
        self.assertEqual('brain.Event:2', events[0].file_version)
        tags = [e.summary.value[0].tag for e in events[1:]]
        self.assertItemsEqual(['three'], tags)

        # No more files
        self.assertRaises(StopIteration, lambda: next(event_files))
Code Example #26
  def __init__(self, session, logdir, max_queue=10, flush_secs=120,
               filename_suffix=''):
    """Creates an `EventFileWriterV2` and an event file to write to.

    On construction, this calls `tf.contrib.summary.create_file_writer` within
    the graph from `session.graph` to look up a shared summary writer resource
    for `logdir` if one exists, and create one if not. Creating the summary
    writer resource in turn creates a new event file in `logdir` to be filled
    with `Event` protocol buffers passed to `add_event`. Graph ops to control
    this writer resource are added to `session.graph` during this init call;
    stateful methods on this class will call `session.run()` on these ops.

    Note that because the underlying resource is shared, it is possible that
    other parts of the code using the same session may interact independently
    with the resource, e.g. by flushing or even closing it. It is the caller's
    responsibility to avoid any undesirable sharing in this regard.

    The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
    `filename_suffix`) control the construction of the shared writer resource
    if one is created. If an existing resource is reused, these arguments have
    no effect.  See `tf.contrib.summary.create_file_writer` for details.

    Args:
      session: A `tf.compat.v1.Session`. Session that will hold shared writer
        resource. The writer ops will be added to session.graph during this
        init call.
      logdir: A string. Directory where event file will be written.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
    """
    self._session = session
    self._logdir = logdir
    self._closed = False
    if not gfile.IsDirectory(self._logdir):
      gfile.MakeDirs(self._logdir)

    with self._session.graph.as_default():
      with ops.name_scope('filewriter'):
        file_writer = summary_ops_v2.create_file_writer(
            logdir=self._logdir,
            max_queue=max_queue,
            flush_millis=flush_secs * 1000,
            filename_suffix=filename_suffix)
        with summary_ops_v2.always_record_summaries(), file_writer.as_default():
          self._event_placeholder = array_ops.placeholder_with_default(
              constant_op.constant('unused', dtypes.string),
              shape=[])
          self._add_event_op = summary_ops_v2.import_event(
              self._event_placeholder)
        self._init_op = file_writer.init()
        self._flush_op = file_writer.flush()
        self._close_op = file_writer.close()
      self._session.run(self._init_op)
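A minimal usage sketch for this writer (an illustration only: it assumes the add_event() method described in the docstring, plus the flush()/close() methods that session-based event writers conventionally expose; the logdir path is a placeholder):

import tensorflow.compat.v1 as tf1
from tensorflow.core.util import event_pb2

tf1.disable_eager_execution()  # this writer is session/graph based
with tf1.Session() as sess:
  writer = EventFileWriterV2(sess, '/tmp/demo_logs')  # placeholder path
  writer.add_event(event_pb2.Event(wall_time=0.0, step=1))  # fed via import_event
  writer.flush()   # assumed: standard event-writer methods
  writer.close()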
Code Example #27
 def keras_model(self, *args, **kwargs):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir)
   with writer.as_default():
     summary_ops.keras_model(*args, **kwargs)
   writer.close()
   events = events_from_logdir(logdir)
   # The first event contains no summary values. The written content goes to
   # the second event.
   return events[1]
Code Example #28
 def testWrite_tensor(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     t = constant_op.constant([[1, 2], [3, 4]])
     with summary_ops.create_file_writer(logdir).as_default():
       summary_ops.write('tag', t, step=12)
     expected = t.numpy()
   events = events_from_logdir(logdir)
   value = events[1].summary.value[0]
   self.assertAllEqual(expected, to_numpy(value))
Code Example #29
    def testImageSummary(self):
        strategy = get_tpu_strategy()

        def run():
            @def_function.function
            def sample_sequence():
                bsz = 3
                max_length = 32 * 32

                def f():
                    def body(step, tokens):
                        next_token = random_ops.random_uniform([bsz])
                        tokens = tokens.write(step, next_token)
                        return (step + 1, tokens)

                    def cond(step, tokens):
                        del tokens
                        return math_ops.less(step, max_length)

                    tokens_var = tensor_array_ops.TensorArray(
                        dtype=dtypes.float32,
                        size=max_length,
                        dynamic_size=False,
                        clear_after_read=False,
                        element_shape=(bsz, ),
                        name="tokens_accumulator",
                    )

                    step = constant_op.constant(0)
                    step, tokens_var = control_flow_ops.while_loop(
                        cond, body, [step, tokens_var])

                    image_flat = array_ops.transpose(tokens_var.stack(),
                                                     [1, 0])
                    image = array_ops.tile(
                        array_ops.reshape(image_flat, [bsz, 32, 32, 1]),
                        [1, 1, 1, 3])
                    image_summary_v2.image(
                        "image_sample", image,
                        constant_op.constant(5, dtype=dtypes.int64))

                return strategy.run(f)

            sample_sequence()

        logdir = tempfile.mkdtemp()
        summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
        with summary_writer.as_default(), summary.always_record_summaries():
            run()
        events = _events_from_logdir(self, logdir)
        decoded_image = image_ops.decode_png(
            events[1].summary.value[0].tensor.string_val[2]).numpy()
        # Ensure that non-zero values were written to the image summary.
        self.assertNotAllEqual(array_ops.zeros((3072, ), dtype=dtypes.float32),
                               list(decoded_image.flat))
Code Example #30
  def __init__(self, session, logdir, max_queue=10, flush_secs=120,
               filename_suffix=''):
    """Creates an `EventFileWriterV2` and an event file to write to.

    On construction, this calls `tf.contrib.summary.create_file_writer` within
    the graph from `session.graph` to look up a shared summary writer resource
    for `logdir` if one exists, and create one if not. Creating the summary
    writer resource in turn creates a new event file in `logdir` to be filled
    with `Event` protocol buffers passed to `add_event`. Graph ops to control
    this writer resource are added to `session.graph` during this init call;
    stateful methods on this class will call `session.run()` on these ops.

    Note that because the underlying resource is shared, it is possible that
    other parts of the code using the same session may interact independently
    with the resource, e.g. by flushing or even closing it. It is the caller's
    responsibility to avoid any undesirable sharing in this regard.

    The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
    `filename_suffix`) control the construction of the shared writer resource
    if one is created. If an existing resource is reused, these arguments have
    no effect.  See `tf.contrib.summary.create_file_writer` for details.

    Args:
      session: A `tf.compat.v1.Session`. Session that will hold shared writer
        resource. The writer ops will be added to session.graph during this
        init call.
      logdir: A string. Directory where event file will be written.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
    """
    self._session = session
    self._logdir = logdir
    self._closed = False
    gfile.MakeDirs(self._logdir)

    with self._session.graph.as_default():
      with ops.name_scope('filewriter'):
        file_writer = summary_ops_v2.create_file_writer(
            logdir=self._logdir,
            max_queue=max_queue,
            flush_millis=flush_secs * 1000,
            filename_suffix=filename_suffix)
        with summary_ops_v2.always_record_summaries(), file_writer.as_default():
          self._event_placeholder = array_ops.placeholder_with_default(
              constant_op.constant('unused', dtypes.string),
              shape=[])
          self._add_event_op = summary_ops_v2.import_event(
              self._event_placeholder)
        self._init_op = file_writer.init()  # pylint: disable=assignment-from-no-return
        self._flush_op = file_writer.flush()  # pylint: disable=assignment-from-no-return
        self._close_op = file_writer.close()  # pylint: disable=assignment-from-no-return
      self._session.run(self._init_op)
Code Example #31
 def testSummaryName(self):
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir, max_queue=0)
     with writer.as_default(), summary_ops.always_record_summaries():
         summary_ops.scalar('scalar', 2.0, step=1)
     with self.cached_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         sess.run(summary_ops.all_summary_ops())
     events = summary_test_util.events_from_logdir(logdir)
     self.assertEqual(2, len(events))
     self.assertEqual('scalar', events[1].summary.value[0].tag)
Code Example #32
 def testEagerMemory(self):
   training_util.get_or_create_global_step()
   logdir = self.get_temp_dir()
   with summary_ops.create_file_writer(
       logdir, max_queue=0,
       name='t0').as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, '')
     summary_ops.scalar('scalar', 2.0)
     summary_ops.histogram('histogram', [1.0])
     summary_ops.image('image', [[[[1.0]]]])
     summary_ops.audio('audio', [[1.0]], 1.0, 1)
Code Example #33
 def run_trace(self, f, step=1):
     assert context.executing_eagerly()
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir)
     summary_ops.trace_on(graph=True, profiler=False)
     with writer.as_default():
         f()
         summary_ops.trace_export(name='foo', step=step)
     writer.close()
     events = events_from_logdir(logdir)
     return events[1]
Code Example #34
File: callbacks_v1.py Project: kylin9872/tensorflow
 def _init_writer(self, model):
   """Sets file writer."""
   if context.executing_eagerly():
     self.writer = summary_ops_v2.create_file_writer(self.log_dir)
     if not model.run_eagerly and self.write_graph:
       with self.writer.as_default():
         summary_ops_v2.graph(K.get_graph())
   elif self.write_graph:
     self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
   else:
     self.writer = tf_summary.FileWriter(self.log_dir)
Code Example #35
 def _init_writer(self, model):
     """Sets file writer."""
     if context.executing_eagerly():
         self.writer = summary_ops_v2.create_file_writer(self.log_dir)
         if not model.run_eagerly and self.write_graph:
             with self.writer.as_default():
                 summary_ops_v2.graph(K.get_graph(), step=0)
     elif self.write_graph:
         self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
     else:
         self.writer = tf_summary.FileWriter(self.log_dir)
Code Example #36
  def testSummaryGlobalStep(self):
    step = training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t2').as_default(), summary_ops.always_record_summaries():

      summary_ops.scalar('scalar', 2.0, step=step)

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'scalar')
Code Example #37
File: metrics_test.py Project: didukhle/tensorflow
  def testWriteSummaries(self):
    m = metrics.Mean()
    m([1, 10, 100])
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name="t0").as_default(), summary_ops.always_record_summaries():
      m.result()  # As a side-effect will write summaries.

    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
Code Example #38
 def testMaxQueue(self):
   logs = tempfile.mkdtemp()
   with summary_ops.create_file_writer(
       logs, max_queue=1, flush_millis=999999,
       name='lol').as_default(), summary_ops.always_record_summaries():
     get_total = lambda: len(summary_test_util.events_from_logdir(logs))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=1)
     self.assertEqual(1, get_total())
     # Should flush after second summary since max_queue = 1
     summary_ops.scalar('scalar', 2.0, step=2)
     self.assertEqual(3, get_total())
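The max_queue semantics asserted here hold for the public API as well: the queue holds max_queue pending events, and the next write flushes them all. A hedged TF2 sketch (logdir path is a placeholder):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/demo_logs',  # placeholder path
                                       max_queue=1, flush_millis=999999)
with writer.as_default():
  tf.summary.scalar('scalar', 2.0, step=1)  # queued
  tf.summary.scalar('scalar', 2.0, step=2)  # exceeds max_queue, flushes both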
Code Example #39
  def testSharedName(self):
    logdir = self.get_temp_dir()
    with summary_ops.always_record_summaries():
      # Create with default shared name (should match logdir)
      writer1 = summary_ops.create_file_writer(logdir)
      with writer1.as_default():
        summary_ops.scalar('one', 1.0, step=1)
        summary_ops.flush()
      # Create with explicit logdir shared name (should be same resource/file)
      shared_name = 'logdir:' + logdir
      writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
      with writer2.as_default():
        summary_ops.scalar('two', 2.0, step=2)
        summary_ops.flush()
      # Create with different shared name (should be separate resource/file)
      time.sleep(1.1)  # Ensure filename has a different timestamp
      writer3 = summary_ops.create_file_writer(logdir, name='other')
      with writer3.as_default():
        summary_ops.scalar('three', 3.0, step=3)
        summary_ops.flush()

    event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))

    # First file has tags "one" and "two"
    events = iter(summary_test_util.events_from_file(next(event_files)))
    self.assertEqual('brain.Event:2', next(events).file_version)
    self.assertEqual('one', next(events).summary.value[0].tag)
    self.assertEqual('two', next(events).summary.value[0].tag)
    self.assertRaises(StopIteration, lambda: next(events))

    # Second file has tag "three"
    events = iter(summary_test_util.events_from_file(next(event_files)))
    self.assertEqual('brain.Event:2', next(events).file_version)
    self.assertEqual('three', next(events).summary.value[0].tag)
    self.assertRaises(StopIteration, lambda: next(events))

    # No more files
    self.assertRaises(StopIteration, lambda: next(event_files))
Code Example #40
 def testSummaryGlobalStep(self):
   training_util.get_or_create_global_step()
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir, max_queue=0)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0)
   with self.cached_session() as sess:
     sess.run(variables.global_variables_initializer())
     sess.run(summary_ops.summary_writer_initializer_op())
     step, _ = sess.run(
         [training_util.get_global_step(), summary_ops.all_summary_ops()])
   events = summary_test_util.events_from_logdir(logdir)
   self.assertEqual(2, len(events))
   self.assertEqual(step, events[1].step)
Code Example #41
 def testWrite_gpuDeviceContext(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     with summary_ops.create_file_writer(logdir).as_default():
       with ops.device('/GPU:0'):
         value = constant_op.constant(42.0)
         step = constant_op.constant(12, dtype=dtypes.int64)
         summary_ops.write('tag', value, step=step).numpy()
   empty_metadata = summary_pb2.SummaryMetadata()
   events = events_from_logdir(logdir)
   self.assertEqual(2, len(events))
   self.assertEqual(12, events[1].step)
   self.assertEqual(42, to_numpy(events[1].summary.value[0]))
   self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
Code Example #42
 def testSummaryOps(self):
   training_util.get_or_create_global_step()
   logdir = tempfile.mkdtemp()
   with summary_ops.create_file_writer(
       logdir, max_queue=0,
       name='t0').as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, '')
     summary_ops.scalar('scalar', 2.0)
     summary_ops.histogram('histogram', [1.0])
     summary_ops.image('image', [[[[1.0]]]])
     summary_ops.audio('audio', [[1.0]], 1.0, 1)
     # The working condition of the ops is tested in the C++ test so we just
     # test here that we're calling them correctly.
     self.assertTrue(gfile.Exists(logdir))
Code Example #43
 def testWriterFlush(self):
   logdir = self.get_temp_dir()
   get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     self.assertEqual(1, get_total())  # file_version Event
     with writer.as_default():
       summary_ops.scalar('one', 1.0, step=1)
       self.assertEqual(1, get_total())
       writer.flush()
       self.assertEqual(2, get_total())
       summary_ops.scalar('two', 2.0, step=2)
     # Exiting the "as_default()" should do an implicit flush of the "two" tag
     self.assertEqual(3, get_total())
Code Example #44
 def testSummaryOps(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir, max_queue=0)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, step=1)
     summary_ops.scalar('scalar', 2.0, step=1)
     summary_ops.histogram('histogram', [1.0], step=1)
     summary_ops.image('image', [[[[1.0]]]], step=1)
     summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     sess.run(summary_ops.all_summary_ops())
   # The working condition of the ops is tested in the C++ test so we just
   # test here that we're calling them correctly.
   self.assertTrue(gfile.Exists(logdir))
Code Example #45
 def testWriterFlush(self):
   logdir = self.get_temp_dir()
   with summary_ops.always_record_summaries():
     writer = summary_ops.create_file_writer(
         logdir, max_queue=100, flush_millis=1000000)
     with writer.as_default():
       summary_ops.scalar('one', 1.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     self.assertEqual(1, get_total())  # file_version Event
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     sess.run(writer.flush())
     self.assertEqual(2, get_total())
Code Example #46
  def testDefunSummarys(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name='t1').as_default(), summary_ops.always_record_summaries():

      @function.defun
      def write():
        summary_ops.scalar('scalar', 2.0)

      write()
      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
Code Example #47
 def testMaxQueue(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(
       logdir, max_queue=1, flush_millis=999999)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.scalar('scalar', 2.0, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(1, get_total())
     # Should flush after second summary since max_queue = 1
     sess.run(summary_ops.all_summary_ops())
     self.assertEqual(3, get_total())
Code Example #48
 def testFlushFunction(self):
   logs = tempfile.mkdtemp()
   writer = summary_ops.create_file_writer(
       logs, max_queue=999999, flush_millis=999999, name='lol')
   with writer.as_default(), summary_ops.always_record_summaries():
     get_total = lambda: len(summary_test_util.events_from_logdir(logs))
     # Note: First tf.Event is always file_version.
     self.assertEqual(1, get_total())
     summary_ops.scalar('scalar', 2.0, step=1)
     summary_ops.scalar('scalar', 2.0, step=2)
     self.assertEqual(1, get_total())
     summary_ops.flush()
     self.assertEqual(3, get_total())
     # Test "writer" parameter
     summary_ops.scalar('scalar', 2.0, step=3)
     summary_ops.flush(writer=writer)
     self.assertEqual(4, get_total())
     summary_ops.scalar('scalar', 2.0, step=4)
     summary_ops.flush(writer=writer._resource)  # pylint:disable=protected-access
     self.assertEqual(5, get_total())
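tf.summary.flush is the public counterpart of the flush op used above; a short eager sketch (logdir path is a placeholder):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/demo_logs',  # placeholder path
                                       max_queue=999999, flush_millis=999999)
with writer.as_default():
  tf.summary.scalar('scalar', 2.0, step=1)
tf.summary.flush(writer=writer)  # force pending events to disk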
Code Example #49
  def testSummaryGraphModeCond(self):
    with ops.Graph().as_default(), self.cached_session():
      training_util.get_or_create_global_step()
      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(
          logdir, max_queue=0,
          name='t2').as_default(), summary_ops.always_record_summaries():
        summary_ops.initialize()
        training_util.get_or_create_global_step().initializer.run()
        def f():
          summary_ops.scalar('scalar', 2.0)
          return constant_op.constant(True)
        pred = array_ops.placeholder(dtypes.bool)
        x = control_flow_ops.cond(pred, f,
                                  lambda: constant_op.constant(False))
        x.eval(feed_dict={pred: True})

      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
Code Example #50
 def testWrite_usingDefaultStepConstant_fromFunction(self):
   logdir = self.get_temp_dir()
   try:
     with context.eager_mode():
       writer = summary_ops.create_file_writer(logdir)
       @def_function.function
       def f():
         with writer.as_default():
           summary_ops.write('tag', 1.0)
       summary_ops.set_step(1)
       f()
       summary_ops.set_step(2)
       f()
     events = events_from_logdir(logdir)
     self.assertEqual(3, len(events))
     self.assertEqual(1, events[1].step)
     # The step value will still be 1 because the value was captured at the
     # time the function was first traced.
     self.assertEqual(1, events[2].step)
   finally:
     # Reset to default state for other tests.
     summary_ops.set_step(None)
Code Example #51
 def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
   logdir = self.get_temp_dir()
   try:
     with context.graph_mode():
       writer = summary_ops.create_file_writer(logdir)
       summary_ops.set_step(1)
       with writer.as_default():
         write_op = summary_ops.write('tag', 1.0)
       summary_ops.set_step(2)
       with self.cached_session() as sess:
         sess.run(writer.init())
         sess.run(write_op)
         sess.run(write_op)
         sess.run(writer.flush())
     events = events_from_logdir(logdir)
     self.assertEqual(3, len(events))
     self.assertEqual(1, events[1].step)
     # The step value will still be 1 because the value was captured at the
     # time the graph was constructed.
     self.assertEqual(1, events[2].step)
   finally:
     # Reset to default state for other tests.
     summary_ops.set_step(None)
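As the comments in the last two examples note, a plain Python int passed to set_step is captured once, at tracing or graph-construction time, while the variable-backed examples earlier are read at execution time. A one-glance contrast using the public TF2 API (hedged illustration):

import tensorflow as tf

# Captured by value at trace time: later writes keep seeing step 1.
tf.summary.experimental.set_step(1)

# Read at execution time: later writes see the variable's current value.
step = tf.Variable(0, dtype=tf.int64)
tf.summary.experimental.set_step(step)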