示例#1
0
 def testAllV2SummaryOps(self):
   logdir = self.get_temp_dir()
   def build_summary_ops():
     """Creates one summary op of each supported type and returns them."""
     return [
         # TF 2.0 summary ops
         summary_ops.write('write', 1, step=0),
         summary_ops.write_raw_pb(b'', step=0, name='raw_pb'),
         # TF 1.x tf.contrib.summary ops
         summary_ops.generic('tensor', 1, step=1),
         summary_ops.scalar('scalar', 2.0, step=1),
         summary_ops.histogram('histogram', [1.0], step=1),
         summary_ops.image('image', [[[[1.0]]]], step=1),
         summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1),
     ]
   with context.graph_mode():
     ops_without_writer = build_summary_ops()
     with summary_ops.create_file_writer_v2(logdir).as_default():
       with summary_ops.record_if(True):
         ops_recording_on = build_summary_ops()
       with summary_ops.record_if(False):
         ops_recording_off = build_summary_ops()
     # Ops created under a default writer should be collected regardless of
     # the record_if setting; ops created with no writer at all should not.
     del ops_without_writer
     expected_ops = ops_recording_on + ops_recording_off
     self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
示例#2
0
 def testAllV2SummaryOps(self):
   logdir = self.get_temp_dir()
   def make_ops():
     """Creates one summary op of each flavor (v2 then contrib)."""
     v2_ops = [
         summary_ops.write('write', 1, step=0),
         summary_ops.write_raw_pb(b'', step=0, name='raw_pb'),
     ]
     contrib_ops = [
         summary_ops.generic('tensor', 1, step=1),
         summary_ops.scalar('scalar', 2.0, step=1),
         summary_ops.histogram('histogram', [1.0], step=1),
         summary_ops.image('image', [[[[1.0]]]], step=1),
         summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1),
     ]
     return v2_ops + contrib_ops
   with context.graph_mode():
     unwritten_ops = make_ops()  # No default writer: must not be collected.
     with summary_ops.create_file_writer_v2(logdir).as_default():
       with summary_ops.record_if(True):
         on_ops = make_ops()
       with summary_ops.record_if(False):
         off_ops = make_ops()
     # Every op created under a default writer is expected to be collected,
     # whether or not recording was enabled at creation time.
     del unwritten_ops
     self.assertCountEqual(on_ops + off_ops, summary_ops.all_v2_summary_ops())
示例#3
0
 def f():
   with writer.as_default():
     # assertAllEqual (rather than assertTrue) so the check works in a defun.
     self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
     for tag, setting in (('set_on', True), ('set_off', False)):
       with summary_ops.record_if(setting):
         self.assertAllEqual(summary_ops.write(tag, 1, step=0), setting)
 def f():
   def check(tag, expected):
     # assertAllEqual instead of assertTrue since it works in a defun.
     self.assertAllEqual(summary_ops.write(tag, 1, step=0), expected)
   with writer.as_default():
     check('default', True)
     with summary_ops.record_if(True):
       check('set_on', True)
     with summary_ops.record_if(False):
       check('set_off', False)
 def testWrite_recordIf_constant(self):
   logdir = self.get_temp_dir()
   # Constant record_if settings override the writer's default of recording.
   with context.eager_mode():
     with summary_ops.create_file_writer_v2(logdir).as_default():
       self.assertTrue(summary_ops.write('default', 1, step=0))
       with summary_ops.record_if(True):
         self.assertTrue(summary_ops.write('set_on', 1, step=0))
       with summary_ops.record_if(False):
         self.assertFalse(summary_ops.write('set_off', 1, step=0))
   events = events_from_logdir(logdir)
   # Only 'default' and 'set_on' should have produced summary events.
   self.assertEqual(3, len(events))
   self.assertEqual('default', events[1].summary.value[0].tag)
   self.assertEqual('set_on', events[2].summary.value[0].tag)
 def testWrite_recordIf_constant(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     with summary_ops.create_file_writer_v2(logdir).as_default():
       self.assertTrue(summary_ops.write('default', 1, step=0))
       # Drive both record_if settings through the same code path.
       for setting, tag in ((True, 'set_on'), (False, 'set_off')):
         with summary_ops.record_if(setting):
           check = self.assertTrue if setting else self.assertFalse
           check(summary_ops.write(tag, 1, step=0))
   events = events_from_logdir(logdir)
   self.assertEqual(3, len(events))
   for index, tag in ((1, 'default'), (2, 'set_on')):
     self.assertEqual(tag, events[index].summary.value[0].tag)
 def f():
   # Three writes under the caller-supplied record_if condition.
   with writer.as_default():
     with summary_ops.record_if(record_fn):
       return [summary_ops.write('tag', 1, step=step) for _ in range(3)]
示例#8
0
    def _write_custom_summaries(self, step, logs=None):
        """Writes metrics out as custom scalar summaries.

        Arguments:
            step: the global step to use for TensorBoard.
            logs: dict. Keys are scalar summary names, values are
                NumPy scalars.
        """
        def to_scalar(value):
            # Unwrap 0-d NumPy arrays into plain Python scalars.
            return value.item() if isinstance(value, np.ndarray) else value

        logs = logs or {}
        if context.executing_eagerly():
            # Eager: use the v2 summary ops under the default writer.
            with self.writer.as_default(), summary_ops_v2.record_if(True):
                for name, value in logs.items():
                    summary_ops_v2.scalar(name, to_scalar(value), step=step)
        else:
            # Graph mode: build v1 Summary protos and use FileWriter.
            for name, value in logs.items():
                summary = tf_summary.Summary()
                summary_value = summary.value.add()
                summary_value.simple_value = to_scalar(value)
                summary_value.tag = name
                self.writer.add_summary(summary, step)
        self.writer.flush()
示例#9
0
 def f():
   with summary_ops.create_file_writer(logdir).as_default():
     with summary_ops.record_if(record_fn):
       # Emit three identical writes; recording is gated by record_fn.
       return [summary_ops.write('tag', 1, step=step) for _ in range(3)]
示例#10
0
 def f():
   # Each write consults record_fn to decide whether to record.
   with writer.as_default(), summary_ops.record_if(record_fn):
     results = []
     for _ in range(3):
       results.append(summary_ops.write('tag', 1, step=step))
     return results
 def _custom_step(features, labels):
     del labels
     logits = model(features)
     with summary_ops_v2.record_if(True), writer.as_default():
         scalar_summary_v2.scalar('logits',
                                  math_ops.reduce_sum(logits),
                                  step=model.optimizer.iterations)
     return logits
示例#12
0
    def write_model_to_tensorboard(self, model: Model):
        """
        Write the given model as a graph in tensorboard.

        :param model: The model to write to tensorboard.
        """
        with self._file_writer.as_default():
            # Feature-detect instead of pinning exact patch versions: the old
            # `tf.__version__ == "2.4.1"` / `== "2.5.0"` checks silently did
            # nothing on any other release (e.g. 2.4.2). `keras_model` exists
            # on `summary_ops_v2` through TF 2.4.x; TF 2.5 moved the
            # functionality into `keras_model_summary`.
            if hasattr(summary_ops_v2, "keras_model"):
                with summary_ops_v2.always_record_summaries():
                    summary_ops_v2.keras_model(name=model.name,
                                               data=model,
                                               step=0)
            else:
                from tensorflow.python.keras.callbacks import keras_model_summary

                with summary_ops_v2.record_if(True):
                    keras_model_summary("keras", model, step=0)
示例#13
0
 def testWrite_recordIf_callable(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     step = variables.Variable(-1, dtype=dtypes.int64)
     def should_record():
       # Advance the step first, so step 0 is the first recorded step.
       step.assign_add(1)
       return int(step % 2) == 0
     with summary_ops.create_file_writer_v2(logdir).as_default():
       with summary_ops.record_if(should_record):
         for expect_written in (True, False, True, False, True):
           check = self.assertTrue if expect_written else self.assertFalse
           check(summary_ops.write('tag', 1, step=step))
   events = events_from_logdir(logdir)
   self.assertEqual(4, len(events))
   # Only the even steps should have been written.
   for index, expected_step in enumerate((0, 2, 4), start=1):
     self.assertEqual(expected_step, events[index].step)
示例#14
0
 def testWrite_recordIf_callable(self):
   logdir = self.get_temp_dir()
   with context.eager_mode():
     step = variables.Variable(-1, dtype=dtypes.int64)
     def alternating_record():
       # Starts at -1 and increments before the check, so even steps record.
       step.assign_add(1)
       return int(step % 2) == 0
     with summary_ops.create_file_writer_v2(logdir).as_default():
       with summary_ops.record_if(alternating_record):
         self.assertTrue(summary_ops.write('tag', 1, step=step))
         self.assertFalse(summary_ops.write('tag', 1, step=step))
         self.assertTrue(summary_ops.write('tag', 1, step=step))
         self.assertFalse(summary_ops.write('tag', 1, step=step))
         self.assertTrue(summary_ops.write('tag', 1, step=step))
   events = events_from_logdir(logdir)
   self.assertEqual(4, len(events))
   for index, expected in zip((1, 2, 3), (0, 2, 4)):
     self.assertEqual(expected, events[index].step)
示例#15
0
 def f(step):
   # Records only on even steps.
   with writer.as_default(), \
       summary_ops.record_if(math_ops.equal(step % 2, 0)):
     return summary_ops.write('tag', 1, step=step)
示例#16
0
  def start(self):
    """Starts the evaluation loop, re-evaluating each new checkpoint.

    Blocks on `checkpoints_iterator`, restoring every checkpoint that appears
    in `self.checkpoint_dir`, running `self.model.evaluate`, logging metric
    results (and writing them as scalar summaries when a summary writer is
    configured). Returns once a checkpoint matching `max_evaluations` has
    been evaluated.

    Raises:
      RuntimeError: if the restored checkpoint does not track `iterations`.
    """
    optimizer_checkpoint = tracking_util.Checkpoint(iter=self._iterations)
    checkpoint = tracking_util.Checkpoint(
        model=self.model, optimizer=optimizer_checkpoint)

    for latest_checkpoint in checkpoint_utils.checkpoints_iterator(
        self.checkpoint_dir):
      try:
        # `expect_partial` because the checkpoint can have other `Trackable`s
        # such as `optimizer`.
        checkpoint.restore(latest_checkpoint).expect_partial()
        checkpoint_attributes = list_checkpoint_attributes(latest_checkpoint)
        # The checkpoint should contain model and optimizer for SidecarEvaluator
        # to work. But the model weights saved by ModelCheckpoint callback does
        # not contain model as an attribute. To make SidecarEvaluator compatibly
        # work in this case, if model attribute is not found but
        # layer_with_weights attribute is found, use model.load_weights to load
        # the model's weights, while self._iterations is still restored by
        # checkpoint variable.
        if 'model' not in checkpoint_attributes:
          for attribute in checkpoint_attributes:
            # Check whether the checkpoint has the required attributes for
            # model.load_weights to work. (The previous pattern
            # `[\d+]` was a character class matching a single digit or a
            # literal '+'; `\d+` matches the intended run of digits.)
            if re.match(r'^layer_with_weights-\d+', attribute) is not None:
              self.model.load_weights(latest_checkpoint)
              break
      except (errors_impl.OpError,) as e:
        # A couple errors can happen here with the coordinator racing to write
        # checkpoint:
        # 1) OpError: open failed for <file path>: No such file or directory
        # 2) NotFoundError (subclass of OpError): Unsuccessful
        # TensorSliceReader constructor.
        # TODO(rchao): Remove this except block once b/150954027 is resolved.
        logging.info(
            'SidecarEvaluator has an error loading '
            'checkpoint: %s. Retrying. Error: %s: %s', latest_checkpoint,
            e.__class__.__name__, e)
        continue

      if self._iterations.numpy() == _ITERATIONS_UNINITIALIZED:
        raise RuntimeError(
            '`iterations` cannot be loaded from the '
            'checkpoint file. Please ensure `iterations` is '
            'tracked in the `checkpoint` saved by the coordinator.')

      logging.info(
          'Evaluation starts: Model weights loaded from latest '
          'checkpoint file: %s.', latest_checkpoint)

      # TODO(rchao): Support arbitrary callback for extensibility.
      self.model.evaluate(self.data, steps=self.steps)

      logging.info('End of evaluation. Accuracy: %r', [
          metric.result().numpy()
          for metric in self.model.compiled_metrics.metrics
      ])

      if self._summary_writer:
        with summary_ops_v2.record_if(True), self._summary_writer.as_default():
          for metric in self.model.compiled_metrics.metrics:
            summary_ops_v2.scalar(
                metric.name,
                metric.result(),
                step=self._iterations.read_value())

      # TODO(rchao): Make the max evaluation robust in case users save the
      # checkpoints with epoch format {epoch:03d}.
      if (self.max_evaluations and
          latest_checkpoint.endswith('-{}'.format(self.max_evaluations))):
        # Exit the loop because we have evaluated the final checkpoint file.
        logging.info('Last checkpoint evaluated. SidecarEvaluator stops.')
        return
示例#17
0
 def f(step):
   with writer.as_default():
     # Boolean tensor gating the write: true on even steps only.
     should_record = math_ops.equal(step % 2, 0)
     with summary_ops.record_if(should_record):
       return summary_ops.write('tag', 1, step=step)
示例#18
0
 def f(step):
   # Writes only when the step is even.
   file_writer = summary_ops.create_file_writer(logdir)
   with file_writer.as_default(), \
       summary_ops.record_if(math_ops.equal(step % 2, 0)):
     return summary_ops.write('tag', 1, step=step)