def _log_summary(self, tf_name, summary, value, step=None):
    event = event_pb2.Event(wall_time=self._time(), summary=summary)
    if step is not None:
        event.step = int(step)
    if self.is_dummy:
        # In dummy mode, keep (step, value) pairs in memory instead of
        # writing an event file.
        self.dummy_log[tf_name].append((step, value))
    else:
        self._write_event(event)
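All of the snippets in this listing assume TensorFlow's generated protobuf modules plus a few standard-library helpers. A minimal preamble, assuming the TF 1.x module layout these examples were written against (exact paths vary by version):

import os
import time

from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record   # used by the reader examples
from tensorflow.python.platform import gfile     # used by the file-system examples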
Example #2
 def add_summary(self, summary, step):
   if not self._enabled:
     return
   event = event_pb2.Event(summary=summary)
   event.wall_time = time.time()
   if step is not None:
     event.step = int(step)
   self._event_writer.add_event(event)
Example #3
 def AddScalar(self, tag, wall_time=0, step=0, value=0):
     event = event_pb2.Event(
         wall_time=wall_time,
         step=step,
         summary=summary_pb2.Summary(
             value=[summary_pb2.Summary.Value(tag=tag, simple_value=value)]))
     self.AddEvent(event)
Example #4
 def write_values(self, key2val):
     summary = tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=float(v))
                                 for (k, v) in key2val.items()])
     event = event_pb2.Event(wall_time=time.time(), summary=summary)
     event.step = self.step  # is there any reason why you'd want to specify the step?
     self.evwriter.WriteEvent(event)
     self.evwriter.Flush()
     self.step += 1
Example #5
 def prepare_run_debug_urls(self, fetches, feed_dict):
     """Implementation of abstract method in superclass.

     See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`
     for details. This implementation creates a run-specific subdirectory under
     self._session_root and stores information regarding run `fetches` and
     `feed_dict.keys()` in the subdirectory.

     Args:
       fetches: Same as the `fetches` argument to `Session.run()`.
       feed_dict: Same as the `feed_dict` argument to `Session.run()`.

     Returns:
       debug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in
         this `Session.run()` call.
     """

     # Use a microsecond timestamp plus a per-session run counter to keep the
     # directory name unique across concurrent run() calls.
     self._run_counter_lock.acquire()
     run_dir = os.path.join(
         self._session_root,
         "run_%d_%d" % (int(time.time() * 1e6), self._run_counter))
     self._run_counter += 1
     self._run_counter_lock.release()
     gfile.MkDir(run_dir)

     fetches_event = event_pb2.Event()
     fetches_event.log_message.message = repr(fetches)
     fetches_path = os.path.join(
         run_dir,
         debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)
     with gfile.Open(fetches_path, "wb") as f:
         f.write(fetches_event.SerializeToString())

     feed_keys_event = event_pb2.Event()
     feed_keys_event.log_message.message = (repr(feed_dict.keys()) if
                                            feed_dict else repr(feed_dict))

     feed_keys_path = os.path.join(
         run_dir, debug_data.METADATA_FILE_PREFIX +
         debug_data.FEED_KEYS_INFO_FILE_TAG)
     with gfile.Open(feed_keys_path, "wb") as f:
         f.write(feed_keys_event.SerializeToString())

     return ["file://" + run_dir]
Example #6
def log_image(im, logger, tag, step=0):
    im = make_image(im)

    summary = [tf.Summary.Value(tag=tag, image=im)]
    summary = tf.Summary(value=summary)
    event = event_pb2.Event(summary=summary)
    event.step = step
    logger.writer.WriteEvent(event)
    logger.writer.Flush()
Example #7
 def testFirstEventTimestamp(self):
   """Test that FirstEventTimestamp() returns wall_time of the first event."""
   gen = _EventGenerator()
   acc = ea.EventAccumulator(gen)
   gen.AddEvent(
       event_pb2.Event(
           wall_time=10, step=20, file_version='brain.Event:2'))
   gen.AddScalar('s1', wall_time=30, step=40, value=20)
   self.assertEqual(acc.FirstEventTimestamp(), 10)
Example #8
 def _GenerateEventsData(self):
     fw = writer.FileWriter(self.log_dir)
     event = event_pb2.Event(
         wall_time=1,
         step=1,
         summary=summary_pb2.Summary(
             value=[summary_pb2.Summary.Value(tag='s1', simple_value=0)]))
     fw.add_event(event)
     fw.close()
Example #9
 def write_values(self, key2val):
     summary = tf.Summary(value=[tf.Summary.Value(tag=k,
                                                  simple_value=float(v))
                                 for (k, v) in key2val.items()])
     event = event_pb2.Event(wall_time=time.time(), summary=summary)
     event.step = self.step
     self.evwriter.WriteEvent(event)
     self.evwriter.Flush()
     self.step += 1
Example #10
 def writekvs(self, kvs):
     summary = tf.Summary(value=[
         summary_val(k, v) for k, v in kvs.items() if valid_float_value(v)
     ])
     event = event_pb2.Event(wall_time=time.time(), summary=summary)
     event.step = self.step  # is there any reason why you'd want to specify the step?
     self.writer.WriteEvent(event)
     self.writer.Flush()
     self.step += 1
Example #11
 def _log_value(self, tf_name, value, step=None):
     summary = summary_pb2.Summary()
     summary.value.add(tag=tf_name, simple_value=value)
     event = event_pb2.Event(wall_time=self._time(), summary=summary)
     if step is not None:
         event.step = int(step)
     if self.is_dummy:
         self.dummy_log[tf_name].append((step, value))
     else:
         self._write_event(event)
Example #12
 def testWriterException_raisedFromFlush(self):
   test_dir = self.get_temp_dir()
   sw = self._FileWriter(test_dir)
   writer_thread = sw.event_writer._worker
   with test.mock.patch.object(
       writer_thread, "_ev_writer", autospec=True) as mock_writer:
     # Coordinate threads to ensure both events are added before the writer
     # thread dies, to avoid the second add_event() failing instead of flush().
     second_event_added = threading.Event()
     def _FakeWriteEvent(event):
       del event  # unused
       second_event_added.wait()
       raise FakeWriteError()
     mock_writer.WriteEvent.side_effect = _FakeWriteEvent
     sw.add_event(event_pb2.Event())
     sw.add_event(event_pb2.Event())
     second_event_added.set()
     with self.assertRaises(FakeWriteError):
       sw.flush()
Example #13
 def AddHealthPill(self, wall_time, step, op_name, output_slot, elements):
     event = event_pb2.Event(step=step, wall_time=wall_time)
     value = event.summary.value.add(tag='__health_pill__',
                                     node_name='%s:%d:DebugNumericSummary' %
                                     (op_name, output_slot))
     value.tensor.tensor_shape.dim.add(size=len(elements))
     value.tensor.dtype = types_pb2.DT_DOUBLE
     value.tensor.tensor_content = np.array(elements,
                                            dtype=np.float64).tobytes()
     self.AddEvent(event)
Example #14
 def testWriterException_raisedFromClose(self):
     test_dir = self.get_temp_dir()
     sw = self._FileWriter(test_dir)
     writer_thread = sw.event_writer._worker
     with test.mock.patch.object(writer_thread, "_ev_writer",
                                 autospec=True) as mock_writer:
         mock_writer.WriteEvent.side_effect = FakeWriteError()
         sw.add_event(event_pb2.Event())
         with self.assertRaises(FakeWriteError):
             sw.close()
Example #15
     def testFirstEventTimestampLoadsEvent(self):
        """Test that FirstEventTimestamp() doesn't discard the loaded event."""
        gen = _EventGenerator(self)
        acc = ea.EventAccumulator(gen)
        gen.AddEvent(
            event_pb2.Event(wall_time=1, step=2, file_version='brain.Event:2'))

        self.assertEqual(acc.FirstEventTimestamp(), 1)
        acc.Reload()
        self.assertEqual(acc.file_version, 2.0)
Example #16
 def writekvs(self, kvs):
     summary = tf.compat.v1.Summary(value=[
         summary_val(k, v) for k, v in kvs.items() if valid_float_value(v)
     ])
     event = event_pb2.Event(wall_time=time.time(), summary=summary)
     event.step = self.step  # is there any reason why you'd want to specify the step?
     if self.writer is None:
         raise ValueError("Attempt to write after close().")
     self.writer.WriteEvent(event)
     self.writer.Flush()
     self.step += 1
Example #17
 def _readLastEvent(self, logdir=None):
     if not logdir:
         logdir = self._tmp_logdir
     files = [
         f for f in gfile.ListDirectory(logdir)
         if not gfile.IsDirectory(os.path.join(logdir, f))
     ]
     file_path = os.path.join(logdir, files[0])
     records = list(tf_record.tf_record_iterator(file_path))
     event = event_pb2.Event()
     event.ParseFromString(records[-1])
     return event
Example #18
def _events_from_logdir(test_case, logdir):
  """Reads summary events from log directory."""
  test_case.assertTrue(gfile.Exists(logdir))
  files = gfile.ListDirectory(logdir)
  test_case.assertLen(files, 1)
  records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
  result = []
  for r in records:
    event = event_pb2.Event()
    event.ParseFromString(r)
    result.append(event)
  return result
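As a usage note: event writers emit a `file_version` event before any summaries, so a test built on this helper might assert on record 0. A hedged sketch; `logdir` and the assertion are illustrative:

  # Hypothetical check: the first record in a fresh event file is the
  # file_version event ("brain.Event:2").
  events = _events_from_logdir(self, logdir)
  self.assertEqual("brain.Event:2", events[0].file_version)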
Example #19
 def _write_graph_def(self, graph_def, device_name, wall_time):
   encoded_graph_def = graph_def.SerializeToString()
   graph_hash = int(hashlib.md5(encoded_graph_def).hexdigest(), 16)
   event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time)
   graph_file_path = os.path.join(
       self._dump_dir,
       debug_data.device_name_to_device_path(device_name),
       debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG +
       debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time))
   self._try_makedirs(os.path.dirname(graph_file_path))
   with open(graph_file_path, "wb") as f:
     f.write(event.SerializeToString())
Example #20
def write_event(tag, value, step):
    event = event_pb2.Event(
        wall_time=time.time(),
        step=step,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(tag=tag, simple_value=value)]))

    # todo: not flush so often?
    writer.WriteEvent(event)
    writer.Flush()

    return event
Example #21
def WriteScalarSeries(writer, tag, f, n=5):
  """Write a series of scalar events to writer, using f to create values."""
  step = 0
  wall_time = _start_time
  for i in xrange(n):
    v = f(i)
    value = summary_pb2.Summary.Value(tag=tag, simple_value=v)
    summary = summary_pb2.Summary(value=[value])
    event = event_pb2.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += 1
    wall_time += 10
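A minimal driver for the helper above, assuming the same `writer.FileWriter` seen in the `_GenerateEventsData` example and the module-level `_start_time` that `WriteScalarSeries` reads (both assumptions; the log directory is illustrative):

# Hypothetical driver: write five scalar points tagged "loss".
fw = writer.FileWriter("/tmp/demo_logs")
WriteScalarSeries(fw, "loss", lambda i: 1.0 / (i + 1), n=5)
fw.close()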
Example #22
def events_from_file(logdir):
  """Returns all events in the single eventfile in logdir."""
  assert gfile.Exists(logdir)
  files = gfile.ListDirectory(logdir)
  assert len(files) == 1, "Found more than one file in logdir: %s" % files
  records = list(
      tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
  result = []
  for r in records:
    event = event_pb2.Event()
    event.ParseFromString(r)
    result.append(event)
  return result
Example #23
 def AddHealthPill(self, wall_time, step, node_name, output_slot, elements):
     event = event_pb2.Event()
     event.wall_time = wall_time
     event.step = step
     value = event.summary.value.add()
     # The node_name property is actually a watch key.
     value.node_name = '%s:%d:DebugNumericSummary' % (node_name,
                                                      output_slot)
     value.tag = '__health_pill__'
     value.tensor.tensor_shape.dim.add().size = len(elements)
     # Mark the payload as float64 (as in the DT_DOUBLE variant above) so
     # that readers can decode tensor_content.
     value.tensor.dtype = types_pb2.DT_DOUBLE
     value.tensor.tensor_content = np.array(elements,
                                            dtype=np.float64).tobytes()
     self.AddEvent(event)
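Because `tensor_content` is just the raw float64 byte buffer, the elements round-trip through NumPy. A hedged sketch, assuming the `event` built above is still in scope:

import numpy as np

# Recover the health-pill elements from the serialized tensor bytes.
decoded = np.frombuffer(event.summary.value[0].tensor.tensor_content,
                        dtype=np.float64)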
Example #24
    def add_session_log(self, session_log, global_step=None):
        """Adds a `SessionLog` protocol buffer to the event file.

        This method wraps the provided session log in an `Event` protocol
        buffer and adds it to the event file.

        Args:
          session_log: A `SessionLog` protocol buffer.
          global_step: Number. Optional global step value to record with the
            summary.
        """
        event = event_pb2.Event(session_log=session_log)
        self._add_event(event, global_step)
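`SessionLog` and its status enum are defined alongside `Event` in `event_pb2`. A hedged usage sketch; `writer` stands in for any object exposing this method, such as a `FileWriter`:

    # Hypothetical usage: mark the start of a session at global step 0.
    writer.add_session_log(
        event_pb2.SessionLog(status=event_pb2.SessionLog.START), global_step=0)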
Example #25
    def py_gif_event(step, tag, tensor, max_outputs, fps):
        summary = py_gif_summary(tag, tensor, max_outputs, fps)

        if isinstance(summary, bytes):
            summ = summary_pb2.Summary()
            summ.ParseFromString(summary)
            summary = summ

        event = event_pb2.Event(summary=summary)
        event.wall_time = time.time()
        event.step = step
        event_pb = event.SerializeToString()
        return event_pb
Example #26
    def Load(self):
        """Loads all new values from disk.

        Calling Load multiple times in a row will not 'drop' events as long
        as the return value is not iterated over.

        Yields:
          All values that were written to disk that have not been yielded yet.
        """
        while self._reader.GetNext():
            event = event_pb2.Event()
            event.ParseFromString(self._reader.record())
            yield event
        logging.debug('No more events in %s', self._file_path)
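Since `Load()` is a generator, callers drain it with a loop. A minimal sketch, assuming `loader` is an instance of the class above:

# Hypothetical usage: process everything currently on disk.
for event in loader.Load():
    print(event.step, event.wall_time)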
Example #27
    def logkv(self, k, v):
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return tf.Summary.Value(**kwargs)

        summary = tf.Summary(value=[summary_val(k, v)])
        event = event_pb2.Event(wall_time=time.time(), summary=summary)
        # Use a separate step counter for each key
        if k not in self.key_steps:
            self.key_steps[k] = 0
        event.step = self.key_steps[k]
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.key_steps[k] += 1
Example #28
     def testReloadPopulatesFirstEventTimestamp(self):
        """Test that Reload() means FirstEventTimestamp() won't load events."""
        gen = _EventGenerator(self)
        acc = ea.EventAccumulator(gen)
        gen.AddEvent(
            event_pb2.Event(wall_time=1, step=2, file_version='brain.Event:2'))

        acc.Reload()

        def _Die(*args, **kwargs):  # pylint: disable=unused-argument
            raise RuntimeError('Load() should not be called')

        self.stubs.Set(gen, 'Load', _Die)
        self.assertEqual(acc.FirstEventTimestamp(), 1)
Example #29
 def Load(self):
     # Create a temp file to hold the contents that we haven't seen yet.
     with tempfile.NamedTemporaryFile(prefix='tf-gcs-') as temp_file:
         name = temp_file.name
         logging.debug('Temp file created at %s', name)
         gcs.CopyContents(self._gcs_path, self._gcs_offset, temp_file)
         reader = pywrap_tensorflow.PyRecordReader_New(
             compat.as_bytes(name), 0)
         while reader.GetNext():
             event = event_pb2.Event()
             event.ParseFromString(reader.record())
             yield event
         logging.debug('No more events in %s', name)
         self._gcs_offset += reader.offset()
Example #30
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):
  """Write a sequence of normally distributed histograms to writer."""
  step = 0
  wall_time = _start_time
  for [mean, stddev] in mu_sigma_tuples:
    data = [random.normalvariate(mean, stddev) for _ in xrange(n)]
    histo = _MakeHistogram(data)
    summary = summary_pb2.Summary(
        value=[summary_pb2.Summary.Value(
            tag=tag, histo=histo)])
    event = event_pb2.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += 10
    wall_time += 100
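A matching driver sketch, under the same assumptions as the scalar-series driver above (`writer.FileWriter`, `_start_time`, and `_MakeHistogram` are presumed to exist; the values are illustrative):

# Hypothetical driver: three histograms whose mean drifts upward.
fw = writer.FileWriter("/tmp/demo_logs")
WriteHistogramSeries(fw, "weights", [(0.0, 1.0), (0.5, 1.0), (1.0, 1.0)], n=20)
fw.close()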