Ejemplo n.º 1
0
    def testSessionLogStartMessageDiscardsExpiredEvents(self):
        """A SessionLog.START message purges events with expired steps.

        For file_version >= brain.Event:2 this purge mechanism takes
        precedence over the out-of-order step discard logic, because the
        SessionLog enum only exists in those proto versions.
        """
        generator = _EventGenerator(self)
        accumulator = ea.EventAccumulator(generator)
        generator.AddEvent(
            tf.Event(wall_time=0, step=1, file_version='brain.Event:2'))

        for step in (100, 200, 300, 400):
            generator.AddScalar('s1', wall_time=1, step=step, value=20)
        for step in (202, 203):
            generator.AddScalar('s2', wall_time=1, step=step, value=20)

        start_log = tf.SessionLog(status=tf.SessionLog.START)
        generator.AddEvent(tf.Event(wall_time=2, step=201, session_log=start_log))
        accumulator.Reload()
        # Steps >= 201 are dropped: s1 keeps 100 and 200; s2 loses everything.
        self.assertEqual([x.step for x in accumulator.Scalars('s1')], [100, 200])
        self.assertEqual([x.step for x in accumulator.Scalars('s2')], [])
Ejemplo n.º 2
0
 def testOnlySummaryEventsTriggerDiscards(self):
     """Neither file-version nor graph events should purge scalar data."""
     generator = _EventGenerator(self)
     accumulator = ea.EventAccumulator(generator)
     generator.AddScalar('s1', wall_time=1, step=100, value=20)
     version_event = tf.Event(wall_time=2, step=0, file_version='brain.Event:1')
     serialized_graph = tf.GraphDef().SerializeToString()
     graph_event = tf.Event(wall_time=3, step=0, graph_def=serialized_graph)
     generator.AddEvent(version_event)
     generator.AddEvent(graph_event)
     accumulator.Reload()
     # The scalar at step 100 must survive both non-summary events.
     self.assertEqual([x.step for x in accumulator.Scalars('s1')], [100])
Ejemplo n.º 3
0
 def AddScalar(self, tag, wall_time=0, step=0, value=0):
     """Queue an event carrying a single simple_value scalar summary."""
     scalar_value = tf.Summary.Value(tag=tag, simple_value=value)
     summary = tf.Summary(value=[scalar_value])
     self.AddEvent(tf.Event(wall_time=wall_time, step=step, summary=summary))
Ejemplo n.º 4
0
 def testReadOneEvent(self):
   """A single written record round-trips, then the log reports exhaustion."""
   expected = tf.Event(step=123)
   path = self._save_records('events.out.tfevents.0.localhost',
                             [expected.SerializeToString()])
   with self.EventLog(path) as log:
     self.assertEqual(expected, log.get_next_event())
     self.assertIsNone(log.get_next_event())
Ejemplo n.º 5
0
 def testFirstEventTimestamp(self):
   """FirstEventTimestamp() must report the wall_time of the first event."""
   generator = _EventGenerator(self)
   accumulator = ea.EventAccumulator(generator)
   first = tf.Event(wall_time=10, step=20, file_version='brain.Event:2')
   generator.AddEvent(first)
   generator.AddScalarTensor('s1', wall_time=30, step=40, value=20)
   self.assertEqual(accumulator.FirstEventTimestamp(), 10)
Ejemplo n.º 6
0
    def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
        """Detecting a restart (step going backwards) purges outdated events.

        For file versions < 2, observing a step lower than the previous one
        forces a discard of all prior values for the same tag that are
        outdated. Versions >= 2 purge via SessionLog.START instead.
        """
        warnings = []
        self.stubs.Set(tf.logging, 'warn', warnings.append)

        generator = _EventGenerator(self)
        accumulator = ea.EventAccumulator(generator)

        generator.AddEvent(
            tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
        for step in (100, 200, 300):
            generator.AddScalar('s1', wall_time=1, step=step, value=20)
        accumulator.Reload()
        # All three values should be present before the restart.
        self.assertEqual([x.step for x in accumulator.Scalars('s1')],
                         [100, 200, 300])

        for step in (101, 201, 301):
            generator.AddScalar('s1', wall_time=1, step=step, value=20)
        accumulator.Reload()
        # Steps 200 and 300 were newer than the restart point, so they go.
        self.assertEqual([x.step for x in accumulator.Scalars('s1')],
                         [100, 101, 201, 301])
Ejemplo n.º 7
0
 def testRestartProgram_resumesThings(self):
   """Saved progress lets a fresh RunReader pick up at the next event."""
   run_id = db.RUN_ROWID.create(1, 1)
   first = tf.Event(step=123)
   second = tf.Event(step=456)
   path = self._save_records('events.out.tfevents.1.localhost',
                             [first.SerializeToString(),
                              second.SerializeToString()])
   with self.connect_db() as db_conn:
     with self.EventLog(path) as log:
       with loader.RunReader(run_id, 'doodle') as run:
         run.add_event_log(db_conn, log)
         self.assertEqual(first, run.get_next_event())
         run.save_progress(db_conn)
     # Reopen everything: reading should resume after the saved event.
     with self.EventLog(path) as log:
       with loader.RunReader(run_id, 'doodle') as run:
         run.add_event_log(db_conn, log)
         self.assertEqual(second, run.get_next_event())
Ejemplo n.º 8
0
  def testFirstEventTimestampLoadsEvent(self):
    """The event consumed by FirstEventTimestamp() is not lost to Reload()."""
    generator = _EventGenerator(self)
    accumulator = ea.EventAccumulator(generator)
    generator.AddEvent(
        tf.Event(wall_time=1, step=2, file_version='brain.Event:2'))

    self.assertEqual(accumulator.FirstEventTimestamp(), 1)
    accumulator.Reload()
    # Reload() still processes the file-version event read for the timestamp.
    self.assertEqual(accumulator.file_version, 2.0)
Ejemplo n.º 9
0
 def _GenerateEventsData(self):
     """Write one scalar summary event into self.log_dir."""
     writer = tf.summary.FileWriter(self.log_dir)
     summary = tf.Summary(
         value=[tf.Summary.Value(tag='s1', simple_value=0)])
     writer.add_event(tf.Event(wall_time=1, step=1, summary=summary))
     writer.close()
Ejemplo n.º 10
0
 def testReadOneEvent(self):
   """A RunReader yields the stored event exactly once, then None."""
   run_id = db.RUN_ROWID.create(1, 1)
   expected = tf.Event(step=123)
   path = self._save_records('events.out.tfevents.0.localhost',
                             [expected.SerializeToString()])
   with self.connect_db() as db_conn:
     with self.EventLog(path) as log:
       with loader.RunReader(run_id, 'doodle') as run:
         run.add_event_log(db_conn, log)
         self.assertEqual(expected, run.get_next_event())
         self.assertIsNone(run.get_next_event())
Ejemplo n.º 11
0
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):
    """Write one normal-distribution histogram event per (mean, stddev) pair.

    Steps advance by 10 and wall times by 100 between consecutive events.
    """
    step = 0
    wall_time = _start_time
    for mean, stddev in mu_sigma_tuples:
        samples = [random.normalvariate(mean, stddev) for _ in xrange(n)]
        histo = _MakeHistogram(samples)
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)])
        writer.add_event(
            tf.Event(wall_time=wall_time, step=step, summary=summary))
        step += 10
        wall_time += 100
Ejemplo n.º 12
0
 def testMarkWithShrinkingBatchSize_raisesValueError(self):
   """Calling mark() after a partial re-read must reject the shrunken batch."""
   run_id = db.RUN_ROWID.create(1, 1)
   first = tf.Event(step=123)
   second = tf.Event(step=456)
   path1 = self._save_records('events.out.tfevents.1.localhost',
                              [first.SerializeToString()])
   path2 = self._save_records('events.out.tfevents.2.localhost',
                              [second.SerializeToString()])
   with self.connect_db() as db_conn:
     with self.EventLog(path1) as log1, self.EventLog(path2) as log2:
       with loader.RunReader(run_id, 'doodle') as run:
         run.add_event_log(db_conn, log1)
         run.add_event_log(db_conn, log2)
         run.mark()
         self.assertEqual(first, run.get_next_event())
         self.assertEqual(second, run.get_next_event())
         self.assertIsNone(run.get_next_event())
         run.reset()
         # Only one of the two marked events has been re-read at this point.
         self.assertEqual(first, run.get_next_event())
         with six.assertRaisesRegex(self, ValueError, r'monotonic'):
           run.mark()
Ejemplo n.º 13
0
  def testNonValueEvents(self):
    """Events without summary values must not short-circuit the reload."""
    generator = _EventGenerator(self)
    accumulator = ea.EventAccumulator(generator)
    generator.AddScalarTensor('s1', wall_time=1, step=10, value=20)
    generator.AddEvent(tf.Event(wall_time=2, step=20, file_version='nots2'))
    generator.AddScalarTensor('s3', wall_time=3, step=100, value=1)

    accumulator.Reload()
    # Both tensor tags load despite the interleaved non-value event.
    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: ['s1', 's3'],
    })
Ejemplo n.º 14
0
def WriteScalarSeries(writer, tag, f, n=5):
    """Emit n scalar events whose values come from f(0), ..., f(n - 1).

    Steps advance by 1 and wall times by 10 between consecutive events.
    """
    step = 0
    wall_time = _start_time
    for i in xrange(n):
        summary = tf.Summary(
            value=[tf.Summary.Value(tag=tag, simple_value=f(i))])
        writer.add_event(
            tf.Event(wall_time=wall_time, step=step, summary=summary))
        step += 1
        wall_time += 10
Ejemplo n.º 15
0
  def testReloadPopulatesFirstEventTimestamp(self):
    """After Reload(), FirstEventTimestamp() must not load events again."""
    generator = _EventGenerator(self)
    accumulator = ea.EventAccumulator(generator)
    generator.AddEvent(
        tf.Event(wall_time=1, step=2, file_version='brain.Event:2'))

    accumulator.Reload()

    def _Die(*args, **kwargs):  # pylint: disable=unused-argument
      raise RuntimeError('Load() should not be called')

    # Any further Load() call blows up, proving the timestamp was cached.
    self.stubs.Set(generator, 'Load', _Die)
    self.assertEqual(accumulator.FirstEventTimestamp(), 1)
Ejemplo n.º 16
0
 def testMarkReset_acrossFiles(self):
   """mark()/reset() replay events that span multiple log files."""
   run_id = db.RUN_ROWID.create(1, 1)
   first = tf.Event(step=123)
   second = tf.Event(step=456)
   path1 = self._save_records('events.out.tfevents.1.localhost',
                              [first.SerializeToString()])
   path2 = self._save_records('events.out.tfevents.2.localhost',
                              [second.SerializeToString()])
   with self.connect_db() as db_conn:
     with self.EventLog(path1) as log1, self.EventLog(path2) as log2:
       with loader.RunReader(run_id, 'doodle') as run:
         run.add_event_log(db_conn, log1)
         run.add_event_log(db_conn, log2)
         run.mark()
         for expected in (first, second):
           self.assertEqual(expected, run.get_next_event())
         self.assertIsNone(run.get_next_event())
         run.reset()
         # The full batch replays identically after reset().
         for expected in (first, second):
           self.assertEqual(expected, run.get_next_event())
         self.assertIsNone(run.get_next_event())
         run.mark()
Ejemplo n.º 17
0
  def AddScalarTensor(self, tag, wall_time=0, step=0, value=0):
    """Queue an event holding a rank-0 tensor summary.

    This is unrelated to the scalar plugin; it merely provides a
    convenient event whose payload does not matter to the test.
    """
    tensor_proto = tf.make_tensor_proto(float(value))
    summary = tf.Summary(
        value=[tf.Summary.Value(tag=tag, tensor=tensor_proto)])
    self.AddEvent(tf.Event(wall_time=wall_time, step=step, summary=summary))
Ejemplo n.º 18
0
 def AddImage(self,
              tag,
              wall_time=0,
              step=0,
              encoded_image_string=b'imgstr',
              width=150,
              height=100):
     """Queue an event carrying a single image summary."""
     image = tf.Summary.Image(encoded_image_string=encoded_image_string,
                              width=width,
                              height=height)
     summary = tf.Summary(value=[tf.Summary.Value(tag=tag, image=image)])
     self.AddEvent(tf.Event(wall_time=wall_time, step=step, summary=summary))
Ejemplo n.º 19
0
    def get_next_event(self):
        """Read one event proto from the file.

        The file stays open afterwards, so records appended later can
        still be read by subsequent calls.

        :rtype: tf.Event
        :returns: The next tf.Event, or None when no more records exist.
        """
        next_record = self._reader.get_next_record()
        if next_record is None:
            return None
        parsed = tf.Event()
        parsed.ParseFromString(next_record.record)
        self._offset = next_record.offset
        return parsed
Ejemplo n.º 20
0
 def AddAudio(self,
              tag,
              wall_time=0,
              step=0,
              encoded_audio_string=b'sndstr',
              content_type='audio/wav',
              sample_rate=44100,
              length_frames=22050):
     """Queue an event carrying a single audio summary."""
     audio = tf.Summary.Audio(encoded_audio_string=encoded_audio_string,
                              content_type=content_type,
                              sample_rate=sample_rate,
                              length_frames=length_frames)
     summary = tf.Summary(value=[tf.Summary.Value(tag=tag, audio=audio)])
     self.AddEvent(tf.Event(wall_time=wall_time, step=step, summary=summary))
Ejemplo n.º 21
0
    def testNonValueEvents(self):
        """Events without summary values must not short-circuit the reload."""
        generator = _EventGenerator(self)
        accumulator = ea.EventAccumulator(generator)
        generator.AddScalar('s1', wall_time=1, step=10, value=20)
        generator.AddEvent(
            tf.Event(wall_time=2, step=20, file_version='nots2'))
        generator.AddScalar('s3', wall_time=3, step=100, value=1)
        generator.AddHistogram('hst1')
        generator.AddImage('im1')
        generator.AddAudio('snd1')

        accumulator.Reload()
        # Every tag type loads despite the interleaved non-value event.
        self.assertTagsEqual(
            accumulator.Tags(), {
                ea.IMAGES: ['im1'],
                ea.AUDIO: ['snd1'],
                ea.SCALARS: ['s1', 's3'],
                ea.HISTOGRAMS: ['hst1'],
                ea.COMPRESSED_HISTOGRAMS: ['hst1'],
            })
Ejemplo n.º 22
0
  def testOrphanedDataNotDiscardedIfFlagUnset(self):
    """With purge_orphaned_data=False, a detected restart drops nothing."""
    generator = _EventGenerator(self)
    accumulator = ea.EventAccumulator(generator, purge_orphaned_data=False)

    generator.AddEvent(
        tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
    for step in (100, 200, 300):
      generator.AddScalarTensor('s1', wall_time=1, step=step, value=20)
    accumulator.Reload()
    # Sanity-check the initial load.
    self.assertEqual([x.step for x in accumulator.Tensors('s1')],
                     [100, 200, 300])

    for step in (101, 201, 301):
      generator.AddScalarTensor('s1', wall_time=1, step=step, value=20)
    accumulator.Reload()
    # Steps 200 and 300 survive despite the apparent restart at step 101.
    self.assertEqual([x.step for x in accumulator.Tensors('s1')],
                     [100, 200, 300, 101, 201, 301])
Ejemplo n.º 23
0
 def AddHistogram(self,
                  tag,
                  wall_time=0,
                  step=0,
                  hmin=1,
                  hmax=2,
                  hnum=3,
                  hsum=4,
                  hsum_squares=5,
                  hbucket_limit=None,
                  hbucket=None):
     """Queue an event carrying a single histogram summary."""
     histo = tf.HistogramProto(min=hmin,
                               max=hmax,
                               num=hnum,
                               sum=hsum,
                               sum_squares=hsum_squares,
                               bucket_limit=hbucket_limit,
                               bucket=hbucket)
     summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)])
     self.AddEvent(tf.Event(wall_time=wall_time, step=step, summary=summary))
Ejemplo n.º 24
0
    def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
        """A restart-triggered discard only touches the out-of-order tag.

        For file versions < 2, seeing a step lower than the previous one
        purges outdated values, but only for the tag that went out of
        order; other tags must keep their data. Versions >= 2 purge via
        SessionLog.START instead.
        """
        warnings = []
        self.stubs.Set(tf.logging, 'warn', warnings.append)

        generator = _EventGenerator(self)
        accumulator = ea.EventAccumulator(generator)

        generator.AddEvent(
            tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
        for step in (100, 200, 300, 101, 201, 301):
            generator.AddScalar('s1', wall_time=1, step=step, value=20)
        for step in (101, 201, 301):
            generator.AddScalar('s2', wall_time=1, step=step, value=20)

        accumulator.Reload()
        # s1's restart at step 101 drops its stale 200 and 300 entries...
        self.assertEqual([x.step for x in accumulator.Scalars('s1')],
                         [100, 101, 201, 301])

        # ...while s2, whose steps stayed in order, is untouched.
        self.assertEqual([x.step for x in accumulator.Scalars('s2')],
                         [101, 201, 301])