    def val_scalars(self):
        """Return all scalar values recorded for the validation set."""

        values_dict = dict()
        for model_name in os.listdir(self.filepath):
            if re.match(self.match, model_name) is not None:
                model_f = os.path.join(self.filepath, model_name)
                try:
                    ac = event_accumulator.EventAccumulator(
                        path=os.path.join(model_f, 'validation'))
                    ac.Reload()
                    scalar_list = ac.Tags()['scalars']

                    temp_dict = dict()
                    for scalar in scalar_list:
                        temp_dict[scalar] = list()
                        for ac_loss in ac.Scalars(scalar):
                            temp_dict[scalar].append(ac_loss.value)
                    values_dict[model_name] = temp_dict
                except Exception as p:
                    print(p)
        return values_dict
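
The method above is excerpted from a wrapper class that is not shown; a minimal sketch of that surrounding class (its name and constructor are assumptions, not part of the original) might look like this:

import os
import re

from tensorboard.backend.event_processing import event_accumulator


class RunScalarReader:
    """Hypothetical wrapper assumed by val_scalars above."""

    def __init__(self, filepath, match=r".*"):
        self.filepath = filepath  # root directory containing one sub-directory per model
        self.match = match        # regex used to select model sub-directories

    # val_scalars(self) as defined above would be a method of this class.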
Example 2
def clean_tensorboard_protobufs(dpath):
    """
    Removes event logs that only contain conflicting information

    dpath = '/home/local/KHQ/jon.crall/data/work_phase2/train/unet2/'
    """

    # from tensorflow.python.summary import event_accumulator
    from tensorboard.backend.event_processing import event_accumulator
    import glob
    from os.path import join
    from collections import defaultdict
    import ubelt as ub

    # Clean out iterno overrides
    event_paths = sorted(glob.glob(join(dpath, 'events.out.tfevents*')))

    bad_paths = set()
    good_paths = set()
    low_steps = defaultdict(lambda: float('inf'))
    for p in reversed(event_paths):
        ea = event_accumulator.EventAccumulator(p)
        ea.Reload()
        for key in ea.scalars.Keys():
            events = ea.scalars.Items(key)
            for e in reversed(sorted(events, key=lambda e: e.wall_time)):
                if e.step < low_steps[key]:
                    low_steps[key] = e.step
                    good_paths.add(p)
                else:
                    # Can we individually remove scalars?
                    bad_paths.add(p)
                    # print('skip e = {}, {!r}'.format(key, e))

    # these paths only have conflicting old information. Just kill them
    onlybad_paths = bad_paths - good_paths
    print('onlybad_paths = {!r}'.format(onlybad_paths))
    for p in onlybad_paths:
        ub.delete(p)
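
A minimal usage sketch for the cleaner above, assuming the training-run directory layout described in the docstring (the path is illustrative):

# Illustrative call: scan a training run directory and delete event files
# that only carry stale, conflicting step information.
clean_tensorboard_protobufs('/data/work/train/unet2/')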
Example 3
  def AddRun(self, path, name=None):
    """Add a run to the multiplexer.

    If the name is not specified, it is the same as the path.

    If a run by that name exists, and we are already watching the right path,
      do nothing. If we are watching a different path, replace the event
      accumulator.

    If `Reload` has been called, it will `Reload` the newly created
    accumulators.

    Args:
      path: Path to the event files (or event directory) for given run.
      name: Name of the run to add. If not provided, is set to path.

    Returns:
      The `EventMultiplexer`.
    """
    name = name or path
    accumulator = None
    with self._accumulators_mutex:
      if name not in self._accumulators or self._paths[name] != path:
        if name in self._paths and self._paths[name] != path:
          # TODO(@dandelionmane) - Make it impossible to overwrite an old path
          # with a new path (just give the new path a distinct name)
          tf.logging.warning('Conflict for name %s: old path %s, new path %s',
                             name, self._paths[name], path)
        tf.logging.info('Constructing EventAccumulator for %s', path)
        accumulator = event_accumulator.EventAccumulator(
            path,
            size_guidance=self._size_guidance,
            purge_orphaned_data=self.purge_orphaned_data)
        self._accumulators[name] = accumulator
        self._paths[name] = path
    if accumulator:
      if self._reload_called:
        accumulator.Reload()
    return self
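
For context, a hedged sketch of how `AddRun` is normally driven through the public `EventMultiplexer` API (run names, paths, and the scalar tag below are placeholders):

from tensorboard.backend.event_processing import event_multiplexer

# Register two runs and load their event files.
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRun('/logs/experiment/train', name='train')
multiplexer.AddRun('/logs/experiment/validation', name='validation')
multiplexer.Reload()

# Query scalar events per run and tag (the tag name is a placeholder).
loss_events = multiplexer.Scalars('train', 'loss')
print([(e.step, e.value) for e in loss_events])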
Example 4
 def testReload(self):
   """EventAccumulator contains suitable tags after calling Reload."""
   gen = _EventGenerator(self)
   acc = ea.EventAccumulator(gen)
   acc.Reload()
   self.assertTagsEqual(acc.Tags(), {})
   gen.AddScalar('s1')
   gen.AddScalar('s2')
   gen.AddHistogram('hst1')
   gen.AddHistogram('hst2')
   gen.AddImage('im1')
   gen.AddImage('im2')
   gen.AddAudio('snd1')
   gen.AddAudio('snd2')
   acc.Reload()
   self.assertTagsEqual(acc.Tags(), {
       ea.IMAGES: ['im1', 'im2'],
       ea.AUDIO: ['snd1', 'snd2'],
       ea.SCALARS: ['s1', 's2'],
       ea.HISTOGRAMS: ['hst1', 'hst2'],
       ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
   })
Example 5
    def testOrphanedDataNotDiscardedIfFlagUnset(self):
        """Tests that events are not discarded if purge_orphaned_data is false."""
        gen = _EventGenerator(self)
        acc = ea.EventAccumulator(gen, purge_orphaned_data=False)

        gen.AddEvent(
            tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
        gen.AddScalar('s1', wall_time=1, step=100, value=20)
        gen.AddScalar('s1', wall_time=1, step=200, value=20)
        gen.AddScalar('s1', wall_time=1, step=300, value=20)
        acc.Reload()
        ## Check that the number of items is what it should be
        self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])

        gen.AddScalar('s1', wall_time=1, step=101, value=20)
        gen.AddScalar('s1', wall_time=1, step=201, value=20)
        gen.AddScalar('s1', wall_time=1, step=301, value=20)
        acc.Reload()
        ## Check that we have NOT discarded 200 and 300 from s1
        self.assertEqual([x.step for x in acc.Scalars('s1')],
                         [100, 200, 300, 101, 201, 301])
Example 6
    def testTFSummaryScalar(self):
        """Verify processing of tf.summary.scalar."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        with test_util.FileWriterCache.get(self.get_temp_dir()) as writer:
            writer.event_writer = event_sink
            with self.test_session() as sess:
                ipt = tf.compat.v1.placeholder(tf.float32)
                tf.compat.v1.summary.scalar("scalar1", ipt)
                tf.compat.v1.summary.scalar("scalar2", ipt * ipt)
                merged = tf.compat.v1.summary.merge_all()
                writer.add_graph(sess.graph)
                for i in range(10):
                    summ = sess.run(merged, feed_dict={ipt: i})
                    writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        seq1 = [
            ea.ScalarEvent(wall_time=0, step=i, value=i) for i in range(10)
        ]
        seq2 = [
            ea.ScalarEvent(wall_time=0, step=i, value=i * i) for i in range(10)
        ]

        self.assertTagsEqual(
            accumulator.Tags(),
            {
                ea.SCALARS: ["scalar1", "scalar2"],
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            },
        )

        self.assertEqual(accumulator.Scalars("scalar1"), seq1)
        self.assertEqual(accumulator.Scalars("scalar2"), seq2)
        first_value = accumulator.Scalars("scalar1")[0].value
        self.assertTrue(isinstance(first_value, float))
Example 7
    def test_SuccessfulRestore(self, request):
        """
        Tests that when restoring from a previous session new events are correctly appended.
        """
        tf.reset_default_graph()

        # Setup our helpers, use a single summary
        MODEL_DIR = request.node.name
        START_DATETIME = datetime.utcnow().strftime("%Y%m%d-%H%M")
        manager = FileManager(MODEL_DIR)
        logHelper = TensorboardLogHelper(manager.getModelDir(),
                                         tf.get_default_graph(), ["test1"],
                                         False)

        # Write once
        with tf.Session() as sess:
            logHelper.writeSummary(sess, [5.0])

        logHelper.close()

        # Now restore (wait one second, otherwise the first event file would be overwritten)
        time.sleep(1)
        logHelper = TensorboardLogHelper(manager.getModelDir(),
                                         tf.get_default_graph(), ["test1"],
                                         True)
        # Write again
        with tf.Session() as sess:
            logHelper.setIteration(1)
            logHelper.writeSummary(sess, [6.0])

        logHelper.close()

        # Manually inspect the tensorboard log
        ea = event_accumulator.EventAccumulator(
            str(pathlib.Path.cwd() / "models" / MODEL_DIR / START_DATETIME))
        ea.Reload()
        assert ea.Scalars("TensorboardLogHelper/test1_1")[0].value == 5
        assert ea.Scalars("TensorboardLogHelper/test1_1")[1].value == 6
Example 8
    def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
        """Tests that event discards after restart, only affect the misordered
        tag.

        If a step value is observed to be lower than what was previously seen,
        this should force a discard of all previous items that are outdated, but
        only for the out of order tag. Other tags should remain unaffected.

        Only file versions < 2 use this out-of-order discard logic. Later versions
        discard events based on the step value of SessionLog.START.
        """
        warnings = []
        self.stubs.Set(logger, "warn", warnings.append)

        gen = _EventGenerator(self)
        acc = ea.EventAccumulator(gen)

        gen.AddEvent(
            event_pb2.Event(wall_time=0, step=0, file_version="brain.Event:1"))
        gen.AddScalar("s1", wall_time=1, step=100, value=20)
        gen.AddScalar("s1", wall_time=1, step=200, value=20)
        gen.AddScalar("s1", wall_time=1, step=300, value=20)
        gen.AddScalar("s1", wall_time=1, step=101, value=20)
        gen.AddScalar("s1", wall_time=1, step=201, value=20)
        gen.AddScalar("s1", wall_time=1, step=301, value=20)

        gen.AddScalar("s2", wall_time=1, step=101, value=20)
        gen.AddScalar("s2", wall_time=1, step=201, value=20)
        gen.AddScalar("s2", wall_time=1, step=301, value=20)

        acc.Reload()
        ## Check that we have discarded 200 and 300
        self.assertEqual([x.step for x in acc.Scalars("s1")],
                         [100, 101, 201, 301])

        ## Check that s1 discards do not affect s2
        ## i.e. check that only events from the out of order tag are discarded
        self.assertEqual([x.step for x in acc.Scalars("s2")], [101, 201, 301])
Example 9
    def testGraphFromMetaGraphBecomesAvailable(self):
        """Test accumulator by writing values and then reading them."""

        directory = os.path.join(self.get_temp_dir(),
                                 "metagraph_test_values_dir")
        if tf.io.gfile.isdir(directory):
            tf.io.gfile.rmtree(directory)
        tf.io.gfile.mkdir(directory)

        writer = test_util.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        meta_graph_def = tf.compat.v1.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(),
            {
                ea.GRAPH: True,
                ea.META_GRAPH: True,
            },
        )

        expected_graph_def = graph_pb2.GraphDef.FromString(
            graph.as_graph_def(add_shapes=True).SerializeToString())
        self.assertProtoEquals(expected_graph_def, acc.Graph())

        expected_meta_graph = meta_graph_pb2.MetaGraphDef.FromString(
            meta_graph_def.SerializeToString())
        self.assertProtoEquals(expected_meta_graph, acc.MetaGraph())
Example 10
    def _testTFSummaryTensor_SizeGuidance(self, plugin_name,
                                          tensor_size_guidance, steps,
                                          expected_count):
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = tf.summary.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with self.test_session() as sess:
            summary_metadata = tf.SummaryMetadata(
                plugin_data=tf.SummaryMetadata.PluginData(
                    plugin_name=plugin_name, content='{}'))
            tf.summary.tensor_summary('scalar',
                                      tf.constant(1.0),
                                      summary_metadata=summary_metadata)
            merged = tf.summary.merge_all()
            for step in xrange(steps):
                writer.add_summary(sess.run(merged), global_step=step)

        accumulator = ea.EventAccumulator(
            event_sink, tensor_size_guidance=tensor_size_guidance)
        accumulator.Reload()

        tensors = accumulator.Tensors('scalar')
        self.assertEqual(len(tensors), expected_count)
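
A hedged sketch of the same keyword outside the test harness: `tensor_size_guidance` is keyed by plugin name and caps how many tensor events are retained per tag (the log path below is illustrative).

from tensorboard.backend.event_processing import event_accumulator

acc = event_accumulator.EventAccumulator(
    '/logs/run1',                             # illustrative log directory
    tensor_size_guidance={'scalars': 1000})   # keep up to 1000 tensor events per tag of the scalars plugin
acc.Reload()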
Example 11
def logging_moving_average(in_log_path, out_log_dir, k):
    # Load the log data
    ea = event_accumulator.EventAccumulator(in_log_path)
    ea.Reload()
    writer = SummaryWriter(out_log_dir)
    # reward
    val_reward = ea.scalars.Items('Train/reward')
    rewards = [item.value for item in val_reward]
    last_k_rewards = computing_moving_average(rewards, k)
    for idx, value in enumerate(last_k_rewards, 1):
        writer.add_scalar('Train/moving_average_reward', value, idx)
    # loss
    val_loss = ea.scalars.Items('Train/loss')
    loss = [item.value for item in val_loss]
    last_k_loss = computing_moving_average(loss, k)
    for idx, value in enumerate(last_k_loss, 1):
        writer.add_scalar('Train/moving_average_loss', value, idx)
    # steps
    val_steps = ea.scalars.Items('Train/steps')
    steps = [item.value for item in val_steps]
    last_k_steps = computing_moving_average(steps, k)
    for idx, value in enumerate(last_k_steps, 1):
        writer.add_scalar('Train/moving_average_steps', value, idx)
Example 12
    def __init__(self, log_file):
        """ Helper object to parse tensorboard logs

        Args:
            log_file (str): Path to an event file, or to a log directory containing one
        """
        if 'events.out' not in log_file:
            for f in os.listdir(log_file):
                if 'events.out' in f:
                    log_file = os.path.join(log_file, f)
                    break

        self.ea = event_accumulator.EventAccumulator(log_file,
                                                size_guidance={
                                                    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
                                                    event_accumulator.IMAGES: 4,
                                                    event_accumulator.AUDIO: 4,
                                                    event_accumulator.SCALARS: 0,
                                                    event_accumulator.HISTOGRAMS: 1,
                                                })
        self.ea.Reload()
        self.scalar_tags = []
        self.text_tags = []
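
The constructor above only stores the accumulator and two empty tag lists; note that `event_accumulator.SCALARS: 0` asks the accumulator to keep every scalar event. A hedged continuation showing how such a parser might expose the scalars it loaded (the method name is an assumption):

    def get_scalars(self, tag):
        """Return (steps, values) for one scalar tag loaded by the accumulator."""
        events = self.ea.Scalars(tag)
        steps = [e.step for e in events]
        values = [e.value for e in events]
        return steps, values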
Example 13
    def testNonValueEvents(self):
        """Non-value events in the generator don't cause early exits."""
        gen = _EventGenerator(self)
        acc = ea.EventAccumulator(gen)
        gen.AddScalar("s1", wall_time=1, step=10, value=20)
        gen.AddEvent(
            event_pb2.Event(wall_time=2, step=20, file_version="nots2"))
        gen.AddScalar("s3", wall_time=3, step=100, value=1)
        gen.AddHistogram("hst1")
        gen.AddImage("im1")
        gen.AddAudio("snd1")

        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(),
            {
                ea.IMAGES: ["im1"],
                ea.AUDIO: ["snd1"],
                ea.SCALARS: ["s1", "s3"],
                ea.HISTOGRAMS: ["hst1"],
                ea.COMPRESSED_HISTOGRAMS: ["hst1"],
            },
        )
Example 14
def get_data(name, filename):
    # ea = event_accumulator.EventAccumulator('E:/Project/ChestCT/log_voc/events.out.tfevents.'
    #                                         + filename + '.user-System-Product-Name')
    ea = event_accumulator.EventAccumulator(
        'E:/Project/ChestCT/log_voc/events.out.tfevents.' + filename +
        '.DESKTOP-50KJA8U')
    ea.Reload()
    print(ea.scalars.Keys())

    data = ea.scalars.Items(name)
    print(len(data))
    # print([(i.step, i.value) for i in data])
    # loss = [(i.step, i.value) for i in loss]
    # Keep only entries with strictly increasing steps, dropping points that
    # were re-logged after a restart.
    cleaned = data[:1]
    for item in data[1:]:
        if item.step > cleaned[-1].step:
            cleaned.append(item)
    data = cleaned
    # print([(i.step, i.value) for i in loss])
    # print(len(loss))
    return data
Example 15
 def testKeyError(self):
     """KeyError should be raised when accessing non-existing keys."""
     gen = _EventGenerator(self)
     acc = ea.EventAccumulator(gen)
     acc.Reload()
     with self.assertRaises(KeyError):
         acc.Scalars('s1')
     with self.assertRaises(KeyError):
         acc.Scalars('hst1')
     with self.assertRaises(KeyError):
         acc.Scalars('im1')
     with self.assertRaises(KeyError):
         acc.Histograms('s1')
     with self.assertRaises(KeyError):
         acc.Histograms('im1')
     with self.assertRaises(KeyError):
         acc.Images('s1')
     with self.assertRaises(KeyError):
         acc.Images('hst1')
     with self.assertRaises(KeyError):
         acc.Audio('s1')
     with self.assertRaises(KeyError):
         acc.Audio('hst1')
Example 16
    def testTFSummaryScalar(self):
        """Verify processing of summary_lib.scalar."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with self.test_session() as sess:
            ipt = array_ops.placeholder(dtypes.float32)
            summary_lib.scalar('scalar1', ipt)
            summary_lib.scalar('scalar2', ipt * ipt)
            merged = summary_lib.merge_all()
            writer.add_graph(sess.graph)
            for i in xrange(10):
                summ = sess.run(merged, feed_dict={ipt: i})
                writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        seq1 = [
            ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)
        ]
        seq2 = [
            ea.ScalarEvent(wall_time=0, step=i, value=i * i)
            for i in xrange(10)
        ]

        self.assertTagsEqual(
            accumulator.Tags(), {
                ea.SCALARS: ['scalar1', 'scalar2'],
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            })

        self.assertEqual(accumulator.Scalars('scalar1'), seq1)
        self.assertEqual(accumulator.Scalars('scalar2'), seq2)
        first_value = accumulator.Scalars('scalar1')[0].value
        self.assertTrue(isinstance(first_value, float))
Example 17
  def testNewStyleImageSummary(self):
    """Verify processing of tensorboard.plugins.image.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, because it would be tag reuse.
      # Using the tf node name instead allows argument re-use to the image
      # summary.
      with tf.name_scope('1'):
        image_summary.op('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        image_summary.op('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        image_summary.op('images', ipt, max_outputs=3)
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image_summary',
        u'2/images/image_summary',
        u'3/images/image_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example 18
def convert_tfevents_to_csv(
    root_dir: str, refresh: bool = False
) -> Dict[str, np.ndarray]:
    """Recursively convert test/rew from all tfevent file under root_dir to csv.

    This function assumes that there is at most one tfevents file in each directory
    and will add suffix to that directory.

    :param bool refresh: re-create csv file under any condition.
    """
    tfevent_files = find_all_files(root_dir, re.compile(r"^.*tfevents.*$"))
    print(f"Converting {len(tfevent_files)} tfevents files under {root_dir} ...")
    result = {}
    with tqdm.tqdm(tfevent_files) as t:
        for tfevent_file in t:
            t.set_postfix(file=tfevent_file)
            output_file = os.path.join(os.path.split(tfevent_file)[0], "test_rew.csv")
            if os.path.exists(output_file) and not refresh:
                content = list(csv.reader(open(output_file, "r")))
                if content[0] == ["env_step", "rew", "time"]:
                    for i in range(1, len(content)):
                        content[i] = list(map(eval, content[i]))
                    result[output_file] = content
                    continue
            ea = event_accumulator.EventAccumulator(tfevent_file)
            ea.Reload()
            initial_time = ea._first_event_timestamp
            content = [["env_step", "rew", "time"]]
            for test_rew in ea.scalars.Items("test/rew"):
                content.append([
                    round(test_rew.step, 4),
                    round(test_rew.value, 4),
                    round(test_rew.wall_time - initial_time, 4),
                ])
            csv.writer(open(output_file, 'w')).writerows(content)
            result[output_file] = content
    return result
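
`find_all_files` is not shown in this excerpt; a hedged stand-in, under the assumption that it simply walks `root_dir` and keeps the paths matching the compiled pattern:

import os
from typing import List


def find_all_files(root_dir: str, pattern) -> List[str]:
    """Hypothetical helper: collect file paths under root_dir matching the regex."""
    matches = []
    for dirname, _, files in os.walk(root_dir):
        for fname in files:
            full_path = os.path.join(dirname, fname)
            if pattern.match(full_path):
                matches.append(full_path)
    return matches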
Example 19
 def testAudio(self):
   """Tests 2 audio events inserted/accessed in EventAccumulator."""
   gen = _EventGenerator(self)
   acc = ea.EventAccumulator(gen)
   snd1 = ea.AudioEvent(
       wall_time=1,
       step=10,
       encoded_audio_string=b'big',
       content_type='audio/wav',
       sample_rate=44100,
       length_frames=441000)
   snd2 = ea.AudioEvent(
       wall_time=2,
       step=12,
       encoded_audio_string=b'small',
       content_type='audio/wav',
       sample_rate=44100,
       length_frames=44100)
   gen.AddAudio(
       'snd1',
       wall_time=1,
       step=10,
       encoded_audio_string=b'big',
       content_type='audio/wav',
       sample_rate=44100,
       length_frames=441000)
   gen.AddAudio(
       'snd2',
       wall_time=2,
       step=12,
       encoded_audio_string=b'small',
       content_type='audio/wav',
       sample_rate=44100,
       length_frames=44100)
   acc.Reload()
   self.assertEqual(acc.Audio('snd1'), [snd1])
   self.assertEqual(acc.Audio('snd2'), [snd2])
Example 20
 def testImages(self):
     """Tests 2 images inserted/accessed in EventAccumulator."""
     gen = _EventGenerator(self)
     acc = ea.EventAccumulator(gen)
     im1 = ea.ImageEvent(
         wall_time=1,
         step=10,
         encoded_image_string=b"big",
         width=400,
         height=300,
     )
     im2 = ea.ImageEvent(
         wall_time=2,
         step=12,
         encoded_image_string=b"small",
         width=40,
         height=30,
     )
     gen.AddImage(
         "im1",
         wall_time=1,
         step=10,
         encoded_image_string=b"big",
         width=400,
         height=300,
     )
     gen.AddImage(
         "im2",
         wall_time=2,
         step=12,
         encoded_image_string=b"small",
         width=40,
         height=30,
     )
     acc.Reload()
     self.assertEqual(acc.Images("im1"), [im1])
     self.assertEqual(acc.Images("im2"), [im2])
Example 21
    def testSummaryMetadata_FirstMetadataWins(self):
        logdir = self.get_temp_dir()
        summary_metadata_1 = tf.SummaryMetadata(
            display_name='current tagee',
            summary_description='no',
            plugin_data=[
                tf.SummaryMetadata.PluginData(plugin_name='outlet',
                                              content='120v')
            ])
        self._writeMetadata(logdir, summary_metadata_1, nonce='1')
        acc = ea.EventAccumulator(logdir)
        acc.Reload()
        summary_metadata_2 = tf.SummaryMetadata(
            display_name='tagee of the future',
            summary_description='definitely not',
            plugin_data=[
                tf.SummaryMetadata.PluginData(plugin_name='plug',
                                              content='110v')
            ])
        self._writeMetadata(logdir, summary_metadata_2, nonce='2')
        acc.Reload()

        self.assertProtoEquals(summary_metadata_1,
                               acc.SummaryMetadata('you_are_it'))
Example 22
def read_data(load_dir, tag="perf/avg_reward_100"):

    events = os.listdir(load_dir)
    for event in events:
        path = os.path.join(load_dir, event)
        ea = event_accumulator.EventAccumulator(
            path,
            size_guidance={
                event_accumulator.COMPRESSED_HISTOGRAMS: 0,
                event_accumulator.IMAGES: 0,
                event_accumulator.AUDIO: 0,
                event_accumulator.SCALARS: 2500,
                event_accumulator.HISTOGRAMS: 0,
            })

        ea.Reload()
        tags = ea.Tags()

        if tag not in tags["scalars"]: continue

        if len(ea.Scalars(tag)) == 2500:
            return np.array([s.value for s in ea.Scalars(tag)])

    return None
Example 23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('--outdir', type=str)
    args = parser.parse_args()

    event_acc = event_accumulator.EventAccumulator(
        args.path, size_guidance={'scalars': 0})
    event_acc.Reload()

    scalars = {}
    for tag in event_acc.Tags()['scalars']:
        events = event_acc.Scalars(tag)
        scalars[tag] = [event.value for event in events]

    if args.outdir is not None:
        outdir = pathlib.Path(args.outdir)
    else:
        outdir = pathlib.Path(args.path).parent
    outdir.mkdir(exist_ok=True, parents=True)

    outpath = outdir / 'all_scalars.json'
    with open(outpath, 'w') as fout:
        json.dump(scalars, fout)
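
A hedged usage note for the exporter above, assuming it is saved as export_scalars.py (the script name, run directory, and output directory are illustrative):

# Illustrative invocation:
#   python export_scalars.py --path runs/exp1 --outdir exports
# Reading the exported JSON back:
import json
import pathlib

with open(pathlib.Path('exports') / 'all_scalars.json') as fin:
    scalars = json.load(fin)
for tag, values in scalars.items():
    print(tag, len(values))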
Example 24
  def testSessionLogStartMessageDiscardsExpiredEvents(self):
    """Test that SessionLog.START message discards expired events.

    This discard logic is preferred over the out-of-order step discard logic,
    but this logic can only be used for event protos which have the SessionLog
    enum, which was introduced to event.proto for file_version >= brain.Event:2.
    """
    gen = _EventGenerator(self)
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=0, step=1, file_version='brain.Event:2'))

    gen.AddScalar('s1', wall_time=1, step=100, value=20)
    gen.AddScalar('s1', wall_time=1, step=200, value=20)
    gen.AddScalar('s1', wall_time=1, step=300, value=20)
    gen.AddScalar('s1', wall_time=1, step=400, value=20)

    gen.AddScalar('s2', wall_time=1, step=202, value=20)
    gen.AddScalar('s2', wall_time=1, step=203, value=20)

    slog = tf.SessionLog(status=tf.SessionLog.START)
    gen.AddEvent(tf.Event(wall_time=2, step=201, session_log=slog))
    acc.Reload()
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
    self.assertEqual([x.step for x in acc.Scalars('s2')], [])
Example 25
def get_last_metric(path, metric, get_step=None):
    onlyfiles = sorted([join(path,f) for f in listdir(path) if isfile(join(path, f))])
    last_point = 0
    v = None
    for f in onlyfiles:
        ea = event_accumulator.EventAccumulator(f)
        # top1 not found
        ea.Reload()
        try:
            if get_step is not None:
                for e in ea.Scalars(metric):
                    last_point = e.step
                    v = e.value
                    if get_step is not None and last_point == get_step:
                        return v, last_point
            e = ea.Scalars(metric)[-1]
            if e.step >= last_point:
                if last_point > 0:
                    print("Warning: Multiple runs with one name:", f, "other result:", v, 'at', last_point)
                last_point = e.step
                v = e.value
        except Exception as e:
            print(e)
    return v, last_point
Example 26
def read_event(event_name):
    # Each e is an Event proto, i.e. the log record for one batch; iterate over the records.
    for e in tf.train.summary_iterator(event_name):
        # Each v is a Summary.Value, i.e. one logged observation for that batch (e.g. loss or accuracy).
        for v in e.summary.value:
            # if v.tag == 'loss' or v.tag == 'accuracy':
            #     print(v.simple_value)
            print([v.tag, v.simple_value])

    from tensorboard.backend.event_processing import event_accumulator
    # Load the log data
    ea = event_accumulator.EventAccumulator(event_name)
    ea.Reload()
    print(ea.scalars.Keys())

    loss = ea.scalars.Items('losses/ent_loss')
    print(len(loss))
    print([(i.step, i.value) for i in loss])

    import matplotlib.pyplot as plt
    step = [i.step for i in loss]
    loss_value = [i.value for i in loss]
    plt.plot(step, loss_value)
    plt.savefig("loss.png")
Example 27
 def testTags(self):
     """Tags should be found in EventAccumulator after adding some
     events."""
     gen = _EventGenerator(self)
     gen.AddScalar("s1")
     gen.AddScalar("s2")
     gen.AddHistogram("hst1")
     gen.AddHistogram("hst2")
     gen.AddImage("im1")
     gen.AddImage("im2")
     gen.AddAudio("snd1")
     gen.AddAudio("snd2")
     acc = ea.EventAccumulator(gen)
     acc.Reload()
     self.assertTagsEqual(
         acc.Tags(),
         {
             ea.IMAGES: ["im1", "im2"],
             ea.AUDIO: ["snd1", "snd2"],
             ea.SCALARS: ["s1", "s2"],
             ea.HISTOGRAMS: ["hst1", "hst2"],
             ea.COMPRESSED_HISTOGRAMS: ["hst1", "hst2"],
         },
     )
Example 28
tfp = input('tf log dirname: ')
path = '/home/ubuntu/repos/hyperdeeprl/tf_log/' + tfp
paths = []
for root, dirs, files in os.walk(path):
    for file in files:
        if file.endswith(".ubuntu-MS-7B48"):
             p = os.path.join(root, file)
             paths.append(p)

filepath = 'files/'+tfp
if not os.path.exists(filepath):
    os.makedirs(filepath)
else:
    print ('folder exists, will not overwrite, please delete and rerun')
    sys.exit(0)

paths = sorted(paths, key=lambda x: int(x.split('N')[-1].split('-')[0]))
for i, path in enumerate(paths):
    ea = event_accumulator.EventAccumulator(path,
        size_guidance={ # see below regarding this argument
            event_accumulator.COMPRESSED_HISTOGRAMS: 500,
            event_accumulator.IMAGES: 4,
            event_accumulator.AUDIO: 4,
            event_accumulator.SCALARS: 0,
            event_accumulator.HISTOGRAMS: 1,
    })

    ea.Reload() # loads events from file
    n = path.split('N')[1].split('-')[0]
    print (path)
    pd.DataFrame(ea.Scalars('episodic_return_train')).to_csv(filepath + '/N{}.csv'.format(n))
Example 29
    def testScalarsRealistically(self):
        """Test accumulator by writing values and then reading them."""
        def FakeScalarSummary(tag, value):
            value = tf.Summary.Value(tag=tag, simple_value=value)
            summary = tf.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), 'values_dir')
        if tf.gfile.IsDirectory(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MkDir(directory)

        writer = tf.summary.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        writer.add_graph(graph)
        meta_graph_def = tf.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        run_metadata = tf.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = 'test device'
        writer.add_run_metadata(run_metadata, 'test run')

        # Write a bunch of events using the writer.
        for i in xrange(30):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(), {
                ea.SCALARS: ['id', 'sq'],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ['test run'],
            })
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Example 30
 def testEmptyAccumulator(self):
     gen = _EventGenerator(self)
     x = ea.EventAccumulator(gen)
     x.Reload()
     self.assertTagsEqual(x.Tags(), {})