Example 1
  def testGraphFromMetaGraphBecomesAvailable(self):
    """Test accumulator by writing values and then reading them."""

    directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
    if tf.io.gfile.isdir(directory):
      tf.io.gfile.rmtree(directory)
    tf.io.gfile.mkdir(directory)

    writer = test_util.FileWriter(directory, max_queue=100)

    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    meta_graph_def = tf.compat.v1.train.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)

    writer.flush()

    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.GRAPH: True,
        ea.META_GRAPH: True,
    })

    expected_graph_def = graph_pb2.GraphDef.FromString(
        graph.as_graph_def(add_shapes=True).SerializeToString())
    self.assertProtoEquals(expected_graph_def, acc.Graph())

    expected_meta_graph = meta_graph_pb2.MetaGraphDef.FromString(
        meta_graph_def.SerializeToString())
    self.assertProtoEquals(expected_meta_graph, acc.MetaGraph())
Example 2
    def testGraphFromMetaGraphBecomesAvailable(self):
        """Test accumulator by writing values and then reading them."""

        directory = os.path.join(self.get_temp_dir(),
                                 'metagraph_test_values_dir')
        if tf.gfile.IsDirectory(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MkDir(directory)

        writer = test_util.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        meta_graph_def = tf.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(acc.Tags(), {
            ea.GRAPH: True,
            ea.META_GRAPH: True,
        })
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Example 3
  def testTFSummaryTensor(self):
    """Verify processing of tf.summary.tensor."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = test_util.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      tf.compat.v1.summary.tensor_summary('scalar', tf.constant(1.0))
      tf.compat.v1.summary.tensor_summary('vector', tf.constant([1.0, 2.0, 3.0]))
      tf.compat.v1.summary.tensor_summary('string', tf.constant(six.b('foobar')))
      merged = tf.compat.v1.summary.merge_all()
      summ = sess.run(merged)
      writer.add_summary(summ, 0)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: ['scalar', 'vector', 'string'],
    })

    scalar_proto = accumulator.Tensors('scalar')[0].tensor_proto
    scalar = tensor_util.make_ndarray(scalar_proto)
    vector_proto = accumulator.Tensors('vector')[0].tensor_proto
    vector = tensor_util.make_ndarray(vector_proto)
    string_proto = accumulator.Tensors('string')[0].tensor_proto
    string = tensor_util.make_ndarray(string_proto)

    self.assertTrue(np.array_equal(scalar, 1.0))
    self.assertTrue(np.array_equal(vector, [1.0, 2.0, 3.0]))
    self.assertTrue(np.array_equal(string, six.b('foobar')))
Example 4
  def _testTFSummaryTensor_SizeGuidance(self,
                                        plugin_name,
                                        tensor_size_guidance,
                                        steps,
                                        expected_count):
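    """Write `steps` tensor summaries tagged for plugin `plugin_name`, then
    check that the accumulator, configured with `tensor_size_guidance`,
    retains exactly `expected_count` of them after reloading.
    """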
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = test_util.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      summary_metadata = summary_pb2.SummaryMetadata(
          plugin_data=summary_pb2.SummaryMetadata.PluginData(
              plugin_name=plugin_name, content=b'{}'))
      tf.compat.v1.summary.tensor_summary(
          'scalar', tf.constant(1.0), summary_metadata=summary_metadata)
      merged = tf.compat.v1.summary.merge_all()
      for step in xrange(steps):
        writer.add_summary(sess.run(merged), global_step=step)

    accumulator = ea.EventAccumulator(
        event_sink, tensor_size_guidance=tensor_size_guidance)
    accumulator.Reload()

    tensors = accumulator.Tensors('scalar')
    self.assertEqual(len(tensors), expected_count)
Example 5
  def testNewStyleAudioSummary(self):
    """Verify processing of tensorboard.plugins.audio.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = test_util.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.random.normal(shape=[5, 441, 2])
      with tf.name_scope('1'):
        audio_summary.op('one', ipt, sample_rate=44100, max_outputs=1)
      with tf.name_scope('2'):
        audio_summary.op('two', ipt, sample_rate=44100, max_outputs=2)
      with tf.name_scope('3'):
        audio_summary.op('three', ipt, sample_rate=44100, max_outputs=3)
      merged = tf.compat.v1.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/one/audio_summary',
        u'2/two/audio_summary',
        u'3/three/audio_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example 6
    def testTFSummaryTensor(self):
        """Verify processing of tf.summary.tensor."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = test_util.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with tf.compat.v1.Graph().as_default():
            with self.test_session() as sess:
                tensor_summary = tf.compat.v1.summary.tensor_summary
                tensor_summary("scalar", tf.constant(1.0))
                tensor_summary("vector", tf.constant([1.0, 2.0, 3.0]))
                tensor_summary("string", tf.constant(six.b("foobar")))
                merged = tf.compat.v1.summary.merge_all()
                summ = sess.run(merged)
                writer.add_summary(summ, 0)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        self.assertTagsEqual(accumulator.Tags(), {
            ea.TENSORS: ["scalar", "vector", "string"],
        })

        scalar_proto = accumulator.Tensors("scalar")[0].tensor_proto
        scalar = tensor_util.make_ndarray(scalar_proto)
        vector_proto = accumulator.Tensors("vector")[0].tensor_proto
        vector = tensor_util.make_ndarray(vector_proto)
        string_proto = accumulator.Tensors("string")[0].tensor_proto
        string = tensor_util.make_ndarray(string_proto)

        self.assertTrue(np.array_equal(scalar, 1.0))
        self.assertTrue(np.array_equal(vector, [1.0, 2.0, 3.0]))
        self.assertTrue(np.array_equal(string, six.b("foobar")))

        self.assertItemsEqual(accumulator.ActivePlugins(), [])
Example 7
 def testDoesntCrashWhenUpcomingFileIsDeleted(self):
     # Use actual file loader so it emits the real error.
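     # DirectoryLoader reads each event file in the directory, delegating
     # per-file parsing to the loader class passed as its second argument.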
     self._loader = directory_loader.DirectoryLoader(
         self._directory, event_file_loader.TimestampedEventFileLoader)
     with test_util.FileWriter(self._directory,
                               filename_suffix=".a") as writer_a:
         writer_a.add_test_summary("a")
     with test_util.FileWriter(self._directory,
                               filename_suffix=".b") as writer_b:
         writer_b.add_test_summary("b")
     generator = self._loader.Load()
     next(generator)  # Ignore the file_version event.
     event = next(generator)
     self.assertEqual("a", event.summary.value[0].tag)
     os.remove(glob.glob(os.path.join(self._directory, "*.b"))[0])
     self.assertEmpty(list(generator))
Example 8
  def testNewStyleScalarSummary(self):
    """Verify processing of tensorboard.plugins.scalar.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = test_util.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      step = tf.compat.v1.placeholder(tf.float32, shape=[])
      scalar_summary.op('accuracy', 1.0 - 1.0 / (step + tf.constant(1.0)))
      scalar_summary.op('xent', 1.0 / (step + tf.constant(1.0)))
      merged = tf.compat.v1.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged, feed_dict={step: float(i)})
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'accuracy/scalar_summary',
        u'xent/scalar_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example 9
    def generate_run(self, logdir, run_name, include_graph,
                     include_run_metadata):
        """Create a run with a text summary, metadata, and optionally a graph."""
        tf.compat.v1.reset_default_graph()
        k1 = tf.constant(math.pi, name='k1')
        k2 = tf.constant(math.e, name='k2')
        result = (k1**k2) - k1
        expected = tf.constant(20.0, name='expected')
        error = tf.abs(result - expected, name='error')
        message_prefix_value = 'error ' * 1000
        true_length = len(message_prefix_value)
        assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length
        message_prefix = tf.constant(message_prefix_value,
                                     name='message_prefix')
        error_message = tf.strings.join(
            [message_prefix,
             tf.as_string(error, name='error_string')],
            name='error_message')
        summary_message = tf.compat.v1.summary.text('summary_message',
                                                    error_message)

        sess = tf.compat.v1.Session()
        writer = test_util.FileWriter(os.path.join(logdir, run_name))
        if include_graph:
            writer.add_graph(sess.graph)
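        # Request a full trace so step stats get recorded into run_metadata.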
        options = tf.compat.v1.RunOptions(
            trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
        run_metadata = config_pb2.RunMetadata()
        s = sess.run(summary_message,
                     options=options,
                     run_metadata=run_metadata)
        writer.add_summary(s)
        if include_run_metadata:
            writer.add_run_metadata(run_metadata, self._METADATA_TAG)
        writer.close()
Example 10
 def test_directory_deletion(self):
     logdir = self.get_temp_dir()
     with test_util.FileWriter(os.path.join(logdir, "a")) as writer:
         writer.add_test_summary("tag_a")
     with test_util.FileWriter(os.path.join(logdir, "b")) as writer:
         writer.add_test_summary("tag_b")
     with test_util.FileWriter(os.path.join(logdir, "c")) as writer:
         writer.add_test_summary("tag_c")
     loader = self._create_logdir_loader(logdir)
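     # synchronize_runs() rescans the logdir for runs and their event files.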
     loader.synchronize_runs()
     self.assertEqual(list(loader.get_run_events().keys()), ["a", "b", "c"])
     shutil.rmtree(os.path.join(logdir, "b"))
     loader.synchronize_runs()
     self.assertEqual(list(loader.get_run_events().keys()), ["a", "c"])
     shutil.rmtree(logdir)
     loader.synchronize_runs()
     self.assertEmpty(loader.get_run_events())
Example 11
    def test_audio(self):
        logdir = self.get_temp_dir()
        steps = (0, 1, 2)
        with test_util.FileWriter(logdir) as writer:
            for step in steps:
                event = event_pb2.Event()
                event.step = step
                event.wall_time = 456.75 * step
                audio = tf.reshape(tf.linspace(0.0, 100.0, 4 * 10 * 2),
                                   (4, 10, 2))
                audio_pb = audio_summary.pb(
                    "foo",
                    audio,
                    labels=["one", "two", "three", "four"],
                    sample_rate=44100,
                    display_name="bar",
                    description="baz",
                )
                writer.add_summary(audio_pb.SerializeToString(),
                                   global_step=step)
        files = os.listdir(logdir)
        self.assertLen(files, 1)
        event_file = os.path.join(logdir, files[0])
        loader = event_file_loader.RawEventFileLoader(event_file)
        input_events = [event_pb2.Event.FromString(x) for x in loader.Load()]

        new_events = []
        initial_metadata = {}
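        # Thread one `initial_metadata` dict through all migration calls so
        # that only the first migrated event for a tag carries full summary
        # metadata (verified below).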
        for input_event in input_events:
            migrated = self._migrate_event(input_event,
                                           initial_metadata=initial_metadata)
            new_events.extend(migrated)

        self.assertLen(new_events, 4)
        self.assertEqual(new_events[0].WhichOneof("what"), "file_version")
        for step in steps:
            with self.subTest("step %d" % step):
                new_event = new_events[step + 1]
                self.assertLen(new_event.summary.value, 1)
                value = new_event.summary.value[0]
                tensor = tensor_util.make_ndarray(value.tensor)
                self.assertEqual(tensor.shape,
                                 (3,))  # 4 clipped to max_outputs=3
                self.assertStartsWith(tensor[0], b"RIFF")
                self.assertStartsWith(tensor[1], b"RIFF")
                if step == min(steps):
                    metadata = value.metadata
                    self.assertEqual(
                        metadata.data_class,
                        summary_pb2.DATA_CLASS_BLOB_SEQUENCE,
                    )
                    self.assertEqual(
                        metadata.plugin_data.plugin_name,
                        audio_metadata.PLUGIN_NAME,
                    )
                else:
                    self.assertFalse(value.HasField("metadata"))
Example 12
 def testDoesntCrashWhenCurrentFileIsDeleted(self):
     # Use actual file loader so it emits the real error.
     self._loader = directory_loader.DirectoryLoader(
         self._directory, event_file_loader.TimestampedEventFileLoader)
     with test_util.FileWriter(self._directory,
                               filename_suffix=".a") as writer_a:
         writer_a.add_test_summary("a")
     events = list(self._loader.Load())
     events.pop(0)  # Ignore the file_version event.
     self.assertEqual(1, len(events))
     self.assertEqual("a", events[0].summary.value[0].tag)
     os.remove(glob.glob(os.path.join(self._directory, "*.a"))[0])
     with test_util.FileWriter(self._directory,
                               filename_suffix=".b") as writer_b:
         writer_b.add_test_summary("b")
     events = list(self._loader.Load())
     events.pop(0)  # Ignore the file_version event.
     self.assertEqual(1, len(events))
     self.assertEqual("b", events[0].summary.value[0].tag)
Example 13
 def test_single_event_logdir(self):
     logdir = self.get_temp_dir()
     with test_util.FileWriter(logdir) as writer:
         writer.add_test_summary("foo")
     loader = self._create_logdir_loader(logdir)
     loader.synchronize_runs()
     self.assertEqual(self._extract_run_to_tags(loader.get_run_events()),
                      {".": ["foo"]})
     # A second load should indicate no new data for the run.
     self.assertEqual(self._extract_run_to_tags(loader.get_run_events()),
                      {".": []})
Example 14
 def test_upload_propagates_experiment_deletion(self):
     logdir = self.get_temp_dir()
     with tb_test_util.FileWriter(logdir) as writer:
         writer.add_test_summary("foo")
     mock_client = _create_mock_client()
     uploader = _create_uploader(mock_client, logdir)
     uploader.create_experiment()
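     # Simulate the backend reporting that the experiment no longer exists.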
     error = test_util.grpc_error(grpc.StatusCode.NOT_FOUND, "nope")
     mock_client.WriteScalar.side_effect = error
     with self.assertRaises(uploader_lib.ExperimentNotFoundError):
         uploader._upload_once()
Example 15
 def test_upload_swallows_rpc_failure(self):
     logdir = self.get_temp_dir()
     with tb_test_util.FileWriter(logdir) as writer:
         writer.add_test_summary("foo")
     mock_client = _create_mock_client()
     uploader = _create_uploader(mock_client, logdir)
     uploader.create_experiment()
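     # An internal RPC failure during upload should be swallowed, not raised.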
     error = test_util.grpc_error(grpc.StatusCode.INTERNAL, "Failure")
     mock_client.WriteScalar.side_effect = error
     uploader._upload_once()
     mock_client.WriteScalar.assert_called_once()
Example 16
 def test_directory_deletion_during_event_loading(self):
     logdir = self.get_temp_dir()
     with test_util.FileWriter(logdir) as writer:
         writer.add_test_summary("foo")
     loader = self._create_logdir_loader(logdir)
     loader.synchronize_runs()
     self.assertEqual(self._extract_run_to_tags(loader.get_run_events()),
                      {".": ["foo"]})
     shutil.rmtree(logdir)
     runs_to_events = loader.get_run_events()
     self.assertEqual(list(runs_to_events.keys()), ["."])
     events = runs_to_events["."]
     self.assertEqual(self._extract_tags(events), [])
Example 17
    def testNewStyleAudioSummary(self):
        """Verify processing of tensorboard.plugins.audio.summary."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = test_util.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with tf.compat.v1.Graph().as_default():
            with self.test_session() as sess:
                ipt = tf.random.normal(shape=[5, 441, 2])
                with tf.name_scope("1"):
                    audio_summary.op("one",
                                     ipt,
                                     sample_rate=44100,
                                     max_outputs=1)
                with tf.name_scope("2"):
                    audio_summary.op("two",
                                     ipt,
                                     sample_rate=44100,
                                     max_outputs=2)
                with tf.name_scope("3"):
                    audio_summary.op("three",
                                     ipt,
                                     sample_rate=44100,
                                     max_outputs=3)
                merged = tf.compat.v1.summary.merge_all()
                writer.add_graph(sess.graph)
                for i in range(10):
                    summ = sess.run(merged)
                    writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        tags = [
            graph_metadata.RUN_GRAPH_NAME,
            "1/one/audio_summary",
            "2/two/audio_summary",
            "3/three/audio_summary",
        ]
        self.assertTagsEqual(
            accumulator.Tags(),
            {
                ea.TENSORS: tags,
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            },
        )

        self.assertItemsEqual(
            accumulator.ActivePlugins(),
            [audio_metadata.PLUGIN_NAME, graph_metadata.PLUGIN_NAME],
        )
Example 18
    def test_graph_def(self):
        # Create a `GraphDef` and write it to disk as an event.
        logdir = self.get_temp_dir()
        writer = test_util.FileWriter(logdir)
        graph_def = graph_pb2.GraphDef()
        graph_def.node.add(name="alice", op="Person")
        graph_def.node.add(name="bob", op="Person")
        graph_def.node.add(name="friendship",
                           op="Friendship",
                           input=["alice", "bob"])
        writer.add_graph(graph=None, graph_def=graph_def, global_step=123)
        writer.flush()

        # Read in the `Event` containing the written `graph_def`.
        files = os.listdir(logdir)
        self.assertLen(files, 1)
        event_file = os.path.join(logdir, files[0])
        self.assertIn("tfevents", event_file)
        loader = event_file_loader.EventFileLoader(event_file)
        events = list(loader.Load())
        self.assertLen(events, 2)
        self.assertEqual(events[0].WhichOneof("what"), "file_version")
        self.assertEqual(events[1].WhichOneof("what"), "graph_def")
        old_event = events[1]

        new_events = self._migrate_event(old_event)
        self.assertLen(new_events, 2)
        self.assertIs(new_events[0], old_event)
        new_event = new_events[1]

        self.assertEqual(new_event.WhichOneof("what"), "summary")
        self.assertLen(new_event.summary.value, 1)
        tensor = tensor_util.make_ndarray(new_event.summary.value[0].tensor)
        self.assertEqual(
            new_event.summary.value[0].metadata.data_class,
            summary_pb2.DATA_CLASS_BLOB_SEQUENCE,
        )
        self.assertEqual(
            new_event.summary.value[0].metadata.plugin_data.plugin_name,
            graphs_metadata.PLUGIN_NAME,
        )
        self.assertEqual(tensor.shape, (1,))
        new_graph_def_bytes = tensor[0]
        self.assertIsInstance(new_graph_def_bytes, bytes)
        self.assertGreaterEqual(len(new_graph_def_bytes), 16)
        new_graph_def = graph_pb2.GraphDef.FromString(new_graph_def_bytes)

        self.assertProtoEquals(graph_def, new_graph_def)
Example 19
    def testNewStyleImageSummary(self):
        """Verify processing of tensorboard.plugins.image.summary."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = test_util.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with tf.compat.v1.Graph().as_default():
            with self.test_session() as sess:
                ipt = tf.ones([10, 4, 4, 3], tf.uint8)
                # This is an interesting case: the old tf.image_summary op
                # would raise an error here because of tag reuse. Keying on
                # the TF node name instead lets the same tag argument be
                # reused across name scopes.
                with tf.name_scope("1"):
                    image_summary.op("images", ipt, max_outputs=1)
                with tf.name_scope("2"):
                    image_summary.op("images", ipt, max_outputs=2)
                with tf.name_scope("3"):
                    image_summary.op("images", ipt, max_outputs=3)
                merged = tf.compat.v1.summary.merge_all()
                writer.add_graph(sess.graph)
                for i in range(10):
                    summ = sess.run(merged)
                    writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        tags = [
            graph_metadata.RUN_GRAPH_NAME,
            "1/images/image_summary",
            "2/images/image_summary",
            "3/images/image_summary",
        ]
        self.assertTagsEqual(
            accumulator.Tags(),
            {
                ea.TENSORS: tags,
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            },
        )

        self.assertItemsEqual(
            accumulator.ActivePlugins(),
            [image_metadata.PLUGIN_NAME, graph_metadata.PLUGIN_NAME],
        )
Example 20
 def test_multiple_writes_to_logdir(self):
     logdir = self.get_temp_dir()
     with test_util.FileWriter(os.path.join(logdir, "a")) as writer:
         writer.add_test_summary("tag_a")
     with test_util.FileWriter(os.path.join(logdir, "b")) as writer:
         writer.add_test_summary("tag_b")
     with test_util.FileWriter(os.path.join(logdir, "b", "x")) as writer:
         writer.add_test_summary("tag_b_x")
     writer_c = test_util.FileWriter(os.path.join(logdir, "c"))
     writer_c.add_test_summary("tag_c")
     writer_c.flush()
     loader = self._create_logdir_loader(logdir)
     loader.synchronize_runs()
     self.assertEqual(
         self._extract_run_to_tags(loader.get_run_events()),
         {
             "a": ["tag_a"],
             "b": ["tag_b"],
             "b/x": ["tag_b_x"],
             "c": ["tag_c"],
         },
     )
     # A second load should indicate no new data.
     self.assertEqual(
         self._extract_run_to_tags(loader.get_run_events()),
         {
             "a": [],
             "b": [],
             "b/x": [],
             "c": []
         },
     )
     # Write some new data to both new and pre-existing event files.
     with test_util.FileWriter(os.path.join(logdir, "a"),
                               filename_suffix=".other") as writer:
         writer.add_test_summary("tag_a_2")
         writer.add_test_summary("tag_a_3")
         writer.add_test_summary("tag_a_4")
     with test_util.FileWriter(os.path.join(logdir, "b", "x"),
                               filename_suffix=".other") as writer:
         writer.add_test_summary("tag_b_x_2")
     with writer_c as writer:
         writer.add_test_summary("tag_c_2")
     # New data should appear on the next load.
     self.assertEqual(
         self._extract_run_to_tags(loader.get_run_events()),
         {
             "a": ["tag_a_2", "tag_a_3", "tag_a_4"],
             "b": [],
             "b/x": ["tag_b_x_2"],
             "c": ["tag_c_2"],
         },
     )
Example 21
  def _writeMetadata(self, logdir, summary_metadata, nonce=''):
    """Write to disk a summary with the given metadata.

    Arguments:
      logdir: a string
      summary_metadata: a `SummaryMetadata` protobuf object
      nonce: optional; will be added to the end of the event file name
        to guarantee that multiple calls to this function do not stomp the
        same file
    """

    summary = summary_pb2.Summary()
    summary.value.add(
        tensor=tensor_util.make_tensor_proto(
            ['po', 'ta', 'to'], dtype=tf.string),
        tag='you_are_it',
        metadata=summary_metadata)
    writer = test_util.FileWriter(logdir, filename_suffix=nonce)
    writer.add_summary(summary.SerializeToString())
    writer.close()
Example 22
 def test_upload_preserves_wall_time(self):
     logdir = self.get_temp_dir()
     with tb_test_util.FileWriter(logdir) as writer:
         # Add a raw event so we can specify the wall_time value deterministically.
         writer.add_event(
             event_pb2.Event(step=1,
                             wall_time=123.123123123,
                             summary=scalar_v2.scalar_pb("foo", 5.0)))
     mock_client = self._create_mock_client()
     mock_rate_limiter = mock.create_autospec(util.RateLimiter)
     uploader = uploader_lib.TensorBoardUploader(mock_client, logdir,
                                                 mock_rate_limiter)
     uploader.create_experiment()
     uploader._upload_once()
     mock_client.WriteScalar.assert_called_once()
     request = mock_client.WriteScalar.call_args[0][0]
     # Just check the wall_time value; everything else is covered in the full
     # logdir test below.
     self.assertEqual(
         123123123123,
         request.runs[0].tags[0].points[0].wall_time.ToNanoseconds())
Example 23
    def testNewStyleScalarSummary(self):
        """Verify processing of tensorboard.plugins.scalar.summary."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = test_util.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with tf.compat.v1.Graph().as_default():
            with self.test_session() as sess:
                step = tf.compat.v1.placeholder(tf.float32, shape=[])
                scalar_summary.op(
                    "accuracy", 1.0 - 1.0 / (step + tf.constant(1.0))
                )
                scalar_summary.op("xent", 1.0 / (step + tf.constant(1.0)))
                merged = tf.compat.v1.summary.merge_all()
                writer.add_graph(sess.graph)
                for i in range(10):
                    summ = sess.run(merged, feed_dict={step: float(i)})
                    writer.add_summary(summ, global_step=i)

        accumulator = self._make_accumulator(event_sink)
        accumulator.Reload()

        tags = [
            graph_metadata.RUN_GRAPH_NAME,
            "accuracy/scalar_summary",
            "xent/scalar_summary",
        ]
        self.assertTagsEqual(
            accumulator.Tags(),
            {
                ea.TENSORS: tags,
                ea.GRAPH: True,
                ea.META_GRAPH: False,
            },
        )

        self.assertItemsEqual(
            accumulator.ActivePlugins(),
            [scalar_metadata.PLUGIN_NAME, graph_metadata.PLUGIN_NAME],
        )
Example 24
  def testNewStyleImageSummary(self):
    """Verify processing of tensorboard.plugins.image.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = test_util.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting case: the old tf.image_summary op would raise
      # an error here because of tag reuse. Keying on the TF node name
      # instead lets the same tag argument be reused across name scopes.
      with tf.name_scope('1'):
        image_summary.op('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        image_summary.op('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        image_summary.op('images', ipt, max_outputs=3)
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image_summary',
        u'2/images/image_summary',
        u'3/images/image_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example 25
  def testTensorsRealistically(self):
    """Test accumulator by writing values and then reading them."""

    def FakeScalarSummary(tag, value):
      value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
      summary = summary_pb2.Summary(value=[value])
      return summary

    directory = os.path.join(self.get_temp_dir(), 'values_dir')
    if tf.gfile.IsDirectory(directory):
      tf.gfile.DeleteRecursively(directory)
    tf.gfile.MkDir(directory)

    writer = test_util.FileWriter(directory, max_queue=100)

    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    writer.add_graph(graph)
    meta_graph_def = tf.train.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)

    run_metadata = tf.RunMetadata()
    device_stats = run_metadata.step_stats.dev_stats.add()
    device_stats.device = 'test device'
    writer.add_run_metadata(run_metadata, 'test run')

    # Write a bunch of events using the writer.
    for i in xrange(30):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()

    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.TENSORS: ['id', 'sq'],
        ea.GRAPH: True,
        ea.META_GRAPH: True,
        ea.RUN_METADATA: ['test run'],
    })
    id_events = acc.Tensors('id')
    sq_events = acc.Tensors('sq')
    self.assertEqual(30, len(id_events))
    self.assertEqual(30, len(sq_events))
    for i in xrange(30):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(
          i, tensor_util.make_ndarray(id_events[i].tensor_proto).item())
      self.assertEqual(
          i * i, tensor_util.make_ndarray(sq_events[i].tensor_proto).item())

    # Write a few more events to test incremental reloading
    for i in xrange(30, 40):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()

    # Verify we can now see all of the data
    acc.Reload()
    id_events = acc.Tensors('id')
    sq_events = acc.Tensors('sq')
    self.assertEqual(40, len(id_events))
    self.assertEqual(40, len(sq_events))
    for i in xrange(40):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(
          i, tensor_util.make_ndarray(id_events[i].tensor_proto).item())
      self.assertEqual(
          i * i, tensor_util.make_ndarray(sq_events[i].tensor_proto).item())
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
    self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Example 26
 def _add_run(self, run_name, experiment_name="experiment"):
     run_path = os.path.join(self.logdir, experiment_name, run_name)
     with test_util.FileWriter(run_path) as writer:
         writer.add_test_summary("foo")
     self.multiplexer.AddRunsFromDirectory(self.logdir)
     self.multiplexer.Reload()
Example 27
    def test_upload_full_logdir(self):
        logdir = self.get_temp_dir()
        mock_client = _create_mock_client()
        mock_rate_limiter = mock.create_autospec(util.RateLimiter)
        uploader = uploader_lib.TensorBoardUploader(
            mock_client, logdir, mock_rate_limiter
        )
        uploader.create_experiment()

        # Convenience helpers for constructing expected requests.
        run = write_service_pb2.WriteScalarRequest.Run
        tag = write_service_pb2.WriteScalarRequest.Tag
        point = scalar_pb2.ScalarPoint

        # First round
        writer = tb_test_util.FileWriter(logdir)
        writer.add_test_summary("foo", simple_value=5.0, step=1)
        writer.add_test_summary("foo", simple_value=6.0, step=2)
        writer.add_test_summary("foo", simple_value=7.0, step=3)
        writer.add_test_summary("bar", simple_value=8.0, step=3)
        writer.flush()
        writer_a = tb_test_util.FileWriter(os.path.join(logdir, "a"))
        writer_a.add_test_summary("qux", simple_value=9.0, step=2)
        writer_a.flush()
        uploader._upload_once()
        self.assertEqual(1, mock_client.WriteScalar.call_count)
        request1 = mock_client.WriteScalar.call_args[0][0]
        _clear_wall_times(request1)
        expected_request1 = write_service_pb2.WriteScalarRequest(
            experiment_id="123",
            runs=[
                run(
                    name=".",
                    tags=[
                        tag(
                            name="foo",
                            metadata=test_util.scalar_metadata("foo"),
                            points=[
                                point(step=1, value=5.0),
                                point(step=2, value=6.0),
                                point(step=3, value=7.0),
                            ],
                        ),
                        tag(
                            name="bar",
                            metadata=test_util.scalar_metadata("bar"),
                            points=[point(step=3, value=8.0)],
                        ),
                    ],
                ),
                run(
                    name="a",
                    tags=[
                        tag(
                            name="qux",
                            metadata=test_util.scalar_metadata("qux"),
                            points=[point(step=2, value=9.0)],
                        )
                    ],
                ),
            ],
        )
        self.assertProtoEquals(expected_request1, request1)
        mock_client.WriteScalar.reset_mock()

        # Second round
        writer.add_test_summary("foo", simple_value=10.0, step=5)
        writer.add_test_summary("baz", simple_value=11.0, step=1)
        writer.flush()
        writer_b = tb_test_util.FileWriter(os.path.join(logdir, "b"))
        writer_b.add_test_summary("xyz", simple_value=12.0, step=1)
        writer_b.flush()
        uploader._upload_once()
        self.assertEqual(1, mock_client.WriteScalar.call_count)
        request2 = mock_client.WriteScalar.call_args[0][0]
        _clear_wall_times(request2)
        expected_request2 = write_service_pb2.WriteScalarRequest(
            experiment_id="123",
            runs=[
                run(
                    name=".",
                    tags=[
                        tag(
                            name="foo",
                            metadata=test_util.scalar_metadata("foo"),
                            points=[point(step=5, value=10.0)],
                        ),
                        tag(
                            name="baz",
                            metadata=test_util.scalar_metadata("baz"),
                            points=[point(step=1, value=11.0)],
                        ),
                    ],
                ),
                run(
                    name="b",
                    tags=[
                        tag(
                            name="xyz",
                            metadata=test_util.scalar_metadata("xyz"),
                            points=[point(step=1, value=12.0)],
                        )
                    ],
                ),
            ],
        )
        self.assertProtoEquals(expected_request2, request2)
        mock_client.WriteScalar.reset_mock()

        # Empty third round
        uploader._upload_once()
        mock_client.WriteScalar.assert_not_called()
Example 28
    def testTensorsRealistically(self):
        """Test accumulator by writing values and then reading them."""

        def FakeScalarSummary(tag, value):
            value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
            summary = summary_pb2.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), "values_dir")
        if tf.io.gfile.isdir(directory):
            tf.io.gfile.rmtree(directory)
        tf.io.gfile.mkdir(directory)

        writer = test_util.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
            # Add a graph to the summary writer.
            writer.add_graph(graph)
            graph_def = graph.as_graph_def(add_shapes=True)
            meta_graph_def = tf.compat.v1.train.export_meta_graph(
                graph_def=graph_def
            )
            writer.add_meta_graph(meta_graph_def)

        run_metadata = config_pb2.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = "test device"
        writer.add_run_metadata(run_metadata, "test run")

        # Write a bunch of events using the writer.
        for i in xrange(30):
            summ_id = FakeScalarSummary("id", i)
            summ_sq = FakeScalarSummary("sq", i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(),
            {
                ea.TENSORS: ["id", "sq"],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ["test run"],
            },
        )
        id_events = acc.Tensors("id")
        sq_events = acc.Tensors("sq")
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(
                i, tensor_util.make_ndarray(id_events[i].tensor_proto).item()
            )
            self.assertEqual(
                i * i,
                tensor_util.make_ndarray(sq_events[i].tensor_proto).item(),
            )

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary("id", i)
            summ_sq = FakeScalarSummary("sq", i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Tensors("id")
        sq_events = acc.Tensors("sq")
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(
                i, tensor_util.make_ndarray(id_events[i].tensor_proto).item()
            )
            self.assertEqual(
                i * i,
                tensor_util.make_ndarray(sq_events[i].tensor_proto).item(),
            )

        expected_graph_def = graph_pb2.GraphDef.FromString(
            graph.as_graph_def(add_shapes=True).SerializeToString()
        )
        self.assertProtoEquals(expected_graph_def, acc.Graph())
        self.assertProtoEquals(
            expected_graph_def,
            graph_pb2.GraphDef.FromString(acc.SerializedGraph()),
        )

        expected_meta_graph = meta_graph_pb2.MetaGraphDef.FromString(
            meta_graph_def.SerializeToString()
        )
        self.assertProtoEquals(expected_meta_graph, acc.MetaGraph())