Code example #1
File: summary_test.py  Project: dhKwang/tensorboard
 def test_empty_input(self):
     pb = self.histogram('empty', [])
     buckets = tensor_util.make_ndarray(pb.value[0].tensor)
     np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))
Code example #2
import warnings

def make_ndarray(tensor):
    # Deferred import inside a catch_warnings block so that any warnings
    # raised while importing TensorBoard are suppressed; the `tensor_util`
    # binding stays in scope after the with block.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", Warning)
        from tensorboard.util import tensor_util
    return tensor_util.make_ndarray(tensor)
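
The wrapper above only changes how the import happens; `make_ndarray` itself decodes a TensorProto into a NumPy ndarray. A minimal, hedged round-trip sketch, assuming `tensorboard.util.tensor_util` also exposes the converse helper `make_tensor_proto` (as in the TensorFlow utility it mirrors):

import numpy as np
from tensorboard.util import tensor_util

# Encode an ndarray as a TensorProto, then decode it back with make_ndarray.
proto = tensor_util.make_tensor_proto(np.array([1.0, 2.0, 3.0], dtype=np.float32))
arr = tensor_util.make_ndarray(proto)
print(arr)  # expected: [1. 2. 3.]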
Code example #3
 def test_bool_value(self):
     # bools should be valid, but converted to floats.
     pb = self.scalar('a', True)
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertEqual(float, type(value))
     self.assertEqual(1.0, value)
Code example #4
    def test_audio(self):
        logdir = self.get_temp_dir()
        steps = (0, 1, 2)
        with test_util.FileWriter(logdir) as writer:
            for step in steps:
                event = event_pb2.Event()
                event.step = step
                event.wall_time = 456.75 * step
                audio = tf.reshape(
                    tf.linspace(0.0, 100.0, 4 * 10 * 2), (4, 10, 2)
                )
                audio_pb = audio_summary.pb(
                    "foo",
                    audio,
                    labels=["one", "two", "three", "four"],
                    sample_rate=44100,
                    display_name="bar",
                    description="baz",
                )
                writer.add_summary(
                    audio_pb.SerializeToString(), global_step=step
                )
        files = os.listdir(logdir)
        self.assertLen(files, 1)
        event_file = os.path.join(logdir, files[0])
        loader = event_file_loader.RawEventFileLoader(event_file)
        input_events = [event_pb2.Event.FromString(x) for x in loader.Load()]

        new_events = []
        initial_metadata = {}
        for input_event in input_events:
            migrated = self._migrate_event(
                input_event, initial_metadata=initial_metadata
            )
            new_events.extend(migrated)

        self.assertLen(new_events, 4)
        self.assertEqual(new_events[0].WhichOneof("what"), "file_version")
        for step in steps:
            with self.subTest("step %d" % step):
                new_event = new_events[step + 1]
                self.assertLen(new_event.summary.value, 1)
                value = new_event.summary.value[0]
                tensor = tensor_util.make_ndarray(value.tensor)
                self.assertEqual(
                    tensor.shape, (3,)
                )  # 4 clipped to max_outputs=3
                self.assertStartsWith(tensor[0], b"RIFF")
                self.assertStartsWith(tensor[1], b"RIFF")
                if step == min(steps):
                    metadata = value.metadata
                    self.assertEqual(
                        metadata.data_class,
                        summary_pb2.DATA_CLASS_BLOB_SEQUENCE,
                    )
                    self.assertEqual(
                        metadata.plugin_data.plugin_name,
                        audio_metadata.PLUGIN_NAME,
                    )
                else:
                    self.assertFalse(value.HasField("metadata"))
Code example #5
    def scalars_impl(self, tag, run, experiment, output_format):
        """Result of the form `(body, mime_type)`."""
        if self._data_provider:
            # Downsample reads to 1000 scalars per time series, which is the
            # default size guidance for scalars under the multiplexer loading
            # logic.
            SAMPLE_COUNT = 1000
            all_scalars = self._data_provider.read_scalars(
                experiment_id=experiment,
                plugin_name=metadata.PLUGIN_NAME,
                downsample=SAMPLE_COUNT,
                run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
            )
            scalars = all_scalars.get(run, {}).get(tag, None)
            if scalars is None:
                raise ValueError('No scalar data for run=%r, tag=%r' %
                                 (run, tag))
            values = [(x.wall_time, x.step, x.value) for x in scalars]
        elif self._db_connection_provider:
            db = self._db_connection_provider()
            # We select for steps greater than -1 because the writer inserts
            # placeholder rows en masse. The check for step filters out those rows.
            cursor = db.execute(
                '''
        SELECT
          Tensors.step,
          Tensors.computed_time,
          Tensors.data,
          Tensors.dtype
        FROM Tensors
        JOIN Tags
          ON Tensors.series = Tags.tag_id
        JOIN Runs
          ON Tags.run_id = Runs.run_id
        WHERE
          /* For backwards compatibility, ignore the experiment id
             for matching purposes if it is empty. */
          (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
          AND Runs.run_name = :run
          AND Tags.tag_name = :tag
          AND Tags.plugin_name = :plugin
          AND Tensors.shape = ''
          AND Tensors.step > -1
        ORDER BY Tensors.step
      ''', dict(exp=experiment, run=run, tag=tag, plugin=metadata.PLUGIN_NAME))
            values = [(wall_time, step, self._get_value(data, dtype_enum))
                      for (step, wall_time, data, dtype_enum) in cursor]
        else:
            tensor_events = self._multiplexer.Tensors(run, tag)
            values = [
                (tensor_event.wall_time, tensor_event.step,
                 tensor_util.make_ndarray(tensor_event.tensor_proto).item())
                for tensor_event in tensor_events
            ]

        if output_format == OutputFormat.CSV:
            string_io = StringIO()
            writer = csv.writer(string_io)
            writer.writerow(['Wall time', 'Step', 'Value'])
            writer.writerows(values)
            return (string_io.getvalue(), 'text/csv')
        else:
            return (values, 'application/json')
Code example #6
File: summary_test.py  Project: dhKwang/tensorboard
 def test_input_with_all_same_values(self):
     pb = self.histogram('twelven', [12, 12, 12])
     buckets = tensor_util.make_ndarray(pb.value[0].tensor)
     np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 3]]))
Code example #7
    def histograms_impl(self, tag, run, experiment, downsample_to=None):
        """Result of the form `(body, mime_type)`.

        At most `downsample_to` events will be returned. If this value is
        `None`, then no downsampling will be performed.

        Raises:
          tensorboard.errors.PublicError: On invalid request.
        """
        if self._data_provider:
            # Downsample reads to 500 histograms per time series, which is
            # the default size guidance for histograms under the multiplexer
            # loading logic.
            SAMPLE_COUNT = downsample_to if downsample_to is not None else 500
            all_histograms = self._data_provider.read_tensors(
                experiment_id=experiment,
                plugin_name=metadata.PLUGIN_NAME,
                downsample=SAMPLE_COUNT,
                run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
            )
            histograms = all_histograms.get(run, {}).get(tag, None)
            if histograms is None:
                raise errors.NotFoundError("No histogram tag %r for run %r" %
                                           (tag, run))
            # Downsample again, even though the data provider is supposed to,
            # because the multiplexer provider currently doesn't. (For
            # well-behaved data providers, this is a no-op.)
            if downsample_to is not None:
                rng = random.Random(0)
                histograms = _downsample(rng, histograms, downsample_to)
            events = [(e.wall_time, e.step, e.numpy.tolist())
                      for e in histograms]
        elif self._db_connection_provider:
            # Serve data from the database.
            db = self._db_connection_provider()
            cursor = db.cursor()
            # Prefetch the tag ID matching this run and tag.
            cursor.execute(
                """
                SELECT
                  tag_id
                FROM Tags
                JOIN Runs USING (run_id)
                WHERE
                  Runs.run_name = :run
                  AND Tags.tag_name = :tag
                  AND Tags.plugin_name = :plugin
                """,
                {
                    "run": run,
                    "tag": tag,
                    "plugin": metadata.PLUGIN_NAME
                },
            )
            row = cursor.fetchone()
            if not row:
                raise errors.NotFoundError("No histogram tag %r for run %r" %
                                           (tag, run))
            (tag_id, ) = row
            # Fetch tensor values, optionally with linear-spaced sampling by step.
            # For steps ranging from s_min to s_max and sample size k, this query
            # divides the range into k - 1 equal-sized intervals and returns the
            # lowest step at or above each of the k interval boundaries (which always
            # includes s_min and s_max, and may be fewer than k results if there are
            # intervals where no steps are present). For contiguous steps the results
            # can be formally expressed as the following:
            #   [s_min + math.ceil(i / k * (s_max - s_min)) for i in range(0, k + 1)]
            cursor.execute(
                """
                SELECT
                  MIN(step) AS step,
                  computed_time,
                  data,
                  dtype,
                  shape
                FROM Tensors
                INNER JOIN (
                  SELECT
                    MIN(step) AS min_step,
                    MAX(step) AS max_step
                  FROM Tensors
                  /* Filter out NULL so we can use TensorSeriesStepIndex. */
                  WHERE series = :tag_id AND step IS NOT NULL
                )
                /* Ensure we omit reserved rows, which have NULL step values. */
                WHERE series = :tag_id AND step IS NOT NULL
                /* Bucket rows into sample_size linearly spaced buckets, or do
                   no sampling if sample_size is NULL. */
                GROUP BY
                  IFNULL(:sample_size - 1, max_step - min_step)
                  * (step - min_step) / (max_step - min_step)
                ORDER BY step
                """,
                {
                    "tag_id": tag_id,
                    "sample_size": downsample_to
                },
            )
            events = [(computed_time, step,
                       self._get_values(data, dtype, shape))
                      for step, computed_time, data, dtype, shape in cursor]
        else:
            # Serve data from events files.
            try:
                tensor_events = self._multiplexer.Tensors(run, tag)
            except KeyError:
                raise errors.NotFoundError("No histogram tag %r for run %r" %
                                           (tag, run))
            if downsample_to is not None:
                rng = random.Random(0)
                tensor_events = _downsample(rng, tensor_events, downsample_to)
            events = [[
                e.wall_time,
                e.step,
                tensor_util.make_ndarray(e.tensor_proto).tolist(),
            ] for e in tensor_events]
        return (events, "application/json")
Code example #8
    def testSetLayout(self):
        layout_proto_to_write = layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='mean biases',
                chart=[
                    layout_pb2.Chart(
                        title='mean layer biases',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'mean/layer\d+/biases'], )),
                ]),
            layout_pb2.Category(
                title='std weights',
                chart=[
                    layout_pb2.Chart(
                        title='stddev layer weights',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'stddev/layer\d+/weights'], )),
                ]),
            layout_pb2.Category(
                title='cross entropy ... and maybe some other values',
                chart=[
                    layout_pb2.Chart(
                        title='cross entropy',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'cross entropy'], )),
                    layout_pb2.Chart(
                        title='accuracy',
                        margin=layout_pb2.MarginChartContent(series=[
                            layout_pb2.MarginChartContent.Series(
                                value='accuracy',
                                lower='accuracy_lower_margin',
                                upper='accuracy_upper_margin')
                        ])),
                    layout_pb2.Chart(
                        title='max layer weights',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'max/layer1/.*', r'max/layer2/.*'], )),
                ],
                closed=True)
        ])

        # Write the data as a summary for the '.' run.
        with tf.compat.v1.Session() as s, test_util.FileWriterCache.get(
                self.logdir) as writer:
            writer.add_summary(s.run(summary.op(layout_proto_to_write)))

        # Read the data from disk.
        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()
        tensor_events = multiplexer.Tensors('.', metadata.CONFIG_SUMMARY_TAG)
        self.assertEqual(1, len(tensor_events))

        # Parse the data.
        string_array = tensor_util.make_ndarray(tensor_events[0].tensor_proto)
        content = np.asscalar(string_array)
        layout_proto_from_disk = layout_pb2.Layout()
        layout_proto_from_disk.ParseFromString(tf.compat.as_bytes(content))

        # Verify the content.
        self.assertProtoEquals(layout_proto_to_write, layout_proto_from_disk)
Code example #9
    def bars_impl(self, tag, run, downsample_to=None):
        """Result of the form `(body, mime_type)`, or `ValueError`.

        At most `downsample_to` events will be returned. If this value is
        `None`, then no downsampling will be performed.
        """
        if self._db_connection_provider:
            # Serve data from the database.
            db = self._db_connection_provider()
            cursor = db.cursor()
            # Prefetch the tag ID matching this run and tag.
            cursor.execute(
                '''
          SELECT
            tag_id
          FROM Tags
          JOIN Runs USING (run_id)
          WHERE
            Runs.run_name = :run
            AND Tags.tag_name = :tag
            AND Tags.plugin_name = :plugin
          ''', {
                    'run': run,
                    'tag': tag,
                    'plugin': metadata.PLUGIN_NAME
                })
            row = cursor.fetchone()
            if not row:
                raise ValueError('No bar tag %r for run %r' % (tag, run))
            (tag_id, ) = row
            # Fetch tensor values, optionally with linear-spaced sampling by step.
            # For steps ranging from s_min to s_max and sample size k, this query
            # divides the range into k - 1 equal-sized intervals and returns the
            # lowest step at or above each of the k interval boundaries (which always
            # includes s_min and s_max, and may be fewer than k results if there are
            # intervals where no steps are present). For contiguous steps the results
            # can be formally expressed as the following:
            #   [s_min + math.ceil(i / k * (s_max - s_min)) for i in range(0, k + 1)]
            cursor.execute(
                '''
          SELECT
            MIN(step) AS step,
            computed_time,
            data,
            dtype,
            shape
          FROM Tensors
          INNER JOIN (
            SELECT
              MIN(step) AS min_step,
              MAX(step) AS max_step
            FROM Tensors
            /* Filter out NULL so we can use TensorSeriesStepIndex. */
            WHERE series = :tag_id AND step IS NOT NULL
          )
          /* Ensure we omit reserved rows, which have NULL step values. */
          WHERE series = :tag_id AND step IS NOT NULL
          /* Bucket rows into sample_size linearly spaced buckets, or do
             no sampling if sample_size is NULL. */
          GROUP BY
            IFNULL(:sample_size - 1, max_step - min_step)
            * (step - min_step) / (max_step - min_step)
          ORDER BY step
          ''', {
                    'tag_id': tag_id,
                    'sample_size': downsample_to
                })
            events = [(computed_time, step,
                       self._get_values(data, dtype, shape))
                      for step, computed_time, data, dtype, shape in cursor]
        else:
            # Serve data from events files.
            try:
                tensor_events = self._multiplexer.Tensors(run, tag)
            except KeyError:
                raise ValueError('No bar tag %r for run %r' % (tag, run))
            if downsample_to is not None and len(
                    tensor_events) > downsample_to:
                rand_indices = random.Random(0).sample(
                    six.moves.xrange(len(tensor_events)), downsample_to)
                indices = sorted(rand_indices)
                tensor_events = [tensor_events[i] for i in indices]
            events = [[
                e.wall_time, e.step,
                tensor_util.make_ndarray(e.tensor_proto).tolist()
            ] for e in tensor_events]
        return (events, 'application/json')
Code example #10
 def test_bytes_value(self):
     pb = self.compute_and_check_summary_pb(
         'mi', b'A name\xe2\x80\xa6I call myself')
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertIsInstance(value, six.binary_type)
     self.assertEqual(b'A name\xe2\x80\xa6I call myself', value)
Code example #11
    def on_value_event(self, event):
        """Records the summary values based on an updated message from the
        debugger.

        Logs an error message if writing the event to disk fails.

        Args:
          event: The Event proto to be processed.
        """
        if not event.summary.value:
            logger.warn("The summary of the event lacks a value.")
            return

        # The node name property is actually a watch key, which is a concatenation
        # of several pieces of data.
        watch_key = event.summary.value[0].node_name
        if not watch_key.endswith(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX):
            # Ignore events that lack a DebugNumericSummary.
            # NOTE(@chihuahua): We may later handle other types of debug ops.
            return

        # We remove the constants.DEBUG_NUMERIC_SUMMARY_SUFFIX from the end of the
        # watch name because it is not distinguishing: every health pill entry ends
        # with it.
        node_name_and_output_slot = watch_key[
            : -len(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX)
        ]

        shape = tensor_util.make_ndarray(event.summary.value[0].tensor).shape
        if (
            len(shape) != 1
            or shape[0] < constants.MIN_DEBUG_NUMERIC_SUMMARY_TENSOR_LENGTH
        ):
            logger.warn(
                "Health-pill tensor either lacks a dimension or is "
                "shaped incorrectly: %s" % shape
            )
            return

        match = re.match(r"^(.*):(\d+)$", node_name_and_output_slot)
        if not match:
            logger.warn(
                (
                    "A event with a health pill has an invalid node name and output "
                    "slot combination, (i.e., an unexpected debug op): %r"
                ),
                node_name_and_output_slot,
            )
            return

        if self._session_run_index >= 0:
            event.step = self._session_run_index
        else:
            # Data from parameter servers (or any graphs without a master) do not
            # contain core metadata. So the session run count is missing. Set its
            # value to a microsecond epoch timestamp.
            event.step = int(time.time() * 1e6)

        # Write this event to the events file designated for data from the
        # debugger.
        self._events_writer_manager.write_event(event)

        alert = numerics_alert.extract_numerics_alert(event)
        if self._numerics_alert_callback and alert:
            self._numerics_alert_callback(alert)
Code example #12
 def convert_tensor_event(event):
     return provider.TensorDatum(
         step=event.step,
         wall_time=event.wall_time,
         numpy=tensor_util.make_ndarray(event.tensor_proto),
     )
Code example #13
File: summary_test.py  Project: yatbear/tensorboard
 def test_zero_bucket_count(self):
     pb = self.histogram("zero_bucket_count", [1, 1, 1], buckets=0)
     buckets = tensor_util.make_ndarray(pb.value[0].tensor)
     np.testing.assert_array_equal(buckets, np.array([]).reshape((0, 3)))
Code example #14
File: summary_test.py  Project: yatbear/tensorboard
 def test_empty_input_of_high_rank(self):
     pb = self.histogram("empty_but_fancy", [[[], []], [[], []]])
     buckets = tensor_util.make_ndarray(pb.value[0].tensor)
     # By default there will be 30 buckets.
     np.testing.assert_allclose(buckets, np.zeros((30, 3)))
Code example #15
File: summary_test.py  Project: dhKwang/tensorboard
 def test_empty_input_of_high_rank(self):
     pb = self.histogram('empty_but_fancy', [[[], []], [[], []]])
     buckets = tensor_util.make_ndarray(pb.value[0].tensor)
     np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))
Code example #16
    def test_raw_data(self):
        # We pass these raw counts and precision/recall values.
        name = 'foo'
        true_positive_counts = [75, 64, 21, 5, 0]
        false_positive_counts = [150, 105, 18, 0, 0]
        true_negative_counts = [0, 45, 132, 150, 150]
        false_negative_counts = [0, 11, 54, 70, 75]
        precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
        recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
        num_thresholds = 5
        display_name = 'some_raw_values'
        description = 'We passed raw values into a summary op.'

        op = summary.raw_data_op(
            name=name,
            true_positive_counts=tf.constant(true_positive_counts),
            false_positive_counts=tf.constant(false_positive_counts),
            true_negative_counts=tf.constant(true_negative_counts),
            false_negative_counts=tf.constant(false_negative_counts),
            precision=tf.constant(precision),
            recall=tf.constant(recall),
            num_thresholds=num_thresholds,
            display_name=display_name,
            description=description)
        pb_via_op = self.normalize_summary_pb(self.pb_via_op(op))

        # Call the corresponding method that is decoupled from TensorFlow.
        pb = self.normalize_summary_pb(
            summary.raw_data_pb(name=name,
                                true_positive_counts=true_positive_counts,
                                false_positive_counts=false_positive_counts,
                                true_negative_counts=true_negative_counts,
                                false_negative_counts=false_negative_counts,
                                precision=precision,
                                recall=recall,
                                num_thresholds=num_thresholds,
                                display_name=display_name,
                                description=description))

        # The 2 methods above should write summaries with the same data.
        self.assertProtoEquals(pb, pb_via_op)

        # Test the metadata.
        summary_metadata = pb.value[0].metadata
        self.assertEqual('some_raw_values', summary_metadata.display_name)
        self.assertEqual('We passed raw values into a summary op.',
                         summary_metadata.summary_description)
        self.assertEqual(metadata.PLUGIN_NAME,
                         summary_metadata.plugin_data.plugin_name)

        plugin_data = metadata.parse_plugin_metadata(
            summary_metadata.plugin_data.content)
        self.assertEqual(5, plugin_data.num_thresholds)

        # Test the summary contents.
        values = tensor_util.make_ndarray(pb.value[0].tensor)
        self.verify_float_arrays_are_equal(
            [
                [75.0, 64.0, 21.0, 5.0, 0.0],  # True positives.
                [150.0, 105.0, 18.0, 0.0, 0.0],  # False positives.
                [0.0, 45.0, 132.0, 150.0, 150.0],  # True negatives.
                [0.0, 11.0, 54.0, 70.0, 75.0],  # False negatives.
                [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0],  # Precision.
                [1.0, 0.8533334, 0.28, 0.0666667, 0.0],  # Recall.
            ],
            values)
Code example #17
File: summary_test.py  Project: dhKwang/tensorboard
 def test_singleton_input(self):
     pb = self.histogram('twelve', [12])
     buckets = tensor_util.make_ndarray(pb.value[0].tensor)
     np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 1]]))
Code example #18
 def _convert_scalar_event(self, event):
     return provider.ScalarDatum(
         step=event.step,
         wall_time=event.wall_time,
         value=tensor_util.make_ndarray(event.tensor_proto).item(),
     )
Code example #19
 def test_bytes_value(self):
     pb = self.text("mi", b"A name\xe2\x80\xa6I call myself")
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertIsInstance(value, six.binary_type)
     self.assertEqual(b"A name\xe2\x80\xa6I call myself", value)
Code example #20
File: summary_test.py  Project: zshxie/tensorboard
 def test_unicode_value(self):
     pb = self.text("mi", "A name\u2026I call myself")
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertIsInstance(value, bytes)
     self.assertEqual(b"A name\xe2\x80\xa6I call myself", value)
Code example #21
  def testTensorsRealistically(self):
    """Test accumulator by writing values and then reading them."""

    def FakeScalarSummary(tag, value):
      value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
      summary = summary_pb2.Summary(value=[value])
      return summary

    directory = os.path.join(self.get_temp_dir(), 'values_dir')
    if tf.io.gfile.isdir(directory):
      tf.io.gfile.rmtree(directory)
    tf.io.gfile.mkdir(directory)

    writer = test_util.FileWriter(directory, max_queue=100)

    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    writer.add_graph(graph)
    meta_graph_def = tf.compat.v1.train.export_meta_graph(graph_def=graph.as_graph_def(
        add_shapes=True))
    writer.add_meta_graph(meta_graph_def)

    run_metadata = config_pb2.RunMetadata()
    device_stats = run_metadata.step_stats.dev_stats.add()
    device_stats.device = 'test device'
    writer.add_run_metadata(run_metadata, 'test run')

    # Write a bunch of events using the writer.
    for i in xrange(30):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()

    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.TENSORS: ['id', 'sq'],
        ea.GRAPH: True,
        ea.META_GRAPH: True,
        ea.RUN_METADATA: ['test run'],
    })
    id_events = acc.Tensors('id')
    sq_events = acc.Tensors('sq')
    self.assertEqual(30, len(id_events))
    self.assertEqual(30, len(sq_events))
    for i in xrange(30):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, tensor_util.make_ndarray(id_events[i].tensor_proto).item())
      self.assertEqual(i * i, tensor_util.make_ndarray(sq_events[i].tensor_proto).item())

    # Write a few more events to test incremental reloading
    for i in xrange(30, 40):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()

    # Verify we can now see all of the data
    acc.Reload()
    id_events = acc.Tensors('id')
    sq_events = acc.Tensors('sq')
    self.assertEqual(40, len(id_events))
    self.assertEqual(40, len(sq_events))
    for i in xrange(40):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, tensor_util.make_ndarray(id_events[i].tensor_proto).item())
      self.assertEqual(i * i, tensor_util.make_ndarray(sq_events[i].tensor_proto).item())

    expected_graph_def = graph_pb2.GraphDef.FromString(
          graph.as_graph_def(add_shapes=True).SerializeToString())
    self.assertProtoEquals(expected_graph_def, acc.Graph())

    expected_meta_graph = meta_graph_pb2.MetaGraphDef.FromString(
          meta_graph_def.SerializeToString())
    self.assertProtoEquals(expected_meta_graph, acc.MetaGraph())
Code example #22
 def test_float_value(self):
     pb = self.scalar('a', 1.13)
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertEqual(float, type(value))
     self.assertNear(1.13, value, 1e-6)
Code example #23
 def test_unicode_value(self):
     pb = self.text('mi', u'A name\u2026I call myself')
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertIsInstance(value, six.binary_type)
     self.assertEqual(b'A name\xe2\x80\xa6I call myself', value)
Code example #24
 def test_int_value(self):
     # ints should be valid, but converted to floats.
     pb = self.scalar('a', 113)
     value = tensor_util.make_ndarray(pb.value[0].tensor).item()
     self.assertEqual(float, type(value))
     self.assertNear(113.0, value, 1e-6)
Code example #25
 def _get_sample(self, tensor_event, sample):
     """Returns a single sample from a batch of samples."""
     data = tensor_util.make_ndarray(tensor_event.tensor_proto)
     return data[sample].tolist()
Code example #26
    def scalars_impl(self, tag, run, experiment, output_format):
        """Result of the form `(body, mime_type)`."""
        if self._data_provider:
            all_scalars = self._data_provider.read_scalars(
                experiment_id=experiment,
                plugin_name=metadata.PLUGIN_NAME,
                downsample=self._downsample_to,
                run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
            )
            scalars = all_scalars.get(run, {}).get(tag, None)
            if scalars is None:
                raise errors.NotFoundError(
                    "No scalar data for run=%r, tag=%r" % (run, tag))
            values = [(x.wall_time, x.step, x.value) for x in scalars]
        elif self._db_connection_provider:
            db = self._db_connection_provider()
            # We select for steps greater than -1 because the writer inserts
            # placeholder rows en masse. The check for step filters out those rows.
            cursor = db.execute(
                """
                SELECT
                  Tensors.step,
                  Tensors.computed_time,
                  Tensors.data,
                  Tensors.dtype
                FROM Tensors
                JOIN Tags
                  ON Tensors.series = Tags.tag_id
                JOIN Runs
                  ON Tags.run_id = Runs.run_id
                WHERE
                  /* For backwards compatibility, ignore the experiment id
                     for matching purposes if it is empty. */
                  (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
                  AND Runs.run_name = :run
                  AND Tags.tag_name = :tag
                  AND Tags.plugin_name = :plugin
                  AND Tensors.shape = ''
                  AND Tensors.step > -1
                ORDER BY Tensors.step
                """,
                dict(
                    exp=experiment,
                    run=run,
                    tag=tag,
                    plugin=metadata.PLUGIN_NAME,
                ),
            )
            values = [(wall_time, step, self._get_value(data, dtype_enum))
                      for (step, wall_time, data, dtype_enum) in cursor]
        else:
            try:
                tensor_events = self._multiplexer.Tensors(run, tag)
            except KeyError:
                raise errors.NotFoundError(
                    "No scalar data for run=%r, tag=%r" % (run, tag))
            values = [(
                tensor_event.wall_time,
                tensor_event.step,
                tensor_util.make_ndarray(tensor_event.tensor_proto).item(),
            ) for tensor_event in tensor_events]

        if output_format == OutputFormat.CSV:
            string_io = StringIO()
            writer = csv.writer(string_io)
            writer.writerow(["Wall time", "Step", "Value"])
            writer.writerows(values)
            return (string_io.getvalue(), "text/csv")
        else:
            return (values, "application/json")