Example #1
 def compute_and_check_summary_pb(self,
                                  name,
                                  labels,
                                  predictions,
                                  num_thresholds,
                                  weights=None,
                                  display_name=None,
                                  description=None,
                                  feed_dict=None):
     """Use both `op` and `pb` to get a summary, asserting equality.
 Returns:
   a `Summary` protocol buffer
 """
     labels_tensor = tf.constant(labels)
     predictions_tensor = tf.constant(predictions)
     weights_tensor = None if weights is None else tf.constant(weights)
     op = summary.op(name=name,
                     labels=labels_tensor,
                     predictions=predictions_tensor,
                     num_thresholds=num_thresholds,
                     weights=weights_tensor,
                     display_name=display_name,
                     description=description)
     pb = self.normalize_summary_pb(
         summary.pb(name=name,
                    labels=labels,
                    predictions=predictions,
                    num_thresholds=num_thresholds,
                    weights=weights,
                    display_name=display_name,
                    description=description))
     pb_via_op = self.normalize_summary_pb(
         self.pb_via_op(op, feed_dict=feed_dict))
     self.assertProtoEquals(pb, pb_via_op)
     return pb
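For context, a test method might invoke this helper roughly as follows; the labels, predictions, and threshold count here are illustrative, not taken from the original suite:

 def test_tiny_pr_curve(self):
     # Hypothetical test data; any boolean labels paired with float
     # predictions of the same length would do.
     pb = self.compute_and_check_summary_pb(
         name='tiny',
         labels=[True, False, True],
         predictions=[0.9, 0.2, 0.6],
         num_thresholds=5)
     # The helper already asserted op/pb equality; the returned proto
     # can be inspected further.
     self.assertEqual(1, len(pb.value))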
Example #2
  def testNewStyleScalarSummary(self):
    """Verify processing of tensorboard.plugins.scalar.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      step = tf.placeholder(tf.float32, shape=[])
      scalar_summary.op('accuracy', 1.0 - 1.0 / (step + tf.constant(1.0)))
      scalar_summary.op('xent', 1.0 / (step + tf.constant(1.0)))
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged, feed_dict={step: float(i)})
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'accuracy/scalar_summary',
        u'xent/scalar_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example #3
    def test_matches_op_with_updates(self):
        predictions = tf.constant([0.2, 0.4, 0.5, 0.6, 0.8], dtype=tf.float32)
        labels = tf.constant([False, True, True, False, True], dtype=tf.bool)
        pr_curve, update_op = summary.streaming_op(name='pr_curve',
                                                   predictions=predictions,
                                                   labels=labels,
                                                   num_thresholds=10)

        complete_predictions = tf.tile(predictions, [3])
        complete_labels = tf.tile(labels, [3])
        expected_pr_curve = summary.op(name='pr_curve',
                                       predictions=complete_predictions,
                                       labels=complete_labels,
                                       num_thresholds=10)
        with self.test_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run([update_op])
            sess.run([update_op])
            sess.run([update_op])

            proto = self.pb_via_op(pr_curve)
            expected_proto = self.pb_via_op(expected_pr_curve)

            # The second summary op reuses the name 'pr_curve', so TensorFlow
            # appends '_1' to its name scope; normalize the tag before comparing.
            self.assertEqual(proto.value[0].tag, 'pr_curve/pr_curves')
            self.assertEqual(expected_proto.value[0].tag,
                             'pr_curve_1/pr_curves')
            expected_proto.value[0].tag = 'pr_curve/pr_curves'

            self.assertProtoEquals(expected_proto, proto)
Example #4
    def generate_run(self, run_name, include_graph):
        """Create a run with a text summary, metadata, and optionally a graph."""
        tf.reset_default_graph()
        k1 = tf.constant(math.pi, name='k1')
        k2 = tf.constant(math.e, name='k2')
        result = (k1**k2) - k1
        expected = tf.constant(20.0, name='expected')
        error = tf.abs(result - expected, name='error')
        message_prefix_value = 'error ' * 1000
        true_length = len(message_prefix_value)
        assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length
        message_prefix = tf.constant(message_prefix_value,
                                     name='message_prefix')
        error_message = tf.string_join(
            [message_prefix,
             tf.as_string(error, name='error_string')],
            name='error_message')
        summary_message = tf.summary.text('summary_message', error_message)

        sess = tf.Session()
        writer = tf.summary.FileWriter(os.path.join(self.logdir, run_name))
        if include_graph:
            writer.add_graph(sess.graph)
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        s = sess.run(summary_message,
                     options=options,
                     run_metadata=run_metadata)
        writer.add_summary(s)
        writer.add_run_metadata(run_metadata, self._METADATA_TAG)
        writer.close()
Example #5
    def testTFSummaryTensor(self):
        """Verify processing of tf.summary.tensor."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = tf.summary.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with self.test_session() as sess:
            tf.summary.tensor_summary('scalar', tf.constant(1.0))
            tf.summary.tensor_summary('vector', tf.constant([1.0, 2.0, 3.0]))
            tf.summary.tensor_summary('string', tf.constant(six.b('foobar')))
            merged = tf.summary.merge_all()
            summ = sess.run(merged)
            writer.add_summary(summ, 0)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        self.assertTagsEqual(accumulator.Tags(), {
            ea.TENSORS: ['scalar', 'vector', 'string'],
        })

        scalar_proto = accumulator.Tensors('scalar')[0].tensor_proto
        scalar = tf.make_ndarray(scalar_proto)
        vector_proto = accumulator.Tensors('vector')[0].tensor_proto
        vector = tf.make_ndarray(vector_proto)
        string_proto = accumulator.Tensors('string')[0].tensor_proto
        string = tf.make_ndarray(string_proto)

        self.assertTrue(np.array_equal(scalar, 1.0))
        self.assertTrue(np.array_equal(vector, [1.0, 2.0, 3.0]))
        self.assertTrue(np.array_equal(string, six.b('foobar')))
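The decode step above relies on the TensorProto round trip. A minimal standalone sketch of the same mechanism, using only public TF helpers:

import numpy as np
import tensorflow as tf

# Encode an ndarray into a TensorProto, then decode it back. The test above
# applies tf.make_ndarray in exactly this way to protos pulled from the
# accumulator's event stream.
original = np.array([1.0, 2.0, 3.0], dtype=np.float32)
proto = tf.make_tensor_proto(original)
restored = tf.make_ndarray(proto)
assert np.array_equal(original, restored)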
Example #6
    def generate_testdata(self, include_text=True, logdir=None):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.placeholder(tf.string)
        summary_tensor = tf.summary.text('message', placeholder)
        vector_summary = tf.summary.text('vector', placeholder)
        scalar_summary = tf.summary.scalar('twelve', tf.constant(12))

        run_names = ['fry', 'leela']
        for run_name in run_names:
            subdir = os.path.join(logdir or self.logdir, run_name)
            writer = tf.summary.FileWriter(subdir)
            writer.add_graph(sess.graph)

            step = 0
            for gem in GEMS:
                message = run_name + ' *loves* ' + gem
                feed_dict = {
                    placeholder: message,
                }
                if include_text:
                    summ = sess.run(summary_tensor, feed_dict=feed_dict)
                    writer.add_summary(summ, global_step=step)
                step += 1

            vector_message = ['one', 'two', 'three', 'four']
            if include_text:
                summ = sess.run(vector_summary,
                                feed_dict={placeholder: vector_message})
                writer.add_summary(summ)

            summ = sess.run(scalar_summary, feed_dict={placeholder: []})
            writer.add_summary(summ)

            writer.close()
Example #7
    def compute_and_check_summary_pb(self,
                                     name,
                                     data,
                                     display_name=None,
                                     description=None,
                                     data_tensor=None,
                                     feed_dict=None):
        """Use both `op` and `pb` to get a summary, asserting equality.

    Returns:
      a `Summary` protocol buffer
    """
        if data_tensor is None:
            data_tensor = tf.constant(data)
        op = summary.op(name,
                        data,
                        display_name=display_name,
                        description=description)
        pb = self.normalize_summary_pb(
            summary.pb(name,
                       data,
                       display_name=display_name,
                       description=description))
        pb_via_op = self.normalize_summary_pb(
            self.pb_via_op(op, feed_dict=feed_dict))
        self.assertProtoEquals(pb, pb_via_op)
        return pb
Example #8
  def _testTFSummaryTensor_SizeGuidance(self,
                                        plugin_name,
                                        tensor_size_guidance,
                                        steps,
                                        expected_count):
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      summary_metadata = tf.SummaryMetadata(
          plugin_data=tf.SummaryMetadata.PluginData(plugin_name=plugin_name,
                                                    content=b'{}'))
      tf.summary.tensor_summary('scalar', tf.constant(1.0),
                                summary_metadata=summary_metadata)
      merged = tf.summary.merge_all()
      for step in xrange(steps):
        writer.add_summary(sess.run(merged), global_step=step)

    accumulator = ea.EventAccumulator(
        event_sink, tensor_size_guidance=tensor_size_guidance)
    accumulator.Reload()

    tensors = accumulator.Tensors('scalar')
    self.assertEqual(len(tensors), expected_count)
Example #9
 def test_new_style_scalar(self):
   op = scalar_summary.op('important_constants', tf.constant(0x5f3759df),
                          display_name='Important constants',
                          description='evil floating point bit magic')
   value = self._value_from_op(op)
   assert value.HasField('tensor'), value
   self._assert_noop(value)
Example #10
def higher_order_tensors(step):
    # We're not limited to passing scalar tensors to the summary
    # operation. If we pass a rank-1 or rank-2 tensor, it'll be visualized
    # as a table in TensorBoard. (For higher-ranked tensors, you'll see
    # just a 2D slice of the data.)
    #
    # To demonstrate this, let's create a multiplication table.

    # First, we'll create the table body, a `step`-by-`step` array of
    # strings.
    numbers = tf.range(step)
    numbers_row = tf.expand_dims(numbers, 0)  # shape: [1, step]
    numbers_column = tf.expand_dims(numbers, 1)  # shape: [step, 1]
    products = tf.matmul(numbers_column, numbers_row)  # shape: [step, step]
    table_body = tf.as_string(products)

    # Next, we'll create a header row and column, and a little
    # multiplication sign to put in the corner.
    bold_numbers = tf.string_join(['**', tf.as_string(numbers), '**'])
    bold_row = tf.expand_dims(bold_numbers, 0)
    bold_column = tf.expand_dims(bold_numbers, 1)
    corner_cell = tf.constant(u'\u00d7'.encode('utf-8'))  # MULTIPLICATION SIGN

    # Now, we have to put the pieces together. Using `axis=0` stacks
    # vertically; using `axis=1` juxtaposes horizontally.
    table_body_and_top_row = tf.concat([bold_row, table_body], axis=0)
    table_left_column = tf.concat([[[corner_cell]], bold_column], axis=0)
    table_full = tf.concat([table_left_column, table_body_and_top_row], axis=1)

    # The result, `table_full`, is a rank-2 string tensor of shape
    # `[step + 1, step + 1]`. We can pass it directly to the summary, and
    # we'll get a nicely formatted table in TensorBoard.
    tf.summary.text('multiplication_table', table_full)
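A minimal driver for this function might look like the sketch below; the logdir and the step range are placeholders, and the int32 dtype for the step placeholder is an assumption that matches the tf.range call above:

import tensorflow as tf

# Hypothetical driver: build the table once against a step placeholder,
# then evaluate and write the merged summary for a few steps.
step = tf.placeholder(tf.int32, shape=[])
higher_order_tensors(step)
merged = tf.summary.merge_all()
with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/text_demo')  # placeholder logdir
    for i in range(2, 6):
        writer.add_summary(sess.run(merged, feed_dict={step: i}),
                           global_step=i)
    writer.close()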
Example #11
    def testGraphFromMetaGraphBecomesAvailable(self):
        """Test accumulator by writing values and then reading them."""

        directory = os.path.join(self.get_temp_dir(),
                                 'metagraph_test_values_dir')
        if tf.gfile.IsDirectory(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MkDir(directory)

        writer = tf.summary.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        meta_graph_def = tf.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(acc.Tags(), {
            ea.GRAPH: True,
            ea.META_GRAPH: True,
        })
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Example #12
  def compute_and_check_summary_pb(self,
                                   name='nemo',
                                   data=None,
                                   bucket_count=None,
                                   display_name=None,
                                   description=None,
                                   data_tensor=None,
                                   bucket_count_tensor=None,
                                   feed_dict=None):
    """Use both `op` and `pb` to get a summary, asserting equality.

    Returns:
      a `Summary` protocol buffer
    """
    if data is None:
      data = self.gaussian
    if data_tensor is None:
      data_tensor = tf.constant(data)
    if bucket_count_tensor is None:
      bucket_count_tensor = bucket_count
    op = summary.op(name, data_tensor, bucket_count=bucket_count_tensor,
                    display_name=display_name, description=description)
    pb = summary.pb(name, data, bucket_count=bucket_count,
                    display_name=display_name, description=description)
    pb_via_op = self.pb_via_op(op, feed_dict=feed_dict)
    self.assertProtoEquals(pb, pb_via_op)
    return pb
Example #13
    def generate_testdata(self):
        tf.reset_default_graph()
        sess = tf.Session()
        placeholder = tf.constant('I am deprecated.')

        # Previously, we had used a means of creating text summaries that used
        # plugin assets (which loaded JSON files containing runs and tags). The
        # plugin must continue to be able to load summaries of that format, so we
        # create a summary using that old plugin asset-based method here.
        plugin_asset_summary = tf.summary.tensor_summary(
            'old_plugin_asset_summary', placeholder)
        assets_directory = os.path.join(self.logdir, 'fry', 'plugins',
                                        'tensorboard_text')
        # Make the directory of assets if it does not exist.
        if not os.path.isdir(assets_directory):
            try:
                os.makedirs(assets_directory)
            except OSError as err:
                self.fail('Could not make assets directory %r: %r' %
                          (assets_directory, err))
        json_path = os.path.join(assets_directory, 'tensors.json')
        with open(json_path, 'w+') as tensors_json_file:
            # Write the op name to a JSON file that the text plugin later uses to
            # determine the tag names of tensors to fetch.
            tensors_json_file.write(json.dumps([plugin_asset_summary.op.name]))

        run_name = 'fry'
        subdir = os.path.join(self.logdir, run_name)
        writer = tf.summary.FileWriter(subdir)
        writer.add_graph(sess.graph)

        summ = sess.run(plugin_asset_summary)
        writer.add_summary(summ)
        writer.close()
Example #14
def get_image(verbose=False):
  """Get the image as a TensorFlow variable.

  Returns:
    A `tf.Variable`, which must be initialized prior to use:
    invoke `sess.run(result.initializer)`."""
  base_data = tf.constant(image_data(verbose=verbose))
  base_image = tf.image.decode_image(base_data, channels=3)
  base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3))
  parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8)
  return parsed_image
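As the docstring says, the returned variable must be initialized before it is read; a usage sketch:

# Hypothetical usage: run the variable's initializer, then fetch the pixels.
image = get_image()
with tf.Session() as sess:
  sess.run(image.initializer)
  pixels = sess.run(image)  # uint8, shape (IMAGE_HEIGHT, IMAGE_WIDTH, 3)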
Example #15
    def test_only_1_summary_generated(self):
        """Tests that the streaming op only generates 1 summary for PR curves.

    This test was made in response to a bug in which calling the streaming op
    actually introduced 2 tags.
    """
        predictions = tf.constant([0.2, 0.4, 0.5, 0.6, 0.8], dtype=tf.float32)
        labels = tf.constant([False, True, True, False, True], dtype=tf.bool)
        _, update_op = summary.streaming_op(name='pr_curve',
                                            predictions=predictions,
                                            labels=labels,
                                            num_thresholds=10)
        with self.test_session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(update_op)
            summary_proto = tf.Summary()
            summary_proto.ParseFromString(sess.run(tf.summary.merge_all()))

        tags = [v.tag for v in summary_proto.value]
        # Only 1 tag should have been introduced.
        self.assertEqual(['pr_curve/pr_curves'], tags)
Example #16
 def test_fully_populated_tensor(self):
   metadata = tf.SummaryMetadata(
       plugin_data=tf.SummaryMetadata.PluginData(
           plugin_name='font_of_wisdom',
           content=b'adobe_garamond'))
   op = tf.summary.tensor_summary(
       name='tensorpocalypse',
       tensor=tf.constant([[0.0, 2.0], [float('inf'), float('nan')]]),
       display_name='TENSORPOCALYPSE',
       summary_description='look on my works ye mighty and despair',
       summary_metadata=metadata)
   value = self._value_from_op(op)
   assert value.HasField('tensor'), value
   self._assert_noop(value)
Example #17
  def test_scalar(self):
    old_op = tf.summary.scalar('important_constants', tf.constant(0x5f3759df))
    old_value = self._value_from_op(old_op)
    assert old_value.HasField('simple_value'), old_value
    new_value = data_compat.migrate_value(old_value)

    self.assertEqual('important_constants', new_value.tag)
    expected_metadata = scalar_metadata.create_summary_metadata(
        display_name='important_constants',
        description='')
    self.assertEqual(expected_metadata, new_value.metadata)
    self.assertTrue(new_value.HasField('tensor'))
    data = tf.make_ndarray(new_value.tensor)
    self.assertEqual((), data.shape)
    low_precision_value = np.array(0x5f3759df).astype('float32').item()
    self.assertEqual(low_precision_value, data.item())
Example #18
    def compute_and_check_summary_pb(self,
                                     name,
                                     images,
                                     max_outputs=3,
                                     images_tensor=None,
                                     feed_dict=None):
        """Use both `op` and `pb` to get a summary, asserting equality.

    Returns:
      a `Summary` protocol buffer
    """
        if images_tensor is None:
            images_tensor = tf.cast(tf.constant(images), tf.uint8)
        op = summary.op(name, images_tensor, max_outputs=max_outputs)
        pb = summary.pb(name, images, max_outputs=max_outputs)
        pb_via_op = self.pb_via_op(op, feed_dict=feed_dict)
        self.assertProtoEquals(pb, pb_via_op)
        return pb
Example #19
    def compute_and_check_summary_pb(self,
                                     name,
                                     audio,
                                     max_outputs=3,
                                     display_name=None,
                                     description=None,
                                     audio_tensor=None,
                                     feed_dict=None):
        """Use both `op` and `pb` to get a summary, asserting validity.

    "Validity" means that the `op` and `pb` functions must return the
    same protobufs, and also that each encoded audio value appears to be
    a valid WAV file. If either of these conditions fails, the test will
    immediately fail. Otherwise, the valid protobuf will be returned.

    Returns:
      A `Summary` protocol buffer.
    """
        if audio_tensor is None:
            audio_tensor = tf.constant(audio)
        op = summary.op(name,
                        audio_tensor,
                        self.samples_per_second,
                        max_outputs=max_outputs,
                        display_name=display_name,
                        description=description)
        pb = summary.pb(name,
                        audio,
                        self.samples_per_second,
                        max_outputs=max_outputs,
                        display_name=display_name,
                        description=description)
        pb_via_op = self.pb_via_op(op, feed_dict=feed_dict)
        self.assertProtoEquals(pb, pb_via_op)
        audios = tf.make_ndarray(pb.value[0].tensor)[:, 0].tolist()
        invalid_audios = [x for x in audios if not x.startswith(b'RIFF')]
        self.assertFalse(invalid_audios)
        return pb
Example #20
def op(scalars_layout, collections=None):
  """Creates a summary that contains a layout.

  When users navigate to the custom scalars dashboard, they will see a layout
  based on the proto provided to this function.

  Args:
    scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
        layout.
    collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[GraphKeys.SUMMARIES]`.

  Returns:
    A tensor summary op that writes the layout to disk.
  """
  assert isinstance(scalars_layout, layout_pb2.Layout)
  return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG,
                                   tensor=tf.constant(
                                       scalars_layout.SerializeToString(),
                                       dtype=tf.string),
                                   collections=collections,
                                   summary_metadata=_create_summary_metadata())
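A caller might assemble the layout proto along these lines; the category title, chart title, and tag regex are illustrative, and the structure assumes the layout_pb2 message definitions from the custom_scalar plugin:

from tensorboard.plugins.custom_scalar import layout_pb2

# Hypothetical layout: one category containing a single multi-line chart
# that collects every tag starting with 'loss'.
layout = layout_pb2.Layout(category=[
    layout_pb2.Category(
        title='losses',
        chart=[
            layout_pb2.Chart(
                title='all losses',
                multiline=layout_pb2.MultilineChartContent(tag=[r'loss.*'])),
        ]),
])
layout_summary_op = op(layout)  # the function defined above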
Example #21
    def _test_dimensions(self, alpha=False, static_dimensions=True):
        if not alpha:
            images = self.images
            channel_count = 3
        else:
            images = self.images_with_alpha
            channel_count = 4

        if static_dimensions:
            images_tensor = tf.constant(images, dtype=tf.uint8)
            feed_dict = {}
        else:
            images_tensor = tf.placeholder(tf.uint8)
            feed_dict = {images_tensor: images}

        pb = self.compute_and_check_summary_pb('mona_lisa',
                                               images,
                                               images_tensor=images_tensor,
                                               feed_dict=feed_dict)
        self.assertEqual(1, len(pb.value))
        result = pb.value[0].tensor.string_val

        # Check annotated dimensions.
        self.assertEqual(tf.compat.as_bytes(str(self.image_width)), result[0])
        self.assertEqual(tf.compat.as_bytes(str(self.image_height)), result[1])

        # Check actual image dimensions.
        images = result[2:]
        with tf.Session() as sess:
            placeholder = tf.placeholder(tf.string)
            decoder = tf.image.decode_png(placeholder)
            for image in images:
                decoded = sess.run(decoder, feed_dict={placeholder: image})
                self.assertEqual(
                    (self.image_height, self.image_width, channel_count),
                    decoded.shape)
Example #22
 def test_requires_rank_3_in_op(self):
     with six.assertRaisesRegex(self, ValueError, 'must have rank 3'):
         summary.op('k488', tf.constant([[1, 2, 3], [4, 5, 6]]), 44100)
Example #23
    def testScalarsRealistically(self):
        """Test accumulator by writing values and then reading them."""
        def FakeScalarSummary(tag, value):
            value = tf.Summary.Value(tag=tag, simple_value=value)
            summary = tf.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), 'values_dir')
        if tf.gfile.IsDirectory(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MkDir(directory)

        writer = tf.summary.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        writer.add_graph(graph)
        meta_graph_def = tf.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        run_metadata = tf.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = 'test device'
        writer.add_run_metadata(run_metadata, 'test run')

        # Write a bunch of events using the writer.
        for i in xrange(30):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(), {
                ea.SCALARS: ['id', 'sq'],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ['test run'],
            })
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Example #24
def start_runs(logdir,
               steps,
               run_name,
               thresholds,
               mask_every_other_prediction=False):
    """Generate a PR curve with precision and recall evenly weighted.

  Arguments:
    logdir: The directory into which to store all the runs' data.
    steps: The number of steps to run for.
    run_name: The name of the run.
    thresholds: The number of thresholds to use for PR curves.
    mask_every_other_prediction: Whether to mask every other prediction by
      alternating weights between 0 and 1.
  """
    tf.reset_default_graph()
    tf.set_random_seed(42)

    # Create a normal distribution layer used to generate true color labels.
    distribution = tf.distributions.Normal(loc=0., scale=142.)

    # Sample the distribution to generate colors. Let's generate different
    # numbers of each color. The first dimension is the count of examples.

    # The calls to sample() are given fixed random seed values that are "magic"
    # in that they correspond to the default seeds for those ops when the PR
    # curve test (which depends on this code) was written. We've pinned these
    # instead of continuing to use the defaults since the defaults are based on
    # node IDs from the sequence of nodes added to the graph, which can silently
    # change when this code or any TF op implementations it uses are modified.

    # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.

    # Generate reds.
    number_of_reds = 100
    true_reds = tf.clip_by_value(
        tf.concat([
            255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
            tf.abs(distribution.sample([number_of_reds, 2], seed=34))
        ],
                  axis=1), 0, 255)

    # Generate greens.
    number_of_greens = 200
    true_greens = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
            255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
            tf.abs(distribution.sample([number_of_greens, 1], seed=105))
        ],
                  axis=1), 0, 255)

    # Generate blues.
    number_of_blues = 150
    true_blues = tf.clip_by_value(
        tf.concat([
            tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
            255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153))
        ],
                  axis=1), 0, 255)

    # Assign each color a vector of 3 booleans based on its true label.
    labels = tf.concat([
        tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
        tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
        tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
    ],
                       axis=0)

    # We introduce 3 normal distributions. They are used to predict whether a
    # color falls under a certain class (based on distances from corners of the
    # color triangle). The distributions vary per color. We have the distributions
    # narrow over time.
    initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
    iteration = tf.placeholder(tf.int32, shape=[])
    red_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[0] - iteration,
                      dtype=tf.float32))
    green_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[1] - iteration,
                      dtype=tf.float32))
    blue_predictor = tf.distributions.Normal(
        loc=0.,
        scale=tf.cast(initial_standard_deviations[2] - iteration,
                      dtype=tf.float32))

    # Make predictions (assign 3 probabilities to each color based on each color's
    # distance to each of the 3 corners). We seek double the area in the right
    # tail of the normal distribution.
    examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
    probabilities_colors_are_red = (1 - red_predictor.cdf(
        tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2
    probabilities_colors_are_green = (1 - green_predictor.cdf(
        tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2
    probabilities_colors_are_blue = (1 - blue_predictor.cdf(
        tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2

    predictions = (probabilities_colors_are_red,
                   probabilities_colors_are_green,
                   probabilities_colors_are_blue)

    # This is the crucial piece. We write data required for generating PR curves.
    # We create 1 summary per class because we create 1 PR curve per class.
    for i, color in enumerate(('red', 'green', 'blue')):
        description = (
            'The probabilities used to create this PR curve are '
            'generated from a normal distribution. Its standard '
            'deviation is initially %0.0f and decreases over time.' %
            initial_standard_deviations[i])

        weights = None
        if mask_every_other_prediction:
            # Assign a weight of 0 to every even-indexed prediction. Odd-indexed
            # predictions are assigned a default weight of 1.
            consecutive_indices = tf.reshape(tf.range(tf.size(predictions[i])),
                                             tf.shape(predictions[i]))
            weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)

        summary.op(name=color,
                   labels=labels[:, i],
                   predictions=predictions[i],
                   num_thresholds=thresholds,
                   weights=weights,
                   display_name='classifying %s' % color,
                   description=description)
    merged_summary_op = tf.summary.merge_all()
    events_directory = os.path.join(logdir, run_name)
    sess = tf.Session()
    writer = tf.summary.FileWriter(events_directory, sess.graph)

    for step in xrange(steps):
        feed_dict = {
            iteration: step,
        }
        merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
        writer.add_summary(merged_summary, step)

    writer.close()
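An illustrative invocation, writing one unmasked and one masked run into the same logdir; the paths and counts are placeholders, and since the function also reads FLAGS.steps, this sketch assumes the module's flags have already been parsed:

start_runs(logdir='/tmp/pr_curve_demo', steps=10,
           run_name='colors', thresholds=50)
start_runs(logdir='/tmp/pr_curve_demo', steps=10,
           run_name='colors_masked', thresholds=50,
           mask_every_other_prediction=True)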
Example #25
 def when_empty():
     return tf.constant([], shape=(0, 3), dtype=tf.float64)
Example #26
 def test_text(self):
   op = tf.summary.text('lorem_ipsum', tf.constant('dolor sit amet'))
   value = self._value_from_op(op)
   assert value.HasField('tensor'), value
   self._assert_noop(value)
Example #27
    def test_raw_data(self):
        # We pass these raw counts and precision/recall values.
        name = 'foo'
        true_positive_counts = [75, 64, 21, 5, 0]
        false_positive_counts = [150, 105, 18, 0, 0]
        true_negative_counts = [0, 45, 132, 150, 150]
        false_negative_counts = [0, 11, 54, 70, 75]
        precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
        recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
        num_thresholds = 5
        display_name = 'some_raw_values'
        description = 'We passed raw values into a summary op.'

        op = summary.raw_data_op(
            name=name,
            true_positive_counts=tf.constant(true_positive_counts),
            false_positive_counts=tf.constant(false_positive_counts),
            true_negative_counts=tf.constant(true_negative_counts),
            false_negative_counts=tf.constant(false_negative_counts),
            precision=tf.constant(precision),
            recall=tf.constant(recall),
            num_thresholds=num_thresholds,
            display_name=display_name,
            description=description)
        pb_via_op = self.normalize_summary_pb(self.pb_via_op(op))

        # Call the corresponding method that is decoupled from TensorFlow.
        pb = self.normalize_summary_pb(
            summary.raw_data_pb(name=name,
                                true_positive_counts=true_positive_counts,
                                false_positive_counts=false_positive_counts,
                                true_negative_counts=true_negative_counts,
                                false_negative_counts=false_negative_counts,
                                precision=precision,
                                recall=recall,
                                num_thresholds=num_thresholds,
                                display_name=display_name,
                                description=description))

        # The 2 methods above should write summaries with the same data.
        self.assertProtoEquals(pb, pb_via_op)

        # Test the metadata.
        summary_metadata = pb.value[0].metadata
        self.assertEqual('some_raw_values', summary_metadata.display_name)
        self.assertEqual('We passed raw values into a summary op.',
                         summary_metadata.summary_description)
        self.assertEqual(metadata.PLUGIN_NAME,
                         summary_metadata.plugin_data.plugin_name)

        plugin_data = metadata.parse_plugin_metadata(
            summary_metadata.plugin_data.content)
        self.assertEqual(5, plugin_data.num_thresholds)

        # Test the summary contents.
        values = tf.make_ndarray(pb.value[0].tensor)
        self.verify_float_arrays_are_equal(
            [
                [75.0, 64.0, 21.0, 5.0, 0.0],  # True positives.
                [150.0, 105.0, 18.0, 0.0, 0.0],  # False positives.
                [0.0, 45.0, 132.0, 150.0, 150.0],  # True negatives.
                [0.0, 11.0, 54.0, 70.0, 75.0],  # False negatives.
                [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0],  # Precision.
                [1.0, 0.8533334, 0.28, 0.0666667, 0.0],  # Recall.
            ],
            values)
Example #28
 def test_non_string_value_in_op(self):
     with six.assertRaisesRegex(self, Exception,
                                r'must be of type <dtype: \'string\'>'):
         with tf.Session() as sess:
             sess.run(summary.op('so', tf.constant(5)))
Example #29
 def test_requires_rank_4_in_op(self):
     with six.assertRaisesRegex(self, ValueError, 'must have rank 4'):
         summary.op('mona_lisa', tf.constant([[1, 2, 3], [4, 5, 6]]))
Example #30
def run(logdir, session_id, hparams, group_name):
    """Runs a temperature simulation.

  This will simulate an object at temperature `initial_temperature`
  sitting at rest in a large room at temperature `ambient_temperature`.
  The object has some intrinsic `heat_coefficient`, which indicates
  how much thermal conductivity it has: for instance, metals have high
  thermal conductivity, while the thermal conductivity of water is low.

  Over time, the object's temperature will adjust to match the
  temperature of its environment. We'll track the object's temperature,
  how far it is from the room's temperature, and how much it changes at
  each time step.

  Arguments:
    logdir: the top-level directory into which to write summary data
    session_id: an id for the session.
    hparams: A dictionary mapping a hyperparameter name to its value.
    group_name: an id for the session group this session belongs to.
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    initial_temperature = hparams['initial_temperature']
    ambient_temperature = hparams['ambient_temperature']
    heat_coefficient = hparams['heat_coefficient']
    session_dir = os.path.join(logdir, session_id)
    writer = tf.summary.FileWriter(session_dir)
    writer.add_summary(
        summary.session_start_pb(hparams=hparams, group_name=group_name))
    writer.flush()
    with tf.name_scope('temperature'):
        # Create a mutable variable to hold the object's temperature, and
        # create a scalar summary to track its value over time. The name of
        # the summary will appear as "temperature/current" due to the
        # name-scope above.
        temperature = tf.Variable(tf.constant(initial_temperature),
                                  name='temperature')
        scalar_summary.op('current',
                          temperature,
                          display_name='Temperature',
                          description='The temperature of the object under '
                          'simulation, in Kelvins.')

        # Compute how much the object's temperature differs from that of its
        # environment, and track this, too: likewise, as
        # "temperature/difference_to_ambient".
        ambient_difference = temperature - ambient_temperature
        scalar_summary.op(
            'difference_to_ambient',
            ambient_difference,
            display_name='Difference to ambient temperature',
            description=('The difference between the ambient '
                         'temperature and the temperature of the '
                         'object under simulation, in Kelvins.'))

    # Newton suggested that the rate of change of the temperature of an
    # object is directly proportional to this `ambient_difference` above,
    # where the proportionality constant is what we called the heat
    # coefficient. But in real life, not everything is quite so clean, so
    # we'll add in some noise. (The value of 50 is arbitrary, chosen to
    # make the data look somewhat interesting. :-) )
    noise = 50 * tf.random_normal([])
    delta = -heat_coefficient * (ambient_difference + noise)
    scalar_summary.op(
        'delta',
        delta,
        description='The change in temperature from the previous '
        'step, in Kelvins.')

    # Collect all the scalars that we want to keep track of.
    summ = tf.summary.merge_all()

    # Now, augment the current temperature by this delta that we computed,
    # blocking the assignment on summary collection to avoid race conditions
    # and ensure that the summary always reports the pre-update value.
    with tf.control_dependencies([summ]):
        update_step = temperature.assign_add(delta)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for step in xrange(STEPS):
        # By asking TensorFlow to compute the update step, we force it to
        # change the value of the temperature variable. We don't actually
        # care about this value, so we discard it; instead, we grab the
        # summary data computed along the way.
        (s, _) = sess.run([summ, update_step])
        writer.add_summary(s, global_step=step)
    writer.add_summary(summary.session_end_pb(api_pb2.STATUS_SUCCESS))
    writer.close()
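A single simulation session might be launched like this; the hparams keys must match those read inside run(), while the values, ids, and logdir are illustrative:

# Hypothetical call; only the dictionary keys are fixed by run() itself.
hparams = {
    'initial_temperature': 270.0,
    'ambient_temperature': 300.0,
    'heat_coefficient': 0.01,
}
run(logdir='/tmp/hparams_demo', session_id='session_1',
    hparams=hparams, group_name='group_1')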