Example #1
  def testNewStyleAudioSummary(self):
    """Verify processing of tensorboard.plugins.audio.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.random_normal(shape=[5, 441, 2])
      with tf.name_scope('1'):
        audio_summary.op('one', ipt, sample_rate=44100, max_outputs=1)
      with tf.name_scope('2'):
        audio_summary.op('two', ipt, sample_rate=44100, max_outputs=2)
      with tf.name_scope('3'):
        audio_summary.op('three', ipt, sample_rate=44100, max_outputs=3)
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/one/audio_summary',
        u'2/two/audio_summary',
        u'3/three/audio_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example #2
def scalar(name, tensor, tag=None, description=None, step=None):
  """Create a scalar summary op.

  Arguments:
    name: A name for the generated summary node.
    tensor: A real numeric rank-0 `Tensor`. Must have `dtype` castable
      to `float32`.
    tag: Optional rank-0 string `Tensor` to identify this summary in
      TensorBoard.  Defaults to the generated name of this op.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    step: Optional `int64` monotonic step variable, which defaults
      to `tf.train.get_global_step`.

  Returns:
    A TensorFlow summary op.
  """
  # TODO(nickfelt): make tag param work
  summary_metadata = metadata.create_summary_metadata(
      display_name=None, description=description)
  with tf.name_scope(name, values=[tensor, tag, step]) as scope:
    with tf.control_dependencies([tf.assert_scalar(tensor)]):
      return tf.contrib.summary.generic(name=scope,
                                        tensor=tf.cast(tensor, tf.float32),
                                        metadata=summary_metadata,
                                        step=step)
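A minimal usage sketch for this op, assuming TF 1.x graph mode and the tf.contrib.summary writer machinery it targets (the log directory and loss value below are illustrative, not from the original source):

import tensorflow as tf

writer = tf.contrib.summary.create_file_writer('/tmp/scalar_demo')  # assumed logdir
with writer.as_default(), tf.contrib.summary.always_record_summaries():
  loss = tf.constant(0.25)  # stand-in for a real loss tensor
  write_op = scalar('loss', loss, step=tf.constant(0, dtype=tf.int64))

with tf.Session() as sess:
  sess.run(tf.contrib.summary.summary_writer_initializer_op())
  sess.run(write_op)  # writes one scalar event at step 0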
Example #3
def bisine_wave(frequency):
    """Emit two sine waves, in stereo at different octaves."""
    #
    # We can first use our existing sine-wave generator to create two
    # different waves.
    f_hi = frequency
    f_lo = frequency / 2.0
    with tf.name_scope('hi'):
        sine_hi = sine_wave(f_hi)
    with tf.name_scope('lo'):
        sine_lo = sine_wave(f_lo)
    #
    # Now, we have two tensors of shape [1, _samples(), 1]. By concatenating
    # them along axis 2, we get a tensor of shape [1, _samples(), 2]---a
    # stereo waveform.
    return tf.concat([sine_lo, sine_hi], axis=2)
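This helper relies on a `sine_wave` function and a `_samples()` sample-count helper defined elsewhere in the demo. A plausible sketch of both, with an assumed sample rate and clip duration:

import math

import tensorflow as tf

SAMPLE_RATE = 44100  # assumed sample rate, in Hz
DURATION = 2.0       # assumed clip duration, in seconds

def _samples():
    """Compute how many samples should be included in each waveform."""
    return int(SAMPLE_RATE * DURATION)

def sine_wave(frequency):
    """Emit a sine wave at the given frequency, shaped [1, _samples(), 1]."""
    xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
    ts = xs / SAMPLE_RATE  # time, in seconds, of each sample
    return tf.sin(2 * math.pi * frequency * ts)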
Example #4
def op(name,
       data,
       display_name=None,
       description=None,
       collections=None):
  """Create a legacy scalar summary op.

  Arguments:
    name: A unique name for the generated summary node.
    data: A real numeric rank-0 `Tensor`. Must have `dtype` castable
      to `float32`.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  with tf.name_scope(name):
    with tf.control_dependencies([tf.assert_scalar(data)]):
      return tf.summary.tensor_summary(name='scalar_summary',
                                       tensor=tf.cast(data, tf.float32),
                                       collections=collections,
                                       summary_metadata=summary_metadata)
Example #5
def convolve(image, pixel_filter, channels=3, name=None):
  """Perform a 2D pixel convolution on the given image.

  Arguments:
    image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
      where `channels` is the third argument to this function and the
      first two dimensions are arbitrary.
    pixel_filter: A 2D `Tensor`, representing pixel weightings for the
      kernel. This will be used to create a 4D kernel---the extra two
      dimensions are for channels (see `tf.nn.conv2d` documentation),
      and the kernel will be constructed so that the channels are
      independent: each channel only observes the data from neighboring
      pixels of the same channel.
    channels: An integer representing the number of channels in the
      image (e.g., 3 for RGB).
    name: An optional name for the operation. Defaults to `'convolve'`.

  Returns:
    A 3D `float32` `Tensor` of the same shape as the input.
  """
  with tf.name_scope(name, 'convolve'):
    tf.assert_type(image, tf.float32)
    channel_filter = tf.eye(channels)
    filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *
               tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))
    result_batch = tf.nn.conv2d(tf.stack([image]),  # batch
                                filter=filter_,
                                strides=[1, 1, 1, 1],
                                padding='SAME')
    return result_batch[0]  # unbatch
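A usage sketch: blurring a random RGB image with a normalized 3x3 box kernel (the shapes and values here are illustrative):

import tensorflow as tf

image = tf.random_uniform([64, 64, 3], dtype=tf.float32)  # [height, width, channels]
box_kernel = tf.ones([3, 3]) / 9.0  # uniform box blur, normalized to sum to 1
blurred = convolve(image, box_kernel, channels=3)

with tf.Session() as sess:
  print(sess.run(blurred).shape)  # (64, 64, 3): same shape as the input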
Example #6
def run_all(logdir):
    tf.reset_default_graph()
    step_placeholder = tf.placeholder(tf.int32)

    with tf.name_scope('simple_example'):
        simple_example(step_placeholder)
    with tf.name_scope('markdown_table'):
        markdown_table(step_placeholder)
    with tf.name_scope('higher_order_tensors'):
        higher_order_tensors(step_placeholder)
    all_summaries = tf.summary.merge_all()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir)
        writer.add_graph(sess.graph)
        for step in xrange(STEPS):
            s = sess.run(all_summaries, feed_dict={step_placeholder: step})
            writer.add_summary(s, global_step=step)
        writer.close()
Example #7
def _buckets(data, bucket_count=None):
    """Create a TensorFlow op to group data into histogram buckets.

  Arguments:
    data: A `Tensor` of any shape. Must be castable to `float64`.
    bucket_count: Optional positive `int` or scalar `int32` `Tensor`.
  Returns:
    A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
    a triple `[left_edge, right_edge, count]` for a single bucket.
    The value of `k` is `bucket_count` in general, `1` if all input
    values are equal, or `0` if the input is empty.
  """
    if bucket_count is None:
        bucket_count = DEFAULT_BUCKET_COUNT
    with tf.name_scope('buckets', values=[data, bucket_count]), \
         tf.control_dependencies([tf.assert_scalar(bucket_count),
                                  tf.assert_type(bucket_count, tf.int32)]):
        data = tf.reshape(data, shape=[-1])  # flatten
        data = tf.cast(data, tf.float64)
        is_empty = tf.equal(tf.size(data), 0)

        def when_empty():
            return tf.constant([], shape=(0, 3), dtype=tf.float64)

        def when_nonempty():
            min_ = tf.reduce_min(data)
            max_ = tf.reduce_max(data)
            range_ = max_ - min_
            is_singular = tf.equal(range_, 0)

            def when_nonsingular():
                bucket_width = range_ / tf.cast(bucket_count, tf.float64)
                offsets = data - min_
                bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                                         dtype=tf.int32)
                clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
                one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
                bucket_counts = tf.cast(tf.reduce_sum(one_hots, axis=0),
                                        dtype=tf.float64)
                edges = tf.lin_space(min_, max_, bucket_count + 1)
                left_edges = edges[:-1]
                right_edges = edges[1:]
                return tf.transpose(
                    tf.stack([left_edges, right_edges, bucket_counts]))

            def when_singular():
                center = min_
                bucket_starts = tf.stack([center - 0.5])
                bucket_ends = tf.stack([center + 0.5])
                bucket_counts = tf.stack([tf.cast(tf.size(data), tf.float64)])
                return tf.transpose(
                    tf.stack([bucket_starts, bucket_ends, bucket_counts]))

            return tf.cond(is_singular, when_singular, when_nonsingular)

        return tf.cond(is_empty, when_empty, when_nonempty)
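The output contract can be sanity-checked on a tiny input (a hand-checkable example, not part of the original module):

import tensorflow as tf

data = tf.constant([0.0, 0.5, 1.0, 1.0])
buckets = _buckets(data, bucket_count=tf.constant(2, dtype=tf.int32))

with tf.Session() as sess:
    print(sess.run(buckets))
    # Two rows of [left_edge, right_edge, count]:
    #   [[0.0, 0.5, 1.0],
    #    [0.5, 1.0, 3.0]]
    # (0.0 falls in the first bucket; 0.5 and both 1.0s in the second,
    # since the maximum is clamped into the last bucket.)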
Example #8
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
    """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(images, 4),
                                  tf.assert_type(images, tf.uint8),
                                  tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png,
                                   limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        image_shape = tf.shape(images)
        dimensions = tf.stack(
            [tf.as_string(image_shape[2], name='width'),
             tf.as_string(image_shape[1], name='height')],
            name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
Example #9
  def testNewStyleImageSummary(self):
    """Verify processing of tensorboard.plugins.image.summary."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = tf.summary.FileWriter(self.get_temp_dir())
    writer.event_writer = event_sink
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here due to tag reuse. Keying summaries off the
      # TF node name instead allows the same summary name to be reused under
      # different name scopes.
      with tf.name_scope('1'):
        image_summary.op('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        image_summary.op('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        image_summary.op('images', ipt, max_outputs=3)
      merged = tf.summary.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image_summary',
        u'2/images/image_summary',
        u'3/images/image_summary',
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.TENSORS: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example #10
def op(name,
       data,
       bucket_count=None,
       display_name=None,
       description=None,
       collections=None):
    """Create a histogram summary op.

  Arguments:
    name: A unique name for the generated summary node.
    data: A `Tensor` of any shape. Must be castable to `float64`.
    bucket_count: Optional positive `int`. The output will have this
      many buckets, except in two edge cases. If there is no data, then
      there are no buckets. If there is data but all points have the
      same value, then there is one bucket whose left and right
      endpoints are the same.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name):
        tensor = _buckets(data, bucket_count=bucket_count)
        return tf.summary.tensor_summary(name='histogram_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
Example #11
def streaming_op(name,
                 labels,
                 predictions,
                 num_thresholds=None,
                 weights=None,
                 metrics_collections=None,
                 updates_collections=None,
                 display_name=None,
                 description=None):
    """Computes a precision-recall curve summary across batches of data.

  This function is similar to op() above, but can be used to compute the PR
  curve across multiple batches of labels and predictions, in the same style
  as the metrics found in tf.metrics.

  This function creates multiple local variables for storing true positives,
  true negatives, etc., accumulated over each batch of data, and uses these local
  variables for computing the final PR curve summary. These variables can be
  updated with the returned update_op.

  Args:
    name: A tag attached to the summary. Used by TensorBoard for organization.
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    num_thresholds: The number of evenly spaced thresholds to generate for
      computing the PR curve. Defaults to 201.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.

  Returns:
    pr_curve: A string `Tensor` containing a single value: the
      serialized PR curve Tensor summary. The summary contains a
      float32 `Tensor` of dimension (6, num_thresholds). The first
      dimension (of length 6) is of the order: true positives, false
      positives, true negatives, false negatives, precision, recall.
    update_op: An operation that updates the summary with the latest data.
  """
    if num_thresholds is None:
        num_thresholds = _DEFAULT_NUM_THRESHOLDS

    thresholds = [i / float(num_thresholds - 1) for i in range(num_thresholds)]

    with tf.name_scope(name, values=[labels, predictions, weights]):
        tp, update_tp = tf.metrics.true_positives_at_thresholds(
            labels=labels,
            predictions=predictions,
            thresholds=thresholds,
            weights=weights)
        fp, update_fp = tf.metrics.false_positives_at_thresholds(
            labels=labels,
            predictions=predictions,
            thresholds=thresholds,
            weights=weights)
        tn, update_tn = tf.metrics.true_negatives_at_thresholds(
            labels=labels,
            predictions=predictions,
            thresholds=thresholds,
            weights=weights)
        fn, update_fn = tf.metrics.false_negatives_at_thresholds(
            labels=labels,
            predictions=predictions,
            thresholds=thresholds,
            weights=weights)

        def compute_summary(tp, fp, tn, fn, collections):
            precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
            recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)

            return _create_tensor_summary(name, tp, fp, tn, fn, precision,
                                          recall, num_thresholds, display_name,
                                          description, collections)

        pr_curve = compute_summary(tp, fp, tn, fn, metrics_collections)
        update_op = tf.group(update_tp, update_fp, update_tn, update_fn)
        if updates_collections:
            for collection in updates_collections:
                tf.add_to_collection(collection, update_op)

        return pr_curve, update_op
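As with any tf.metrics-style pair, the intended pattern is to run `update_op` once per batch and fetch `pr_curve` at the end. A hedged sketch (the placeholder shapes and the `batches` iterable are assumptions for illustration):

import tensorflow as tf

labels = tf.placeholder(tf.bool, shape=[None])
predictions = tf.placeholder(tf.float32, shape=[None])
pr_curve, update_op = streaming_op('pr_curve', labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # tf.metrics state is local
    for batch_labels, batch_predictions in batches:  # `batches`: assumed data source
        sess.run(update_op, feed_dict={labels: batch_labels,
                                       predictions: batch_predictions})
    serialized = sess.run(pr_curve)  # summary reflecting all batches seen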
Example #12
def run_sobel(logdir, verbose=False):
  """Run a Sobel edge detection demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: sobel')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  kernel_radius = tf.placeholder(shape=(), dtype=tf.int32)

  with tf.name_scope('horizontal_kernel'):
    kernel_side_length = kernel_radius * 2 + 1
    # Drop off influence for pixels further away from the center.
    weighting_kernel = (
        1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
    differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
    horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                  tf.expand_dims(differentiation_kernel, 0))
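    # For example, with kernel_radius == 1, weighting_kernel is [0., 1., 0.]
    # and differentiation_kernel is [-1., 0., 1.], so their outer product is
    #   [[ 0.,  0.,  0.],
    #    [-1.,  0.,  1.],
    #    [ 0.,  0.,  0.]]
    # i.e., a falloff-weighted horizontal derivative. Larger radii give a
    # smoother falloff away from the center row.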

  with tf.name_scope('vertical_kernel'):
    vertical_kernel = tf.transpose(horizontal_kernel)

  float_image = tf.cast(image, tf.float32)
  dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
  dy = convolve(float_image, vertical_kernel, name='convolve_dy')
  gradient_magnitude = tf.norm([dx, dy], axis=0, name='gradient_magnitude')
  with tf.name_scope('normalized_gradient'):
    normalized_gradient = gradient_magnitude / tf.reduce_max(gradient_magnitude)
  with tf.name_scope('output_image'):
    output_image = tf.cast(255 * normalized_gradient, tf.uint8)

  summ = image_summary.op(
      'sobel', tf.stack([output_image]),
      display_name='Sobel edge detection',
      description=(u'Demonstration of [Sobel edge detection]. The step '
                   'parameter adjusts the radius of the kernel. '
                   'The kernel can be of arbitrary size, and considers '
                   u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
                   # (that says ``$\ell_2$-linear falloff'')
                   'Edge detection is done on a per-channel basis, so '
                   'you can observe which edges are “mostly red '
                   'edges,” for instance.\n\n'
                   'For practical edge detection, a small kernel '
                   '(usually not more than *r*=2) is best.\n\n'
                   '[Sobel edge detection]: %s\n\n'
                   "%s"
                   % ('https://en.wikipedia.org/wiki/Sobel_operator',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info("--- sobel: step: %s" % step)
      feed_dict = {kernel_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
Example #13
def run_box_to_gaussian(logdir, verbose=False):
  """Run a box-blur-to-Gaussian-blur demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: box_to_gaussian')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  blur_radius = tf.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('filter'):
    blur_side_length = blur_radius * 2 + 1
    pixel_filter = tf.ones((blur_side_length, blur_side_length))
    pixel_filter = (pixel_filter
                    / tf.cast(tf.size(pixel_filter), tf.float32))  # normalize

  iterations = 4
  images = [tf.cast(image, tf.float32) / 255.0]
  for _ in xrange(iterations):
    images.append(convolve(images[-1], pixel_filter))
  with tf.name_scope('convert_to_uint8'):
    images = tf.stack(
        [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
         for image_ in images])

  summ = image_summary.op(
      'box_to_gaussian', images, max_outputs=iterations,
      display_name='Gaussian blur as a limit process of box blurs',
      description=('Demonstration of forming a Gaussian blur by '
                   'composing box blurs, each of which can be expressed '
                   'as a 2D convolution.\n\n'
                   'A Gaussian blur is formed by convolving a Gaussian '
                   'kernel over an image. But a Gaussian kernel is '
                   'itself the limit of convolving a constant kernel '
                   'with itself many times. Thus, while applying '
                   'a box-filter convolution just once produces '
                   'results that are noticeably different from those '
                   'of a Gaussian blur, repeating the same convolution '
                   'just a few times causes the result to rapidly '
                   'converge to an actual Gaussian blur.\n\n'
                   'Here, the step value controls the blur radius, '
                   'and the image sample controls the number of times '
                   'that the convolution is applied (plus one). '
                   'So, when *sample*=1, the original image is shown; '
                   '*sample*=2 shows a box blur; and a hypothetical '
                   '*sample*=∞ would show a true Gaussian blur.\n\n'
                   'This is one ingredient in a recipe to compute very '
                   'fast Gaussian blurs. The other pieces require '
                   'special treatment for the box blurs themselves '
                   '(decomposition to dual one-dimensional box blurs, '
                   'each of which is computed with a sliding window); '
                   'we don’t perform those optimizations here.\n\n'
                   '[Here are some slides describing the full process.]'
                   '(%s)\n\n'
                   '%s'
                   % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info('--- box_to_gaussian: step: %s' % step)
      feed_dict = {blur_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
Example #14
def run():
    """Run custom scalar demo and generate event files."""
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'], )),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ], )),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine', r'trigFunctions/sine'
                            ], )),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'], )),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        for i in xrange(42):
            summary = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(summary, global_step=i)
Example #15
def run(logdir, run_name, wave_name, wave_constructor):
    """Generate wave data of the given form.

  The provided function `wave_constructor` should accept a scalar tensor
  of type float32, representing the frequency (in Hz) at which to
  construct a wave, and return a tensor of shape [1, _samples(), `n`]
  representing audio data (for some number of channels `n`).

  Waves will be generated at frequencies ranging from A4 to A5.

  Arguments:
    logdir: the top-level directory into which to write summary data
    run_name: the name of this run; will be created as a subdirectory
      under logdir
    wave_name: the name of the wave being generated
    wave_constructor: see above
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    # On each step `i`, we'll set this placeholder to `i`. This allows us
    # to know "what time it is" at each step.
    step_placeholder = tf.placeholder(tf.float32, shape=[])

    # We want to linearly interpolate a frequency between A4 (440 Hz) and
    # A5 (880 Hz).
    with tf.name_scope('compute_frequency'):
        f_min = 440.0
        f_max = 880.0
        t = step_placeholder / (FLAGS.steps - 1)
        frequency = f_min * (1.0 - t) + f_max * t

    # Let's log this frequency, just so that we can make sure that it's as
    # expected.
    tf.summary.scalar('frequency', frequency)

    # Now, we pass this to the wave constructor to get our waveform. Doing
    # so within a name scope means that any summaries that the wave
    # constructor produces will be namespaced.
    with tf.name_scope(wave_name):
        waveform = wave_constructor(frequency)

    # We also have the opportunity to annotate each audio clip with a
    # label. This is a good place to include the frequency, because it'll
    # be visible immediately next to the audio clip.
    with tf.name_scope('compute_labels'):
        samples = tf.shape(waveform)[0]
        wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
        frequencies = tf.string_join([
            "*Frequency:* ",
            tf.tile([tf.as_string(frequency, precision=2)], [samples]),
            " Hz.",
        ])
        samples = tf.string_join([
            "*Sample:* ",
            tf.as_string(tf.range(samples) + 1),
            " of ",
            tf.as_string(samples),
            ".",
        ])
        labels = tf.string_join([wave_types, frequencies, samples],
                                separator=" ")

    # We can place a description next to the summary in TensorBoard. This
    # is a good place to explain what the summary represents, methodology
    # for creating it, etc. Let's include the source code of the function
    # that generated the wave.
    source = '\n'.join('    %s' % line.rstrip()
                       for line in inspect.getsourcelines(wave_constructor)[0])
    description = ("A wave of type `%r`, generated via:\n\n%s" %
                   (wave_name, source))

    # Here's the crucial piece: we interpret this result as audio.
    summary.op('waveform',
               waveform,
               FLAGS.sample_rate,
               labels=labels,
               display_name=wave_name,
               description=description)

    # Now, we can collect up all the summaries and begin the run.
    summ = tf.summary.merge_all()

    sess = tf.Session()
    writer = tf.summary.FileWriter(os.path.join(logdir, run_name))
    writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in xrange(FLAGS.steps):
        s = sess.run(summ, feed_dict={step_placeholder: float(step)})
        writer.add_summary(s, global_step=step)
    writer.close()
Example #16
def op(name,
       audio,
       sample_rate,
       labels=None,
       max_outputs=3,
       encoding=None,
       display_name=None,
       description=None,
       collections=None):
    """Create an audio summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    audio: A `Tensor` representing audio data with shape `[k, t, c]`,
      where `k` is the number of audio clips, `t` is the number of
      frames, and `c` is the number of channels. Elements should be
      floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
      be statically unknown (i.e., `None`).
    sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
      sample rate, in Hz. Must be positive.
    labels: Optional `string` `Tensor`, a vector whose length is the
      first dimension of `audio`, where `labels[i]` contains arbitrary
      textual information about `audio[i]`. (For instance, this could be
      some text that a TTS system was supposed to produce.) Markdown is
      supported. Contents should be UTF-8.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many audio clips will be emitted at each step. When more than
      `max_outputs` many clips are provided, the first `max_outputs`
      many clips will be used and the rest silently discarded.
    encoding: A constant `str` (not string tensor) indicating the
      desired encoding. You can choose any format you like, as long as
      it's "wav". Please see the "API compatibility note" below.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.

  API compatibility note: The default value of the `encoding`
  argument is _not_ guaranteed to remain unchanged across TensorBoard
  versions. In the future, we will by default encode as FLAC instead of
  as WAV. If the specific format is important to you, please provide a
  file format explicitly.
  """

    if display_name is None:
        display_name = name
    if encoding is None:
        encoding = 'wav'

    if encoding == 'wav':
        encoding = metadata.Encoding.Value('WAV')
        encoder = functools.partial(tf.contrib.ffmpeg.encode_audio,
                                    samples_per_second=sample_rate,
                                    file_format='wav')
    else:
        raise ValueError('Unknown encoding: %r' % encoding)

    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(audio, 3)]):
        limited_audio = audio[:max_outputs]
        encoded_audio = tf.map_fn(encoder,
                                  limited_audio,
                                  dtype=tf.string,
                                  name='encode_each_audio')
        if labels is None:
            limited_labels = tf.tile([''], tf.shape(limited_audio)[:1])
        else:
            limited_labels = labels[:max_outputs]
        tensor = tf.transpose(tf.stack([encoded_audio, limited_labels]))
        summary_metadata = metadata.create_summary_metadata(
            display_name=display_name,
            description=description,
            encoding=encoding)
        return tf.summary.tensor_summary(name='audio_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
Example #17
def op(name,
       labels,
       predictions,
       num_thresholds=None,
       weights=None,
       display_name=None,
       description=None,
       collections=None):
    """Create a PR curve summary op for a single binary classifier.

  Computes true/false positive/negative values for the given `predictions`
  against the ground truth `labels`, against a list of evenly distributed
  threshold values in `[0, 1]` of length `num_thresholds`.

  Each number in `predictions`, a float in `[0, 1]`, is compared with its
  corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn
  value at each threshold. This is then multiplied with `weights` which can be
  used to reweight certain values, or more commonly used for masking values.

  Args:
    name: A tag attached to the summary. Used by TensorBoard for organization.
    labels: The ground truth values. A Tensor of `bool` values with arbitrary
        shape.
    predictions: A float32 `Tensor` whose values are in the range `[0, 1]`.
        Dimensions must match those of `labels`.
    num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
        compute PR metrics for. Should be `>= 2`. This value should be a
        constant integer value, not a Tensor that stores an integer.
    weights: Optional float32 `Tensor`. Individual counts are multiplied by this
        value. This tensor must be either the same shape as or broadcastable to
        the `labels` tensor.
    display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A summary operation for use in a TensorFlow graph. The float32 tensor
    produced by the summary operation is of dimension (6, num_thresholds). The
    first dimension (of length 6) is of the order: true positives,
    false positives, true negatives, false negatives, precision, recall.

  """
    if num_thresholds is None:
        num_thresholds = _DEFAULT_NUM_THRESHOLDS

    if weights is None:
        weights = 1.0

    dtype = predictions.dtype

    with tf.name_scope(name, values=[labels, predictions, weights]):
        tf.assert_type(labels, tf.bool)
        # We cast to float to ensure we have 0.0 or 1.0.
        f_labels = tf.cast(labels, dtype)
        # Ensure predictions are all in range [0.0, 1.0].
        predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
        # Get weighted true/false labels.
        true_labels = f_labels * weights
        false_labels = (1.0 - f_labels) * weights

        # Before we begin, flatten predictions.
        predictions = tf.reshape(predictions, [-1])

        # Shape the labels so they are broadcast-able for later multiplication.
        true_labels = tf.reshape(true_labels, [-1, 1])
        false_labels = tf.reshape(false_labels, [-1, 1])

        # To compute TP/FP/TN/FN, we are measuring a binary classifier
        #   C(t) = (predictions >= t)
        # at each threshold 't'. So we have
        #   TP(t) = sum( C(t) * true_labels )
        #   FP(t) = sum( C(t) * false_labels )
        #
        # But, computing C(t) requires computation for each t. To make it fast,
        # observe that C(t) is a cumulative integral, and so if we have
        #   thresholds = [t_0, ..., t_{n-1}];  t_0 < ... < t_{n-1}
        # where n = num_thresholds, and if we can compute the bucket function
        #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
        # then we get
        #   C(t_i) = sum( B(j), j >= i )
        # which is the reversed cumulative sum in tf.cumsum().
        #
        # We can compute B(i) efficiently by taking advantage of the fact that
        # our thresholds are evenly distributed, in that
        #   width = 1.0 / (num_thresholds - 1)
        #   thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
        # Given a prediction value p, we can map it to its bucket by
        #   bucket_index(p) = floor( p * (num_thresholds - 1) )
        # so we can one-hot encode each prediction's bucket and sum the
        # weighted encodings to compute all bucket counts in one pass.

        # Compute the bucket indices for each prediction value.
        bucket_indices = tf.cast(tf.floor(predictions * (num_thresholds - 1)),
                                 tf.int32)

        # Bucket predictions.
        tp_buckets = tf.reduce_sum(
            tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels,
            axis=0)
        fp_buckets = tf.reduce_sum(
            tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels,
            axis=0)

        # Set up the cumulative sums to compute the actual metrics.
        tp = tf.cumsum(tp_buckets, reverse=True, name='tp')
        fp = tf.cumsum(fp_buckets, reverse=True, name='fp')
        # fn = sum(true_labels) - tp
        #    = sum(tp_buckets) - tp
        #    = tp[0] - tp
        # Similarly,
        # tn = fp[0] - fp
        tn = fp[0] - fp
        fn = tp[0] - tp

        precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
        recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)

        return _create_tensor_summary(name, tp, fp, tn, fn, precision, recall,
                                      num_thresholds, display_name,
                                      description, collections)
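The reversed-cumsum trick described in the comments above can be verified with plain NumPy (an illustrative check, not part of the original module):

import numpy as np

predictions = np.array([0.1, 0.4, 0.8])
true_labels = np.array([0.0, 1.0, 1.0])
num_thresholds = 3  # thresholds [0.0, 0.5, 1.0]

# Bucket the positive weight of each prediction.
bucket_indices = np.floor(predictions * (num_thresholds - 1)).astype(int)
tp_buckets = np.zeros(num_thresholds)
for index, label in zip(bucket_indices, true_labels):
    tp_buckets[index] += label

# The reversed cumulative sum yields TP(t) at every threshold at once.
tp = np.cumsum(tp_buckets[::-1])[::-1]
print(tp)  # [2. 1. 0.]

# Direct computation at each threshold agrees.
for threshold in (0.0, 0.5, 1.0):
    print(np.sum((predictions >= threshold) * true_labels))  # 2.0, 1.0, 0.0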
Example #18
def raw_data_op(name,
                true_positive_counts,
                false_positive_counts,
                true_negative_counts,
                false_negative_counts,
                precision,
                recall,
                num_thresholds=None,
                display_name=None,
                description=None,
                collections=None):
    """Create an op that collects data for visualizing PR curves.

  Unlike the op above, this one avoids computing precision, recall, and the
  intermediate counts. Instead, it accepts those tensors as arguments and
  relies on the caller to ensure that the calculations are correct (and the
  counts yield the provided precision and recall values).

  This op is useful when a caller seeks to compute precision and recall
  differently but still use the PR curves plugin.

  Args:
    name: A tag attached to the summary. Used by TensorBoard for organization.
    true_positive_counts: A rank-1 tensor of true positive counts. Must contain
        `num_thresholds` elements and be castable to float32. Values correspond
        to thresholds that increase from left to right (from 0 to 1).
    false_positive_counts: A rank-1 tensor of false positive counts. Must
        contain `num_thresholds` elements and be castable to float32. Values
        correspond to thresholds that increase from left to right (from 0 to 1).
    true_negative_counts: A rank-1 tensor of true negative counts. Must contain
        `num_thresholds` elements and be castable to float32. Values
        correspond to thresholds that increase from left to right (from 0 to 1).
    false_negative_counts: A rank-1 tensor of false negative counts. Must
        contain `num_thresholds` elements and be castable to float32. Values
        correspond to thresholds that increase from left to right (from 0 to 1).
    precision: A rank-1 tensor of precision values. Must contain
        `num_thresholds` elements and be castable to float32. Values correspond
        to thresholds that increase from left to right (from 0 to 1).
    recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
        elements and be castable to float32. Values correspond to thresholds
        that increase from left to right (from 0 to 1).
    num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
        compute PR metrics for. Should be `>= 2`. This value should be a
        constant integer value, not a Tensor that stores an integer.
    display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A summary operation for use in a TensorFlow graph. See docs for the `op`
    method for details on the float32 tensor produced by this summary.
  """
    with tf.name_scope(name,
                       values=[
                           true_positive_counts,
                           false_positive_counts,
                           true_negative_counts,
                           false_negative_counts,
                           precision,
                           recall,
                       ]):
        return _create_tensor_summary(name, true_positive_counts,
                                      false_positive_counts,
                                      true_negative_counts,
                                      false_negative_counts, precision, recall,
                                      num_thresholds, display_name,
                                      description, collections)
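A hedged usage sketch with precomputed, mutually consistent counts (the constants stand in for values produced by some external evaluation pipeline, and the logdir is illustrative):

import tensorflow as tf

summary_op = raw_data_op(
    name='precomputed_pr',
    true_positive_counts=tf.constant([10.0, 7.0, 0.0]),
    false_positive_counts=tf.constant([5.0, 1.0, 0.0]),
    true_negative_counts=tf.constant([0.0, 4.0, 5.0]),
    false_negative_counts=tf.constant([0.0, 3.0, 10.0]),
    precision=tf.constant([10.0 / 15.0, 7.0 / 8.0, 0.0]),
    recall=tf.constant([1.0, 0.7, 0.0]),
    num_thresholds=3)

with tf.Session() as sess, tf.summary.FileWriter('/tmp/pr_demo') as writer:
    writer.add_summary(sess.run(summary_op), global_step=0)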
Example #19
def run(logdir, session_id, hparams, group_name):
    """Runs a temperature simulation.

  This will simulate an object at temperature `initial_temperature`
  sitting at rest in a large room at temperature `ambient_temperature`.
  The object has some intrinsic `heat_coefficient`, which indicates
  how much thermal conductivity it has: for instance, metals have high
  thermal conductivity, while the thermal conductivity of water is low.

  Over time, the object's temperature will adjust to match the
  temperature of its environment. We'll track the object's temperature,
  how far it is from the room's temperature, and how much it changes at
  each time step.

  Arguments:
    logdir: the top-level directory into which to write summary data
    session_id: an id for the session.
    hparams: A dictionary mapping a hyperparameter name to its value.
    group_name: an id for the session group this session belongs to.
  """
    tf.reset_default_graph()
    tf.set_random_seed(0)

    initial_temperature = hparams['initial_temperature']
    ambient_temperature = hparams['ambient_temperature']
    heat_coefficient = hparams['heat_coefficient']
    session_dir = os.path.join(logdir, session_id)
    writer = tf.summary.FileWriter(session_dir)
    writer.add_summary(
        summary.session_start_pb(hparams=hparams, group_name=group_name))
    writer.flush()
    with tf.name_scope('temperature'):
        # Create a mutable variable to hold the object's temperature, and
        # create a scalar summary to track its value over time. The name of
        # the summary will appear as "temperature/current" due to the
        # name-scope above.
        temperature = tf.Variable(tf.constant(initial_temperature),
                                  name='temperature')
        scalar_summary.op('current',
                          temperature,
                          display_name='Temperature',
                          description='The temperature of the object under '
                          'simulation, in Kelvins.')

        # Compute how much the object's temperature differs from that of its
        # environment, and track this, too: likewise, as
        # "temperature/difference_to_ambient".
        ambient_difference = temperature - ambient_temperature
        scalar_summary.op(
            'difference_to_ambient',
            ambient_difference,
            display_name='Difference to ambient temperature',
            description=('The difference between the ambient '
                         'temperature and the temperature of the '
                         'object under simulation, in Kelvins.'))

    # Newton suggested that the rate of change of the temperature of an
    # object is directly proportional to this `ambient_difference` above,
    # where the proportionality constant is what we called the heat
    # coefficient. But in real life, not everything is quite so clean, so
    # we'll add in some noise. (The value of 50 is arbitrary, chosen to
    # make the data look somewhat interesting. :-) )
    noise = 50 * tf.random_normal([])
    delta = -heat_coefficient * (ambient_difference + noise)
    scalar_summary.op(
        'delta',
        delta,
        description='The change in temperature from the previous '
        'step, in Kelvins.')

    # Collect all the scalars that we want to keep track of.
    summ = tf.summary.merge_all()

    # Now, augment the current temperature by this delta that we computed,
    # blocking the assignment on summary collection to avoid race conditions
    # and ensure that the summary always reports the pre-update value.
    with tf.control_dependencies([summ]):
        update_step = temperature.assign_add(delta)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for step in xrange(STEPS):
        # By asking TensorFlow to compute the update step, we force it to
        # change the value of the temperature variable. We don't actually
        # care about this value, so we discard it; instead, we grab the
        # summary data computed along the way.
        (s, _) = sess.run([summ, update_step])
        writer.add_summary(s, global_step=step)
    writer.add_summary(summary.session_end_pb(api_pb2.STATUS_SUCCESS))
    writer.close()