Code example #1
def when_singular():
  # Degenerate case: every element of `data` is identical, so the range
  # is zero. Emit a single bucket of width 1 centered on that value,
  # containing all the data. (`min_` and `data` come from the enclosing
  # scope of this fragment.)
  center = min_
  bucket_starts = tf.stack([center - 0.5])
  bucket_ends = tf.stack([center + 0.5])
  bucket_counts = tf.stack([tf.cast(tf.size(data), tf.float64)])
  # Shape [1, 3]: a single row of (left_edge, right_edge, count).
  return tf.transpose(
      tf.stack([bucket_starts, bucket_ends, bucket_counts]))
Code example #2
File: images_demo.py  Project: ysm1121/tensorboard
def convolve(image, pixel_filter, channels=3, name=None):
  """Perform a 2D pixel convolution on the given image.

  Arguments:
    image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
      where `channels` is the third argument to this function and the
      first two dimensions are arbitrary.
    pixel_filter: A 2D `Tensor`, representing pixel weightings for the
      kernel. This will be used to create a 4D kernel---the extra two
      dimensions are for channels (see `tf.nn.conv2d` documentation),
      and the kernel will be constructed so that the channels are
      independent: each channel only observes the data from neighboring
      pixels of the same channel.
    channels: An integer representing the number of channels in the
      image (e.g., 3 for RGB).
    name: An optional name for the resulting op and its name scope.

  Returns:
    A 3D `float32` `Tensor` of the same shape as the input.
  """
  with tf.name_scope(name, 'convolve'):
    tf.assert_type(image, tf.float32)
    channel_filter = tf.eye(channels)
    filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *
               tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))
    result_batch = tf.nn.conv2d(tf.stack([image]),  # batch
                                filter=filter_,
                                strides=[1, 1, 1, 1],
                                padding='SAME')
    return result_batch[0]  # unbatch
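A hedged usage sketch (the random image, kernel, and session setup are mine, for illustration; it assumes the same TF 1.x graph-mode context as the code above):

# Hypothetical usage: blur a random image with a normalized 3x3 box
# kernel; each RGB channel is convolved independently.
image = tf.random_uniform([64, 64, 3], dtype=tf.float32)
box_kernel = tf.ones([3, 3]) / 9.0  # weights sum to 1
blurred = convolve(image, box_kernel, name='box_blur')
with tf.Session() as sess:
  print(sess.run(blurred).shape)  # (64, 64, 3): same shape as the input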
Code example #3
File: summary.py  Project: ysm1121/tensorboard
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
  """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs`
      many images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collection keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(images, 4),
                                tf.assert_type(images, tf.uint8),
                                tf.assert_non_negative(max_outputs)]):
    limited_images = images[:max_outputs]
    encoded_images = tf.map_fn(tf.image.encode_png,
                               limited_images,
                               dtype=tf.string,
                               name='encode_each_image')
    image_shape = tf.shape(images)
    dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
                           tf.as_string(image_shape[1], name='height')],
                          name='dimensions')
    tensor = tf.concat([dimensions, encoded_images], axis=0)
    return tf.summary.tensor_summary(name='image_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
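A hedged usage sketch (the random pixel data, log directory, and session setup are mine, not the repo's):

# Hypothetical usage: summarize two random 32x32 RGB images. `op`
# expects uint8 pixel data of shape [k, h, w, c].
images = tf.cast(
    tf.random_uniform([2, 32, 32, 3], maxval=256, dtype=tf.int32),
    tf.uint8)
summ = op('example_images', images, max_outputs=2)
with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/image_demo')
  writer.add_summary(sess.run(summ), global_step=0)
  writer.close()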
Code example #4
def when_nonsingular():
  # General case: split [min_, max_] into `bucket_count` equal-width
  # buckets and count the elements of `data` landing in each one.
  # (`data`, `min_`, `max_`, `range_`, and `bucket_count` come from the
  # enclosing scope of this fragment.)
  bucket_width = range_ / tf.cast(bucket_count, tf.float64)
  offsets = data - min_
  bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                           dtype=tf.int32)
  # Values exactly equal to max_ would index one past the end; clamp
  # them into the last bucket.
  clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
  one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
  bucket_counts = tf.cast(tf.reduce_sum(one_hots, axis=0),
                          dtype=tf.float64)
  edges = tf.lin_space(min_, max_, bucket_count + 1)
  left_edges = edges[:-1]
  right_edges = edges[1:]
  # Shape [bucket_count, 3]: rows of (left_edge, right_edge, count).
  return tf.transpose(
      tf.stack([left_edges, right_edges, bucket_counts]))
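These two fragments (code examples #1 and #4) are the branches of a conditional in TensorBoard's histogram summary. The wiring below is my reconstruction of that dispatch, not a verbatim excerpt:

# Sketch (reconstruction, not from the repo): dispatch on whether the
# data range is zero. `data` is a float64 vector.
min_ = tf.reduce_min(data)
max_ = tf.reduce_max(data)
range_ = max_ - min_
is_singular = tf.equal(range_, 0)
buckets = tf.cond(is_singular, when_singular, when_nonsingular)
# Either way, `buckets` has rows of (left_edge, right_edge, count).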
Code example #5
File: summary.py  Project: ysm1121/tensorboard
def _create_tensor_summary(name,
                           true_positive_counts,
                           false_positive_counts,
                           true_negative_counts,
                           false_negative_counts,
                           precision,
                           recall,
                           num_thresholds=None,
                           display_name=None,
                           description=None,
                           collections=None):
  """A private helper method for generating a tensor summary.

  We use a helper method instead of having `op` directly call `raw_data_op`
  to prevent the scope of `raw_data_op` from being embedded within `op`.

  Arguments are the same as for `raw_data_op`.

  Returns:
    A tensor summary that collects data for PR curves.
  """
  # Store the number of thresholds within the summary metadata because
  # that value is constant for all PR curve summaries with the same tag.
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name if display_name is not None else name,
      description=description or '',
      num_thresholds=num_thresholds)

  # Store values within a tensor. We store them in the order:
  # true positives, false positives, true negatives, false
  # negatives, precision, and recall.
  combined_data = tf.stack([
      tf.cast(true_positive_counts, tf.float32),
      tf.cast(false_positive_counts, tf.float32),
      tf.cast(true_negative_counts, tf.float32),
      tf.cast(false_negative_counts, tf.float32),
      tf.cast(precision, tf.float32),
      tf.cast(recall, tf.float32)
  ])

  return tf.summary.tensor_summary(name='pr_curves',
                                   tensor=combined_data,
                                   collections=collections,
                                   summary_metadata=summary_metadata)
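For context on the stacking order: at each threshold, precision and recall are determined by the four counts. The snippet below only illustrates those definitions; the real pipeline computes these values upstream before calling this helper, and the division guard is my own, not the library's:

# Illustrative only: how precision and recall relate to the stacked
# counts at each threshold. The 1e-7 epsilon is an assumption.
tp = tf.cast(true_positive_counts, tf.float32)
fp = tf.cast(false_positive_counts, tf.float32)
fn = tf.cast(false_negative_counts, tf.float32)
precision = tp / tf.maximum(1e-7, tp + fp)
recall = tp / tf.maximum(1e-7, tp + fn)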
Code example #6
File: images_demo.py  Project: ysm1121/tensorboard
def run_sobel(logdir, verbose=False):
  """Run a Sobel edge detection demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: sobel')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  kernel_radius = tf.placeholder(shape=(), dtype=tf.int32)

  with tf.name_scope('horizontal_kernel'):
    kernel_side_length = kernel_radius * 2 + 1
    # Drop off influence for pixels further away from the center.
    weighting_kernel = (
        1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
    differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
    horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                  tf.expand_dims(differentiation_kernel, 0))

  with tf.name_scope('vertical_kernel'):
    vertical_kernel = tf.transpose(horizontal_kernel)

  float_image = tf.cast(image, tf.float32)
  dx = convolve(float_image, horizontal_kernel, name='convolve_dx')
  dy = convolve(float_image, vertical_kernel, name='convolve_dy')
  gradient_magnitude = tf.norm([dx, dy], axis=0, name='gradient_magnitude')
  with tf.name_scope('normalized_gradient'):
    normalized_gradient = gradient_magnitude / tf.reduce_max(gradient_magnitude)
  with tf.name_scope('output_image'):
    output_image = tf.cast(255 * normalized_gradient, tf.uint8)

  summ = image_summary.op(
      'sobel', tf.stack([output_image]),
      display_name='Sobel edge detection',
      description=(u'Demonstration of [Sobel edge detection]. The step '
                   'parameter adjusts the radius of the kernel. '
                   'The kernel can be of arbitrary size, and considers '
                   u'nearby pixels with \u2113\u2082-linear falloff.\n\n'
                   # (that says ``$\ell_2$-linear falloff'')
                   'Edge detection is done on a per-channel basis, so '
                   'you can observe which edges are “mostly red '
                   'edges,” for instance.\n\n'
                   'For practical edge detection, a small kernel '
                   '(usually not more than *r*=2) is best.\n\n'
                   '[Sobel edge detection]: %s\n\n'
                   "%s"
                   % ('https://en.wikipedia.org/wiki/Sobel_operator',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info("--- sobel: step: %s" % step)
      feed_dict = {kernel_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
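For intuition about the kernel built in the `horizontal_kernel` scope: it is the outer product of a triangular weighting window and a linear differentiation window. A NumPy sketch (mine, for illustration, not part of the demo):

# Sketch of the kernel `horizontal_kernel` builds for a given radius.
# For r=1 it reduces to a plain central difference; larger radii add
# triangularly weighted neighbors.
import numpy as np

def horizontal_kernel(radius):
  side = 2 * radius + 1
  weighting = 1.0 - np.abs(np.linspace(-1.0, 1.0, side))  # e.g. [0, 1, 0]
  differentiation = np.linspace(-1.0, 1.0, side)          # e.g. [-1, 0, 1]
  return np.outer(weighting, differentiation)

print(horizontal_kernel(1))
# [[ 0.  0.  0.]
#  [-1.  0.  1.]
#  [ 0.  0.  0.]]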
Code example #7
File: images_demo.py  Project: ysm1121/tensorboard
def run_box_to_gaussian(logdir, verbose=False):
  """Run a box-blur-to-Gaussian-blur demonstration.

  See the summary description for more details.

  Arguments:
    logdir: Directory into which to write event logs.
    verbose: Boolean; whether to log any output.
  """
  if verbose:
    tf.logging.info('--- Starting run: box_to_gaussian')

  tf.reset_default_graph()
  tf.set_random_seed(0)

  image = get_image(verbose=verbose)
  blur_radius = tf.placeholder(shape=(), dtype=tf.int32)
  with tf.name_scope('filter'):
    blur_side_length = blur_radius * 2 + 1
    pixel_filter = tf.ones((blur_side_length, blur_side_length))
    pixel_filter = (pixel_filter
                    / tf.cast(tf.size(pixel_filter), tf.float32))  # normalize

  iterations = 4
  images = [tf.cast(image, tf.float32) / 255.0]
  for _ in xrange(iterations):
    images.append(convolve(images[-1], pixel_filter))
  with tf.name_scope('convert_to_uint8'):
    images = tf.stack(
        [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
         for image_ in images])

  summ = image_summary.op(
      'box_to_gaussian', images, max_outputs=iterations,
      display_name='Gaussian blur as a limit process of box blurs',
      description=('Demonstration of forming a Gaussian blur by '
                   'composing box blurs, each of which can be expressed '
                   'as a 2D convolution.\n\n'
                   'A Gaussian blur is formed by convolving a Gaussian '
                   'kernel over an image. But a Gaussian kernel is '
                   'itself the limit of convolving a constant kernel '
                   'with itself many times. Thus, while applying '
                   'a box-filter convolution just once produces '
                   'results that are noticeably different from those '
                   'of a Gaussian blur, repeating the same convolution '
                   'just a few times causes the result to rapidly '
                   'converge to an actual Gaussian blur.\n\n'
                   'Here, the step value controls the blur radius, '
                   'and the image sample controls the number of times '
                   'that the convolution is applied (plus one). '
                   'So, when *sample*=1, the original image is shown; '
                   '*sample*=2 shows a box blur; and a hypothetical '
                   '*sample*=∞ would show a true Gaussian blur.\n\n'
                   'This is one ingredient in a recipe to compute very '
                   'fast Gaussian blurs. The other pieces require '
                   'special treatment for the box blurs themselves '
                   '(decomposition to dual one-dimensional box blurs, '
                   'each of which is computed with a sliding window); '
                   'we don’t perform those optimizations here.\n\n'
                   '[Here are some slides describing the full process.]'
                   '(%s)\n\n'
                   '%s'
                   % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf',
                      IMAGE_CREDIT)))

  with tf.Session() as sess:
    sess.run(image.initializer)
    writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian'))
    writer.add_graph(sess.graph)
    for step in xrange(8):
      if verbose:
        tf.logging.info('--- box_to_gaussian: step: %s' % step)
      feed_dict = {blur_radius: step}
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      s = sess.run(summ, feed_dict=feed_dict,
                   options=run_options, run_metadata=run_metadata)
      writer.add_summary(s, global_step=step)
      writer.add_run_metadata(run_metadata, 'step_%04d' % step)
    writer.close()
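The convergence claimed in the summary description is the central limit theorem in convolution form: convolving a box kernel with itself repeatedly approaches a Gaussian. A NumPy sketch (mine, for illustration) in one dimension:

# Repeatedly convolving a 1D box kernel with itself rapidly traces out
# a bell curve, which is why a few box blurs approximate a Gaussian blur.
import numpy as np

kernel = np.ones(3) / 3.0  # 1D box kernel
for i in range(3):
  kernel = np.convolve(kernel, np.ones(3) / 3.0)
  print(i + 2, np.round(kernel, 3))
# After the first self-convolution the weights are [1, 2, 3, 2, 1] / 9,
# already bump-shaped; further iterations smooth it toward a Gaussian.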
Code example #8
File: summary.py  Project: ysm1121/tensorboard
def op(name,
       audio,
       sample_rate,
       labels=None,
       max_outputs=3,
       encoding=None,
       display_name=None,
       description=None,
       collections=None):
  """Create an audio summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    audio: A `Tensor` representing audio data with shape `[k, t, c]`,
      where `k` is the number of audio clips, `t` is the number of
      frames, and `c` is the number of channels. Elements should be
      floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
      be statically unknown (i.e., `None`).
    sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
      sample rate, in Hz. Must be positive.
    labels: Optional `string` `Tensor`, a vector whose length is the
      first dimension of `audio`, where `labels[i]` contains arbitrary
      textual information about `audio[i]`. (For instance, this could be
      some text that a TTS system was supposed to produce.) Markdown is
      supported. Contents should be UTF-8.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many audio clips will be emitted at each step. When more than
      `max_outputs` many clips are provided, the first `max_outputs`
      many clips will be used and the rest silently discarded.
    encoding: A constant `str` (not string tensor) indicating the
      desired encoding. You can choose any format you like, as long as
      it's "wav". Please see the "API compatibility note" below.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collection keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.

  API compatibility note: The default value of the `encoding`
  argument is _not_ guaranteed to remain unchanged across TensorBoard
  versions. In the future, we will by default encode as FLAC instead of
  as WAV. If the specific format is important to you, please provide a
  file format explicitly.
  """
  if display_name is None:
    display_name = name
  if encoding is None:
    encoding = 'wav'

  if encoding == 'wav':
    encoding = metadata.Encoding.Value('WAV')
    encoder = functools.partial(tf.contrib.ffmpeg.encode_audio,
                                samples_per_second=sample_rate,
                                file_format='wav')
  else:
    raise ValueError('Unknown encoding: %r' % encoding)

  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(audio, 3)]):
    limited_audio = audio[:max_outputs]
    encoded_audio = tf.map_fn(encoder,
                              limited_audio,
                              dtype=tf.string,
                              name='encode_each_audio')
    if labels is None:
      limited_labels = tf.tile([''], tf.shape(limited_audio)[:1])
    else:
      limited_labels = labels[:max_outputs]
    tensor = tf.transpose(tf.stack([encoded_audio, limited_labels]))
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name,
        description=description,
        encoding=encoding)
    return tf.summary.tensor_summary(name='audio_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
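A hedged usage sketch (the sine tone, label text, and parameters are mine, for illustration):

# Hypothetical usage: summarize one second of a 440 Hz sine tone. `op`
# expects float audio in [-1.0, 1.0] with shape [k, t, c].
sample_rate = 44100
t = tf.range(sample_rate, dtype=tf.float32) / sample_rate
tone = tf.sin(2 * 3.14159265 * 440.0 * t)
audio = tf.reshape(tone, [1, sample_rate, 1])  # one clip, one channel
summ = op('sine_tone', audio, sample_rate=sample_rate,
          labels=tf.constant(['A 440 Hz sine wave']))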