Example #1
 def when_singular():
   center = min_
   bucket_starts = tf.stack([center - 0.5])
   bucket_ends = tf.stack([center + 0.5])
   bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
   return tf.transpose(
       a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))
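
This fragment is a closure over `min_` and `data` from the surrounding histogram code. A minimal standalone sketch of what it evaluates to, with those free variables bound to illustrative values:

import tensorflow as tf

data = tf.constant([3.0, 3.0, 3.0], dtype=tf.float64)
min_ = tf.reduce_min(data)  # 3.0; all elements are identical
center = min_
bucket_starts = tf.stack([center - 0.5])
bucket_ends = tf.stack([center + 0.5])
bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
print(tf.transpose(a=tf.stack([bucket_starts, bucket_ends, bucket_counts])))
# tf.Tensor([[2.5 3.5 3. ]], shape=(1, 3), dtype=float64)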
Example #2
 def when_multiple_values():
     """When input data contains multiple values."""
     bucket_width = range_ / tf.cast(bucket_count, tf.float64)
     offsets = data - min_
     bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                              dtype=tf.int32)
     clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
     # Use float64 instead of float32 to avoid accumulating floating point error
     # later in tf.reduce_sum when summing more than 2^24 individual `1.0` values.
     # See https://github.com/tensorflow/tensorflow/issues/51419 for details.
     one_hots = tf.one_hot(clamped_indices,
                           depth=bucket_count,
                           dtype=tf.float64)
     bucket_counts = tf.cast(
         tf.reduce_sum(input_tensor=one_hots, axis=0),
         dtype=tf.float64,
     )
     edges = tf.linspace(min_, max_, bucket_count + 1)
     # Ensure edges[-1] == max_, which TF's linspace implementation does not
     # do, leaving it subject to the whim of floating point rounding error.
     edges = tf.concat([edges[:-1], [max_]], 0)
     left_edges = edges[:-1]
     right_edges = edges[1:]
     return tf.transpose(
         a=tf.stack([left_edges, right_edges, bucket_counts]))
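
The float64 comment above guards against a real limitation: float32 has a 24-bit significand, so `2**24 + 1` is not representable and adding `1.0` past that point is silently lost. A quick check (not part of the original example):

import tensorflow as tf

x = tf.constant(2.0**24, dtype=tf.float32)
print(bool(x + 1.0 == x))  # True: the increment is lost in float32
y = tf.constant(2.0**24, dtype=tf.float64)
print(bool(y + 1.0 == y))  # False: float64 still has headroom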
Example #3
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

  Arguments:
    name: A name for this summary. The summary tag used for TensorBoard will
      be this name prefixed by any active name scopes.
    data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
      Any of the dimensions may be statically unknown (i.e., `None`).
      Floating point data will be clipped to the range [0,1).
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.

  Returns:
    True on success, or false if no summary was emitted because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)
                     or tf.summary.summary_scope)
    with summary_scope(name, 'image_summary', values=[data, max_outputs,
                                                      step]) as (tag, _):
        tf.debugging.assert_rank(data, 4)
        tf.debugging.assert_non_negative(max_outputs)
        images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png,
                                   limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        # Workaround for map_fn returning float dtype for an empty elems input.
        encoded_images = tf.cond(
            tf.shape(input=encoded_images)[0] > 0, lambda: encoded_images,
            lambda: tf.constant([], tf.string))
        image_shape = tf.shape(input=images)
        dimensions = tf.stack([
            tf.as_string(image_shape[2], name='width'),
            tf.as_string(image_shape[1], name='height')
        ],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.write(tag=tag,
                                tensor=tensor,
                                step=step,
                                metadata=summary_metadata)
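
A hedged usage sketch for this `image` function; the log directory, tag, and pixel data are illustrative, and a default summary writer must be active for anything to be written:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')  # illustrative path
with writer.as_default():
    # A batch of two 4x4 single-channel images; float pixels are clipped to [0, 1).
    pixels = tf.random.uniform([2, 4, 4, 1])
    image('random_images', data=pixels, step=0, max_outputs=2)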
Example #4
 def lazy_tensor():
     tf.debugging.assert_rank(data, 4)
     tf.debugging.assert_non_negative(max_outputs)
     images = tf.image.convert_image_dtype(data,
                                           tf.uint8,
                                           saturate=True)
     limited_images = images[:max_outputs]
     encoded_images = tf.map_fn(
         tf.image.encode_png,
         limited_images,
         dtype=tf.string,
         name="encode_each_image",
     )
     # Workaround for map_fn returning float dtype for an empty elems input.
     encoded_images = tf.cond(
         tf.shape(input=encoded_images)[0] > 0,
         lambda: encoded_images,
         lambda: tf.constant([], tf.string),
     )
     image_shape = tf.shape(input=images)
     dimensions = tf.stack(
         [
             tf.as_string(image_shape[2], name="width"),
             tf.as_string(image_shape[1], name="height"),
         ],
         name="dimensions",
     )
     return tf.concat([dimensions, encoded_images], axis=0)
Example #5
def histogram_continuous(name,
                         data,
                         bucket_min=None,
                         bucket_max=None,
                         bucket_count=DEFAULT_BUCKET_COUNT,
                         step=None,
                         description=None):
    """histogram for continuous data .

    Args:
        name (str): name for this summary
        data (Tensor): A `Tensor` of any shape.
        bucket_min (float|None): represent bucket min value,
            if None value of tf.reduce_min(data) will be used
        bucket_max (float|None): represent bucket max value,
            if None value tf.reduce_max(data) will be used
        bucket_count (int):  positive `int`. The output will have this many buckets.
        step (None|tf.Variable):  step value for this summary. this defaults to
            `tf.summary.experimental.get_step()`
        description (str): Optional long-form description for this summary
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)
                     or tf.summary.summary_scope)
    with summary_scope(
            name,
            'histogram_summary',
            values=[data, bucket_min, bucket_max, bucket_count,
                    step]) as (tag, _):
        with tf.name_scope('buckets'):
            data = tf.cast(tf.reshape(data, shape=[-1]), tf.float64)
            if bucket_min is None:
                bucket_min = tf.reduce_min(data)
            if bucket_max is None:
                bucket_max = tf.reduce_max(data)
            range_ = bucket_max - bucket_min
            bucket_width = range_ / tf.cast(bucket_count, tf.float64)
            offsets = data - bucket_min
            bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                                     dtype=tf.int32)
            clamped_indices = tf.clip_by_value(bucket_indices, 0,
                                               bucket_count - 1)
            one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
            bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots,
                                                  axis=0),
                                    dtype=tf.float64)
            edges = tf.linspace(bucket_min, bucket_max, bucket_count + 1)
            edges = tf.concat([edges[:-1], [bucket_max]], 0)
            edges = tf.cast(edges, tf.float64)
            left_edges = edges[:-1]
            right_edges = edges[1:]
            tensor = tf.transpose(
                a=tf.stack([left_edges, right_edges, bucket_counts]))
        return tf.summary.write(tag=tag,
                                tensor=tensor,
                                step=step,
                                metadata=summary_metadata)
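
A usage sketch, assuming a default summary writer is active (the path and sample data are illustrative); omitting `bucket_min`/`bucket_max` derives them from the data instead:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')  # illustrative path
with writer.as_default():
    samples = tf.random.normal([1000], dtype=tf.float64)
    histogram_continuous('activations', samples,
                         bucket_min=-3.0, bucket_max=3.0, step=0)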
Example #6
 def when_single_value():
     """When input data contains a single unique value."""
     # Left and right edges are the same for single value input.
     edges = tf.fill([bucket_count], max_)
     # Bucket counts are 0 except the last bucket (if bucket_count > 0),
     # which is `data_size`. Ensure that the resulting counts vector has
     # length `bucket_count` always, including the bucket_count==0 case.
     zeroes = tf.fill([bucket_count], 0)
     bucket_counts = tf.cast(
         tf.concat([zeroes[:-1], [data_size]], 0)[:bucket_count],
         dtype=tf.float64,
     )
     return tf.transpose(a=tf.stack([edges, edges, bucket_counts]))
Example #7
 def lazy_tensor():
     tf.debugging.assert_rank(data, 3)
     tf.debugging.assert_non_negative(max_outputs)
     limited_audio = data[:max_outputs]
     encode_fn = functools.partial(audio_ops.encode_wav,
                                   sample_rate=sample_rate)
     encoded_audio = tf.map_fn(encode_fn,
                               limited_audio,
                               dtype=tf.string,
                               name='encode_each_audio')
     # Workaround for map_fn returning float dtype for an empty elems input.
     encoded_audio = tf.cond(
         tf.shape(input=encoded_audio)[0] > 0, lambda: encoded_audio,
         lambda: tf.constant([], tf.string))
     limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
     return tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
Example #8
 def when_nonsingular():
   bucket_width = range_ / tf.cast(bucket_count, tf.float64)
   offsets = data - min_
   bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                            dtype=tf.int32)
   clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
   one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
   bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0),
                           dtype=tf.float64)
   edges = tf.linspace(min_, max_, bucket_count + 1)
   # Ensure edges[-1] == max_, which TF's linspace implementation does not
   # do, leaving it subject to the whim of floating point rounding error.
   edges = tf.concat([edges[:-1], [max_]], 0)
   left_edges = edges[:-1]
   right_edges = edges[1:]
   return tf.transpose(a=tf.stack(
       [left_edges, right_edges, bucket_counts]))
Example #9
def histogram_discrete(name,
                       data,
                       bucket_min,
                       bucket_max,
                       step=None,
                       description=None):
    """histogram for discrete data.

    Args:
        name (str): name for this summary
        data (Tensor): A `Tensor` integers of any shape.
        bucket_min (int): represent bucket min value
        bucket_max (int): represent bucket max value
            bucket count is calculate as `bucket_max - bucket_min + 1`
            and output will have this many buckets.
        step (None|tf.Variable):  step value for this summary. this defaults to
            `tf.summary.experimental.get_step()`
        description (str): Optional long-form description for this summary
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)
                     or tf.summary.summary_scope)
    with summary_scope(name,
                       'histogram_summary',
                       values=[data, bucket_min, bucket_max,
                               step]) as (tag, _):
        with tf.name_scope('buckets'):
            bucket_count = bucket_max - bucket_min + 1
            data = data - bucket_min
            one_hots = tf.one_hot(tf.reshape(data, shape=[-1]),
                                  depth=bucket_count)
            bucket_counts = tf.cast(
                tf.reduce_sum(input_tensor=one_hots, axis=0), tf.float64)
            edge = tf.cast(tf.range(bucket_count), tf.float64)
            # The histogram cannot be rendered when left_edge == right_edge,
            # so nudge each pair of edges apart by a tiny epsilon.
            left_edge = edge - 1e-12
            right_edge = edge + 1e-12
            tensor = tf.transpose(
                a=tf.stack([left_edge, right_edge, bucket_counts]))

        return tf.summary.write(tag=tag,
                                tensor=tensor,
                                step=step,
                                metadata=summary_metadata)
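
A usage sketch for the discrete variant; with `bucket_min=0` and `bucket_max=9` the summary has `9 - 0 + 1 = 10` buckets, one per integer value (the data is illustrative):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')  # illustrative path
with writer.as_default():
    actions = tf.constant([0, 1, 1, 3, 9, 9, 9], dtype=tf.int32)
    histogram_discrete('actions', actions, bucket_min=0, bucket_max=9, step=0)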
Example #10
        def lazy_tensor():
            tf.debugging.assert_rank(data, 3)
            tf.debugging.assert_non_negative(max_outputs)
            limited_audio = data[:max_outputs]

            encode_fn = functools.partial(
                audio_ops.encode_wav, sample_rate=sample_rate
            )
            if lengths is not None:
                tf.debugging.assert_rank(lengths, 1)
                limited_lengths = lengths[:max_outputs]

                def encode_with_length(datum_and_length):
                    datum, length = datum_and_length
                    return encode_fn(datum[:length])

                encoded_audio = tf.map_fn(
                    encode_with_length,
                    (limited_audio, limited_lengths),
                    dtype=tf.string,
                    name="encode_each_audio",
                )
            else:
                encoded_audio = tf.map_fn(
                    encode_fn,
                    limited_audio,
                    dtype=tf.string,
                    name="encode_each_audio",
                )
            # Workaround for map_fn returning float dtype for an empty elems input.
            encoded_audio = tf.cond(
                tf.shape(input=encoded_audio)[0] > 0,
                lambda: encoded_audio,
                lambda: tf.constant([], tf.string),
            )
            limited_labels = tf.tile([""], tf.shape(input=limited_audio)[:1])
            return tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
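
The variant above trims each clip to its own valid length before encoding: `tf.map_fn` iterates over the tuple `(limited_audio, limited_lengths)` in lockstep, handing each call one `(datum, length)` pair. A minimal sketch of the same pattern in isolation (names and shapes are illustrative):

import tensorflow as tf

clips = tf.zeros([3, 100, 1])         # three 100-frame mono clips
lengths = tf.constant([100, 80, 50])  # valid frames per clip

def frames_used(datum_and_length):
    datum, length = datum_and_length
    return tf.shape(datum[:length])[0]

print(tf.map_fn(frames_used, (clips, lengths), dtype=tf.int32))
# tf.Tensor([100  80  50], shape=(3,), dtype=int32)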
Example #11
def audio(name,
          data,
          sample_rate,
          step,
          max_outputs=3,
          encoding=None,
          description=None):
    """Write an audio summary.

  Arguments:
    name: A name for this summary. The summary tag used for TensorBoard will
      be this name prefixed by any active name scopes.
    data: A `Tensor` representing audio data with shape `[k, t, c]`,
      where `k` is the number of audio clips, `t` is the number of
      frames, and `c` is the number of channels. Elements should be
      floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
      be statically unknown (i.e., `None`).
    sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
      sample rate, in Hz. Must be positive.
    step: Required `int64`-castable monotonic step value.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many audio clips will be emitted at each step. When more than
      `max_outputs` many clips are provided, the first `max_outputs`
      many clips will be used and the rest silently discarded.
    encoding: Optional constant `str` for the desired encoding. Only "wav"
      is currently supported, but this is not guaranteed to remain the
      default, so if you want "wav" in particular, set this explicitly.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.

  Returns:
    True on success, or false if no summary was emitted because no default
    summary writer was available.
  """
    # TODO(nickfelt): get encode_wav() exported in the public API.
    from tensorflow.python.ops import gen_audio_ops

    if encoding is None:
        encoding = 'wav'
    if encoding != 'wav':
        raise ValueError('Unknown encoding: %r' % encoding)
    summary_metadata = metadata.create_summary_metadata(
        display_name=None,
        description=description,
        encoding=metadata.Encoding.Value('WAV'))
    inputs = [data, sample_rate, max_outputs, step]
    with tf.summary.summary_scope(name, 'audio_summary',
                                  values=inputs) as (tag, _):
        tf.debugging.assert_rank(data, 3)
        tf.debugging.assert_non_negative(max_outputs)
        limited_audio = data[:max_outputs]
        encode_fn = functools.partial(gen_audio_ops.encode_wav,
                                      sample_rate=sample_rate)
        encoded_audio = tf.map_fn(encode_fn,
                                  limited_audio,
                                  dtype=tf.string,
                                  name='encode_each_audio')
        # Workaround for map_fn returning float dtype for an empty elems input.
        encoded_audio = tf.cond(
            tf.shape(input=encoded_audio)[0] > 0, lambda: encoded_audio,
            lambda: tf.constant([], tf.string))
        limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
        tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
        return tf.summary.write(tag=tag,
                                tensor=tensor,
                                step=step,
                                metadata=summary_metadata)
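
A usage sketch, assuming a default summary writer is active; the one-second 440 Hz sine below is illustrative:

import math

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs')  # illustrative path
with writer.as_default():
    t = tf.linspace(0.0, 1.0, 44100)
    tone = tf.sin(2.0 * math.pi * 440.0 * t)  # values stay within [-1.0, 1.0]
    clip = tf.reshape(tone, [1, 44100, 1])    # [k, t, c]: one mono clip
    audio('tone', clip, sample_rate=44100, step=0)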