Ejemplo n.º 1
0
 def when_multiple_values():
     """When input data contains multiple values."""
     width = range_ / tf.cast(bucket_count, tf.float64)
     shifted = data - min_
     raw_indices = tf.cast(tf.floor(shifted / width), dtype=tf.int32)
     # Values equal to max_ would index one past the end; clamp them into
     # the final bucket.
     indices = tf.minimum(raw_indices, bucket_count - 1)
     # Accumulate in float64 instead of float32 to avoid floating point
     # error when tf.reduce_sum adds more than 2^24 individual `1.0` values.
     # See https://github.com/tensorflow/tensorflow/issues/51419 for details.
     counts = tf.cast(
         tf.reduce_sum(input_tensor=tf.one_hot(indices,
                                               depth=bucket_count,
                                               dtype=tf.float64),
                       axis=0),
         dtype=tf.float64,
     )
     boundaries = tf.linspace(min_, max_, bucket_count + 1)
     # TF's linspace does not guarantee boundaries[-1] == max_ (floating
     # point rounding), so pin the final edge explicitly.
     boundaries = tf.concat([boundaries[:-1], [max_]], 0)
     return tf.transpose(
         a=tf.stack([boundaries[:-1], boundaries[1:], counts]))
Ejemplo n.º 2
0
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

  Arguments:
    name: A name for this summary. The summary tag used for TensorBoard will
      be this name prefixed by any active name scopes.
    data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
      Any of the dimensions may be statically unknown (i.e., `None`).
      Floating point data will be clipped to the range [0,1).
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.

  Returns:
    True on success, or false if no summary was emitted because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    scope_fn = (getattr(tf.summary.experimental, 'summary_scope', None)
                or tf.summary.summary_scope)
    scope_values = [data, max_outputs, step]
    with scope_fn(name, 'image_summary', values=scope_values) as (tag, _):
        tf.debugging.assert_rank(data, 4)
        tf.debugging.assert_non_negative(max_outputs)
        as_uint8 = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
        selected = as_uint8[:max_outputs]
        encoded = tf.map_fn(tf.image.encode_png,
                            selected,
                            dtype=tf.string,
                            name='encode_each_image')
        # Workaround for map_fn returning float dtype for an empty elems input.
        encoded = tf.cond(tf.shape(input=encoded)[0] > 0,
                          lambda: encoded,
                          lambda: tf.constant([], tf.string))
        # Width/height are prepended to the payload as strings, taken from
        # the full (unsliced) image batch.
        shape = tf.shape(input=as_uint8)
        dims = tf.stack([tf.as_string(shape[2], name='width'),
                         tf.as_string(shape[1], name='height')],
                        name='dimensions')
        payload = tf.concat([dims, encoded], axis=0)
        return tf.summary.write(tag=tag,
                                tensor=payload,
                                step=step,
                                metadata=summary_metadata)
Ejemplo n.º 3
0
 def lazy_tensor():
     """Build the image summary tensor: [width, height, png_0, png_1, ...]."""
     tf.debugging.assert_rank(data, 4)
     tf.debugging.assert_non_negative(max_outputs)
     as_uint8 = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
     pngs = tf.map_fn(
         tf.image.encode_png,
         as_uint8[:max_outputs],
         dtype=tf.string,
         name="encode_each_image",
     )
     # Workaround for map_fn returning float dtype for an empty elems input.
     pngs = tf.cond(
         tf.shape(input=pngs)[0] > 0,
         lambda: pngs,
         lambda: tf.constant([], tf.string),
     )
     # Dimensions come from the full (unsliced) image batch.
     shape = tf.shape(input=as_uint8)
     header = tf.stack(
         [
             tf.as_string(shape[2], name="width"),
             tf.as_string(shape[1], name="height"),
         ],
         name="dimensions",
     )
     return tf.concat([header, pngs], axis=0)
Ejemplo n.º 4
0
def histogram_continuous(name,
                         data,
                         bucket_min=None,
                         bucket_max=None,
                         bucket_count=DEFAULT_BUCKET_COUNT,
                         step=None,
                         description=None):
    """Write a histogram summary for continuous data.

    Args:
        name (str): name for this summary
        data (Tensor): A `Tensor` of any shape; it is flattened and cast to
            float64 before bucketing.
        bucket_min (float|None): represent bucket min value,
            if None value of tf.reduce_min(data) will be used
        bucket_max (float|None): represent bucket max value,
            if None value tf.reduce_max(data) will be used
        bucket_count (int):  positive `int`. The output will have this many buckets.
        step (None|tf.Variable):  step value for this summary. this defaults to
            `tf.summary.experimental.get_step()`
        description (str): Optional long-form description for this summary

    Returns:
        The result of `tf.summary.write`: True on success, or False if no
        default summary writer was available.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # Fall back for TF versions where summary_scope still lives under
    # tf.summary.experimental.
    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)
                     or tf.summary.summary_scope)
    with summary_scope(
            name,
            'histogram_summary',
            values=[data, bucket_min, bucket_max, bucket_count,
                    step]) as (tag, _):
        with tf.name_scope('buckets'):
            data = tf.cast(tf.reshape(data, shape=[-1]), tf.float64)
            if bucket_min is None:
                bucket_min = tf.reduce_min(data)
            if bucket_max is None:
                # BUG FIX: this previously used tf.reduce_min, which made
                # range_ zero (or negative) and produced non-finite bucket
                # indices whenever bucket_max was left unspecified.
                bucket_max = tf.reduce_max(data)
            range_ = bucket_max - bucket_min
            # NOTE(review): if all values are identical (range_ == 0) the
            # division below yields non-finite indices; pass explicit
            # bucket_min/bucket_max for degenerate data.
            bucket_width = range_ / tf.cast(bucket_count, tf.float64)
            offsets = data - bucket_min
            bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                                     dtype=tf.int32)
            # Clamp out-of-range values into the first/last buckets.
            clamped_indices = tf.clip_by_value(bucket_indices, 0,
                                               bucket_count - 1)
            one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
            bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots,
                                                  axis=0),
                                    dtype=tf.float64)
            edges = tf.linspace(bucket_min, bucket_max, bucket_count + 1)
            # linspace does not guarantee edges[-1] == bucket_max under
            # floating point rounding; pin the final edge explicitly.
            edges = tf.concat([edges[:-1], [bucket_max]], 0)
            edges = tf.cast(edges, tf.float64)
            left_edges = edges[:-1]
            right_edges = edges[1:]
            # Rows of [left_edge, right_edge, count] — the layout the
            # TensorBoard histogram plugin expects.
            tensor = tf.transpose(
                a=tf.stack([left_edges, right_edges, bucket_counts]))
        return tf.summary.write(tag=tag,
                                tensor=tensor,
                                step=step,
                                metadata=summary_metadata)
Ejemplo n.º 5
0
 def when_single_value():
     """When input data contains a single unique value."""
     # Every bucket edge collapses to the same value, so left == right.
     same_edges = tf.fill([bucket_count], max_)
     # All counts are zero except the final bucket, which holds data_size.
     # The trailing slice keeps the vector length == bucket_count even in
     # the bucket_count == 0 case.
     counts = tf.cast(
         tf.concat([tf.fill([bucket_count], 0)[:-1], [data_size]],
                   0)[:bucket_count],
         dtype=tf.float64,
     )
     return tf.transpose(a=tf.stack([same_edges, same_edges, counts]))
Ejemplo n.º 6
0
 def when_nonsingular():
   """Bucket the data into `bucket_count` equal-width buckets."""
   width = range_ / tf.cast(bucket_count, tf.float64)
   shifted = data - min_
   idx = tf.cast(tf.floor(shifted / width), dtype=tf.int32)
   # Values equal to max_ would index one past the end; clamp them into
   # the final bucket.
   idx = tf.minimum(idx, bucket_count - 1)
   counts = tf.cast(
       tf.reduce_sum(input_tensor=tf.one_hot(idx, depth=bucket_count),
                     axis=0),
       dtype=tf.float64)
   boundaries = tf.linspace(min_, max_, bucket_count + 1)
   # Ensure boundaries[-1] == max_, which TF's linspace implementation does
   # not guarantee under floating point rounding.
   boundaries = tf.concat([boundaries[:-1], [max_]], 0)
   return tf.transpose(a=tf.stack(
       [boundaries[:-1], boundaries[1:], counts]))