Example #1
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

    Arguments:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
        Any of the dimensions may be statically unknown (i.e., `None`).
        Floating point data will be clipped to the range [0,1).
      step: Explicit `int64`-castable monotonic step value for this summary. If
        omitted, this defaults to `tf.summary.experimental.get_step()`, which must
        not be None.
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.

    Returns:
      True on success, or false if no summary was emitted because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    with tf.summary.summary_scope(name,
                                  'image_summary',
                                  values=[data, max_outputs,
                                          step]) as (tag, _):
        tf.debugging.assert_rank(data, 4)
        tf.debugging.assert_non_negative(max_outputs)
        images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png,
                                   limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        # Workaround for map_fn returning float dtype for an empty elems input.
        encoded_images = tf.cond(
            tf.shape(input=encoded_images)[0] > 0, lambda: encoded_images,
            lambda: tf.constant([], tf.string))
        image_shape = tf.shape(input=images)
        dimensions = tf.stack([
            tf.as_string(image_shape[2], name='width'),
            tf.as_string(image_shape[1], name='height')
        ],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.write(tag=tag,
                                tensor=tensor,
                                step=step,
                                metadata=summary_metadata)
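A minimal usage sketch for the function above, assuming it is wired up as the TF 2.x `tf.summary.image` (or an equivalent module-level `image()`); the log directory and tensor shapes are illustrative only:

```python
import tensorflow as tf

# Hypothetical log directory; any writable path works.
writer = tf.summary.create_file_writer("logs/image_demo")
with writer.as_default():
    # Batch of 4 random RGB images in [0, 1); with max_outputs=2 only the
    # first two are PNG-encoded and written for this step.
    batch = tf.random.uniform(shape=[4, 32, 32, 3])
    tf.summary.image("random_rgb", batch, step=0, max_outputs=2)
writer.flush()
```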
Example #2
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
    """Create a legacy image summary op for use in a TensorFlow graph.

    Arguments:
      name: A unique name for the generated summary node.
      images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 3, or 4. Any of the dimensions may be statically
        unknown (i.e., `None`).
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collection keys. The new
        summary op is added to these collections. Defaults to
        `[GraphKeys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf

    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(images, 4),
                                  tf.assert_type(images, tf.uint8),
                                  tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png,
                                   limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        image_shape = tf.shape(images)
        dimensions = tf.stack([
            tf.as_string(image_shape[2], name='width'),
            tf.as_string(image_shape[1], name='height')
        ],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
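A hypothetical graph-mode usage sketch for the legacy `op()` above, run through the TF1 compatibility API; `op(...)` refers to the function defined in this example, and the placeholder shape and log path are made up for illustration:

```python
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Placeholder for a batch of uint8 images with shape [k, h, w, c].
images_ph = tf.placeholder(tf.uint8, shape=[None, 64, 64, 3])
summary_op = op("demo_images", images_ph, max_outputs=2)

with tf.Session() as sess:
    writer = tf.summary.FileWriter("logs/legacy_demo")
    batch = np.random.randint(0, 256, size=(4, 64, 64, 3), dtype=np.uint8)
    # A `tensor_summary` op evaluates to a serialized Summary proto, which
    # FileWriter.add_summary accepts directly.
    writer.add_summary(sess.run(summary_op, feed_dict={images_ph: batch}),
                       global_step=0)
    writer.close()
```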
Example #3
def _migrate_image_value(value):
  image_value = value.image
  data = [tf.compat.as_bytes(str(image_value.width)),
          tf.compat.as_bytes(str(image_value.height)),
          tf.compat.as_bytes(image_value.encoded_image_string)]

  summary_metadata = image_metadata.create_summary_metadata(
      display_name=value.metadata.display_name or value.tag,
      description=value.metadata.summary_description)
  return make_summary(value.tag, summary_metadata, data)
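An illustrative sketch of the input this migration helper consumes: a legacy `Summary.Value` carrying an `image` submessage (field names follow the standard `tf.Summary` proto). The encoded bytes below are a placeholder, and the result depends on this module's own `make_summary` helper:

```python
import tensorflow as tf

legacy_value = tf.compat.v1.Summary.Value(
    tag="mona_lisa/image/0",
    image=tf.compat.v1.Summary.Image(
        width=200,
        height=400,
        colorspace=3,  # RGB
        encoded_image_string=b"<png bytes>",  # placeholder payload
    ),
)
# Produces a tensor-based summary with [width, height, encoded_image] data.
migrated = _migrate_image_value(legacy_value)
```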
Example #4
def pb(name, images, max_outputs=3, display_name=None, description=None):
    """Create a legacy image summary protobuf.

    This behaves as if you were to create an `op` with the same arguments
    (wrapped with constant tensors where appropriate) and then execute
    that summary op in a TensorFlow session.

    Arguments:
      name: A unique name for the generated summary, including any desired
        name scopes.
      images: An `np.array` representing pixel data with shape
        `[k, h, w, c]`, where `k` is the number of images, `h` and `w` are
        the height and width of the images, and `c` is the number of
        channels, which should be 1, 3, or 4.
      max_outputs: Optional `int`. At most this many images will be
        emitted. If more than this many images are provided, the first
        `max_outputs` many images will be used and the rest silently
        discarded.
      display_name: Optional name for this summary in TensorBoard, as a
        `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        `str`. Markdown is supported. Defaults to empty.

    Returns:
      A `tf.Summary` protobuf object.
    """
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf

    images = np.array(images).astype(np.uint8)
    if images.ndim != 4:
        raise ValueError("Shape %r must have rank 4" % (images.shape, ))

    limited_images = images[:max_outputs]
    encoded_images = [encoder.encode_png(image) for image in limited_images]
    (width, height) = (images.shape[2], images.shape[1])
    content = [str(width), str(height)] + encoded_images
    tensor = tf.make_tensor_proto(content, dtype=tf.string)

    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    tf_summary_metadata = tf.SummaryMetadata.FromString(
        summary_metadata.SerializeToString())

    summary = tf.Summary()
    summary.value.add(
        tag="%s/image_summary" % name,
        metadata=tf_summary_metadata,
        tensor=tensor,
    )
    return summary
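A short sketch of writing the protobuf returned by `pb()` above to an event file with the TF1 compatibility `FileWriter`; the array shape and log path are illustrative:

```python
import numpy as np
import tensorflow.compat.v1 as tf

images = np.random.randint(0, 256, size=(2, 48, 64, 3), dtype=np.uint8)
summary_proto = pb("demo_images", images, max_outputs=2)

writer = tf.summary.FileWriter("logs/pb_demo")
writer.add_summary(summary_proto, global_step=0)
writer.close()
```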
Example #5
def _migrate_image_value(value):
  image_value = value.image
  data = [tf.compat.as_bytes(str(image_value.width)),
          tf.compat.as_bytes(str(image_value.height)),
          tf.compat.as_bytes(image_value.encoded_image_string)]

  tensor_proto = tf.make_tensor_proto(data)
  summary_metadata = image_metadata.create_summary_metadata(
      display_name=value.metadata.display_name or value.tag,
      description=value.metadata.summary_description)
  return tf.Summary.Value(tag=value.tag,
                          metadata=summary_metadata,
                          tensor=tensor_proto)
Example #6
def _migrate_image_value(value):
    image_value = value.image
    data = [
        str(image_value.width).encode("ascii"),
        str(image_value.height).encode("ascii"),
        image_value.encoded_image_string,
    ]

    summary_metadata = image_metadata.create_summary_metadata(
        display_name=value.metadata.display_name or value.tag,
        description=value.metadata.summary_description,
    )
    return make_summary(value.tag, summary_metadata, data)
Example #7
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
  """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collection keys. The new
      summary op is added to these collections. Defaults to
      `[GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(images, 4),
                                tf.assert_type(images, tf.uint8),
                                tf.assert_non_negative(max_outputs)]):
    limited_images = images[:max_outputs]
    encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                               dtype=tf.string,
                               name='encode_each_image')
    image_shape = tf.shape(images)
    dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
                           tf.as_string(image_shape[1], name='height')],
                          name='dimensions')
    tensor = tf.concat([dimensions, encoded_images], axis=0)
    return tf.summary.tensor_summary(name='image_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
Example #8
def _migrate_image_value(value):
    image_value = value.image
    data = [
        tf.compat.as_bytes(str(image_value.width)),
        tf.compat.as_bytes(str(image_value.height)),
        tf.compat.as_bytes(image_value.encoded_image_string)
    ]

    tensor_proto = tf.make_tensor_proto(data)
    summary_metadata = image_metadata.create_summary_metadata(
        display_name=value.metadata.display_name or value.tag,
        description=value.metadata.summary_description)
    return tf.Summary.Value(tag=value.tag,
                            metadata=summary_metadata,
                            tensor=tensor_proto)
Example #9
def pb(name, images, max_outputs=3, display_name=None, description=None):
  """Create an image summary protobuf.

  This behaves as if you were to create an `op` with the same arguments
  (wrapped with constant tensors where appropriate) and then execute
  that summary op in a TensorFlow session.

  Arguments:
    name: A unique name for the generated summary, including any desired
      name scopes.
    images: An `np.array` representing pixel data with shape
      `[k, h, w, c]`, where `k` is the number of images, `h` and `w` are
      the height and width of the images, and `c` is the number of
      channels, which should be 1, 3, or 4.
    max_outputs: Optional `int`. At most this many images will be
      emitted. If more than this many images are provided, the first
      `max_outputs` many images will be used and the rest silently
      discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      `str`. Markdown is supported. Defaults to empty.

  Returns:
    A `tf.Summary` protobuf object.
  """
  images = np.array(images).astype(np.uint8)
  if images.ndim != 4:
    raise ValueError('Shape %r must have rank 4' % (images.shape, ))

  limited_images = images[:max_outputs]
  encoded_images = [util.encode_png(image) for image in limited_images]
  (width, height) = (images.shape[2], images.shape[1])
  content = [str(width), str(height)] + encoded_images
  tensor = tf.make_tensor_proto(content, dtype=tf.string)

  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)

  summary = tf.Summary()
  summary.value.add(tag='%s/image_summary' % name,
                    metadata=summary_metadata,
                    tensor=tensor)
  return summary
Example #10
    def test_image(self):
        old_op = tf.summary.image(
            'mona_lisa',
            tf.cast(tf.random_normal(shape=[1, 400, 200, 3]), tf.uint8))
        old_value = self._value_from_op(old_op)
        assert old_value.HasField('image'), old_value
        new_value = data_compat.migrate_value(old_value)

        self.assertEqual('mona_lisa/image/0', new_value.tag)
        expected_metadata = image_metadata.create_summary_metadata(
            display_name='mona_lisa/image/0', description='')
        self.assertEqual(expected_metadata, new_value.metadata)
        self.assertTrue(new_value.HasField('tensor'))
        (width, height, data) = tf.make_ndarray(new_value.tensor)
        self.assertEqual(b'200', width)
        self.assertEqual(b'400', height)
        self.assertEqual(
            tf.compat.as_bytes(old_value.image.encoded_image_string), data)
Example #11
  def test_image(self):
    old_op = tf.summary.image('mona_lisa',
                              tf.cast(tf.random_normal(shape=[1, 400, 200, 3]),
                                      tf.uint8))
    old_value = self._value_from_op(old_op)
    assert old_value.HasField('image'), old_value
    new_value = data_compat.migrate_value(old_value)

    self.assertEqual('mona_lisa/image/0', new_value.tag)
    expected_metadata = image_metadata.create_summary_metadata(
        display_name='mona_lisa/image/0', description='')
    self.assertEqual(expected_metadata, new_value.metadata)
    self.assertTrue(new_value.HasField('tensor'))
    (width, height, data) = tf.make_ndarray(new_value.tensor)
    self.assertEqual(b'200', width)
    self.assertEqual(b'400', height)
    self.assertEqual(
        tf.compat.as_bytes(old_value.image.encoded_image_string), data)
Example #12
    def test_image(self):
        with tf.compat.v1.Graph().as_default():
            old_op = tf.compat.v1.summary.image(
                "mona_lisa",
                tf.image.convert_image_dtype(
                    tf.random.normal(shape=[1, 400, 200, 3]),
                    tf.uint8,
                    saturate=True,
                ),
            )
            old_value = self._value_from_op(old_op)
        assert old_value.HasField("image"), old_value
        new_value = data_compat.migrate_value(old_value)

        self.assertEqual("mona_lisa/image/0", new_value.tag)
        expected_metadata = image_metadata.create_summary_metadata(
            display_name="mona_lisa/image/0",
            description="",
            converted_to_tensor=True,
        )

        # Check serialized submessages...
        plugin_content = image_metadata.parse_plugin_metadata(
            new_value.metadata.plugin_data.content
        )
        expected_content = image_metadata.parse_plugin_metadata(
            expected_metadata.plugin_data.content
        )
        self.assertEqual(plugin_content, expected_content)
        # ...then check full metadata except plugin content, since
        # serialized forms need not be identical.
        new_value.metadata.plugin_data.content = (
            expected_metadata.plugin_data.content
        )
        self.assertEqual(expected_metadata, new_value.metadata)

        self.assertTrue(new_value.HasField("tensor"))
        (width, height, data) = tensor_util.make_ndarray(new_value.tensor)
        self.assertEqual(b"200", width)
        self.assertEqual(b"400", height)
        self.assertEqual(
            tf.compat.as_bytes(old_value.image.encoded_image_string), data
        )
Example #13
    def test_image(self):
        with tf.compat.v1.Graph().as_default():
            old_op = tf.compat.v1.summary.image(
                "mona_lisa",
                tf.image.convert_image_dtype(
                    tf.random.normal(shape=[1, 400, 200, 3]),
                    tf.uint8,
                    saturate=True,
                ),
            )
            old_value = self._value_from_op(old_op)
        assert old_value.HasField("image"), old_value
        new_value = data_compat.migrate_value(old_value)

        self.assertEqual("mona_lisa/image/0", new_value.tag)
        expected_metadata = image_metadata.create_summary_metadata(
            display_name="mona_lisa/image/0", description="")
        self.assertEqual(expected_metadata, new_value.metadata)
        self.assertTrue(new_value.HasField("tensor"))
        (width, height, data) = tensor_util.make_ndarray(new_value.tensor)
        self.assertEqual(b"200", width)
        self.assertEqual(b"400", height)
        self.assertEqual(
            tf.compat.as_bytes(old_value.image.encoded_image_string), data)
Example #14
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

    See also `tf.summary.scalar`, `tf.summary.SummaryWriter`.

    Writes a collection of images to the current default summary writer. Data
    appears in TensorBoard's 'Images' dashboard. Like `tf.summary.scalar` points,
    each collection of images is associated with a `step` and a `name`.  All the
    image collections with the same `name` constitute a time series of image
    collections.

    This example writes 2 random grayscale images:

    ```python
    w = tf.summary.create_file_writer('test/logs')
    with w.as_default():
      image1 = tf.random.uniform(shape=[8, 8, 1])
      image2 = tf.random.uniform(shape=[8, 8, 1])
      tf.summary.image("grayscale_noise", [image1, image2], step=0)
    ```

    To avoid clipping, data should be converted to one of the following:

    - floating point values in the range [0,1], or
    - uint8 values in the range [0,255]

    ```python
    # Convert the original dtype=int32 `Tensor` into `dtype=float64`.
    rgb_image_float = tf.constant([
      [[1000, 0, 0], [0, 500, 1000]],
    ]) / 1000
    tf.summary.image("picture", [rgb_image_float], step=0)

    # Convert original dtype=uint8 `Tensor` into proper range.
    rgb_image_uint8 = tf.constant([
      [[1, 1, 0], [0, 0, 1]],
    ], dtype=tf.uint8) * 255
    tf.summary.image("picture", [rgb_image_uint8], step=1)
    ```

    Arguments:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
        Any of the dimensions may be statically unknown (i.e., `None`).
        Floating point data will be clipped to the range [0,1]. Other data types
        will be clipped into an allowed range for safe casting to uint8, using
        `tf.image.convert_image_dtype`.
      step: Explicit `int64`-castable monotonic step value for this summary. If
        omitted, this defaults to `tf.summary.experimental.get_step()`, which must
        not be None.
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.

    Returns:
      True on success, or false if no summary was emitted because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    summary_scope = (getattr(tf.summary.experimental, "summary_scope", None)
                     or tf.summary.summary_scope)
    with summary_scope(name, "image_summary", values=[data, max_outputs,
                                                      step]) as (tag, _):
        # Defer image encoding preprocessing by passing it as a callable to write(),
        # wrapped in a LazyTensorCreator for backwards compatibility, so that we
        # only do this work when summaries are actually written.
        @lazy_tensor_creator.LazyTensorCreator
        def lazy_tensor():
            tf.debugging.assert_rank(data, 4)
            tf.debugging.assert_non_negative(max_outputs)
            images = tf.image.convert_image_dtype(data,
                                                  tf.uint8,
                                                  saturate=True)
            limited_images = images[:max_outputs]
            encoded_images = tf.map_fn(
                tf.image.encode_png,
                limited_images,
                dtype=tf.string,
                name="encode_each_image",
            )
            # Workaround for map_fn returning float dtype for an empty elems input.
            encoded_images = tf.cond(
                tf.shape(input=encoded_images)[0] > 0,
                lambda: encoded_images,
                lambda: tf.constant([], tf.string),
            )
            image_shape = tf.shape(input=images)
            dimensions = tf.stack(
                [
                    tf.as_string(image_shape[2], name="width"),
                    tf.as_string(image_shape[1], name="height"),
                ],
                name="dimensions",
            )
            return tf.concat([dimensions, encoded_images], axis=0)

        # To ensure that the image encoding logic only executes when summaries
        # are actually written, we pass a callable as the `tensor` parameter.
        return tf.summary.write(tag=tag,
                                tensor=lazy_tensor,
                                step=step,
                                metadata=summary_metadata)
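A sketch of the deferred-encoding behavior this version adds: under `tf.summary.record_if(False)` the summary is not recorded, so the lazy tensor above is never materialized and no PNG encoding runs. This assumes the `image()` function above is importable as shown; paths and shapes are illustrative:

```python
import tensorflow as tf

writer = tf.summary.create_file_writer("logs/lazy_demo")
batch = tf.random.uniform(shape=[2, 16, 16, 3])
with writer.as_default():
    with tf.summary.record_if(False):
        image("skipped", batch, step=0)   # lazy tensor never evaluated
    with tf.summary.record_if(True):
        image("recorded", batch, step=1)  # encoding runs, data is written
writer.flush()
```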
Example #15
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

    Arguments:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
        Any of the dimensions may be statically unknown (i.e., `None`).
        Floating point data will be clipped to the range [0,1).
      step: Explicit `int64`-castable monotonic step value for this summary. If
        omitted, this defaults to `tf.summary.experimental.get_step()`, which must
        not be None.
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.

    Returns:
      True on success, or false if no summary was emitted because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description
    )
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    summary_scope = (
        getattr(tf.summary.experimental, "summary_scope", None)
        or tf.summary.summary_scope
    )
    with summary_scope(
        name, "image_summary", values=[data, max_outputs, step]
    ) as (tag, _):
        # Defer image encoding preprocessing by passing it as a callable to write(),
        # wrapped in a LazyTensorCreator for backwards compatibility, so that we
        # only do this work when summaries are actually written.
        @lazy_tensor_creator.LazyTensorCreator
        def lazy_tensor():
            tf.debugging.assert_rank(data, 4)
            tf.debugging.assert_non_negative(max_outputs)
            images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
            limited_images = images[:max_outputs]
            encoded_images = tf.map_fn(
                tf.image.encode_png,
                limited_images,
                dtype=tf.string,
                name="encode_each_image",
            )
            # Workaround for map_fn returning float dtype for an empty elems input.
            encoded_images = tf.cond(
                tf.shape(input=encoded_images)[0] > 0,
                lambda: encoded_images,
                lambda: tf.constant([], tf.string),
            )
            image_shape = tf.shape(input=images)
            dimensions = tf.stack(
                [
                    tf.as_string(image_shape[2], name="width"),
                    tf.as_string(image_shape[1], name="height"),
                ],
                name="dimensions",
            )
            return tf.concat([dimensions, encoded_images], axis=0)

        # To ensure that the image encoding logic only executes when summaries
        # are actually written, we pass a callable as the `tensor` parameter.
        return tf.summary.write(
            tag=tag, tensor=lazy_tensor, step=step, metadata=summary_metadata
        )