Example no. 1
0
 def function(tag, scope):
     """Build a serialized gif-summary `Event` via a numpy callback.

     Returns the op that imports the event under `scope`.
     """
     # `tf.identity` pins the tensor to the CPU before it is handed to
     # the Python callback.
     callback_inputs = [
         _choose_step(step), tag,
         tf.identity(tensor), max_outputs, fps,
     ]
     event = tf.numpy_function(py_gif_event, callback_inputs, tf.string)
     return summary_ops_v2.import_event(event, name=scope)
Example no. 2
0
    def __init__(self,
                 session,
                 logdir,
                 max_queue=10,
                 flush_secs=120,
                 filename_suffix=''):
        """Creates an `EventFileWriterV2` and an event file to write to.

        On construction, this calls `tf.contrib.summary.create_file_writer`
        within the graph from `session.graph` to look up a shared summary
        writer resource for `logdir` if one exists, and create one if not.
        Creating the summary writer resource in turn creates a new event file
        in `logdir` to be filled with `Event` protocol buffers passed to
        `add_event`. Graph ops to control this writer resource are added to
        `session.graph` during this init call; stateful methods on this class
        will call `session.run()` on these ops.

        Note that because the underlying resource is shared, it is possible
        that other parts of the code using the same session may interact
        independently with the resource, e.g. by flushing or even closing it.
        It is the caller's responsibility to avoid any undesirable sharing in
        this regard.

        The remaining arguments to the constructor (`flush_secs`, `max_queue`,
        and `filename_suffix`) control the construction of the shared writer
        resource if one is created. If an existing resource is reused, these
        arguments have no effect. See `tf.contrib.summary.create_file_writer`
        for details.

        Args:
          session: A `tf.compat.v1.Session`. Session that will hold shared
            writer resource. The writer ops will be added to session.graph
            during this init call.
          logdir: A string. Directory where event file will be written.
          max_queue: Integer. Size of the queue for pending events and
            summaries.
          flush_secs: Number. How often, in seconds, to flush the
            pending events and summaries to disk.
          filename_suffix: A string. Every event file's name is suffixed with
            `filename_suffix`.
        """
        self._session = session
        self._logdir = logdir
        self._closed = False
        # Guard directory creation, consistent with the sibling
        # implementation of this constructor elsewhere in the file.
        if not gfile.IsDirectory(self._logdir):
            gfile.MakeDirs(self._logdir)

        with self._session.graph.as_default():
            with ops.name_scope('filewriter'):
                file_writer = summary_ops_v2.create_file_writer(
                    logdir=self._logdir,
                    max_queue=max_queue,
                    # The resource API takes milliseconds; convert from the
                    # seconds exposed by this constructor.
                    flush_millis=flush_secs * 1000,
                    filename_suffix=filename_suffix)
                with summary_ops_v2.always_record_summaries(
                ), file_writer.as_default():
                    # Placeholder fed with serialized Event protos by
                    # add_event(); the default value is never imported.
                    self._event_placeholder = array_ops.placeholder_with_default(
                        constant_op.constant('unused', dtypes.string),
                        shape=[])
                    self._add_event_op = summary_ops_v2.import_event(
                        self._event_placeholder)
                # Ops run later by the stateful methods of this class.
                self._init_op = file_writer.init()
                self._flush_op = file_writer.flush()
                self._close_op = file_writer.close()
            self._session.run(self._init_op)
Example no. 3
0
 def function(tag, scope):
     """Build a serialized gif-summary `Event` from file paths.

     Returns the op that imports the event under `scope`.
     """
     # `tf.identity` keeps the paths tensor on the CPU for the callback.
     callback_inputs = [
         _choose_step(step), tag,
         tf.identity(paths), max_outputs,
     ]
     event = tf.py_func(py_gif_event, callback_inputs, tf.string,
                        stateful=False)
     return summary_ops_v2.import_event(event, name=scope)
  def __init__(self, session, logdir, max_queue=10, flush_secs=120,
               filename_suffix=''):
    """Creates an `EventFileWriterV2` and an event file to write to.

    On construction, this calls `tf.contrib.summary.create_file_writer` within
    the graph from `session.graph` to look up a shared summary writer resource
    for `logdir` if one exists, and create one if not. Creating the summary
    writer resource in turn creates a new event file in `logdir` to be filled
    with `Event` protocol buffers passed to `add_event`. Graph ops to control
    this writer resource are added to `session.graph` during this init call;
    stateful methods on this class will call `session.run()` on these ops.

    Note that because the underlying resource is shared, it is possible that
    other parts of the code using the same session may interact independently
    with the resource, e.g. by flushing or even closing it. It is the caller's
    responsibility to avoid any undesirable sharing in this regard.

    The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
    `filename_suffix`) control the construction of the shared writer resource
    if one is created. If an existing resource is reused, these arguments have
    no effect.  See `tf.contrib.summary.create_file_writer` for details.

    Args:
      session: A `tf.compat.v1.Session`. Session that will hold shared writer
        resource. The writer ops will be added to session.graph during this
        init call.
      logdir: A string. Directory where event file will be written.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
    """
    self._session = session
    self._logdir = logdir
    self._closed = False
    if not gfile.IsDirectory(self._logdir):
      gfile.MakeDirs(self._logdir)

    with self._session.graph.as_default():
      with ops.name_scope('filewriter'):
        file_writer = summary_ops_v2.create_file_writer(
            logdir=self._logdir,
            max_queue=max_queue,
            flush_millis=flush_secs * 1000,
            filename_suffix=filename_suffix)
        with summary_ops_v2.always_record_summaries(), file_writer.as_default():
          self._event_placeholder = array_ops.placeholder_with_default(
              constant_op.constant('unused', dtypes.string),
              shape=[])
          self._add_event_op = summary_ops_v2.import_event(
              self._event_placeholder)
        self._init_op = file_writer.init()
        self._flush_op = file_writer.flush()
        self._close_op = file_writer.close()
      self._session.run(self._init_op)
Example no. 5
0
 def function(tag, scope):
     """Build a serialized gif-summary `Event` via a stateless py_func.

     Returns the op that imports the event under `scope`.
     """
     # Conversion order matters for deterministic graph-op naming, so it
     # mirrors the original: tag, max_outputs, fps, then step.
     tag_t = tf.convert_to_tensor(tag)
     max_outputs_t = tf.convert_to_tensor(max_outputs)
     fps_t = tf.convert_to_tensor(fps)
     # `tf.identity` moves the tensor to the CPU for the Python callback.
     event = tf.py_func(
         py_gif_event,
         [_choose_step(step), tag_t, tf.identity(tensor), max_outputs_t,
          fps_t],
         tf.string,
         stateful=False)
     return summary_ops_v2.import_event(event, name=scope)
Example no. 6
0
def gif_summary(name: str,
                data: tf.Tensor,
                fps: int,
                step=None,
                max_outputs=3):
    """Write a gif summary.

    Args:
        name: A name for this summary. The summary tag used for TensorBoard
            will be this name prefixed by any active name scopes.
        data: A 5-D `uint8` `Tensor` of shape
            `[k, time, height, width, channels]` where `k` is the number of
            gifs and `channels` is either 1 or 3. Any of the dimensions may
            be statically unknown (i.e., `None`).
            Floating point data will be clipped to the range [0,1).
        fps: frames per second of the gif.
        step: Explicit `int64`-castable monotonic step value for this
            summary. If omitted, this defaults to
            `tf.summary.experimental.get_step()`, which must not be None.
        max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
            many gifs will be emitted at each step. When more than
            `max_outputs` many gifs are provided, the first `max_outputs`
            many images will be used and the rest silently discarded.

    Returns:
        The op created by importing the serialized `Summary` event.

    Raises:
        IOError: If the images cannot be encoded to a gif (e.g. ffmpeg is
            missing or failed).
        ValueError: If `step` is omitted and no default step is set.
    """
    summary_scope = tf.summary.experimental.summary_scope(
        name=name,
        default_name='image_summary',
        values=[data, max_outputs, step])

    with summary_scope as (tag, _):
        tf.debugging.assert_rank(data, 5)

        # Materialize the tensor first so dimensions can be taken from the
        # concrete array; static shapes may contain `None` dimensions, which
        # would break `min(batch_size, max_outputs)`.
        if tf.executing_eagerly():
            array = data.numpy()
        else:
            session = tf.compat.v1.keras.backend.get_session()
            array = session.run(data)

        batch_size, _, height, width, channels = array.shape
        num_outputs = min(batch_size, max_outputs)

        summary = summary_pb2.Summary()
        for i in range(num_outputs):
            ith_image_summary = summary_pb2.Summary.Image()
            ith_image_summary.height = height
            ith_image_summary.width = width
            ith_image_summary.colorspace = channels

            try:
                ith_image_summary.encoded_image_string = encode_gif(
                    array[i], fps)
            except (IOError, OSError) as exception:
                # Chain the original error so the ffmpeg failure is visible.
                raise IOError(
                    "Unable to encode images to a gif string because either ffmpeg is "
                    "not installed or ffmpeg returned an error: {}.".format(
                        repr(exception))) from exception

            # Single gif: plain tag; multiple gifs: index-suffixed tags.
            summary_tag = "{}/gif".format(tag) if (
                num_outputs == 1) else "{}/gif/{}".format(tag, i)

            summary.value.add(tag=summary_tag, image=ith_image_summary)

        # Resolve the step as documented: fall back to the experimental
        # default step rather than crashing on `event.step = None`.
        if step is None:
            step = tf.summary.experimental.get_step()
            if step is None:
                raise ValueError(
                    "No step value provided and no default step set; pass "
                    "`step` or call tf.summary.experimental.set_step().")

        event = event_pb2.Event(summary=summary)
        event.wall_time = time.time()
        event.step = step

        # Return the import op so callers actually get the documented value.
        return summary_ops_v2.import_event(
            event.SerializeToString(), name="scope")