Example #1
  def __init__(self,
               logdir,
               max_queue=10,
               flush_secs=120,
               filename_suffix=""):
    """Summary writer for TensorBoard, compatible with eager execution.

    If necessary, multiple instances of `SummaryWriter` can be created, with
    distinct `logdir`s and `name`s. Each `SummaryWriter` instance will retain
    its independent `global_step` counter and data writing destination.

    Example:
    ```python
    writer = tfe.SummaryWriter("my_model")

    # ... Code that sets up the model and data batches ...

    for _ in range(train_iters):
      loss = model.train_batch(batch)
      writer.scalar("loss", loss)
      writer.step()
    ```

    Args:
      logdir: Directory in which summary files will be written.
      max_queue: Number of summary items to buffer before flushing to
        filesystem. If 0, summaries will be flushed immediately.
      flush_secs: Number of seconds between forced commits to disk.
      filename_suffix: Suffix of the event protobuf files in which the summary
        data are stored.

    Raises:
      ValueError: If this constructor is called when eager execution is not
        enabled.
    """
    # TODO(apassos, ashankar): Make this class and the underlying
    # contrib.summary_ops compatible with graph mode and remove this check.
    if not context.in_eager_mode():
      raise ValueError(
          "Use of SummaryWriter is currently supported only with eager "
          "execution enabled. File an issue at "
          "https://github.com/tensorflow/tensorflow/issues/new to express "
          "interest in fixing this.")

    # TODO(cais): Consider adding name keyword argument, which if None or empty,
    # will register the global global_step that training_util.get_global_step()
    # can find.
    with context.device(self._CPU_DEVICE):
      self._name = uuid.uuid4().hex
      self._global_step = 0
      self._global_step_tensor = variable_scope.get_variable(
          "global_step/summary_writer/" + self._name,
          shape=[], dtype=dtypes.int64,
          initializer=init_ops.zeros_initializer())
      self._global_step_dirty = False
      self._resource = gen_summary_ops.summary_writer(shared_name=self._name)
      gen_summary_ops.create_summary_file_writer(
          self._resource, logdir, max_queue, flush_secs, filename_suffix)
      # Delete the resource when this object is deleted
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device=self._CPU_DEVICE)
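A minimal usage sketch for this constructor, assuming the tf.contrib.eager
(tfe) API of this TensorFlow version; the logdir path is hypothetical:

import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()  # the constructor raises ValueError otherwise
writer = tfe.SummaryWriter("/tmp/my_model")  # hypothetical logdir
writer.scalar("loss", 0.25)  # buffered until max_queue items or flush_secs elapse
writer.step()                # advances this writer's own global_step counter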
Example #2
def _make_summary_writer(name, factory, **kwargs):
    resource = gen_summary_ops.summary_writer(shared_name=name)
    # TODO(apassos): Consider doing this instead.
    # node = factory(resource, **kwargs)
    # if not context.executing_eagerly():
    #   ops.get_default_session().run(node)
    ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME,
                          factory(resource, **kwargs))
    return SummaryWriter(resource)
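Because the initializer op is only stashed in a collection here, graph-mode
callers must run it themselves before any summaries are written. A hedged
sketch of that step (the session setup is illustrative; the collection
constant comes from this module):

with tf.Session() as sess:
  # Run the deferred create_summary_file_writer op recorded above.
  sess.run(ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME))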
Example #3
def _make_summary_writer(name, factory, **kwargs):
  resource = gen_summary_ops.summary_writer(shared_name=name)
  # TODO(apassos): Consider doing this instead.
  # node = factory(resource, **kwargs)
  # if not context.in_eager_mode():
  #   ops.get_default_session().run(node)
  ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME,
                        factory(resource, **kwargs))
  return SummaryWriter(resource)
Example #4
  def __init__(self,
               logdir,
               max_queue=10,
               flush_secs=120,
               filename_suffix=""):
    """Summary writer for TensorBoard, compatible with eager execution.

    If necessary, multiple instances of `SummaryWriter` can be created, with
    distinct `logdir`s and `name`s. Each `SummaryWriter` instance will retain
    its independent `global_step` counter and data writing destination.

    Example:
    ```python
    writer = tfe.SummaryWriter("my_model")

    # ... Code that sets up the model and data batches ...

    for _ in range(train_iters):
      loss = model.train_batch(batch)
      writer.scalar("loss", loss)
      writer.step()
    ```

    Args:
      logdir: Directory in which summary files will be written.
      max_queue: Number of summary items to buffer before flushing to
        filesystem. If 0, summaries will be flushed immediately.
      flush_secs: Number of seconds between forced commits to disk.
      filename_suffix: Suffix of the event protobuf files in which the summary
        data are stored.

    Raises:
      ValueError: If this constructor is called when eager execution is not
        enabled.
    """
    # TODO(apassos, ashankar): Make this class and the underlying
    # contrib.summary_ops compatible with graph mode and remove this check.
    if not context.in_eager_mode():
      raise ValueError(
          "Use of SummaryWriter is currently supported only with eager "
          "execution enabled. File an issue at "
          "https://github.com/tensorflow/tensorflow/issues/new to express "
          "interest in fixing this.")

    # TODO(cais): Consider adding name keyword argument, which if None or empty,
    # will register the global global_step that training_util.get_global_step()
    # can find.
    with context.device(self._CPU_DEVICE):
      self._name = uuid.uuid4().hex
      self._global_step = 0
      self._global_step_tensor = variable_scope.get_variable(
          "global_step/summary_writer/" + self._name,
          shape=[], dtype=dtypes.int64,
          initializer=init_ops.zeros_initializer())
      self._global_step_dirty = False
      self._resource = gen_summary_ops.summary_writer(shared_name=self._name)
      gen_summary_ops.create_summary_file_writer(
          self._resource, logdir, max_queue, flush_secs, filename_suffix)
Example #5
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_secs=None,
                               filename_suffix=None):
    """Creates a summary file writer in the current context."""
    if max_queue is None:
        max_queue = constant_op.constant(10)
    if flush_secs is None:
        flush_secs = constant_op.constant(120)
    if filename_suffix is None:
        filename_suffix = constant_op.constant("")
    resource = gen_summary_ops.summary_writer()
    gen_summary_ops.create_summary_file_writer(resource, logdir, max_queue,
                                               flush_secs, filename_suffix)
    context.context().summary_writer_resource = resource
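This variant installs the writer on the eager context as the default, so
subsequent summary ops pick it up implicitly instead of receiving a handle. A
hedged sketch of the intended call pattern (the logdir is hypothetical):

create_summary_file_writer("/tmp/logs")
# From here on, summary ops that consult
# context.context().summary_writer_resource write to /tmp/logs without being
# passed the writer explicitly.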
Example #6
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_secs=None,
                               filename_suffix=None):
  """Creates a summary file writer in the current context."""
  if max_queue is None:
    max_queue = constant_op.constant(10)
  if flush_secs is None:
    flush_secs = constant_op.constant(120)
  if filename_suffix is None:
    filename_suffix = constant_op.constant("")
  resource = gen_summary_ops.summary_writer()
  gen_summary_ops.create_summary_file_writer(resource, logdir, max_queue,
                                             flush_secs, filename_suffix)
  context.context().summary_writer_resource = resource
Example #7
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_secs=None,
                               filename_suffix=None,
                               name=None):
  """Creates a summary file writer in the current context."""
  if max_queue is None:
    max_queue = constant_op.constant(10)
  if flush_secs is None:
    flush_secs = constant_op.constant(120)
  if filename_suffix is None:
    filename_suffix = constant_op.constant("")
  resource = gen_summary_ops.summary_writer(shared_name=name)
  # TODO(apassos) ensure the initialization op runs when in graph mode; consider
  # calling session.run here.
  gen_summary_ops.create_summary_file_writer(resource, logdir, max_queue,
                                             flush_secs, filename_suffix)
  return SummaryWriter(resource)
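Passing name here becomes shared_name on the underlying resource op, so two
calls with the same name should resolve to the same writer resource. A hedged
sketch (the logdir and name are hypothetical):

w1 = create_summary_file_writer("/tmp/logs", name="train_writer")
w2 = create_summary_file_writer("/tmp/logs", name="train_writer")
# Both SummaryWriter objects wrap the resource registered under
# shared_name="train_writer", so writes through either land in the same
# event file.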
Example #8
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_secs=None,
                               filename_suffix=None,
                               name=None):
    """Creates a summary file writer in the current context."""
    if max_queue is None:
        max_queue = constant_op.constant(10)
    if flush_secs is None:
        flush_secs = constant_op.constant(120)
    if filename_suffix is None:
        filename_suffix = constant_op.constant("")
    resource = gen_summary_ops.summary_writer(shared_name=name)
    # TODO(apassos) ensure the initialization op runs when in graph mode; consider
    # calling session.run here.
    gen_summary_ops.create_summary_file_writer(resource, logdir, max_queue,
                                               flush_secs, filename_suffix)
    return SummaryWriter(resource)
Example #9
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_millis=None,
                               filename_suffix=None,
                               name=None):
    """Creates a summary file writer in the current context.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
     which writes to the directory named by the string. If None, returns
     a mock object which acts like a summary writer but does nothing,
     useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
     flush once the queue gets bigger than this.
    flush_millis: the largest interval between flushes.
    filename_suffix: optional suffix for the event file name.
    name: name for the summary writer.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
    if logdir is None:
        return SummaryWriter(None)
    with ops.device("cpu:0"):
        if max_queue is None:
            max_queue = constant_op.constant(10)
        if flush_millis is None:
            flush_millis = constant_op.constant(2 * 60 * 1000)
        if filename_suffix is None:
            filename_suffix = constant_op.constant("")
        resource = gen_summary_ops.summary_writer(shared_name=name)
        # TODO(apassos) ensure the initialization op runs when in graph mode;
        # consider calling session.run here.
        ops.add_to_collection(
            _SUMMARY_WRITER_INIT_COLLECTION_NAME,
            gen_summary_ops.create_summary_file_writer(resource, logdir,
                                                       max_queue, flush_millis,
                                                       filename_suffix))
        return SummaryWriter(resource)
Example #10
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_millis=None,
                               filename_suffix=None,
                               name=None):
  """Creates a summary file writer in the current context.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
      which writes to the directory named by the string. If None, returns
      a mock object which acts like a summary writer but does nothing,
      useful as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this.
    flush_millis: the largest interval, in milliseconds, between flushes.
    filename_suffix: optional suffix for the event file name.
    name: name for the summary writer.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  if logdir is None:
    return SummaryWriter(None)
  with ops.device("cpu:0"):
    if max_queue is None:
      max_queue = constant_op.constant(10)
    if flush_millis is None:
      flush_millis = constant_op.constant(2 * 60 * 1000)
    if filename_suffix is None:
      filename_suffix = constant_op.constant("")
    resource = gen_summary_ops.summary_writer(shared_name=name)
    # TODO(apassos) ensure the initialization op runs when in graph mode;
    # consider calling session.run here.
    ops.add_to_collection(
        _SUMMARY_WRITER_INIT_COLLECTION_NAME,
        gen_summary_ops.create_summary_file_writer(
            resource, logdir, max_queue, flush_millis, filename_suffix))
    return SummaryWriter(resource)
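Since logdir=None returns a no-op stand-in, summaries can be disabled without
branching in training code. A hedged sketch, assuming this SummaryWriter
exposes the usual as_default() context manager used elsewhere in this API:

writer = create_summary_file_writer(logdir_or_none)  # may be None
with writer.as_default():  # assumption: mock and real writers share this API
  # Summary ops run normally; with the mock writer they simply do nothing.
  train_step()  # hypothetical training function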