Example #1
    def testWriteEventInvalidType(self):
        class _Invalid(object):
            def __str__(self):
                return "Invalid"

        with self.assertRaisesRegex(TypeError, "Invalid"):
            _pywrap_events_writer.EventsWriter(b"foo").WriteEvent(_Invalid())
Example #2
  def testWriteEvents(self):
    file_prefix = os.path.join(self.get_temp_dir(), "events")
    writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(file_prefix))
    filename = compat.as_text(writer.FileName())
    event_written = event_pb2.Event(
        wall_time=123.45,
        step=67,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(
                tag="foo", simple_value=89.0)]))
    writer.WriteEvent(event_written)
    writer.Flush()
    writer.Close()

    with self.assertRaises(errors.NotFoundError):
      for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
        self.assertTrue(False)

    reader = tf_record.tf_record_iterator(filename)
    event_read = event_pb2.Event()

    event_read.ParseFromString(next(reader))
    self.assertTrue(event_read.HasField("file_version"))

    event_read.ParseFromString(next(reader))
    # Second event
    self.assertProtoEquals("""
    wall_time: 123.45 step: 67
    summary { value { tag: 'foo' simple_value: 89.0 } }
    """, event_read)

    with self.assertRaises(StopIteration):
      next(reader)
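
A condensed round-trip sketch of the same flow, writing a single event and reading it back with the public tf.compat.v1.io.tf_record_iterator (paths here are illustrative):

import os
import tempfile

import tensorflow as tf
from tensorflow.core.util import event_pb2
from tensorflow.python import _pywrap_events_writer

prefix = os.path.join(tempfile.mkdtemp(), "events")
writer = _pywrap_events_writer.EventsWriter(prefix.encode("utf-8"))
filename = writer.FileName().decode("utf-8")
writer.WriteEvent(event_pb2.Event(wall_time=123.45, step=67))
writer.Flush()
writer.Close()

for record in tf.compat.v1.io.tf_record_iterator(filename):
    event = event_pb2.Event()
    event.ParseFromString(record)
    # The first record is the file_version header written by EventsWriter.
    print(event.step, event.wall_time, event.file_version)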
Example #3
 def __init__(self, dir):
     import os
     import os.path as osp
     os.makedirs(dir, exist_ok=True)
     self.dir = dir
     self.step = 1
     prefix = 'events'
     path = osp.join(osp.abspath(dir), prefix)
     import tensorflow as tf
     from tensorflow.python import pywrap_tensorflow
     from tensorflow.python import _pywrap_events_writer
     from tensorflow.core.util import event_pb2
     from tensorflow.python.util import compat
     self.tf = tf
     self.event_pb2 = event_pb2
     self.pywrap_tensorflow = pywrap_tensorflow
     self.writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(path))
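
A hypothetical companion method for a logger like the one above, showing how the stored event_pb2 and writer handles would typically be used to emit a scalar (the method name log_scalar is an assumption, not part of the original class):

 def log_scalar(self, tag, value):
     # Build a Summary proto holding a single scalar value, wrap it in an
     # Event stamped with the current step, and hand it to the writer.
     summary = self.tf.compat.v1.Summary(
         value=[self.tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
     event = self.event_pb2.Event(summary=summary)
     event.step = self.step
     self.writer.WriteEvent(event)
     self.writer.Flush()
     self.step += 1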
Example #4
def maybe_create_event_file(logdir):
  """Create an empty event file if not already exists.

  This event file indicates that we have a plugins/profile/ directory in the
  current logdir.

  Args:
    logdir: log directory.
  """
  for file_name in gfile.ListDirectory(logdir):
    if file_name.endswith(_EVENT_FILE_SUFFIX):
      return
  # TODO(b/127330388): Use summary_ops_v2.create_file_writer instead.
  event_writer = _pywrap_events_writer.EventsWriter(
      compat.as_bytes(os.path.join(logdir, 'events')))
  event_writer.InitWithSuffix(compat.as_bytes(_EVENT_FILE_SUFFIX))
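
The suffix trick in isolation: InitWithSuffix makes the writer create its file immediately with the given suffix appended. A minimal sketch (the '.profile-empty' suffix mirrors what the profiler's _EVENT_FILE_SUFFIX constant holds, but treat it as an assumption here):

import os
import tempfile

from tensorflow.python import _pywrap_events_writer
from tensorflow.python.util import compat

logdir = tempfile.mkdtemp()
event_writer = _pywrap_events_writer.EventsWriter(
    compat.as_bytes(os.path.join(logdir, 'events')))
event_writer.InitWithSuffix(compat.as_bytes('.profile-empty'))
# The resulting file name ends with the suffix.
print(compat.as_text(event_writer.FileName()))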
Example #5
 def __init__(self, dir, init_step=0):
     import os.path as osp
     from tensorflow.io import gfile
     gfile.makedirs(dir)
     self.dir = dir
     self.step = init_step
     prefix = 'events'
     if dir.startswith('gs://'):
         path = osp.join(dir, prefix)
     else:
         path = osp.join(osp.abspath(dir), prefix)
     gfile.makedirs(path)
     import tensorflow.compat.v1 as tf
     from tensorflow.python import _pywrap_events_writer
     from tensorflow.core.util import event_pb2
     from tensorflow.python.util import compat
     self.tf = tf
     self.event_pb2 = event_pb2
     self.writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(path))
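
The gs:// branch exists because os.path.abspath would mangle a GCS URL, collapsing the double slash and prepending the working directory; a quick illustration:

import os.path as osp

print(osp.join('gs://bucket/run', 'events'))   # 'gs://bucket/run/events'
print(osp.abspath('gs://bucket/run'))          # e.g. '/home/user/gs:/bucket/run'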
Example #6
    def _create_events_writer(self, directory):
        """Creates a new events writer.

        Args:
          directory: The directory in which to write files containing events.

        Returns:
          A new events writer, which corresponds to a new events file.
        """
        total_size = 0
        events_files = self._fetch_events_files_on_disk()
        for file_name in events_files:
            file_path = os.path.join(self._events_directory, file_name)
            total_size += tf.io.gfile.stat(file_path).length

        if total_size >= self.total_file_size_cap_bytes:
            # The total size written to disk is too big. Delete events files until
            # the size is below the cap.
            for file_name in events_files:
                if total_size < self.total_file_size_cap_bytes:
                    break

                file_path = os.path.join(self._events_directory, file_name)
                file_size = tf.io.gfile.stat(file_path).length
                try:
                    tf.io.gfile.remove(file_path)
                    total_size -= file_size
                    logger.info(
                        "Deleted %s because events files take up over %d bytes",
                        file_path,
                        self.total_file_size_cap_bytes,
                    )
                except IOError as err:
                    logger.error("Deleting %s failed: %s", file_path, err)

        # We increment this index because each events writer must differ in prefix.
        self._events_file_count += 1
        file_path = "%s.%d.%d" % (
            os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),
            time.time(),
            self._events_file_count,
        )
        logger.info("Creating events file %s", file_path)
        return tf_events_writer.EventsWriter(tf.compat.as_bytes(file_path))
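
The cap-enforcement loop above reduces to a simple pattern: sum the file sizes, then delete files (oldest first, as _fetch_events_files_on_disk is assumed to return them) until the total drops below the cap. A pure-Python sketch of that pattern:

import os

def enforce_size_cap(directory, file_names, cap_bytes):
    # file_names are assumed to be sorted oldest-first.
    paths = [os.path.join(directory, name) for name in file_names]
    total = sum(os.stat(path).st_size for path in paths)
    for path in paths:
        if total < cap_bytes:
            break
        size = os.stat(path).st_size
        os.remove(path)
        total -= size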
Example #7
    def __init__(self,
                 logdir,
                 max_queue=10,
                 flush_secs=120,
                 filename_suffix=None):
        """Creates a `EventFileWriter` and an event file to write to.

    On construction the summary writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers, which are written to
    disk via the add_event method.

    The other arguments to the constructor control the asynchronous writes to
    the event file:

    *  `flush_secs`: How often, in seconds, to flush the added summaries
       and events to disk.
    *  `max_queue`: Maximum number of summaries or events pending to be
       written to disk before one of the 'add' calls block.

    Args:
      logdir: A string. Directory where event file will be written.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
    """
        self._logdir = str(logdir)
        if not gfile.IsDirectory(self._logdir):
            gfile.MakeDirs(self._logdir)
        self._event_queue = six.moves.queue.Queue(max_queue)
        self._ev_writer = _pywrap_events_writer.EventsWriter(
            compat.as_bytes(os.path.join(self._logdir, "events")))
        self._flush_secs = flush_secs
        self._sentinel_event = self._get_sentinel_event()
        if filename_suffix:
            self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))
        self._closed = False
        self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                          self._flush_secs,
                                          self._sentinel_event)

        self._worker.start()
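
For code that does not need the pybind layer directly, the supported public API covers the same ground; a minimal sketch with tf.summary.create_file_writer (TF 2.x):

import tensorflow as tf

writer = tf.summary.create_file_writer(
    '/tmp/logdir', max_queue=10, flush_millis=120000)
with writer.as_default():
    tf.summary.scalar('loss', 0.5, step=1)
writer.close()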
Example #8
  def setUp(self):
    super(DebuggerPluginTestBase, self).setUp()
    # Importing the debugger_plugin can sometimes unfortunately produce errors.
    try:
      # pylint: disable=g-import-not-at-top
      from tensorboard.plugins.debugger import debugger_plugin
      from tensorboard.plugins.debugger import debugger_server_lib
      # pylint: enable=g-import-not-at-top
    except Exception as e:  # pylint: disable=broad-except
      raise self.skipTest(
          'Skipping test because importing some modules failed: %r' % e)
    self.debugger_plugin_module = debugger_plugin

    # Populate the log directory with debugger event for run '.'.
    self.log_dir = self.get_temp_dir()
    file_prefix = tf.compat.as_bytes(
        os.path.join(self.log_dir, 'events.debugger'))
    writer = tf_events_writer.EventsWriter(file_prefix)
    device_name = '/job:localhost/replica:0/task:0/cpu:0'
    writer.WriteEvent(
        self._CreateEventWithDebugNumericSummary(
            device_name=device_name,
            op_name='layers/Matmul',
            output_slot=0,
            wall_time=42,
            step=2,
            list_of_values=(list(range(12)) +
                            [float(tf.float32.as_datatype_enum), 1.0, 3.0])))
    writer.WriteEvent(
        self._CreateEventWithDebugNumericSummary(
            device_name=device_name,
            op_name='layers/Matmul',
            output_slot=1,
            wall_time=43,
            step=7,
            list_of_values=(
                list(range(12)) +
                [float(tf.float64.as_datatype_enum), 2.0, 3.0, 3.0])))
    writer.WriteEvent(
        self._CreateEventWithDebugNumericSummary(
            device_name=device_name,
            op_name='logits/Add',
            output_slot=0,
            wall_time=1337,
            step=7,
            list_of_values=(list(range(12)) +
                            [float(tf.int32.as_datatype_enum), 2.0, 3.0, 3.0])))
    writer.WriteEvent(
        self._CreateEventWithDebugNumericSummary(
            device_name=device_name,
            op_name='logits/Add',
            output_slot=0,
            wall_time=1338,
            step=8,
            list_of_values=(list(range(12)) +
                            [float(tf.int16.as_datatype_enum), 0.0])))
    writer.Close()

    # Populate the log directory with debugger event for run 'run_foo'.
    run_foo_directory = os.path.join(self.log_dir, 'run_foo')
    os.mkdir(run_foo_directory)
    file_prefix = tf.compat.as_bytes(
        os.path.join(run_foo_directory, 'events.debugger'))
    writer = tf_events_writer.EventsWriter(file_prefix)
    writer.WriteEvent(
        self._CreateEventWithDebugNumericSummary(
            device_name=device_name,
            op_name='layers/Variable',
            output_slot=0,
            wall_time=4242,
            step=42,
            list_of_values=(list(range(12)) +
                            [float(tf.int16.as_datatype_enum), 1.0, 8.0])))
    writer.Close()

    # Start a server that will receive requests and respond with health pills.
    multiplexer = event_multiplexer.EventMultiplexer({
        '.': self.log_dir,
        'run_foo': run_foo_directory,
    })
    multiplexer.Reload()
    self.debugger_data_server_grpc_port = portpicker.pick_unused_port()

    # Fake threading behavior so that threads are synchronous.
    tf.compat.v1.test.mock.patch('threading.Thread.start', threading.Thread.run).start()

    self.mock_debugger_data_server = tf.compat.v1.test.mock.Mock(
        debugger_server_lib.DebuggerDataServer)
    self.mock_debugger_data_server_class = tf.compat.v1.test.mock.Mock(
        debugger_server_lib.DebuggerDataServer,
        return_value=self.mock_debugger_data_server)

    tf.compat.v1.test.mock.patch.object(
        debugger_server_lib,
        'DebuggerDataServer',
        self.mock_debugger_data_server_class).start()

    self.context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    self.plugin = debugger_plugin.DebuggerPlugin(self.context)
    self.plugin.listen(self.debugger_data_server_grpc_port)
    wsgi_app = application.TensorBoardWSGI([self.plugin])
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)

    # The debugger data server should be started at the correct port.
    self.mock_debugger_data_server_class.assert_called_once_with(
        self.debugger_data_server_grpc_port, self.log_dir)

    mock_debugger_data_server = self.mock_debugger_data_server
    start = mock_debugger_data_server.start_the_debugger_data_receiving_server
    self.assertEqual(1, start.call_count)
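
The threading.Thread.start patch in the setup above is what makes the test deterministic: with start replaced by run, each thread's body executes synchronously on the calling thread. The same trick in isolation, using the standard-library mock rather than the tf.compat.v1.test.mock alias:

import threading
from unittest import mock

with mock.patch.object(threading.Thread, 'start', threading.Thread.run):
    t = threading.Thread(target=print, args=('runs inline',))
    t.start()  # executes print immediately, no new thread is spawned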