def _get_runs_tensorboard(self, path, model_name=None, model_version=None):
    """Discover TensorBoard runs under *path* and describe each as a dict.

    Args:
        path: root log directory to scan for runs.
        model_name: optional model name copied into every run dict.
        model_version: optional model version copied into every run dict.

    Returns:
        A list of dicts with keys 'run_id', 'model_name', 'model_version',
        'path', and 'created_at' — epoch milliseconds of the oldest event
        file found under the run's directory, or 0 when no well-formed
        event file exists for the run.
    """
    def directory_loader_factory(path):
        return directory_loader.DirectoryLoader(
            path,
            event_file_loader.TimestampedEventFileLoader,
            path_filter=io_wrapper.IsTensorFlowEventsFile,
        )

    _logdir_loader = logdir_loader.LogdirLoader(path, directory_loader_factory)
    _logdir_loader.synchronize_runs()
    run_ids = _logdir_loader.get_run_events().keys()
    runs = [{
        'run_id': run_id,
        'model_name': model_name,
        'model_version': model_version,
        'path': os.path.join(path, run_id)
    } for run_id in run_ids]
    for run in runs:
        created_timestamps = []
        for root, _dirs, files in os.walk(run['path']):
            for file in files:
                # Event files look like "events.out.tfevents.<secs>.<host>...";
                # the 4th dot-separated field is the creation time in seconds.
                if file.startswith("events.out.tfevents"):
                    try:
                        created_timestamps.append(int(file.split('.')[3]))
                    except (IndexError, ValueError):
                        # Unexpected file name shape — skip rather than crash.
                        continue
        # Parse to int BEFORE taking min(): string min() is lexicographic and
        # gives wrong answers across digit-length boundaries. Guard the empty
        # case, which previously made min() raise ValueError.
        run['created_at'] = (
            min(created_timestamps) * 1000 if created_timestamps else 0
        )
    return runs
def __init__(self, writer_client, logdir, rate_limiter=None):
    """Constructs a TensorBoardUploader.

    Args:
      writer_client: a TensorBoardWriterService stub instance
      logdir: path of the log directory to upload
      rate_limiter: a `RateLimiter` to use to limit upload cycle frequency
    """
    self._api = writer_client
    self._logdir = logdir
    self._request_builder = None
    # Use the caller-supplied limiter when given; otherwise build one with
    # the module-level minimum cycle duration.
    self._rate_limiter = (
        util.RateLimiter(_MIN_UPLOAD_CYCLE_DURATION_SECS)
        if rate_limiter is None
        else rate_limiter
    )

    # An event file counts as active while its last write is within
    # _EVENT_FILE_INACTIVE_SECS seconds of "now".
    def is_active(last_write_secs):
        return last_write_secs + _EVENT_FILE_INACTIVE_SECS >= time.time()

    directory_loader_factory = functools.partial(
        directory_loader.DirectoryLoader,
        loader_factory=event_file_loader.TimestampedEventFileLoader,
        path_filter=io_wrapper.IsTensorFlowEventsFile,
        active_filter=is_active,
    )
    self._logdir_loader = logdir_loader.LogdirLoader(
        self._logdir, directory_loader_factory)
def _create_logdir_loader(self, logdir):
    """Returns a LogdirLoader over *logdir* that reads TF event files."""
    def make_directory_loader(subdirectory):
        return directory_loader.DirectoryLoader(
            subdirectory,
            event_file_loader.TimestampedEventFileLoader,
            path_filter=io_wrapper.IsTensorFlowEventsFile,
        )

    return logdir_loader.LogdirLoader(logdir, make_directory_loader)
def __init__(
    self,
    writer_client,
    logdir,
    rpc_rate_limiter=None,
    name=None,
    description=None,
):
    """Constructs a TensorBoardUploader.

    Args:
      writer_client: a TensorBoardWriterService stub instance
      logdir: path of the log directory to upload
      rpc_rate_limiter: a `RateLimiter` to use to limit write RPC frequency.
        Note this limit applies at the level of single RPCs in the Scalar
        and Tensor case, but at the level of an entire blob upload in the
        Blob case--which may require a few preparatory RPCs and a stream
        of chunks. Note the chunk stream is internally rate-limited by
        backpressure from the server, so it is not a concern that we do
        not explicitly rate-limit within the stream here.
      name: String name to assign to the experiment.
      description: String description to assign to the experiment.
    """
    self._api = writer_client
    self._logdir = logdir
    self._name = name
    self._description = description
    self._request_sender = None
    # Honor an injected limiter; otherwise fall back to the module default.
    self._rpc_rate_limiter = (
        rpc_rate_limiter
        if rpc_rate_limiter is not None
        else util.RateLimiter(_MIN_WRITE_RPC_INTERVAL_SECS)
    )

    # An event file counts as active while its last write is within
    # _EVENT_FILE_INACTIVE_SECS seconds of "now".
    def is_active(last_write_secs):
        return last_write_secs + _EVENT_FILE_INACTIVE_SECS >= time.time()

    directory_loader_factory = functools.partial(
        directory_loader.DirectoryLoader,
        loader_factory=event_file_loader.TimestampedEventFileLoader,
        path_filter=io_wrapper.IsTensorFlowEventsFile,
        active_filter=is_active,
    )
    self._logdir_loader = logdir_loader.LogdirLoader(
        self._logdir, directory_loader_factory)
def __init__(
    self,
    writer_client,
    logdir,
    allowed_plugins,
    upload_limits,
    logdir_poll_rate_limiter=None,
    rpc_rate_limiter=None,
    tensor_rpc_rate_limiter=None,
    blob_rpc_rate_limiter=None,
    name=None,
    description=None,
    verbosity=None,
    one_shot=None,
):
    """Constructs a TensorBoardUploader.

    Args:
      writer_client: a TensorBoardWriterService stub instance
      logdir: path of the log directory to upload
      allowed_plugins: collection of string plugin names; events will only
        be uploaded if their time series's metadata specifies one of these
        plugin names
      upload_limits: instance of tensorboard.service.UploadLimits proto.
      logdir_poll_rate_limiter: a `RateLimiter` to use to limit logdir
        polling frequency, to avoid thrashing disks, especially on
        networked file systems
      rpc_rate_limiter: a `RateLimiter` to use to limit write RPC
        frequency. Note this limit applies at the level of single RPCs in
        the Scalar and Tensor case, but at the level of an entire blob
        upload in the Blob case--which may require a few preparatory RPCs
        and a stream of chunks. Note the chunk stream is internally
        rate-limited by backpressure from the server, so it is not a
        concern that we do not explicitly rate-limit within the stream
        here.
      name: String name to assign to the experiment.
      description: String description to assign to the experiment.
      verbosity: Level of verbosity, an integer. Supported value: 0 - No
        upload statistics is printed. 1 - Print upload statistics while
        uploading data (default).
      one_shot: Once uploading starts, upload only the existing data in
        the logdir and then return immediately, instead of the default
        behavior of continuing to listen for new data in the logdir and
        upload them when it appears.
    """
    self._api = writer_client
    self._logdir = logdir
    self._allowed_plugins = frozenset(allowed_plugins)
    self._upload_limits = upload_limits
    self._name = name
    self._description = description
    self._verbosity = 1 if verbosity is None else verbosity
    self._one_shot = False if one_shot is None else one_shot
    self._request_sender = None

    # Prefer an injected limiter; otherwise build the default lazily so the
    # default-interval expression is only evaluated when actually needed.
    def pick_limiter(provided, make_default):
        return provided if provided is not None else make_default()

    self._logdir_poll_rate_limiter = pick_limiter(
        logdir_poll_rate_limiter,
        lambda: util.RateLimiter(_MIN_LOGDIR_POLL_INTERVAL_SECS),
    )
    # The proto intervals are divided by 1000 — presumably milliseconds
    # converted to the seconds RateLimiter expects.
    self._rpc_rate_limiter = pick_limiter(
        rpc_rate_limiter,
        lambda: util.RateLimiter(
            self._upload_limits.min_scalar_request_interval / 1000),
    )
    self._tensor_rpc_rate_limiter = pick_limiter(
        tensor_rpc_rate_limiter,
        lambda: util.RateLimiter(
            self._upload_limits.min_tensor_request_interval / 1000),
    )
    self._blob_rpc_rate_limiter = pick_limiter(
        blob_rpc_rate_limiter,
        lambda: util.RateLimiter(
            self._upload_limits.min_blob_request_interval / 1000),
    )

    # An event file counts as active while its last write is within
    # _EVENT_FILE_INACTIVE_SECS seconds of "now".
    def is_active(last_write_secs):
        return last_write_secs + _EVENT_FILE_INACTIVE_SECS >= time.time()

    directory_loader_factory = functools.partial(
        directory_loader.DirectoryLoader,
        loader_factory=event_file_loader.TimestampedEventFileLoader,
        path_filter=io_wrapper.IsTensorFlowEventsFile,
        active_filter=is_active,
    )
    self._logdir_loader = logdir_loader.LogdirLoader(
        self._logdir, directory_loader_factory)
def __init__(
    self,
    writer_client,
    logdir,
    allowed_plugins,
    max_blob_size,
    logdir_poll_rate_limiter=None,
    rpc_rate_limiter=None,
    blob_rpc_rate_limiter=None,
    name=None,
    description=None,
):
    """Constructs a TensorBoardUploader.

    Args:
      writer_client: a TensorBoardWriterService stub instance
      logdir: path of the log directory to upload
      allowed_plugins: collection of string plugin names; events will only
        be uploaded if their time series's metadata specifies one of these
        plugin names
      max_blob_size: the maximum allowed size for blob uploads.
      logdir_poll_rate_limiter: a `RateLimiter` to use to limit logdir
        polling frequency, to avoid thrashing disks, especially on
        networked file systems
      rpc_rate_limiter: a `RateLimiter` to use to limit write RPC
        frequency. Note this limit applies at the level of single RPCs in
        the Scalar and Tensor case, but at the level of an entire blob
        upload in the Blob case--which may require a few preparatory RPCs
        and a stream of chunks. Note the chunk stream is internally
        rate-limited by backpressure from the server, so it is not a
        concern that we do not explicitly rate-limit within the stream
        here.
      name: String name to assign to the experiment.
      description: String description to assign to the experiment.
    """
    self._api = writer_client
    self._logdir = logdir
    self._allowed_plugins = frozenset(allowed_plugins)
    self._max_blob_size = max_blob_size
    self._name = name
    self._description = description
    self._request_sender = None
    # Each limiter honors an injected instance and otherwise falls back to
    # the corresponding module-level minimum interval.
    self._logdir_poll_rate_limiter = (
        logdir_poll_rate_limiter
        if logdir_poll_rate_limiter is not None
        else util.RateLimiter(_MIN_LOGDIR_POLL_INTERVAL_SECS)
    )
    self._rpc_rate_limiter = (
        rpc_rate_limiter
        if rpc_rate_limiter is not None
        else util.RateLimiter(_MIN_WRITE_RPC_INTERVAL_SECS)
    )
    self._blob_rpc_rate_limiter = (
        blob_rpc_rate_limiter
        if blob_rpc_rate_limiter is not None
        else util.RateLimiter(_MIN_BLOB_WRITE_RPC_INTERVAL_SECS)
    )

    # An event file counts as active while its last write is within
    # _EVENT_FILE_INACTIVE_SECS seconds of "now".
    def is_active(last_write_secs):
        return last_write_secs + _EVENT_FILE_INACTIVE_SECS >= time.time()

    directory_loader_factory = functools.partial(
        directory_loader.DirectoryLoader,
        loader_factory=event_file_loader.TimestampedEventFileLoader,
        path_filter=io_wrapper.IsTensorFlowEventsFile,
        active_filter=is_active,
    )
    self._logdir_loader = logdir_loader.LogdirLoader(
        self._logdir, directory_loader_factory)