    def Reload(self):
        """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
        self._activated = True
        with self._generator_mutex:
            for event in self._generator.Load():
                if event.HasField("graph_def"):
                    if self._graph is not None:
                        logging.warn(
                            ("Found more than one graph event per run." "Overwritting the graph with the newest event")
                        )
                    self._graph = event.graph_def
                elif event.HasField("summary"):
                    for value in event.summary.value:
                        if value.HasField("simple_value"):
                            self._ProcessScalar(value.tag, event.wall_time, event.step, value.simple_value)
                        elif value.HasField("histo"):
                            self._ProcessHistogram(value.tag, event.wall_time, event.step, value.histo)
                            self._ProcessCompressedHistogram(value.tag, event.wall_time, event.step, value.histo)
                        elif value.HasField("image"):
                            self._ProcessImage(value.tag, event.wall_time, event.step, value.image)
        return self
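A minimal usage sketch of this API (the import path and log directory are assumptions; the module location varies across TensorFlow/TensorBoard versions):

from tensorflow.python.summary.event_accumulator import EventAccumulator

acc = EventAccumulator('/tmp/my_run')   # hypothetical event-file directory
acc.Reload()                            # ingest everything written so far
print(acc.Tags())                       # tags discovered, grouped by type
# Because Reload() returns the accumulator, calls can be chained:
scalars = acc.Reload().Scalars('loss')  # 'loss' is a hypothetical tag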
Example #2
    def _Purge(self, event):
        """Purge all events that have occurred after the given event.step.

    Purge all events that occurred after the given event.step, but only for
    the tags that the event has. Non-sequential event.steps suggest that a
    TensorFlow restart occurred, and we discard the out-of-order events to
    display a consistent view in TensorBoard.

    Previously, the purge logic discarded all events after event.step (not just
    within the affected tags), but this caused problems where race conditions in
    the supervisor led to many events being unintentionally discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
    """

        def _GetExpiredList(value):
            ## Keep data in reservoirs that has a step less than event.step
            _NotExpired = lambda x: x.step < event.step
            return [
                x.FilterItems(_NotExpired, value.tag)
                for x in (self._scalars, self._histograms, self._compressed_histograms, self._images)
            ]

        expired_per_tag = [_GetExpiredList(value) for value in event.summary.value]
        expired_per_type = [sum(x) for x in zip(*expired_per_tag)]

        if sum(expired_per_type) > 0:
            purge_msg = _GetPurgeMessage(
                self.most_recent_step, self.most_recent_wall_time, event.step, event.wall_time, *expired_per_type
            )
            logging.warn(purge_msg)
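To make the aggregation at the end concrete, here is that step in isolation as a pure-Python sketch with made-up counts (one inner list per tag, ordered as scalars, histograms, compressed histograms, images):

expired_per_tag = [[2, 0, 0, 1],   # counts purged for tag 'loss'
                   [3, 1, 1, 0]]   # counts purged for tag 'accuracy'
expired_per_type = [sum(x) for x in zip(*expired_per_tag)]
print(expired_per_type)  # [5, 1, 1, 1] -- totals per summary type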
Example #3
    def Reload(self):
        """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
        self._activated = True
        with self._generator_mutex:
            for event in self._generator.Load():
                if event.HasField('graph_def'):
                    if self._graph is not None:
                        logging.warn(
                            ('Found more than one graph event per run. '
                             'Overwriting the graph with the newest event.'))
                    self._graph = event.graph_def
                elif event.HasField('summary'):
                    for value in event.summary.value:
                        if value.HasField('simple_value'):
                            self._ProcessScalar(value.tag, event.wall_time,
                                                event.step, value.simple_value)
                        elif value.HasField('histo'):
                            self._ProcessHistogram(value.tag, event.wall_time,
                                                   event.step, value.histo)
                            self._ProcessCompressedHistogram(
                                value.tag, event.wall_time, event.step,
                                value.histo)
                        elif value.HasField('image'):
                            self._ProcessImage(value.tag, event.wall_time,
                                               event.step, value.image)
        return self
Example #4
  def Reload(self):
    """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
    self._activated = True
    with self._generator_mutex:
      for event in self._generator.Load():
        ## Check if the event happened after a crash
        ## file_version events always have step 0, ignore.
        ## TODO(danmane): Have this check for restart events explicitly
        if (event.step < self.most_recent_step and
            event.HasField('summary')):

          ## Keep data in reservoirs that has a step less than event.step
          _NotExpired = lambda x: x.step < event.step
          num_expired_scalars = self._scalars.FilterItems(_NotExpired)
          num_expired_histograms = self._histograms.FilterItems(_NotExpired)
          num_expired_compressed_histograms = self._compressed_histograms.FilterItems(
              _NotExpired)
          num_expired_images = self._images.FilterItems(_NotExpired)

          purge_msg = (
              'Detected out of order event.step likely caused by a TensorFlow '
              'restart. Purging expired events from TensorBoard display '
              'between the previous step: {} (timestamp: {}) and current step:'
              ' {} (timestamp: {}). Removing {} scalars, {} histograms, {} '
              'compressed histograms, and {} images.').format(
                  self.most_recent_step, self.most_recent_wall_time, event.step,
                  event.wall_time, num_expired_scalars, num_expired_histograms,
                  num_expired_compressed_histograms, num_expired_images)
          logging.warn(purge_msg)
        else:
          self.most_recent_step = event.step
          self.most_recent_wall_time = event.wall_time
        ## Process the event
        if event.HasField('graph_def'):
          if self._graph is not None:
            logging.warn(('Found more than one graph event per run. '
                          'Overwriting the graph with the newest event.'))
          self._graph = event.graph_def
        elif event.HasField('summary'):
          for value in event.summary.value:
            if value.HasField('simple_value'):
              self._ProcessScalar(value.tag, event.wall_time, event.step,
                                  value.simple_value)
            elif value.HasField('histo'):
              self._ProcessHistogram(value.tag, event.wall_time, event.step,
                                     value.histo)
              self._ProcessCompressedHistogram(value.tag, event.wall_time,
                                               event.step, value.histo)
            elif value.HasField('image'):
              self._ProcessImage(value.tag, event.wall_time, event.step,
                                 value.image)
    return self
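The restart check above reduces to a single comparison against the largest step seen so far. A toy trace (made-up step values) shows when the purge fires:

most_recent_step = -1
for step in [0, 1, 2, 3, 1, 2]:   # the jump from 3 back to 1 implies a restart
    if step < most_recent_step:
        print('purging events with step >= %d' % step)
    else:
        most_recent_step = step
# Prints for steps 1 and 2: both are below the high-water mark of 3,
# and _NotExpired keeps only reservoir items with step < event.step.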
Example #5
    def Reload(self):
        """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
        self._activated = True
        with self._generator_mutex:
            for event in self._generator.Load():
                if event.HasField('file_version'):
                    new_file_version = _ParseFileVersion(event.file_version)
                    if self.file_version and self.file_version != new_file_version:
                        ## This should not happen.
                        logging.warn((
                            'Found new file_version for event.proto. This will '
                            'affect purging logic for TensorFlow restarts. '
                            'Old: {0} New: {1}').format(
                                self.file_version, new_file_version))
                    self.file_version = new_file_version

                ## Check if the event happened after a crash, and purge expired tags.
                if self.file_version and self.file_version >= 2:
                    ## If the file_version is recent enough, use the SessionLog enum
                    ## to check for restarts.
                    self._CheckForRestartAndMaybePurge(event)
                else:
                    ## If there is no file version, default to old logic of checking for
                    ## out of order steps.
                    self._CheckForOutOfOrderStepAndMaybePurge(event)

                ## Process the event
                if event.HasField('graph_def'):
                    if self._graph is not None:
                        logging.warn(
                            ('Found more than one graph event per run. '
                             'Overwriting the graph with the newest event.'))
                    self._graph = event.graph_def
                elif event.HasField('summary'):
                    for value in event.summary.value:
                        if value.HasField('simple_value'):
                            self._ProcessScalar(value.tag, event.wall_time,
                                                event.step, value.simple_value)
                        elif value.HasField('histo'):
                            self._ProcessHistogram(value.tag, event.wall_time,
                                                   event.step, value.histo)
                            self._ProcessCompressedHistogram(
                                value.tag, event.wall_time, event.step,
                                value.histo)
                        elif value.HasField('image'):
                            self._ProcessImage(value.tag, event.wall_time,
                                               event.step, value.image)
        return self
Example #6
    def __init__(self, num_units, forget_bias=1.0, input_size=None):
        """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
    """
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated." % self)
        self._num_units = num_units
        self._forget_bias = forget_bias
Example #7
  def __init__(self, num_units, forget_bias=1.0, input_size=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated." % self)
    self._num_units = num_units
    self._forget_bias = forget_bias
Example #8
  def Reload(self):
    """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
    self._activated = True
    with self._generator_mutex:
      for event in self._generator.Load():
        if event.HasField('file_version'):
          new_file_version = _ParseFileVersion(event.file_version)
          if self.file_version and self.file_version != new_file_version:
            ## This should not happen.
            logging.warn(('Found new file_version for event.proto. This will '
                          'affect purging logic for TensorFlow restarts. '
                          'Old: {0} New: {1}').format(self.file_version,
                                                      new_file_version))
          self.file_version = new_file_version

        ## Check if the event happened after a crash, and purge expired tags.
        if self.file_version and self.file_version >= 2:
          ## If the file_version is recent enough, use the SessionLog enum
          ## to check for restarts.
          self._CheckForRestartAndMaybePurge(event)
        else:
          ## If there is no file version, default to old logic of checking for
          ## out of order steps.
          self._CheckForOutOfOrderStepAndMaybePurge(event)

        ## Process the event
        if event.HasField('graph_def'):
          if self._graph is not None:
            logging.warn(('Found more than one graph event per run. '
                          'Overwriting the graph with the newest event.'))
          self._graph = event.graph_def
        elif event.HasField('summary'):
          for value in event.summary.value:
            if value.HasField('simple_value'):
              self._ProcessScalar(value.tag, event.wall_time, event.step,
                                  value.simple_value)
            elif value.HasField('histo'):
              self._ProcessHistogram(value.tag, event.wall_time, event.step,
                                     value.histo)
              self._ProcessCompressedHistogram(value.tag, event.wall_time,
                                               event.step, value.histo)
            elif value.HasField('image'):
              self._ProcessImage(value.tag, event.wall_time, event.step,
                                 value.image)
    return self
Example #9
    def _Purge(self, event, by_tags):
        """Purge all events that have occurred after the given event.step.

    If by_tags is True, purge all events that occurred after the given
    event.step, but only for the tags that the event has. Non-sequential
    event.steps suggest that a TensorFlow restart occurred, and we discard
    the out-of-order events to display a consistent view in TensorBoard.

    Discarding by tags is the safer method when we are unsure whether a restart
    has occurred, given that threading in the supervisor can cause events of
    different tags to arrive with unsynchronized step values.

    If by_tags is False, then purge all events with event.step greater than the
    given event.step. This can be used when we are certain that a TensorFlow
    restart has occurred and these events can be discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
      by_tags: Bool to dictate whether to discard all out-of-order events or
        only those that are associated with the given reference event.
    """
        ## Keep data in reservoirs that has a step less than event.step
        _NotExpired = lambda x: x.step < event.step

        if by_tags:

            def _ExpiredPerTag(value):
                return [
                    getattr(self, x).FilterItems(_NotExpired, value.tag)
                    for x in SUMMARY_TYPES
                ]

            expired_per_tags = [
                _ExpiredPerTag(value) for value in event.summary.value
            ]
            expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
        else:
            expired_per_type = [
                getattr(self, x).FilterItems(_NotExpired)
                for x in SUMMARY_TYPES
            ]

        if sum(expired_per_type) > 0:
            purge_msg = _GetPurgeMessage(self.most_recent_step,
                                         self.most_recent_wall_time,
                                         event.step, event.wall_time,
                                         *expired_per_type)
            logging.warn(purge_msg)
Example #10
    def __init__(self,
                 num_units,
                 input_size=None,
                 use_peepholes=False,
                 cell_clip=None,
                 initializer=None,
                 num_proj=None,
                 num_unit_shards=1,
                 num_proj_shards=1,
                 forget_bias=1.0):
        """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: Deprecated and unused.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
    """
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated." % self)
        self._num_units = num_units
        self._use_peepholes = use_peepholes
        self._cell_clip = cell_clip
        self._initializer = initializer
        self._num_proj = num_proj
        self._num_unit_shards = num_unit_shards
        self._num_proj_shards = num_proj_shards
        self._forget_bias = forget_bias

        if num_proj:
            self._state_size = num_units + num_proj
            self._output_size = num_proj
        else:
            self._state_size = 2 * num_units
            self._output_size = num_units
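The final branch fixes the cell's state and output sizes. A pure-Python restatement of that branch (lstm_sizes is a hypothetical helper, not part of the class):

def lstm_sizes(num_units, num_proj=None):
    # Mirrors the branch above: state holds (c, h); projection shrinks h.
    if num_proj:
        return num_units + num_proj, num_proj  # (state_size, output_size)
    return 2 * num_units, num_units

print(lstm_sizes(128))      # (256, 128)
print(lstm_sizes(128, 64))  # (192, 64)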
Example #11
  def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated." % self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj
Example #12
def _read_image(imagefn):
    '''
  This function reads in an image as a raw file and then converts
  it to a PIL image. Note that, critically, PIL must be imported before
  tensorflow for black magic reasons.

  Args:
    imagefn: A fully-qualified path to an image as a string.

  Returns:
    The PIL image requested.
  '''
    try:
        pil_image = Image.open(imagefn)
    except Exception as e:
        warn('Problem opening %s with PIL, error: %s' % (imagefn, e))
        return None
    try:
        # ensure that the image file is closed.
        pil_image.load()
    except Exception as e:
        warn('Problem loading %s with PIL, error: %s' % (imagefn, e))
        return None
    return pil_image
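A short usage sketch (the path is hypothetical; PIL's Image and a warn function are assumed to be imported as in the surrounding module):

img = _read_image('/tmp/example.jpg')
if img is not None:
    print(img.size)  # (width, height) of the decoded PIL image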
Example #13
    def __init__(self, cell, num_proj, input_size=None):
        """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated." % self)
        if not isinstance(cell, RNNCell):
            raise TypeError("The parameter cell is not RNNCell.")
        self._cell = cell
        self._num_proj = num_proj
Example #14
def _ParseFileVersion(file_version):
  """Convert the string file_version in event.proto into a float.

  Args:
    file_version: String file_version from event.proto

  Returns:
    Version number as a float.
  """
  tokens = file_version.split('brain.Event:')
  try:
    return float(tokens[-1])
  except ValueError:
    ## This should never happen according to the definition of file_version
    ## specified in event.proto.
    logging.warn(('Invalid event.proto file_version. Defaulting to use of '
                  'out-of-order event.step logic for purging expired events.'))
    return -1
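A quick worked example of the parsing behavior (inputs are made up):

print(_ParseFileVersion('brain.Event:2'))  # 2.0
print(_ParseFileVersion('brain.Event:1'))  # 1.0
print(_ParseFileVersion('garbled'))        # -1, after logging the warning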
Example #15
def _ParseFileVersion(file_version):
  """Convert the string file_version in event.proto into a float.

  Args:
    file_version: String file_version from event.proto

  Returns:
    Version number as a float.
  """
  tokens = file_version.split('brain.Event:')
  try:
    return float(tokens[-1])
  except ValueError:
    ## This should never happen according to the definition of file_version
    ## specified in event.proto.
    logging.warn(('Invalid event.proto file_version. Defaulting to use of '
                  'out-of-order event.step logic for purging expired events.'))
    return -1
Example #16
  def Reload(self):
    """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
    self._activated = True
    with self._generator_mutex:
      for event in self._generator.Load():
        if event.HasField('file_version'):
          new_file_version = _ParseFileVersion(event.file_version)
          if self.file_version and self.file_version != new_file_version:
            ## This should not happen.
            logging.warn(('Found new file_version for event.proto. This will '
                          'affect purging logic for TensorFlow restarts. '
                          'Old: {0} New: {1}').format(self.file_version,
                                                      new_file_version))
          self.file_version = new_file_version

        self._MaybePurgeOrphanedData(event)

        ## Process the event
        if event.HasField('graph_def'):
          if self._graph is not None:
            logging.warn(('Found more than one graph event per run. '
                          'Overwriting the graph with the newest event.'))
          self._graph = event.graph_def
        elif event.HasField('summary'):
          for value in event.summary.value:
            if value.HasField('simple_value'):
              self._ProcessScalar(value.tag, event.wall_time, event.step,
                                  value.simple_value)
            elif value.HasField('histo'):
              self._ProcessHistogram(value.tag, event.wall_time, event.step,
                                     value.histo)
              self._ProcessCompressedHistogram(value.tag, event.wall_time,
                                               event.step, value.histo)
            elif value.HasField('image'):
              self._ProcessImage(value.tag, event.wall_time, event.step,
                                 value.image)
    return self
Example #17
  def _Purge(self, event, by_tags):
    """Purge all events that have occurred after the given event.step.

    If by_tags is True, purge all events that occurred after the given
    event.step, but only for the tags that the event has. Non-sequential
    event.steps suggest that a TensorFlow restart occurred, and we discard
    the out-of-order events to display a consistent view in TensorBoard.

    Discarding by tags is the safer method when we are unsure whether a restart
    has occurred, given that threading in the supervisor can cause events of
    different tags to arrive with unsynchronized step values.

    If by_tags is False, then purge all events with event.step greater than the
    given event.step. This can be used when we are certain that a TensorFlow
    restart has occurred and these events can be discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
      by_tags: Bool to dictate whether to discard all out-of-order events or
        only those that are associated with the given reference event.
    """
    ## Keep data in reservoirs that has a step less than event.step
    _NotExpired = lambda x: x.step < event.step

    if by_tags:

      def _ExpiredPerTag(value):
        return [getattr(self, x).FilterItems(_NotExpired, value.tag)
                for x in SUMMARY_TYPES]

      expired_per_tags = [_ExpiredPerTag(value)
                          for value in event.summary.value]
      expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
    else:
      expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
                          for x in SUMMARY_TYPES]

    if sum(expired_per_type) > 0:
      purge_msg = _GetPurgeMessage(self.most_recent_step,
                                   self.most_recent_wall_time, event.step,
                                   event.wall_time, *expired_per_type)
      logging.warn(purge_msg)
Example #18
    def Reload(self):
        """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
        self._activated = True
        with self._generator_mutex:
            for event in self._generator.Load():
                ## Check if the event happened after a crash
                ## file_version events always have step 0, ignore.
                ## TODO(danmane): Have this check for restart events explicitly
                if (event.step < self.most_recent_step
                        and event.HasField('summary')):
                    self._Purge(event)
                else:
                    self.most_recent_step = event.step
                    self.most_recent_wall_time = event.wall_time
                ## Process the event
                if event.HasField('graph_def'):
                    if self._graph is not None:
                        logging.warn(
                            ('Found more than one graph event per run. '
                             'Overwriting the graph with the newest event.'))
                    self._graph = event.graph_def
                elif event.HasField('summary'):
                    for value in event.summary.value:
                        if value.HasField('simple_value'):
                            self._ProcessScalar(value.tag, event.wall_time,
                                                event.step, value.simple_value)
                        elif value.HasField('histo'):
                            self._ProcessHistogram(value.tag, event.wall_time,
                                                   event.step, value.histo)
                            self._ProcessCompressedHistogram(
                                value.tag, event.wall_time, event.step,
                                value.histo)
                        elif value.HasField('image'):
                            self._ProcessImage(value.tag, event.wall_time,
                                               event.step, value.image)
        return self
Example #19
  def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None,
               num_unit_shards=1, num_proj_shards=1, forget_bias=1.0):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: Deprecated and unused.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated." % self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias

    if num_proj:
      self._state_size = num_units + num_proj
      self._output_size = num_proj
    else:
      self._state_size = 2 * num_units
      self._output_size = num_units
Example #20
  def Reload(self):
    """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
    self._activated = True
    with self._generator_mutex:
      for event in self._generator.Load():
        ## Check if the event happened after a crash
        ## file_version events always have step 0, ignore.
        ## TODO(danmane): Have this check for restart events explicitly
        if (event.step < self.most_recent_step and
            event.HasField('summary')):
          self._Purge(event)
        else:
          self.most_recent_step = event.step
          self.most_recent_wall_time = event.wall_time
        ## Process the event
        if event.HasField('graph_def'):
          if self._graph is not None:
            logging.warn(('Found more than one graph event per run. '
                          'Overwriting the graph with the newest event.'))
          self._graph = event.graph_def
        elif event.HasField('summary'):
          for value in event.summary.value:
            if value.HasField('simple_value'):
              self._ProcessScalar(value.tag, event.wall_time, event.step,
                                  value.simple_value)
            elif value.HasField('histo'):
              self._ProcessHistogram(value.tag, event.wall_time, event.step,
                                     value.histo)
              self._ProcessCompressedHistogram(value.tag, event.wall_time,
                                               event.step, value.histo)
            elif value.HasField('image'):
              self._ProcessImage(value.tag, event.wall_time, event.step,
                                 value.image)
    return self
Example #21
    def _Purge(self, event):
        """Purge all events that have occurred after the given event.step.

    Purge all events that occurred after the given event.step, but only for
    the tags that the event has. Non-sequential event.steps suggest that a
    TensorFlow restart occurred, and we discard the out-of-order events to
    display a consistent view in TensorBoard.

    Previously, the purge logic discarded all events after event.step (not just
    within the affected tags), but this caused problems where race conditions in
    the supervisor led to many events being unintentionally discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
    """
        def _GetExpiredList(value):
            ## Keep data in reservoirs that has a step less than event.step
            _NotExpired = lambda x: x.step < event.step
            return [
                x.FilterItems(_NotExpired, value.tag)
                for x in (self._scalars, self._histograms,
                          self._compressed_histograms, self._images)
            ]

        expired_per_tag = [
            _GetExpiredList(value) for value in event.summary.value
        ]
        expired_per_type = [sum(x) for x in zip(*expired_per_tag)]

        if sum(expired_per_type) > 0:
            purge_msg = _GetPurgeMessage(self.most_recent_step,
                                         self.most_recent_wall_time,
                                         event.step, event.wall_time,
                                         *expired_per_type)
            logging.warn(purge_msg)
Example #22
def create_partitioned_variables(shape,
                                 slicing,
                                 initializer,
                                 dtype=dtypes.float32,
                                 trainable=True,
                                 collections=None,
                                 name=None,
                                 reuse=None):
    """Create a list of partitioned variables according to the given `slicing`.

  Currently only one dimension of the full variable can be sliced, and the
  full variable can be reconstructed by the concatenation of the returned
  list along that dimension.

  Args:
    shape: List of integers.  The shape of the full variable.
    slicing: List of integers.  How to partition the variable.
      Must be of the same length as `shape`.  Each value
      indicates how many slices to create in the corresponding
      dimension.  Presently only one of the values can be more than 1;
      that is, the variable can only be sliced along one dimension.

      For convenience, the requested number of partitions does not have to
      divide the corresponding dimension evenly.  If it does not, the
      shapes of the partitions are incremented by 1 starting from partition
      0 until all slack is absorbed.  The adjustment rules may change in the
      future, but as you can save/restore these variables with different
      slicing specifications this should not be a problem.
    initializer: A `Tensor` of shape `shape` or a variable initializer
      function.  If a function, it will be called once for each slice,
      passing the shape and data type of the slice as parameters.  The
      function must return a tensor with the same shape as the slice.
    dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
    trainable: If True also add all the variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES`.
    collections: List of graph collections keys to add the variables to.
      Defaults to `[GraphKeys.VARIABLES]`.
    name: Optional name for the full variable.  Defaults to
      `"PartitionedVariable"` and gets uniquified automatically.
    reuse: Boolean or `None`; if `True` and `name` is set, previously
      created variables are reused. If `False`, new variables are created.
      If `None`, the parent scope's reuse setting is inherited.

  Returns:
    A list of Variables corresponding to the slicing.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
    logging.warn("create_partitioned_variables is deprecated.  Use "
                 "tf.get_variable with a partitioner set, or "
                 "tf.get_partitioned_variable_list, instead.")

    if len(shape) != len(slicing):
        raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
                         "must have the same length: shape: %s, slicing: %s" %
                         (shape, slicing))
    if len(shape) < 1:
        raise ValueError("A partitioned Variable must have rank at least 1: "
                         "shape: %s" % shape)

    # Legacy: we are provided the slicing directly, so just pass it to
    # the partitioner.
    partitioner = lambda **unused_kwargs: slicing

    with variable_scope.variable_op_scope([],
                                          name,
                                          "PartitionedVariable",
                                          reuse=reuse) as scope:

        # pylint: disable=protected-access
        vs, _ = variable_scope._get_partitioned_variable_list(
            name="part",
            shape=shape,
            dtype=dtype,
            initializer=initializer,
            trainable=trainable,
            partitioner=partitioner,
            collections=collections)

        for var in vs:
            var._save_slice_info.full_name = scope.name
        # pylint: enable=protected-access

    return vs
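A minimal usage sketch, assuming the era-appropriate TensorFlow 0.x API documented above, with tf as the imported tensorflow module. Slicing a [20, 10] variable into 5 row partitions divides evenly, so each returned variable has shape [4, 10]:

vs = create_partitioned_variables(
    shape=[20, 10],
    slicing=[5, 1],
    initializer=tf.zeros([20, 10]),  # a Tensor of the full shape, per the docstring
    name='my_partitioned_var')       # hypothetical name
print(len(vs))  # 5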
Example #23
def create_partitioned_variables(
    shape, slicing, initializer, dtype=dtypes.float32,
    trainable=True, collections=None, name=None, reuse=None):
  """Create a list of partitioned variables according to the given `slicing`.

  Currently only one dimension of the full variable can be sliced, and the
  full variable can be reconstructed by the concatenation of the returned
  list along that dimension.

  Args:
    shape: List of integers.  The shape of the full variable.
    slicing: List of integers.  How to partition the variable.
      Must be of the same length as `shape`.  Each value
      indicates how many slices to create in the corresponding
      dimension.  Presently only one of the values can be more than 1;
      that is, the variable can only be sliced along one dimension.

      For convenience, the requested number of partitions does not have to
      divide the corresponding dimension evenly.  If it does not, the
      shapes of the partitions are incremented by 1 starting from partition
      0 until all slack is absorbed.  The adjustment rules may change in the
      future, but as you can save/restore these variables with different
      slicing specifications this should not be a problem.
    initializer: A `Tensor` of shape `shape` or a variable initializer
      function.  If a function, it will be called once for each slice,
      passing the shape and data type of the slice as parameters.  The
      function must return a tensor with the same shape as the slice.
    dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
    trainable: If True also add all the variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES`.
    collections: List of graph collections keys to add the variables to.
      Defaults to `[GraphKeys.VARIABLES]`.
    name: Optional name for the full variable.  Defaults to
      `"PartitionedVariable"` and gets uniquified automatically.
    reuse: Boolean or `None`; if `True` and `name` is set, previously
      created variables are reused. If `False`, new variables are created.
      If `None`, the parent scope's reuse setting is inherited.

  Returns:
    A list of Variables corresponding to the slicing.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
  logging.warn(
      "create_partitioned_variables is deprecated.  Use "
      "tf.get_variable with a partitioner set, or "
      "tf.get_partitioned_variable_list, instead.")

  if len(shape) != len(slicing):
    raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
                     "must have the same length: shape: %s, slicing: %s" %
                     (shape, slicing))
  if len(shape) < 1:
    raise ValueError("A partitioned Variable must have rank at least 1: "
                     "shape: %s" % shape)

  # Legacy: we are provided the slicing directly, so just pass it to
  # the partitioner.
  partitioner = lambda **unused_kwargs: slicing

  with variable_scope.variable_op_scope(
      [], name, "PartitionedVariable", reuse=reuse) as scope:

    # pylint: disable=protected-access
    vs, _ = variable_scope._get_partitioned_variable_list(
        name="part",
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        trainable=trainable,
        partitioner=partitioner,
        collections=collections)

    for var in vs:
      var._save_slice_info.full_name = scope.name
    # pylint: enable=protected-access

  return vs
Example #24
def _read_image(imagefn):
    '''
  This function reads in an image as a raw file and then converts
  it to a PIL image. Note that, critically, PIL must be imported before
  tensorflow for black magic reasons.

  Args:
    imagefn: A fully-qualified path to an image as a string.

  Returns:
    The PIL image requested.
  '''
    try:
        pil_image = Image.open(imagefn)
    except Exception as e:
        warn('Problem opening %s with PIL, error: %s' % (imagefn, e))
        return None
    try:
        # ensure that the image file is closed.
        pil_image.load()
    except Exception as e:
        warn('Problem loading %s with PIL, error: %s' % (imagefn, e))
        return None
    return pil_image


def _resize_to_min(img, w=None, h=None):
    '''
  Resizes an image so that its size in both dimensions is greater than or
  equal to the provided arguments. If either argument is None, that dimension
  is ignored. If the image is larger in both dimensions, then the image is
  shrunk. In either case, the aspect ratio is preserved and image size is
  minimized. If the target of interest is in the center of the frame, but the
  image has an unusual aspect ratio, center cropping is likely the best option.
  If the image has an unusual aspect ratio but is irregularly framed, padding
  the image will prevent distortion while also including the entirety of the
  original image.
Example #25
  def __init__(self, num_units, input_size=None):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated." % self)
    self._num_units = num_units
Example #26
    def __init__(self, num_units, input_size=None):
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated." % self)
        self._num_units = num_units
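For completeness, a sketch of what trips the deprecation path in these constructors. DemoCell is hypothetical, and the stdlib logging module stands in for TensorFlow's internal logger:

import logging

class DemoCell(object):
    def __init__(self, num_units, input_size=None):
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated." % self)
        self._num_units = num_units

DemoCell(128)                  # silent: input_size left at None
DemoCell(128, input_size=64)   # emits the deprecation warning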