Example #1
  def write(self, values: base.LoggingData):
    with self.summary.as_default():
      for key, value in values.items():
        tf.summary.scalar(f'{self.label}/{_format_key(key)}',
                          value,
                          step=self._iter)
    self._iter += 1
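This `write` assumes the enclosing class owns a `tf.summary` file writer (`self.summary`), a `label` prefix, an internal step counter `self._iter`, and a `_format_key` helper. A minimal sketch of that assumed context (the constructor below and the helper's exact behavior are assumptions, not part of the example):

import tensorflow as tf
from acme.utils.loggers import base

def _format_key(key: str) -> str:
  # Assumed helper; the exact formatting is a guess.
  return key.title().replace('_', '')

class TFSummaryLogger(base.Logger):
  # Hypothetical constructor consistent with Example #1's write method.
  def __init__(self, logdir: str, label: str = 'Logs'):
    self.label = label
    self._iter = 0
    # Standard TF2 API for creating a summary file writer.
    self.summary = tf.summary.create_file_writer(logdir)

  # write() as shown above.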
Example #2
    def write(self, data: base.LoggingData):
        """Writes a `data` into a row of comma-separated values."""
        # Only log if `time_delta` seconds have passed since last logging event.
        now = time.time()

        # TODO(b/192227744): Remove this in favour of filters.TimeFilter.
        elapsed = now - self._last_log_time
        if elapsed < self._time_delta:
            logging.debug(
                'Not due to log for another %.2f seconds, dropping data.',
                self._time_delta - elapsed)
            return
        self._last_log_time = now

        # Append row to CSV.
        data = base.to_numpy(data)
        # Use fields from initial `data` to create the header. If extra fields are
        # present in subsequent `data`, we ignore them.
        if not self._writer:
            fields = sorted(data.keys())
            self._writer = csv.DictWriter(self._file,
                                          fieldnames=fields,
                                          extrasaction='ignore')
            # Write header only if the file is empty.
            if not self._file.tell():
                self._writer.writeheader()
        self._writer.writerow(data)

        # Flush every `flush_every` writes.
        if self._writes % self._flush_every == 0:
            self.flush()
        self._writes += 1
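Example #2's method relies on instance state set up elsewhere. A hedged sketch of the assumed constructor (the defaults are guesses; only the attribute names come from the example):

import csv
import time
from absl import logging
from acme.utils.loggers import base

class CSVLogger(base.Logger):
  # Hypothetical constructor consistent with Example #2's write method.
  def __init__(self, file, time_delta: float = 0.0, flush_every: int = 30):
    self._file = file                # An already-open, writable file object.
    self._time_delta = time_delta    # Minimum seconds between logged rows.
    self._flush_every = flush_every  # Flush after this many writes.
    self._last_log_time = time.time() - time_delta  # Let the first write pass.
    self._writer = None              # csv.DictWriter, created lazily.
    self._writes = 0

  def flush(self):
    self._file.flush()

  # write() as shown above.

Note that the modulo check runs before `self._writes` is incremented, so the very first row is flushed immediately.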
Example #3
  def write(self, data: base.LoggingData):
    for k, v in data.items():
      image = Image.fromarray(v, mode=self._mode)
      path = self._path / f'{k}_{self._indices[k]:06}.png'
      self._indices[k] += 1
      with path.open(mode='wb') as f:
        logging.info('Writing image to %s.', str(path))
        image.save(f)
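The per-key counter `self._indices` gives each image a zero-padded, monotonically increasing filename. A sketch of the assumed surrounding state (constructor and defaults are assumptions):

import collections
import pathlib
from absl import logging
from PIL import Image
from acme.utils.loggers import base

class ImageLogger(base.Logger):
  # Hypothetical constructor consistent with Example #3's write method.
  def __init__(self, directory: str, mode: str = 'RGB'):
    self._path = pathlib.Path(directory)
    self._path.mkdir(parents=True, exist_ok=True)
    self._mode = mode  # PIL image mode, e.g. 'L' for grayscale or 'RGB'.
    self._indices = collections.defaultdict(int)  # Per-key image counter.

  # write() as shown above.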
Example #4
    def write(self, data: base.LoggingData, step: Optional[int] = None):
        """Writes a set of scalar values in the log at a specific time step
        
        Args:
            data: a dictionary with name of quantity: quantity pairs
            step: optionally the number of the step to register the data, if None, the internal is used
        """
        if step is not None:
            iteration = step
        else:
            iteration = self._iter
            self._iter += 1

        for key, value in data.items():
            self._writer.add_scalar(key, value, iteration)
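`add_scalar(tag, value, step)` matches the API of `torch.utils.tensorboard.SummaryWriter`, so `self._writer` is presumably one. A usage sketch (the class name and constructor are assumptions):

from torch.utils.tensorboard import SummaryWriter
from acme.utils.loggers import base

class TorchTensorboardLogger(base.Logger):
  # Hypothetical wrapper consistent with Example #4's write method.
  def __init__(self, logdir: str):
    self._writer = SummaryWriter(log_dir=logdir)
    self._iter = 0

  # write() as shown above.

logger = TorchTensorboardLogger('/tmp/tb')
logger.write({'loss': 0.5, 'reward': 1.0})  # Uses and advances the internal counter.
logger.write({'loss': 0.4}, step=100)       # Pins an explicit step; counter untouched.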
Example #5
  def write(self, values: base.LoggingData):
    if self._steps_key is not None and self._steps_key not in values:
      logging.warning('Steps key "%s" not found; skipping logging.', self._steps_key)
      return

    step = (values[self._steps_key]
            if self._steps_key is not None else self._iter)

    with self.summary.as_default():
      # TODO(b/159065169): Remove this suppression once the bug is resolved.
      # pytype: disable=unsupported-operands
      for key in values.keys() - [self._steps_key]:
        # pytype: enable=unsupported-operands
        tf.summary.scalar(
            f'{self.label}/{_format_key(key)}', data=values[key], step=step)
    self._iter += 1
Example #6
    def write(self, data: base.LoggingData):
        """Writes a `data` into a row of comma-separated values."""

        # Only log if `time_delta` seconds have passed since last logging event.
        now = time.time()
        if now - self._last_log_time < self._time_delta:
            return
        self._last_log_time = now

        # Append row to CSV.
        data = base.to_numpy(data)
        if not self._writer:
            keys = sorted(data.keys())
            self._writer = csv.DictWriter(self._file, fieldnames=keys)
            self._writer.writeheader()
        self._writer.writerow(data)
Example #7
    def write(self, data: base.LoggingData):
        """Writes a `data` into a row of comma-separated values."""

        # Only log if `time_delta` seconds have passed since last logging event.
        now = time.time()
        if now - self._time < self._time_delta:
            return
        self._time = now

        # Append row to CSV.
        with self._open(self._file_path, mode='a') as f:
            keys = sorted(data.keys())
            writer = csv.DictWriter(f, fieldnames=keys)
            if not self._header_exists:
                # Only write the column headers once.
                writer.writeheader()
                self._header_exists = True
            writer.writerow(data)
Example #8
    def write(self, values: base.LoggingData) -> None:
        # The file writer is created lazily: if it lives in __init__, Launchpad
        # fails while pickling the logger with:
        # tensorflow.python.framework.errors_impl.InvalidArgumentError:
        #   Cannot convert a Tensor of dtype resource to a NumPy array.
        # raised from: CloudPickler(file, protocol=protocol,
        #              buffer_callback=buffer_callback).dump(obj)
        if self._summary is None:
            self._summary = tf.summary.create_file_writer(self._logdir)

        with self._summary.as_default():
            for key, value in values.items():
                if hasattr(value, "shape") and len(value.shape) > 0:
                    self.histogram_summary(key, value)
                elif hasattr(value, "shape") or not isinstance(value, dict):
                    self.scalar_summary(key, value)
                else:
                    self.dict_summary(key, value)
            self._iter += 1
Example #9
File: csv.py Project: Idate96/acme
    def write(self, data: base.LoggingData):
        """Writes a `data` into a row of comma-separated values."""
        # Only log if `time_delta` seconds have passed since last logging event.
        now = time.time()
        if now - self._last_log_time < self._time_delta:
            return
        self._last_log_time = now

        # Append row to CSV.
        data = base.to_numpy(data)
        # Use fields from initial `data` to create the header. If extra fields are
        # present in subsequent `data`, we ignore them.
        if not self._writer:
            fields = sorted(data.keys())
            self._writer = csv.DictWriter(self._file,
                                          fieldnames=fields,
                                          extrasaction='ignore')
            self._writer.writeheader()
        self._writer.writerow(data)
Example #10
def serialize(values: base.LoggingData) -> str:
  """Converts `values` to a pretty-printed string.

  This takes a dictionary `values` whose keys are strings and returns
  a formatted string such that each [key, value] pair is separated by ' = ' and
  each entry is separated by ' | '. The keys are sorted alphabetically to ensure
  a consistent order, and snake case is split into words.

  For example:

      values = {'a': 1, 'b': 2.33333333, 'c': 'hello', 'big_value': 10}
      # Returns 'A = 1 | B = 2.333 | Big Value = 10 | C = hello'
      values_string = serialize(values)

  Args:
    values: A dictionary with string keys.

  Returns:
    A formatted string.
  """
  return ' | '.join(f'{_format_key(k)} = {_format_value(v)}'
                    for k, v in sorted(values.items()))
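The `_format_key` and `_format_value` helpers are not shown. One plausible implementation consistent with the docstring (snake case split into title-cased words, floats shortened to three decimals) would be:

from typing import Any
import numpy as np

def _format_key(key: str) -> str:
  # 'big_value' -> 'Big Value': split snake case and title-case each word.
  return key.replace('_', ' ').title()

def _format_value(value: Any) -> str:
  # Floats render with three decimal places; everything else via str().
  if isinstance(value, (float, np.floating)):
    return f'{value:.3f}'
  return f'{value}'

With these helpers, serialize({'a': 1, 'b': 2.33333333, 'c': 'hello', 'big_value': 10}) produces exactly the string shown in the docstring.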
Example #11
  def write(self, values: base.LoggingData):
    values = {k: v for k, v in values.items() if v is not None}
    self._to.write(values)
Example #12
  def write(self, data: base.LoggingData):
    if self._keep:
      data = {k: data[k] for k in self._keep}
    if self._drop:
      data = {k: v for k, v in data.items() if k not in self._drop}
    self._to.write(data)
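This is a key filter that forwards to a wrapped logger `self._to`; `keep` is applied before `drop`, and a key listed in `keep` but absent from `data` would raise `KeyError`. A hedged sketch of the constructor (the signature is an assumption):

from typing import Optional, Sequence
from acme.utils.loggers import base

class KeyFilter(base.Logger):
  # Hypothetical constructor consistent with Example #12's write method.
  def __init__(self, to: base.Logger,
               keep: Optional[Sequence[str]] = None,
               drop: Optional[Sequence[str]] = None):
    if keep and drop:
      raise ValueError('Pass either `keep` or `drop`, not both.')
    self._to = to
    self._keep = keep
    self._drop = drop

  # write() as shown above.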
Example #13
  def write(self, values: base.LoggingData):
    self._wandb.log({self.label + k: v for k, v in values.items()})
    self._iter += 1
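`self._wandb.log(...)` matches the Weights & Biases `wandb.log` API. Since the example concatenates `self.label + k` with no separator, a label such as 'train/' is needed to get namespaced metric names. A sketch of the assumed context (the class name and constructor are assumptions):

import wandb
from acme.utils.loggers import base

class WandbLogger(base.Logger):
  # Hypothetical wrapper consistent with Example #13's write method.
  def __init__(self, label: str = '', **init_kwargs):
    self._wandb = wandb
    self._wandb.init(**init_kwargs)  # e.g. project='my-project'
    self.label = label               # e.g. 'train/' so keys become 'train/loss'.
    self._iter = 0

  # write() as shown above.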