def log_histogram(tag, values, step, bins=1000):
    """
    log_histogram
    Logs the histogram of a list/vector of values.
    """
    # Convert to a numpy array
    values = np.array(values)

    # Create histogram using numpy
    counts, bin_edges = np.histogram(values, bins=bins)

    # Fill fields of histogram proto
    hist = HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values**2))

    # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
    # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
    # Thus, we drop the start of the first bin
    bin_edges = bin_edges[1:]

    # Add bin edges and counts
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)

    return Summary(value=[Summary.Value(tag=tag, histo=hist)])
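# Usage sketch (assumed context, not part of the original snippet): write the
# returned proto with the TF1-style tf.summary.FileWriter; the log directory,
# tag, and sampled data below are placeholders.
import numpy as np
import tensorflow as tf

writer = tf.summary.FileWriter('/tmp/histogram_logs')
summary = log_histogram('weights/layer1', np.random.randn(10000), step=0, bins=100)
writer.add_summary(summary, global_step=0)
writer.flush()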
def plot(self, tag, mpl_plt, step=None, close_plot=True):
    """Saves matplotlib plot output to summary image.

    Args:
        tag: str: label for this data
        mpl_plt: matplotlib stateful pyplot object with prepared plotting state
        step: int: training step
        close_plot: bool: automatically closes plot
    """
    if step is None:
        step = self._step
    else:
        self._step = step
    fig = mpl_plt.get_current_fig_manager()
    img_w, img_h = fig.canvas.get_width_height()
    image_buf = io.BytesIO()
    mpl_plt.savefig(image_buf, format='png')
    image_summary = Summary.Image(
        encoded_image_string=image_buf.getvalue(),
        colorspace=4,  # RGBA
        height=img_h,
        width=img_w)
    summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
    self.add_summary(summary, step)
    if close_plot:
        mpl_plt.close()
def image(self, tag, image, step=None):
    """Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].

    Args:
        tag: str: label for this data
        image: ndarray: [H,W], [H,W,1], [H,W,3] to save the image in greyscale or color
        step: int: training step
    """
    image = onp.array(image)
    if step is None:
        step = self._step
    else:
        self._step = step
    if len(onp.shape(image)) == 2:
        image = image[:, :, onp.newaxis]
    if onp.shape(image)[-1] == 1:
        image = onp.repeat(image, 3, axis=-1)
    image_strio = io.BytesIO()
    plt.imsave(image_strio, image, format='png')
    image_summary = Summary.Image(
        encoded_image_string=image_strio.getvalue(),
        colorspace=3,
        height=image.shape[0],
        width=image.shape[1])
    summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
    self.add_summary(summary, step)
def _log_reward(self, reward):
    if self.show_profiling:
        self._log_performance()
    self.logger.info("Intermediate reward {:.4f}".format(reward))
    stat_reward = Summary(
        value=[Summary.Value(tag="reward", simple_value=reward)])
    self._tf_board_writer.add_summary(stat_reward)
def histogram(self, tag, values, bins, step=None):
    """Saves histogram of values.

    Args:
        tag: str: label for this data
        values: ndarray: will be flattened by this routine
        bins: number of bins in histogram, or array of bins for onp.histogram
        step: int: training step
    """
    if step is None:
        step = self._step
    else:
        self._step = step
    values = onp.array(values)
    bins = onp.array(bins)
    values = onp.reshape(values, -1)
    counts, limits = onp.histogram(values, bins=bins)
    # Boundary logic: trim the empty buckets at the head and tail of the
    # histogram, keeping bucket_limit aligned with the remaining counts.
    cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
    start, end = onp.searchsorted(cum_counts, [0, cum_counts[-1] - 1],
                                  side='right')
    start, end = int(start), int(end) + 1
    counts = (counts[start - 1:end]
              if start > 0 else onp.concatenate([[0], counts[:end]]))
    limits = limits[start:end + 1]
    sum_sq = values.dot(values)
    histo = HistogramProto(min=values.min(),
                           max=values.max(),
                           num=len(values),
                           sum=values.sum(),
                           sum_squares=sum_sq,
                           bucket_limit=limits.tolist(),
                           bucket=counts.tolist())
    summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
    self.add_summary(summary, step)
def update(self, progbar, names, values, val_names=None, val_values=None,
           display_step=10):
    logs_list = []
    for name, value in zip(names, values):
        logs_list.append((name, value))
        if (self.step + 1) % display_step == 0 and self.step != 0:
            summary = Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value
            summary_value.tag = name
            self.tb_callback.writer.add_summary(
                summary, ((self.step + 1) // display_step) - 1)
            self.tb_callback.writer.flush()
    if val_names is not None and val_values is not None:
        for name, value in zip(val_names, val_values):
            logs_list.append((name, value))
            summary = Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value
            summary_value.tag = name
            self.tb_callback.writer.add_summary(summary, self.val_step)
            self.tb_callback.writer.flush()
        self.val_step += 1
    progbar.add(1, values=logs_list)
    self.step += 1
def text(self, tag, textdata, step=None):
    """Saves a text summary.

    Args:
        tag: str: label for this data
        textdata: string, or 1D/2D list/numpy array of strings
        step: int: training step
    Note: markdown formatting is rendered by tensorboard.
    """
    if step is None:
        step = self._step
    else:
        self._step = step
    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
    if isinstance(textdata, (str, bytes)):
        tensor = tf.make_tensor_proto(
            values=[textdata.encode(encoding='utf_8')], shape=(1,))
    else:
        textdata = onp.array(textdata)  # convert lists, jax arrays, etc.
        datashape = onp.shape(textdata)
        if len(datashape) == 1:
            tensor = tf.make_tensor_proto(
                values=[td.encode(encoding='utf_8') for td in textdata],
                shape=(datashape[0],))
        elif len(datashape) == 2:
            tensor = tf.make_tensor_proto(
                values=[
                    td.encode(encoding='utf_8')
                    for td in onp.reshape(textdata, -1)
                ],
                shape=(datashape[0], datashape[1]))
    summary = Summary(
        value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
    self.add_summary(summary, step)
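# Usage sketch (assumed context, not part of the original snippet): `sw` stands
# in for an instance of the enclosing summary-writer class; tags, text, and the
# step are placeholders. A 2D array of strings is rendered by tensorboard as a
# markdown table.
sw.text('notes/run_config', 'Learning rate reduced to **1e-4** at step 10k.',
        step=10000)
sw.text('notes/hparams', [['lr', '1e-4'], ['batch', '128']], step=10000)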
def log_colorimages(tag, images, tagsuffix=''):
    img = images
    # PNG data is binary, so encode into an in-memory bytes buffer
    s = io.BytesIO()
    plt.imsave(s, img, format='png')
    img_sum = Summary.Image(encoded_image_string=s.getvalue(),
                            height=img.shape[0],
                            width=img.shape[1])
    return Summary(
        value=[Summary.Value(tag='%s%s' % (tag, tagsuffix), image=img_sum)])
def to_summary(fig, tag):
    """
    Convert a matplotlib figure ``fig`` into a TensorFlow Summary object
    that can be directly fed into ``Summary.FileWriter``.

    Example:

      >>> fig, ax = ...    # (as above)
      >>> summary = to_summary(fig, tag='MyFigure/image')
      >>> type(summary)
      tensorflow.core.framework.summary_pb2.Summary
      >>> summary_writer.add_summary(summary, global_step=global_step)

    Args:
      fig: A ``matplotlib.figure.Figure`` object.
      tag (string): The tag name of the created summary.

    Returns:
      A TensorFlow ``Summary`` protobuf object containing the plot image
      as an image summary.
    """
    if not isinstance(tag, six.string_types):
        raise TypeError("tag must be a string type")

    # attach a new agg canvas
    _old_canvas = fig.canvas
    try:
        canvas = FigureCanvasAgg(fig)
        canvas.draw()
        w, h = canvas.get_width_height()

        # get PNG data from the figure
        png_buffer = BytesIO()
        canvas.print_png(png_buffer)
        png_encoded = png_buffer.getvalue()
        png_buffer.close()

        summary_image = Summary.Image(
            height=h,
            width=w,
            colorspace=4,  # RGBA
            encoded_image_string=png_encoded)
        summary = Summary(value=[Summary.Value(tag=tag, image=summary_image)])
        return summary
    finally:
        fig.canvas = _old_canvas
def scalar(self, tag, value, step=None):
    """Saves scalar value.

    Args:
        tag: str: label for this data
        value: int/float: number to log
        step: int: training step
    """
    value = float(onp.array(value))
    if step is None:
        step = self._step
    else:
        self._step = step
    summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
    self.add_summary(summary, step)
def log_plot(self, tag, figure, global_step):
    plot_buf = io.BytesIO()
    figure.savefig(plot_buf, format='png')
    plot_buf.seek(0)
    img = Image.open(plot_buf)
    img_ar = np.array(img)
    img_summary = Summary.Image(encoded_image_string=plot_buf.getvalue(),
                                height=img_ar.shape[0],
                                width=img_ar.shape[1])
    summary = Summary()
    summary.value.add(tag=tag, image=img_summary)
    self.writer.add_summary(summary, global_step=global_step)
    self.writer.flush()
def log_image(file_writer, tensor, epoch_no, tag):
    height, width, channel = tensor.shape
    tensor = ((tensor + 1) * 255)
    tensor = tensor.astype('uint8')
    image = Image.fromarray(tensor)

    import io
    output = io.BytesIO()
    image.save(output, format='PNG')
    image_string = output.getvalue()
    output.close()

    tf_img = Summary.Image(height=height,
                           width=width,
                           colorspace=channel,
                           encoded_image_string=image_string)
    summary = Summary(value=[Summary.Value(tag=tag, image=tf_img)])
    file_writer.add_summary(summary, epoch_no)
    file_writer.flush()
def log_vector(tag, values):
    """
    log_vector
    Logs a vector of values.
    """
    values = np.array(values).flatten()

    # Fill fields of histogram proto
    hist = HistogramProto()
    hist.min = 0
    hist.max = len(values) - 1
    hist.num = len(values)
    hist.sum = float(np.sum(np.arange(hist.num)))
    hist.sum_squares = float(np.sum(np.arange(hist.num)**2))

    for idx, c in enumerate(values):
        hist.bucket_limit.append(idx)
        hist.bucket.append(c)

    return Summary(value=[Summary.Value(tag=tag, histo=hist)])
def audio(self, tag, audiodata, step=None, sample_rate=44100):
    """Saves audio. NB: single channel only right now.

    Args:
        tag: str: label for this data
        audiodata: ndarray [Nsamples,]: data between (-1.0, 1.0) to save as wave
        step: int: training step
        sample_rate: sample rate of passed in audio buffer
    """
    audiodata = onp.array(audiodata)
    if step is None:
        step = self.step
    else:
        self.step = step
    audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
    if audiodata.ndim != 1:
        raise ValueError('Audio data must be 1D.')
    sample_list = (32767.0 * audiodata).astype(int).tolist()
    wio = io.BytesIO()
    wav_buf = wave.open(wio, 'wb')
    wav_buf.setnchannels(1)
    wav_buf.setsampwidth(2)
    wav_buf.setframerate(sample_rate)
    enc = b''.join([struct.pack('<h', v) for v in sample_list])
    wav_buf.writeframes(enc)
    wav_buf.close()
    encoded_audio_bytes = wio.getvalue()
    wio.close()
    audio = Summary.Audio(
        sample_rate=sample_rate,
        num_channels=1,
        length_frames=len(sample_list),
        encoded_audio_string=encoded_audio_bytes,
        content_type='audio/wav')
    summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
    self.writer.add_summary(summary, step)
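# Usage sketch (assumed context, not part of the original snippet): `sw` stands
# in for an instance of the enclosing summary-writer class. Logs one second of
# a 440 Hz sine tone; the tag and sample rate are placeholders.
import numpy as onp

rate = 44100
t = onp.linspace(0.0, 1.0, rate, endpoint=False)
sw.audio('audio/sine_440hz', 0.5 * onp.sin(2.0 * onp.pi * 440.0 * t),
         step=0, sample_rate=rate)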
def _execute_summary_op(self, op, feed_dict={}):
    '''
    Execute the summary op, and parse the result into a Summary proto object.
    '''
    with self.cached_session() as sess:
        cprint("\n >>> " + str(op), color='magenta')
        self.assertIsInstance(op, tf.Tensor)
        self.assertEqual(op.dtype, tf.string)

        ret = sess.run(op, feed_dict=feed_dict)

        # check that ret is a serialized bytes object
        self.assertIsInstance(ret, bytes)

        summary = Summary()
        summary.ParseFromString(ret)
        return summary
def log_histogram(self, tag, values, global_step, bins):
    counts, bin_edges = np.histogram(values, bins=bins)

    # Fill fields of histogram proto
    hist = HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values ** 2))

    # Drop the leftmost edge; the proto assumes the first bucket starts at -DBL_MAX
    bin_edges = bin_edges[1:]
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)

    summary = Summary()
    summary.value.add(tag=tag, histo=hist)
    self.writer.add_summary(summary, global_step=global_step)
    self.writer.flush()
def log_images(tag, images, tagsuffix=''):
    """
    log_images
    Logs a list of images.
    """

    def convert_to_uint8(img):
        return np.uint8(img * 255)

    if not isinstance(images, list):
        img = images
        # Write the single image to an in-memory bytes buffer
        s = io.BytesIO()
        Image.fromarray(convert_to_uint8(img), mode='L').save(s, 'png')
        # Create an Image object
        img_res = Summary.Image(encoded_image_string=s.getvalue(),
                                height=img.shape[0],
                                width=img.shape[1],
                                colorspace=1)
        return Summary(value=[
            Summary.Value(tag='%s%s' % (tag, tagsuffix), image=img_res)
        ])
    else:
        im_summaries = []
        for nr, img in enumerate(images):
            # Write the image to a bytes buffer
            s = io.BytesIO()
            Image.fromarray(convert_to_uint8(img), mode='L').save(s, 'png')
            img_sum = Summary.Image(
                encoded_image_string=s.getvalue(),
                height=img.shape[0],
                width=img.shape[1],
                colorspace=1
            )  # https://github.com/tensorflow/tensorflow/blob/r1.3/tensorflow/core/framework/summary.proto

            # Create a Summary value
            im_summaries.append(
                Summary.Value(tag='%s/%d%s' % (tag, nr, tagsuffix),
                              image=img_sum))
        return Summary(value=im_summaries)
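# Usage sketch (assumed context, not part of the original snippet): logs two
# random greyscale frames under one tag. Values are expected in [0, 1] because
# the helper scales by 255; the writer, log directory, and tag are placeholders.
import numpy as np
import tensorflow as tf

writer = tf.summary.FileWriter('/tmp/image_logs')
frames = [np.random.rand(64, 64), np.random.rand(64, 64)]
writer.add_summary(log_images('samples', frames), global_step=0)
writer.flush()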
def log_scalar(self, tag, value, global_step):
    summary = Summary()
    summary.value.add(tag=tag, simple_value=value)
    self.writer.add_summary(summary, global_step=global_step)
    self.writer.flush()
def log_scalar(tag, value):
    """
    log_scalar
    Logs a scalar.
    """
    return Summary(value=[Summary.Value(tag=tag, simple_value=value)])
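# Usage sketch (assumed context, not part of the original snippet): writes one
# scalar point per step with the TF1-style tf.summary.FileWriter; the tag,
# values, and log directory are placeholders.
import tensorflow as tf

writer = tf.summary.FileWriter('/tmp/scalar_logs')
for step, loss in enumerate([0.9, 0.7, 0.55]):
    writer.add_summary(log_scalar('train/loss', loss), global_step=step)
writer.flush()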
        imgs, labels = imgs.cuda(), labels.cuda()
        imgs, labels = Variable(imgs), Variable(labels)
        preds = net(imgs)
        loss = loss_fn(preds, labels)
        loss.backward()

        # Write training summary
        if args.write_summary and step % args.report_interval == 0:
            param_norm = global_norm(net.parameters())
            grad_norm = global_norm(
                [param.grad for param in net.parameters()])
            summary_proto = Summary(value=[
                Summary.Value(tag='train/loss', simple_value=loss.data[0]),
                Summary.Value(tag='train/param_norm',
                              simple_value=param_norm.data[0]),
                Summary.Value(tag='train/grad_norm',
                              simple_value=grad_norm.data[0]),
            ])
            writer.add_summary(summary_proto,
                               global_step=step * args.batch_size)

        # Update parameters
        optimizer.step()
        step += 1

    epoch += 1

    # Evaluate on test
    n_correct = 0
    for batch in tqdm.tqdm(val_loader):
def add_to_logs(self, tag: str, value: float, step: int) -> None:
    summary = Summary()
    summary.value.add(tag=tag, simple_value=value)
    self.writer.add_summary(summary, step)
    self.writer.flush()