Example #1
 def testWriteRawPb(self):
   logdir = self.get_temp_dir()
   pb = summary_pb2.Summary()
   pb.value.add().simple_value = 42.0
   with context.eager_mode():
     with summary_ops.create_file_writer_v2(logdir).as_default():
       output = summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
       self.assertTrue(output.numpy())
   events = events_from_logdir(logdir)
   self.assertEqual(2, len(events))
   self.assertEqual(12, events[1].step)
   self.assertProtoEquals(pb, events[1].summary)
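Of the two events read back, the first is the file-version record that every event file begins with, which is why the assertions index events[1]. A minimal sketch of reading such a file outside the test harness (the logdir path here is hypothetical, standing in for the directory written above):

import glob
import os
import tensorflow as tf

logdir = '/tmp/logdir'  # hypothetical; use the directory written above
event_file = glob.glob(os.path.join(logdir, 'events.out.tfevents.*'))[0]
for event in tf.compat.v1.train.summary_iterator(event_file):
    print(event.step, event.summary)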
Example #2
 def testScalarSummary(self):
     with self.cached_session() as s:
         i = constant_op.constant(3)
         with ops.name_scope('outer'):
             im = summary_lib.scalar('inner', i)
         summary_str = s.run(im)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summary_str)
     values = summary.value
     self.assertEqual(len(values), 1)
     self.assertEqual(values[0].tag, 'outer/inner')
     self.assertEqual(values[0].simple_value, 3.0)
Example #3
 def add_summary(self, summary, current_global_step):
     """Add summary."""
     if isinstance(summary, bytes):
         summary_proto = summary_pb2.Summary()
         summary_proto.ParseFromString(summary)
         summary = summary_proto
     if current_global_step in self._summaries:
         step_summaries = self._summaries[current_global_step]
     else:
         step_summaries = []
         self._summaries[current_global_step] = step_summaries
     step_summaries.append(summary)
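The if/else bookkeeping above is the standard setdefault pattern; a behavior-preserving one-liner, shown purely as an alternative, would be:

    self._summaries.setdefault(current_global_step, []).append(summary)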
Example #4
def WriteScalarSeries(writer, tag, f, n=5):
  """Write a series of scalar events to writer, using f to create values."""
  step = 0
  wall_time = _start_time
  for i in xrange(n):
    v = f(i)
    value = summary_pb2.Summary.Value(tag=tag, simple_value=v)
    summary = summary_pb2.Summary(value=[value])
    event = event_pb2.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += 1
    wall_time += 10
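A usage sketch: f maps the loop index to the logged value, so any callable fits. Assuming writer is the event writer used above, a decaying-loss series would be:

WriteScalarSeries(writer, 'loss', f=lambda i: 1.0 / (i + 1), n=5)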
Example #5
def make_encoded_image_summary(name,
                               encoded_image_string,
                               height,
                               width,
                               colorspace=3):
    image = summary_pb2.Summary.Image(
        height=height,
        width=width,
        colorspace=colorspace,
        encoded_image_string=encoded_image_string)
    return summary_pb2.Summary(
        value=[summary_pb2.Summary.Value(tag=name, image=image)])
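One way to produce encoded_image_string is PNG-encoding with Pillow; a minimal sketch, assuming a uint8 RGB array (the Pillow dependency is an assumption, not part of the original snippet):

import io

import numpy as np
from PIL import Image

img = np.zeros((16, 16, 3), dtype=np.uint8)  # placeholder image
buf = io.BytesIO()
Image.fromarray(img).save(buf, format='PNG')
summary = make_encoded_image_summary('sample', buf.getvalue(),
                                     height=16, width=16)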
Example #6
def _AddRateToSummary(tag, rate, step, sw):
  """Adds the given rate to the summary with the given tag.

  Args:
    tag:   Name for this value.
    rate:  Value to add to the summary. Perhaps an error rate.
    step:  Global step of the graph for the x-coordinate of the summary.
    sw:    Summary writer to which to write the rate value.
  """
  sw.add_summary(
      summary_pb2.Summary(value=[summary_pb2.Summary.Value(
          tag=tag, simple_value=rate)]), step)
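Usage is a single call; sw is assumed to be any TF1-style writer exposing add_summary:

_AddRateToSummary('error_rate', rate=0.05, step=100, sw=sw)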
Example #7
 def _parse_summary_if_needed(summary):
     """
     Parses the summary if it is provided in serialized form (bytes).
     This code is copied from tensorflow's SummaryToEventTransformer::add_summary
     :param summary:
     :return:
     """
     if isinstance(summary, bytes):
         summ = summary_pb2.Summary()
         summ.ParseFromString(summary)
         summary = summ
     return summary
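A quick round-trip check of the helper, assuming summary_pb2 is imported as in the other examples:

original = summary_pb2.Summary()
original.value.add().simple_value = 1.0
parsed = _parse_summary_if_needed(original.SerializeToString())
assert parsed.value[0].simple_value == 1.0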
Example #8
    def _image_summary(self, tf_name, images, step=None):
        """
        Log a list of images.

        References:
            https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py#L22

        Example:
            >>> tf_name = 'foo'
            >>> self = Logger(None, is_dummy=True)
            >>> images = [np.random.rand(10, 10), np.random.rand(10, 10)]
            >>> summary = self._image_summary(tf_name, images, step=None)
            >>> assert len(summary.value) == 2
            >>> assert summary.value[0].image.width == 10
        """
        img_summaries = []
        for i, img in enumerate(images):
            # Encode the image as PNG into an in-memory buffer. PNG data
            # is binary, so BytesIO is required; the old try/except
            # StringIO fallback could never trigger, because the StringIO
            # constructor itself does not raise.
            s = BytesIO()
            # Note: scipy.misc.toimage was removed in SciPy 1.3; on newer
            # SciPy, use PIL.Image.fromarray instead.
            scipy.misc.toimage(img).save(s, format="png")

            # Create an Image proto holding the encoded bytes
            img_sum = summary_pb2.Summary.Image(
                encoded_image_string=s.getvalue(),
                height=img.shape[0],
                width=img.shape[1])
            # Create a Summary value tagged '<tf_name>/<index>'
            img_value = summary_pb2.Summary.Value(
                tag='{}/{}'.format(tf_name, i), image=img_sum)
            img_summaries.append(img_value)

        summary = summary_pb2.Summary(value=img_summaries)
        return summary
Example #9
    def _histogram_summary(self, tf_name, value, step=None):
        """
        Args:
            tf_name (str): name of tensorflow variable
            value (tuple or list): either a tuple of bin_edges and bincounts or
                a list of values to summarize in a histogram.

        References:
            https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py#L45

        Example:
            >>> tf_name = 'foo'
            >>> value = ([0, 1, 2, 3, 4, 5], [1, 20, 10, 22, 11])
            >>> self = Logger(None, is_dummy=True)
            >>> summary = self._histogram_summary(tf_name, value, step=None)
            >>> assert summary.value[0].histo.max == 5

        Example:
            >>> tf_name = 'foo'
            >>> value = [0.72,  0.18,  0.34,  0.66,  0.11,  0.70,  0.23]
            >>> self = Logger(None, is_dummy=True)
            >>> summary = self._histogram_summary(tf_name, value, step=None)
            >>> assert summary.value[0].histo.num == 7.0
        """
        if isinstance(value, tuple):
            bin_edges, bincounts = value
            assert len(bin_edges) == len(bincounts) + 1, (
                'must have one more edge than count')
            hist = summary_pb2.HistogramProto()
            hist.min = float(min(bin_edges))
            hist.max = float(max(bin_edges))
        else:
            values = np.array(value)

            bincounts, bin_edges = np.histogram(values)

            hist = summary_pb2.HistogramProto()
            hist.min = float(np.min(values))
            hist.max = float(np.max(values))
            hist.num = int(np.prod(values.shape))
            hist.sum = float(np.sum(values))
            hist.sum_squares = float(np.sum(values**2))

        # Add bin edges and counts
        for edge in bin_edges[1:]:
            hist.bucket_limit.append(edge)
        for v in bincounts:
            hist.bucket.append(v)

        summary = summary_pb2.Summary()
        summary.value.add(tag=tf_name, histo=hist)
        return summary
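For the precomputed branch, the (bin_edges, bincounts) tuple can come straight from np.histogram, which returns counts first and one more edge than counts; a sketch in the style of the doctests above:

self = Logger(None, is_dummy=True)
values = np.random.randn(1000)
bincounts, bin_edges = np.histogram(values, bins=10)
summary = self._histogram_summary('weights',
                                  (list(bin_edges), list(bincounts)))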
Example #10
def to_summary_proto(summary_str):
    """Create summary based on latest stats.

  Args:
    summary_str: Serialized summary.
  Returns:
    summary_pb2.Summary.
  Raises:
    ValueError: if tensor is not a valid summary tensor.
  """
    summary = summary_pb2.Summary()
    summary.ParseFromString(summary_str)
    return summary
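Round-trip usage:

pb = summary_pb2.Summary()
pb.value.add().simple_value = 0.5
assert to_summary_proto(pb.SerializeToString()).value[0].simple_value == 0.5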
Example #11
 def testImageSummary(self):
     with self.test_session() as s:
         i = array_ops.ones((5, 4, 4, 3))
         with ops.name_scope('outer'):
             im = summary_lib.image('inner', i, max_outputs=3)
         summary_str = s.run(im)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summary_str)
     values = summary.value
     self.assertEqual(len(values), 3)
     tags = sorted(v.tag for v in values)
     expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
     self.assertEqual(tags, expected)
Example #12
    def on_validation_end(self):
        predicted_labels = np.argmax(np.vstack(self.logits), axis=1)
        labels = np.hstack(self.labels)
        acc = np.mean(np.equal(predicted_labels, labels))

        self.model.info('\n   > Accuracy: %0.2f\n  ' % (acc * 100))
        self.model.history['accuracy'] += [acc]

        value = summary_pb2.Summary.Value(tag="accuracy", simple_value=acc)
        global_step = self.model.epoch * self.model.data.validation_steps + self.model.valid_step
        self.model.valid_writer.add_summary(summary_pb2.Summary(value=[value]),
                                            global_step=global_step)
        self.model.valid_writer.flush()
Example #13
    def py_gif_event(step, tag, tensor, max_outputs, fps):
        summary = py_gif_summary(tag, tensor, max_outputs, fps)

        if isinstance(summary, bytes):
            summ = summary_pb2.Summary()
            summ.ParseFromString(summary)
            summary = summ

        event = event_pb2.Event(summary=summary)
        event.wall_time = time.time()
        event.step = step
        event_pb = event.SerializeToString()
        return event_pb
Example #14
 def testAudioSummary(self):
     with self.cached_session() as s:
         i = array_ops.ones((5, 3, 4))
         with ops.name_scope('outer'):
             aud = summary_lib.audio('inner', i, 0.2, max_outputs=3)
         summary_str = s.run(aud)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summary_str)
     values = summary.value
     self.assertEqual(len(values), 3)
     tags = sorted(v.tag for v in values)
     expected = sorted('outer/inner/audio/{}'.format(i) for i in xrange(3))
     self.assertEqual(tags, expected)
Example #15
def tb_callback(res):
    # A callback function saving results to TensorBoard.
    print("Loss of the last generation (lower is better): %.3f" % res.fun)
    val = summary_pb2.Summary.Value(tag="Training loss rand%d" %
                                    config.oneplus_params["random_state"],
                                    simple_value=res.fun)
    summary = summary_pb2.Summary(value=[val])
    tb_writer.add_summary(summary, tb_callback.cntr)
    tb_callback.cntr += 1
    global best_loss
    if res.fun < best_loss:
        best_loss = res.fun
        save_result(res)
Example #16
    def eval_once(self, sess, iterator_init_list, writer, step, caller):
        """ Eval the model once """
        start_time = time.time()

        # initialize data set iterators:
        for init in iterator_init_list:
            sess.run(init)

        total_sup_loss = 0
        total_dice_score = 0
        total_unsup_loss = 0
        n_batches = 0
        try:
            while True:
                caller.on_batch_begin(training_state=False,
                                      **self.callbacks_kwargs)

                sup_loss, dice_score, unsup_loss = self._eval_all_op(
                    sess, writer, step)
                total_dice_score += dice_score
                total_sup_loss += sup_loss
                total_unsup_loss += unsup_loss
                step += 1

                n_batches += 1
                caller.on_batch_end(training_state=False,
                                    **self.callbacks_kwargs)

        except tf.errors.OutOfRangeError:
            # End of the validation set. Compute statistics here:
            total_loss = total_sup_loss + total_unsup_loss
            avg_loss = total_loss / n_batches
            avg_dice = total_dice_score / n_batches
            dice_loss = 1.0 - avg_dice
            delta_t = time.time() - start_time

            value = summary_pb2.Summary.Value(
                tag="Dice_1/validation/dice_3channels_avg",
                simple_value=avg_dice)
            summary = summary_pb2.Summary(value=[value])
            writer.add_summary(summary, global_step=step)

        # update global epoch counter:
        sess.run(self.update_g_valid_step, feed_dict={'update_value:0': step})

        print(
            '\033[31m  VALIDATION\033[0m:  average loss = {1:.4f} {0} Took: {2:.3f} seconds'
            .format(' ' * 3, avg_loss, delta_t))
        return step, dice_loss
Example #17
    def write_tensorboard_results(
        results, epsilon_value, networks, actions_made_histogram,
        actions_made_placeholder, episode_number
    ):  # TODO: replace episode_number with Debug.EPISODE_NUMBER ??
        if Debug.USE_TENSORBOARD and Debug.EPISODE_NUMBER % Debug.OUTPUT_TO_TENSORBOARD_EVERY_N_EPISODES == 0:
            value = summary_pb2.Summary.Value(tag="score_per_episode",
                                              simple_value=results['score'])
            summary = summary_pb2.Summary(value=[value])
            Debug.WRITER.add_summary(summary, episode_number)

            value = summary_pb2.Summary.Value(tag="epsilon_value",
                                              simple_value=epsilon_value)
            summary = summary_pb2.Summary(value=[value])
            Debug.WRITER.add_summary(summary, episode_number)

            value = summary_pb2.Summary.Value(
                tag="total reward per episode",
                simple_value=results['total_reward'])
            summary = summary_pb2.Summary(value=[value])
            Debug.WRITER.add_summary(summary, episode_number)

            # log['actions_made'] += results['actions_made']
            summary = Debug.SESSION.run(
                actions_made_histogram,
                feed_dict={
                    actions_made_placeholder:
                    np.reshape(results['actions_made'],
                               (len(results['actions_made']), 1))
                }
            )  # TODO: why reshape results['actions_made']? maybe it isnt useful anymore
            Debug.WRITER.add_summary(summary, episode_number)
            # log['actions_made'] = []

            for network in networks:
                if network.is_training:
                    network.model.write_weights_tb_histograms()
                    if Debug.SAY_WHEN_HISTOGRAMS_ARE_PRINTED:
                        print("weights histograms printed")
Example #18
def write_to_summary(output_dir, summary_tag, summary_value,
                     current_global_step):
    summary_writer = tf.summary.FileWriterCache.get(output_dir)
    summary_proto = summary_pb2.Summary()
    value = summary_proto.value.add()
    value.tag = summary_tag
    if isinstance(summary_value, np.float32) or isinstance(
            summary_value, float):
        value.simple_value = float(summary_value)
    elif isinstance(summary_value, int) or isinstance(
            summary_value, np.int64) or isinstance(summary_value, np.int32):
        value.simple_value = int(summary_value)
    summary_writer.add_summary(summary_proto, current_global_step)
    summary_writer.flush()
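A usage sketch (the output directory is hypothetical). FileWriterCache caches one writer per directory, which is why no writer object needs to be passed in:

write_to_summary('/tmp/eval_dir', 'loss', 0.25, current_global_step=7)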
Example #19
    def testUseAfterClose(self):
        test_dir = self._CleanTestDir("use_after_close")
        sw = self._FileWriter(test_dir)
        sw.close()
        with warnings.catch_warnings(record=True) as triggered:
            warnings.simplefilter("always")
            self.assertFalse(triggered)
            sw.add_summary(summary_pb2.Summary())
            sw.add_session_log(event_pb2.SessionLog())
            sw.add_graph(ops.Graph())

        self.assertEqual(len(triggered), 3)
        for w in triggered:
            self.assertEqual(w.category, UserWarning)
Example #20
 def testSummarizingVariable(self):
     with self.test_session() as s:
         c = constant_op.constant(42.0)
         v = variables.Variable(c)
         ss = summary_lib.scalar('summary', v)
         init = variables.global_variables_initializer()
         s.run(init)
         summ_str = s.run(ss)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summ_str)
     self.assertEqual(len(summary.value), 1)
     value = summary.value[0]
     self.assertEqual(value.tag, 'summary')
     self.assertEqual(value.simple_value, 42.0)
Example #21
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):
  """Write a sequence of normally distributed histograms to writer."""
  step = 0
  wall_time = _start_time
  for [mean, stddev] in mu_sigma_tuples:
    data = [random.normalvariate(mean, stddev) for _ in xrange(n)]
    histo = _MakeHistogram(data)
    summary = summary_pb2.Summary(
        value=[summary_pb2.Summary.Value(
            tag=tag, histo=histo)])
    event = event_pb2.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += 10
    wall_time += 100
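Usage sketch, with writer and _MakeHistogram assumed to come from the same module as the scalar-series helper above; each (mean, stddev) pair produces one histogram event:

WriteHistogramSeries(writer, 'weights',
                     mu_sigma_tuples=[(0.0, 1.0), (0.5, 2.0)], n=20)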
Example #22
      def add_summary(self, summary, global_step):
        summary_obj = summary_pb2.Summary()

        # ensure a valid Summary proto is being sent
        if isinstance(summary, bytes):
          summary_obj.ParseFromString(summary)
        else:
          assert isinstance(summary, summary_pb2.Summary)
          summary_obj = summary

        # keep track of steps seen for the merged_summary op,
        # which contains the histogram summaries
        if len(summary_obj.value) > 1:
          self.steps_seen.append(global_step)
Example #23
    def test_once(self, sess, iterator_init_list, writer, step, caller):
        """ Test the model once """
        start_time = time.time()

        # initialize data set iterators:
        for init in iterator_init_list:
            sess.run(init)

        total_disc_loss = 0
        total_unsup_loss = 0
        total_dice = 0
        total_miou = 0

        n_batches = 0
        try:
            while True:
                caller.on_batch_begin(training_state=False,
                                      **self.callbacks_kwargs)

                unsup_loss, disc_loss, dice, miou = self._eval_all_op(
                    sess, writer, step)
                total_disc_loss += disc_loss
                total_unsup_loss += unsup_loss
                total_dice += dice
                total_miou += miou
                step += 1
                n_batches += 1

        except tf.errors.OutOfRangeError:
            # End of the test set. Compute statistics here:
            avg_loss = (total_unsup_loss + total_disc_loss) / n_batches
            avg_dice = total_dice / n_batches
            avg_miou = total_miou / n_batches
            delta_t = time.time() - start_time

            step += 1
            value = summary_pb2.Summary.Value(tag="y_TEST/test/loss_avg",
                                              simple_value=avg_loss)
            summary = summary_pb2.Summary(value=[value])
            writer.add_summary(summary, global_step=step)

        # update global epoch counter:
        sess.run(self.update_g_test_step, feed_dict={'update_value:0': step})

        print(
            '\033[31m  TEST\033[0m:{0}{0} \033[1;33m average loss = {1:.4f} {0}, average dice = {2:.4f} {0}, '
            'average miou = {3:.4f} {0}. Took: {4:.3f} seconds'.format(
                ' ' * 3, avg_loss, avg_dice, avg_miou, delta_t))
        return step
Example #24
def parse(bytes, wall_time, step, visit):
    summary = summary_pb2.Summary()
    summary.ParseFromString(bytes)

    for value in summary.value:
        if value.HasField('tensor') and value.tag == HEALTH_PILL_EVENT_TAG:
            continue

        for summary_type, summary_func in _SUMMARY_TYPES.items():
            if value.HasField(summary_type):
                datum = getattr(value, summary_type)
                tag = value.node_name if summary_type == 'tensor' else value.tag
                parsed = summary_func(tag, wall_time, step, datum)
                visit(summary_type, parsed)
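A sketch of driving parse with a visit callback, assuming _SUMMARY_TYPES maps Value field names ('simple_value', 'image', 'histo', ...) to per-type parse functions, as the dispatch loop implies:

from tensorflow.core.framework import summary_pb2

pb = summary_pb2.Summary()
pb.value.add().simple_value = 1.0
collected = []

def visit(summary_type, parsed):
    # Accumulates (field-name, parsed-datum) pairs emitted by the loop.
    collected.append((summary_type, parsed))

parse(pb.SerializeToString(), wall_time=0.0, step=0, visit=visit)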
Example #25
 def testWriteRawPb_multipleValues(self):
     logdir = self.get_temp_dir()
     pb1 = summary_pb2.Summary()
     pb1.value.add().simple_value = 1.0
     pb1.value.add().simple_value = 2.0
     pb2 = summary_pb2.Summary()
     pb2.value.add().simple_value = 3.0
     pb3 = summary_pb2.Summary()
     pb3.value.add().simple_value = 4.0
     pb3.value.add().simple_value = 5.0
     pb3.value.add().simple_value = 6.0
     pbs = [pb.SerializeToString() for pb in (pb1, pb2, pb3)]
     with context.eager_mode():
         with summary_ops.create_file_writer_v2(logdir).as_default():
             output = summary_ops.write_raw_pb(pbs, step=12)
             self.assertTrue(output.numpy())
     events = events_from_logdir(logdir)
     self.assertEqual(2, len(events))
     self.assertEqual(12, events[1].step)
     expected_pb = summary_pb2.Summary()
     for i in range(6):
         expected_pb.value.add().simple_value = i + 1.0
     self.assertProtoEquals(expected_pb, events[1].summary)
Example #26
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
                             tol=1e-6):
  """Assert summary the specified simple values.

  Args:
    test_case: test case.
    expected_summaries: Dict of expected tags and simple values.
    summary_str: Serialized `summary_pb2.Summary`.
    tol: Relative and absolute tolerance for the comparison.
  """
  summary = summary_pb2.Summary()
  summary.ParseFromString(summary_str)
  test_case.assertAllClose(expected_summaries, {
      v.tag: v.simple_value for v in summary.value
  }, rtol=tol, atol=tol)
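A typical call site inside a tf.test.TestCase, with summary_str a serialized Summary obtained from running a summary op:

_assert_simple_summaries(self, {'loss': 0.25, 'accuracy': 0.9}, summary_str)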
Example #27
 def testFileWriterWithSuffix(self):
     test_dir = self._CleanTestDir("test_suffix")
     sw = self._FileWriter(test_dir, filename_suffix="_test_suffix")
     for _ in range(10):
         sw.add_summary(
             summary_pb2.Summary(value=[
                 summary_pb2.Summary.Value(tag="float_ten",
                                           simple_value=10.0)
             ]), 10)
         sw.close()
         sw.reopen()
     sw.close()
     event_filenames = glob.glob(os.path.join(test_dir, "event*"))
     for filename in event_filenames:
         self.assertTrue(filename.endswith("_test_suffix"))
Example #28
 def _assertSummaryHasCount(self,
                            summary_str,
                            tag,
                            expected_value,
                            greater_than=False):
   summary_proto = summary_pb2.Summary()
   summary_proto.ParseFromString(summary_str)
   for value in summary_proto.value:
     if re.match(tag, value.tag):
       if greater_than:
         self.assertGreaterEqual(value.histo.num, expected_value)
       else:
         self.assertEqual(expected_value, value.histo.num)
       return
   self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto))
Example #29
 def _run_val_metrics(self, X_val):
     val_msres, val_n_mf_updates = [], []
     for X_vb in batch_iter(X_val, batch_size=self.batch_size):
         msre, n_mf_upds = self._tf_session.run([self._msre, self._n_mf_updates],
                                                 feed_dict=self._make_tf_feed_dict(X_vb))
         val_msres.append(msre)
         val_n_mf_updates.append(n_mf_upds)
     mean_msre = np.mean(val_msres)
     mean_n_mf_updates = np.mean(val_n_mf_updates)
     s = summary_pb2.Summary(value=[
         summary_pb2.Summary.Value(tag='mean_squared_recon_error', simple_value=mean_msre),
         summary_pb2.Summary.Value(tag='n_mf_updates', simple_value=mean_n_mf_updates),
     ])
     self._tf_val_writer.add_summary(s, self.iter_)
     return mean_msre, mean_n_mf_updates
Example #30
def _write_summary_results(output_dir, eval_results, current_global_step):
    """Writes eval results into summary file in given dir."""
    logging.info('Saving evaluation summary for %d step: %s' %
                 (current_global_step, _eval_results_to_str(eval_results)))
    summary_writer = get_summary_writer(output_dir)
    summary = summary_pb2.Summary()
    for key in eval_results:
        if eval_results[key] is None:
            continue
        value = summary.value.add()
        value.tag = key
        if (isinstance(eval_results[key], np.float32)
                or isinstance(eval_results[key], float)):
            value.simple_value = float(eval_results[key])
        # Non-float results fall through and are recorded as tag-only
        # values with no simple_value set.
    summary_writer.add_summary(summary, current_global_step)
    summary_writer.close()
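A usage sketch; the directory is hypothetical, and None-valued results are skipped by the loop above:

_write_summary_results('/tmp/eval_dir', {'loss': 0.12, 'accuracy': None},
                       current_global_step=100)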