Example 1
    def testCorpusBleuMetric(self):
        m = metrics.CorpusBleuMetric()
        m.Update('a b c d', 'a b c d')
        m.Update('a b c', 'a b c')

        self.assertEqual(1.0, m.value)

        name = 'corpus_bleu'
        self.assertEqual(
            tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=1.0)]),
            m.Summary(name))
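Since both Update calls pass identical strings, every n-gram precision is 1 and there is no brevity penalty, so corpus BLEU is exactly 1.0. A quick independent cross-check with NLTK (a separate dependency, not used by this codebase):

from nltk.translate.bleu_score import corpus_bleu

references = [[['a', 'b', 'c', 'd']], [['a', 'b', 'c']]]  # one reference per sentence
hypotheses = [['a', 'b', 'c', 'd'], ['a', 'b', 'c']]
print(corpus_bleu(references, hypotheses))  # 1.0: hypotheses match references exactly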
Example 2
    def _WriteToLog(self, text, logdir, filename):
        """Logs `text` and saves it under `logdir/filename`."""
        with tf.gfile.GFile(os.path.join(logdir, filename), 'w') as f:
            f.write(text)

        if self._summary_writer is not None:
            # Force newlines to be rendered correctly by Markdown.
            text = text.replace('\n', '  \n')
            self._summary_writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(tag=filename,
                                     tensor=tf.make_tensor_proto([text]))
                ]))
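A minimal standalone sketch of the same text-summary pattern, assuming TF 1.x (where tf.Summary, tf.make_tensor_proto, and tf.summary.FileWriter are available); the log directory is hypothetical:

import tensorflow as tf

writer = tf.summary.FileWriter('/tmp/logdir')  # hypothetical path
text = 'first line\nsecond line'.replace('\n', '  \n')  # Markdown line breaks
writer.add_summary(
    tf.Summary(value=[
        tf.Summary.Value(tag='notes.txt',
                         tensor=tf.make_tensor_proto([text]))
    ]))
writer.close()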
Example 3
    def testMCCMetric(self):
        m = metrics.MCCMetric()
        m.UpdateTruePositive(count=2.0)
        m.UpdateTrueNegative(count=2.0)
        m.UpdateFalsePositive()
        m.UpdateFalseNegative()

        expected_mcc = 1 / 3
        self.assertAlmostEqual(expected_mcc, m.value)

        name = 'my_mcc_metric'
        self.assertEqual(
            tf.Summary(
                value=[tf.Summary.Value(tag=name, simple_value=expected_mcc)]),
            m.Summary(name))
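The expected value follows from the standard Matthews correlation coefficient with TP = TN = 2 and FP = FN = 1; checking the arithmetic:

import math

tp, tn, fp, fn = 2.0, 2.0, 1.0, 1.0
mcc = (tp * tn - fp * fn) / math.sqrt(
    (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))  # (4 - 1) / 9 = 1/3
assert abs(mcc - 1.0 / 3.0) < 1e-12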
Example 4
# Note: this excerpt assumes the following imports from the surrounding module.
import six
import tensorflow as tf
from matplotlib.backends import backend_agg


def _FigureToSummary(name, fig):
    """Create a tf.Summary proto from a matplotlib.figure.Figure."""
    canvas = backend_agg.FigureCanvasAgg(fig)
    fig.canvas.draw()
    ncols, nrows = fig.canvas.get_width_height()
    png_file = six.BytesIO()
    canvas.print_figure(png_file)
    png_str = png_file.getvalue()
    return tf.Summary(value=[
        tf.Summary.Value(tag='%s/image' % name,
                         image=tf.Summary.Image(height=nrows,
                                                width=ncols,
                                                colorspace=3,
                                                encoded_image_string=png_str))
    ])
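A usage sketch, assuming matplotlib renders headlessly via the Agg backend:

import matplotlib
matplotlib.use('Agg')  # headless rendering
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(4, 3))
plt.plot([0, 1, 2], [0.9, 0.5, 0.3])
summary = _FigureToSummary('loss_curve', fig)  # one Value tagged 'loss_curve/image'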
Example 5
  def testF1Metric(self):
    m = metrics.F1Metric()
    m.UpdateTruePositive(count=2.0)
    m.UpdateFalsePositive()
    m.UpdateFalseNegative()

    precision = 2.0 / 3.0
    recall = 2.0 / 3.0
    expected_f1 = 2 * precision * recall / (precision + recall)
    self.assertAlmostEqual(expected_f1, m.value)

    name = 'my_f1_metric'
    self.assertEqual(
        tf.Summary(value=[tf.Summary.Value(tag=name,
                                           simple_value=expected_f1)]),
        m.Summary(name))
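With TP = 2, FP = 1, and FN = 1, precision and recall are both 2/3, so their harmonic mean is also 2/3:

tp, fp, fn = 2.0, 1.0, 1.0
precision = tp / (tp + fp)  # 2/3
recall = tp / (tp + fn)     # 2/3
f1 = 2 * precision * recall / (precision + recall)  # 2/3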
Example 6
    def Summary(self, name):
        """Implements custom Summary for Waymo metrics."""
        self._EvaluateIfNecessary()

        ret = tf.Summary()
        # Put '.value' first (so it shows up in logs / summaries, etc).
        ret.value.add(tag='{}/weighted_mAP'.format(name),
                      simple_value=self.value)

        ap = self._breakdown_metrics['waymo']._average_precisions  # pylint:disable=protected-access
        aph = self._breakdown_metrics['waymo']._average_precision_headings  # pylint:disable=protected-access
        breakdown_names = config_util.get_breakdown_names_from_config(
            self._waymo_metric_config)
        for i, j in enumerate(self.metadata.EvalClassIndices()):
            classname = self.metadata.ClassNames()[j]
            for k, breakdown_name in enumerate(breakdown_names):
                # Skip adding entries for breakdowns that are in a different class.
                #
                # The first breakdown is the overall one, so never skip it.
                if k > 0 and classname.lower() not in breakdown_name.lower():
                    continue

                if k == 0:
                    # For the overall mAP, include the class name
                    # and set the breakdown_str to 'default' for backwards compatibility.
                    prefix = '{}/{}'.format(name, classname)
                    breakdown_str = 'default'
                else:
                    # All breakdowns after the first one are extra and elide
                    # the classname, since it is present in the breakdown_name.
                    prefix = '{}_extra'.format(name)
                    breakdown_str = breakdown_name

                tag_str = '{}/AP_{}'.format(prefix, breakdown_str)
                ap_value = ap[breakdown_name][i]
                ret.value.add(tag=tag_str, simple_value=ap_value)
                tag_str = '{}/APH_{}'.format(prefix, breakdown_str)
                aph_value = aph[breakdown_name][i]
                ret.value.add(tag=tag_str, simple_value=aph_value)

        image_summaries = self._breakdown_metrics['waymo'].GenerateSummaries(
            name)
        for image_summary in image_summaries:
            ret.value.extend(image_summary.value)

        return ret
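For reference, the two branches above produce tags of the following shapes (the run name, class name, and breakdown name here are hypothetical):

name, classname = 'eval', 'Vehicle'  # hypothetical values
print('{}/{}/AP_{}'.format(name, classname, 'default'))
# eval/Vehicle/AP_default -- k == 0: overall AP, class name in the tag
print('{}_extra/AP_{}'.format(name, 'RANGE_VEHICLE_30'))  # hypothetical breakdown
# eval_extra/AP_RANGE_VEHICLE_30 -- k > 0: the breakdown name carries the class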
Example 7
    def Summary(self, name):
        """Implements custom Summary for Waymo metrics."""
        self._EvaluateIfNecessary()

        ret = tf.Summary()
        # Put '.value' first (so it shows up in logs / summaries, etc).
        ret.value.add(tag='{}/weighted_mAP'.format(name),
                      simple_value=self.value)

        ap = self._breakdown_metrics['waymo']._average_precisions  # pylint:disable=protected-access
        aph = self._breakdown_metrics['waymo']._average_precision_headings  # pylint:disable=protected-access
        breakdown_names = config_util.get_breakdown_names_from_config(
            self._waymo_metric_config)

        for i, class_index in enumerate(self.metadata.EvalClassIndices()):
            classname = self.metadata.ClassNames()[class_index]
            for breakdown_name in breakdown_names:
                # 'ONE_SHARD' breakdowns are the overall metrics (not sliced up),
                # so we treat them as the default metrics.
                if 'ONE_SHARD' in breakdown_name:
                    # For the overall mAP, include the class name and set
                    # breakdown_str to whatever follows 'ONE_SHARD_' (e.g., the level).
                    prefix = '{}/{}'.format(name, classname)
                    postfix = breakdown_name.replace('ONE_SHARD_', '')
                    breakdown_str = postfix if postfix else 'UNKNOWN'
                # Otherwise check that the class we are looking at is in the breakdown.
                elif classname.lower() in breakdown_name.lower():
                    prefix = '{}_extra'.format(name)
                    breakdown_str = breakdown_name
                else:
                    continue

                tag_str = '{}/AP_{}'.format(prefix, breakdown_str)
                ap_value = ap[breakdown_name][i]
                ret.value.add(tag=tag_str, simple_value=ap_value)
                tag_str = '{}/APH_{}'.format(prefix, breakdown_str)
                aph_value = aph[breakdown_name][i]
                ret.value.add(tag=tag_str, simple_value=aph_value)

        image_summaries = self._breakdown_metrics['waymo'].GenerateSummaries(
            name)
        for image_summary in image_summaries:
            ret.value.extend(image_summary.value)

        return ret
Example 8
  def testAverageMetric(self):
    m = metrics.AverageMetric()
    m.Update(1.0)
    m.Update(2.0, 10.0)

    self.assertEqual(1.0 + 2.0*10.0, m.total_value)
    expected_average = (1.0 + 2.0*10.0) / (1.0 + 10.0)
    self.assertEqual(expected_average, m.value)

    name = 'metric_name'
    self.assertEqual(
        tf.Summary(value=[tf.Summary.Value(tag=name,
                                           simple_value=expected_average)]),
        m.Summary(name))

    # Calling m.Summary() does not reset statistics.
    m.Update(1.0)
    self.assertEqual(1.0 + 2.0*10.0 + 1.0, m.total_value)
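The assertions mirror the weighted-average bookkeeping directly; Update's second argument is the weight and defaults to 1.0:

total_value = 1.0 * 1.0 + 2.0 * 10.0  # 21.0
total_weight = 1.0 + 10.0             # 11.0
average = total_value / total_weight  # ~1.9091, reported as m.value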
Example 9
  def Summary(self, name):
    """Converts the current state of this metric to a `tf.Summary`.

    Args:
      name: A string to use as the summary value tag.

    Returns:
      A `tf.Summary` proto.
    """
    summary = tf.Summary(value=[
        tf.Summary.Value(tag=name, simple_value=self.value),
        tf.Summary.Value(
            tag=name + '/total_count', simple_value=len(self._stored_values)),
        tf.Summary.Value(
            tag=name + '/total_value', simple_value=self._total_value),
        tf.Summary.Value(
            tag=name + '/total_weight', simple_value=self._total_weight),
    ])
    return summary
Example 10
  def Summary(self, name):
    self._EvaluateIfNecessary()

    ret = tf.Summary()

    # Put '.value' first (so it shows up in logs / summaries, etc).
    ret.value.add(tag='{}/weighted_mAP'.format(name), simple_value=self.value)

    average_precision_by_difficulty = self._AveragePrecisionByDifficulty()
    for i, j in enumerate(self.metadata.EvalClassIndices()):
      classname = self.metadata.ClassNames()[j]
      for difficulty in self.metadata.DifficultyLevels():
        tag_str = '{}/{}/AP_{}'.format(name, classname, difficulty)
        ap_value = average_precision_by_difficulty[difficulty][i]
        ret.value.add(tag=tag_str, simple_value=ap_value)

    for metric_class in self._breakdown_metrics.values():
      image_summaries = metric_class.GenerateSummaries(name)
      for image_summary in image_summaries:
        ret.value.extend(image_summary.value)
    return ret
Example 11
  def Run(self, sess):
    self._checkpointer.RestoreIfNeeded(sess)
    gsteps = py_utils.GetGlobalStep()
    global_step = sess.run(gsteps)

    self._infeed_pool.apply_async(self._InfeedLoop, args=(sess,))
    dec_metrics = self._model_task.CreateDecoderMetrics()
    start_time = time.time()
    for i in range(self._steps_per_loop):
      metrics_values = sess.run(self.metrics)
      self._model_task.PostProcessDecodeOut(metrics_values, dec_metrics)
      tf.logging.info('step: %d %f' %
                      (i, dec_metrics['num_samples_in_batch'].total_value))

    num_examples_metric = dec_metrics['num_samples_in_batch']
    summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
    elapsed_secs = time.time() - start_time
    example_rate = num_examples_metric.total_value / elapsed_secs
    summaries['examples/sec'] = tf.Summary(
        value=[tf.Summary.Value(tag='examples/sec', simple_value=example_rate)])
    self._WriteSummaries(
        os.path.basename(self._program_dir), global_step, summaries)
Example 12
# Note: this excerpt assumes the following imports from the surrounding module.
import six
import tensorflow as tf
from matplotlib.backends import backend_agg


def FigureToSummary(name, fig):
    """Create a tf.Summary proto from a matplotlib.figure.Figure.

    Args:
      name: Summary name.
      fig: A matplotlib figure object.

    Returns:
      A `tf.Summary` proto containing the figure rendered to an image.
    """
    canvas = backend_agg.FigureCanvasAgg(fig)
    fig.canvas.draw()
    ncols, nrows = fig.canvas.get_width_height()
    png_file = six.BytesIO()
    canvas.print_figure(png_file)
    png_str = png_file.getvalue()
    return tf.Summary(value=[
        tf.Summary.Value(tag='%s/image' % name,
                         image=tf.Summary.Image(height=nrows,
                                                width=ncols,
                                                colorspace=3,
                                                encoded_image_string=png_str))
    ])
Example 13
    def Run(self, sess):
        gsteps = py_utils.GetGlobalStep()
        global_step = sess.run(gsteps)
        self.SetStatusMessage('Executing decode program at step %d' %
                              global_step)
        infeed_future = self._infeed_pool.apply_async(self._InfeedLoop,
                                                      args=(sess, ))
        dec_metrics = self._model_task.CreateDecoderMetrics()
        start_time = time.time()
        buffered_decode_out = []
        for i in range(self._steps_per_loop):
            metrics_values = sess.run(self.metrics)
            decode_out = self._model_task.PostProcessDecodeOut(
                metrics_values, dec_metrics)
            tf.logging.info(
                'step: %d %f' %
                (i, dec_metrics['num_samples_in_batch'].total_value))
            if decode_out:
                buffered_decode_out.extend(decode_out)
        infeed_future.wait()

        num_examples_metric = dec_metrics['num_samples_in_batch']
        summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
        elapsed_secs = time.time() - start_time
        example_rate = num_examples_metric.total_value / elapsed_secs
        summaries['examples/sec'] = tf.Summary(value=[
            tf.Summary.Value(tag='examples/sec', simple_value=example_rate)
        ])
        self._WriteSummaries(os.path.basename(self._program_dir), global_step,
                             summaries)
        decode_out_path = os.path.join(self._program_dir,
                                       'decoder_out_%09d' % global_step)
        decode_finalize_args = base_model.DecodeFinalizeArgs(
            decode_out_path=decode_out_path, decode_out=buffered_decode_out)
        self._model_task.DecodeFinalize(decode_finalize_args)
        return False
Example 14
    def testUniqueAverageMetric(self):
        m = metrics.UniqueAverageMetric()
        m.Update('a', 1.0)
        m.Update('b', 2.0, 10.0)
        # Different value for 'a' than the previous one.
        m.Update('a', 2.0)

        with self.assertRaises(ValueError):
            # The error is raised when reading .value, so that all
            # mismatched keys can be collected for error reporting.
            _ = m.value

        m = metrics.UniqueAverageMetric()
        m.Update('a', 1.0)
        m.Update('b', 2.0, 10.0)
        # Duplicate update is ignored.
        m.Update('a', 1.0)

        self.assertEqual(1.0 + 2.0 * 10.0, m.total_value)

        expected_average = (1.0 + 2.0 * 10.0) / (1.0 + 10.0)
        self.assertEqual(expected_average, m.value)

        name = 'metric_name'
        self.assertEqual(
            tf.Summary(value=[
                tf.Summary.Value(tag=name, simple_value=expected_average),
                tf.Summary.Value(tag=name + '/total_count', simple_value=2),
                tf.Summary.Value(tag=name + '/total_value', simple_value=21.),
                tf.Summary.Value(tag=name + '/total_weight', simple_value=11.),
            ]), m.Summary(name))

        m = metrics.UniqueAverageMetric(mismatch_is_error=False)
        m.Update('a', 1.0)
        m.Update('a', 2.0)
        self.assertEqual(1.0, m.value)
Example 15
    def _EvaluateIfNecessary(self, name):
        """Create a camera image summary if not already created."""
        if self._summary is not None:
            return

        ret = tf.Summary()

        for sample_idx, sample in enumerate(self._sampler.samples):
            batch_size = sample.camera_images.shape[0]

            for batch_idx in range(batch_size):
                image = sample.camera_images[batch_idx]

                # [num_bboxes, 8, 2].
                bbox_corners = sample.bbox_corners[batch_idx]

                # [num_bboxes]
                bbox_scores = sample.bbox_scores[batch_idx]

                def Draw3DBoxes(fig,
                                axes,
                                bbox_corners=bbox_corners,
                                bbox_scores=bbox_scores):
                    """Draw 3d bounding boxes."""
                    del fig
                    for bbox_id in range(bbox_corners.shape[0]):
                        # Skip visualizing low-scoring boxes.
                        bbox_score = bbox_scores[bbox_id]
                        if bbox_score < self._bbox_score_threshold:
                            continue
                        bbox_data = bbox_corners[bbox_id]

                        # Draw the score of each box.
                        #
                        # Turn score into an integer for better display.
                        center_x = np.mean(bbox_data[:, 0])
                        center_y = np.mean(bbox_data[:, 1])
                        bbox_score = int(bbox_score * 100)
                        text = axes.text(center_x,
                                         center_y,
                                         bbox_score,
                                         fontsize=12,
                                         color='red',
                                         fontweight='bold')
                        text.set_bbox(dict(facecolor='yellow', alpha=0.4))

                        # The BBoxToCorners function produces the points
                        # in a deterministic order, which we use to draw
                        # the faces of the polygon.
                        #
                        # The first 4 points are the "top" of the bounding box.
                        # The second 4 points are the "bottom" of the bounding box.
                        #
                        # We then draw the last 4 connecting points by choosing
                        # two of the connecting faces in the right order.
                        face_points = []
                        face_points += [[
                            bbox_data[0, :], bbox_data[1, :], bbox_data[2, :],
                            bbox_data[3, :]
                        ]]
                        face_points += [[
                            bbox_data[4, :], bbox_data[5, :], bbox_data[6, :],
                            bbox_data[7, :]
                        ]]
                        face_points += [[
                            bbox_data[1, :], bbox_data[2, :], bbox_data[6, :],
                            bbox_data[5, :]
                        ]]
                        face_points += [[
                            bbox_data[0, :], bbox_data[3, :], bbox_data[7, :],
                            bbox_data[4, :]
                        ]]
                        for face in face_points:
                            # Each face is a list of 4 x,y points
                            face_xy = np.array(face)
                            axes.add_patch(
                                matplotlib_patches.Polygon(face_xy,
                                                           closed=True,
                                                           edgecolor='red',
                                                           facecolor='none'))

                def Draw2DBoxes(fig,
                                axes,
                                bbox_corners=bbox_corners,
                                bbox_scores=bbox_scores):
                    """Draw 2d boxes on the figure."""
                    del fig
                    # Extract the 2D extrema of each bbox and draw its score.
                    for bbox_id in range(bbox_corners.shape[0]):
                        # Skip visualizing low-scoring boxes.
                        bbox_score = bbox_scores[bbox_id]
                        if bbox_score < self._bbox_score_threshold:
                            continue
                        bbox_data = bbox_corners[bbox_id]

                        ymin = np.min(bbox_data[:, 1])
                        xmin = np.min(bbox_data[:, 0])
                        ymax = np.max(bbox_data[:, 1])
                        xmax = np.max(bbox_data[:, 0])
                        height = ymax - ymin
                        width = xmax - xmin
                        # Turn score into an integer for better display.
                        bbox_score = int(bbox_score * 100)
                        text = axes.text(xmin,
                                         ymin,
                                         bbox_score,
                                         fontsize=12,
                                         color='red',
                                         fontweight='bold')
                        text.set_bbox(dict(facecolor='yellow', alpha=0.4))
                        axes.add_patch(
                            matplotlib_patches.Rectangle((xmin, ymin),
                                                         width,
                                                         height,
                                                         edgecolor='red',
                                                         facecolor='none'))

                # For each image, draw the boxes on that image.
                draw_fn = Draw3DBoxes if self._draw_3d_boxes else Draw2DBoxes
                image_summary = plot.Image(name='{}/{}/{}'.format(
                    name, sample_idx, batch_idx),
                                           aspect='equal',
                                           figsize=self._figsize,
                                           image=image,
                                           setter=draw_fn)
                ret.value.extend(image_summary.value)
        self._summary = ret
Example 16
# Note: this excerpt assumes TF 1.x, where tf.Summary is the summary proto.
import tensorflow as tf


def CreateScalarSummary(name, simple_value):
    return tf.Summary(
        value=[tf.Summary.Value(tag=name, simple_value=simple_value)])
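A usage sketch (TF 1.x; the log directory and step are hypothetical):

summary = CreateScalarSummary('train/loss', 0.25)
writer = tf.summary.FileWriter('/tmp/logdir')  # hypothetical path
writer.add_summary(summary, global_step=100)
writer.close()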
Example 17
    def _CreateSummary(self, name):
        ret = tf.Summary()
        for sample in self.samples:
            value = sample.value
            ret.value.add(tag=name, simple_value=value)
        return ret
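Unlike the scalar helpers above, this emits one Value per sample under the same tag. A minimal sketch of the resulting proto shape (sample values hypothetical):

ret = tf.Summary()
for value in [0.1, 0.2]:  # hypothetical sample values
    ret.value.add(tag='latency', simple_value=value)
print(len(ret.value))  # 2 -- repeated Values sharing one tag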
Example 18
    def GenerateSummaries(self, name):
        """Generate an image summary for PR by difficulty and for calibration.

        Args:
          name: str, name of the summary.

        Returns:
          A list of summaries.
        """

        legend = {}
        p = self.params
        for class_id in p.metadata.EvalClassIndices():
            legend[class_id] = []
            for difficulty, i in p.metadata.DifficultyLevels().items():
                num_objects = self._histogram[i][class_id]
                legend[class_id].append('%s (%d)' % (difficulty, num_objects))

        summaries = []
        for i, j in enumerate(p.metadata.EvalClassIndices()):

            def _PRSetter(fig, axes):
                """Configure the plot for precision recall."""
                ticks = np.arange(0, 1.05, 0.1)
                axes.grid(b=False)
                axes.set_xlabel('Recall')
                axes.set_xticks(ticks)
                axes.set_ylabel('Precision')
                axes.set_yticks(ticks)
                # pylint: disable=undefined-loop-variable
                axes.legend(legend[j], numpoints=1)  # pylint: disable=cell-var-from-loop
                # pylint: enable=undefined-loop-variable
                fig.tight_layout()

            classname = p.metadata.ClassNames()[j]
            # Generate Precision-Recall curves.
            rs = []
            ps = []
            for difficulty in p.metadata.DifficultyLevels():
                ps += [self._precision_recall[difficulty][i][:, 0]]
                rs += [self._precision_recall[difficulty][i][:, 1]]
            tag_str = '{}/{}/PR'.format(name, classname)
            image_summary = plot.Curve(name=tag_str,
                                       figsize=(10, 8),
                                       xs=rs[0],
                                       ys=np.array(ps).T,
                                       setter=_PRSetter,
                                       marker='.',
                                       markersize=14,
                                       linestyle='-',
                                       linewidth=2,
                                       alpha=0.5)
            summaries.append(image_summary)

        for difficulty, c in self._calibration.items():
            # Note that we only generate a calibration for a single difficulty level.
            calibration_summaries = c.Summary(name)
            for calibration_summary in calibration_summaries:
                summaries.append(calibration_summary)

        # Generate scalar summaries for the various recalls for each difficulty.
        for difficulty in p.metadata.DifficultyLevels():
            max_recall = _FindMaximumRecall(self._precision_recall[difficulty])
            for i, j in enumerate(p.metadata.EvalClassIndices()):
                classname = p.metadata.ClassNames()[j]
                summary = tf.Summary(value=[
                    tf.Summary.Value(tag='{}/{}/max_recall_{}'.format(
                        name, classname, difficulty),
                                     simple_value=max_recall[i])
                ])
                summaries.append(summary)

            for precision_level in p.metadata.RecallAtPrecision():
                recall_at_precision = _FindRecallAtGivenPrecision(
                    self._precision_recall[difficulty], precision_level)
                for i, j in enumerate(p.metadata.EvalClassIndices()):
                    classname = p.metadata.ClassNames()[j]
                    summary = tf.Summary(value=[
                        tf.Summary.Value(tag='{}/{}/recall_{}_{}'.format(
                            name, classname, int(precision_level *
                                                 100), difficulty),
                                         simple_value=recall_at_precision[i])
                    ])
                    summaries.append(summary)
        return summaries
Example 19
    def _EvaluateIfNecessary(self, name):
        """Create a top down image summary, if not already created."""
        if self._summary is not None:
            return

        tf.logging.info('Generating top down summary.')
        ret = tf.Summary()

        transform = self._top_down_transform

        for batch_idx, batch_sample in enumerate(self._sampler.samples):
            batch_size = batch_sample.labels.shape[0]
            visualization_labels = batch_sample.visualization_labels
            predicted_bboxes = batch_sample.predicted_bboxes
            visualization_weights = batch_sample.visualization_weights
            points_xyz = batch_sample.points_xyz
            points_padding = batch_sample.points_padding
            gt_bboxes_2d = batch_sample.gt_bboxes_2d
            gt_bboxes_2d_weights = batch_sample.gt_bboxes_2d_weights
            labels = batch_sample.labels
            difficulties = batch_sample.difficulties
            source_ids = batch_sample.source_ids

            # Create base images for entire batch that we will update.
            images = np.zeros(
                [batch_size, self._image_height, self._image_width, 3],
                dtype=np.uint8)

            # Draw lasers first, so that bboxes can be on top.
            self._DrawLasers(images, points_xyz, points_padding, transform)

            # Draw ground-truth bboxes.
            gt_bboxes_2d = np.where(
                np.expand_dims(gt_bboxes_2d_weights > 0, -1), gt_bboxes_2d,
                np.zeros_like(gt_bboxes_2d))
            transformed_gt_bboxes_2d = summary.TransformBBoxesToTopDown(
                gt_bboxes_2d, transform)

            summary.DrawBBoxesOnImages(images,
                                       transformed_gt_bboxes_2d,
                                       gt_bboxes_2d_weights,
                                       labels,
                                       self._class_id_to_name,
                                       groundtruth=True)

            # Draw predicted bboxes.
            predicted_bboxes = np.where(
                np.expand_dims(visualization_weights > 0, -1),
                predicted_bboxes, np.zeros_like(predicted_bboxes))
            transformed_predicted_bboxes = summary.TransformBBoxesToTopDown(
                predicted_bboxes, transform)

            summary.DrawBBoxesOnImages(images,
                                       transformed_predicted_bboxes,
                                       visualization_weights,
                                       visualization_labels,
                                       self._class_id_to_name,
                                       groundtruth=False)

            # Draw the difficulties on the image.
            self.DrawDifficulty(images, transformed_gt_bboxes_2d,
                                gt_bboxes_2d_weights, difficulties)

            for idx in range(batch_size):
                source_id = source_ids[idx]

                def AnnotateImage(fig, axes, source_id=source_id):
                    """Add source_id to image."""
                    del fig
                    # Draw in top middle of image.
                    text = axes.text(500,
                                     15,
                                     source_id,
                                     fontsize=16,
                                     color='blue',
                                     fontweight='bold',
                                     horizontalalignment='center')
                    text.set_path_effects([
                        path_effects.Stroke(linewidth=3,
                                            foreground='lightblue'),
                        path_effects.Normal()
                    ])

                image_summary = plot.Image(name='{}/{}/{}'.format(
                    name, batch_idx, idx),
                                           aspect='equal',
                                           figsize=self._figsize,
                                           image=images[idx, ...],
                                           setter=AnnotateImage)
                ret.value.extend(image_summary.value)

        tf.logging.info('Done generating top down summary.')
        self._summary = ret