Example 1
    def testCustomPlotFunc(self):
        batch_size = 3
        data = tf.ones((batch_size, 3, 5))
        trim = tf.constant([[3, 2], [2, 5], [3, 3]])
        titles = tf.constant(['batch1', 'batch2', 'batch3'])

        def TrimAndAddImage(fig, axes, data, trim, title, **kwargs):
            plot.AddImage(fig,
                          axes,
                          data[:trim[0], :trim[1]],
                          title=title,
                          **kwargs)

        with self.session() as s:
            fig = plot.MatplotlibFigureSummary('fig_custom_plotfunc',
                                               max_outputs=batch_size,
                                               plot_func=TrimAndAddImage)
            fig.AddSubplot([data, trim, titles])
            im = fig.Finalize()
            summary_str = s.run(im)
        summary = tf.summary.Summary.FromString(summary_str)
        self.assertEqual(len(summary.value), batch_size)
        for n, value in enumerate(summary.value):
            self.assertEqual(value.tag, 'fig_custom_plotfunc/image/%d' % n)
            self.assertGreater(value.image.width, 1)
            self.assertGreater(value.image.height, 1)
            self.assertEqual(value.image.colorspace, 3)
            self.assertNotEqual(value.image.encoded_image_string,
                                self.default_encoded_image)
Example 2
  def _AddSummary(self, batch, prediction):
    """Adds image summaries for the batch."""
    p = self.params
    if not self.do_eval or not p.add_image_summary:
      # Image summaries only work in the evaler/decoder.
      return

    def Draw(fig, axes, img, label, pred):
      plot.AddImage(
          fig=fig,
          axes=axes,
          data=img[:, :, 0] / 256.,
          show_colorbar=False,
          suppress_xticks=True,
          suppress_yticks=True)
      axes.text(
          x=0.5,
          y=0,
          s=u'%d vs. %d' % (label, pred),
          transform=axes.transAxes,
          horizontalalignment='center')

    with plot.MatplotlibFigureSummary(
        'examples', figsize=(1, 1), max_outputs=10) as fig:
      fig.AddSubplot([batch.raw, batch.label, prediction], Draw)
Example 3
 def setUp(self):
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary('DEFAULT', self.FIGSIZE, max_outputs=1)
     batched_data = tf.expand_dims(self.DEFAULT_DATA, 0)  # Batch size 1.
     fig.AddSubplot([batched_data])
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.default_encoded_image = summary.value[0].image.encoded_image_string
Example 4
 def testEnforcesConsistentBatchSize(self):
   batch_size = 4
   tensors = [tf.ones((batch_size, 3, 5)), tf.ones((batch_size - 2, 2, 2))]
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary('summary', self.FIGSIZE, max_outputs=1)
     for t in tensors:
       fig.AddSubplot([t])
     im = fig.Finalize()
     with self.assertRaises(tf.errors.InvalidArgumentError):
       s.run(im)
Example 5
 def testLimitsOutputImagesIfBatchIsSmall(self):
   batch_size = 1
   tensors = [tf.zeros((batch_size, 3, 5)), tf.ones((batch_size, 2, 2))]
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary('summary', self.FIGSIZE, max_outputs=3)
     for t in tensors:
       fig.AddSubplot([t])
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), 1)
Example 6
 def testOnlyPlotsFirstMaxOutputImages(self):
   batch_size = 4
   tensors = [tf.ones((batch_size, 3, 5)), tf.ones((batch_size, 2, 2))]
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary('summary', self.FIGSIZE, max_outputs=2)
     for t in tensors:
       fig.AddSubplot([t])
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), 2)
Example 7
 def testDoesNotDieOnMatplotlibError(self):
   invalid_dim_data = tf.ones((5,))
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary('summary', self.FIGSIZE, max_outputs=1)
     fig.AddSubplot([invalid_dim_data])
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), 1)
   value = summary.value[0]
   # Generates dummy 1-pixel image.
   self.assertEqual(value.image.width, 1)
   self.assertEqual(value.image.height, 1)
Example 8
 def testCanUseAsContextManager(self):
   with self.session() as s:
     with plot.MatplotlibFigureSummary(
         'context_manager_figure', self.FIGSIZE, max_outputs=1) as fig:
       batched_data = tf.expand_dims(self.DEFAULT_DATA, 0)  # Batch size 1.
       fig.AddSubplot([batched_data])
     summary_str = s.run(tf.summary.merge_all(scope='context_manager_figure'))
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), 1)
   value = summary.value[0]
   self.assertEqual(value.image.width, self.EXPECTED_DPI * self.FIGSIZE[0])
   self.assertEqual(value.image.height, self.EXPECTED_DPI * self.FIGSIZE[1])
   self.assertEqual(value.image.colorspace, 3)
   self.assertEqual(value.image.encoded_image_string,
                    self.default_encoded_image)
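Used as a context manager, the figure is finalized on exit, so the test recovers the summary through tf.summary.merge_all with the figure's name scope instead of keeping the op returned by Finalize(). A self-contained sketch of the same pattern, assuming lingvo and TF1-style graph mode ('cm_demo' is a made-up name):

import tensorflow.compat.v1 as tf
from lingvo.core import plot

tf.disable_eager_execution()
with tf.Graph().as_default(), tf.Session() as sess:
  data = tf.ones((1, 4, 6))  # batch of one 4x6 "image"
  with plot.MatplotlibFigureSummary('cm_demo', max_outputs=1) as fig:
    fig.AddSubplot([data])  # no explicit Finalize(); __exit__ handles it
  summary_str = sess.run(tf.summary.merge_all(scope='cm_demo'))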
Example 9
 def testUnicodeText(self):
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary(
         'matplotlib_uni', self.FIGSIZE, max_outputs=1)
     batched_data = tf.expand_dims(self.DEFAULT_DATA, 0)  # Batch size 1.
     fig.AddSubplot([batched_data], xlabel=u'bździągwa', ylabel='żółć')
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), 1)
   value = summary.value[0]
   self.assertEqual(value.tag, 'matplotlib_uni/image')
   self.assertEqual(value.image.width, self.EXPECTED_DPI * self.FIGSIZE[0])
   self.assertEqual(value.image.height, self.EXPECTED_DPI * self.FIGSIZE[1])
   self.assertEqual(value.image.colorspace, 3)
Example 10
  def _AddAttenProbsImageSummary(self, name, atten_probs):
    """Add image summary of input attention probabilities."""

    def PlotAttention(fig, axes, cur_atten_probs, title):
      plot.AddImage(fig, axes, cur_atten_probs, title=title)
      axes.set_ylabel(plot.ToUnicode('Output sequence index'), wrap=True)
      axes.set_xlabel(plot.ToUnicode('Input sequence index'), wrap=True)

    with plot.MatplotlibFigureSummary(
        name + '/atten_example',
        figsize=(10, 10),
        max_outputs=1,
        subplot_grid_shape=(1, 1)) as fig:
      # Extract first entry in batch of attention prob matrices
      # [tgt_len, src_len]
      fig.AddSubplot([atten_probs], PlotAttention, title='atten_probs')
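subplot_grid_shape controls how successive AddSubplot calls are laid out inside one figure; here a 1x1 grid holds the single attention panel. A sketch of a 2x1 grid, assuming lingvo and TF1 graph mode ('two_panel' and the dummy tensors are made up):

import tensorflow.compat.v1 as tf
from lingvo.core import plot

tf.disable_eager_execution()
with tf.Graph().as_default():
  # Two stacked panels in a single figure: 2 rows x 1 column.
  with plot.MatplotlibFigureSummary(
      'two_panel', figsize=(6, 8), max_outputs=1,
      subplot_grid_shape=(2, 1)) as fig:
    fig.AddSubplot([tf.ones((1, 4, 6))], title='top panel')
    fig.AddSubplot([tf.zeros((1, 4, 6))], title='bottom panel')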
Example 11
def AddAttentionSummary(params,
                        attention_tensors,
                        src_paddings,
                        tgt_paddings,
                        transcripts=None,
                        max_outputs=3):
  """Adds an image summary showing the attention probability matrix and state.

  Args:
    params: A param object.
    attention_tensors: A list of 3D tensors shaped [target_len, batch_size,
       source_len] where attention[i, j, k] is the probability for the i-th
       output attending to the k-th input for element j in the batch.
    src_paddings: A tensor of binary paddings shaped [source_len, batch] for the
      source sequence.
    tgt_paddings: A tensor of binary paddings shaped [target_len, batch] for the
      target sequence.
    transcripts: Optional, transcripts shaped [batch, source_len] for the source
      sequence.
    max_outputs: Integer maximum number of elements of the batch to plot.

  Returns:
    The added image summary.
  """
  name = attention_tensors[0].name + '/Attention'
  if not params.add_summary:
    return tf.summary.scalar('disabled_%s' % name, 0)
  fig = plot.MatplotlibFigureSummary(name, max_outputs=max_outputs)
  src_lens = SequenceLength(tf.transpose(src_paddings))
  tgt_lens = SequenceLength(tf.transpose(tgt_paddings))
  for n, atten in enumerate(attention_tensors):
    # Diagnostic metric that decreases as attention picks up.
    max_entropy = tf.log(tf.cast(src_lens, tf.float32))
    max_entropy = tf.expand_dims(tf.expand_dims(max_entropy, 0), -1)
    atten_normalized_entropy = -atten * tf.log(atten + 1e-10) / max_entropy
    scalar(params, 'Attention/average_normalized_entropy/%d' % n,
           tf.reduce_mean(atten_normalized_entropy))
    args = [tf.transpose(atten, [1, 0, 2]), src_lens, tgt_lens]
    if transcripts is not None and n == 0:
      args.append(transcripts)
    fig.AddSubplot(
        args,
        TrimPaddingAndPlotAttention,
        title=atten.name,
        xlabel='Input',
        ylabel='Output')
  return fig.Finalize()
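The entropy diagnostic above divides by log(src_len), the entropy of a uniform distribution over source positions, so the averaged value lies in [0, 1] and shrinks as attention sharpens. A standalone numpy sketch of the per-row quantity (function name hypothetical):

import numpy as np

def normalized_attention_entropy(atten, src_len):
  # atten: [target_len, source_len], each row a probability distribution.
  max_entropy = np.log(src_len)  # entropy of a uniform attention row
  entropy = -np.sum(atten * np.log(atten + 1e-10), axis=-1)
  return entropy / max_entropy   # 1.0 for uniform rows, -> 0 when peaked

uniform = np.full((4, 8), 1.0 / 8)
print(normalized_attention_entropy(uniform, 8))  # approx. [1. 1. 1. 1.]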
Example 12
 def testBasic(self):
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary(
         'matplotlib_figure', self.FIGSIZE, max_outputs=1)
     batched_data = tf.expand_dims(self.DEFAULT_DATA, 0)  # Batch size 1.
     fig.AddSubplot([batched_data])
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), 1)
   value = summary.value[0]
   self.assertEqual(value.tag, 'matplotlib_figure/image')
   self.assertEqual(value.image.width, self.EXPECTED_DPI * self.FIGSIZE[0])
   self.assertEqual(value.image.height, self.EXPECTED_DPI * self.FIGSIZE[1])
   self.assertEqual(value.image.colorspace, 3)
   self.assertEqual(value.image.encoded_image_string,
                    self.default_encoded_image)
Example 13
  def testCanChangeFigsize(self):
    figsize = (self.FIGSIZE[0], 2 * self.FIGSIZE[1])
    with self.session() as s:
      fig = plot.MatplotlibFigureSummary('summary', figsize, max_outputs=1)
      batched_data = tf.expand_dims(self.DEFAULT_DATA, 0)  # Batch size 1.
      fig.AddSubplot([batched_data])
      im = fig.Finalize()
      summary_str = s.run(im)
    summary = tf.summary.Summary.FromString(summary_str)
    self.assertEqual(len(summary.value), 1)
    value = summary.value[0]
    self.assertEqual(value.image.width, self.EXPECTED_DPI * figsize[0])
    self.assertEqual(value.image.height, self.EXPECTED_DPI * figsize[1])
    self.assertNotEqual(value.image.encoded_image_string,
                        self.default_encoded_image)
Example 14
def PlotSequenceFeatures(plots, name, **kwargs):
  """Plots a stack of sequence features.

  Args:
    plots: A list of (tensor, seq_len) tuples, as returned by
      PrepareSequenceForPlot().
    name: A string for the caption of the plot.
    **kwargs: Keyword arguments passed to AddSubplot().
  """
  if not _ShouldAddSummary():
    return

  with plot.MatplotlibFigureSummary(name, figsize=(8, len(plots) * 3.5)) as fig:
    for i, (tensor, seq_len) in enumerate(plots):
      fig.AddSubplot([tensor, seq_len],
                     TrimPaddingAndPlotSequence,
                     title=GetTensorName(tensor, name, i),
                     **kwargs)
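A hedged usage sketch: the shapes follow the ReshapeForPlot helper in Example 21 ([batch, dim, time] tensors paired with [batch] lengths), and it assumes these helpers live in lingvo.core.summary_utils (as the _ShouldAddSummary reference suggests) inside a graph context where summaries are enabled:

import tensorflow.compat.v1 as tf
from lingvo.core import summary_utils

tf.disable_eager_execution()
with tf.Graph().as_default():
  features = tf.random.uniform((2, 80, 100))  # [batch, dim, time]
  seq_len = tf.constant([100, 60])            # valid length per example
  # xlabel is forwarded through AddSubplot to TrimPaddingAndPlotSequence.
  summary_utils.PlotSequenceFeatures([(features, seq_len)], 'input_features',
                                     xlabel='Time')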
Example 15
def AddAttentionSummaryBatchMajor(attention_tensors,
                                  src_paddings,
                                  tgt_paddings,
                                  transcripts=None,
                                  max_outputs=3):
    """Adds an image summary showing the attention probability matrix and state.

  As opposed to AddAttentionSummary() takes all tensors with batch dimension in
  axis 0.

  Args:
    attention_tensors: A list of 3D tensors shaped [batch_size, target_len,
      source_len] where attention[b, i, j] is the probability for the i-th
      output attending to the j-th input for element b in the batch.
    src_paddings: A tensor of binary paddings shaped [batch, source_len] for the
      source sequence.
    tgt_paddings: A tensor of binary paddings shaped [batch, target_len] for the
      target sequence.
    transcripts: Optional, transcripts shaped [batch, source_len] for the source
      sequence.
    max_outputs: Integer maximum number of elements of the batch to plot.
  """
    name = attention_tensors[0].name + '/Attention'
    if not _ShouldAddSummary():
        return
    with plot.MatplotlibFigureSummary(name, max_outputs=max_outputs) as fig:
        src_lens = SequenceLength(src_paddings)
        tgt_lens = SequenceLength(tgt_paddings)
        for n, atten in enumerate(attention_tensors):
            # Diagnostic metric that decreases as attention picks up.
            max_entropy = tf.log(tf.cast(src_lens, tf.float32))
            max_entropy = tf.expand_dims(tf.expand_dims(max_entropy, -1), -1)
            atten_normalized_entropy = -atten * tf.log(atten +
                                                       1e-10) / max_entropy
            scalar('Attention/average_normalized_entropy/%d' % n,
                   tf.reduce_mean(atten_normalized_entropy))
            args = [atten, src_lens, tgt_lens]
            if transcripts is not None and n == 0:
                args.append(transcripts)
            fig.AddSubplot(args,
                           TrimPaddingAndPlotAttention,
                           title=atten.name,
                           xlabel='Input',
                           ylabel='Output')
Example 16
 def testLargerBatch(self):
   batch_size = 4
   tensors = [tf.ones((batch_size, 3, 5)), tf.ones((batch_size, 2, 2))]
   with self.session() as s:
     fig = plot.MatplotlibFigureSummary(
         'larger_batch', self.FIGSIZE, max_outputs=batch_size)
     for t in tensors:
       fig.AddSubplot([t])
     im = fig.Finalize()
     summary_str = s.run(im)
   summary = tf.summary.Summary.FromString(summary_str)
   self.assertEqual(len(summary.value), batch_size)
   for n, value in enumerate(summary.value):
     self.assertEqual(value.tag, u'larger_batch/image/%d' % n)
     self.assertEqual(value.image.width, self.EXPECTED_DPI * self.FIGSIZE[0])
     self.assertEqual(value.image.height, self.EXPECTED_DPI * self.FIGSIZE[1])
     self.assertEqual(value.image.colorspace, 3)
     self.assertNotEqual(value.image.encoded_image_string,
                         self.default_encoded_image)
Example 17
    def _AddAttenProbsSummary(self, source_paddings, targets, atten_probs):
        """Add image summary of attention probs.

    Args:
      source_paddings: source padding, of shape [src_len, src_batch].
      targets: A dict of string to tensors representing the targets one try to
          predict. Each tensor in targets is of shape [tgt_batch, tgt_len].
      atten_probs: a list of attention probs, each element is of shape
          [tgt_len, tgt_batch, src_len].
    """
        if not self.cluster.add_summary:
            return

        num_rows = len(atten_probs)
        fig = plot.MatplotlibFigureSummary('decoder_example',
                                           figsize=(6, 3 * num_rows),
                                           max_outputs=1,
                                           subplot_grid_shape=(num_rows, 1))

        def PlotAttention(fig, axes, cur_atten_probs, title, set_x_label):
            plot.AddImage(fig, axes, cur_atten_probs, title=title)
            axes.set_ylabel(plot.ToUnicode('Output sequence index'), wrap=True)
            if set_x_label:
                axes.set_xlabel(plot.ToUnicode('Input sequence index'),
                                wrap=True)

        index = 0
        srclen = tf.cast(tf.reduce_sum(1 - source_paddings[:, index]),
                         tf.int32)
        tgtlen = tf.cast(tf.reduce_sum(1 - targets.paddings[index, :]),
                         tf.int32)

        for i, probs in enumerate(atten_probs):
            # Extract first entry in batch of attention prob matrices
            # [tgt_len, src_len]
            probs = probs[:, index, :]
            probs = tf.expand_dims(probs[:tgtlen, :srclen], 0)
            fig.AddSubplot([probs],
                           PlotAttention,
                           title='atten_probs_%d' % i,
                           set_x_label=(i == len(atten_probs) - 1))
        fig.Finalize()
Example 18
    def testAddMultiCurveSubplot(self):
        with self.session(graph=tf.Graph(), use_gpu=False) as sess:
            fig = plot.MatplotlibFigureSummary('XXX')
            batch_size = 2
            tensor = tf.ones([batch_size, 3])
            paddings = tf.constant([[0., 0., 0.], [0., 1., 1.]])
            plot.AddMultiCurveSubplot(fig, [tensor, tensor],
                                      paddings,
                                      labels=['label1', 'label2'],
                                      xlabels=tf.constant(['a', 'b']),
                                      title='Title',
                                      ylabel='Ylabel')
            summary_str = sess.run(fig.Finalize())

        summary = tf.Summary.FromString(summary_str)
        self.assertEqual(len(summary.value), batch_size)
        for n, value in enumerate(summary.value):
            self.assertEqual(value.tag, 'XXX/image/%d' % n)
            self.assertGreater(value.image.width, 0)
            self.assertGreater(value.image.height, 0)
            self.assertNotEqual(value.image.encoded_image_string,
                                self.default_encoded_image)
Example 19
def CameraImageSummary(frontal_images, run_segment_strings, figsize=(6, 4)):
    """Write frontal_images as tf.Summaries.

  Args:
    frontal_images: Float tensor of frontal camera images: Shape: [batch,
      height, width, depth]. Expected aspect ratio of 3:2 for visualization.
    run_segment_strings: Tensor of strings: Shape: [batch, 1].  The associated
      RunSegment proto for the batch.
    figsize: Tuple indicating size of camera image. Default is (6, 4)
    indicating a 3:2 aspect ratio for visualization.
  """
    # Parse the run segment strings to extract the run segment info.
    run_segment_ids = ExtractRunIds(run_segment_strings)

    def DrawCameraImage(fig, axes, frontal_image, run_segment_id):
        """Draw camera image for image summary."""
        plot.AddImage(fig=fig,
                      axes=axes,
                      data=frontal_image / 256.,
                      show_colorbar=False,
                      suppress_xticks=True,
                      suppress_yticks=True)
        txt = axes.text(x=0.5,
                        y=0.01,
                        s=run_segment_id,
                        color='blue',
                        fontsize=14,
                        transform=axes.transAxes,
                        horizontalalignment='center')
        txt.set_path_effects([
            path_effects.Stroke(linewidth=3, foreground='lightblue'),
            path_effects.Normal()
        ])

    with plot.MatplotlibFigureSummary('examples',
                                      figsize=figsize,
                                      max_outputs=10) as fig:
        # Plot raw frontal image samples for each example.
        fig.AddSubplot([frontal_images, run_segment_ids], DrawCameraImage)
Example 20
def AddAttentionSummaryBatchMajor(attention_tensors,
                                  src_paddings,
                                  tgt_paddings,
                                  transcripts=None,
                                  max_outputs=3):
    """Adds an image summary showing the attention probability matrix and state.

  As opposed to AddAttentionSummary() takes all tensors with batch dimension in
  axis 0.

  Args:
    attention_tensors: A list of 3D tensors shaped [batch_size, target_len,
      source_len] where attention[b, i, j] is the probability for the i-th
      output attending to the j-th input for element b in the batch.
    src_paddings: A tensor of binary paddings shaped [batch, source_len] for the
      source sequence. Or a list of tensors of the same length as
      attention_tensors with a separate paddings for each entry in
      attention_tensors.
    tgt_paddings: A tensor of binary paddings shaped [batch, target_len] for the
      target sequence. Or a list of tensors of the same length as
      attention_tensors with a separate paddings for each entry in
      attention_tensors.
    transcripts: Optional, transcripts shaped [batch, source_len] for the source
      sequence.
    max_outputs: Integer maximum number of elements of the batch to plot.
  """
    def VerifyLen(paddings):
        length = len(paddings) if isinstance(paddings, list) else 1
        if length != 1 and length != len(attention_tensors):
            raise ValueError('Bad length of paddings list {}'.format(length))

    VerifyLen(src_paddings)
    VerifyLen(tgt_paddings)

    name = attention_tensors[0].name + '/Attention'
    if not _ShouldAddSummary():
        return

    def ToLengths(paddings):
        paddings = paddings if isinstance(paddings, list) else [paddings]
        return [SequenceLength(p) for p in paddings]

    def Get(lengths, i):
        return lengths[0 if len(lengths) == 1 else i]

    src_lens = ToLengths(src_paddings)
    tgt_lens = ToLengths(tgt_paddings)

    with plot.MatplotlibFigureSummary(name,
                                      max_outputs=max_outputs,
                                      gridspec_kwargs={'hspace': 0.3}) as fig:
        for n, atten in enumerate(attention_tensors):
            # Diagnostic metric that decreases as attention picks up.
            max_entropy = tf.log(tf.cast(Get(src_lens, n), tf.float32))
            max_entropy = tf.expand_dims(tf.expand_dims(max_entropy, -1), -1)
            atten_normalized_entropy = -atten * tf.log(atten +
                                                       1e-10) / max_entropy
            scalar('Attention/average_normalized_entropy/%d' % n,
                   tf.reduce_mean(atten_normalized_entropy))
            args = [atten, Get(src_lens, n), Get(tgt_lens, n)]
            if transcripts is not None and n == 0:
                args.append(transcripts)
            fig.AddSubplot(args,
                           TrimPaddingAndPlotAttention,
                           title=atten.name,
                           xlabel='Input',
                           ylabel='Output')
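A hedged sketch of the two calling conventions this version accepts, with shapes as documented above; it assumes the function lives in lingvo.core.summary_utils and a TF1 graph context with summaries enabled:

import tensorflow.compat.v1 as tf
from lingvo.core import summary_utils

tf.disable_eager_execution()
with tf.Graph().as_default():
  attens = [tf.random.uniform((2, 5, 7)) for _ in range(2)]  # [b, tgt, src]
  src_pad = tf.zeros((2, 7))
  tgt_pad = tf.zeros((2, 5))

  # One shared paddings tensor for all attention tensors:
  summary_utils.AddAttentionSummaryBatchMajor(attens, src_pad, tgt_pad)

  # Or one paddings tensor per attention tensor:
  summary_utils.AddAttentionSummaryBatchMajor(
      attens, [src_pad, src_pad], [tgt_pad, tgt_pad])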
Example 21
    def FProp(self, theta, batch, state0=None):
        """Encodes source as represented by 'inputs' and 'paddings'.

    Args:
      theta: A NestedMap object containing weights' values of this
        layer and its children layers.
      batch: A NestedMap with fields:

        - src_inputs - The inputs tensor. It is expected to be of shape [batch,
          time, feature_dim, channels].
        - paddings - The paddings tensor. It is expected to be of shape [batch,
          time].
      state0: Recurrent input state. Not supported/ignored by this encoder.

    Returns:
      A NestedMap containing:

      - 'encoded': a feature tensor of shape [time, batch, depth]
      - 'padding': a 0/1 tensor of shape [time, batch]
      - 'state': the updated recurrent state
      - '${layer_type}_${layer_index}': The per-layer encoder output. Each one
        is a NestedMap containing 'encoded' and 'padding' similar to regular
        final outputs, except that 'encoded' from conv or conv_lstm layers are
        of shape [time, batch, depth, channels].
    """
        p = self.params
        inputs, paddings = batch.src_inputs, batch.paddings
        outputs = py_utils.NestedMap()
        with tf.name_scope(p.name):
            # Apply SpecAugment (training only).
            if p.use_specaugment and not p.is_eval:
                inputs, paddings = self.specaugment.FProp(
                    theta.specaugment, inputs, paddings)
            # Add a few extra padded timesteps at the end to ensure the
            # conv layers behave correctly at the sequence edges.
            if p.pad_steps > 0:
                # inplace_update() is not supported on TPU for now. Since we
                # have already padded in the input_generator, this additional
                # padding can be avoided there.
                assert not py_utils.use_tpu()
                inputs_pad = tf.zeros(
                    inplace_ops.inplace_update(tf.shape(inputs), 1,
                                               p.pad_steps), inputs.dtype)
                paddings_pad = tf.ones(
                    inplace_ops.inplace_update(tf.shape(paddings), 1,
                                               p.pad_steps), paddings.dtype)
                inputs = tf.concat([inputs, inputs_pad], 1, name='inputs')
                paddings = tf.concat([paddings, paddings_pad], 1)

            def ReshapeForPlot(tensor, padding, name):
                """Transposes and flattens channels to [batch, dim, seq_len] shape."""
                # Flatten any dimensions beyond the third into the third.
                batch_size = tf.shape(tensor)[0]
                max_len = tf.shape(tensor)[1]
                plot_tensor = tf.reshape(tensor, [batch_size, max_len, -1])
                plot_tensor = tf.transpose(plot_tensor, [0, 2, 1], name=name)
                return (plot_tensor, summary_utils.SequenceLength(padding))

            plots = [
                ReshapeForPlot(tf.transpose(inputs, [0, 1, 3, 2]), paddings,
                               'inputs')
            ]

            conv_out = inputs
            out_padding = paddings
            for i, conv_layer in enumerate(self.conv):
                conv_out, out_padding = conv_layer.FProp(
                    theta.conv[i], conv_out, out_padding)
                if p.extra_per_layer_outputs:
                    conv_out *= (1.0 -
                                 out_padding[:, :, tf.newaxis, tf.newaxis])
                    outputs['conv_%d' % i] = py_utils.NestedMap(
                        encoded=tf.transpose(conv_out,
                                             [1, 0, 2, 3]),  # to [t, b, d, c]
                        padding=tf.transpose(out_padding))
                plots.append(
                    ReshapeForPlot(tf.transpose(conv_out, [0, 1, 3, 2]),
                                   out_padding, 'conv_%d_out' % i))

            def TransposeFirstTwoDims(t):
                first_dim = tf.shape(t)[0]
                second_dim = tf.shape(t)[1]
                t_new = tf.transpose(
                    tf.reshape(t, [first_dim, second_dim, -1]), [1, 0, 2])
                t_shape_new = tf.concat([[second_dim], [first_dim],
                                         tf.shape(t)[2:]], 0)
                return tf.reshape(t_new, t_shape_new)

            # Now the conv-lstm part.
            conv_lstm_out = conv_out
            conv_lstm_out_padding = out_padding
            for i, (rnn, cnn) in enumerate(
                    zip(self.conv_lstm_rnn, self.conv_lstm_cnn)):
                conv_lstm_in = conv_lstm_out
                # Move time dimension to be the first.
                conv_lstm_in = TransposeFirstTwoDims(conv_lstm_in)
                conv_lstm_in = tf.expand_dims(conv_lstm_in, 2)
                conv_lstm_in_padding = tf.expand_dims(
                    tf.transpose(conv_lstm_out_padding), 2)
                lstm_out = rnn.FProp(theta.conv_lstm_rnn[i], conv_lstm_in,
                                     conv_lstm_in_padding)
                # Move time dimension to be the second.
                cnn_in = TransposeFirstTwoDims(lstm_out)
                cnn_in = tf.squeeze(cnn_in, 2)
                cnn_in_padding = conv_lstm_out_padding
                cnn_out, cnn_out_padding = cnn.FProp(theta.conv_lstm_cnn[i],
                                                     cnn_in, cnn_in_padding)
                conv_lstm_out, conv_lstm_out_padding = cnn_out, cnn_out_padding
                if p.extra_per_layer_outputs:
                    conv_lstm_out *= (
                        1.0 -
                        conv_lstm_out_padding[:, :, tf.newaxis, tf.newaxis])
                    outputs['conv_lstm_%d' % i] = py_utils.NestedMap(
                        encoded=tf.transpose(conv_lstm_out,
                                             [1, 0, 2, 3]),  # to [t, b, d, c]
                        padding=tf.transpose(conv_lstm_out_padding))
                plots.append(
                    ReshapeForPlot(conv_lstm_out, conv_lstm_out_padding,
                                   'conv_lstm_%d_out' % i))

            # Need to do a reshape before starting the rnn layers.
            conv_lstm_out = py_utils.HasRank(conv_lstm_out, 4)
            conv_lstm_out_shape = tf.shape(conv_lstm_out)
            new_shape = tf.concat([conv_lstm_out_shape[:2], [-1]], 0)
            conv_lstm_out = tf.reshape(conv_lstm_out, new_shape)
            if self._first_lstm_input_dim_pad:
                conv_lstm_out = tf.pad(
                    conv_lstm_out,
                    [[0, 0], [0, 0], [0, self._first_lstm_input_dim_pad]])

            conv_lstm_out = py_utils.HasShape(
                conv_lstm_out, [-1, -1, self._first_lstm_input_dim])

            # Transpose to move the time dimension to be the first.
            rnn_in = tf.transpose(conv_lstm_out, [1, 0, 2])
            rnn_padding = tf.expand_dims(tf.transpose(conv_lstm_out_padding),
                                         2)
            # rnn_in is of shape [time, batch, depth]
            # rnn_padding is of shape [time, batch, 1]

            # Now the rnn layers.
            num_skips = 0
            for i in range(p.num_lstm_layers):
                rnn_out = self.rnn[i].FProp(theta.rnn[i], rnn_in, rnn_padding)
                residual_index = i - p.residual_start + 1
                if p.residual_start > 0 and residual_index >= 0:
                    if residual_index % p.residual_stride == 0:
                        residual_in = rnn_in
                    if residual_index % p.residual_stride == p.residual_stride - 1:
                        # Highway skip connection.
                        if p.highway_skip:
                            rnn_out = self.highway_skip[num_skips].FProp(
                                theta.highway_skip[num_skips], residual_in,
                                rnn_out)
                            num_skips += 1
                        else:
                            # Residual skip connection.
                            rnn_out += py_utils.HasShape(
                                residual_in, tf.shape(rnn_out))
                if p.project_lstm_output and (i < p.num_lstm_layers - 1):
                    # Projection layers.
                    rnn_out = self.proj[i].FProp(theta.proj[i], rnn_out,
                                                 rnn_padding)
                if i == p.num_lstm_layers - 1:
                    rnn_out *= (1.0 - rnn_padding)
                if p.extra_per_layer_outputs:
                    rnn_out *= (1.0 - rnn_padding)
                    outputs['rnn_%d' % i] = py_utils.NestedMap(
                        encoded=rnn_out, padding=tf.squeeze(rnn_padding, [2]))
                plots.append(
                    ReshapeForPlot(tf.transpose(rnn_out, [1, 0, 2]),
                                   tf.transpose(rnn_padding, [1, 0, 2]),
                                   'rnn_%d_out' % i))
                rnn_in = rnn_out
            final_out = rnn_in

            if self.cluster.add_summary:
                fig = plot.MatplotlibFigureSummary('encoder_example',
                                                   figsize=(8,
                                                            len(plots) * 3.5))

                # Order layers from bottom to top.
                plots.reverse()
                for tensor, seq_len in plots:
                    fig.AddSubplot([tensor, seq_len],
                                   summary_utils.TrimPaddingAndPlotSequence,
                                   title=tensor.name,
                                   xlabel='Time')
                fig.Finalize()

            outputs['encoded'] = final_out
            outputs['padding'] = tf.squeeze(rnn_padding, [2])
            outputs['state'] = py_utils.NestedMap()
            return outputs
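The TransposeFirstTwoDims helper above swaps a tensor's first two dimensions without knowing its full rank at graph-build time, by flattening the trailing dimensions, transposing, and reshaping back. A numpy equivalent for intuition (hypothetical name):

import numpy as np

def transpose_first_two_dims(t):
  # numpy analogue of the TF helper: swap dims 0 and 1, keep the rest.
  flat = t.reshape(t.shape[0], t.shape[1], -1).transpose(1, 0, 2)
  return flat.reshape((t.shape[1], t.shape[0]) + t.shape[2:])

x = np.arange(24).reshape(2, 3, 2, 2)
assert transpose_first_two_dims(x).shape == (3, 2, 2, 2)
assert np.array_equal(transpose_first_two_dims(x),
                      np.transpose(x, (1, 0, 2, 3)))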