Example #1
0
def horizontal_lstm(images, num_filters_out, scope=None):
    """Apply a bidirectional LSTM along the rows of every image.

    Args:
      images: (num_images, height, width, depth) tensor
      num_filters_out: output depth
      scope: optional scope name

    Returns:
      (num_images, height, width, num_filters_out) tensor, where
      num_steps is width and new num_batches is num_image_batches * height
    """
    with variable_scope.variable_scope(scope, "HorizontalLstm", [images]):
        batch_size = _shape(images)[0]
        sequence = images_to_sequence(images)
        # Left-to-right pass produces floor(num_filters_out / 2) channels.
        with variable_scope.variable_scope("lr"):
            forward = lstm1d.ndlstm_base(sequence, num_filters_out // 2)
        # Right-to-left pass produces the remainder, so the two halves
        # always add up to num_filters_out even when it is odd.
        with variable_scope.variable_scope("rl"):
            backward = lstm1d.ndlstm_base(
                sequence, num_filters_out - num_filters_out // 2, reverse=1)
        combined = array_ops.concat([forward, backward], 2)
        return sequence_to_images(combined, batch_size)
Example #2
0
def horizontal_lstm(images, num_filters_out, scope=None):
  """Run an LSTM bidirectionally over all the rows of each image.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor, where
    num_steps is width and new num_batches is num_image_batches * height
  """
  with tf.variable_scope(scope, "HorizontalLstm", [images]):
    batch_size, _, _, _ = _shape(images)
    sequence = images_to_sequence(images)
    # Forward (left-to-right) pass over each row.
    with tf.variable_scope("lr"):
      hidden_sequence_lr = lstm1d.ndlstm_base(sequence, num_filters_out // 2)
    # Backward (right-to-left) pass takes the remaining channels so the two
    # halves always sum to num_filters_out, even when it is odd.
    with tf.variable_scope("rl"):
      hidden_sequence_rl = (
          lstm1d.ndlstm_base(sequence,
                             num_filters_out - num_filters_out // 2,
                             reverse=1))
    # tf.concat_v2 was removed in TF 1.0; tf.concat takes the same
    # (values, axis) argument order.
    output_sequence = tf.concat([hidden_sequence_lr, hidden_sequence_rl], 2)
    output = sequence_to_images(output_sequence, batch_size)
    return output
Example #3
0
 def testSequenceToSequenceDims(self):
   """ndlstm_base maps a (T, B, D) sequence to a (T, B, num_units) one."""
   with self.test_session():
     batch = tf.constant(_rand(17, 1, 5))
     lstm_out = lstm1d.ndlstm_base(batch, 8)
     tf.initialize_all_variables().run()
     value = lstm_out.eval()
     self.assertEqual(tuple(value.shape), (17, 1, 8))
Example #4
0
 def testSequenceToSequenceGradientReverse(self):
     """Gradient of the reversed, statically-unrolled LSTM matches input shape."""
     with self.test_session():
         in_shape = (17, 1, 15)
         out_shape = (17, 1, 8)
         inputs = tf.constant(_rand(*in_shape))
         outputs = lstm1d.ndlstm_base(inputs, 8, reverse=1, dynamic=False)
         tf.initialize_all_variables().run()
         if 1:  # pylint: disable=using-constant-test
             grad = tf.gradients(outputs, inputs)[0].eval()
             self.assertEqual(grad.shape, in_shape)
         else:
             # TODO(tmb) tf.test.compute_gradient error is currently broken
             # with dynamic_rnn. Enable this test case eventually.
             err = tf.test.compute_gradient_error(
                 inputs, in_shape, outputs, out_shape, delta=1e-4)
             self.assert_(not np.isnan(err))
             self.assert_(err < 0.1)
Example #5
0
 def testSequenceToSequenceGradientReverse(self):
   """Check the gradient shape of ndlstm_base with reverse=1, dynamic=False."""
   with self.test_session():
     input_shape = (17, 1, 15)
     output_shape = (17, 1, 8)
     x = tf.constant(_rand(*input_shape))
     y = lstm1d.ndlstm_base(x, 8, reverse=1, dynamic=False)
     tf.initialize_all_variables().run()
     if 1:  # pylint: disable=using-constant-test
       dx = tf.gradients(y, x)[0].eval()
       self.assertEqual(dx.shape, input_shape)
     else:
       # TODO(tmb) tf.test.compute_gradient error is currently broken
       # with dynamic_rnn. Enable this test case eventually.
       err = tf.test.compute_gradient_error(x, input_shape, y, output_shape,
                                            delta=1e-4)
       self.assert_(not np.isnan(err))
       self.assert_(err < 0.1)