Example No. 1
  def testBatchWithPadding(self):
    """Test that batching with padding up to an allowed batch size works."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
      batched, index, _ = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[5, 10],
          grad_timeout_micros=0, batching_queue="")
      thread_results = []

      def worker():
        thread_results.extend(
            sess.run([batched, index], feed_dict={inp: [1, 3]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
      worker_thread.join()

      # At this point either the worker thread or the main thread performed
      # the batch; the other should have empty results.
      if list(thread_results[0][0]):
        batch_t = thread_results[0][0]
      else:
        batch_t = main_results[0][0]

      # Check that the batch tensor incorporates the padding.
      self.assertEqual(len(batch_t), 5)
Example No. 2
    def testUnbatchGrad(self):
        """Tests that batch and unbatch are differentiable."""
        with self.cached_session() as sess:
            inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
            batched, index, id_t = batch_ops.batch(
                [inp],
                num_batch_threads=1,
                max_batch_size=2,
                batch_timeout_micros=36000000,
                grad_timeout_micros=1000000,
                batching_queue="")
            computation = batched[0] * batched[0]
            result = batch_ops.unbatch(computation,
                                       index,
                                       id_t,
                                       timeout_micros=1000000,
                                       shared_name="unbatch")
            grad = gradients_impl.gradients(result, inp)
            thread_results = []

            def worker():
                thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))

            worker_thread = threading.Thread(target=worker)
            worker_thread.start()
            main_results = sess.run([grad], feed_dict={inp: [2]})
            worker_thread.join()
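            # d(x*x)/dx = 2x, so the inputs [1] and [2] yield gradients
            # [2] and [4].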
            self.assertEqual(thread_results[0], [2])
            self.assertEqual(main_results[0], [4])
Example No. 3
    def testBatchWithPadding(self):
        """Test that batching with padding up to an allowed batch size works."""
        with self.cached_session() as sess:
            inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
            batched, index, _ = batch_ops.batch(
                [inp],
                num_batch_threads=1,
                max_batch_size=10,
                batch_timeout_micros=100000,  # 100ms
                allowed_batch_sizes=[5, 10],
                grad_timeout_micros=0,
                batching_queue="")
            thread_results = []

            def worker():
                thread_results.extend(
                    sess.run([batched, index], feed_dict={inp: [1, 3]}))

            worker_thread = threading.Thread(target=worker)
            worker_thread.start()
            main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
            worker_thread.join()

            # At this point either the worker thread or the main thread
            # performed the batch; the other should have empty results.
            if list(thread_results[0][0]):
                batch_t = thread_results[0][0]
            else:
                batch_t = main_results[0][0]

            # Check that the batch tensor incorporates the padding.
            self.assertEqual(len(batch_t), 5)
Example No. 4
    def testBasicUnbatch(self):
        """Tests that batch and unbatch work together."""
        with self.cached_session() as sess:
            inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
            batched, index, id_t = batch_ops.batch(
                [inp],
                num_batch_threads=1,
                max_batch_size=10,
                batch_timeout_micros=100000,  # 100ms
                allowed_batch_sizes=[3, 10],
                grad_timeout_micros=0,
                batching_queue="")
            computation = batched[0] + 1
            result = batch_ops.unbatch(computation,
                                       index,
                                       id_t,
                                       timeout_micros=1000000,
                                       shared_name="unbatch")
            thread_results = []

            def worker():
                thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

            worker_thread = threading.Thread(target=worker)
            worker_thread.start()
            main_results = sess.run([result], feed_dict={inp: [2]})
            worker_thread.join()
            self.assertEqual(thread_results[0], [2])
            self.assertEqual(main_results[0], [3])
Example No. 5
 def testIllegalBatchDifferentDim0Sizes(self):
   """Tests illegally feeding tensors with different dim0 sizes."""
   with self.test_session() as sess:
     inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
     inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
     batched, index, _ = batch_ops.batch(
         [inp0, inp1], num_batch_threads=1, max_batch_size=2,
         batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
     with self.assertRaises(Exception) as raised:
       _ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
      self.assertIn("must have equal 0th-dimension size",
                    str(raised.exception))
Example No. 6
 def testIllegalBatchDifferentDim0Sizes(self):
   """Tests illegally feeding tensors with different dim0 sizes."""
   with self.cached_session() as sess:
     inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
     inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
     batched, index, _ = batch_ops.batch(
         [inp0, inp1], num_batch_threads=1, max_batch_size=2,
         batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
     with self.assertRaises(Exception) as raised:
       _ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
      self.assertIn("must have equal 0th-dimension size",
                    str(raised.exception))
Example No. 7
    def testUnbatchTimeout(self):
        """Tests that the unbatch timeout works."""
        with self.test_session() as sess:
            inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
            batched, index, id_t = batch_ops.batch(
                [inp],
                num_batch_threads=1,
                max_batch_size=2,
                batch_timeout_micros=36000000,
                grad_timeout_micros=0,
                batching_queue="")
            computation = batched[0] + 1
            timeout_micros = 10
            result = batch_ops.unbatch(computation,
                                       index,
                                       id_t,
                                       timeout_micros,
                                       shared_name="shared_unbatch")
            # Set up a parallel pipeline that delays the computation, but uses the
            # same unbatch resource object as the non-delayed pipeline.
            computation_delayed = script_ops.py_func(delayed_plus1,
                                                     [batched[0]],
                                                     dtypes.int32)
            result_delayed = batch_ops.unbatch(computation_delayed,
                                               index,
                                               id_t,
                                               timeout_micros,
                                               shared_name="shared_unbatch")

            thread_results = []

            def worker():
                # A first call using the non-delayed pipeline. The batcher will send an
                # empty tensor along the non-delayed pipeline.
                thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

            worker_thread = threading.Thread(target=worker)
            worker_thread.start()
            time.sleep(0.1)  # Ensure the thread's call starts first.
            # A second call using the delayed pipeline.  The batcher will send the
            # batched tensor along the delayed pipeline, thus delaying the arrival of
            # the batched tensor at the unbatch op, relative to the empty tensor.
            #
            # TODO(olston, apassos): Avoid relying on the order in which the batch op
            # emits the empty tensor versus the batched one.
            _ = sess.run([result_delayed], feed_dict={inp: [2]})
            worker_thread.join()
            # The thread's call should hit the timeout, and thus get 0 results.
            self.assertEqual(len(thread_results), 0)
Example No. 8
    def testMultipleBatch(self):
        """Tests that multiple batched tensors execute together."""
        with self.cached_session() as sess:
            inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
            inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
            batched, _, _ = batch_ops.batch([inp0, inp1],
                                            num_batch_threads=1,
                                            max_batch_size=2,
                                            batch_timeout_micros=36000000,
                                            grad_timeout_micros=0,
                                            batching_queue="")
            thread_results = []

            def worker():
                thread_results.extend(
                    sess.run([batched], feed_dict={
                        inp0: [1],
                        inp1: [2]
                    }))

            worker_thread = threading.Thread(target=worker)
            worker_thread.start()
            main_results = sess.run([batched],
                                    feed_dict={
                                        inp0: [2],
                                        inp1: [3]
                                    })
            worker_thread.join()

            # At this point either the worker thread or the main thread
            # performed the batch; the other should have empty results.
            if list(thread_results[0][0]):
                batch_t = thread_results[0]
                empty_t = main_results[0]
            else:
                batch_t = main_results[0]
                empty_t = thread_results[0]

            # Assert that the tensors were batched together.
            self.assertAllEqual(sorted(batch_t[0]), [1, 2])
            self.assertAllEqual(sorted(batch_t[1]), [2, 3])
            self.assertAllEqual(empty_t[0], [])
            self.assertAllEqual(empty_t[1], [])
Example No. 9
  def testUnbatchTimeout(self):
    """Tests that the unbatch timeout works."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=0,
          batching_queue="")
      computation = batched[0] + 1
      timeout_micros = 10
      result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
                                 shared_name="shared_unbatch")
      # Set up a parallel pipeline that delays the computation, but uses the
      # same unbatch resource object as the non-delayed pipeline.
      computation_delayed = script_ops.py_func(delayed_plus1,
                                               [batched[0]],
                                               dtypes.int32)
      result_delayed = batch_ops.unbatch(computation_delayed,
                                         index,
                                         id_t,
                                         timeout_micros,
                                         shared_name="shared_unbatch")

      thread_results = []
      def worker():
        # A first call using the non-delayed pipeline. The batcher will send an
        # empty tensor along the non-delayed pipeline.
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      time.sleep(0.1)  # Ensure the thread's call starts first.
      # A second call using the delayed pipeline.  The batcher will send the
      # batched tensor along the delayed pipeline, thus delaying the arrival of
      # the batched tensor at the unbatch op, relative to the empty tensor.
      #
      # TODO(olston, apassos): Avoid relying on the order in which the batch op
      # emits the empty tensor versus the batched one.
      _ = sess.run([result_delayed], feed_dict={inp: [2]})
      worker_thread.join()
      # The thread's call should hit the timeout, and thus get 0 results.
      self.assertEqual(len(thread_results), 0)
Example No. 10
    def testBasicBatch(self):
        """Tests that a single batched tensor executes together and only once."""
        with self.cached_session() as sess:
            inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
            batched, index, _ = batch_ops.batch([inp],
                                                num_batch_threads=1,
                                                max_batch_size=2,
                                                batch_timeout_micros=36000000,
                                                grad_timeout_micros=0,
                                                batching_queue="")
            thread_results = []

            def worker():
                thread_results.extend(
                    sess.run([batched, index], feed_dict={inp: [1]}))

            worker_thread = threading.Thread(target=worker)
            worker_thread.start()
            main_results = sess.run([batched, index], feed_dict={inp: [2]})
            worker_thread.join()

            # At this point either the worker thread or the main thread
            # performed the batch; the other should have empty results.
            if list(thread_results[0][0]):
                batch_t = thread_results[0][0]
                index_t = thread_results[1]
                empty_b = main_results[0][0]
                empty_m = main_results[1]
            else:
                batch_t = main_results[0][0]
                index_t = main_results[1]
                empty_b = thread_results[0][0]
                empty_m = thread_results[1]

            # Check that both inputs made it out exactly once.
            self.assertAllEqual(sorted(batch_t), (1, 2))
            # Check that we get 2 rows in the index tensor.
            self.assertEqual(len(index_t), 2)
            # Check that the other ones are empty.
            self.assertEqual(len(empty_b), 0)
            self.assertEqual(len(empty_m), 0)
Example No. 11
  def testBasicBatch(self):
    """Tests that a single batched tensor executes together and only once."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, index, _ = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=0,
          batching_queue="")
      thread_results = []

      def worker():
        thread_results.extend(
            sess.run([batched, index], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([batched, index], feed_dict={inp: [2]})
      worker_thread.join()

      # At this point either the worker thread or the main thread performed
      # the batch; the other should have empty results.
      if list(thread_results[0][0]):
        batch_t = thread_results[0][0]
        index_t = thread_results[1]
        empty_b = main_results[0][0]
        empty_m = main_results[1]
      else:
        batch_t = main_results[0][0]
        index_t = main_results[1]
        empty_b = thread_results[0][0]
        empty_m = thread_results[1]

      # Check that both inputs made it out exactly once.
      self.assertAllEqual(sorted(batch_t), (1, 2))
      # Check that we get 2 rows in the index tensor.
      self.assertEqual(len(index_t), 2)
      # Check that the other ones are empty.
      self.assertEqual(len(empty_b), 0)
      self.assertEqual(len(empty_m), 0)
Example No. 12
  def testMultipleBatch(self):
    """Tests that multiple batched tensors execute together."""
    with self.test_session() as sess:
      inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, _, _ = batch_ops.batch(
          [inp0, inp1],
          num_batch_threads=1,
          max_batch_size=2,
          batch_timeout_micros=36000000,
          grad_timeout_micros=0,
          batching_queue="")
      thread_results = []

      def worker():
        thread_results.extend(
            sess.run([batched], feed_dict={inp0: [1],
                                           inp1: [2]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
      worker_thread.join()

      # At this point either the worker thread or the main thread performed
      # the batch; the other should have empty results.
      if list(thread_results[0][0]):
        batch_t = thread_results[0]
        empty_t = main_results[0]
      else:
        batch_t = main_results[0]
        empty_t = thread_results[0]

      # Assert that the tensors were batched together.
      self.assertAllEqual(sorted(batch_t[0]), [1, 2])
      self.assertAllEqual(sorted(batch_t[1]), [2, 3])
      self.assertAllEqual(empty_t[0], [])
      self.assertAllEqual(empty_t[1], [])
Example No. 13
  def testUnbatchGrad(self):
    """Tests that batch and unbatch are differentiable."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=1000000,
          batching_queue="")
      computation = batched[0] * batched[0]
      result = batch_ops.unbatch(computation, index, id_t,
                                 timeout_micros=1000000, shared_name="unbatch")
      grad = gradients_impl.gradients(result, inp)
      thread_results = []

      def worker():
        thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([grad], feed_dict={inp: [2]})
      worker_thread.join()
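      # d(x*x)/dx = 2x, so the inputs [1] and [2] yield gradients [2] and [4].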
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [4])
Example No. 14
  def testBasicUnbatch(self):
    """Tests that batch and unbatch work together."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[3, 10],
          grad_timeout_micros=0, batching_queue="")
      computation = batched[0] + 1
      result = batch_ops.unbatch(computation, index, id_t,
                                 timeout_micros=1000000, shared_name="unbatch")
      thread_results = []

      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))

      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
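
Note: each example above is a test-case method and omits the module-level
context it depends on. Below is a minimal sketch of that context; the import
paths and the delayed_plus1 helper (used by the testUnbatchTimeout examples)
are assumptions based on TensorFlow's batch_ops_test.py and may differ between
TensorFlow versions.

import threading
import time

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test


def delayed_plus1(x):
  """Sleeps for 100ms, then returns x + 1 (exercises the unbatch timeout)."""
  time.sleep(0.1)
  return x + 1


class BatchOpsTest(test.TestCase):
  """The test methods shown in the examples above belong in this class."""


if __name__ == "__main__":
  test.main()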