Example #1
0
 def test_close_then_use_executor(self):
     # A closed ConcurrentExecutor should transparently re-initialize on
     # the next use rather than fail.
     executor = concurrent_executor.ConcurrentExecutor(
         eager_executor.EagerExecutor())
     executor.close()
     value = self.use_executor(executor)
     self.assertIsInstance(value, eager_executor.EagerValue)
     self.assertEqual(value.internal_representation.numpy(), 11)
Example #2
0
    def test_multiple_computations_with_same_executor(self):
        """Runs the same computation twice through one executor, closing in between."""

        @computations.tf_computation(tf.int32)
        def add_one(x):
            return tf.add(x, 1)

        executor = concurrent_executor.ConcurrentExecutor(
            eager_executor.EagerExecutor())

        async def compute():
            # Same await order as before: function value, argument value,
            # call, tuple, then selection of element 'a'.
            fn_val = await executor.create_value(add_one)
            arg_val = await executor.create_value(10, tf.int32)
            call_val = await executor.create_call(fn_val, arg_val)
            tuple_val = await executor.create_tuple(
                collections.OrderedDict([('a', call_val)]))
            return await executor.create_selection(tuple_val, name='a')

        result = asyncio.get_event_loop().run_until_complete(compute())
        self.assertIsInstance(result, eager_executor.EagerValue)
        self.assertEqual(result.internal_representation.numpy(), 11)

        # After this call, the ConcurrentExecutor has been closed, and needs
        # to be re-initialized.
        executor.close()

        result = asyncio.get_event_loop().run_until_complete(compute())
        self.assertIsInstance(result, eager_executor.EagerValue)
        self.assertEqual(result.internal_representation.numpy(), 11)
Example #3
0
 def test_close_then_use_executor_with_cache(self):
     # Regression test: use-after-close must work when a CachingExecutor sits
     # between the ConcurrentExecutor and the EagerExecutor. This previously
     # broke due to interactions between closing, caching, and the concurrent
     # executor. See b/148288711 for context.
     executor = concurrent_executor.ConcurrentExecutor(
         caching_executor.CachingExecutor(eager_executor.EagerExecutor()))
     self.use_executor(executor)
     executor.close()
     self.use_executor(executor)
 def make_output():
   """Drives ten ConcurrentExecutors against one shared FakeExecutor.

   Creates ten values concurrently via `asyncio.gather`, checks that every
   index round-trips, and returns what the fake recorded.

   Returns:
     The `output` attribute accumulated by the shared `FakeExecutor`.
   """
   test_ex = FakeExecutor()
   executors = [
       concurrent_executor.ConcurrentExecutor(test_ex) for _ in range(10)
   ]
   loop = asyncio.get_event_loop()
   vals = [ex.create_value(idx) for idx, ex in enumerate(executors)]
   results = loop.run_until_complete(asyncio.gather(*vals))
   # BUG FIX: this is a module-level function, so there is no `self` to
   # supply `assertCountEqual` (the original raised NameError). Use an
   # equivalent order-insensitive plain assertion instead.
   assert sorted(results) == list(range(10))
   del executors
   return test_ex.output
Example #5
0
    def test_end_to_end(self):
        """Installs a ConcurrentExecutor as the default and invokes it twice."""

        @computations.tf_computation(tf.int32)
        def add_one(x):
            return tf.add(x, 1)

        executor = concurrent_executor.ConcurrentExecutor(
            eager_executor.EagerExecutor())
        set_default_executor.set_default_executor(executor)

        self.assertEqual(add_one(7), 8)

        # After the first invocation the ConcurrentExecutor has been closed;
        # the second invocation must re-initialize it transparently.
        self.assertEqual(add_one(8), 9)

        # Restore the framework's default executor.
        set_default_executor.set_default_executor()
Example #6
0
def _complete_stack(ex):
  # Layer `ex` with concurrency, caching, and lambda-handling executors,
  # innermost first.
  concurrent = concurrent_executor.ConcurrentExecutor(ex)
  cached = caching_executor.CachingExecutor(concurrent)
  return lambda_executor.LambdaExecutor(cached)
def _create_bottom_stack():
    # Build the stack inside-out: eager -> concurrent -> caching -> lambda.
    stack = eager_executor.EagerExecutor()
    stack = concurrent_executor.ConcurrentExecutor(stack)
    stack = caching_executor.CachingExecutor(stack)
    return lambda_executor.LambdaExecutor(stack)
Example #8
0
  def test_something(self):
    # TODO(b/134543154): Actually test something.
    # For now, only verify that construction does not raise.
    concurrent_executor.ConcurrentExecutor([])
def _create_single_worker_stack():
    # Same stack as before, expressed as one nested constructor chain:
    # lambda -> caching -> concurrent -> eager.
    return lambda_executor.LambdaExecutor(
        caching_executor.CachingExecutor(
            concurrent_executor.ConcurrentExecutor(
                eager_executor.EagerExecutor())))
 def _make(n):
   # Build `n` independent ConcurrentExecutors that all wrap `bottom_ex`
   # from the enclosing scope.
   stacks = []
   for _ in range(n):
     stacks.append(concurrent_executor.ConcurrentExecutor(bottom_ex))
   return stacks