示例#1
0
def _make_executor_and_tracer_for_test(support_lambdas=False):
    """Builds a caching executor stack plus the tracing executor wrapped inside it.

    Args:
      support_lambdas: If True, additionally wraps the stack in a lambda
        executor (with its own caching layer) so lambda computations run.

    Returns:
      A `(executor, tracer)` pair; the tracer records calls made through
      the returned executor.
    """
    eager = eager_executor.EagerExecutor()
    tracer = executor_test_utils.TracingExecutor(eager)
    stack = caching_executor.CachingExecutor(tracer)
    if support_lambdas:
        inner_cache = caching_executor.CachingExecutor(stack)
        stack = lambda_executor.LambdaExecutor(inner_cache)
    return stack, tracer
示例#2
0
 def test_close_then_use_executor_with_cache(self):
     # Regression test: reusing an executor after close() used to break when
     # a concurrent executor wrapped a caching executor, due to interactions
     # between closing, caching, and the concurrent executor. See
     # b/148288711 for context.
     stack = caching_executor.CachingExecutor(eager_executor.EagerExecutor())
     ex = concurrent_executor.ConcurrentExecutor(stack)
     self.use_executor(ex)
     ex.close()
     self.use_executor(ex)
示例#3
0
 def test_create_value_does_not_cache_error(self):
   # A value whose construction failed must not be served from the cache:
   # every create_value attempt has to reach the wrapped executor again.
   loop = asyncio.get_event_loop()
   inner = mock.create_autospec(executor_base.Executor)
   inner.create_value.side_effect = raise_error
   cached = caching_executor.CachingExecutor(inner)
   for _ in range(2):
     with self.assertRaises(TestError):
       loop.run_until_complete(cached.create_value(1.0, tf.float32))
   # The mock saw one call per attempt (i.e. the error was not cached).
   expected_call = mock.call(1.0, computation_types.TensorType(tf.float32))
   inner.create_value.assert_has_calls([expected_call, expected_call])
示例#4
0
 def test_create_call_does_not_cache_error(self):
   # A failed create_call must not be cached; the wrapped executor has to
   # be invoked again on every retry.
   loop = asyncio.get_event_loop()
   inner = mock.create_autospec(executor_base.Executor)
   inner.create_value.side_effect = create_test_value
   inner.create_call.side_effect = raise_error
   cached = caching_executor.CachingExecutor(inner)
   value = loop.run_until_complete(cached.create_value(foo))
   for _ in range(2):
     with self.assertRaises(TestError):
       loop.run_until_complete(cached.create_call(value))
   # One underlying call per attempt proves the error was not cached.
   inner.create_call.assert_has_calls([mock.call(TEST_VALUE)] * 2)
示例#5
0
def create_local_executor(num_clients=None):
  """Constructs an executor to execute computations on the local machine.

  The initial temporary implementation requires that the number of clients be
  specified in advance. This limitation will be removed in the near future.

  NOTE: This function is only available in Python 3.

  Args:
    num_clients: The number of clients. If not specified (`None`), then this
      executor is not federated (can only execute unplaced computations).

  Returns:
    An instance of `tff.framework.Executor` for single-machine use only.

  Raises:
    ValueError: If the number of clients is not one or larger.
  """

  def _create_single_worker_stack():
    """Builds one worker stack: eager -> concurrent -> caching -> lambda."""
    ex = eager_executor.EagerExecutor()
    ex = concurrent_executor.ConcurrentExecutor(ex)
    ex = caching_executor.CachingExecutor(ex)
    return lambda_executor.LambdaExecutor(ex)

  if num_clients is None:
    # Unplaced-only execution: a single worker stack suffices.
    return _create_single_worker_stack()
  else:
    # TODO(b/134543154): We shouldn't have to specify the number of clients;
    # this needs to go away once we flesh out all the remaining bits and
    # pieces.
    py_typecheck.check_type(num_clients, int)
    if num_clients < 1:
      raise ValueError('If the number of clients is present, it must be >= 1.')

    def _create_multiple_worker_stacks(num_workers):
      """Builds `num_workers` independent single-worker stacks."""
      return [_create_single_worker_stack() for _ in range(num_workers)]

    # One worker stack for unplaced values, one for the server, and one per
    # client, all composed under a federated executor.
    return lambda_executor.LambdaExecutor(
        caching_executor.CachingExecutor(
            federated_executor.FederatedExecutor({
                None:
                    _create_multiple_worker_stacks(1),
                placement_literals.SERVER:
                    _create_multiple_worker_stacks(1),
                placement_literals.CLIENTS:
                    (_create_multiple_worker_stacks(num_clients))
            })))
示例#6
0
 def test_create_value_does_not_cache_error_avoids_double_cache_delete(self):
   # Two concurrent create_value calls for the same key share one pending
   # cache entry, so the inner executor is hit exactly once even though
   # both callers observe the failure.
   loop = asyncio.get_event_loop()
   inner = mock.create_autospec(executor_base.Executor)
   inner.create_value.side_effect = raise_error
   cached = caching_executor.CachingExecutor(inner)
   outcomes = loop.run_until_complete(
       asyncio.gather(
           cached.create_value(1.0, tf.float32),
           cached.create_value(1.0, tf.float32),
           return_exceptions=True))
   inner.create_value.assert_called_once_with(
       1.0, computation_types.TensorType(tf.float32))
   self.assertLen(outcomes, 2)
   for outcome in outcomes:
     self.assertIsInstance(outcome, TestError)
示例#7
0
 def test_create_selection_does_not_cache_error(self):
   """Verifies a failed create_selection result is not served from cache.

   Each retry must reach the wrapped executor, so the mock must see one
   create_selection call per attempt.
   """
   loop = asyncio.get_event_loop()
   mock_executor = mock.create_autospec(executor_base.Executor)
   mock_executor.create_value.side_effect = create_test_value
   mock_executor.create_selection.side_effect = raise_error
   cached_executor = caching_executor.CachingExecutor(mock_executor)
   value = loop.run_until_complete(
       cached_executor.create_value((1, 2),
                                    computation_types.NamedTupleType(
                                        (tf.int32, tf.int32))))
   with self.assertRaises(TestError):
     _ = loop.run_until_complete(cached_executor.create_selection(value, 1))
   with self.assertRaises(TestError):
     _ = loop.run_until_complete(cached_executor.create_selection(value, 1))
   # Ensure create_selection was called twice on the mock (not cached and
   # only called once). NOTE: the previous assert_has_calls([]) was vacuous
   # -- it passes for ANY call history -- so assert the count explicitly.
   self.assertEqual(mock_executor.create_selection.call_count, 2)
示例#8
0
 def _create_variable_clients_executors(x):
     """Builds a placement-to-executor-stack mapping from a `dict` argument."""
     py_typecheck.check_type(x, dict)
     # Validate every placement/cardinality pair before building anything.
     for placement, cardinality in six.iteritems(x):
         py_typecheck.check_type(placement,
                                 placement_literals.PlacementLiteral)
         if cardinality <= 0:
             raise ValueError(
                 'Cardinality must be at '
                 'least one; you have passed {} for placement {}.'.format(
                     cardinality, placement))
     executor_dict = {
         placement: _create_multiple_worker_stacks(cardinality)
         for placement, cardinality in six.iteritems(x)
     }
     executor_dict[None] = _create_multiple_worker_stacks(1)
     executor_dict[placement_literals.SERVER] = _create_multiple_worker_stacks(
         1)
     return lambda_executor.LambdaExecutor(
         caching_executor.CachingExecutor(
             federated_executor.FederatedExecutor(executor_dict)))
示例#9
0
 def test_create_call_does_not_cache_error_avoids_double_cache_delete(self):
     # Two concurrent create_call futures for the same key share a single
     # pending cache entry: the inner executor sees exactly one call, yet
     # both futures resolve to the error.
     loop = asyncio.get_event_loop()
     inner = mock.create_autospec(executor_base.Executor)
     inner.create_value.side_effect = create_test_value
     inner.create_call.side_effect = raise_error
     cached = caching_executor.CachingExecutor(inner)
     value = loop.run_until_complete(cached.create_value(foo))
     outcomes = loop.run_until_complete(
         asyncio.gather(
             cached.create_call(value),
             cached.create_call(value),
             return_exceptions=True))
     inner.create_call.assert_called_once_with(TEST_VALUE)
     self.assertLen(outcomes, 2)
     for outcome in outcomes:
         self.assertIsInstance(outcome, TestError)
示例#10
0
 def test_create_tuple_does_not_cache_error(self):
   # A failed create_tuple must not be served from the cache; every retry
   # reaches the wrapped executor again.
   loop = asyncio.get_event_loop()
   inner = mock.create_autospec(executor_base.Executor)
   inner.create_value.side_effect = create_test_value
   inner.create_tuple.side_effect = raise_error
   cached = caching_executor.CachingExecutor(inner)
   element = loop.run_until_complete(cached.create_value(foo))
   pair = (element, element)
   for _ in range(2):
     with self.assertRaises(TestError):
       loop.run_until_complete(cached.create_tuple(pair))
   # One underlying call per attempt proves the error was not cached.
   expected_arg = anonymous_tuple.AnonymousTuple([(None, TEST_VALUE),
                                                  (None, TEST_VALUE)])
   inner.create_tuple.assert_has_calls(
       [mock.call(expected_arg)] * 2)
示例#11
0
 def test_create_selection_does_not_cache_error_avoids_double_cache_delete(
         self):
     """Verifies concurrent failing create_selection calls share one entry.

     The first future inserts the pending inner-executor future into the
     cache, so create_selection reaches the mock exactly once, while both
     callers still observe the error.
     """
     loop = asyncio.get_event_loop()
     mock_executor = mock.create_autospec(executor_base.Executor)
     mock_executor.create_value.side_effect = create_test_value
     mock_executor.create_selection.side_effect = raise_error
     cached_executor = caching_executor.CachingExecutor(mock_executor)
     value = loop.run_until_complete(
         cached_executor.create_value((1, 2),
                                      computation_types.NamedTupleType(
                                          (tf.int32, tf.int32))))
     future1 = cached_executor.create_selection(value, 1)
     future2 = cached_executor.create_selection(value, 1)
     results = loop.run_until_complete(
         asyncio.gather(future1, future2, return_exceptions=True))
     # Ensure create_selection is only called once, since the first call
     # inserts the inner executor future into the cache. However we expect
     # two errors to be returned. NOTE: the previous assert_has_calls([])
     # was vacuous (it passes for ANY call history); assert the count.
     self.assertEqual(mock_executor.create_selection.call_count, 1)
     self.assertLen(results, 2)
     self.assertIsInstance(results[0], TestError)
     self.assertIsInstance(results[1], TestError)
示例#12
0
def _complete_stack(ex):
  """Wraps `ex` in the concurrent, caching, and lambda executor layers."""
  concurrent = concurrent_executor.ConcurrentExecutor(ex)
  cached = caching_executor.CachingExecutor(concurrent)
  return lambda_executor.LambdaExecutor(cached)
示例#13
0
def _create_middle_stack(children):
    """Builds a middle-tier stack compositing `children` over a new bottom stack."""
    composite = composite_executor.CompositeExecutor(_create_bottom_stack(),
                                                     children)
    cached = caching_executor.CachingExecutor(composite)
    return lambda_executor.LambdaExecutor(cached)
示例#14
0
def _create_bottom_stack():
    """Builds a leaf stack: eager -> concurrent -> caching -> lambda."""
    stack = eager_executor.EagerExecutor()
    stack = concurrent_executor.ConcurrentExecutor(stack)
    stack = caching_executor.CachingExecutor(stack)
    return lambda_executor.LambdaExecutor(stack)
示例#15
0
 def _return_executor(x):
     """Returns the federated executor stack, ignoring its argument."""
     del x  # Unused
     federated = federated_executor.FederatedExecutor(executor_dict)
     cached = caching_executor.CachingExecutor(federated)
     return lambda_executor.LambdaExecutor(cached)
示例#16
0
def _create_single_worker_stack():
    """Builds one worker stack: eager -> concurrent -> caching -> lambda."""
    eager = eager_executor.EagerExecutor()
    concurrent = concurrent_executor.ConcurrentExecutor(eager)
    cached = caching_executor.CachingExecutor(concurrent)
    return lambda_executor.LambdaExecutor(cached)