def _make_executor_and_tracer_for_test(support_lambdas=False):
  """Returns `(executor, tracer)`: a caching stack over a tracing TF executor.

  Args:
    support_lambdas: If True, wraps the stack so lambdas can be resolved,
      with an additional caching layer on top.
  """
  tracing_ex = executor_test_utils.TracingExecutor(
      eager_tf_executor.EagerTFExecutor())
  caching_ex = caching_executor.CachingExecutor(tracing_ex)
  if not support_lambdas:
    return caching_ex, tracing_ex
  lambda_capable_ex = reference_resolving_executor.ReferenceResolvingExecutor(
      caching_executor.CachingExecutor(caching_ex))
  return lambda_capable_ex, tracing_ex
def _wrap_executor_in_threading_stack(ex: executor_base.Executor,
                                      use_caching: bool = True):
  """Wraps `ex` in the standard thread-delegating executor stack.

  Note: `use_caching` is a plain flag (the body only tests its truthiness),
  so it is annotated `bool` rather than `Optional[bool]`; passing `None`
  still behaves as before (treated as falsy), keeping callers compatible.

  Args:
    ex: The `executor_base.Executor` to wrap.
    use_caching: Whether to interpose a `CachingExecutor` above the
      thread-delegating layer.

  Returns:
    A `ReferenceResolvingExecutor` at the top of the stack.
  """
  threaded_ex = thread_delegating_executor.ThreadDelegatingExecutor(ex)
  if use_caching:
    threaded_ex = caching_executor.CachingExecutor(threaded_ex)
  rre_wrapped_ex = reference_resolving_executor.ReferenceResolvingExecutor(
      threaded_ex)
  return rre_wrapped_ex
def test_close_then_use_executor_with_cache(self):
  """Integration test: an executor stack remains usable after `close()`.

  Use-after-close must be compatible with the combined concurrent and
  caching executors; this regressed in the past due to interactions
  between closing, caching, and the concurrent executor (see b/148288711
  for context).
  """
  executor = concurrent_executor.ConcurrentExecutor(
      caching_executor.CachingExecutor(eager_executor.EagerExecutor()))
  self.use_executor(executor)
  executor.close()
  # Using the executor again after close must still succeed.
  self.use_executor(executor)
def test_create_value_does_not_cache_error(self):
  """A failing `create_value` must not have its error cached."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_value(1.0, tf.float32))
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_value(1.0, tf.float32))
  # The error must not be cached: the inner executor's create_value should be
  # invoked once per attempt, i.e. twice in total.
  mock_executor.create_value.assert_has_calls([
      mock.call(1.0, computation_types.TensorType(tf.float32)),
      mock.call(1.0, computation_types.TensorType(tf.float32))
  ])
def test_create_call_does_not_cache_error(self):
  """A failing `create_call` must not have its error cached."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = create_test_value
  mock_executor.create_call.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  v = loop.run_until_complete(cached_executor.create_value(foo))
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_call(v))
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_call(v))
  # The error must not be cached: the inner executor's create_call should be
  # invoked once per attempt, i.e. twice in total.
  mock_executor.create_call.assert_has_calls(
      [mock.call(TEST_VALUE), mock.call(TEST_VALUE)])
def _wrap_executor_in_threading_stack(ex: executor_base.Executor,
                                      use_caching: Optional[bool] = False,
                                      support_sequence_ops: bool = False,
                                      can_resolve_references=True):
  """Builds the standard threading stack around `ex`.

  Args:
    ex: The executor to wrap.
    use_caching: Whether to add a `CachingExecutor` above the threading layer.
    support_sequence_ops: Whether to add a `SequenceExecutor`; requires
      `can_resolve_references`.
    can_resolve_references: Whether the top of the stack resolves references.

  Returns:
    The outermost executor of the assembled stack.

  Raises:
    ValueError: If `support_sequence_ops` is set without
      `can_resolve_references`.
  """
  stack = thread_delegating_executor.ThreadDelegatingExecutor(ex)
  if use_caching:
    stack = caching_executor.CachingExecutor(stack)
  if support_sequence_ops:
    if not can_resolve_references:
      raise ValueError(
          'Support for sequence ops requires ability to resolve references.')
    stack = sequence_executor.SequenceExecutor(
        reference_resolving_executor.ReferenceResolvingExecutor(stack))
  if can_resolve_references:
    stack = reference_resolving_executor.ReferenceResolvingExecutor(stack)
  return stack
def test_create_value_does_not_cache_error_avoids_double_cache_delete(self):
  """Concurrent failing `create_value` calls share one inner call; both fail."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  future1 = cached_executor.create_value(1.0, tf.float32)
  future2 = cached_executor.create_value(1.0, tf.float32)
  results = loop.run_until_complete(
      asyncio.gather(future1, future2, return_exceptions=True))
  # Ensure the inner create_value (not create_call) is only called once, since
  # the first call inserts the inner executor future into the cache and the
  # second awaits it. However we expect two errors to be returned.
  mock_executor.create_value.assert_called_once_with(
      1.0, computation_types.TensorType(tf.float32))
  self.assertLen(results, 2)
  self.assertIsInstance(results[0], TestError)
  self.assertIsInstance(results[1], TestError)
def test_create_selection_does_not_cache_error(self):
  """A failing `create_selection` must not have its error cached."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = create_test_value
  mock_executor.create_selection.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  value = loop.run_until_complete(
      cached_executor.create_value((1, 2),
                                   computation_types.NamedTupleType(
                                       (tf.int32, tf.int32))))
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_selection(value, 1))
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_selection(value, 1))
  # The error must not be cached: the inner executor's create_selection should
  # be invoked once per attempt, i.e. twice in total. (The previous
  # `assert_has_calls([])` was vacuous — it passes for any call history — so
  # assert the call count explicitly.)
  self.assertEqual(mock_executor.create_selection.call_count, 2)
def test_create_call_does_not_cache_error_avoids_double_cache_delete(self):
  """Concurrent failing `create_call` calls share one inner call; both fail."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = create_test_value
  mock_executor.create_call.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  value = loop.run_until_complete(cached_executor.create_value(foo))
  first_call = cached_executor.create_call(value)
  second_call = cached_executor.create_call(value)
  results = loop.run_until_complete(
      asyncio.gather(first_call, second_call, return_exceptions=True))
  # The first call inserts the inner executor future into the cache, so the
  # inner create_call runs exactly once — yet both callers observe the error.
  mock_executor.create_call.assert_called_once_with(TEST_VALUE)
  self.assertLen(results, 2)
  self.assertIsInstance(results[0], TestError)
  self.assertIsInstance(results[1], TestError)
def test_create_tuple_does_not_cache_error(self):
  """A failing `create_tuple` must not have its error cached."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = create_test_value
  mock_executor.create_tuple.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  value = loop.run_until_complete(cached_executor.create_value(foo))
  value_tuple = (value, value)
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_tuple(value_tuple))
  with self.assertRaises(TestError):
    _ = loop.run_until_complete(cached_executor.create_tuple(value_tuple))
  # The error must not be cached: the inner executor's create_tuple should be
  # invoked once per attempt, i.e. twice in total.
  anon_tuple_value = anonymous_tuple.AnonymousTuple([(None, TEST_VALUE),
                                                     (None, TEST_VALUE)])
  mock_executor.create_tuple.assert_has_calls(
      [mock.call(anon_tuple_value), mock.call(anon_tuple_value)])
def test_create_selection_does_not_cache_error_avoids_double_cache_delete(
    self):
  """Concurrent failing `create_selection` calls share one inner call."""
  loop = asyncio.get_event_loop()
  mock_executor = mock.create_autospec(executor_base.Executor)
  mock_executor.create_value.side_effect = create_test_value
  mock_executor.create_selection.side_effect = raise_error
  cached_executor = caching_executor.CachingExecutor(mock_executor)
  value = loop.run_until_complete(
      cached_executor.create_value((1, 2),
                                   computation_types.NamedTupleType(
                                       (tf.int32, tf.int32))))
  future1 = cached_executor.create_selection(value, 1)
  future2 = cached_executor.create_selection(value, 1)
  results = loop.run_until_complete(
      asyncio.gather(future1, future2, return_exceptions=True))
  # Ensure the inner create_selection is only called once: the first call
  # inserts the inner executor future into the cache and the second awaits it.
  # (The previous `assert_has_calls([])` was vacuous — it passes for any call
  # history — so assert the call count explicitly.) Both callers still observe
  # the error.
  self.assertEqual(mock_executor.create_selection.call_count, 1)
  self.assertLen(results, 2)
  self.assertIsInstance(results[0], TestError)
  self.assertIsInstance(results[1], TestError)
def _complete_stack(ex):
  """Wraps `ex` in thread delegation, then caching, then reference resolution."""
  threaded = thread_delegating_executor.ThreadDelegatingExecutor(ex)
  cached = caching_executor.CachingExecutor(threaded)
  return reference_resolving_executor.ReferenceResolvingExecutor(cached)
def _make_executor_and_tracer_for_test(support_lambdas=False):
  """Returns `(executor, tracer)`: a caching stack over a tracing executor.

  Args:
    support_lambdas: If True, wraps the stack in a `LambdaExecutor` with an
      additional caching layer.
  """
  tracing_ex = executor_test_utils.TracingExecutor(
      eager_executor.EagerExecutor())
  result_ex = caching_executor.CachingExecutor(tracing_ex)
  if support_lambdas:
    result_ex = lambda_executor.LambdaExecutor(
        caching_executor.CachingExecutor(result_ex))
  return result_ex, tracing_ex
def _complete_stack(ex):
  """Wraps `ex` in concurrency, then caching, then lambda support."""
  concurrent = concurrent_executor.ConcurrentExecutor(ex)
  cached = caching_executor.CachingExecutor(concurrent)
  return lambda_executor.LambdaExecutor(cached)
def _create_middle_stack(children):
  """Builds a mid-tier stack: composite over `children`, cached, lambda-capable."""
  composite = composite_executor.CompositeExecutor(_create_bottom_stack(),
                                                   children)
  cached = caching_executor.CachingExecutor(composite)
  return lambda_executor.LambdaExecutor(cached)
def create_test_executor_factory():
  """Returns a factory that always serves one cached, reference-resolving eager TF executor."""
  cached = caching_executor.CachingExecutor(eager_tf_executor.EagerTFExecutor())
  resolving = reference_resolving_executor.ReferenceResolvingExecutor(cached)
  # The same executor instance is shared across all cardinalities.
  return executor_factory.ExecutorFactoryImpl(lambda _: resolving)
def _create_middle_stack(children):
  """Builds a mid-tier stack: composing over `children`, cached, reference-resolving."""
  composing = composing_executor.ComposingExecutor(_create_bottom_stack(),
                                                   children)
  cached = caching_executor.CachingExecutor(composing)
  return reference_resolving_executor.ReferenceResolvingExecutor(cached)
def _create_bottom_stack():
  """Builds a leaf stack: eager TF, thread-delegated, cached, reference-resolving."""
  leaf = eager_tf_executor.EagerTFExecutor()
  threaded = thread_delegating_executor.ThreadDelegatingExecutor(leaf)
  cached = caching_executor.CachingExecutor(threaded)
  return reference_resolving_executor.ReferenceResolvingExecutor(cached)
def _make_executor_and_tracer_for_test():
  """Returns `(executor, tracer)`: a caching executor over a tracing TF executor."""
  tracing_ex = executor_test_utils.TracingExecutor(
      eager_tf_executor.EagerTFExecutor())
  caching_ex = caching_executor.CachingExecutor(tracing_ex)
  return caching_ex, tracing_ex
def _create_bottom_stack():
  """Builds a leaf stack: eager, concurrent, cached, lambda-capable."""
  leaf = eager_executor.EagerExecutor()
  concurrent = concurrent_executor.ConcurrentExecutor(leaf)
  cached = caching_executor.CachingExecutor(concurrent)
  return lambda_executor.LambdaExecutor(cached)