def _dispose(self, value_ref: executor_pb2.ValueRef):
  """Disposes of the remote value stored on the worker service.

  Dispose requests are batched: the value reference is queued locally, and
  an RPC is only issued once `dispose_batch_size` references have
  accumulated.

  Args:
    value_ref: The `executor_pb2.ValueRef` identifying the remote value to
      release on the worker.
  """
  pending = self._dispose_request
  pending.value_ref.append(value_ref)
  if len(pending.value_ref) < self._dispose_batch_size:
    # Batch not full yet; keep accumulating before issuing an RPC.
    return
  # Swap in a fresh request so subsequent values accumulate in a new batch
  # while this one is sent out.
  self._dispose_request = executor_pb2.DisposeRequest()
  if self._bidi_stream is not None:
    fut = self._bidi_stream.send_request(
        executor_pb2.ExecuteRequest(dispose=pending))
    # We don't care about the response, and so don't bother to await it.
    # Just start it as a task so that it runs at some point.
    asyncio.get_event_loop().create_task(fut)
  else:
    _request(self._stub.Dispose, pending)
def __init__(self,
             channel,
             rpc_mode='REQUEST_REPLY',
             thread_pool_executor=None,
             dispose_batch_size=20):
  """Creates a remote executor.

  Args:
    channel: A `grpc.Channel` connected to the remote executor service.
    rpc_mode: Optional mode of calling the remote executor, one of
      'REQUEST_REPLY' (the default) or 'STREAMING'. This option will be
      removed after the request-reply interface is deprecated.
    thread_pool_executor: Optional `concurrent.futures.Executor` used to
      wait for the reply to a streaming RPC message. The default executor
      is used when not specified.
    dispose_batch_size: Number of disposed values batched into a single
      dispose request. Smaller batches mean more requests to the remote
      worker, but values are cleaned up sooner, which may lower memory
      usage on the remote worker.

  Raises:
    ValueError: If `rpc_mode` is not one of the accepted strings.
  """
  py_typecheck.check_type(channel, grpc.Channel)
  py_typecheck.check_type(rpc_mode, str)
  py_typecheck.check_type(dispose_batch_size, int)
  if rpc_mode not in ('REQUEST_REPLY', 'STREAMING'):
    raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
  logging.debug('Creating new ExecutorStub with RPC_MODE=%s', rpc_mode)

  self._channel_status = False

  def _channel_status_callback(
      channel_connectivity: grpc.ChannelConnectivity):
    self._channel_status = channel_connectivity

  channel.subscribe(_channel_status_callback, try_to_connect=True)

  # Hold a reference to the channel so the Python object is not GC'ed,
  # which would turn the subscription callback above into a no-op.
  self._channel = channel
  self._stub = executor_pb2_grpc.ExecutorStub(channel)
  self._bidi_stream = None
  self._dispose_batch_size = dispose_batch_size
  self._dispose_request = executor_pb2.DisposeRequest()
  if rpc_mode == 'STREAMING':
    logging.debug('Creating Bidi stream')
    self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def __init__(self,
             channel,
             rpc_mode=None,
             thread_pool_executor=None,
             dispose_batch_size=20):
  """Creates a remote executor.

  Args:
    channel: An instance of `grpc.Channel` to use for communication with
      the remote executor service.
    rpc_mode: (Deprecated) string, one of 'REQUEST_REPLY' or 'STREAMING'.
      Unused, still here for backwards compatibility. Passing any non-None
      value emits a `DeprecationWarning`.
    thread_pool_executor: Optional concurrent.futures.Executor used to wait
      for the reply to a streaming RPC message. Uses the default Executor
      if not specified.
    dispose_batch_size: The batch size for requests to dispose of remote
      worker values. Lower values will result in more requests to the
      remote worker, but will result in values being cleaned up sooner and
      therefore may result in lower memory usage on the remote worker.
  """
  py_typecheck.check_type(channel, grpc.Channel)
  py_typecheck.check_type(dispose_batch_size, int)
  if rpc_mode is not None:
    # Fix: use DeprecationWarning (the conventional category for deprecated
    # APIs, rather than the default UserWarning) and stacklevel=2 so the
    # warning is attributed to the caller instead of this constructor.
    warnings.warn(
        'The rpc_mode argument is deprecated and slated for '
        'removal. Please update your callsites to avoid specifying '
        'rpc_mode.', DeprecationWarning, stacklevel=2)
  del rpc_mode
  logging.debug('Creating new ExecutorStub')
  self._channel_status = False

  def _channel_status_callback(
      channel_connectivity: grpc.ChannelConnectivity):
    self._channel_status = channel_connectivity

  channel.subscribe(_channel_status_callback, try_to_connect=True)

  # We need to keep a reference to the channel around to prevent the Python
  # object from being GC'ed and the callback above from no-op'ing.
  self._channel = channel
  self._stub = executor_pb2_grpc.ExecutorStub(channel)
  self._dispose_batch_size = dispose_batch_size
  self._dispose_request = executor_pb2.DisposeRequest()
def test_executor_service_value_unavailable_after_dispose(self):
  """Disposing a value makes it unretrievable from the service."""
  env = TestEnv(eager_tf_executor.EagerTFExecutor())
  serialized, _ = executor_service_utils.serialize_value(
      tf.constant(10.0).numpy(), tf.float32)
  # Create the value on the service.
  create_response = env.stub.CreateValue(
      executor_pb2.CreateValueRequest(value=serialized))
  self.assertIsInstance(create_response, executor_pb2.CreateValueResponse)
  value_id = str(create_response.value_ref.id)
  # Confirm that the value appears in the _values map.
  env.get_value_future_directly(value_id)
  # Issue a dispose request covering the value.
  request = executor_pb2.DisposeRequest()
  request.value_ref.append(create_response.value_ref)
  dispose_response = env.stub.Dispose(request)
  self.assertIsInstance(dispose_response, executor_pb2.DisposeResponse)
  # The value must now be gone from the _values map.
  # get_value_future_directly is used here so that we can catch the
  # exception rather than having it occur on the GRPC thread.
  with self.assertRaises(KeyError):
    env.get_value_future_directly(value_id)
def __init__(self, channel, thread_pool_executor=None, dispose_batch_size=20):
  """Creates a remote executor.

  Args:
    channel: A `grpc.Channel` connected to the remote executor service.
    thread_pool_executor: Optional `concurrent.futures.Executor` used to
      wait for the reply to a streaming RPC message. The default executor
      is used when not specified.
    dispose_batch_size: Number of disposed values batched into a single
      dispose request. Smaller batches mean more requests to the remote
      worker, but values are cleaned up sooner, which may lower memory
      usage on the remote worker.
  """
  py_typecheck.check_type(channel, grpc.Channel)
  py_typecheck.check_type(dispose_batch_size, int)
  logging.debug('Creating new ExecutorStub')

  self._channel_status = False

  def _channel_status_callback(
      channel_connectivity: grpc.ChannelConnectivity):
    self._channel_status = channel_connectivity

  channel.subscribe(_channel_status_callback, try_to_connect=True)

  # Hold a reference to the channel so the Python object is not GC'ed,
  # which would turn the subscription callback above into a no-op.
  self._channel = channel
  self._stub = executor_pb2_grpc.ExecutorStub(channel)
  self._dispose_batch_size = dispose_batch_size
  self._dispose_request = executor_pb2.DisposeRequest()