async def _activate_worker(head_port, worker_port, shard_id=None):
    # this would be done by the Pod, it adds the worker to the head
    activate_msg = ControlRequest(command='ACTIVATE')
    activate_msg.add_related_entity(
        'worker', '127.0.0.1', worker_port, shard_id=shard_id
    )
    GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')
def activate_worker_sync(
    worker_host: str,
    worker_port: int,
    target_head: str,
    shard_id: Optional[int] = None,
) -> ControlRequest:
    """
    Register a given worker with a head by sending an activate request

    :param worker_host: the host address of the worker
    :param worker_port: the port of the worker
    :param target_head: address of the head to send the activate request to
    :param shard_id: id of the shard the worker belongs to
    :returns: the response request
    """
    activate_request = ControlRequest(command='ACTIVATE')
    activate_request.add_related_entity('worker', worker_host, worker_port, shard_id)

    if os.name != 'nt':
        os.unsetenv('http_proxy')
        os.unsetenv('https_proxy')

    return GrpcConnectionPool.send_request_sync(activate_request, target_head)
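# Usage sketch (illustrative, not part of the library): how an orchestration layer
# might call activate_worker_sync once a worker runtime is up. The host, ports and
# shard id below are placeholder values; only the helper defined above is assumed.
def _example_register_worker():
    response = activate_worker_sync(
        worker_host='127.0.0.1',          # address the head should use to reach the worker
        worker_port=12345,                # placeholder worker port
        target_head='127.0.0.1:12346',    # placeholder head control address
        shard_id=0,
    )
    # the head answers with a ControlRequest; its command can be inspected if needed
    return response.command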
async def process_control(self, request: ControlRequest, *args) -> ControlRequest:
    """
    Process the received control request and return the same request

    :param request: the control request to process
    :param args: additional arguments in the grpc call, ignored
    :returns: the input request
    """
    try:
        if self.logger.debug_enabled:
            self._log_control_request(request)

        if request.command == 'STATUS':
            pass
        else:
            raise RuntimeError(
                f'WorkerRuntime received unsupported ControlRequest command {request.command}'
            )
    except Exception as ex:
        self.logger.error(
            f'{ex!r}'
            + (
                '\n add "--quiet-error" to suppress the exception details'
                if not self.args.quiet_error
                else ''
            ),
            exc_info=not self.args.quiet_error,
        )
        request.add_exception(ex, self._data_request_handler._executor)
    return request
async def test_pseudo_remote_pods_replicas(gateway, head, worker):
    NUM_REPLICAS = 3
    head_port = random_port()
    port_expose = random_port()
    graph_description = (
        '{"start-gateway": ["deployment0"], "deployment0": ["end-gateway"]}'
    )
    deployments_addresses = f'{{"deployment0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head, head_port)
    head_pod.start()

    # create the shards
    replica_pods = []
    for i in range(NUM_REPLICAS):
        # create worker
        worker_port = random_port()
        # create a single worker pod
        worker_pod = _create_worker_pod(worker, worker_port, f'deployment0/{i}')
        replica_pods.append(worker_pod)
        worker_pod.start()

        await asyncio.sleep(0.1)
        if head == 'remote':
            worker_host = __docker_host__
        else:
            worker_host = HOST

        # this would be done by the deployment, it adds the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', worker_host, worker_port)
        GrpcConnectionPool.send_request_sync(activate_msg, f'{HOST}:{head_port}')

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(
        gateway, graph_description, deployments_addresses, port_expose
    )
    gateway_pod.start()

    await asyncio.sleep(1.0)

    c = Client(host='localhost', port=port_expose, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_results=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up pods
    gateway_pod.close()
    head_pod.close()
    for pod in replica_pods:
        pod.close()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
async def test_pods_trivial_topology(
    head_runtime_docker_image_built, worker_runtime_docker_image_built
):
    worker_port = random_port()
    head_port = random_port()
    port = random_port()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)

    with gateway_pod, head_pod, worker_pod:
        await asyncio.sleep(1.0)
        assert HeadRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ctrl_address=head_pod.runtime_ctrl_address,
            ready_or_shutdown_event=head_pod.ready_or_shutdown.event,
        )

        assert WorkerRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ctrl_address=worker_pod.runtime_ctrl_address,
            ready_or_shutdown_event=worker_pod.ready_or_shutdown.event,
        )

        head_pod.ready_or_shutdown.event.wait(timeout=5.0)
        worker_pod.ready_or_shutdown.event.wait(timeout=5.0)
        gateway_pod.ready_or_shutdown.event.wait(timeout=5.0)

        # this would be done by the Pod, it adds the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
        activate_msg.add_related_entity('worker', worker_host, int(worker_port))
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, head_pod.runtime_ctrl_address
        )

        # send requests to the gateway
        c = Client(host='localhost', port=port, asyncio=True)
        responses = c.post(
            '/', inputs=async_inputs, request_size=1, return_responses=True
        )
        response_list = []
        async for response in responses:
            response_list.append(response)

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
async def test_pseudo_remote_pods_topologies(gateway, head, worker):
    """
    g(l)-h(l)-w(l) - works
    g(l)-h(l)-w(r) - works - head connects to worker via localhost
    g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost
    g(l)-h(r)-w(l) - doesn't work - a remote head needs a remote worker
    g(r)-... - doesn't work, as the distributed parser is not enabled for the gateway
    After any single failure, segfault
    """
    worker_port = random_port()
    head_port = random_port()
    port_expose = random_port()
    graph_description = (
        '{"start-gateway": ["deployment0"], "deployment0": ["end-gateway"]}'
    )
    if head == 'remote':
        deployments_addresses = f'{{"deployment0": ["{HOST}:{head_port}"]}}'
    else:
        deployments_addresses = f'{{"deployment0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head, head_port)

    # create a single worker pod
    worker_pod = _create_worker_pod(worker, worker_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(
        gateway, graph_description, deployments_addresses, port_expose
    )

    with gateway_pod, worker_pod, head_pod:
        await asyncio.sleep(1.0)

        # this would be done by the deployment, it adds the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
        if head == 'remote':
            worker_host = __docker_host__

        activate_msg.add_related_entity('worker', worker_host, int(worker_port))
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, head_pod.runtime_ctrl_address
        )

        # send requests to the gateway
        c = Client(host='127.0.0.1', port=port_expose, asyncio=True)
        responses = c.post(
            '/', inputs=async_inputs, request_size=1, return_results=True
        )
        response_list = []
        async for response in responses:
            response_list.append(response)

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
async def test_pods_with_replicas_advance_faster(port_generator):
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head_port, 'head')
    head_pod.start()

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)
    gateway_pod.start()

    # create the shards
    pods = []
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker pod
        worker_pod = _create_worker_pod(worker_port, f'pod0/{i}', 'FastSlowExecutor')
        pods.append(worker_pod)
        worker_pod.start()

        await asyncio.sleep(0.1)

    head_pod.wait_start_success()
    gateway_pod.wait_start_success()
    for pod in pods:
        # this would be done by the Pod, it adds the worker to the head
        pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', '127.0.0.1', pod.args.port)
        GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')

    c = Client(host='localhost', port=port, asyncio=True)
    input_docs = [Document(text='slow'), Document(text='fast')]
    responses = c.post('/', inputs=input_docs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up pods
    gateway_pod.close()
    head_pod.close()
    for pod in pods:
        pod.close()

    assert len(response_list) == 2
    for response in response_list:
        assert len(response.docs) == 1

    assert response_list[0].docs[0].text == 'fast'
    assert response_list[1].docs[0].text == 'slow'
async def test_pods_shards(polling, port_generator):
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head_port, 'head', polling)
    head_pod.start()

    # create the shards
    shard_pods = []
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker pod
        worker_pod = _create_worker_pod(worker_port, f'pod0/shard/{i}')
        shard_pods.append(worker_pod)
        worker_pod.start()

        await asyncio.sleep(0.1)

    head_pod.wait_start_success()
    for i, pod in enumerate(shard_pods):
        # this would be done by the Pod, it adds the worker to the head
        pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity(
            'worker', '127.0.0.1', pod.args.port, shard_id=i
        )
        GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)
    gateway_pod.start()

    await asyncio.sleep(1.0)
    gateway_pod.wait_start_success()
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up pods
    gateway_pod.close()
    head_pod.close()
    for shard_pod in shard_pods:
        shard_pod.close()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1 if polling == 'ANY' else len(shard_pods)
async def test_secure_send_request(private_key_cert_chain):
    server1_ready_event = multiprocessing.Event()
    (private_key, certificate_chain) = private_key_cert_chain

    def listen(port, event: multiprocessing.Event):
        class DummyServer:
            async def process_control(self, request, *args):
                returned_msg = ControlRequest(command='DEACTIVATE')
                return returned_msg

        async def start_grpc_server():
            grpc_server = grpc.aio.server(
                options=[
                    ('grpc.max_send_message_length', -1),
                    ('grpc.max_receive_message_length', -1),
                ]
            )
            jina_pb2_grpc.add_JinaControlRequestRPCServicer_to_server(
                DummyServer(), grpc_server
            )
            grpc_server.add_secure_port(
                f'localhost:{port}',
                grpc.ssl_server_credentials((private_key_cert_chain,)),
            )
            await grpc_server.start()
            event.set()
            await grpc_server.wait_for_termination()

        asyncio.run(start_grpc_server())

    port = random_port()
    server_process1 = Process(
        target=listen,
        args=(
            port,
            server1_ready_event,
        ),
    )
    server_process1.start()

    time.sleep(0.1)
    server1_ready_event.wait()

    sent_msg = ControlRequest(command='STATUS')

    result = GrpcConnectionPool.send_request_sync(
        sent_msg, f'localhost:{port}', https=True, root_certificates=certificate_chain
    )
    assert result.command == 'DEACTIVATE'

    result = await GrpcConnectionPool.send_request_async(
        sent_msg, f'localhost:{port}', https=True, root_certificates=certificate_chain
    )
    assert result.command == 'DEACTIVATE'

    server_process1.kill()
    server_process1.join()
def FromString(x: bytes):
    """
    Construct a :class:`ControlRequest` from a serialized protobuf byte string.

    :param x: the serialized bytes of a ``ControlRequestProto``
    :return: the deserialized :class:`ControlRequest`
    """
    proto = jina_pb2.ControlRequestProto()
    proto.ParseFromString(x)
    return ControlRequest(request=proto)
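# Round-trip sketch (illustrative): FromString is the deserializer half of the gRPC
# message contract. Assuming the wrapper also exposes the usual SerializeToString
# counterpart (not shown in the snippet above), a ControlRequest should survive a
# byte-level round trip unchanged.
def _example_control_request_roundtrip():
    original = ControlRequest(command='STATUS')
    raw = original.SerializeToString()         # assumed counterpart of FromString
    restored = ControlRequest.FromString(raw)  # defined above
    assert restored.command == 'STATUS'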
async def deactivate_worker(
    worker_host: str,
    worker_port: int,
    target_head: str,
    shard_id: Optional[int] = None,
) -> ControlRequest:
    """
    Remove a given worker from a head by sending a deactivate request

    :param worker_host: the host address of the worker
    :param worker_port: the port of the worker
    :param target_head: address of the head to send the deactivate request to
    :param shard_id: id of the shard the worker belongs to
    :returns: the response request
    """
    deactivate_request = ControlRequest(command='DEACTIVATE')
    deactivate_request.add_related_entity('worker', worker_host, worker_port, shard_id)
    return await GrpcConnectionPool.send_request_async(deactivate_request, target_head)
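# Usage sketch (illustrative): deactivate_worker is a coroutine, so it has to be
# awaited. The addresses below are placeholders for a worker that was previously
# activated against the same head; only the helper defined above is assumed.
async def _example_deregister_worker():
    await deactivate_worker(
        worker_host='127.0.0.1',          # placeholder worker host
        worker_port=12345,                # placeholder worker port
        target_head='127.0.0.1:12346',    # placeholder head control address
        shard_id=0,
    )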
async def test_pods_trivial_topology(port_generator):
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)

    with gateway_pod, head_pod, worker_pod:
        # this would be done by the Pod, it adds the worker to the head
        head_pod.wait_start_success()
        worker_pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', '127.0.0.1', worker_port)
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, f'127.0.0.1:{head_port}'
        )

        # send requests to the gateway
        gateway_pod.wait_start_success()
        c = Client(host='localhost', port=port, asyncio=True)
        responses = c.post(
            '/', inputs=async_inputs, request_size=1, return_responses=True
        )
        response_list = []
        async for response in responses:
            response_list.append(response)

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
def is_ready(address: str) -> bool:
    """
    TODO: make this async
    Check if status is ready.

    :param address: the address where the control message needs to be sent
    :return: True if status is ready else False.
    """
    try:
        GrpcConnectionPool.send_request_sync(ControlRequest('STATUS'), address)
    except RpcError:
        return False
    return True
async def test_pods_health_check(port_generator, protocol, health_check):
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port, protocol)

    with gateway_pod, head_pod, worker_pod:
        # this would be done by the Pod, it adds the worker to the head
        head_pod.wait_start_success()
        worker_pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', '127.0.0.1', worker_port)
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, f'127.0.0.1:{head_port}'
        )

        # send requests to the gateway
        gateway_pod.wait_start_success()

        for _port in (head_port, worker_port):
            check_health_pod(f'0.0.0.0:{_port}')

        if inspect.iscoroutinefunction(health_check):
            await health_check(f'0.0.0.0:{port}')
        else:
            health_check(f'0.0.0.0:{port}')
def is_ready(ctrl_address: str, **kwargs) -> bool:
    """
    Check if status is ready.

    :param ctrl_address: the address where the control request needs to be sent
    :param kwargs: extra keyword arguments
    :return: True if status is ready else False.
    """
    try:
        GrpcConnectionPool.send_request_sync(ControlRequest('STATUS'), ctrl_address)
    except RpcError:
        return False
    return True
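# Usage sketch (illustrative): a small readiness poll built on top of is_ready.
# The helper name and the timeout/interval values are made up for this example;
# only is_ready and the STATUS control request come from the code above.
def _example_wait_until_ready(ctrl_address: str, timeout: float = 5.0) -> bool:
    import time

    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_ready(ctrl_address):
            return True
        time.sleep(0.1)  # back off briefly before probing again
    return False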
def test_grpc_ssl_with_flow_and_client(cert_pem, key_pem, error_log_level):
    with Flow(
        protocol='grpc',
        ssl_certfile=cert_pem,
        ssl_keyfile=key_pem,
    ) as flow:
        with open(cert_pem, 'rb') as f:
            creds = f.read()

        GrpcConnectionPool.send_request_sync(
            request=ControlRequest('STATUS'),
            target=f'localhost:{flow.port}',
            root_certificates=creds,
            tls=True,
            timeout=1.0,
        )
def check_health_pod(addr: str):
    """Check if a pod is healthy

    :param addr: the address on which the pod is serving, e.g. localhost:1234
    """
    import grpc

    from jina.serve.networking import GrpcConnectionPool
    from jina.types.request.control import ControlRequest

    try:
        GrpcConnectionPool.send_request_sync(
            request=ControlRequest('STATUS'),
            target=addr,
        )
    except grpc.RpcError as e:
        print('The pod is unhealthy')
        print(e)
        raise e

    print('The pod is healthy')
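# Usage sketch (illustrative): probing a locally running pod with check_health_pod.
# The default address is a placeholder; check_health_pod re-raises the underlying
# grpc.RpcError when the STATUS request fails, so callers that only want a boolean
# can wrap it like this.
def _example_probe(addr: str = 'localhost:12345') -> bool:
    try:
        check_health_pod(addr)
        return True
    except Exception:
        return False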
async def test_grpc_connection_pool_real_sending():
    server1_ready_event = multiprocessing.Event()
    server2_ready_event = multiprocessing.Event()

    def listen(port, event: multiprocessing.Event):
        class DummyServer:
            async def process_control(self, request, *args):
                returned_msg = ControlRequest(command='DEACTIVATE')
                return returned_msg

        async def start_grpc_server():
            grpc_server = grpc.aio.server(
                options=[
                    ('grpc.max_send_message_length', -1),
                    ('grpc.max_receive_message_length', -1),
                ]
            )
            jina_pb2_grpc.add_JinaControlRequestRPCServicer_to_server(
                DummyServer(), grpc_server
            )
            grpc_server.add_insecure_port(f'localhost:{port}')
            await grpc_server.start()
            event.set()
            await grpc_server.wait_for_termination()

        asyncio.run(start_grpc_server())

    port1 = random_port()
    server_process1 = Process(
        target=listen,
        args=(
            port1,
            server1_ready_event,
        ),
    )
    server_process1.start()

    port2 = random_port()
    server_process2 = Process(
        target=listen,
        args=(
            port2,
            server2_ready_event,
        ),
    )
    server_process2.start()

    time.sleep(0.1)
    server1_ready_event.wait()
    server2_ready_event.wait()
    pool = GrpcConnectionPool()
    pool.add_connection(deployment='encoder', head=False, address=f'localhost:{port1}')
    pool.add_connection(deployment='encoder', head=False, address=f'localhost:{port2}')
    sent_msg = ControlRequest(command='STATUS')

    results_call_1 = pool.send_request(
        request=sent_msg, deployment='encoder', head=False
    )
    results_call_2 = pool.send_request(
        request=sent_msg, deployment='encoder', head=False
    )
    assert len(results_call_1) == 1
    assert len(results_call_2) == 1

    response1, meta = await results_call_1[0]
    assert response1.command == 'DEACTIVATE'

    response2, meta = await results_call_2[0]
    assert response2.command == 'DEACTIVATE'

    await pool.close()
    server_process1.kill()
    server_process2.kill()
    server_process1.join()
    server_process2.join()
def _remove_worker(args, ip='fake_ip', shard_id=None):
    deactivate_msg = ControlRequest(command='DEACTIVATE')
    deactivate_msg.add_related_entity('worker', ip, 8080, shard_id)
    assert GrpcConnectionPool.send_request_sync(
        deactivate_msg, f'{args.host}:{args.port}'
    )
def test_command(control_req):
    request = ControlRequest(request=control_req)
    cmd = request.command
    assert cmd
    assert isinstance(cmd, str)
async def test_runtimes_trivial_topology(port_generator):
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker runtime
    worker_process = multiprocessing.Process(
        target=_create_worker_runtime, args=(worker_port,)
    )
    worker_process.start()

    # create a single head runtime
    head_process = multiprocessing.Process(
        target=_create_head_runtime, args=(head_port,)
    )
    head_process.start()

    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()

    await asyncio.sleep(1.0)

    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{head_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{worker_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    # this would be done by the Pod, it adds the worker to the head
    activate_msg = ControlRequest(command='ACTIVATE')
    activate_msg.add_related_entity('worker', '127.0.0.1', worker_port)
    GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')

    # send requests to the gateway
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up runtimes
    gateway_process.terminate()
    head_process.terminate()
    worker_process.terminate()
    gateway_process.join()
    head_process.join()
    worker_process.join()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
    assert gateway_process.exitcode == 0
    assert head_process.exitcode == 0
    assert worker_process.exitcode == 0
async def test_connection_pool(mocker, monkeypatch):
    close_mock_object, create_mock = await _mock_grpc(mocker, monkeypatch)

    pool = GrpcConnectionPool()
    send_mock = mocker.Mock()
    pool._send_requests = lambda messages, connection, endpoint: mock_send(send_mock)

    pool.add_connection(deployment='encoder', head=False, address='1.1.1.1:53')
    pool.add_connection(deployment='encoder', head=False, address='1.1.1.2:53')
    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='encoder', head=False
    )
    assert len(results) == 1
    assert send_mock.call_count == 1
    assert create_mock.call_count == 2

    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='encoder', head=False
    )
    assert len(results) == 1
    assert send_mock.call_count == 2
    assert create_mock.call_count == 2

    # indexer was not added yet, so there isn't anything being sent
    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='indexer', head=False
    )
    assert len(results) == 0
    assert send_mock.call_count == 2
    assert create_mock.call_count == 2

    # add indexer now so requests to it can be sent
    pool.add_connection(deployment='indexer', head=False, address='2.1.1.1:53')
    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='indexer', head=False
    )
    assert len(results) == 1
    assert send_mock.call_count == 3
    assert create_mock.call_count == 3

    # polling only applies to shards; there are no shards here, so it only sends one message
    pool.add_connection(deployment='encoder', head=False, address='1.1.1.3:53')
    results = pool.send_request(
        request=ControlRequest(command='STATUS'),
        deployment='encoder',
        head=False,
        polling_type=PollingType.ALL,
    )
    assert len(results) == 1
    assert send_mock.call_count == 4
    assert create_mock.call_count == 4

    # polling only applies to shards, so we add a shard now and expect 2 messages being sent
    pool.add_connection(deployment='encoder', head=False, address='1.1.1.3:53', shard_id=1)
    # adding the same connection again is a noop
    pool.add_connection(deployment='encoder', head=False, address='1.1.1.3:53', shard_id=1)
    results = pool.send_request(
        request=ControlRequest(command='STATUS'),
        deployment='encoder',
        head=False,
        polling_type=PollingType.ALL,
    )
    assert len(results) == 2
    assert send_mock.call_count == 6
    assert create_mock.call_count == 5

    # sending to one specific shard should only send one message
    results = pool.send_request(
        request=ControlRequest(command='STATUS'),
        deployment='encoder',
        head=False,
        polling_type=PollingType.ANY,
        shard_id=1,
    )
    assert len(results) == 1
    assert send_mock.call_count == 7

    # doing the same with polling ALL ignores the shard id
    results = pool.send_request(
        request=ControlRequest(command='STATUS'),
        deployment='encoder',
        head=False,
        polling_type=PollingType.ALL,
        shard_id=1,
    )
    assert len(results) == 2
    assert send_mock.call_count == 9

    # removing a replica for shard 0 works and does not prevent messages from being sent to the shard
    assert await pool.remove_connection(
        deployment='encoder', head=False, address='1.1.1.2:53', shard_id=0
    )
    assert close_mock_object.call_count == 1
    results = pool.send_request(
        request=ControlRequest(command='STATUS'),
        deployment='encoder',
        head=False,
        polling_type=PollingType.ANY,
        shard_id=0,
    )
    assert len(results) == 1
    assert send_mock.call_count == 10

    # encoder pod has no head registered yet, so sending to the head will not work
    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='encoder', head=True
    )
    assert len(results) == 0
    assert send_mock.call_count == 10

    # after registering a head for encoder, sending to head should work
    pool.add_connection(deployment='encoder', head=True, address='1.1.1.10:53')
    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='encoder', head=True
    )
    assert len(results) == 1
    assert send_mock.call_count == 11

    # after removing the head again, sending will not work
    assert await pool.remove_connection(
        deployment='encoder', head=True, address='1.1.1.10:53'
    )
    assert close_mock_object.call_count == 2
    results = pool.send_request(
        request=ControlRequest(command='STATUS'), deployment='encoder', head=True
    )
    assert len(results) == 0
    assert send_mock.call_count == 11

    # check that remove/add order is handled well
    pool.add_connection(deployment='encoder', head=False, address='1.1.1.4:53')
    assert await pool.remove_connection(
        deployment='encoder', head=False, address='1.1.1.1:53'
    )
    assert await pool.remove_connection(
        deployment='encoder', head=False, address='1.1.1.4:53'
    )
    assert close_mock_object.call_count == 4
    assert not (
        await pool.remove_connection(
            deployment='encoder', head=False, address='1.1.1.2:53'
        )
    )

    await pool.close()
async def process_control(self, request, *args):
    returned_msg = ControlRequest(command='DEACTIVATE')
    await asyncio.sleep(0.1)
    return returned_msg
async def test_grpc_connection_pool_real_sending_timeout():
    server1_ready_event = multiprocessing.Event()

    def listen(port, event: multiprocessing.Event):
        class DummyServer:
            async def process_control(self, request, *args):
                returned_msg = ControlRequest(command='DEACTIVATE')
                await asyncio.sleep(0.1)
                return returned_msg

        async def start_grpc_server():
            grpc_server = grpc.aio.server(
                options=[
                    ('grpc.max_send_message_length', -1),
                    ('grpc.max_receive_message_length', -1),
                ]
            )
            jina_pb2_grpc.add_JinaControlRequestRPCServicer_to_server(
                DummyServer(), grpc_server
            )
            service_names = (
                jina_pb2.DESCRIPTOR.services_by_name[
                    'JinaControlRequestRPC'
                ].full_name,
                reflection.SERVICE_NAME,
            )
            reflection.enable_server_reflection(service_names, grpc_server)
            grpc_server.add_insecure_port(f'localhost:{port}')
            await grpc_server.start()
            event.set()
            await grpc_server.wait_for_termination()

        asyncio.run(start_grpc_server())

    port1 = random_port()
    server_process1 = Process(
        target=listen,
        args=(
            port1,
            server1_ready_event,
        ),
    )
    server_process1.start()

    time.sleep(0.1)
    server1_ready_event.wait()
    pool = GrpcConnectionPool()
    pool.add_connection(deployment='encoder', head=False, address=f'localhost:{port1}')
    sent_msg = ControlRequest(command='STATUS')

    results_call_1 = pool.send_request(
        request=sent_msg, deployment='encoder', head=False, timeout=1.0
    )
    assert len(results_call_1) == 1
    response1, meta = await results_call_1[0]
    assert response1.command == 'DEACTIVATE'

    results_call_2 = pool.send_request(
        request=sent_msg, deployment='encoder', head=False, timeout=0.05
    )
    assert len(results_call_2) == 1
    with pytest.raises(AioRpcError):
        await results_call_2[0]

    await pool.close()
    server_process1.kill()
    server_process1.join()
async def process_control(self, request, *args):
    returned_msg = ControlRequest(command='DEACTIVATE')
    return returned_msg