def test_independent_client(protocol):
    """An externally constructed Client must be of the same class as the Flow's
    own client, and must be able to send a request to the running Flow."""
    with Flow(protocol=protocol) as flow:
        client = Client(
            host='localhost', port_expose=flow.port_expose, protocol=protocol
        )
        # exact class comparison is intentional: Client() must resolve to the
        # same concrete client type the Flow exposes for this protocol
        assert type(client) == type(flow.client)
        client.post('/')
def test_independent_client():
    """Standalone clients can talk to a running Flow: the default (gRPC) client
    to a default Flow, and a WebSocketClient to a restful Flow."""
    with Flow() as flow:
        Client(port_expose=flow.port_expose).post('/')
    # a restful Flow requires the websocket client flavour
    with Flow(restful=True) as flow:
        WebSocketClient(port_expose=flow.port_expose).post('/')
def test_disable_prefetch_fast_client_slow_executor(
    protocol, inputs, monkeypatch, simple_graph_dict_slow
):
    """With prefetch disabled and a slow Executor, all client inputs should be
    generated before the first request finishes in the Executor, and at least
    one ``on_done`` callback should fire before the Executor has processed the
    last request (i.e. requests do not pile up waiting at the Executor).
    """
    monkeypatch.setattr(
        networking.GrpcConnectionPool,
        'send_requests_once',
        DummyMockConnectionPool.send_requests_once,
    )
    port_in = random_port()
    final_da = DocumentArray()
    p = multiprocessing.Process(
        target=create_runtime,
        kwargs={
            'protocol': protocol,
            'port_in': port_in,
            'graph_dict': simple_graph_dict_slow,
        },
    )
    p.start()
    # give the runtime process a moment to come up before connecting
    time.sleep(1.0)
    client = Client(protocol=protocol, port=port_in)
    client.post(
        on='/',
        inputs=inputs,
        request_size=1,
        on_done=lambda response: on_done(response, final_da),
    )
    p.terminate()
    p.join()
    assert len(final_da) == INPUT_LEN
    # since Executor is slow, all client inputs should be read before the 1st
    # request exits from the Executor: both ids and generation timestamps must
    # be strictly ordered between consecutive documents
    for i in range(INPUT_LEN - 1):
        cur = final_da[f'id-{i}']
        nxt = final_da[f'id-{i + 1}']
        assert cur.id < nxt.id, (
            f'ids are not ordered with times {cur.tags["input_gen"]}'
            f' and {nxt.tags["input_gen"]}'
        )
        assert cur.tags['input_gen'] < nxt.tags['input_gen']
    # the last input was generated before the first one left the Executor
    assert (
        final_da[f'id-{INPUT_LEN - 1}'].tags['input_gen']
        < final_da['id-0'].tags['executor']
    )
    # At least 1 request should reach `on_done` before all requests are
    # processed in the Executor. Validates that the requests are not pending
    # at the Executor
    first_on_done_time = min(i.tags['on_done'] for i in final_da)
    last_executor_time = max(i.tags['executor'] for i in final_da)
    assert first_on_done_time < last_executor_time
def test_disable_prefetch_slow_client_fast_executor(
    protocol, inputs, monkeypatch, simple_graph_dict_fast
):
    """With a slow input generator and a fast Executor, every request must
    complete its full gen -> executor -> on_done cycle before the next
    request is even generated.
    """
    monkeypatch.setattr(
        networking.GrpcConnectionPool,
        'send_requests_once',
        DummyMockConnectionPool.send_requests_once,
    )
    port_in = random_port()
    p = multiprocessing.Process(
        target=create_runtime,
        kwargs={
            'protocol': protocol,
            'port_in': port_in,
            'graph_dict': simple_graph_dict_fast,
        },
    )
    p.start()
    # give the runtime process a moment to come up before connecting
    time.sleep(1.0)
    final_da = DocumentArray()
    client = Client(protocol=protocol, port=port_in)
    client.post(
        on='/',
        inputs=inputs,
        request_size=1,
        on_done=lambda response: on_done(response, final_da),
    )
    p.terminate()
    p.join()
    assert len(final_da) == INPUT_LEN
    # Since the input_gen is slow, order will always be gen -> exec -> on_done
    # for every request, and each request fully completes before the next one
    # is generated
    for i in range(INPUT_LEN):
        tags = final_da[f'id-{i}'].tags
        assert tags['input_gen'] < tags['executor']
        assert tags['executor'] < tags['on_done']
        if i + 1 < INPUT_LEN:
            assert tags['on_done'] < final_da[f'id-{i + 1}'].tags['input_gen']
def send_requests(
    client_kwargs,
    rolling_event,
    client_ready_to_send_event,
    exception_to_raise_event,
):
    """Keep posting batches of 10 Documents until ``rolling_event`` is set.

    Signals readiness through ``client_ready_to_send_event`` before the first
    post; on any failure sets ``exception_to_raise_event`` so the parent
    process can detect the error. Runs as a child-process target.
    """
    from jina.logging.logger import JinaLogger
    from jina.clients import Client

    _logger = JinaLogger('test_send_requests')
    _logger.debug(' send request start')
    try:
        client = Client(**client_kwargs)
        client.show_progress = True
        _logger.debug(f' Client instantiated with {client_kwargs}')
        _logger.debug(' Set client_ready_to_send_event event')
        client_ready_to_send_event.set()
        while not rolling_event.is_set():
            _logger.debug(' event is not set')
            r = client.post(
                '/exec',
                [Document() for _ in range(10)],
                return_results=True,
                port_expose=9090,
            )
            # every response must carry docs tagged by one of the two replicas
            assert len(r) > 0
            assert len(r[0].docs) > 0
            for doc in r[0].docs:
                assert doc.tags['argument'] in ['value1', 'value2']
            time.sleep(0.1)
        _logger.debug(' event is unset')
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit can
        # still propagate; the failure is reported via the event, not re-raised
        _logger.error(' Some error happened while sending requests')
        exception_to_raise_event.set()
    _logger.debug(' send requests finished')
def send_requests(
    client_kwargs,
    stop_event: multiprocessing.Event,
    scale_event: multiprocessing.Event,
    received_responses: multiprocessing.Queue,
    response_arrival_times: multiprocessing.Queue,
    logger,
):
    """Stream up to 50 single-document requests, recording each response text
    and the time between consecutive response arrivals into the given queues."""
    from jina.clients import Client

    client = Client(**client_kwargs)
    client.show_progress = True

    class ResponseValidator:
        """Callback holder: pushes response payloads and arrival deltas."""

        def __init__(
            self,
            responses_queue: multiprocessing.Queue,
            arrival_times_queue: multiprocessing.Queue,
        ):
            self._last_arrival = None
            self._responses_queue = responses_queue
            self._arrival_times_queue = arrival_times_queue

        def process_response(self, req):
            logger.debug(f'Received response {req.data.docs[0].text}')
            self._responses_queue.put(req.data.docs[0].text)
            # no delta for the very first response
            if self._last_arrival is not None:
                self._arrival_times_queue.put(time.time() - self._last_arrival)
            self._last_arrival = time.time()

    validator = ResponseValidator(received_responses, response_arrival_times)

    async def async_inputs():
        for i in range(50):
            yield Document(text=f'{i}')
            if stop_event.is_set():
                # keep yielding the remaining docs, just without any delay
                logger.debug(f'stop sending new requests after {i} requests')
            else:
                # throttle harder while a scaling operation is in flight
                await asyncio.sleep(1.0 if scale_event.is_set() else 0.05)

    client.post(
        '/',
        inputs=async_inputs,
        request_size=1,
        on_done=validator.process_response,
    )
def client_send(client_id: int, port: int, protocol: str):
    """Send one request tagged with ``client_id`` and return the responses."""
    from jina.clients import Client

    client = Client(protocol=protocol, port=port, return_responses=True)
    # send requests
    request_doc = Document(text=f'client{client_id}-Request')
    return client.post(on='/', inputs=DocumentArray([request_doc]))
async def run_test_until_event(
    flow, core_client, namespace, endpoint, stop_event, logger, sleep_time=0.05
):
    """Port-forward to the in-cluster gateway and stream requests one at a
    time until ``stop_event`` is set; returns (responses, ids that were sent)."""
    # start port forwarding
    from jina.clients import Client

    gateway_pod_name = (
        core_client.list_namespaced_pod(
            namespace=namespace, label_selector='app=gateway'
        )
        .items[0]
        .metadata.name
    )
    config_path = os.environ['KUBECONFIG']
    import portforward

    with portforward.forward(
        namespace, gateway_pod_name, flow.port, flow.port, config_path
    ):
        client_kwargs = dict(
            host='localhost',
            port=flow.port,
            return_responses=True,
            asyncio=True,
        )
        # inherit protocol etc. from the Flow under test
        client_kwargs.update(flow._common_kwargs)
        client = Client(**client_kwargs)
        client.show_progress = True

        async def async_inputs(sent_ids: Set[int], sleep_time: float = 0.05):
            i = 0
            while True:
                sent_ids.add(i)
                yield Document(text=f'{i}')
                if stop_event.is_set():
                    logger.info(f'stop yielding new requests after {i} requests')
                    return
                elif sleep_time:
                    await asyncio.sleep(sleep_time)
                i += 1

        sent_ids = set()
        responses = [
            resp
            async for resp in client.post(
                endpoint,
                inputs=functools.partial(async_inputs, sent_ids, sleep_time),
                request_size=1,
            )
        ]
        logger.info(
            f'Client sent {len(sent_ids)} and received {len(responses)} responses'
        )
        return responses, sent_ids
def test_client_websocket(mocker, flow_with_websocket):
    """A websocket Client posting to a live Flow must trigger on_done and
    on_always exactly once, and never on_error."""
    with flow_with_websocket:
        time.sleep(0.5)
        client = Client(
            host='localhost',
            port=str(flow_with_websocket.port_expose),
            protocol='websocket',
        )
        # Test that a regular index request triggers the correct callbacks
        callbacks = {
            name: mocker.Mock() for name in ('on_always', 'on_error', 'on_done')
        }
        client.post(
            '',
            random_docs(1),
            request_size=1,
            on_always=callbacks['on_always'],
            on_error=callbacks['on_error'],
            on_done=callbacks['on_done'],
        )
        callbacks['on_always'].assert_called_once()
        callbacks['on_done'].assert_called_once()
        callbacks['on_error'].assert_not_called()
def test_all_sync_clients(protocol, mocker):
    """Every posting style of the sync Client (no docs, docs, parameters)
    must invoke its on_done callback."""
    flow = Flow(protocol=protocol).add(uses=MyExec)
    docs = list(random_docs(1000))
    callbacks = [mocker.Mock() for _ in range(4)]

    with flow:
        client = Client(host='localhost', port=flow.port_expose, protocol=protocol)
        client.post('/', on_done=callbacks[0])
        client.post('/foo', docs, on_done=callbacks[1])
        client.post('/foo', on_done=callbacks[2])
        client.post('/foo', docs, parameters={'hello': 'world'}, on_done=callbacks[3])

        # empty-doc posts produce a single response; doc-carrying posts may
        # produce several
        callbacks[0].assert_called_once()
        callbacks[1].assert_called()
        callbacks[2].assert_called_once()
        callbacks[3].assert_called()
async def run_test(flow, endpoint, num_docs=10, request_size=10):
    """Post ``num_docs`` empty Documents to ``endpoint`` on the given Flow and
    collect every response."""
    # start port forwarding
    from jina.clients import Client

    client_kwargs = dict(
        host='localhost',
        port=flow.port_expose,
        asyncio=True,
    )
    # inherit protocol etc. from the Flow under test
    client_kwargs.update(flow._common_kwargs)
    client = Client(**client_kwargs)
    client.show_progress = True

    docs = [Document() for _ in range(num_docs)]
    return [
        resp
        async for resp in client.post(
            endpoint,
            inputs=docs,
            return_results=True,
            request_size=request_size,
        )
    ]
async def run_test(
    flow, core_client, namespace, endpoint, n_docs=10, request_size=100
):
    """Port-forward to the in-cluster gateway, post ``n_docs`` empty Documents
    to ``endpoint``, and collect every response."""
    # start port forwarding
    from jina.clients import Client

    gateway_pod_name = (
        core_client.list_namespaced_pod(
            namespace=namespace, label_selector='app=gateway'
        )
        .items[0]
        .metadata.name
    )
    config_path = os.environ['KUBECONFIG']
    import portforward

    with portforward.forward(
        namespace, gateway_pod_name, flow.port, flow.port, config_path
    ):
        client_kwargs = dict(
            host='localhost',
            port=flow.port,
            return_responses=True,
            asyncio=True,
        )
        # inherit protocol etc. from the Flow under test
        client_kwargs.update(flow._common_kwargs)
        client = Client(**client_kwargs)
        client.show_progress = True

        docs = [Document() for _ in range(n_docs)]
        return [
            resp
            async for resp in client.post(
                endpoint, inputs=docs, request_size=request_size
            )
        ]