def _create_worker_runtime(port, name='', executor=None):
    """Serve a ``WorkerRuntime`` on *port* until it is stopped.

    Builds default pod args, overrides port/name (and ``uses`` when an
    *executor* is given), then blocks in ``run_forever``.
    """
    pod_args = set_pod_parser().parse_args([])
    pod_args.port = port
    pod_args.name = name
    if executor:
        pod_args.uses = executor
    with WorkerRuntime(pod_args) as worker:
        worker.run_forever()
def start_runtime(args, cancel_event, handler_closed_event):
    """Run a ``WorkerRuntime`` whose data-request handler is patched:
    ``handle`` sleeps for ``slow_executor_block_time`` (closure variable) and
    ``close`` sets *handler_closed_event*, then block in ``run_forever``.
    """
    with WorkerRuntime(args, cancel_event=cancel_event) as worker:

        def _slow_handle(*_args, **_kwargs):
            time.sleep(slow_executor_block_time)

        def _mark_closed(*_args, **_kwargs):
            handler_closed_event.set()

        worker._data_request_handler.handle = _slow_handle
        worker._data_request_handler.close = _mark_closed
        worker.run_forever()
async def test_pods_trivial_topology(
    head_runtime_docker_image_built, worker_runtime_docker_image_built
):
    """End-to-end check of the minimal gateway -> head -> worker topology:
    start one pod of each kind, wire the worker into the head via an
    ACTIVATE control request, then stream requests through the gateway.
    """
    worker_port = random_port()
    head_port = random_port()
    port = random_port()
    # gateway routes every request through the single deployment "pod0"
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)

    with gateway_pod, head_pod, worker_pod:
        # give all three pods a moment to boot before probing readiness
        await asyncio.sleep(1.0)

        assert HeadRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ctrl_address=head_pod.runtime_ctrl_address,
            ready_or_shutdown_event=head_pod.ready_or_shutdown.event,
        )

        assert WorkerRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ctrl_address=worker_pod.runtime_ctrl_address,
            ready_or_shutdown_event=worker_pod.ready_or_shutdown.event,
        )

        head_pod.ready_or_shutdown.event.wait(timeout=5.0)
        worker_pod.ready_or_shutdown.event.wait(timeout=5.0)
        gateway_pod.ready_or_shutdown.event.wait(timeout=5.0)

        # this would be done by the Pod, its adding the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
        activate_msg.add_related_entity('worker', worker_host, int(worker_port))
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, head_pod.runtime_ctrl_address
        )

        # send requests to the gateway
        c = Client(host='localhost', port=port, asyncio=True)
        responses = c.post(
            '/', inputs=async_inputs, request_size=1, return_responses=True
        )
        response_list = []
        async for response in responses:
            response_list.append(response)

    # presumably `async_inputs` yields 20 docs; with request_size=1 that is
    # one response per doc — TODO confirm against the fixture definition
    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
def worker_runtime(args: 'Namespace'):
    """
    Starts a WorkerRuntime

    :param args: arguments coming from the CLI.
    """
    from jina.serve.runtimes.worker import WorkerRuntime

    with WorkerRuntime(args) as runtime:
        executor_name = runtime._data_request_handler._executor.metas.name
        # announce which executor this runtime is serving, then block
        runtime.logger.info(f'Executor {executor_name} started')
        runtime.run_forever()
def __init__(self, args: 'argparse.Namespace'):
    """
    Create a new :class:`NetworkChecker`.

    :param args: args provided by the CLI.
    """
    import time

    from jina.logging.profile import TimeContext
    from jina.serve.runtimes.worker import WorkerRuntime

    # target control address of the runtime to ping
    ctrl_addr = f'{args.host}:{args.port}'
    try:
        total_time = 0
        total_success = 0
        for j in range(args.retries):
            # TimeContext measures the duration of each ping attempt
            with TimeContext(
                f'ping {ctrl_addr} at {j} round', default_logger
            ) as tc:
                r = WorkerRuntime.is_ready(ctrl_addr)
                if not r:
                    default_logger.warning(
                        'not responding, retry (%d/%d) in 1s'
                        % (j + 1, args.retries)
                    )
                else:
                    total_success += 1
            # NOTE(review): accumulated for every attempt but averaged over
            # successes below — confirm this bias is intended
            total_time += tc.duration
            time.sleep(1)
        if total_success < args.retries:
            default_logger.warning(
                'message lost %.0f%% (%d/%d) '
                % (
                    (1 - total_success / args.retries) * 100,
                    args.retries - total_success,
                    args.retries,
                )
            )
        if total_success > 0:
            default_logger.info(
                'avg. latency: %.0f ms' % (total_time / total_success * 1000)
            )
        # at least one ping ran to completion -> report success to the shell
        exit(0)
    except KeyboardInterrupt:
        pass

    # returns 1 (anomaly) when it comes to here
    exit(1)
def start_runtime(args, cancel_event):
    """Serve a ``WorkerRuntime`` wired to *cancel_event* until shutdown."""
    with WorkerRuntime(args, cancel_event=cancel_event) as worker:
        worker.run_forever()
async def test_worker_runtime_graceful_shutdown():
    """Verify a WorkerRuntime drains in-flight requests on termination:
    after ``terminate()`` it must still answer the pending slow requests,
    close its handler, and only then stop serving.
    """
    args = set_pod_parser().parse_args([])
    cancel_event = multiprocessing.Event()
    handler_closed_event = multiprocessing.Event()
    slow_executor_block_time = 1.0
    pending_requests = 5

    def start_runtime(args, cancel_event, handler_closed_event):
        # patch the handler: each request blocks for a fixed time, and
        # closing the handler flags `handler_closed_event`
        with WorkerRuntime(args, cancel_event=cancel_event) as runtime:
            runtime._data_request_handler.handle = lambda *args, **kwargs: time.sleep(
                slow_executor_block_time
            )
            runtime._data_request_handler.close = (
                lambda *args, **kwargs: handler_closed_event.set()
            )
            runtime.run_forever()

    # run the runtime in a separate process so terminate()/join() apply to it
    runtime_thread = Process(
        target=start_runtime,
        args=(args, cancel_event, handler_closed_event),
    )
    runtime_thread.start()

    assert AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'{args.host}:{args.port}',
        ready_or_shutdown_event=Event(),
    )

    request_start_time = time.time()

    async def task_wrapper(adress, messages_received):
        # one data request per task; appended to the shared list on completion
        request = _create_test_data_message(len(messages_received))
        (
            single_data_stub,
            data_stub,
            control_stub,
            channel,
        ) = GrpcConnectionPool.create_async_channel_stub(adress)
        await data_stub.process_data(request)
        await channel.close()
        messages_received.append(request)

    sent_requests = 0
    messages_received = []
    tasks = []
    for i in range(pending_requests):
        tasks.append(
            asyncio.create_task(
                task_wrapper(f'{args.host}:{args.port}', messages_received)
            )
        )
        sent_requests += 1

    # let the requests reach the (slow) runtime before asking it to stop
    await asyncio.sleep(1.0)

    runtime_thread.terminate()
    # handler must not be closed while requests are still in flight
    assert not handler_closed_event.is_set()
    runtime_thread.join()

    # all pending requests must still complete despite the terminate call
    for future in asyncio.as_completed(tasks):
        _ = await future

    assert pending_requests == sent_requests
    assert sent_requests == len(messages_received)
    # each request blocked the handler, so total wall time is at least the
    # per-request block time times the number of requests
    assert (
        time.time() - request_start_time
        >= slow_executor_block_time * pending_requests
    )
    assert handler_closed_event.is_set()
    # after draining, the runtime must be fully shut down
    assert not WorkerRuntime.is_ready(f'{args.host}:{args.port}')