Example 1
def test_executor_runtimes(signal, tmpdir):
    import time

    args = set_pod_parser().parse_args([])

    def run(args):

        args.uses = {
            'jtype': 'DummyExecutor',
            'with': {
                'dir': str(tmpdir)
            },
            'metas': {
                'workspace': str(tmpdir)
            },
        }
        executor_native(args)

    process = multiprocessing.Process(target=run, args=(args, ))
    process.start()
    time.sleep(0.5)

    GrpcConnectionPool.send_request_sync(_create_test_data_message(),
                                         target=f'{args.host}:{args.port}')

    time.sleep(0.1)

    os.kill(process.pid, signal)
    process.join()
    with open(f'{tmpdir}/test.txt', 'r') as fp:
        output = fp.read()
    split = output.split(';')
    assert split[0] == 'proper close'
    assert split[1] == '1'
Example 2
def test_dynamic_polling_default_config(polling):
    args = set_deployment_parser().parse_args([
        '--uses',
        'DynamicPollingExecutorDefaultNames',
        '--shards',
        str(2),
        '--polling',
        polling,
    ])
    pod = Deployment(args)

    with pod:
        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/search'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/search',
        )
        assert len(response.docs) == 1 + 2  # 1 source doc + 1 doc added by each of the two shards

        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/index'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/index',
        )
        assert len(response.docs) == 1 + 1  # 1 source doc + 1 doc added by the single shard that was polled
Example 3
async def _activate_worker(head_port, worker_port, shard_id=None):
    # this would be done by the Pod; it adds the worker to the head
    activate_msg = ControlRequest(command='ACTIVATE')
    activate_msg.add_related_entity(
        'worker', '127.0.0.1', worker_port, shard_id=shard_id
    )
    GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')
Example 4
def test_dynamic_polling_overwrite_default_config(polling):
    endpoint_polling = {'/search': PollingType.ANY, '*': polling}
    args = set_deployment_parser().parse_args([
        '--uses',
        'DynamicPollingExecutorDefaultNames',
        '--shards',
        str(2),
        '--polling',
        json.dumps(endpoint_polling),
    ])
    pod = Deployment(args)

    with pod:
        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/search'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/search',
        )
        assert (len(response.docs) == 1 + 1
                )  # 1 source doc + 1 doc added by the one shard

        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/index'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/index',
        )
        assert (len(response.docs) == 1 + 1
                )  # 1 source doc + 1 doc added by the one shard
Example 5
async def test_pseudo_remote_pods_replicas(gateway, head, worker):
    NUM_REPLICAS = 3
    head_port = random_port()
    port_expose = random_port()
    graph_description = (
        '{"start-gateway": ["deployment0"], "deployment0": ["end-gateway"]}')
    deployments_addresses = f'{{"deployment0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head, head_port)
    head_pod.start()

    # create the replicas
    replica_pods = []
    for i in range(NUM_REPLICAS):
        # create worker
        worker_port = random_port()
        # create a single worker pod
        worker_pod = _create_worker_pod(worker, worker_port,
                                        f'deployment0/{i}')
        replica_pods.append(worker_pod)
        worker_pod.start()

        await asyncio.sleep(0.1)
        if head == 'remote':
            worker_host = __docker_host__
        else:
            worker_host = HOST

        # this would be done by the deployment; it adds the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', worker_host, worker_port)
        GrpcConnectionPool.send_request_sync(activate_msg,
                                             f'{HOST}:{head_port}')

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(gateway, graph_description,
                                      deployments_addresses, port_expose)
    gateway_pod.start()

    await asyncio.sleep(1.0)

    c = Client(host='localhost', port=port_expose, asyncio=True)
    responses = c.post('/',
                       inputs=async_inputs,
                       request_size=1,
                       return_results=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up pods
    gateway_pod.close()
    head_pod.close()
    for pod in replica_pods:
        pod.close()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
Example 6
async def test_pods_with_replicas_advance_faster(port_generator):
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head_port, 'head')
    head_pod.start()

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)
    gateway_pod.start()

    # create the replicas
    pods = []
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker pod
        worker_pod = _create_worker_pod(worker_port, f'pod0/{i}', 'FastSlowExecutor')
        pods.append(worker_pod)
        worker_pod.start()

        await asyncio.sleep(0.1)

    head_pod.wait_start_success()
    gateway_pod.wait_start_success()
    for pod in pods:
        # this would be done by the Pod; it adds the worker to the head
        pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', '127.0.0.1', pod.args.port)
        GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')

    c = Client(host='localhost', port=port, asyncio=True)
    input_docs = [Document(text='slow'), Document(text='fast')]
    responses = c.post('/', inputs=input_docs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up pods
    gateway_pod.close()
    head_pod.close()
    for pod in pods:
        pod.close()

    assert len(response_list) == 2
    for response in response_list:
        assert len(response.docs) == 1

    assert response_list[0].docs[0].text == 'fast'
    assert response_list[1].docs[0].text == 'slow'
Example 7
async def test_pods_shards(polling, port_generator):
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head_port, 'head', polling)
    head_pod.start()

    # create the shards
    shard_pods = []
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker pod
        worker_pod = _create_worker_pod(worker_port, f'pod0/shard/{i}')
        shard_pods.append(worker_pod)
        worker_pod.start()

        await asyncio.sleep(0.1)

    head_pod.wait_start_success()
    for i, pod in enumerate(shard_pods):
        # this would be done by the Pod; it adds the worker to the head
        pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity(
            'worker', '127.0.0.1', pod.args.port, shard_id=i
        )
        GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)
    gateway_pod.start()

    await asyncio.sleep(1.0)

    gateway_pod.wait_start_success()
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up pods
    gateway_pod.close()
    head_pod.close()
    for shard_pod in shard_pods:
        shard_pod.close()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == (1 if polling == 'ANY' else len(shard_pods))
Example 8
File: helper.py Project: srbhr/jina
def is_ready(address: str) -> bool:
    """
    TODO: make this async
    Check if status is ready.
    :param address: the address where the control message needs to be sent
    :return: True if status is ready else False.
    """

    try:
        GrpcConnectionPool.send_request_sync(ControlRequest('STATUS'), address)
    except RpcError:
        return False
    return True
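
A minimal sketch of how the readiness check above might be wrapped in a polling loop with a deadline; wait_until_ready and its parameters are illustrative and not part of Jina's API:

import time

def wait_until_ready(address: str, timeout: float = 5.0, interval: float = 0.1) -> bool:
    # hypothetical helper: retry the is_ready() probe shown above until it
    # succeeds or the deadline expires
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if is_ready(address):
            return True
        time.sleep(interval)
    return False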
Example 9
async def test_secure_send_request(private_key_cert_chain):
    server1_ready_event = multiprocessing.Event()
    (private_key, certificate_chain) = private_key_cert_chain

    def listen(port, event: multiprocessing.Event):
        class DummyServer:
            async def process_control(self, request, *args):
                returned_msg = ControlRequest(command='DEACTIVATE')
                return returned_msg

        async def start_grpc_server():
            grpc_server = grpc.aio.server(
                options=[
                    ('grpc.max_send_message_length', -1),
                    ('grpc.max_receive_message_length', -1),
                ]
            )

            jina_pb2_grpc.add_JinaControlRequestRPCServicer_to_server(
                DummyServer(), grpc_server
            )
            grpc_server.add_secure_port(
                f'localhost:{port}',
                grpc.ssl_server_credentials((private_key_cert_chain,)),
            )

            await grpc_server.start()
            event.set()
            await grpc_server.wait_for_termination()

        asyncio.run(start_grpc_server())

    port = random_port()
    server_process1 = Process(
        target=listen,
        args=(
            port,
            server1_ready_event,
        ),
    )
    server_process1.start()

    time.sleep(0.1)
    server1_ready_event.wait()
    sent_msg = ControlRequest(command='STATUS')

    result = GrpcConnectionPool.send_request_sync(
        sent_msg, f'localhost:{port}', https=True, root_certificates=certificate_chain
    )

    assert result.command == 'DEACTIVATE'

    result = await GrpcConnectionPool.send_request_async(
        sent_msg, f'localhost:{port}', https=True, root_certificates=certificate_chain
    )

    assert result.command == 'DEACTIVATE'

    server_process1.kill()
    server_process1.join()
Example 10
    def is_ready(ctrl_address: str, **kwargs) -> bool:
        """
        Check if status is ready.

        :param ctrl_address: the address where the control request needs to be sent
        :param kwargs: extra keyword arguments

        :return: True if status is ready else False.
        """

        try:
            GrpcConnectionPool.send_request_sync(ControlRequest('STATUS'),
                                                 ctrl_address)
        except RpcError as e:
            return False
        return True
Example 11
def test_grpc_ssl_with_flow_and_client(cert_pem, key_pem, error_log_level):
    with Flow(
            protocol='grpc',
            ssl_certfile=cert_pem,
            ssl_keyfile=key_pem,
    ) as flow:
        with open(cert_pem, 'rb') as f:
            creds = f.read()

        GrpcConnectionPool.send_request_sync(
            request=ControlRequest('STATUS'),
            target=f'localhost:{flow.port}',
            root_certificates=creds,
            tls=True,
            timeout=1.0,
        )
Example 12
def test_dynamic_polling_with_config(polling):
    endpoint_polling = {
        '/any': PollingType.ANY,
        '/all': PollingType.ALL,
        '*': polling
    }

    args = set_deployment_parser().parse_args([
        '--uses',
        'DynamicPollingExecutor',
        '--shards',
        str(2),
        '--polling',
        json.dumps(endpoint_polling),
    ])
    pod = Deployment(args)

    with pod:
        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/all'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/all',
        )
        assert len(response.docs) == 1 + 2  # 1 source doc + 1 doc added by each of the two shards

        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/any'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/any',
        )
        assert (len(response.docs) == 1 + 1
                )  # 1 source doc + 1 doc added by the one shard

        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(endpoint='/no_polling'),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
            endpoint='/no_polling',
        )
        if polling == 'any':
            assert len(response.docs) == 1 + 1  # 1 source doc + 1 doc added by the one shard
        else:
            assert len(response.docs) == 1 + 2  # 1 source doc + 1 doc added by each of the two shards
Example 13
async def _send_requests(pod):
    response_texts = set()
    for _ in range(3):
        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
        )
        response_texts.update(response.response.docs.texts)
    return response_texts
Example 14
async def test_pods_trivial_topology(head_runtime_docker_image_built,
                                     worker_runtime_docker_image_built):
    worker_port = random_port()
    head_port = random_port()
    port = random_port()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)

    with gateway_pod, head_pod, worker_pod:
        await asyncio.sleep(1.0)

        assert HeadRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ctrl_address=head_pod.runtime_ctrl_address,
            ready_or_shutdown_event=head_pod.ready_or_shutdown.event,
        )

        assert WorkerRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ctrl_address=worker_pod.runtime_ctrl_address,
            ready_or_shutdown_event=worker_pod.ready_or_shutdown.event,
        )

        head_pod.ready_or_shutdown.event.wait(timeout=5.0)
        worker_pod.ready_or_shutdown.event.wait(timeout=5.0)
        gateway_pod.ready_or_shutdown.event.wait(timeout=5.0)

        # this would be done by the Pod; it adds the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
        activate_msg.add_related_entity('worker', worker_host,
                                        int(worker_port))
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, head_pod.runtime_ctrl_address)

        # send requests to the gateway
        c = Client(host='localhost', port=port, asyncio=True)
        responses = c.post('/',
                           inputs=async_inputs,
                           request_size=1,
                           return_responses=True)
        response_list = []
        async for response in responses:
            response_list.append(response)

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
Example 15
async def test_pseudo_remote_pods_topologies(gateway, head, worker):
    """
    g(l)-h(l)-w(l) - works
    g(l)-h(l)-w(r) - works - head connects to worker via localhost
    g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost
    g(l)-h(r)-w(l) - doesn't work: a remote head needs a remote worker
    g(r)-... - doesn't work, as the distributed parser is not enabled for the gateway
    Any single failure is followed by a segfault
    """
    worker_port = random_port()
    head_port = random_port()
    port_expose = random_port()
    graph_description = (
        '{"start-gateway": ["deployment0"], "deployment0": ["end-gateway"]}')
    if head == 'remote':
        deployments_addresses = f'{{"deployment0": ["{HOST}:{head_port}"]}}'
    else:
        deployments_addresses = f'{{"deployment0": ["0.0.0.0:{head_port}"]}}'

    # create a single head pod
    head_pod = _create_head_pod(head, head_port)

    # create a single worker pod
    worker_pod = _create_worker_pod(worker, worker_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(gateway, graph_description,
                                      deployments_addresses, port_expose)

    with gateway_pod, worker_pod, head_pod:
        await asyncio.sleep(1.0)
        # this would be done by the deployment; it adds the worker to the head
        activate_msg = ControlRequest(command='ACTIVATE')
        worker_host, worker_port = worker_pod.runtime_ctrl_address.split(':')
        if head == 'remote':
            worker_host = __docker_host__

        activate_msg.add_related_entity('worker', worker_host,
                                        int(worker_port))
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, head_pod.runtime_ctrl_address)

        # send requests to the gateway
        c = Client(host='127.0.0.1', port=port_expose, asyncio=True)
        responses = c.post('/',
                           inputs=async_inputs,
                           request_size=1,
                           return_results=True)
        response_list = []
        async for response in responses:
            response_list.append(response)

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
Example 16
def check_health_pod(addr: str):
    """check if a pods is healthy

    :param addr: the address on which the pod is serving ex : localhost:1234
    """
    import grpc

    from jina.serve.networking import GrpcConnectionPool
    from jina.types.request.control import ControlRequest

    try:
        GrpcConnectionPool.send_request_sync(
            request=ControlRequest('STATUS'),
            target=addr,
        )
    except grpc.RpcError as e:
        print('The pod is unhealthy')
        print(e)
        raise e

    print('The pod is healthy')
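
A minimal usage sketch for the helper above; the address is a placeholder and would normally be the pod's control address (compare runtime_ctrl_address in the other examples):

import grpc

# hypothetical address; substitute the real host:port of the pod
try:
    check_health_pod('0.0.0.0:12345')
except grpc.RpcError:
    print('health check failed')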
Example 17
def test_control_message_processing():
    args = set_pod_parser().parse_args([])
    cancel_event, handle_queue, runtime_thread = _create_runtime(args)

    # no connection registered yet
    resp = GrpcConnectionPool.send_request_sync(_create_test_data_message(),
                                                f'{args.host}:{args.port}')
    assert resp.status.code == resp.status.ERROR

    _add_worker(args, 'ip1')
    # after adding a connection, sending should work
    result = GrpcConnectionPool.send_request_sync(_create_test_data_message(),
                                                  f'{args.host}:{args.port}')
    assert result

    _remove_worker(args, 'ip1')
    # after removing the connection again, sending does not work anymore
    resp = GrpcConnectionPool.send_request_sync(_create_test_data_message(),
                                                f'{args.host}:{args.port}')
    assert resp.status.code == resp.status.ERROR

    _destroy_runtime(args, cancel_event, runtime_thread)
Example 18
def test_message_merging():
    args = set_pod_parser().parse_args([])
    args.polling = PollingType.ALL
    cancel_event, handle_queue, runtime_thread = _create_runtime(args)

    assert handle_queue.empty()
    _add_worker(args, 'ip1', shard_id=0)
    _add_worker(args, 'ip2', shard_id=1)
    _add_worker(args, 'ip3', shard_id=2)
    assert handle_queue.empty()

    result = GrpcConnectionPool.send_request_sync(
        _create_test_data_message(), f'{args.host}:{args.port_in}')
    assert result
    assert _queue_length(handle_queue) == 3
    assert len(result.response.docs) == 1

    _destroy_runtime(args, cancel_event, runtime_thread)
Example 19
def test_pod_activates_replicas():
    args_list = ['--replicas', '3', '--shards', '2', '--disable-reduce']
    args = set_deployment_parser().parse_args(args_list)
    args.uses = 'AppendNameExecutor'
    with Deployment(args) as pod:
        assert pod.num_pods == 7
        response_texts = set()
        # replicas are used in a round-robin fashion, so sending 6 requests should hit each of them at least once
        for _ in range(6):
            response = GrpcConnectionPool.send_request_sync(
                _create_test_data_message(),
                f'{pod.head_args.host}:{pod.head_args.port}',
            )
            response_texts.update(response.response.docs.texts)
        assert 4 == len(response_texts)
        assert all(text in response_texts for text in ['0', '1', '2', 'client'])

    Deployment(args).start().close()
Example 20
def test_pod_activates_shards():
    args_list = ['--replicas', '3']
    args_list.extend(['--shards', '3'])
    args = set_deployment_parser().parse_args(args_list)
    args.uses = 'AppendShardExecutor'
    args.polling = PollingType.ALL
    with Deployment(args) as pod:
        assert pod.num_pods == 3 * 3 + 1
        response_texts = set()
        # polling is ALL, so a single request is dispatched to every shard
        response = GrpcConnectionPool.send_request_sync(
            _create_test_data_message(),
            f'{pod.head_args.host}:{pod.head_args.port_in}',
        )
        response_texts.update(response.response.docs.texts)
        assert 4 == len(response.response.docs.texts)
        assert 4 == len(response_texts)
        assert all(text in response_texts
                   for text in ['0', '1', '2', 'client'])

    Deployment(args).start().close()
Example 21
async def test_pods_trivial_topology(port_generator):
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port)

    with gateway_pod, head_pod, worker_pod:
        # this would be done by the Pod; it adds the worker to the head
        head_pod.wait_start_success()
        worker_pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', '127.0.0.1', worker_port)
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, f'127.0.0.1:{head_port}'
        )

        # send requests to the gateway
        gateway_pod.wait_start_success()
        c = Client(host='localhost', port=port, asyncio=True)
        responses = c.post(
            '/', inputs=async_inputs, request_size=1, return_responses=True
        )

        response_list = []
        async for response in responses:
            response_list.append(response)

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
Example 22
def test_uses_before_uses_after():
    args = set_pod_parser().parse_args([])
    args.polling = PollingType.ALL
    args.uses_before_address = 'fake_address'
    args.uses_after_address = 'fake_address'
    connection_list_dict = {
        0: ['ip1:8080'],
        1: ['ip2:8080'],
        2: ['ip3:8080'],
    }
    args.connection_list = json.dumps(connection_list_dict)
    cancel_event, handle_queue, runtime_thread = _create_runtime(args)

    assert handle_queue.empty()

    result = GrpcConnectionPool.send_request_sync(_create_test_data_message(),
                                                  f'{args.host}:{args.port}')
    assert result
    assert _queue_length(
        handle_queue) == 5  # uses_before + 3 workers + uses_after
    assert len(result.response.docs) == 1

    _destroy_runtime(args, cancel_event, runtime_thread)
Example 23
async def test_pods_health_check(port_generator, protocol, health_check):
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker pod
    worker_pod = _create_worker_pod(worker_port)

    # create a single head pod
    head_pod = _create_head_pod(head_port)

    # create a single gateway pod
    gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port, protocol)

    with gateway_pod, head_pod, worker_pod:
        # this would be done by the Pod; it adds the worker to the head
        head_pod.wait_start_success()
        worker_pod.wait_start_success()
        activate_msg = ControlRequest(command='ACTIVATE')
        activate_msg.add_related_entity('worker', '127.0.0.1', worker_port)
        assert GrpcConnectionPool.send_request_sync(
            activate_msg, f'127.0.0.1:{head_port}'
        )

        # send requests to the gateway
        gateway_pod.wait_start_success()

        for _port in (head_port, worker_port):
            check_health_pod(f'0.0.0.0:{_port}')

        if inspect.iscoroutinefunction(health_check):
            await health_check(f'0.0.0.0:{port}')
        else:
            health_check(f'0.0.0.0:{port}')
Example 24
def _remove_worker(args, ip='fake_ip', shard_id=None):
    deactivate_msg = ControlRequest(command='DEACTIVATE')
    deactivate_msg.add_related_entity('worker', ip, 8080, shard_id)
    assert GrpcConnectionPool.send_request_sync(deactivate_msg,
                                                f'{args.host}:{args.port}')
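
The matching _add_worker helper used in Examples 17 and 18 is not shown on this page; a plausible sketch, mirroring _remove_worker above and the ACTIVATE pattern from Example 3 (the actual test helper may differ):

def _add_worker(args, ip='fake_ip', shard_id=None):
    # sketch: register a worker with the head via an ACTIVATE control request
    activate_msg = ControlRequest(command='ACTIVATE')
    activate_msg.add_related_entity('worker', ip, 8080, shard_id)
    assert GrpcConnectionPool.send_request_sync(activate_msg,
                                                f'{args.host}:{args.port}')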
Example 25
async def test_runtimes_trivial_topology(port_generator):
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create a single worker runtime
    worker_process = multiprocessing.Process(target=_create_worker_runtime,
                                             args=(worker_port, ))
    worker_process.start()

    # create a single head runtime
    head_process = multiprocessing.Process(target=_create_head_runtime,
                                           args=(head_port, ))
    head_process.start()

    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()

    await asyncio.sleep(1.0)

    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{head_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{worker_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    # this would be done by the Pod; it adds the worker to the head
    activate_msg = ControlRequest(command='ACTIVATE')
    activate_msg.add_related_entity('worker', '127.0.0.1', worker_port)
    GrpcConnectionPool.send_request_sync(activate_msg,
                                         f'127.0.0.1:{head_port}')

    # send requests to the gateway
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/',
                       inputs=async_inputs,
                       request_size=1,
                       return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up runtimes
    gateway_process.terminate()
    head_process.terminate()
    worker_process.terminate()

    gateway_process.join()
    head_process.join()
    worker_process.join()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1

    assert gateway_process.exitcode == 0
    assert head_process.exitcode == 0
    assert worker_process.exitcode == 0