Exemplo n.º 1
0
def get_port_forward_contextmanager(
    namespace: str,
    port_expose: int,
    config_path: str = None,  # NOTE(review): effectively Optional[str]
) -> Generator[None, None, None]:
    """Forward local requests to the gateway which is running in the Kubernetes cluster.

    :param namespace: namespace of the gateway
    :param port_expose: exposed port of the gateway
    :param config_path: path to the Kubernetes config file; falls back to the
        ``KUBECONFIG`` environment variable when not given
    :return: context manager which sets up and terminates the port-forward
    """
    with ImportExtensions(
            required=True,
            help_text=
            'Sending requests to the Kubernetes cluster requires to install the portforward package. '
            # fix: the two literals below were concatenated without a separator,
            # rendering as `...jina[portforward]"`Also make sure...`
            'Please do `pip install "jina[portforward]"`. '
            'Also make sure golang is installed `https://golang.org/`',
    ):
        import portforward

    clients = K8sClients()
    gateway_pod_name = _get_gateway_pod_name(namespace, k8s_clients=clients)
    if config_path is None and 'KUBECONFIG' in os.environ:
        config_path = os.environ['KUBECONFIG']
    # portforward.forward is itself a context manager; local `port_expose`
    # is forwarded to the same port on the gateway pod
    return portforward.forward(namespace, gateway_pod_name, port_expose,
                               port_expose, config_path)
Exemplo n.º 2
0
async def run_test_until_event(flow,
                               core_client,
                               namespace,
                               endpoint,
                               stop_event,
                               logger,
                               sleep_time=0.05):
    """Stream documents to the deployed flow until ``stop_event`` is set.

    Port-forwards to the gateway pod in ``namespace`` and keeps posting one
    document per request to ``endpoint`` until ``stop_event`` is observed.

    :param flow: the Flow object (provides ``port`` and ``_common_kwargs``)
    :param core_client: a kubernetes ``CoreV1Api`` client
    :param namespace: namespace the flow is deployed in
    :param endpoint: executor endpoint to post to
    :param stop_event: event that terminates the request stream when set
    :param logger: logger for progress messages
    :param sleep_time: delay between consecutive requests; falsy disables it
    :return: tuple of (list of responses received, set of ids that were sent)
    """
    # start port forwarding
    from jina.clients import Client

    # the gateway pod is identified by its `app=gateway` label
    gateway_pod_name = (core_client.list_namespaced_pod(
        namespace=namespace,
        label_selector='app=gateway').items[0].metadata.name)
    config_path = os.environ['KUBECONFIG']
    import portforward

    with portforward.forward(namespace, gateway_pod_name, flow.port, flow.port,
                             config_path):
        client_kwargs = dict(
            host='localhost',
            port=flow.port,
            return_responses=True,
            asyncio=True,
        )
        # inherit protocol and other settings from the flow itself
        client_kwargs.update(flow._common_kwargs)

        client = Client(**client_kwargs)
        client.show_progress = True

        async def async_inputs(sent_ids: Set[int], sleep_time: float = 0.05):
            # yields one Document per id and records every id in `sent_ids`,
            # so the caller can compare what was sent against what came back;
            # stops only after stop_event is set (the current id is still sent)
            i = 0
            while True:
                sent_ids.add(i)
                yield Document(text=f'{i}')
                if stop_event.is_set():
                    logger.info(
                        f'stop yielding new requests after {i} requests')
                    return
                elif sleep_time:
                    await asyncio.sleep(sleep_time)
                i += 1

        responses = []
        sent_ids = set()
        async for resp in client.post(
                endpoint,
                inputs=functools.partial(async_inputs, sent_ids, sleep_time),
                request_size=1,
        ):
            responses.append(resp)

    logger.info(
        f'Client sent {len(sent_ids)} and received {(len(responses))} responses'
    )
    return responses, sent_ids
Exemplo n.º 3
0
async def test_flow_with_monitoring(logger, tmpdir, docker_images,
                                    port_generator):
    """Deploy a monitoring-enabled flow to k8s and probe the metrics endpoint.

    Builds a flow with monitoring enabled on both the gateway and a
    ``segmenter`` executor, deploys it, port-forwards to the gateway's
    monitoring port and asserts the endpoint answers with HTTP 200.
    """
    dump_path = os.path.join(str(tmpdir), 'test-flow-with-monitoring')
    # fix: dropped the needless f-prefix from a placeholder-free string (F541)
    namespace = 'test-flow-monitoring'.lower()

    port1 = port_generator()  # gateway monitoring port
    port2 = port_generator()  # segmenter monitoring port
    flow = Flow(name='test-flow-monitoring',
                monitoring=True,
                port_monitoring=port1).add(
                    name='segmenter',
                    uses=f'docker://{docker_images[0]}',
                    monitoring=True,
                    port_monitoring=port2,
                )

    flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)

    from kubernetes import client

    api_client = client.ApiClient()
    core_client = client.CoreV1Api(api_client=api_client)
    app_client = client.AppsV1Api(api_client=api_client)
    await create_all_flow_deployments_and_wait_ready(
        dump_path,
        namespace=namespace,
        api_client=api_client,
        app_client=app_client,
        core_client=core_client,
        deployment_replicas_expected={
            'gateway': 1,
            'segmenter': 1,
        },
        logger=logger,
    )
    import portforward

    config_path = os.environ['KUBECONFIG']
    gateway_pod_name = (core_client.list_namespaced_pod(
        namespace=namespace,
        label_selector='app=gateway').items[0].metadata.name)

    # TODO(review): only the gateway's monitoring port is probed; `port2` is
    # configured on the segmenter above but never checked — consider adding
    # the segmenter pod here as well.
    pod_port_ref = [(gateway_pod_name, port1)]

    for (pod_name, port) in pod_port_ref:
        with portforward.forward(namespace, pod_name, port, port, config_path):
            resp = req.get(f'http://localhost:{port}/')
            assert resp.status_code == 200

    core_client.delete_namespace(namespace)
Exemplo n.º 4
0
async def run_test(flow,
                   core_client,
                   namespace,
                   endpoint,
                   n_docs=10,
                   request_size=100):
    """Post ``n_docs`` empty documents to the deployed flow and collect replies.

    Port-forwards to the gateway pod of ``namespace``, then sends the
    documents to ``endpoint`` through an async client.

    :return: list of responses received from the flow
    """
    from jina.clients import Client
    import portforward

    # locate the gateway pod by its label
    pods = core_client.list_namespaced_pod(namespace=namespace,
                                           label_selector='app=gateway')
    gateway_pod_name = pods.items[0].metadata.name
    config_path = os.environ['KUBECONFIG']

    with portforward.forward(namespace, gateway_pod_name, flow.port, flow.port,
                             config_path):
        client_kwargs = {
            'host': 'localhost',
            'port': flow.port,
            'return_responses': True,
            'asyncio': True,
        }
        # inherit protocol and other settings from the flow itself
        client_kwargs.update(flow._common_kwargs)

        client = Client(**client_kwargs)
        client.show_progress = True

        docs = [Document() for _ in range(n_docs)]
        responses = [
            resp async for resp in client.post(
                endpoint, inputs=docs, request_size=request_size)
        ]

    return responses
async def test_linear_processing_time_scaling(docker_images, logger, tmpdir):
    """With 3 replicas of a slow executor, responses must arrive roughly
    three times faster than a single replica would deliver them, and the
    received ids must form a gap-free range.
    """
    flow = Flow(name='test-flow-slow-process-executor', ).add(
        name='slow_process_executor',
        uses=f'docker://{docker_images[0]}',
        replicas=3,
    )
    dump_path = os.path.join(str(tmpdir), 'test_flow_k8s')
    namespace = 'test-flow-slow-process-executor-ns-3'
    flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)
    from kubernetes import client

    api_client = client.ApiClient()
    core_client = client.CoreV1Api(api_client=api_client)
    app_client = client.AppsV1Api(api_client=api_client)
    await create_all_flow_deployments_and_wait_ready(
        dump_path,
        namespace=namespace,
        api_client=api_client,
        app_client=app_client,
        core_client=core_client,
    )

    # start port forwarding
    # fix: dropped the needless f-prefix from a placeholder-free log string
    logger.debug(' Start port forwarding')
    gateway_pod_name = (core_client.list_namespaced_pod(
        namespace=namespace,
        label_selector='app=gateway').items[0].metadata.name)
    config_path = os.environ['KUBECONFIG']
    import portforward

    with portforward.forward(namespace, gateway_pod_name, flow.port, flow.port,
                             config_path):
        time.sleep(0.1)
        client_kwargs = dict(
            host='localhost',
            port=flow.port,
        )
        client_kwargs.update(flow._common_kwargs)

        # run the client in a separate process so the request stream proceeds
        # independently of this test's control flow
        stop_event = multiprocessing.Event()
        scale_event = multiprocessing.Event()
        received_responses = multiprocessing.Queue()
        response_arrival_times = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=send_requests,
            kwargs={
                'client_kwargs': client_kwargs,
                'stop_event': stop_event,
                'scale_event': scale_event,
                'received_responses': received_responses,
                'response_arrival_times': response_arrival_times,
                'logger': logger,
            },
        )

        process.start()
        process.join()
        import numpy as np

        response_times = []
        while not response_arrival_times.empty():
            response_times.append(response_arrival_times.get())
        mean_response_time = np.mean(response_times)
        logger.debug(
            f'Mean time between responses is {mean_response_time}, expected is 1/3 second'
        )
        # 0.4 s leaves head-room over the ideal 1/3 s spacing with 3 replicas
        assert mean_response_time < 0.4

        responses_list = []
        while not received_responses.empty():
            responses_list.append(int(received_responses.get()))

        logger.debug(f'Got the following responses {sorted(responses_list)}')
        # the received ids must form a contiguous range — no response lost
        assert sorted(responses_list) == list(
            range(min(responses_list),
                  max(responses_list) + 1))
async def test_no_message_lost_during_kill(logger, docker_images, tmpdir):
    """Kill 2 of 3 executor replicas mid-stream and assert no response is lost:
    the received ids must still form a gap-free contiguous range.
    """
    flow = Flow(name='test-flow-slow-process-executor', ).add(
        name='slow_process_executor',
        uses=f'docker://{docker_images[0]}',
        replicas=3,
    )
    dump_path = os.path.join(str(tmpdir), 'test_flow_k8s')
    namespace = 'test-flow-slow-process-executor-ns-2'
    flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)
    from kubernetes import client

    api_client = client.ApiClient()
    core_client = client.CoreV1Api(api_client=api_client)
    app_client = client.AppsV1Api(api_client=api_client)
    await create_all_flow_deployments_and_wait_ready(
        dump_path,
        namespace=namespace,
        api_client=api_client,
        app_client=app_client,
        core_client=core_client,
    )

    # start port forwarding
    # fix: dropped the needless f-prefix from placeholder-free strings
    logger.debug(' Start port forwarding')
    gateway_pod_name = (core_client.list_namespaced_pod(
        namespace=namespace,
        label_selector='app=gateway').items[0].metadata.name)
    config_path = os.environ['KUBECONFIG']
    import portforward

    with portforward.forward(namespace, gateway_pod_name, flow.port, flow.port,
                             config_path):
        # send requests and validate
        time.sleep(0.1)
        client_kwargs = dict(
            host='localhost',
            port=flow.port,
        )
        client_kwargs.update(flow._common_kwargs)

        # run the client in a separate (daemon) process so it keeps streaming
        # while this coroutine kills pods and polls the namespace
        stop_event = multiprocessing.Event()
        scale_event = multiprocessing.Event()
        received_responses = multiprocessing.Queue()
        response_arrival_times = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=send_requests,
            kwargs={
                'client_kwargs': client_kwargs,
                'stop_event': stop_event,
                'scale_event': scale_event,
                'received_responses': received_responses,
                'response_arrival_times': response_arrival_times,
                'logger': logger,
            },
            daemon=True,
        )
        process.start()
        time.sleep(1.0)
        logger.debug('Kill 2 replicas')

        pods = core_client.list_namespaced_pod(
            namespace=namespace,
            label_selector='app=slow-process-executor',
        )

        names = [item.metadata.name for item in pods.items]
        core_client.delete_namespaced_pod(names[0], namespace=namespace)
        core_client.delete_namespaced_pod(names[1], namespace=namespace)

        scale_event.set()
        # wait for replicas to be dead
        while True:
            pods = core_client.list_namespaced_pod(
                namespace=namespace,
                label_selector='app=slow-process-executor',
            )
            current_pod_names = [item.metadata.name for item in pods.items]
            if names[0] not in current_pod_names and names[
                    1] not in current_pod_names:
                logger.debug('Killing pods complete')
                # fix: use asyncio.sleep in the poll loop instead of the
                # blocking time.sleep, matching the sibling scaling test —
                # time.sleep would stall the event loop inside an async def
                await asyncio.sleep(1.0)
                stop_event.set()
                break
            else:
                logger.debug(
                    f'not dead yet {current_pod_names} waiting for {names[0]} and {names[1]}'
                )
            await asyncio.sleep(1.0)

        process.join()

        responses_list = []
        while not received_responses.empty():
            responses_list.append(int(received_responses.get()))

        logger.debug(f'Got the following responses {sorted(responses_list)}')
        # the received ids must form a contiguous range — no response lost
        assert sorted(responses_list) == list(
            range(min(responses_list),
                  max(responses_list) + 1))
async def test_no_message_lost_during_scaling(logger, docker_images, tmpdir):
    """Scale the executor from 3 replicas down to 1 mid-stream and assert no
    response is lost: the received ids must form a gap-free contiguous range.
    """
    flow = Flow(name='test-flow-slow-process-executor', ).add(
        name='slow_process_executor',
        uses=f'docker://{docker_images[0]}',
        replicas=3,
    )

    dump_path = os.path.join(str(tmpdir), 'test_flow_k8s')
    namespace = 'test-flow-slow-process-executor-ns'
    flow.to_k8s_yaml(dump_path, k8s_namespace=namespace)
    from kubernetes import client

    api_client = client.ApiClient()
    core_client = client.CoreV1Api(api_client=api_client)
    app_client = client.AppsV1Api(api_client=api_client)
    await create_all_flow_deployments_and_wait_ready(
        dump_path,
        namespace=namespace,
        api_client=api_client,
        app_client=app_client,
        core_client=core_client,
    )

    # start port forwarding
    gateway_pod_name = (core_client.list_namespaced_pod(
        namespace=namespace,
        label_selector='app=gateway').items[0].metadata.name)
    config_path = os.environ['KUBECONFIG']
    import portforward

    with portforward.forward(namespace, gateway_pod_name, flow.port, flow.port,
                             config_path):
        # send requests and validate
        time.sleep(0.1)
        client_kwargs = dict(
            return_responses=True,
            host='localhost',
            port=flow.port,
        )
        client_kwargs.update(flow._common_kwargs)

        # run the client in a separate (daemon) process so it keeps streaming
        # while this coroutine scales the deployment and polls the namespace
        stop_event = multiprocessing.Event()
        scale_event = multiprocessing.Event()
        received_responses = multiprocessing.Queue()
        response_arrival_times = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=send_requests,
            kwargs={
                'client_kwargs': client_kwargs,
                'stop_event': stop_event,
                'scale_event': scale_event,
                'received_responses': received_responses,
                'response_arrival_times': response_arrival_times,
                'logger': logger,
            },
            daemon=True,
        )
        process.start()
        time.sleep(1.0)
        logger.debug('Scale down executor to 1 replica')
        app_client.patch_namespaced_deployment_scale(
            'slow-process-executor',
            namespace=namespace,
            body={'spec': {
                'replicas': 1
            }},
        )
        scale_event.set()
        # wait for replicas to be dead
        while True:
            pods = core_client.list_namespaced_pod(
                namespace=namespace,
                # fix: dropped the needless f-prefix (F541)
                label_selector='app=slow-process-executor',
            )
            if len(pods.items) == 1:
                # still continue for a bit to hit the new replica only
                logger.debug('Scale down complete')
                # fix: await instead of blocking the event loop inside async def
                await asyncio.sleep(1.0)
                stop_event.set()
                break
            await asyncio.sleep(1.0)
        await asyncio.sleep(10.0)
        # kill the process as the client can hang due to lost responses
        if process.is_alive():
            process.kill()
        process.join()

        responses_list = []
        while not received_responses.empty():
            responses_list.append(int(received_responses.get()))

        logger.debug(f'Got the following responses {sorted(responses_list)}')
        # the received ids must form a contiguous range — no response lost
        assert sorted(responses_list) == list(
            range(min(responses_list),
                  max(responses_list) + 1))