def test_number_of_replicas(self, kube_apis, crd_ingress_controller,
                                transport_server_setup,
                                ingress_controller_prerequisites):
        """
        The load balancing of UDP should result in 4 servers to match the 4 replicas of a service.
        """
        # NOTE(review): a later method in this class re-defines
        # test_number_of_replicas, so this one is shadowed at class-creation
        # time and never collected by pytest — confirm and rename one of them.
        original = scale_deployment(kube_apis.apps_v1_api, "udp-service",
                                    transport_server_setup.namespace, 4)
        wait_before_test()

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1, transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace)

        print(result_conf)

        # Count the rendered upstream "server ...;" entries.
        pattern = 'server .*;'
        num_servers = len(re.findall(pattern, result_conf))

        # Use ==, not "is": identity comparison on ints relies on CPython's
        # small-int caching and is not a correctness guarantee.
        assert num_servers == 4

        # Restore the original replica count so later tests are unaffected.
        scale_deployment(kube_apis.apps_v1_api, "udp-service",
                         transport_server_setup.namespace, original)
        wait_before_test()
    def test_snippets(self, kube_apis, crd_ingress_controller,
                      transport_server_setup,
                      ingress_controller_prerequisites):
        """
        Verify that stream-level and server-level snippets are rendered into
        the generated NGINX config when enabled on the TransportServer.
        """
        snippets_src = f"{TEST_DATA}/transport-server/transport-server-snippets.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            snippets_src,
            transport_server_setup.namespace,
        )
        wait_before_test()

        generated = get_ts_nginx_template_conf(
            kube_apis.v1, transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace)
        print(generated)

        # Put the standard TransportServer back before asserting so the
        # cluster is left clean even if the assertions fail.
        restore_src = f"{TEST_DATA}/transport-server-status/standard/transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            restore_src,
            transport_server_setup.namespace,
        )

        # stream-snippets
        assert "limit_conn_zone $binary_remote_addr zone=addr:10m;" in generated
        # server-snippets
        assert "limit_conn addr 1;" in generated
    def test_number_of_replicas(self, kube_apis, crd_ingress_controller,
                                transport_server_setup,
                                ingress_controller_prerequisites):
        """
        The load balancing of TCP should result in 4 servers to match the 4 replicas of a service.
        """
        # NOTE(review): this re-defines test_number_of_replicas and shadows
        # the earlier UDP variant of the same name in this class — pytest only
        # collects this one. Confirm and rename one of the two.
        original = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api,
                                    "tcp-service",
                                    transport_server_setup.namespace, 4)

        num_servers = 0
        retry = 0

        # Poll the generated config until 4 upstream servers appear or the
        # retry budget is exhausted. Use != / ==, not "is": identity
        # comparison on ints is a CPython small-int-cache artifact, and
        # "num_servers is original" can be False for equal ints > 256.
        while num_servers != 4 and retry <= 30:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1, transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace)

            pattern = 'server .*;'
            num_servers = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert num_servers == 4

        # Scale back down and wait until the config reflects the original
        # replica count, so later tests see the expected state.
        scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "tcp-service",
                         transport_server_setup.namespace, original)
        retry = 0
        while num_servers != original and retry <= 50:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1, transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace)

            pattern = 'server .*;'
            num_servers = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert num_servers == original
    def test_udp_failing_healthcheck_with_match(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Configure a failing health check and check that NGINX Plus doesn't respond.
        """

        # Step 1 - configure a failing health check

        patch_src = f"{TEST_DATA}/transport-server-udp-load-balance/failing-hc-transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test(4)

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )

        match = f"match_ts_{transport_server_setup.namespace}_transport-server_udp-app"

        assert "health_check interval=5s" in result_conf
        assert f"passes=1 jitter=0s fails=1 udp match={match}" in result_conf
        # Bug fix: this was a bare string literal (always truthy) and never
        # actually checked the generated config.
        assert "health_check_timeout 3s;" in result_conf
        assert 'send "health"' in result_conf
        assert 'expect  "unmatched"' in result_conf

        # Step 2 - confirm load balancing doesn't work

        port = transport_server_setup.public_endpoint.udp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        host = host.strip("[]")
        client = socket.socket(ipfamily_from_host(host), socket.SOCK_DGRAM, 0)
        client.settimeout(2)
        client.sendto("ping".encode('utf-8'), (host, port))
        try:
            data, address = client.recvfrom(4096)
            endpoint = data.decode()
            print(f' req number  response: {endpoint}')
            # it should timeout
            pytest.fail("expected a timeout")
        except socket.timeout:
            print("successfully timed out")
        finally:
            # Close on all paths — previously the socket leaked when
            # pytest.fail() raised past the except clause.
            client.close()

        # Step 3 - restore

        self.restore_ts(kube_apis, transport_server_setup)
    def test_tcp_failing_healthcheck_with_match(
            self, kube_apis, crd_ingress_controller, transport_server_setup,
            ingress_controller_prerequisites):
        """
        Configure a failing health check and check that NGINX Plus resets connections.
        """

        # Step 1 - configure a failing health check

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/failing-hc-transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        # 4s includes 3s timeout for a health check to fail in case of a connection timeout to a backend pod
        wait_before_test(4)

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1, transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace)

        match = f"match_ts_{transport_server_setup.namespace}_transport-server_tcp-app"

        assert "health_check interval=5s" in result_conf
        assert f"passes=1 jitter=0s fails=1 match={match}" in result_conf
        # Bug fix: this was a bare string literal (always truthy) and never
        # actually checked the generated config.
        assert "health_check_timeout 3s" in result_conf
        assert 'send "health"' in result_conf
        assert 'expect  "unmatched"' in result_conf

        # Step 2 - confirm load balancing doesn't work

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        host = host.strip("[]")
        client = socket.create_connection((host, port))
        client.sendall(b'connect')

        try:
            client.recv(4096)  # must raise ConnectionResetError
            client.close()
            pytest.fail(
                "We expected an error here, but didn't get it. Exiting...")
        except ConnectionResetError as ex:
            # expected error
            print(f"There was an expected exception {str(ex)}")

        # Step 3 - restore

        self.restore_ts(kube_apis, transport_server_setup)
    def test_udp_request_load_balanced(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Requests to the load balanced UDP service should result in responses from 3 different endpoints.
        """
        wait_before_test()
        port = transport_server_setup.public_endpoint.udp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        print(f"sending udp requests to: {host}:{port}")

        endpoints = {}
        retry = 0
        # Use !=, not "is not": identity comparison on ints relies on
        # CPython's small-int caching and is not a correctness guarantee.
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                host = host.strip("[]")
                client = socket.socket(ipfamily_from_host(host), socket.SOCK_DGRAM, 0)
                client.sendto("ping".encode('utf-8'), (host, port))
                data, address = client.recvfrom(4096)
                endpoint = data.decode()
                print(f' req number {i}; response: {endpoint}')
                # Tally responses per backend endpoint.
                endpoints[endpoint] = endpoints.get(endpoint, 0) + 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert len(endpoints) == 3

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )

        # Every endpoint that responded must appear as an upstream server
        # in the generated config.
        pattern = 'server .*;'
        servers = re.findall(pattern, result_conf)
        for key in endpoints:
            assert any(chk_endpoint(key) in server for server in servers)
    def test_tcp_request_load_balanced(self, kube_apis, crd_ingress_controller,
                                       transport_server_setup,
                                       ingress_controller_prerequisites):
        """
        Requests to the load balanced TCP service should result in responses from 3 different endpoints.
        """
        wait_before_test()
        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        print(f"sending tcp requests to: {host}:{port}")

        endpoints = {}
        retry = 0
        # Use !=, not "is not": identity comparison on ints relies on
        # CPython's small-int caching and is not a correctness guarantee.
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                host = host.strip("[]")
                client = socket.create_connection((host, port))
                client.sendall(b'connect')
                response = client.recv(4096)
                endpoint = response.decode()
                print(f' req number {i}; response: {endpoint}')
                # Tally responses per backend endpoint.
                endpoints[endpoint] = endpoints.get(endpoint, 0) + 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert len(endpoints) == 3

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1, transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace)

        # Every endpoint that responded must appear as an upstream server
        # in the generated config.
        pattern = 'server .*;'
        servers = re.findall(pattern, result_conf)
        for key in endpoints:
            assert any(key in server for server in servers)
    def test_configurble_timeout_directives(
        self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Verify that the configurable session and upstream timeout directives
        are rendered into the generated NGINX config.
        """
        # NOTE(review): "configurble" looks like a typo for "configurable";
        # renaming would change pytest selection, so the name is kept as-is.
        timeouts_yaml = f"{TEST_DATA}/transport-server/transport-server-configurable-timeouts.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            timeouts_yaml,
            transport_server_setup.namespace,
        )
        wait_before_test()

        rendered = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )
        print(rendered)

        # Restore the standard TransportServer before asserting so the
        # cluster is left clean even if the assertions fail.
        standard_yaml = f"{TEST_DATA}/transport-server-status/standard/transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            standard_yaml,
            transport_server_setup.namespace,
        )

        # sessionParameters
        assert "proxy_timeout 2s;" in rendered
        # upstreamParameters
        for directive in (
            "proxy_connect_timeout 5s;",
            "proxy_next_upstream on;",
            "proxy_next_upstream_timeout 4s;",
            "proxy_next_upstream_tries 3;",
        ):
            assert directive in rendered
    def test_tcp_passing_healthcheck_with_match(
            self, kube_apis, crd_ingress_controller, transport_server_setup,
            ingress_controller_prerequisites):
        """
        Configure a passing health check and check that all backend pods return responses.
        """

        # Step 1 - configure a passing health check

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/passing-hc-transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        # 4s includes 3s timeout for a health check to fail in case of a connection timeout to a backend pod
        wait_before_test(4)

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1, transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace)

        match = f"match_ts_{transport_server_setup.namespace}_transport-server_tcp-app"

        assert "health_check interval=5s" in result_conf
        assert f"passes=1 jitter=0s fails=1 match={match}" in result_conf
        # Bug fix: this was a bare string literal (always truthy) and never
        # actually checked the generated config.
        assert "health_check_timeout 3s;" in result_conf
        assert 'send "health"' in result_conf
        assert 'expect  "healthy"' in result_conf

        # Step 2 - confirm load balancing works

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        endpoints = {}
        retry = 0
        # Use !=, not "is not": int identity is a CPython implementation detail.
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                host = host.strip("[]")
                client = socket.create_connection((host, port))
                client.sendall(b'connect')
                response = client.recv(4096)
                endpoint = response.decode()
                print(f' req number {i}; response: {endpoint}')
                # Tally responses per backend endpoint.
                endpoints[endpoint] = endpoints.get(endpoint, 0) + 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")
        assert len(endpoints) == 3

        # Step 3 - restore

        self.restore_ts(kube_apis, transport_server_setup)
    def test_tcp_request_load_balanced_method(
            self, kube_apis, crd_ingress_controller, transport_server_setup,
            ingress_controller_prerequisites):
        """
        Update load balancing method to 'hash'. This send requests to a specific pod based on it's IP. In this case
        resulting in a single endpoint handling all the requests.
        """

        def poll_endpoints(host, port, expected):
            """Send batches of 20 TCP requests until `expected` distinct
            backends respond (or the retry budget runs out); return a
            {endpoint: hit_count} tally."""
            seen = {}
            attempts = 0
            # Use !=, not "is not": int identity is a CPython detail.
            while len(seen) != expected and attempts <= 30:
                for i in range(20):
                    addr = host.strip("[]")
                    conn = socket.create_connection((addr, port))
                    conn.sendall(b'connect')
                    endpoint = conn.recv(4096).decode()
                    print(f' req number {i}; response: {endpoint}')
                    seen[endpoint] = seen.get(endpoint, 0) + 1
                    conn.close()
                attempts += 1
                wait_before_test(1)
                print(f"Retry #{attempts}")
            return seen

        # Step 1 - set the load balancing method.

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/method-transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()
        num_servers = 0
        retry = 0
        # Poll until the 3 upstream servers are rendered in the config.
        while num_servers != 3 and retry <= 30:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1, transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace)

            pattern = 'server .*;'
            num_servers = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert num_servers == 3

        # Step 2 - confirm all request go to the same endpoint.

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip
        endpoints = poll_endpoints(host, port, 1)
        assert len(endpoints) == 1

        # Step 3 - restore to default load balancing method and confirm requests are balanced.

        self.restore_ts(kube_apis, transport_server_setup)
        wait_before_test()

        endpoints = poll_endpoints(host, port, 3)
        assert len(endpoints) == 3
    def test_tcp_request_max_connections(self, kube_apis,
                                         crd_ingress_controller,
                                         transport_server_setup,
                                         ingress_controller_prerequisites):
        """
        The config, maxConns, should limit the number of open TCP connections.
        3 replicas of max 2 connections is 6, so making the 7th connection will fail.
        """

        # step 1 - set max connections to 2 per upstream server
        # (3 servers are expected to carry the max_conns=2 flag)
        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/max-connections-transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()
        configs = 0
        retry = 0
        # Use !=, not "is not": identity comparison on ints relies on
        # CPython's small-int caching and is not a correctness guarantee.
        while configs != 3 and retry <= 30:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1, transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace)

            pattern = 'max_conns=2'
            configs = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert configs == 3

        # step 2 - open the full number of allowed connections (3 x 2 = 6)
        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        clients = []
        for i in range(6):
            c = self.make_holding_connection(host, port)
            clients.append(c)

        # step 3 - assert the next connection fails
        try:
            c = self.make_holding_connection(host, port)
            # making a connection should fail and throw an exception
            assert c is None
        except ConnectionResetError as exc:
            print("The expected exception occurred:", exc)

        for c in clients:
            c.close()

        # step 4 - revert to config with no max connections
        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/standard/transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()

        # step 5 - confirm making lots of connections doesn't cause an error
        clients = []
        for i in range(24):
            c = self.make_holding_connection(host, port)
            clients.append(c)

        for c in clients:
            c.close()
    def test_udp_passing_healthcheck_with_match(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Configure a passing health check and check that all backend pods return responses.
        """

        # Step 1 - configure a passing health check

        patch_src = f"{TEST_DATA}/transport-server-udp-load-balance/passing-hc-transport-server.yaml"
        patch_ts_from_yaml(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        # 4s includes 3s timeout for a health check to fail in case a backend pod doesn't respond or responds with
        # an unexpected response
        wait_before_test(4)

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )

        match = f"match_ts_{transport_server_setup.namespace}_transport-server_udp-app"

        assert "health_check interval=5s" in result_conf
        assert f"passes=1 jitter=0s fails=1 udp match={match}" in result_conf
        # Bug fix: this was a bare string literal (always truthy) and never
        # actually checked the generated config.
        assert "health_check_timeout 3s;" in result_conf
        assert 'send "health"' in result_conf
        assert 'expect  "healthy"' in result_conf

        # Step 2 - confirm load balancing works

        port = transport_server_setup.public_endpoint.udp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        print(f"sending udp requests to: {host}:{port}")

        retry = 0
        endpoints = {}
        # Use !=, not "is not": int identity is a CPython implementation detail.
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                host = host.strip("[]")
                client = socket.socket(ipfamily_from_host(host), socket.SOCK_DGRAM, 0)
                client.sendto("ping".encode('utf-8'), (host, port))
                data, address = client.recvfrom(4096)
                endpoint = data.decode()
                print(f' req number {i}; response: {endpoint}')
                # Tally responses per backend endpoint.
                endpoints[endpoint] = endpoints.get(endpoint, 0) + 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert len(endpoints) == 3

        # Step 3 - restore

        self.restore_ts(kube_apis, transport_server_setup)