def test_number_of_replicas(self, kube_apis, crd_ingress_controller, transport_server_setup,
                            ingress_controller_prerequisites):
    """
    The load balancing of UDP should result in 4 servers to match the 4 replicas of a service.
    """
    original = scale_deployment(kube_apis.apps_v1_api, "udp-service", transport_server_setup.namespace, 4)
    wait_before_test()

    result_conf = get_ts_nginx_template_conf(
        kube_apis.v1,
        transport_server_setup.namespace,
        transport_server_setup.name,
        transport_server_setup.ingress_pod_name,
        ingress_controller_prerequisites.namespace,
    )
    print(result_conf)

    # Count the upstream "server ...;" entries rendered into the TS config.
    pattern = 'server .*;'
    num_servers = len(re.findall(pattern, result_conf))
    # BUGFIX: '==' instead of 'is' — identity comparison with an int literal relies on
    # CPython small-int caching and is a SyntaxWarning since Python 3.8.
    assert num_servers == 4

    # Restore the original replica count so later tests are unaffected.
    scale_deployment(kube_apis.apps_v1_api, "udp-service", transport_server_setup.namespace, original)
    wait_before_test()
def test_startup_time(self, request, kube_apis, ingress_controller_prerequisites, crd_ingress_controller,
                      ingress_controller_endpoint, vs_vsr_setup):
    """
    Pod startup time with 1 VS and multiple VSRs.
    """
    total_vsr = int(request.config.getoption("--batch-resources"))
    ic_ns = ingress_controller_prerequisites.namespace

    # Scale the IC to 0 and wait until every pod is actually gone.
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ic_ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)

    metrics_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.metrics_port}/metrics"
    assert (
        get_total_vs(metrics_url, "nginx") == "1"
        and get_total_vsr(metrics_url, "nginx") == str(total_vsr)
        and get_last_reload_status(metrics_url, "nginx") == "1"
    )
    # scale_deployment presumably returns None when the scale-up succeeded
    # within its timeout — kept from the original assertion.
    assert num is None
def test_grpc_error_intercept(self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller,
                              backend_setup, virtual_server_setup):
    """
    A healthy gRPC call succeeds and is access-logged as '200 0'; after both
    backends are scaled to zero the same call must fail and be logged as '204 14'.
    """
    cert = get_certificate(virtual_server_setup.public_endpoint.public_ip,
                           virtual_server_setup.vs_host,
                           virtual_server_setup.public_endpoint.port_ssl)
    target = f'{virtual_server_setup.public_endpoint.public_ip}:{virtual_server_setup.public_endpoint.port_ssl}'
    credentials = grpc.ssl_channel_credentials(root_certificates=cert.encode())
    options = (('grpc.ssl_target_name_override', virtual_server_setup.vs_host),)

    # First call: backends are up, the RPC must succeed.
    with grpc.secure_channel(target, credentials, options) as channel:
        stub = GreeterStub(channel)
        response = ""
        try:
            response = stub.SayHello(HelloRequest(name=virtual_server_setup.public_endpoint.public_ip))
            valid_message = "Hello {}".format(virtual_server_setup.public_endpoint.public_ip)
            # no status has been returned in the response
            assert valid_message in response.message
        except grpc.RpcError as e:
            print(e.details())
            pytest.fail("RPC error was not expected during call, exiting...")

    # Assert grpc_status is in the logs. The gRPC response in a successful call is 0.
    ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
    log_contents = kube_apis.v1.read_namespaced_pod_log(ic_pod_name, ingress_controller_prerequisites.namespace)
    retry = 0
    while '"POST /helloworld.Greeter/SayHello HTTP/2.0" 200 0' not in log_contents and retry <= 60:
        log_contents = kube_apis.v1.read_namespaced_pod_log(
            ic_pod_name, ingress_controller_prerequisites.namespace)
        retry += 1
        wait_before_test(1)
        print(f"Logs not yet updated, retrying... #{retry}")
    assert '"POST /helloworld.Greeter/SayHello HTTP/2.0" 200 0' in log_contents

    # Remove every backend pod, then retry: the RPC must now fail with status 14.
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "grpc1", virtual_server_setup.namespace, 0)
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "grpc2", virtual_server_setup.namespace, 0)
    wait_before_test()

    with grpc.secure_channel(target, credentials, options) as channel:
        stub = GreeterStub(channel)
        try:
            response = stub.SayHello(HelloRequest(name=virtual_server_setup.public_endpoint.public_ip))
            # assert the grpc status has been returned in the header
            assert response.status == 14
            pytest.fail("RPC error was expected during call, exiting...")
        except grpc.RpcError as e:
            print(e)

    # Assert the grpc_status is also in the logs.
    ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
    wait_before_test()
    # Need to get full log because of a race condition on the last log entry.
    log_contents = kube_apis.v1.read_namespaced_pod_log(ic_pod_name, ingress_controller_prerequisites.namespace)
    retry = 0
    while '"POST /helloworld.Greeter/SayHello HTTP/2.0" 204 14' not in log_contents and retry <= 60:
        log_contents = kube_apis.v1.read_namespaced_pod_log(
            ic_pod_name, ingress_controller_prerequisites.namespace)
        retry += 1
        wait_before_test(1)
        print(f"Logs not yet updated, retrying... #{retry}")
    # BUGFIX: the original polled for the '204 14' entry but never asserted it was
    # found, so the test passed even when the log line never appeared.
    assert '"POST /helloworld.Greeter/SayHello HTTP/2.0" 204 14' in log_contents
def test_ap_ingress_batch_start(
    self,
    request,
    kube_apis,
    crd_ingress_controller_with_ap,
    ap_ingress_setup,
    ingress_controller_prerequisites,
    test_namespace,
):
    """
    Pod startup time with AP Ingress
    """
    print("------------- Run test for AP policy: dataguard-alarm --------------")
    print(f"Request URL: {ap_ingress_setup.req_url} and Host: {ap_ingress_setup.ingress_host}")
    ensure_response_from_backend(ap_ingress_setup.req_url, ap_ingress_setup.ingress_host, check404=True)

    # Deploy `total_ing` additional ingresses from the template manifest.
    total_ing = int(request.config.getoption("--batch-resources"))
    manifest = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml"
    for i in range(1, total_ing + 1):
        with open(manifest) as f:
            doc = yaml.safe_load(f)
            doc["metadata"]["name"] = f"appprotect-ingress-{i}"
            doc["spec"]["rules"][0]["host"] = f"appprotect-{i}.example.com"
            create_ingress(kube_apis.networking_v1, test_namespace, doc)
    print(f"Total resources deployed is {total_ing}")
    wait_before_test()

    # Restart the IC (scale 0 -> 1) and verify all ingresses are picked up.
    ic_ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ic_ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)

    assert (
        get_total_ingresses(ap_ingress_setup.metrics_url, "nginx") == str(total_ing + 1)
        and get_last_reload_status(ap_ingress_setup.metrics_url, "nginx") == "1"
    )

    # Cleanup.
    for i in range(1, total_ing + 1):
        delete_ingress(kube_apis.networking_v1, f"appprotect-ingress-{i}", test_namespace)
    # scale_deployment presumably returns None on a successful scale-up — kept from original.
    assert num is None
def test_vs_batch_start(
    self,
    request,
    kube_apis,
    ingress_controller_prerequisites,
    crd_ingress_controller,
    virtual_server_setup,
    test_namespace,
):
    """
    Pod startup time with simple VS
    """
    resp = requests.get(virtual_server_setup.backend_1_url,
                        headers={"host": virtual_server_setup.vs_host})
    # BUGFIX: '==' instead of 'is' — `status_code is 200` relies on CPython
    # small-int caching; equality is the correct comparison.
    assert resp.status_code == 200

    # Deploy `total_vs` additional VirtualServers from the template manifest.
    total_vs = int(request.config.getoption("--batch-resources"))
    manifest = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml"
    for i in range(1, total_vs + 1):
        with open(manifest) as f:
            doc = yaml.safe_load(f)
            doc["metadata"]["name"] = f"virtual-server-{i}"
            doc["spec"]["host"] = f"virtual-server-{i}.example.com"
            kube_apis.custom_objects.create_namespaced_custom_object(
                "k8s.nginx.org", "v1", test_namespace, "virtualservers", doc)
            print(f"VirtualServer created with name '{doc['metadata']['name']}'")
    print(f"Total resources deployed is {total_vs}")
    wait_before_test()

    # Restart the IC (scale 0 -> 1) and verify all VS are picked up.
    ic_ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ic_ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)

    assert (
        get_total_vs(virtual_server_setup.metrics_url, "nginx") == str(total_vs + 1)
        and get_last_reload_status(virtual_server_setup.metrics_url, "nginx") == "1"
    )

    # Cleanup.
    for i in range(1, total_vs + 1):
        delete_virtual_server(kube_apis.custom_objects, f"virtual-server-{i}", test_namespace)
    # scale_deployment presumably returns None on a successful scale-up — kept from original.
    assert num is None
def test_reload_count_after_start(self, kube_apis, smoke_setup, ingress_controller_prerequisites):
    """
    After a full IC restart (scale 0 -> 1) the reload counter must be exactly 1.
    """
    ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1)
    # scale_deployment presumably returns None on a successful scale-up — kept from original.
    assert num is None

    metrics_url = f"http://{smoke_setup.public_endpoint.public_ip}:{smoke_setup.public_endpoint.metrics_port}/metrics"
    count = get_reload_count(metrics_url)
    assert count == 1
def test_grpc_error_intercept(self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller,
                              backend_setup, virtual_server_setup):
    """
    A healthy gRPC call must succeed with no grpc-status in the body; once both
    backend deployments are scaled to zero, the same call must raise RpcError
    (or report status 14 in the response header).
    """
    endpoint = virtual_server_setup.public_endpoint
    host = virtual_server_setup.vs_host
    ca_cert = get_certificate(endpoint.public_ip, host, endpoint.port_ssl)
    addr = f'{endpoint.public_ip}:{endpoint.port_ssl}'
    creds = grpc.ssl_channel_credentials(root_certificates=ca_cert.encode())
    channel_opts = (('grpc.ssl_target_name_override', host), )

    # First call: backends are up, the RPC is expected to succeed.
    with grpc.secure_channel(addr, creds, channel_opts) as channel:
        greeter = GreeterStub(channel)
        reply = ""
        try:
            reply = greeter.SayHello(HelloRequest(name=endpoint.public_ip))
            expected = "Hello {}".format(endpoint.public_ip)
            # no status has been returned in the response
            assert expected in reply.message
        except grpc.RpcError as err:
            print(err.details())
            pytest.fail("RPC error was not expected during call, exiting...")

    # Remove every backend pod, then retry: the RPC is expected to fail.
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "grpc1", virtual_server_setup.namespace, 0)
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "grpc2", virtual_server_setup.namespace, 0)
    time.sleep(1)

    with grpc.secure_channel(addr, creds, channel_opts) as channel:
        greeter = GreeterStub(channel)
        try:
            reply = greeter.SayHello(HelloRequest(name=endpoint.public_ip))
            # assert the grpc status has been returned in the header
            assert reply.status == 14
            pytest.fail("RPC error was expected during call, exiting...")
        except grpc.RpcError as err:
            print(err)
def test_ap_pod_startup(
    self,
    request,
    kube_apis,
    ingress_controller_prerequisites,
    crd_ingress_controller_with_ap,
    appprotect_setup,
    test_namespace,
):
    """
    Log pod startup time while scaling up from 0 to 1
    """
    src_syslog_yaml = f"{TEST_DATA}/appprotect/syslog.yaml"
    create_items_from_yaml(kube_apis, src_syslog_yaml, test_namespace)
    syslog_ep = get_service_endpoint(kube_apis, "syslog-svc", test_namespace)
    # NOTE(review): dropped the unused `syslog_pod` lookup present in the original —
    # a read-only list_namespaced_pod call whose result was never used.
    create_ingress_with_ap_annotations(kube_apis, src_ing_yaml, test_namespace, ap_policy,
                                       "True", "True", f"{syslog_ep}:514")
    ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)
    print("--------- AppProtect module is enabled with correct policy ---------")
    ensure_response_from_backend(appprotect_setup.req_url, ingress_host, check404=True)

    # Restart the IC (scale 0 -> 1) to measure startup.
    ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1)

    # Cleanup.
    delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)
    delete_items_from_yaml(kube_apis, src_syslog_yaml, test_namespace)
    # scale_deployment presumably returns None on a successful scale-up — kept from original.
    assert num is None
def test_number_of_replicas(self, kube_apis, crd_ingress_controller, transport_server_setup,
                            ingress_controller_prerequisites):
    """
    The load balancing of TCP should result in 4 servers to match the 4 replicas of a service.
    """

    def _poll_server_count(expected, max_retries):
        # Poll the rendered TS config until `expected` upstream "server ...;"
        # entries appear or the retry budget is exhausted; return the last count.
        count = -1
        retry = 0
        while count != expected and retry <= max_retries:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1,
                transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace,
            )
            count = len(re.findall('server .*;', result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")
        return count

    original = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "tcp-service",
                                transport_server_setup.namespace, 4)
    # BUGFIX: '==' instead of 'is' — identity comparison with ints relies on
    # CPython small-int caching and is a SyntaxWarning since Python 3.8.
    assert _poll_server_count(4, 30) == 4

    # Scale back and wait until the config reflects the original replica count.
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "tcp-service",
                     transport_server_setup.namespace, original)
    assert _poll_server_count(original, 50) == original
def test_ap_pod_startup(
    self,
    request,
    kube_apis,
    ingress_controller_prerequisites,
    crd_ingress_controller_with_ap,
    appprotect_setup,
    test_namespace,
):
    """
    Log pod startup time while scaling up from 0 to 1
    """
    syslog_dst = f"syslog-svc.{test_namespace}"
    create_ingress_with_ap_annotations(kube_apis, src_ing_yaml, test_namespace, ap_policy,
                                       "True", "True", f"{syslog_dst}:514")
    ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)
    print("--------- AppProtect module is enabled with correct policy ---------")
    ensure_response_from_backend(appprotect_setup.req_url, ingress_host, check404=True)

    # Restart the IC (scale 0 -> 1) to measure startup.
    ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1)

    delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)
    # scale_deployment presumably returns None on a successful scale-up — kept from original.
    assert num is None
def test_dynamic_configuration(self, kube_apis, ingress_controller_endpoint, crd_ingress_controller,
                               virtual_server_setup):
    """
    Scaling a backend to 0 and back to 1 must be applied via the NGINX Plus API
    without a config reload, and the upstream server must keep its configured
    connection parameters.
    """
    req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.api_port}"
    vs_upstream = f"vs_{virtual_server_setup.namespace}_{virtual_server_setup.vs_name}_backend2"
    initial_reloads_count = get_nginx_generation_value(req_url)
    upstream_servers_url = f"{req_url}/api/{NGINX_API_VERSION}/http/upstreams/{vs_upstream}/servers"

    print("Scale BE deployment")
    # Scale down (upstream empties), then back up (upstream repopulates).
    for replicas, wait_for_state in ((0, wait_for_empty_array), (1, wait_for_non_empty_array)):
        scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "backend2",
                         virtual_server_setup.namespace, replicas)
        wait_for_state(upstream_servers_url)

    print("Run checks:")
    resp = json.loads(requests.get(upstream_servers_url).text)
    new_reloads_count = get_nginx_generation_value(req_url)
    assert new_reloads_count == initial_reloads_count, "Expected: no new reloads"

    server = resp[0]
    assert server['max_conns'] == 32
    assert server['max_fails'] == 25
    assert server['fail_timeout'] == '15s'
    assert server['slow_start'] == '10s'
def test_nginx_plus_api_response(self, kube_apis, ingress_controller_endpoint, crd_ingress_controller,
                                 v_s_route_setup, v_s_route_app_setup):
    """
    Scaling VSR backends down/up must be handled via the NGINX Plus API without
    a config reload, and the upstream servers must keep their configured
    connection parameters.
    """
    req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.api_port}"
    vsr_s_upstream = f"vs_{v_s_route_setup.namespace}_{v_s_route_setup.vs_name}_" \
                     f"vsr_{v_s_route_setup.route_s.namespace}_{v_s_route_setup.route_s.name}_backend2"
    vsr_m_upstream = f"vs_{v_s_route_setup.namespace}_{v_s_route_setup.vs_name}_" \
                     f"vsr_{v_s_route_setup.route_m.namespace}_{v_s_route_setup.route_m.name}_backend1"
    initial_reloads_count = get_nginx_generation_value(req_url)
    upstream_servers_s_url = f"{req_url}/api/{NGINX_API_VERSION}/http/upstreams/{vsr_s_upstream}/servers"
    upstream_servers_m_url = f"{req_url}/api/{NGINX_API_VERSION}/http/upstreams/{vsr_m_upstream}/servers"

    print("Scale BE deployment")
    # Scale both backends to 0 and wait for their upstreams to empty...
    scale_deployment(kube_apis.apps_v1_api, "backend2", v_s_route_setup.route_s.namespace, 0)
    scale_deployment(kube_apis.apps_v1_api, "backend1", v_s_route_setup.route_m.namespace, 0)
    wait_for_empty_array(upstream_servers_s_url)
    wait_for_empty_array(upstream_servers_m_url)
    # ...then back to 1 and wait for them to repopulate.
    scale_deployment(kube_apis.apps_v1_api, "backend2", v_s_route_setup.route_s.namespace, 1)
    scale_deployment(kube_apis.apps_v1_api, "backend1", v_s_route_setup.route_m.namespace, 1)
    wait_for_non_empty_array(upstream_servers_s_url)
    wait_for_non_empty_array(upstream_servers_m_url)

    print("Run checks")
    resp_s = json.loads(requests.get(upstream_servers_s_url).text)
    resp_m = json.loads(requests.get(upstream_servers_m_url).text)
    new_reloads_count = get_nginx_generation_value(req_url)
    assert new_reloads_count == initial_reloads_count, "Expected: no new reloads"
    for resp in [resp_s, resp_m]:
        # BUGFIX: '==' instead of 'is' — identity comparison with int literals
        # relies on CPython small-int caching and is a SyntaxWarning since 3.8.
        assert resp[0]['max_conns'] == 32
        assert resp[0]['max_fails'] == 25
        assert resp[0]['fail_timeout'] == '15s'
        assert resp[0]['slow_start'] == '10s'
def test_ap_waf_policy_vs_batch_start(
    self,
    request,
    kube_apis,
    ingress_controller_prerequisites,
    crd_ingress_controller_with_ap,
    virtual_server_setup,
    appprotect_waf_setup,
    test_namespace,
):
    """
    Pod startup time with AP WAF Policy
    """
    waf_spec_vs_src = f"{TEST_DATA}/ap-waf/virtual-server-waf-spec.yaml"
    waf_pol_dataguard_src = f"{TEST_DATA}/ap-waf/policies/waf-dataguard.yaml"

    print("Create waf policy")
    create_ap_waf_policy_from_yaml(
        kube_apis.custom_objects,
        waf_pol_dataguard_src,
        test_namespace,
        test_namespace,
        True,
        False,
        ap_pol_name,
        log_name,
        "syslog:server=127.0.0.1:514",
    )
    wait_before_test()
    print(f"Patch vs with policy: {waf_spec_vs_src}")
    patch_virtual_server_from_yaml(
        kube_apis.custom_objects,
        virtual_server_setup.vs_name,
        waf_spec_vs_src,
        virtual_server_setup.namespace,
    )
    wait_before_test(120)

    print("----------------------- Send request with embedded malicious script----------------------")
    response1 = requests.get(
        virtual_server_setup.backend_1_url + "</script>",
        headers={"host": virtual_server_setup.vs_host},
    )
    print(response1.status_code)

    print("----------------------- Send request with blocked keyword in UDS----------------------")
    response2 = requests.get(
        virtual_server_setup.backend_1_url,
        headers={"host": virtual_server_setup.vs_host},
        data="kic",
    )
    total_vs = int(request.config.getoption("--batch-resources"))
    print(response2.status_code)

    # Deploy `total_vs` copies of the WAF-enabled VirtualServer.
    for i in range(1, total_vs + 1):
        with open(waf_spec_vs_src) as f:
            doc = yaml.safe_load(f)
            doc["metadata"]["name"] = f"virtual-server-{i}"
            doc["spec"]["host"] = f"virtual-server-{i}.example.com"
            kube_apis.custom_objects.create_namespaced_custom_object(
                "k8s.nginx.org", "v1", test_namespace, "virtualservers", doc)
            print(f"VirtualServer created with name '{doc['metadata']['name']}'")
    print(f"Total resources deployed is {total_vs}")
    wait_before_test()

    # Restart the IC (scale 0 -> 1) and verify all VS are picked up.
    ic_ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount(kube_apis.v1, ic_ns) != 0:
        print("Number of replicas not 0, retrying...")
        wait_before_test()
    num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)

    assert (
        get_total_vs(virtual_server_setup.metrics_url, "nginx") == str(total_vs + 1)
        and get_last_reload_status(virtual_server_setup.metrics_url, "nginx") == "1"
    )

    # Cleanup.
    for i in range(1, total_vs + 1):
        delete_virtual_server(kube_apis.custom_objects, f"virtual-server-{i}", test_namespace)
    delete_policy(kube_apis.custom_objects, "waf-policy", test_namespace)
    # scale_deployment presumably returns None on a successful scale-up — kept from original.
    assert num is None
def test_dos_arbitrator(self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller_with_dos,
                        dos_setup, test_namespace):
    """
    Test App Protect Dos: Check new IC pod get learning info
    """
    print("----------------------- Get syslog pod name ----------------------")
    syslog_pod = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "syslog")
    assert "syslog" in syslog_pod

    log_loc = "/var/log/messages"  # plain literal: the original f-string had no placeholders
    clear_file_contents(kube_apis.v1, log_loc, syslog_pod, ingress_controller_prerequisites.namespace)

    print("------------------------- Deploy ingress -----------------------------")
    create_ingress_with_dos_annotations(kube_apis, src_ing_yaml, test_namespace,
                                        test_namespace + "/dos-protected")
    ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)

    # print("------------------------- Learning Phase -----------------------------")
    print("start good clients requests")
    p_good_client = subprocess.Popen(
        [f"exec {TEST_DATA}/dos/good_clients_xff.sh {ingress_host} {dos_setup.req_url}"],
        preexec_fn=os.setsid,
        shell=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    print("Learning for max 10 minutes")
    find_in_log(kube_apis, log_loc, syslog_pod, ingress_controller_prerequisites.namespace, 600,
                "learning_confidence=\"Ready\"")

    print("------------------------- Check new IC pod get info from arbitrator -----------------------------")
    ic_ns = ingress_controller_prerequisites.namespace
    scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 2)
    # BUGFIX: '!=' instead of 'is not' — identity comparison with an int is unreliable.
    while get_pods_amount_with_name(kube_apis.v1, "nginx-ingress", "nginx-ingress") != 2:
        print("Number of replicas is not 2, retrying...")
        wait_before_test()

    print("------------------------- Check if new pod receive info from arbitrator -----------------------------")
    print("Wait for 30 seconds")
    wait_before_test(30)

    log_contents = get_file_contents(kube_apis.v1, log_loc, syslog_pod,
                                     ingress_controller_prerequisites.namespace)
    log_info_dic = log_content_to_dic(log_contents)

    print("Stop Good Client")
    p_good_client.terminate()

    # Collect the distinct IC hostnames that reached "Ready" learning confidence.
    learning_units_hostname = []
    for log in log_info_dic:
        if log['unit_hostname'] not in learning_units_hostname and log['learning_confidence'] == "Ready":
            learning_units_hostname.append(log['unit_hostname'])

    delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)
    # Both IC replicas should have reached "Ready" learning confidence.
    assert (len(learning_units_hostname) == 2)