def validate_ingress():
    """
    Validate ingress by creating an ingress rule for the microbot app and
    polling it over HTTP until it answers.
    """
    wait_for_pod_state("", "default", "running", label="app=default-http-backend")
    wait_for_pod_state("", "default", "running", label="name=nginx-ingress-microk8s")

    here = os.path.dirname(os.path.abspath(__file__))
    manifest = os.path.join(here, "templates", "ingress.yaml")
    update_yaml_with_arch(manifest)
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=microbot")

    # Wait for the ingress record to show up.
    attempt = 50
    while attempt >= 0:
        output = kubectl("get ing")
        if "microbot.127.0.0.1.xip.io" in output:
            break
        time.sleep(2)
        attempt -= 1
    assert "microbot.127.0.0.1.xip.io" in output

    # Wait for the service to answer over HTTP.  The original code let a
    # ConnectionError escape while the controller was still coming up; retry
    # on any requests failure instead, and guard against `resp` never having
    # been bound.
    resp = None
    attempt = 50
    while attempt >= 0:
        try:
            resp = requests.get("http://microbot.127.0.0.1.xip.io")
            if resp.status_code == 200:
                break
        except requests.RequestException:
            pass
        time.sleep(2)
        attempt -= 1
    assert resp is not None and resp.status_code == 200
    assert "microbot.png" in resp.content.decode("utf-8")

    kubectl("delete -f {}".format(manifest))
def validate_multus():
    """Validate multus by deploying an alpine pod with 3 interfaces."""
    wait_for_installation()
    base_dir = os.path.dirname(os.path.abspath(__file__))
    shutil.rmtree("/tmp/microk8s-multus-test-nets", ignore_errors=True)

    networks = os.path.join(base_dir, "templates", "multus-networks.yaml")
    kubectl("create -f {}".format(networks))
    manifest = os.path.join(base_dir, "templates", "multus-alpine.yaml")
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=multus-alpine")

    # Each extra interface must carry its statically assigned address.
    for iface, address in (("eth1", "10.111.111.111"), ("eth2", "10.222.222.222")):
        report = kubectl(
            "exec multus-alpine -- ifconfig {}".format(iface),
            timeout_insec=900,
            err_out="no",
        )
        assert address in report

    kubectl("delete -f {}".format(manifest))
    kubectl("delete -f {}".format(networks))
    shutil.rmtree("/tmp/microk8s-multus-test-nets", ignore_errors=True)
def _testCreateVolumeFromStorageClass(self, scFile):
    '''Test creating a volume based on provided storage class
    @type scFile: Path for storage class config file
    @param scFile: C{Str}'''
    # NOTE(review): scFile is only used for logging; the storage class yaml
    # is generated from self._scFile -- confirm this is intentional.
    utils.log("Testing for sc: %s" % scFile, as_banner=True)
    # Generate the storage class yaml once and reuse it; the original code
    # ran the identical PopulateYaml(...).generateFile() twice.
    _storageClassFile = PopulateYaml(
        self._scFile,
        self._test_id,
        mount_target_ocid=self._mnt_target_ocid,
        subnet_ocid=self._subnet_ocid).generateFile()
    # Delete any previously existing storage classes with the same name
    utils.kubectl("delete -f " + _storageClassFile, exit_on_error=False)
    # Create the storage class
    utils.kubectl("create -f " + _storageClassFile, exit_on_error=False)
    self._testSuccess = self._test_create_volume(
        PopulateYaml(self.FSS_CLAIM, self._test_id,
                     region=self._region).generateFile(),
        "demooci-fss-" + self._test_id,
        availability_domain=self.DEFAULT_AVAILABILITY_DOMAIN,
        storageType=self.FS_STORAGE,
        verify_func=self._volume_from_fss_dynamic_check,
        canaryMetricName=self.CM_FSS)
    self._checkTestSuccess()
def validate_istio():
    """Validate istio by deploying the bookinfo app."""
    if platform.machine() != "x86_64":
        print("Istio tests are only relevant in x86 architectures")
        return

    wait_for_installation()
    # Control-plane components that must be running before deploying the app.
    for component in ("citadel", "egressgateway", "galley",
                      "ingressgateway", "sidecar-injector"):
        wait_for_pod_state("", "istio-system", "running",
                           label="istio={}".format(component))

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "bookinfo.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=details")
    kubectl("delete -f {}".format(manifest))
def validate_openebs():
    """Validate OpenEBS by mounting a test volume into a busybox pod."""
    wait_for_installation()
    wait_for_pod_state(
        "",
        "openebs",
        "running",
        label="openebs.io/component-name=maya-apiserver",
        timeout_insec=900,
    )
    print("OpenEBS is up and running.")

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "openebs-test.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state(
        "", "default", "running", label="app=openebs-test-busybox", timeout_insec=900
    )
    # The manifest mounts the volume at /my-data inside the pod.
    listing = kubectl("exec openebs-test-busybox -- ls /",
                      timeout_insec=900, err_out="no")
    assert "my-data" in listing
    kubectl("delete -f {}".format(manifest))
def validate_gpu():
    """Validate gpu by trying a cuda-add."""
    if platform.machine() != "x86_64":
        print("GPU tests are only relevant in x86 architectures")
        return

    wait_for_pod_state("", "kube-system", "running",
                       label="name=nvidia-device-plugin-ds")
    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "cuda-add.yaml"
    )

    # Remove any leftover pod from a previous run before re-applying.
    if "cuda-vector-add" in str(kubectl_get("po")):
        kubectl("delete -f {}".format(manifest))
        time.sleep(10)

    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("cuda-vector-add", "default", "terminated")
    logs = kubectl("logs pod/cuda-vector-add")
    assert "PASSED" in logs
def validate_knative():
    """Validate Knative by deploying the helloworld-go app."""
    if platform.machine() != "x86_64":
        print("Knative tests are only relevant in x86 architectures")
        return

    wait_for_installation()
    # Serving control-plane pods that must come up first.
    for component in ("activator", "autoscaler", "controller"):
        wait_for_pod_state("", "knative-serving", "running",
                           label="app={}".format(component))

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "templates",
        "knative-helloworld.yaml",
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state(
        "", "default", "running", label="serving.knative.dev/service=helloworld-go"
    )
    kubectl("delete -f {}".format(manifest))
def validate_linkerd():
    """Validate Linkerd by deploying emojivoto."""
    if platform.machine() != "x86_64":
        print("Linkerd tests are only relevant in x86 architectures")
        return

    wait_for_installation()
    # Both control-plane components must be running before deploying the app.
    for component, message in (
        ("controller", "Linkerd controller up and running."),
        ("proxy-injector", "Linkerd proxy injector up and running."),
    ):
        wait_for_pod_state(
            "",
            "linkerd",
            "running",
            label="linkerd.io/control-plane-component={}".format(component),
            timeout_insec=300,
        )
        print(message)

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "emojivoto.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "emojivoto", "running",
                       label="app=emoji-svc", timeout_insec=600)
    kubectl("delete -f {}".format(manifest))
def validate_ingress():
    """Validate ingress by creating an ingress rule."""
    # Older deployments run the ingress controller in the default namespace;
    # newer ones use the dedicated "ingress" namespace.
    if "nginx-ingress-microk8s-controller" in kubectl("get ds"):
        wait_for_pod_state("", "default", "running",
                           label="app=default-http-backend")
        wait_for_pod_state("", "default", "running",
                           label="name=nginx-ingress-microk8s")
    else:
        wait_for_pod_state("", "ingress", "running",
                           label="name=nginx-ingress-microk8s")

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "ingress.yaml"
    )
    update_yaml_with_arch(manifest)
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=microbot")
    common_ingress()
    kubectl("delete -f {}".format(manifest))
def validate_storage():
    """Validate storage by creating a PVC backed by the hostpath provisioner."""
    wait_for_pod_state("", "kube-system", "running",
                       label="k8s-app=hostpath-provisioner")
    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "pvc.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("hostpath-test-pod", "default", "running")

    # Poll until the claim reports as Bound (51 tries, 2s apart).
    for _ in range(51):
        output = kubectl("get pvc")
        if "Bound" in output:
            break
        time.sleep(2)

    # Make sure the test pod wrote data to the storage.
    found = any(
        "dates" in files
        for _, _, files in os.walk("/var/snap/microk8s/common/default-storage")
    )
    assert found
    assert "myclaim" in output
    assert "Bound" in output
    kubectl("delete -f {}".format(manifest))
def _create_rc_or_pod(self, config, availability_domain, volume_name="default_volume"):
    '''Create replication controller or pod and wait for it to start
    @param config: Replication controller configuration file to patch
    @type config: C{Str}
    @param availability_domain: Availability domain to start rc in
    @type availability_domain: C{Str}
    @param volume_name: Volume name used by the replication controller
    @type volume_name: C{Str}
    @return: Tuple containing the name of the created rc and its config file
    @rtype: C{Tuple}'''
    patched_config = PopulateYaml(
        config,
        self._test_id,
        volume_name=volume_name,
        availability_domain=availability_domain).generateFile()
    utils.log(
        "Starting the replication controller (creates a single nginx pod)."
    )
    # Remove any leftovers from a previous run, then create fresh.
    utils.kubectl("delete -f " + patched_config,
                  exit_on_error=False, display_errors=False)
    utils.kubectl("create -f " + patched_config)
    utils.log("Waiting for the pod to start.")
    pod_name, _, _ = self._wait_for_pod_status("Running", self.POD_CONTROLLER)
    return pod_name, patched_config
def _volume_from_backup_check(self, test_id, availability_domain, volume,
                              file_name='hello.txt'):
    '''Verify whether the volume created from the backup is in a healthy state
    @param test_id: Test id to use for creating components
    @type test_id: C{Str}
    @param availability_domain: Availability domain to create resource in
    @type availability_domain: C{Str}
    @param volume: Name of volume to verify
    @type volume: C{Str}
    @param file_name: Name of file to do checks for
    @type file_name: C{Str}'''
    # The ocid is the last dot-separated component of the volume name.
    backup_ocid = volume.split('.')[-1]
    rc_name, rc_config = self._create_rc_or_pod(
        "templates/example-replication-controller.template",
        availability_domain, backup_ocid)
    utils.log("Does the file from the previous backup exist?")
    listing = utils.kubectl("exec " + rc_name + " -- ls /usr/share/nginx/html")
    if file_name not in listing.split("\n"):
        utils.log("Error: Failed to find file %s in mounted volume" % file_name)
    utils.log(
        "Deleting the replication controller (deletes the single nginx pod)."
    )
    utils.kubectl("delete -f " + rc_config)
def validate_rbac():
    """Validate RBAC is actually on."""
    # The default service account must be denied...
    denied = kubectl(
        "auth can-i --as=system:serviceaccount:default:default view pod",
        err_out='no',
    )
    assert "no" in denied
    # ...while a member of system:masters must be allowed.
    allowed = kubectl("auth can-i --as=admin --as-group=system:masters view pod")
    assert "yes" in allowed
def _create_file_via_replication_controller(rc_name, file_name="hello.txt"):
    '''Create a file inside the nginx web root via the replication controller
    @param rc_name: Name of the replication controller pod to write data to
    @type rc_name: C{Str}
    @param file_name: Name of file to create
    @type file_name: C{Str}'''
    utils.kubectl("exec " + rc_name + " -- touch /usr/share/nginx/html/" +
                  file_name)
def validate_kata():
    """Validate the Kata runtime by running an nginx pod on it."""
    wait_for_installation()
    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "nginx-kata.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=kata")
    kubectl("delete -f {}".format(manifest))
def validate_dns():
    """
    Validate DNS by starting a busybox and nslookup-ing the kubernetes
    default service.
    """
    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "bbox.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("busybox", "default", "running")
    # The test expects the kubernetes service at cluster IP 10.152.183.1.
    lookup = kubectl(
        "exec -ti busybox -- nslookup kubernetes.default.svc.cluster.local"
    )
    assert "10.152.183.1" in lookup
    kubectl("delete -f {}".format(manifest))
def run(self):
    if self._setup:
        # Start from a clean slate: remove any existing state in the cluster.
        self.cleanup(display_errors=False)
        utils.log("Setting up the volume provisioner", as_banner=True)
        utils.kubectl(
            "-n kube-system create secret generic oci-volume-provisioner "
            "--from-file=config.yaml=" + self._get_oci_config_file(),
            exit_on_error=False)
        for resource in self._k8sResources:
            utils.kubectl("create -f " + resource, exit_on_error=False)
        pod_name, _, _ = self._wait_for_pod_status("Running", self.POD_VOLUME)
        self._compartment_id = self._get_compartment_id(pod_name)
def validate_keda():
    """Validate keda."""
    wait_for_installation()
    wait_for_pod_state("", "keda", "running", label="app=keda-operator")
    print("KEDA operator up and running.")

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "templates",
        "keda-scaledobject.yaml",
    )
    kubectl("apply -f {}".format(manifest))
    # The ScaledObject from the manifest must be visible in its namespace.
    scaled_objects = kubectl("-n gonuts get scaledobject.keda.sh")
    assert "stan-scaledobject" in scaled_objects
    kubectl("delete -f {}".format(manifest))
def validate_ambassador():
    """Validate the Ambassador API Gateway by creating an ingress rule."""
    if platform.machine() != "x86_64":
        print("Ambassador tests are only relevant in x86 architectures")
        return

    wait_for_pod_state("", "ambassador", "running", label="product=aes")
    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "ingress.yaml"
    )
    update_yaml_with_arch(manifest)
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=microbot")

    # `Ingress`es must be annotated for being recognized by Ambassador.
    for ingress_name in ("microbot-ingress-nip", "microbot-ingress-xip"):
        kubectl(
            "annotate ingress {} kubernetes.io/ingress.class=ambassador".format(
                ingress_name
            )
        )

    common_ingress()
    kubectl("delete -f {}".format(manifest))
def _test_create_volume(self, claim_target, claim_volume_name,
                        availability_domain=None, verify_func=None,
                        storageType=BLOCK_STORAGE, canaryMetricName=None):
    '''Create a volume claim from a configuration file, optionally verify the
    backing volume via the OCI api and a caller-supplied check, then delete
    the claim and confirm the volume is terminated.
    @param claim_target: Path of the volume claim config file to create
    @type claim_target: C{Str}
    @param claim_volume_name: Name of the volume expected to be provisioned
    @type claim_volume_name: C{Str}
    @param availability_domain: Availability domain to look for the volume in
    @type availability_domain: C{Str}
    @param verify_func: Optional callback invoked as
        verify_func(test_id, availability_domain, volume) after creation
    @param storageType: Storage backend type (block storage by default)
    @param canaryMetricName: NOTE(review): accepted but not used in this
        method -- presumably consumed by callers/metrics elsewhere; confirm.
    @return: True on success, False on a failed create/delete verification
    @rtype: C{Bool}'''
    utils.kubectl("create -f " + claim_target, exit_on_error=False)
    volume = self._get_volume_and_wait(claim_volume_name)
    utils.log("Created volume with name: %s" % str(volume))
    if self._check_oci:
        utils.log(
            "Querying the OCI api to make sure a volume with this name exists..."
        )
        if not self._wait_for_volume_to_create(
                volume, storageType=storageType,
                availability_domain=availability_domain):
            utils.log("Failed to find volume with name: " + volume)
            return False
        utils.log("Volume: " + volume + " is present and available")
    if verify_func:
        verify_func(self._test_id, availability_domain, volume)
    utils.log("Delete the volume claim")
    utils.kubectl("delete -f " + claim_target, exit_on_error=False)
    if self._check_oci:
        utils.log(
            "Querying the OCI api to make sure a volume with this name now doesnt exist..."
        )
        self._wait_for_volume_to_delete(
            volume, storageType=storageType,
            availability_domain=availability_domain)
        # _volume_exists is queried for the terminated lifecycle state: if the
        # volume is NOT found in that state it is considered still alive.
        if not self._volume_exists(
                volume, self.LIFECYCLE_STATE_OFF[storageType],
                compartment_id=self._compartment_id,
                storageType=storageType,
                availability_domain=availability_domain):
            utils.log("Volume with name: " + volume + " still exists")
            return False
        utils.log("Volume: " + volume + " has now been terminated")
    return True
def common_ingress():
    """
    Perform the Ingress validations that are common for all the Ingress
    controllers: both the xip.io and nip.io records must appear, and at
    least one of the two hostnames must serve the microbot page.
    """
    # Wait for both ingress records to show up.
    attempt = 50
    while attempt >= 0:
        output = kubectl("get ing")
        if "microbot.127.0.0.1.xip.io" in output:
            break
        time.sleep(5)
        attempt -= 1
    assert "microbot.127.0.0.1.xip.io" in output

    attempt = 50
    while attempt >= 0:
        output = kubectl("get ing")
        if "microbot.127.0.0.1.nip.io" in output:
            break
        time.sleep(5)
        attempt -= 1
    assert "microbot.127.0.0.1.nip.io" in output

    service_ok = False
    attempt = 50
    while attempt >= 0:
        try:
            resp = requests.get("http://microbot.127.0.0.1.xip.io/")
            if resp.status_code == 200 and "microbot.png" in resp.content.decode(
                    "utf-8"):
                service_ok = True
                break
        except requests.RequestException:
            time.sleep(5)
        attempt -= 1

    # Fall back to the nip.io hostname.  Testing service_ok (instead of the
    # original `resp.status_code != 200`) avoids a NameError when every
    # xip.io request raised and `resp` was never bound.
    if not service_ok:
        attempt = 50
        while attempt >= 0:
            try:
                resp = requests.get("http://microbot.127.0.0.1.nip.io/")
                if resp.status_code == 200 and "microbot.png" in resp.content.decode(
                        "utf-8"):
                    service_ok = True
                    break
            except requests.RequestException:
                time.sleep(5)
            attempt -= 1

    assert service_ok
def validate_dashboard():
    """
    Validate the dashboard addon by looking at the grafana URL.
    """
    wait_for_pod_state("", "kube-system", "running", label="k8s-app=influxGrafana")
    cluster_info = kubectl("cluster-info")
    # Cluster info output is colored so we better search for the port in the
    # url pattern instead of trying to extract the url substring.
    regex = "http://127.0.0.1:([0-9]+)/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy"
    grafana_pattern = re.compile(regex)
    port_search = None
    for url in cluster_info.split():
        port_search = grafana_pattern.search(url)
        if port_search:
            break
    # The original `assert grafana_url` could never fail (a non-empty string
    # is always truthy); what actually needs checking is that the port was
    # found at all, otherwise .group() below raises AttributeError.
    assert port_search, "Grafana proxy URL not found in cluster-info output"
    grafana_url = "http://127.0.0.1:{}" \
                  "/api/v1/namespaces/kube-system/services/" \
                  "monitoring-grafana/proxy".format(port_search.group(1))

    # Poll until grafana answers.
    attempt = 50
    while attempt >= 0:
        resp = requests.get(grafana_url)
        if resp.status_code == 200:
            break
        time.sleep(2)
        attempt -= 1
    assert resp.status_code == 200
def validate_dns_dashboard():
    """
    Validate the dashboard addon by looking at the grafana URL.
    Validate DNS by starting a busy box and nslookuping the kubernetes default
    service.
    """
    wait_for_pod_state("", "kube-system", "running", label="k8s-app=influxGrafana")
    cluster_info = kubectl("cluster-info")
    # Cluster info output is colored so we better search for the port in the
    # url pattern instead of trying to extract the url substring.
    regex = "http(.?)://127.0.0.1:([0-9]+)/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy"
    grafana_pattern = re.compile(regex)
    port_search = None
    for url in cluster_info.split():
        port_search = grafana_pattern.search(url)
        if port_search:
            break
    # The original `assert grafana_url` could never fail (a non-empty string
    # is always truthy); assert the match itself so a missing proxy URL fails
    # cleanly instead of raising AttributeError on .group() below.
    assert port_search, "Grafana proxy URL not found in cluster-info output"
    grafana_url = "http{}://127.0.0.1:{}" \
                  "/api/v1/namespaces/kube-system/services/" \
                  "monitoring-grafana/proxy".format(port_search.group(1),
                                                    port_search.group(2))

    attempt = 50
    while attempt >= 0:
        resp = requests.get(grafana_url, verify=False)
        # Plain HTTP is expected to answer 200; HTTPS answers 401 until
        # authenticated -- both count as "up".
        if (resp.status_code == 200 and grafana_url.startswith('http://')) or \
                (resp.status_code == 401 and grafana_url.startswith('https://')):
            break
        time.sleep(2)
        attempt -= 1
    assert resp.status_code in [200, 401]
def _get_pod_infos(self, pod_type):
    '''Retrieve pod information from kube-system
    @param pod_type: Pod type to search for
    @type pod_type: C{Str}
    @return: Tuple containing the name of the resource, its status and the
    node it's running on
    @rtype: C{Tuple}'''
    # Volume-provisioner pods live in kube-system; controller pods are listed
    # from the current namespace.
    _namespace = "-n kube-system" if pod_type == self.POD_VOLUME else ""
    stdout = utils.kubectl(_namespace + " get pods -o wide")
    infos = []
    for line in stdout.split("\n"):
        line_array = line.split()
        if len(line_array) > 0:
            name = line_array[0]
            # Columns 2 and 6 of `kubectl get pods -o wide` are STATUS and
            # NODE respectively -- this parsing depends on that exact layout.
            if name.startswith('oci-volume-provisioner'
                               ) and pod_type == self.POD_VOLUME:
                status = line_array[2]
                node = line_array[6]
                infos.append((name, status, node))
            # Controller pods are matched by the per-test-id name prefix.
            if re.match(r"nginx-controller-" + self._test_id + ".*",
                        line) and pod_type == self.POD_CONTROLLER:
                name = line_array[0]
                status = line_array[2]
                node = line_array[6]
                infos.append((name, status, node))
            if re.match(r"demooci-fss-pod-" + self._test_id + ".*",
                        line) and pod_type == self.POD_CONTROLLER:
                name = line_array[0]
                status = line_array[2]
                node = line_array[6]
                infos.append((name, status, node))
    return infos
def validate_registry():
    """Validate the private registry."""
    wait_for_pod_state("", "container-registry", "running", label="app=registry")

    # Push a busybox image into the local registry...
    docker("pull busybox")
    docker("tag busybox localhost:32000/my-busybox")
    docker("push localhost:32000/my-busybox")

    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "bbox-local.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("busybox", "default", "running")
    # ...and verify the pod was started from the registry-hosted image.
    description = kubectl("describe po busybox")
    assert "localhost:32000/my-busybox" in description
    kubectl("delete -f {}".format(manifest))
def validate_dns_dashboard():
    """
    Validate the dashboard addon by trying to access the kubernetes dashboard.
    The dashboard will return an HTML indicating that it is up and running.
    """
    wait_for_pod_state("", "kube-system", "running",
                       label="k8s-app=kubernetes-dashboard")
    wait_for_pod_state("", "kube-system", "running",
                       label="k8s-app=dashboard-metrics-scraper")

    remaining = 30
    while remaining > 0:
        try:
            page = kubectl(
                "get "
                "--raw "
                "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
            )
            if "Kubernetes Dashboard" in page:
                break
        except subprocess.CalledProcessError:
            # The proxy endpoint may not be reachable yet; keep retrying.
            pass
        time.sleep(10)
        remaining -= 1

    assert remaining > 0
def _get_region():
    '''Return the zone label of the first cluster node, or exit on failure.'''
    raw = utils.kubectl("get nodes -o json", log_stdout=False)
    items = json.loads(raw)['items']
    if items:
        labels = items[0]['metadata']['labels']
        return labels['failure-domain.beta.kubernetes.io/zone']
    utils.log("Region lookup failed")
    utils.finish_with_exit_code(1)
def validate_ingress():
    """
    Validate ingress by creating an ingress rule for the microbot app and
    fetching it through the xip.io / nip.io hostnames.
    """
    # Older deployments run the ingress controller in the default namespace;
    # newer ones use the dedicated "ingress" namespace.
    daemonset = kubectl("get ds")
    if "nginx-ingress-microk8s-controller" in daemonset:
        wait_for_pod_state("", "default", "running", label="app=default-http-backend")
        wait_for_pod_state("", "default", "running", label="name=nginx-ingress-microk8s")
    else:
        wait_for_pod_state("", "ingress", "running", label="name=nginx-ingress-microk8s")

    here = os.path.dirname(os.path.abspath(__file__))
    manifest = os.path.join(here, "templates", "ingress.yaml")
    update_yaml_with_arch(manifest)
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=microbot")

    # Wait for both ingress records to show up.
    attempt = 50
    while attempt >= 0:
        output = kubectl("get ing")
        if "microbot.127.0.0.1.xip.io" in output:
            break
        time.sleep(5)
        attempt -= 1
    assert "microbot.127.0.0.1.xip.io" in output

    attempt = 50
    while attempt >= 0:
        output = kubectl("get ing")
        if "microbot.127.0.0.1.nip.io" in output:
            break
        time.sleep(5)
        attempt -= 1
    assert "microbot.127.0.0.1.nip.io" in output

    service_ok = False
    attempt = 50
    while attempt >= 0:
        try:
            # Catch only requests failures; the original bare `except:` also
            # swallowed KeyboardInterrupt/SystemExit.
            resp = requests.get("http://microbot.127.0.0.1.xip.io/")
            if resp.status_code == 200 and "microbot.png" in resp.content.decode("utf-8"):
                service_ok = True
                break
        except requests.RequestException:
            time.sleep(5)
        attempt -= 1

    # Fall back to the nip.io hostname.  Testing service_ok (instead of the
    # original `resp.status_code != 200`) avoids a NameError when every
    # xip.io request raised and `resp` was never bound.
    if not service_ok:
        attempt = 50
        while attempt >= 0:
            try:
                resp = requests.get("http://microbot.127.0.0.1.nip.io/")
                if resp.status_code == 200 and "microbot.png" in resp.content.decode("utf-8"):
                    service_ok = True
                    break
            except requests.RequestException:
                time.sleep(5)
            attempt -= 1

    assert service_ok
    kubectl("delete -f {}".format(manifest))
def validate_coredns_config(ip_ranges="8.8.8.8,1.1.1.1"):
    """Validate that the coredns Corefile forwards to the expected upstreams."""
    corefile = kubectl(
        "get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}'"
    )
    # The forward directive must list every configured upstream, in order.
    expected_forward_val = " ".join(["forward ."] + ip_ranges.split(","))
    assert expected_forward_val in corefile
def validate_cilium():
    """Validate cilium by deploying an nginx pod and checking its endpoint."""
    if platform.machine() != 'x86_64':
        print("Cilium tests are only relevant in x86 architectures")
        return

    wait_for_installation()
    wait_for_pod_state("", "kube-system", "running", label="k8s-app=cilium")
    manifest = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "templates", "nginx-pod.yaml"
    )
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=nginx")
    # The nginx pod must show up in cilium's endpoint list.
    endpoints = cilium('endpoint list -o json')
    assert "nginx" in endpoints
    kubectl("delete -f {}".format(manifest))