def sequential_pvc(self, value_sc, num_of_pvc):
    """ creates num_of_pvc PVCs from one storageclass, then one pod per PVC """
    created_objects = get_cleanup_dict()
    sc_name = d.get_random_name("sc")
    config.load_kube_config(config_file=self.kubeconfig)
    d.create_storage_class(value_sc, sc_name, created_objects)
    d.check_storage_class(sc_name)
    pvc_names = []
    number_of_pvc = num_of_pvc
    common_pvc_name = d.get_random_name("pvc")
    for num in range(0, number_of_pvc):
        pvc_names.append(common_pvc_name + "-" + str(num))
    value_pvc_pass = copy.deepcopy(self.value_pvc[0])
    LOGGER.info(100*"-")
    value_pvc_pass["parallel"] = "True"
    for pvc_name in pvc_names:
        LOGGER.info(100*"-")
        d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
    for pvc_name in pvc_names:
        LOGGER.info(100*"-")
        d.check_pvc(value_pvc_pass, pvc_name, created_objects)
    pod_names = []
    for pvc_name in pvc_names:
        LOGGER.info(100*"-")
        pod_name = d.get_random_name("pod")
        pod_names.append(pod_name)
        d.create_pod(self.value_pod[0], pvc_name, pod_name, created_objects, self.image_name)
        d.check_pod(self.value_pod[0], pod_name, created_objects)
    cleanup.clean_with_created_objects(created_objects)
def create_file_inside_pod(value_pod, pod_name, created_objects):
    """ create snaptestfile inside the pod using touch """
    api_instance = client.CoreV1Api()
    LOGGER.info("POD Check : Trying to create snaptestfile on SpectrumScale mount point inside the pod")
    exec_command1 = "touch " + value_pod["mount_path"] + "/snaptestfile"
    exec_command = ['/bin/sh', '-c', exec_command1]
    resp = stream(api_instance.connect_get_namespaced_pod_exec,
                  pod_name,
                  namespace_value,
                  command=exec_command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)
    if resp == "":
        LOGGER.info("file snaptestfile created successfully on SpectrumScale mount point inside the pod")
        return
    LOGGER.error("file snaptestfile not created")
    cleanup.clean_with_created_objects(created_objects)
    assert False
def create_vs_from_content(vs_name, vs_content_name, created_objects):
    """ create volume snapshot vs_name from volume snapshot content vs_content_name """
    class_body = {
        "apiVersion": "snapshot.storage.k8s.io/v1",
        "kind": "VolumeSnapshot",
        "metadata": {
            "name": vs_name
        },
        "spec": {
            "source": {
                "volumeSnapshotContentName": vs_content_name
            }
        }
    }
    custom_object_api_instance = client.CustomObjectsApi()
    try:
        custom_object_api_response = custom_object_api_instance.create_namespaced_custom_object(
            group="snapshot.storage.k8s.io",
            version="v1",
            plural="volumesnapshots",
            body=class_body,
            namespace=namespace_value,
            pretty=True
        )
        LOGGER.debug(custom_object_api_response)
        LOGGER.info(f"Volume Snapshot Create : volume snapshot {vs_name} is created from {vs_content_name}")
        created_objects["vs"].append(vs_name)
    except ApiException as e:
        LOGGER.error(f"Exception when calling CustomObjectsApi->create_namespaced_custom_object: {e}")
        clean_with_created_objects(created_objects)
        assert False
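# create_vs_content is called by test_static further below but is not shown in
# this section. A minimal sketch of the pre-provisioned VolumeSnapshotContent
# body such a helper would submit, assuming the same snapshot.storage.k8s.io/v1
# API and module-level namespace_value; the function name is illustrative, not
# the suite's actual helper.
def sketch_vs_content_body(vs_content_name, vs_name, body_params):
    # snapshotHandle format used elsewhere in this suite:
    # "<cluster_id>;<FSUID>;<volume_name>;<snapshot_name>"
    return {
        "apiVersion": "snapshot.storage.k8s.io/v1",
        "kind": "VolumeSnapshotContent",
        "metadata": {"name": vs_content_name},
        "spec": {
            "deletionPolicy": body_params["deletionPolicy"],
            "driver": "spectrumscale.csi.ibm.com",
            "source": {"snapshotHandle": body_params["snapshotHandle"]},
            "volumeSnapshotRef": {"name": vs_name, "namespace": namespace_value},
        },
    }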
def check_vs_detail_for_static(vs_name, created_objects):
    """ checks volume snapshot vs_name exists and its status is ReadyToUse """
    api_instance = client.CustomObjectsApi()
    try:
        api_response = api_instance.get_namespaced_custom_object(
            group="snapshot.storage.k8s.io",
            version="v1",
            plural="volumesnapshots",
            name=vs_name,
            namespace=namespace_value)
        LOGGER.debug(api_response)
        LOGGER.info(f"Volume Snapshot Check : volume snapshot {vs_name} has been created")
    except ApiException:
        LOGGER.info(f"Volume Snapshot Check : volume snapshot {vs_name} does not exist")
        clean_with_created_objects(created_objects)
        assert False

    if check_snapshot_status(vs_name):
        LOGGER.info("volume snapshot status ReadyToUse is true")
    else:
        LOGGER.error("volume snapshot status ReadyToUse is not true")
        clean_with_created_objects(created_objects)
        assert False
def check_file_inside_pod(value_pod, pod_name, created_objects, volume_name=None):
    """ check snaptestfile inside the pod using ls """
    api_instance = client.CoreV1Api()
    if volume_name is None:
        exec_command1 = "ls " + value_pod["mount_path"]
    else:
        exec_command1 = "ls " + value_pod["mount_path"] + "/" + volume_name + "-data"
    exec_command = ['/bin/sh', '-c', exec_command1]
    resp = stream(api_instance.connect_get_namespaced_pod_exec,
                  pod_name,
                  namespace_value,
                  command=exec_command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)
    if resp[0:12] == "snaptestfile":
        LOGGER.info("POD Check : snaptestfile is successfully restored from snapshot")
        return
    LOGGER.error("snaptestfile is not restored from snapshot")
    cleanup.clean_with_created_objects(created_objects)
    assert False
def create_vs_class(vs_class_name, body_params, created_objects):
    """
    create volume snapshot class with vs_class_name
    body_params contains configurable parameters
    """
    class_body = {
        "apiVersion": "snapshot.storage.k8s.io/v1",
        "kind": "VolumeSnapshotClass",
        "metadata": {
            "name": vs_class_name
        },
        "driver": "spectrumscale.csi.ibm.com",
        "deletionPolicy": body_params["deletionPolicy"]
    }
    custom_object_api_instance = client.CustomObjectsApi()
    try:
        custom_object_api_response = custom_object_api_instance.create_cluster_custom_object(
            group="snapshot.storage.k8s.io",
            version="v1",
            plural="volumesnapshotclasses",
            body=class_body,
            pretty=True)
        LOGGER.debug(custom_object_api_response)
        LOGGER.info(f"Volume Snapshot Class Create : {vs_class_name} is created with {body_params}")
        created_objects["vsclass"].append(vs_class_name)
    except ApiException as e:
        LOGGER.error(f"Exception when calling CustomObjectsApi->create_cluster_custom_object: {e}")
        clean_with_created_objects(created_objects)
        assert False
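# check_vs_class is invoked by the test methods below but not shown in this
# section. A minimal sketch of what such a check could look like, assuming
# VolumeSnapshotClass is the cluster-scoped snapshot.storage.k8s.io/v1 object
# created above; illustrative only, not the suite's actual implementation.
def sketch_check_vs_class(vs_class_name):
    api_instance = client.CustomObjectsApi()
    try:
        api_response = api_instance.get_cluster_custom_object(
            group="snapshot.storage.k8s.io",
            version="v1",
            plural="volumesnapshotclasses",
            name=vs_class_name)
        LOGGER.debug(api_response)
        LOGGER.info(f"Volume Snapshot Class Check : {vs_class_name} exists")
    except ApiException:
        LOGGER.error(f"Volume Snapshot Class Check : {vs_class_name} does not exist")
        assert False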
def check_ds_pod(ds_name, value_ds, created_objects):
    """
    lists the daemonset's pods; returns True only when exactly one pod is
    Running and every other pod fails check_pod with the expected reason
    """
    api_instance = client.CoreV1Api()
    selector = "ownerReferences=" + ds_name
    running_pod_list, pod_list = [], []
    try:
        api_response = api_instance.list_namespaced_pod(
            namespace=namespace_value, pretty=True, label_selector=selector)
        LOGGER.debug(api_response)
        for pod in api_response.items:
            if pod.status.phase == "Running":
                running_pod_list.append(pod.metadata.name)
            else:
                pod_list.append(pod.metadata.name)
    except ApiException as e:
        LOGGER.error(f"Exception when calling CoreV1Api->list_namespaced_pod: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False

    if len(running_pod_list) != 1:
        LOGGER.error(f"running pods are {running_pod_list} , exactly one pod should be running")
        return False
    for pod_name in pod_list:
        check_pod(value_ds, pod_name, created_objects)
    return True
def create_clone_pvc(pvc_values, sc_name, pvc_name, from_pvc_name, created_objects):
    """ creates persistent volume claim pvc_name as a clone of existing pvc from_pvc_name """
    api_instance = client.CoreV1Api()
    pvc_metadata = client.V1ObjectMeta(name=pvc_name)
    pvc_resources = client.V1ResourceRequirements(
        requests={"storage": pvc_values["storage"]})
    pvc_data_source = client.V1TypedLocalObjectReference(
        kind="PersistentVolumeClaim", name=from_pvc_name)
    pvc_spec = client.V1PersistentVolumeClaimSpec(
        access_modes=[pvc_values["access_modes"]],
        resources=pvc_resources,
        storage_class_name=sc_name,
        data_source=pvc_data_source)
    pvc_body = client.V1PersistentVolumeClaim(api_version="v1",
                                              kind="PersistentVolumeClaim",
                                              metadata=pvc_metadata,
                                              spec=pvc_spec)
    try:
        LOGGER.info(
            f'PVC Create from Clone : Creating pvc {pvc_name} with parameters {str(pvc_values)} and storageclass {str(sc_name)} from PVC {from_pvc_name}')
        api_response = api_instance.create_namespaced_persistent_volume_claim(
            namespace=namespace_value, body=pvc_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["clone_pvc"].append(pvc_name)
    except ApiException as e:
        LOGGER.info(f'PVC {pvc_name} creation operation failed')
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
def test_dynamic(self, value_sc, value_pvc_passed=None, value_pod_passed=None, value_clone_passed=None):
    created_objects = get_cleanup_dict()
    if value_pvc_passed is None:
        value_pvc_passed = copy.deepcopy(self.value_pvc)
    if value_pod_passed is None:
        value_pod_passed = copy.deepcopy(self.value_pod)

    if "permissions" in value_sc.keys() and not(ff.feature_available("permissions")):
        LOGGER.warning("Min required Spectrum Scale version for permissions in storageclass support with CSI is 5.1.1-2")
        LOGGER.warning("Skipping Testcase")
        return

    LOGGER.info(f"Testing Dynamic Provisioning with following PVC parameters {str(value_pvc_passed)}")
    sc_name = d.get_random_name("sc")
    config.load_kube_config(config_file=self.kubeconfig)
    d.create_storage_class(value_sc, sc_name, created_objects)
    d.check_storage_class(sc_name)
    for num, _ in enumerate(value_pvc_passed):
        value_pvc_pass = copy.deepcopy(value_pvc_passed[num])
        if (check_key(value_sc, "reason")):
            if not(check_key(value_pvc_pass, "reason")):
                value_pvc_pass["reason"] = value_sc["reason"]
        LOGGER.info(100*"=")
        pvc_name = d.get_random_name("pvc")
        d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
        val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
        if val is True:
            if "permissions" in value_sc.keys():
                d.check_permissions_for_pvc(pvc_name, value_sc["permissions"], created_objects)
            for num2, _ in enumerate(value_pod_passed):
                LOGGER.info(100*"-")
                pod_name = d.get_random_name("pod")
                if value_sc.keys() >= {"permissions", "gid", "uid"}:
                    value_pod_passed[num2]["gid"] = value_sc["gid"]
                    value_pod_passed[num2]["uid"] = value_sc["uid"]
                d.create_pod(value_pod_passed[num2], pvc_name, pod_name, created_objects, self.image_name)
                d.check_pod(value_pod_passed[num2], pod_name, created_objects)
                if "volume_expansion_storage" in value_pvc_pass:
                    d.expand_and_check_pvc(sc_name, pvc_name, value_pvc_pass, "volume_expansion_storage",
                                           pod_name, value_pod_passed[num2], created_objects)
                if value_clone_passed is not None:
                    d.clone_and_check_pvc(sc_name, value_sc, pvc_name, pod_name, value_pod_passed[num2],
                                          value_clone_passed, created_objects)
                cleanup.delete_pod(pod_name, created_objects)
                cleanup.check_pod_deleted(pod_name, created_objects)
                if ((value_pvc_pass["access_modes"] == "ReadWriteOnce") and (self.keep_objects is True) and (num2 < (len(value_pod_passed)-1))):
                    pvc_name = d.get_random_name("pvc")
                    d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
                    val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
                    if val is not True:
                        break
        LOGGER.info(100*"-")
        vol_name = cleanup.delete_pvc(pvc_name, created_objects)
        cleanup.check_pvc_deleted(pvc_name, vol_name, created_objects)
    LOGGER.info(100*"=")
    cleanup.clean_with_created_objects(created_objects)
def create_pvc_from_snapshot(pvc_values, sc_name, pvc_name, snap_name, created_objects):
    """
    creates persistent volume claim from snapshot

    Args:
        param1: pvc_values - values required for creation of pvc
        param2: sc_name - name of storage class the pvc is associated with
        param3: pvc_name - name of pvc to be created
        param4: snap_name - name of snapshot to recover data from

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    api_instance = client.CoreV1Api()
    pvc_metadata = client.V1ObjectMeta(name=pvc_name)
    pvc_resources = client.V1ResourceRequirements(
        requests={"storage": pvc_values["storage"]})
    pvc_data_source = client.V1TypedLocalObjectReference(
        api_group="snapshot.storage.k8s.io", kind="VolumeSnapshot", name=snap_name)
    pvc_spec = client.V1PersistentVolumeClaimSpec(
        access_modes=[pvc_values["access_modes"]],
        resources=pvc_resources,
        storage_class_name=sc_name,
        data_source=pvc_data_source)
    pvc_body = client.V1PersistentVolumeClaim(api_version="v1",
                                              kind="PersistentVolumeClaim",
                                              metadata=pvc_metadata,
                                              spec=pvc_spec)
    try:
        LOGGER.info(
            f'PVC Create from snapshot : Creating pvc {pvc_name} with parameters {str(pvc_values)} and storageclass {str(sc_name)}')
        api_response = api_instance.create_namespaced_persistent_volume_claim(
            namespace=namespace_value, body=pvc_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["restore_pvc"].append(pvc_name)
    except ApiException as e:
        LOGGER.info(f'PVC {pvc_name} creation operation failed')
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
def test_dynamic(self, value_sc, test_restore, value_vs_class=None, number_of_snapshots=None, reason=None):
    if value_vs_class is None:
        value_vs_class = self.value_vs_class
    if number_of_snapshots is None:
        number_of_snapshots = self.number_of_snapshots
    number_of_restore = 1

    for pvc_value in self.value_pvc:
        created_objects = get_cleanup_dict()
        LOGGER.info("-"*100)
        sc_name = d.get_random_name("sc")
        d.create_storage_class(value_sc, sc_name, created_objects)
        d.check_storage_class(sc_name)

        pvc_name = d.get_random_name("pvc")
        d.create_pvc(pvc_value, sc_name, pvc_name, created_objects)
        d.check_pvc(pvc_value, pvc_name, created_objects)

        pod_name = d.get_random_name("snap-start-pod")
        value_pod = {"mount_path": "/usr/share/nginx/html/scale", "read_only": "False"}
        d.create_pod(value_pod, pvc_name, pod_name, created_objects, self.image_name)
        d.check_pod(value_pod, pod_name, created_objects)
        d.create_file_inside_pod(value_pod, pod_name, created_objects)

        vs_class_name = d.get_random_name("vsclass")
        snapshot.create_vs_class(vs_class_name, value_vs_class, created_objects)
        snapshot.check_vs_class(vs_class_name)

        vs_name = d.get_random_name("vs")
        for num in range(0, number_of_snapshots):
            snapshot.create_vs(vs_name+"-"+str(num), vs_class_name, pvc_name, created_objects)
            snapshot.check_vs_detail(vs_name+"-"+str(num), pvc_name, value_vs_class, reason, created_objects)

        if not(ff.snapshot_restore_available()):
            pvc_value["reason"] = "Min required Spectrum Scale version is 5.0.5.2"

        if test_restore:
            for num in range(0, number_of_restore):
                restored_pvc_name = "restored-pvc"+vs_name[2:]+"-"+str(num)
                snap_pod_name = "snap-end-pod"+vs_name[2:]
                d.create_pvc_from_snapshot(pvc_value, sc_name, restored_pvc_name, vs_name+"-"+str(num), created_objects)
                val = d.check_pvc(pvc_value, restored_pvc_name, created_objects)
                if val is True:
                    d.create_pod(value_pod, restored_pvc_name, snap_pod_name, created_objects, self.image_name)
                    d.check_pod(value_pod, snap_pod_name, created_objects)
                    d.check_file_inside_pod(value_pod, snap_pod_name, created_objects)
                    cleanup.delete_pod(snap_pod_name, created_objects)
                    cleanup.check_pod_deleted(snap_pod_name, created_objects)
                vol_name = cleanup.delete_pvc(restored_pvc_name, created_objects)
                cleanup.check_pvc_deleted(restored_pvc_name, vol_name, created_objects)

        cleanup.clean_with_created_objects(created_objects)
def create_pv(pv_values, pv_name, created_objects, sc_name=""):
    """
    creates persistent volume

    Args:
        param1: pv_values - values required for creation of pv
        param2: pv_name - name of pv to be created
        param3: created_objects - dict of objects to clean up on failure
        param4: sc_name - name of storage class the pv is associated with

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    api_instance = client.CoreV1Api()
    pv_metadata = client.V1ObjectMeta(name=pv_name)
    pv_csi = client.V1CSIPersistentVolumeSource(
        driver="spectrumscale.csi.ibm.com",
        volume_handle=pv_values["volumeHandle"])
    if pv_values["reclaim_policy"] == "Default":
        pv_spec = client.V1PersistentVolumeSpec(
            access_modes=[pv_values["access_modes"]],
            capacity={"storage": pv_values["storage"]},
            csi=pv_csi,
            storage_class_name=sc_name)
    else:
        pv_spec = client.V1PersistentVolumeSpec(
            access_modes=[pv_values["access_modes"]],
            capacity={"storage": pv_values["storage"]},
            csi=pv_csi,
            persistent_volume_reclaim_policy=pv_values["reclaim_policy"],
            storage_class_name=sc_name)
    pv_body = client.V1PersistentVolume(api_version="v1",
                                        kind="PersistentVolume",
                                        metadata=pv_metadata,
                                        spec=pv_spec)
    try:
        LOGGER.info(f'PV Create : Creating PV {pv_name} with {pv_values} parameter')
        api_response = api_instance.create_persistent_volume(
            body=pv_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["pv"].append(pv_name)
    except ApiException as e:
        LOGGER.error(f'PV {pv_name} creation failed, hence failing test case')
        LOGGER.error(f"Exception when calling CoreV1Api->create_persistent_volume: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
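# The static-provisioning test below builds pv_values["volumeHandle"] inline.
# A small helper sketch for the handle format this suite uses
# ("<cluster_id>;<FSUID>;path=<absolute path>"); the helper name is
# illustrative, not part of the suite.
def sketch_volume_handle(cluster_id, fsuid, mount_point, dir_name=None):
    # dir_name is omitted for root-volume handles
    path = mount_point if dir_name is None else mount_point + "/" + dir_name
    return cluster_id + ";" + fsuid + ";path=" + path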
def test_dynamic(self, value_sc, value_pvc_passed=None, value_pod_passed=None):
    created_objects = get_cleanup_dict()
    if value_pvc_passed is None:
        value_pvc_passed = self.value_pvc
    if value_pod_passed is None:
        value_pod_passed = self.value_pod
    LOGGER.info(f"Testing Dynamic Provisioning with following PVC parameters {str(value_pvc_passed)}")
    sc_name = d.get_random_name("sc")
    config.load_kube_config(config_file=self.kubeconfig)
    d.create_storage_class(value_sc, sc_name, created_objects)
    d.check_storage_class(sc_name)
    for num in range(0, len(value_pvc_passed)):
        value_pvc_pass = copy.deepcopy(value_pvc_passed[num])
        if (check_key(value_sc, "reason")):
            if not(check_key(value_pvc_pass, "reason")):
                value_pvc_pass["reason"] = value_sc["reason"]
        LOGGER.info(100 * "=")
        pvc_name = d.get_random_name("pvc")
        d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
        val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
        if val is True:
            for num2 in range(0, len(value_pod_passed)):
                LOGGER.info(100 * "-")
                pod_name = d.get_random_name("pod")
                d.create_pod(value_pod_passed[num2], pvc_name, pod_name, created_objects, self.image_name)
                d.check_pod(value_pod_passed[num2], pod_name, created_objects)
                cleanup.delete_pod(pod_name, created_objects)
                cleanup.check_pod_deleted(pod_name, created_objects)
                if value_pvc_pass["access_modes"] == "ReadWriteOnce" and self.keep_objects is True:
                    if num2 < (len(value_pod_passed) - 1):
                        pvc_name = d.get_random_name("pvc")
                        d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
                        val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
                        if val is not True:
                            break
        LOGGER.info(100 * "-")
        vol_name = cleanup.delete_pvc(pvc_name, created_objects)
        cleanup.check_pvc_deleted(pvc_name, vol_name, created_objects)
    LOGGER.info(100 * "=")
    cleanup.clean_with_created_objects(created_objects)
def get_pv_name(pvc_name, created_objects):
    """ returns the name of the pv bound to pvc_name """
    api_instance = client.CoreV1Api()
    try:
        api_response = api_instance.read_namespaced_persistent_volume_claim(
            name=pvc_name, namespace=namespace_value, pretty=True)
        LOGGER.debug(str(api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->read_namespaced_persistent_volume_claim: {e}")
        LOGGER.error(f"PVC {pvc_name} does not exist on the cluster")
        clean_with_created_objects(created_objects)
        assert False
    return api_response.spec.volume_name
def create_pvc(pvc_values, sc_name, pvc_name, created_objects, pv_name=None):
    """
    creates persistent volume claim

    Args:
        param1: pvc_values - values required for creation of pvc
        param2: sc_name - name of storage class the pvc is associated with;
                if "notusingsc", no storage class is used
        param3: pvc_name - name of pvc to be created
        param4: pv_name - name of pv the pvc is associated with;
                if None, no pv is associated

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    api_instance = client.CoreV1Api()
    pvc_metadata = client.V1ObjectMeta(name=pvc_name)
    pvc_resources = client.V1ResourceRequirements(
        requests={"storage": pvc_values["storage"]})
    pvc_spec = client.V1PersistentVolumeClaimSpec(
        access_modes=[pvc_values["access_modes"]],
        resources=pvc_resources,
        storage_class_name=sc_name,
        volume_name=pv_name)
    pvc_body = client.V1PersistentVolumeClaim(api_version="v1",
                                              kind="PersistentVolumeClaim",
                                              metadata=pvc_metadata,
                                              spec=pvc_spec)
    try:
        LOGGER.info(
            f'PVC Create : Creating pvc {pvc_name} with parameters {str(pvc_values)} and storageclass {str(sc_name)}')
        api_response = api_instance.create_namespaced_persistent_volume_claim(
            namespace=namespace_value, body=pvc_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["pvc"].append(pvc_name)
    except ApiException as e:
        LOGGER.info(f'PVC {pvc_name} creation operation failed')
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
def one_pvc_two_pod(self, value_sc, value_pvc_pass, value_ds_pass):
    created_objects = get_cleanup_dict()
    sc_name = d.get_random_name("sc")
    config.load_kube_config(config_file=self.kubeconfig)
    d.create_storage_class(value_sc, sc_name, created_objects)
    d.check_storage_class(sc_name)
    pvc_name = d.get_random_name("pvc")
    d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
    val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
    if val is True:
        ds_name = d.get_random_name("ds")
        d.create_ds(value_ds_pass, ds_name, pvc_name, created_objects)
        d.check_ds(ds_name, value_ds_pass, created_objects)
    cleanup.clean_with_created_objects(created_objects)
def check_permissions_for_pvc(pvc_name, permissions, created_objects):
    """ get pv and verify permissions for pv """
    pv_name = get_pv_for_pvc(pvc_name, created_objects)
    if permissions == "":  # assign default permissions 771
        permissions = "771"
    status = ff.get_and_verify_pv_permissions(pv_name, permissions)
    if status is True:
        LOGGER.info(f'PASS: Testing storageclass parameter permissions={permissions} passed.')
    else:
        LOGGER.error(f'FAIL: Testing storageclass parameter permissions={permissions} failed.')
        cleanup.clean_with_created_objects(created_objects)
        assert False
def check_vs_detail(vs_name, pvc_name, body_params, reason, created_objects):
    """
    checks volume snapshot vs_name exists,
    checks volume snapshot content for vs_name is created,
    checks snapshot is created on spectrum scale
    """
    api_instance = client.CustomObjectsApi()
    try:
        api_response = api_instance.get_namespaced_custom_object(
            group="snapshot.storage.k8s.io",
            version="v1",
            plural="volumesnapshots",
            name=vs_name,
            namespace=namespace_value
        )
        LOGGER.debug(api_response)
        LOGGER.info(f"Volume Snapshot Check : volume snapshot {vs_name} has been created")
    except ApiException:
        LOGGER.info(f"Volume Snapshot Check : volume snapshot {vs_name} does not exist")
        clean_with_created_objects(created_objects)
        assert False

    if check_snapshot_status(vs_name):
        LOGGER.info("volume snapshot status ReadyToUse is true")
    else:
        LOGGER.error("volume snapshot status ReadyToUse is not true")
        if reason is not None:
            LOGGER.info("As failure reason is provided , passing the test")
            return
        clean_with_created_objects(created_objects)
        assert False

    uid_name = api_response["metadata"]["uid"]
    snapcontent_name = "snapcontent-" + uid_name
    snapshot_name = "snapshot-" + uid_name
    time.sleep(2)
    if not(check_vs_content(snapcontent_name)):
        clean_with_created_objects(created_objects)
        assert False
    volume_name = get_pv_name(pvc_name, created_objects)
    if ff.check_snapshot(snapshot_name, volume_name):
        LOGGER.info(f"snapshot {snapshot_name} exists for {volume_name}")
    else:
        LOGGER.error(f"snapshot {snapshot_name} does not exist for {volume_name}")
        clean_with_created_objects(created_objects)
        assert False
    if body_params["deletionPolicy"] == "Retain":
        created_objects["vscontent"].append(snapcontent_name)
        created_objects["scalesnapshot"].append([snapshot_name, volume_name])
def create_storage_class(values, sc_name, created_objects):
    """
    creates storage class

    Args:
        param1: values - storage class parameters
        param2: sc_name - name of storage class to be created
        param3: created_objects - dict of objects to clean up on failure

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    global storage_class_parameters
    api_instance = client.StorageV1Api()
    storage_class_metadata = client.V1ObjectMeta(name=sc_name)
    storage_class_parameters = get_storage_class_parameters(values)
    storage_class_body = client.V1StorageClass(
        api_version="storage.k8s.io/v1",
        kind="StorageClass",
        metadata=storage_class_metadata,
        provisioner="spectrumscale.csi.ibm.com",
        parameters=storage_class_parameters,
        reclaim_policy="Delete")
    try:
        LOGGER.info(
            f'SC Create : creating storageclass {sc_name} with parameters {str(storage_class_parameters)}')
        api_response = api_instance.create_storage_class(
            body=storage_class_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["sc"].append(sc_name)
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling StorageV1Api->create_storage_class: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
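# get_storage_class_parameters is used above but not shown in this section. A
# plausible minimal sketch, assuming it only strips test-harness keys (such as
# the expected-failure "reason") so everything else passes through as CSI
# driver parameters; the suite's real helper may do more filtering.
def sketch_get_storage_class_parameters(values):
    return {key: val for key, val in values.items() if key != "reason"}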
def expand_pvc(pvc_values, sc_name, pvc_name, created_objects, pv_name=None):
    """ expand pvc size by patching its storage request """
    api_instance = client.CoreV1Api()
    pvc_metadata = client.V1ObjectMeta(name=pvc_name)
    pvc_resources = client.V1ResourceRequirements(
        requests={"storage": pvc_values["storage"]})
    pvc_spec = client.V1PersistentVolumeClaimSpec(
        access_modes=[pvc_values["access_modes"]],
        resources=pvc_resources,
        storage_class_name=sc_name,
        volume_name=pv_name)
    pvc_body = client.V1PersistentVolumeClaim(api_version="v1",
                                              kind="PersistentVolumeClaim",
                                              metadata=pvc_metadata,
                                              spec=pvc_spec)
    LOGGER.info(100 * "-")
    try:
        LOGGER.info(
            f'PVC Patch : Patching pvc {pvc_name} with parameters {str(pvc_values)} and storageclass {str(sc_name)}')
        api_response = api_instance.patch_namespaced_persistent_volume_claim(
            name=pvc_name, namespace=namespace_value, body=pvc_body, pretty=True)
        LOGGER.debug(str(api_response))
        time.sleep(30)
    except ApiException as e:
        LOGGER.info(f'PVC {pvc_name} patch operation failed')
        LOGGER.error(
            f"Exception when calling CoreV1Api->patch_namespaced_persistent_volume_claim: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
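# expand_pvc rebuilds the entire V1PersistentVolumeClaimSpec for the patch. A
# strategic-merge patch only needs the field that changes, so an equivalent
# minimal body would be the dict below; the kubernetes client's
# patch_namespaced_persistent_volume_claim accepts plain dict bodies as well.
# Helper name is illustrative.
def sketch_expand_patch_body(new_storage):
    return {"spec": {"resources": {"requests": {"storage": new_storage}}}}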
def one_pvc_two_pod(self, value_sc):
    created_objects = get_cleanup_dict()
    sc_name = d.get_random_name("sc")
    config.load_kube_config(config_file=self.kubeconfig)
    d.create_storage_class(value_sc, sc_name, created_objects)
    d.check_storage_class(sc_name)
    value_pvc_pass = copy.deepcopy(self.value_pvc[0])
    pvc_name = d.get_random_name("pvc")
    d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
    val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
    if val is True:
        pod_name_1 = d.get_random_name("pod")
        d.create_pod(self.value_pod[0], pvc_name, pod_name_1, created_objects, self.image_name)
        d.check_pod(self.value_pod[0], pod_name_1, created_objects)
        pod_name_2 = d.get_random_name("pod")
        d.create_pod(self.value_pod[0], pvc_name, pod_name_2, created_objects, self.image_name)
        d.check_pod(self.value_pod[0], pod_name_2, created_objects)
        cleanup.delete_pod(pod_name_1, created_objects)
        cleanup.check_pod_deleted(pod_name_1, created_objects)
        cleanup.delete_pod(pod_name_2, created_objects)
        cleanup.check_pod_deleted(pod_name_2, created_objects)
    cleanup.clean_with_created_objects(created_objects)
def check_pod_execution(value_pod, pod_name, created_objects):
    """
    checks whether a file can be created inside the pod;
    if the file cannot be created, checks the failure reason,
    and if the reason does not match, asserts

    Args:
        param1: value_pod - values required for creation of pod
        param2: pod_name - name of pod to be checked
        param3: created_objects - dict of objects to clean up on failure

    Returns:
        None

    Raises:
        None
    """
    api_instance = client.CoreV1Api()
    LOGGER.info("POD Check : Trying to create testfile on SpectrumScale mount point inside the pod")
    exec_command1 = "touch " + value_pod["mount_path"] + "/testfile"
    exec_command = ['/bin/sh', '-c', exec_command1]
    resp = stream(api_instance.connect_get_namespaced_pod_exec,
                  pod_name,
                  namespace_value,
                  command=exec_command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)
    if resp == "":
        LOGGER.info("POD Check : Create testfile operation completed successfully")
        LOGGER.info("POD Check : Deleting testfile from pod's SpectrumScale mount point")
        exec_command1 = "rm -rvf " + value_pod["mount_path"] + "/testfile"
        exec_command = ['/bin/sh', '-c', exec_command1]
        resp = stream(api_instance.connect_get_namespaced_pod_exec,
                      pod_name,
                      namespace_value,
                      command=exec_command,
                      stderr=True, stdin=False,
                      stdout=True, tty=False)
        if check_key(value_pod, "reason"):
            cleanup.clean_with_created_objects(created_objects)
            LOGGER.error("Pod should not be able to create file inside the pod as failure REASON provided, so asserting")
            assert False
        return
    if not(check_key(value_pod, "reason")):
        cleanup.clean_with_created_objects(created_objects)
        LOGGER.error(str(resp))
        LOGGER.error("FAILED as reason of failure not provided")
        assert False
    search_result1 = re.search(value_pod["reason"], str(resp))
    search_result2 = re.search("Permission denied", str(resp))
    if search_result1 is not None:
        LOGGER.info(str(search_result1))
    if search_result2 is not None:
        LOGGER.info(str(search_result2))
    if not (search_result1 is None and search_result2 is None):
        LOGGER.info("execution of pod failed with expected reason")
    else:
        cleanup.clean_with_created_objects(created_objects)
        LOGGER.error(str(resp))
        LOGGER.error("execution of pod failed unexpectedly , reason does not match")
        assert False
def test_static(self, pv_value, pvc_value, sc_value=False, wrong=None, root_volume=False):
    config.load_kube_config(config_file=self.kubeconfig)
    created_objects = get_cleanup_dict()
    sc_name = ""
    if sc_value is not False:
        sc_name = d.get_random_name("sc")
        d.create_storage_class(sc_value, sc_name, created_objects)
        d.check_storage_class(sc_name)
    FSUID = ff.get_FSUID()
    cluster_id = self.cluster_id
    if wrong is not None:
        if wrong["id_wrong"] is True:
            cluster_id = int(cluster_id)+1
            cluster_id = str(cluster_id)
        if wrong["FSUID_wrong"] is True:
            FSUID = "AAAA"
    mount_point = ff.get_mount_point()
    if root_volume is False:
        dir_name = d.get_random_name("dir")
        ff.create_dir(dir_name)
        created_objects["dir"].append(dir_name)
        pv_value["volumeHandle"] = cluster_id+";"+FSUID + \
            ";path="+mount_point+"/"+dir_name
    elif root_volume is True:
        pv_value["volumeHandle"] = cluster_id+";"+FSUID + \
            ";path="+mount_point
    if pvc_value == "Default":
        pvc_value = copy.deepcopy(self.value_pvc)
    num_final = len(pvc_value)
    for num in range(0, num_final):
        pv_name = d.get_random_name("pv")
        d.create_pv(pv_value, pv_name, created_objects, sc_name)
        d.check_pv(pv_name)
        value_pvc_pass = copy.deepcopy(pvc_value[num])
        if (check_key(pv_value, "reason")):
            if not(check_key(value_pvc_pass, "reason")):
                value_pvc_pass["reason"] = pv_value["reason"]
        LOGGER.info(100*"=")
        pvc_name = d.get_random_name("pvc")
        d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects, pv_name)
        val = d.check_pvc(value_pvc_pass, pvc_name, created_objects, pv_name)
        if val is True:
            for num2 in range(0, len(self.value_pod)):
                LOGGER.info(100*"-")
                pod_name = d.get_random_name("pod")
                d.create_pod(self.value_pod[num2], pvc_name, pod_name, created_objects, self.image_name)
                d.check_pod(self.value_pod[num2], pod_name, created_objects)
                cleanup.delete_pod(pod_name, created_objects)
                cleanup.check_pod_deleted(pod_name, created_objects)
                if value_pvc_pass["access_modes"] == "ReadWriteOnce" and self.keep_objects is True:
                    break
        LOGGER.info(100*"-")
        vol_name = cleanup.delete_pvc(pvc_name, created_objects)
        cleanup.check_pvc_deleted(pvc_name, vol_name, created_objects)
        cleanup.delete_pv(pv_name, created_objects)
        cleanup.check_pv_deleted(pv_name, created_objects)
    LOGGER.info(100*"=")
    cleanup.clean_with_created_objects(created_objects)
def test_dynamic(self, value_sc, test_restore, value_vs_class=None, number_of_snapshots=None, reason=None,
                 restore_sc=None, restore_pvc=None, value_pod=None, value_pvc=None, value_clone_passed=None):
    if value_vs_class is None:
        value_vs_class = self.value_vs_class
    if number_of_snapshots is None:
        number_of_snapshots = self.number_of_snapshots
    number_of_restore = 1

    if "permissions" in value_sc.keys() and not(ff.feature_available("permissions")):
        LOGGER.warning("Min required Spectrum Scale version for permissions in storageclass support with CSI is 5.1.1-2")
        LOGGER.warning("Skipping Testcase")
        return

    if value_pvc is None:
        value_pvc = copy.deepcopy(self.value_pvc)

    for pvc_value in value_pvc:
        created_objects = get_cleanup_dict()
        LOGGER.info("-"*100)
        sc_name = d.get_random_name("sc")
        d.create_storage_class(value_sc, sc_name, created_objects)
        d.check_storage_class(sc_name)

        pvc_name = d.get_random_name("pvc")
        d.create_pvc(pvc_value, sc_name, pvc_name, created_objects)
        val = d.check_pvc(pvc_value, pvc_name, created_objects)
        if val is True and "permissions" in value_sc.keys():
            d.check_permissions_for_pvc(pvc_name, value_sc["permissions"], created_objects)

        pod_name = d.get_random_name("snap-start-pod")
        if value_pod is None:
            value_pod = {"mount_path": "/usr/share/nginx/html/scale", "read_only": "False"}
        if value_sc.keys() >= {"permissions", "gid", "uid"}:
            value_pod["gid"] = value_sc["gid"]
            value_pod["uid"] = value_sc["uid"]
        d.create_pod(value_pod, pvc_name, pod_name, created_objects, self.image_name)
        d.check_pod(value_pod, pod_name, created_objects)
        d.create_file_inside_pod(value_pod, pod_name, created_objects)

        if "presnap_volume_expansion_storage" in pvc_value:
            d.expand_and_check_pvc(sc_name, pvc_name, pvc_value, "presnap_volume_expansion_storage",
                                   pod_name, value_pod, created_objects)

        vs_class_name = d.get_random_name("vsclass")
        snapshot.create_vs_class(vs_class_name, value_vs_class, created_objects)
        snapshot.check_vs_class(vs_class_name)

        if not(ff.feature_available("snapshot")):
            if reason is None:
                reason = "Min required Spectrum Scale version for snapshot support with CSI is 5.1.1-0"
            test_restore = False

        vs_name = d.get_random_name("vs")
        for num in range(0, number_of_snapshots):
            snapshot.create_vs(vs_name+"-"+str(num), vs_class_name, pvc_name, created_objects)
            snapshot.check_vs_detail(vs_name+"-"+str(num), pvc_name, value_vs_class, reason, created_objects)

        if test_restore:
            restore_sc_name = sc_name
            if restore_sc is not None:
                restore_sc_name = "restore-" + restore_sc_name
                d.create_storage_class(restore_sc, restore_sc_name, created_objects)
                d.check_storage_class(restore_sc_name)
            else:
                restore_sc = value_sc
            if restore_pvc is not None:
                pvc_value = restore_pvc
            for num in range(0, number_of_restore):
                restored_pvc_name = "restored-pvc"+vs_name[2:]+"-"+str(num)
                snap_pod_name = "snap-end-pod"+vs_name[2:]
                d.create_pvc_from_snapshot(pvc_value, restore_sc_name, restored_pvc_name, vs_name+"-"+str(num), created_objects)
                val = d.check_pvc(pvc_value, restored_pvc_name, created_objects)
                if val is True and "permissions" in value_sc.keys():
                    d.check_permissions_for_pvc(pvc_name, value_sc["permissions"], created_objects)
                if val is True:
                    d.create_pod(value_pod, restored_pvc_name, snap_pod_name, created_objects, self.image_name)
                    d.check_pod(value_pod, snap_pod_name, created_objects)
                    d.check_file_inside_pod(value_pod, snap_pod_name, created_objects)
                    if "postsnap_volume_expansion_storage" in pvc_value:
                        d.expand_and_check_pvc(restore_sc_name, restored_pvc_name, pvc_value, "postsnap_volume_expansion_storage",
                                               snap_pod_name, value_pod, created_objects)
                    if "post_presnap_volume_expansion_storage" in pvc_value:
                        d.expand_and_check_pvc(sc_name, pvc_name, pvc_value, "post_presnap_volume_expansion_storage",
                                               pod_name, value_pod, created_objects)
                    if value_clone_passed is not None:
                        d.clone_and_check_pvc(restore_sc_name, restore_sc, restored_pvc_name, snap_pod_name, value_pod,
                                              value_clone_passed, created_objects)
                    cleanup.delete_pod(snap_pod_name, created_objects)
                    cleanup.check_pod_deleted(snap_pod_name, created_objects)
                vol_name = cleanup.delete_pvc(restored_pvc_name, created_objects)
                cleanup.check_pvc_deleted(restored_pvc_name, vol_name, created_objects)

        cleanup.clean_with_created_objects(created_objects)
def create_pod(value_pod, pvc_name, pod_name, created_objects, image_name="nginx:1.19.0"):
    """
    creates pod

    Args:
        param1: value_pod - values required for creation of pod
        param2: pvc_name - name of pvc the pod is associated with
        param3: pod_name - name of pod to be created
        param4: created_objects - dict of objects to clean up on failure
        param5: image_name - name of the pod image (Default:"nginx:1.19.0")

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    if value_pod["read_only"] == "True":
        value_pod["read_only"] = True
    elif value_pod["read_only"] == "False":
        value_pod["read_only"] = False
    api_instance = client.CoreV1Api()
    pod_metadata = client.V1ObjectMeta(name=pod_name, labels={"app": "nginx"})
    pod_volume_mounts = client.V1VolumeMount(
        name="mypvc", mount_path=value_pod["mount_path"])
    pod_ports = client.V1ContainerPort(container_port=80)
    pod_containers = client.V1Container(name="web-server",
                                        image=image_name,
                                        volume_mounts=[pod_volume_mounts],
                                        ports=[pod_ports])
    pod_persistent_volume_claim = client.V1PersistentVolumeClaimVolumeSource(
        claim_name=pvc_name, read_only=value_pod["read_only"])
    pod_volumes = client.V1Volume(
        name="mypvc", persistent_volume_claim=pod_persistent_volume_claim)
    pod_spec = client.V1PodSpec(containers=[pod_containers],
                                volumes=[pod_volumes],
                                node_selector=nodeselector)
    pod_body = client.V1Pod(api_version="v1", kind="Pod",
                            metadata=pod_metadata, spec=pod_spec)
    try:
        LOGGER.info(
            f'POD Create : creating pod {pod_name} using {pvc_name} with {image_name} image')
        api_response = api_instance.create_namespaced_pod(
            namespace=namespace_value, body=pod_body, pretty=True)
        LOGGER.debug(str(api_response))
        if pod_name[0:12] == "snap-end-pod":
            created_objects["restore_pod"].append(pod_name)
        else:
            created_objects["pod"].append(pod_name)
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_pod: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
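# The test methods above copy "uid"/"gid" from the storageclass into value_pod,
# but the create_pod shown here does not consume them. A sketch of how a
# pod-level securityContext could apply them, assuming the values are numeric
# strings; this is illustrative, not necessarily how the suite wires it up.
def sketch_security_context(value_pod):
    if value_pod.keys() >= {"uid", "gid"}:
        # would be passed as security_context= to client.V1PodSpec above
        return client.V1PodSecurityContext(
            run_as_user=int(value_pod["uid"]),
            run_as_group=int(value_pod["gid"]))
    return None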
def check_ds(ds_name, value_ds, created_objects):
    """
    waits for daemonset ds_name pods; passes when all pods are Running, or
    when they fail with the expected reason from value_ds
    """
    read_daemonsets_api_instance = client.AppsV1Api()
    # initialized so the post-loop checks do not reference unbound names if
    # the daemonset could never be read
    current_number_scheduled = desired_number_scheduled = number_available = 0
    num = 0
    while (num < 11):
        try:
            read_daemonsets_api_response = read_daemonsets_api_instance.read_namespaced_daemon_set(
                name=ds_name, namespace=namespace_value, pretty=True)
            LOGGER.debug(read_daemonsets_api_response)
            LOGGER.info(f"Daemonset Check : Checking for daemonset {ds_name}")
            current_number_scheduled = read_daemonsets_api_response.status.current_number_scheduled
            desired_number_scheduled = read_daemonsets_api_response.status.desired_number_scheduled
            number_available = read_daemonsets_api_response.status.number_available
            if number_available == current_number_scheduled == desired_number_scheduled:
                if desired_number_scheduled < 2:
                    LOGGER.error(
                        f"Not enough nodes for this test, only {desired_number_scheduled} nodes are there")
                    cleanup.clean_with_created_objects(created_objects)
                    assert False
                if "reason" in value_ds:
                    LOGGER.error(
                        f"failure reason provided {value_ds} , still all pods are running")
                    cleanup.clean_with_created_objects(created_objects)
                    assert False
                LOGGER.info(
                    f"Daemonset Check : daemonset {ds_name} all {current_number_scheduled} pods are Running")
                return
            time.sleep(20)
            num += 1
            LOGGER.info(f"Daemonset Check : waiting for daemonsets {ds_name}")
        except ApiException:
            time.sleep(20)
            num += 1
            LOGGER.info(f"Daemonset Check : waiting for daemonsets {ds_name}")

    if "reason" not in value_ds:
        LOGGER.error(
            f"Daemonset Check : daemonset {ds_name} {number_available}/{desired_number_scheduled} pods are Running, asserting")
        cleanup.clean_with_created_objects(created_objects)
        assert False
    if desired_number_scheduled < 2:
        LOGGER.error(
            f"Not enough nodes for this test, only {desired_number_scheduled} nodes are there")
        cleanup.clean_with_created_objects(created_objects)
        assert False
    if check_ds_pod(ds_name, value_ds, created_objects):
        LOGGER.info(
            f"Daemonset Check : daemonset {ds_name} pods failed with expected reason {value_ds['reason']}")
        return
    LOGGER.info(
        f"Daemonset Check : daemonset {ds_name} pods did not fail with expected reason {value_ds['reason']}")
    cleanup.clean_with_created_objects(created_objects)
    assert False
def create_ds(ds_values, ds_name, pvc_name, created_objects):
    """ creates daemonset ds_name mounting pvc_name on every selected node """
    api_instance = client.AppsV1Api()
    if ds_values["read_only"] == "True":
        ds_values["read_only"] = True
    elif ds_values["read_only"] == "False":
        ds_values["read_only"] = False
    ds_body = {
        "apiVersion": "apps/v1",
        "kind": "DaemonSet",
        "metadata": {
            "name": ds_name,
            "labels": {
                "app": "nginx",
                "ownerReferences": ds_name
            }
        },
        "spec": {
            "selector": {
                "matchLabels": {
                    "name": "nginx",
                    "ownerReferences": ds_name
                }
            },
            "template": {
                "metadata": {
                    "labels": {
                        "name": "nginx",
                        "ownerReferences": ds_name
                    }
                },
                "spec": {
                    "containers": [{
                        "name": "web-server",
                        "image": "nginxinc/nginx-unprivileged",
                        "volumeMounts": [{
                            "name": "mypvc",
                            "mountPath": ds_values["mount_path"]
                        }]
                    }],
                    "volumes": [{
                        "name": "mypvc",
                        "persistentVolumeClaim": {
                            "claimName": pvc_name,
                            "readOnly": ds_values["read_only"]
                        }
                    }],
                    "nodeSelector": nodeselector
                }
            }
        }
    }
    try:
        LOGGER.info(
            f'Daemonset Create : Creating daemonset {ds_name} with parameters {str(ds_values)} and pvc {str(pvc_name)}')
        api_response = api_instance.create_namespaced_daemon_set(
            namespace=namespace_value, body=ds_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["ds"].append(ds_name)
    except ApiException as e:
        LOGGER.info(
            f'Daemonset Create : Daemonset {ds_name} creation operation failed')
        LOGGER.error(
            f"Exception when calling AppsV1Api->create_namespaced_daemon_set: {e}")
        cleanup.clean_with_created_objects(created_objects)
        assert False
def check_pod(value_pod, pod_name, created_objects):
    """
    checks pod is running or not

    Args:
        param1: value_pod - values required for creation of pod
        param2: pod_name - name of pod to be checked
        param3: created_objects - dict of objects to clean up on failure

    Returns:
        None

    Raises:
        None
    """
    api_instance = client.CoreV1Api()
    con = True
    var = 0
    while (con is True):
        try:
            api_response = api_instance.read_namespaced_pod(
                name=pod_name, namespace=namespace_value, pretty=True)
            LOGGER.debug(str(api_response))
            LOGGER.info(f'POD Check: Checking for pod {pod_name}')
            if api_response.status.phase == "Running":
                LOGGER.info(f'POD Check : POD {pod_name} is Running')
                check_pod_execution(value_pod, pod_name, created_objects)
                con = False
            else:
                var += 1
                if (var > 20):
                    LOGGER.error(f'POD Check : POD {pod_name} is not running')
                    field = "involvedObject.name=" + pod_name
                    reason = api_instance.list_namespaced_event(
                        namespace=namespace_value, pretty=True, field_selector=field)
                    if not(check_key(value_pod, "reason")):
                        LOGGER.error('FAILED as reason of failure not provided')
                        LOGGER.error(f"POD Check : Reason of failure is : {str(reason)}")
                        cleanup.clean_with_created_objects(created_objects)
                        assert False
                    search_result = re.search(value_pod["reason"], str(reason))
                    if search_result is None:
                        LOGGER.error(f'Failed as reason of failure does not match {value_pod["reason"]}')
                        LOGGER.error(f"POD Check : Reason of failure is : {str(reason)}")
                        cleanup.clean_with_created_objects(created_objects)
                        assert False
                    else:
                        LOGGER.info(f'POD failed with expected reason {value_pod["reason"]}')
                        return
                time.sleep(5)
        except ApiException as e:
            LOGGER.error(
                f"Exception when calling CoreV1Api->read_namespaced_pod: {e}")
            LOGGER.error("POD Check : POD does not exist on Cluster")
            cleanup.clean_with_created_objects(created_objects)
            assert False
def test_static(self, value_sc, test_restore, value_vs_class=None, number_of_snapshots=None, restore_sc=None, restore_pvc=None):
    if value_vs_class is None:
        value_vs_class = self.value_vs_class
    if number_of_snapshots is None:
        number_of_snapshots = self.number_of_snapshots
    number_of_restore = 1

    for pvc_value in self.value_pvc:
        created_objects = get_cleanup_dict()
        LOGGER.info("-"*100)
        sc_name = d.get_random_name("sc")
        d.create_storage_class(value_sc, sc_name, created_objects)
        d.check_storage_class(sc_name)

        pvc_name = d.get_random_name("pvc")
        d.create_pvc(pvc_value, sc_name, pvc_name, created_objects)
        d.check_pvc(pvc_value, pvc_name, created_objects)

        pod_name = d.get_random_name("snap-start-pod")
        value_pod = {"mount_path": "/usr/share/nginx/html/scale", "read_only": "False"}
        d.create_pod(value_pod, pvc_name, pod_name, created_objects, self.image_name)
        d.check_pod(value_pod, pod_name, created_objects)
        d.create_file_inside_pod(value_pod, pod_name, created_objects)

        snapshot_name = d.get_random_name("snapshot")
        volume_name = snapshot.get_pv_name(pvc_name, created_objects)
        FSUID = ff.get_FSUID()
        cluster_id = self.cluster_id
        vs_content_name = d.get_random_name("vscontent")

        vs_name = d.get_random_name("vs")
        for num in range(0, number_of_snapshots):
            ff.create_snapshot(snapshot_name+"-"+str(num), volume_name, created_objects)
            if ff.check_snapshot(snapshot_name+"-"+str(num), volume_name):
                LOGGER.info(f"snapshot {snapshot_name}-{num} exists for {volume_name}")
            else:
                LOGGER.error(f"snapshot {snapshot_name}-{num} does not exist for {volume_name}")
                cleanup.clean_with_created_objects(created_objects)
                assert False
            snapshot_handle = cluster_id+';'+FSUID+';'+volume_name+';'+snapshot_name+"-"+str(num)
            body_params = {"deletionPolicy": "Retain", "snapshotHandle": snapshot_handle}
            snapshot.create_vs_content(vs_content_name+"-"+str(num), vs_name+"-"+str(num), body_params, created_objects)
            snapshot.check_vs_content(vs_content_name+"-"+str(num))
            snapshot.create_vs_from_content(vs_name+"-"+str(num), vs_content_name+"-"+str(num), created_objects)
            snapshot.check_vs_detail_for_static(vs_name+"-"+str(num), created_objects)

        if not(ff.feature_available("snapshot")):
            pvc_value["reason"] = "Min required Spectrum Scale version for snapshot support with CSI is 5.1.1-0"

        if test_restore:
            if restore_sc is not None:
                sc_name = "restore-" + sc_name
                d.create_storage_class(restore_sc, sc_name, created_objects)
                d.check_storage_class(sc_name)
            if restore_pvc is not None:
                pvc_value = restore_pvc
            for num in range(0, number_of_restore):
                restored_pvc_name = "restored-pvc"+vs_name[2:]+"-"+str(num)
                snap_pod_name = "snap-end-pod"+vs_name[2:]
                d.create_pvc_from_snapshot(pvc_value, sc_name, restored_pvc_name, vs_name+"-"+str(num), created_objects)
                val = d.check_pvc(pvc_value, restored_pvc_name, created_objects)
                if val is True:
                    d.create_pod(value_pod, restored_pvc_name, snap_pod_name, created_objects, self.image_name)
                    d.check_pod(value_pod, snap_pod_name, created_objects)
                    d.check_file_inside_pod(value_pod, snap_pod_name, created_objects, volume_name)
                    cleanup.delete_pod(snap_pod_name, created_objects)
                    cleanup.check_pod_deleted(snap_pod_name, created_objects)
                vol_name = cleanup.delete_pvc(restored_pvc_name, created_objects)
                cleanup.check_pvc_deleted(restored_pvc_name, vol_name, created_objects)

        cleanup.clean_with_created_objects(created_objects)
def check_pvc(pvc_values, pvc_name, created_objects, pv_name="pvnotavailable"):
    """
    checks pvc is BOUND or not
    need to reduce complexity of this function
    """
    api_instance = client.CoreV1Api()
    con = True
    var = 0
    while (con is True):
        try:
            api_response = api_instance.read_namespaced_persistent_volume_claim(
                name=pvc_name, namespace=namespace_value, pretty=True)
            LOGGER.debug(str(api_response))
            LOGGER.info(f'PVC Check: Checking for pvc {pvc_name}')
        except ApiException as e:
            LOGGER.error(
                f"Exception when calling CoreV1Api->read_namespaced_persistent_volume_claim: {e}")
            LOGGER.info(f"PVC Check : PVC {pvc_name} does not exist on the cluster")
            cleanup.clean_with_created_objects(created_objects)
            assert False
        if api_response.status.phase == "Bound":
            if (check_key(pvc_values, "reason")):
                LOGGER.error(
                    f'PVC Check : {pvc_name} is BOUND but as the failure reason is provided, asserting the test')
                cleanup.clean_with_created_objects(created_objects)
                assert False
            if (pvc_bound_fileset_check(api_response, pv_name, pvc_name)):
                return True
            cleanup.clean_with_created_objects(created_objects)
            assert False
        else:
            var += 1
            time.sleep(5)
            if (check_key(pvc_values, "reason")):
                time_count = 8
            elif (check_key(pvc_values, "parallel")):
                time_count = 60
            else:
                time_count = 20
            if (var > time_count):
                LOGGER.info("PVC Check : PVC is not BOUND, checking if failure reason is expected")
                field = "involvedObject.name=" + pvc_name
                reason = api_instance.list_namespaced_event(
                    namespace=namespace_value, pretty=True, field_selector=field)
                if not(check_key(pvc_values, "reason")):
                    cleanup.clean_with_created_objects(created_objects)
                    LOGGER.error(str(reason))
                    LOGGER.error("FAILED as reason for failure not provided")
                    assert False
                search_result = None
                for item in reason.items:
                    search_result = re.search(pvc_values["reason"], str(item.message))
                    if search_result is not None:
                        break
                if search_result is None:
                    cleanup.clean_with_created_objects(created_objects)
                    LOGGER.error(f"Failed reason : {str(reason)}")
                    LOGGER.error("PVC Check : PVC is not Bound but FAILED reason does not match")
                    assert False
                else:
                    LOGGER.debug(search_result)
                    LOGGER.info(
                        f"PVC Check : PVC is not Bound and FAILED with expected error {pvc_values['reason']}")
                    con = False