def test_dynamic(self, value_sc, value_pvc_passed=None, value_pod_passed=None, value_clone_passed=None):
    """Exercise dynamic provisioning end to end for one storage class.

    Creates a storage class from ``value_sc``, then for every PVC template in
    ``value_pvc_passed`` creates/validates a PVC and, if it binds, runs every pod
    template in ``value_pod_passed`` against it (optionally expanding and cloning
    the volume).  All created objects are recorded in ``created_objects`` and
    torn down at the end via ``cleanup.clean_with_created_objects``.

    NOTE(review): the source was flattened onto one line; the indentation below
    is reconstructed from the statement flow — confirm against upstream.

    :param value_sc: dict of storage-class parameters (may carry "reason",
        "permissions", "gid", "uid" keys that influence checks below)
    :param value_pvc_passed: list of PVC parameter dicts; defaults to a deep
        copy of ``self.value_pvc`` so the class-level template is not mutated
    :param value_pod_passed: list of pod parameter dicts; defaults to a deep
        copy of ``self.value_pod``
    :param value_clone_passed: clone parameters; cloning is skipped when None
    """
    created_objects = get_cleanup_dict()
    # Deep-copy the instance-level defaults so per-test mutations (e.g. gid/uid
    # injection below) cannot leak into other tests.
    if value_pvc_passed is None:
        value_pvc_passed = copy.deepcopy(self.value_pvc)
    if value_pod_passed is None:
        value_pod_passed = copy.deepcopy(self.value_pod)
    # Storage-class "permissions" requires a minimum Spectrum Scale version;
    # skip (not fail) the whole test when the backend cannot support it.
    if "permissions" in value_sc.keys() and not(ff.feature_available("permissions")):
        LOGGER.warning("Min required Spectrum Scale version for permissions in storageclass support with CSI is 5.1.1-2")
        LOGGER.warning("Skipping Testcase")
        return
    LOGGER.info(
        f"Testing Dynamic Provisioning with following PVC parameters {str(value_pvc_passed)}")
    sc_name = d.get_random_name("sc")
    config.load_kube_config(config_file=self.kubeconfig)
    d.create_storage_class(value_sc, sc_name, created_objects)
    d.check_storage_class(sc_name)
    for num, _ in enumerate(value_pvc_passed):
        # Work on a copy: "reason" may be injected below and must not persist
        # into the caller-supplied template for the next iteration.
        value_pvc_pass = copy.deepcopy(value_pvc_passed[num])
        # An expected-failure "reason" on the storage class propagates to the
        # PVC check unless the PVC already declares its own.
        if (check_key(value_sc, "reason")):
            if not(check_key(value_pvc_pass, "reason")):
                value_pvc_pass["reason"] = value_sc["reason"]
        LOGGER.info(100*"=")
        pvc_name = d.get_random_name("pvc")
        d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
        val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
        # Only run pod-level checks when the PVC actually bound.
        if val is True:
            if "permissions" in value_sc.keys():
                d.check_permissions_for_pvc(pvc_name, value_sc["permissions"], created_objects)
            for num2, _ in enumerate(value_pod_passed):
                LOGGER.info(100*"-")
                pod_name = d.get_random_name("pod")
                # When the SC carries permissions plus gid/uid, the pod must
                # run with the same ids for the permission check to hold.
                if value_sc.keys() >= {"permissions", "gid", "uid"}:
                    value_pod_passed[num2]["gid"] = value_sc["gid"]
                    value_pod_passed[num2]["uid"] = value_sc["uid"]
                d.create_pod(value_pod_passed[num2], pvc_name, pod_name, created_objects, self.image_name)
                d.check_pod(value_pod_passed[num2], pod_name, created_objects)
                if "volume_expansion_storage" in value_pvc_pass:
                    d.expand_and_check_pvc(sc_name, pvc_name, value_pvc_pass, "volume_expansion_storage",
                                           pod_name, value_pod_passed[num2], created_objects)
                if value_clone_passed is not None:
                    d.clone_and_check_pvc(sc_name, value_sc, pvc_name, pod_name, value_pod_passed[num2], value_clone_passed, created_objects)
                cleanup.delete_pod(pod_name, created_objects)
                cleanup.check_pod_deleted(pod_name, created_objects)
                # RWO volumes with keep_objects cannot be reused by the next
                # pod, so provision a fresh PVC for the remaining pod templates.
                if ((value_pvc_pass["access_modes"] == "ReadWriteOnce") and (self.keep_objects is True) and (num2 < (len(value_pod_passed)-1))):
                    pvc_name = d.get_random_name("pvc")
                    d.create_pvc(value_pvc_pass, sc_name, pvc_name, created_objects)
                    val = d.check_pvc(value_pvc_pass, pvc_name, created_objects)
                    if val is not True:
                        break
        LOGGER.info(100*"-")
        vol_name = cleanup.delete_pvc(pvc_name, created_objects)
        cleanup.check_pvc_deleted(pvc_name, vol_name, created_objects)
    LOGGER.info(100*"=")
    # Final sweep: remove everything recorded during the test run.
    cleanup.clean_with_created_objects(created_objects)
def test_static(self, value_sc, test_restore, value_vs_class=None, number_of_snapshots=None, restore_sc=None, restore_pvc=None):
    """Exercise statically provisioned volume snapshots and (optionally) restore.

    For each PVC template in ``self.value_pvc``: provision a PVC/pod, write a
    file into the volume, take ``number_of_snapshots`` filesystem snapshots
    directly via ``ff.create_snapshot``, wire each one up as a pre-provisioned
    VolumeSnapshotContent + VolumeSnapshot, and — when ``test_restore`` is
    truthy — restore a PVC from the snapshot and verify the file survived.

    NOTE(review): the source was flattened onto one line; the indentation below
    is reconstructed from the statement flow — confirm against upstream.

    :param value_sc: storage-class parameter dict
    :param test_restore: when truthy, restore from each snapshot and verify data
    :param value_vs_class: volume-snapshot-class params; defaults to
        ``self.value_vs_class`` (assigned but apparently unused here —
        NOTE(review): confirm whether this parameter is still needed)
    :param number_of_snapshots: snapshots per PVC; defaults to
        ``self.number_of_snapshots``
    :param restore_sc: optional alternate storage class used for the restore
    :param restore_pvc: optional alternate PVC params used for the restore
    """
    if value_vs_class is None:
        value_vs_class = self.value_vs_class
    if number_of_snapshots is None:
        number_of_snapshots = self.number_of_snapshots
    # Only the first snapshot of each series is restored.
    number_of_restore = 1
    for pvc_value in self.value_pvc:
        created_objects = get_cleanup_dict()
        LOGGER.info("-"*100)
        sc_name = d.get_random_name("sc")
        d.create_storage_class(value_sc, sc_name, created_objects)
        d.check_storage_class(sc_name)
        pvc_name = d.get_random_name("pvc")
        d.create_pvc(pvc_value, sc_name, pvc_name, created_objects)
        d.check_pvc(pvc_value, pvc_name, created_objects)
        pod_name = d.get_random_name("snap-start-pod")
        value_pod = {"mount_path": "/usr/share/nginx/html/scale", "read_only": "False"}
        d.create_pod(value_pod, pvc_name, pod_name, created_objects, self.image_name)
        d.check_pod(value_pod, pod_name, created_objects)
        # Seed a file so the restore path can verify data made it through.
        d.create_file_inside_pod(value_pod, pod_name, created_objects)
        snapshot_name = d.get_random_name("snapshot")
        volume_name = snapshot.get_pv_name(pvc_name, created_objects)
        FSUID = ff.get_FSUID()
        cluster_id = self.cluster_id
        vs_content_name = d.get_random_name("vscontent")
        vs_name = d.get_random_name("vs")
        for num in range(0, number_of_snapshots):
            # Snapshot is taken on the filesystem directly (static flow),
            # then surfaced to Kubernetes through a pre-provisioned
            # VolumeSnapshotContent referencing the snapshot handle.
            ff.create_snapshot(snapshot_name+"-"+str(num), volume_name, created_objects)
            if ff.check_snapshot(snapshot_name+"-"+str(num), volume_name):
                LOGGER.info(f"snapshot {snapshot_name} exists for {volume_name}")
            else:
                LOGGER.error(f"snapshot {snapshot_name} does not exists for {volume_name}")
                cleanup.clean_with_created_objects(created_objects)
                assert False
            # Handle format consumed by the CSI driver:
            # <cluster_id>;<FSUID>;<pv_name>;<snapshot_name>
            snapshot_handle = cluster_id+';'+FSUID+';'+volume_name+';'+snapshot_name+"-"+str(num)
            body_params = {"deletionPolicy": "Retain", "snapshotHandle": snapshot_handle}
            snapshot.create_vs_content(vs_content_name+"-"+str(num), vs_name+"-"+str(num), body_params, created_objects)
            snapshot.check_vs_content(vs_content_name+"-"+str(num))
            snapshot.create_vs_from_content(vs_name+"-"+str(num),
                                            vs_content_name+"-"+str(num), created_objects)
            snapshot.check_vs_detail_for_static(vs_name+"-"+str(num), created_objects)
        # Without backend snapshot support the restore PVC is expected to fail
        # with this reason string.
        if not(ff.feature_available("snapshot")):
            pvc_value["reason"] = "Min required Spectrum Scale version for snapshot support with CSI is 5.1.1-0"
        if test_restore:
            if restore_sc is not None:
                sc_name = "restore-" + sc_name
                d.create_storage_class(restore_sc, sc_name, created_objects)
                d.check_storage_class(sc_name)
            if restore_pvc is not None:
                pvc_value = restore_pvc
            for num in range(0, number_of_restore):
                # vs_name[2:] strips the "vs" prefix to build related names.
                restored_pvc_name = "restored-pvc"+vs_name[2:]+"-"+str(num)
                snap_pod_name = "snap-end-pod"+vs_name[2:]
                d.create_pvc_from_snapshot(pvc_value, sc_name, restored_pvc_name, vs_name+"-"+str(num), created_objects)
                val = d.check_pvc(pvc_value, restored_pvc_name, created_objects)
                if val is True:
                    d.create_pod(value_pod, restored_pvc_name, snap_pod_name, created_objects, self.image_name)
                    d.check_pod(value_pod, snap_pod_name, created_objects)
                    # Verify the file written before the snapshot is present
                    # in the restored volume.
                    d.check_file_inside_pod(value_pod, snap_pod_name, created_objects, volume_name)
                    cleanup.delete_pod(snap_pod_name, created_objects)
                    cleanup.check_pod_deleted(snap_pod_name, created_objects)
                vol_name = cleanup.delete_pvc(restored_pvc_name, created_objects)
                cleanup.check_pvc_deleted(restored_pvc_name, vol_name, created_objects)
        cleanup.clean_with_created_objects(created_objects)
def test_dynamic(self, value_sc, test_restore, value_vs_class=None, number_of_snapshots=None, reason=None, restore_sc=None, restore_pvc=None, value_pod=None, value_pvc=None, value_clone_passed=None):
    """Exercise dynamically provisioned volume snapshots and (optionally) restore.

    For each PVC template in ``value_pvc``: provision a PVC/pod, write a file,
    create a VolumeSnapshotClass and ``number_of_snapshots`` VolumeSnapshots
    through the CSI driver, then — when ``test_restore`` is truthy and the
    backend supports snapshots — restore a PVC from the snapshot, verify the
    file, and optionally expand/clone the restored volume.

    NOTE(review): the source was flattened onto one line; the indentation below
    is reconstructed from the statement flow — confirm against upstream.

    :param value_sc: storage-class parameter dict (may carry "permissions",
        "gid", "uid")
    :param test_restore: when truthy, restore from the snapshot and verify data;
        forced off when the backend lacks snapshot support
    :param value_vs_class: volume-snapshot-class params; defaults to
        ``self.value_vs_class``
    :param number_of_snapshots: snapshots to create; defaults to
        ``self.number_of_snapshots``
    :param reason: expected-failure reason for snapshot checks; auto-filled when
        snapshot support is unavailable
    :param restore_sc: optional alternate storage class for the restore
    :param restore_pvc: optional alternate PVC params for the restore
    :param value_pod: pod parameter dict; a default nginx mount is used when None
    :param value_pvc: list of PVC parameter dicts; defaults to a deep copy of
        ``self.value_pvc``
    :param value_clone_passed: clone parameters; cloning is skipped when None
    """
    if value_vs_class is None:
        value_vs_class = self.value_vs_class
    if number_of_snapshots is None:
        number_of_snapshots = self.number_of_snapshots
    # Only the first snapshot of each series is restored.
    number_of_restore = 1
    # Storage-class "permissions" requires a minimum Spectrum Scale version;
    # skip (not fail) the whole test when the backend cannot support it.
    if "permissions" in value_sc.keys() and not(ff.feature_available("permissions")):
        LOGGER.warning("Min required Spectrum Scale version for permissions in storageclass support with CSI is 5.1.1-2")
        LOGGER.warning("Skipping Testcase")
        return
    # Deep copy so per-test mutations do not leak into the class-level template.
    if value_pvc is None:
        value_pvc = copy.deepcopy(self.value_pvc)
    for pvc_value in value_pvc:
        created_objects = get_cleanup_dict()
        LOGGER.info("-"*100)
        sc_name = d.get_random_name("sc")
        d.create_storage_class(value_sc, sc_name, created_objects)
        d.check_storage_class(sc_name)
        pvc_name = d.get_random_name("pvc")
        d.create_pvc(pvc_value, sc_name, pvc_name, created_objects)
        val = d.check_pvc(pvc_value, pvc_name, created_objects)
        if val is True and "permissions" in value_sc.keys():
            d.check_permissions_for_pvc(pvc_name, value_sc["permissions"], created_objects)
        pod_name = d.get_random_name("snap-start-pod")
        if value_pod is None:
            value_pod = {"mount_path": "/usr/share/nginx/html/scale", "read_only": "False"}
        # When the SC carries permissions plus gid/uid, the pod must run with
        # the same ids for the permission check to hold.
        if value_sc.keys() >= {"permissions", "gid", "uid"}:
            value_pod["gid"] = value_sc["gid"]
            value_pod["uid"] = value_sc["uid"]
        d.create_pod(value_pod, pvc_name, pod_name, created_objects, self.image_name)
        d.check_pod(value_pod, pod_name, created_objects)
        # Seed a file so the restore path can verify data made it through.
        d.create_file_inside_pod(value_pod, pod_name, created_objects)
        if "presnap_volume_expansion_storage" in pvc_value:
            d.expand_and_check_pvc(sc_name, pvc_name, pvc_value, "presnap_volume_expansion_storage",
                                   pod_name, value_pod, created_objects)
        vs_class_name = d.get_random_name("vsclass")
        snapshot.create_vs_class(vs_class_name, value_vs_class, created_objects)
        snapshot.check_vs_class(vs_class_name)
        # Without backend snapshot support the VolumeSnapshot checks expect
        # this failure reason, and the restore leg is disabled entirely.
        if not(ff.feature_available("snapshot")):
            if reason is None:
                reason = "Min required Spectrum Scale version for snapshot support with CSI is 5.1.1-0"
            test_restore = False
        vs_name = d.get_random_name("vs")
        for num in range(0, number_of_snapshots):
            snapshot.create_vs(vs_name+"-"+str(num), vs_class_name, pvc_name, created_objects)
            snapshot.check_vs_detail(vs_name+"-"+str(num), pvc_name, value_vs_class, reason, created_objects)
        if test_restore:
            restore_sc_name = sc_name
            if restore_sc is not None:
                restore_sc_name = "restore-" + restore_sc_name
                d.create_storage_class(restore_sc, restore_sc_name, created_objects)
                d.check_storage_class(restore_sc_name)
            else:
                # Reuse the original SC params so a later clone check sees the
                # storage class actually backing the restored PVC.
                restore_sc = value_sc
            if restore_pvc is not None:
                pvc_value = restore_pvc
            for num in range(0, number_of_restore):
                # vs_name[2:] strips the "vs" prefix to build related names.
                restored_pvc_name = "restored-pvc"+vs_name[2:]+"-"+str(num)
                snap_pod_name = "snap-end-pod"+vs_name[2:]
                d.create_pvc_from_snapshot(pvc_value, restore_sc_name, restored_pvc_name, vs_name+"-"+str(num), created_objects)
                val = d.check_pvc(pvc_value, restored_pvc_name, created_objects)
                if val is True and "permissions" in value_sc.keys():
                    # NOTE(review): this checks pvc_name (the source PVC), not
                    # restored_pvc_name — confirm that is intentional.
                    d.check_permissions_for_pvc(pvc_name, value_sc["permissions"], created_objects)
                if val is True:
                    d.create_pod(value_pod, restored_pvc_name, snap_pod_name, created_objects, self.image_name)
                    d.check_pod(value_pod, snap_pod_name, created_objects)
                    # Verify the pre-snapshot file exists in the restored volume.
                    d.check_file_inside_pod(value_pod, snap_pod_name, created_objects)
                    if "postsnap_volume_expansion_storage" in pvc_value:
                        d.expand_and_check_pvc(restore_sc_name, restored_pvc_name, pvc_value, "postsnap_volume_expansion_storage",
                                               snap_pod_name, value_pod, created_objects)
                    if "post_presnap_volume_expansion_storage" in pvc_value:
                        d.expand_and_check_pvc(sc_name, pvc_name, pvc_value, "post_presnap_volume_expansion_storage",
                                               pod_name, value_pod, created_objects)
                    if value_clone_passed is not None:
                        d.clone_and_check_pvc(restore_sc_name, restore_sc, restored_pvc_name, snap_pod_name, value_pod, value_clone_passed, created_objects)
                    cleanup.delete_pod(snap_pod_name, created_objects)
                    cleanup.check_pod_deleted(snap_pod_name, created_objects)
                vol_name = cleanup.delete_pvc(restored_pvc_name, created_objects)
                cleanup.check_pvc_deleted(restored_pvc_name, vol_name, created_objects)
        cleanup.clean_with_created_objects(created_objects)