def create_clone(self, clone_num):
    """
    Create a single clone of ``self.pvc_obj`` and measure how long it takes.

    Args:
        clone_num (int): sequence number embedded in the generated clone name

    Returns:
        str: the created clone name
        int: the creation time of the clone (in secs.)
        int: the csi creation time of the clone (in secs.)
    """
    start_ts = self.get_time("csi")
    clone_obj = pvc.create_pvc_clone(
        sc_name=self.pvc_obj.backed_sc,
        parent_pvc=self.pvc_obj.name,
        pvc_name=f"pvc-clone-pas-test-{clone_num}",
        clone_yaml=Interfaces_info[self.interface]["clone_yaml"],
        namespace=self.namespace,
        storage_size=f"{self.pvc_obj.size}Gi",
    )
    # Timing data only exists in the provisioner logs once the clone is Bound
    helpers.wait_for_resource_state(clone_obj, constants.STATUS_BOUND, 600)
    clone_obj.reload()

    name = clone_obj.name
    total_create_time = performance_lib.measure_pvc_creation_time(
        self.interface, name, start_ts)
    csi_create_time = performance_lib.csi_pvc_time_measure(
        self.interface, clone_obj, "create", start_ts)

    # The caller only needs the name and the timings; drop the PVC object
    del clone_obj
    return name, total_create_time, csi_create_time
def clone_pvc(self, teardown_factory):
    """
    Create a 2Gi clone of every PVC in ``self.pvc_objs`` and wait for each
    clone to reach the Bound state.

    Args:
        teardown_factory: teardown fixture; each created clone is registered
            with it for cleanup
    """
    for pvc_obj in self.pvc_objs:
        logger.info(
            f"Clone pvc {pvc_obj.name} sc_name={pvc_obj.storageclass.name} size=2Gi, "
            f"access_mode={pvc_obj.access_mode},volume_mode={pvc_obj.get_pvc_vol_mode}"
        )
        # Pick the clone template that matches the parent's backing storage class
        if pvc_obj.backed_sc == constants.CEPHFILESYSTEM_SC:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        else:
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML

        cloned_pvc_obj = pvc.create_pvc_clone(
            sc_name=pvc_obj.backed_sc,
            parent_pvc=pvc_obj.name,
            clone_yaml=clone_yaml,
            namespace=pvc_obj.namespace,
            storage_size="2Gi",
            volume_mode=pvc_obj.get_pvc_vol_mode,
            access_mode=pvc_obj.access_mode,
        )
        teardown_factory(cloned_pvc_obj)
        helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)
        cloned_pvc_obj.reload()
def create_and_delete_clones(self):
    """
    Create ``self.number_of_clones`` clones of ``self.pvc_obj`` one by one,
    waiting for each to reach Bound, then delete them one by one, waiting
    for each deletion (and for the backing PV when the reclaim policy is
    Delete).

    Returns:
        list: the clone PVC objects that were created (and then deleted)
    """
    # Creating the clones one by one and wait until they bound
    logger.info(
        f"Start creating {self.number_of_clones} clones on {self.interface} PVC of size {self.pvc_size} GB."
    )
    clones_list = []
    # 1-based numbering for clone names and log messages
    for index in range(1, self.number_of_clones + 1):
        logger.info(f"Start creation of clone number {index}.")
        cloned_pvc_obj = pvc.create_pvc_clone(
            sc_name=self.pvc_obj.backed_sc,
            parent_pvc=self.pvc_obj.name,
            pvc_name=f"clone-pas-test-{index}",
            clone_yaml=Interfaces_info[self.interface]["clone_yaml"],
            namespace=self.namespace,
            storage_size=self.pvc_size + "Gi",
        )
        helpers.wait_for_resource_state(
            cloned_pvc_obj, constants.STATUS_BOUND, self.timeout
        )
        # TODO: adding flatten for RBD devices
        cloned_pvc_obj.reload()
        clones_list.append(cloned_pvc_obj)
        logger.info(
            f"Clone with name {cloned_pvc_obj.name} for {self.pvc_size} pvc {self.pvc_obj.name} was created."
        )

    # Delete the clones one by one and wait for deletion
    logger.info(
        f"Start deleting {self.number_of_clones} clones on {self.interface} PVC of size {self.pvc_size} GB."
    )
    for index, clone in enumerate(clones_list, start=1):
        # Capture the reclaim policy before deletion so we know whether to
        # also wait for the backing PV to go away
        pvc_reclaim_policy = clone.reclaim_policy
        clone.delete()
        logger.info(
            f"Deletion of clone number {index} , the clone name is {clone.name}."
        )
        clone.ocp.wait_for_delete(clone.name, self.timeout)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(clone.backed_pv)
    return clones_list
def test_pvc_to_pvc_clone(self, interface_type, teardown_factory):
    """
    Create a clone from an existing pvc, verify data is preserved in the
    cloning.

    Flow:
        1. Run fio on the parent pod and record the written file's md5sum.
        2. Clone the parent PVC (yaml template chosen per interface).
        3. Attach a new pod to the clone and assert the file exists there
           with an identical md5sum.
        4. Run fresh IO on the cloned pod to confirm it is usable.

    Args:
        interface_type: storage interface constant; CephFS selects the
            CephFS clone yaml, otherwise the RBD clone yaml is used
        teardown_factory: fixture registering created resources for cleanup
    """
    logger.info(f"Running IO on pod {self.pod_obj.name}")
    file_name = self.pod_obj.name
    logger.info(f"File created during IO {file_name}")
    self.pod_obj.run_io(storage_type="fs", size="500M", fio_filename=file_name)

    # Wait for fio to finish
    self.pod_obj.get_fio_results()
    logger.info(f"Io completed on pod {self.pod_obj.name}.")

    # Verify presence of the file
    file_path = pod.get_file_path(self.pod_obj, file_name)
    logger.info(f"Actual file path on the pod {file_path}")
    assert pod.check_file_existence(
        self.pod_obj, file_path), f"File {file_name} does not exist"
    logger.info(f"File {file_name} exists in {self.pod_obj.name}")

    # Calculate md5sum of the file.
    orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)

    # Create a clone of the existing pvc.
    sc_name = self.pvc_obj.backed_sc
    parent_pvc = self.pvc_obj.name
    clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
    if interface_type == constants.CEPHFILESYSTEM:
        clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
    cloned_pvc_obj = pvc.create_pvc_clone(sc_name, parent_pvc, clone_yaml)
    teardown_factory(cloned_pvc_obj)
    helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)
    cloned_pvc_obj.reload()

    # Create and attach pod to the pvc
    clone_pod_obj = helpers.create_pod(
        interface_type=interface_type,
        pvc_name=cloned_pvc_obj.name,
        namespace=cloned_pvc_obj.namespace,
        pod_dict_path=constants.NGINX_POD_YAML,
    )

    # Confirm that the pod is running
    helpers.wait_for_resource_state(resource=clone_pod_obj,
                                    state=constants.STATUS_RUNNING)
    clone_pod_obj.reload()
    teardown_factory(clone_pod_obj)

    # Verify file's presence on the new pod
    logger.info(f"Checking the existence of {file_name} on cloned pod "
                f"{clone_pod_obj.name}")
    assert pod.check_file_existence(
        clone_pod_obj, file_path), f"File {file_path} does not exist"
    logger.info(f"File {file_name} exists in {clone_pod_obj.name}")

    # Verify contents of the file in the cloned pvc
    # by validating that its md5sum matches the original.
    logger.info(f"Verifying that md5sum of {file_name} "
                f"on pod {self.pod_obj.name} matches with md5sum "
                f"of the same file on restore pod {clone_pod_obj.name}")
    assert pod.verify_data_integrity(
        clone_pod_obj, file_name, orig_md5_sum), "Data integrity check failed"
    logger.info("Data integrity check passed, md5sum are same")

    logger.info("Run IO on new pod")
    clone_pod_obj.run_io(storage_type="fs", size="100M", runtime=10)

    # Wait for IO to finish on the new pod
    clone_pod_obj.get_fio_results()
    logger.info(f"IO completed on pod {clone_pod_obj.name}")
def test_pvc_to_pvc_clone(self, kv_version, kms_provider, pod_factory):
    """
    Test to create a clone from an existing encrypted RBD PVC.
    Verify that the cloned PVC is encrypted and all the data is preserved.

    Flow:
        1. Check each parent pod exposes an encrypted (crypt) block device
           and run block IO on all of them; record each md5sum.
        2. Clone every parent PVC (block volume mode) and attach new pods.
        3. Verify the KMS (Vault) holds a key for every clone's volume
           handle, that each cloned device is encrypted, and that md5sums
           match the parents'.
        4. Run fresh IO on the cloned pods, then delete all pods and PVCs
           and verify the Vault keys were cleaned up.

    Args:
        kv_version: Vault KV secret engine version ("v1"/"v2"); key-deletion
            verification is skipped for v2 on OCS < 4.9
        kms_provider: KMS backend constant; Vault-specific checks run only
            for the Vault provider
        pod_factory: fixture used to create pods on the cloned PVCs
    """
    log.info("Checking for encrypted device and running IO on all pods")
    for vol_handle, pod_obj in zip(self.vol_handles, self.pod_objs):
        # The encrypted device shows up as a "crypt" entry in lsblk
        if pod_obj.exec_sh_cmd_on_pod(
                command=f"lsblk | grep {vol_handle} | grep crypt"):
            log.info(f"Encrypted device found in {pod_obj.name}")
        else:
            raise ResourceNotFoundError(
                f"Encrypted device not found in {pod_obj.name}")
        log.info(f"File created during IO {pod_obj.name}")
        pod_obj.run_io(
            storage_type="block",
            size="500M",
            io_direction="write",
            runtime=60,
            end_fsync=1,
            direct=1,
        )
    log.info("IO started on all pods")

    # Wait for IO completion
    for pod_obj in self.pod_objs:
        pod_obj.get_fio_results()
    log.info("IO completed on all pods")

    cloned_pvc_objs, cloned_vol_handles = ([] for i in range(2))

    # Calculate the md5sum value and create clones of existing PVCs
    log.info("Calculate the md5sum after IO and create clone of all PVCs")
    for pod_obj in self.pod_objs:
        pod_obj.md5sum_after_io = pod.cal_md5sum(
            pod_obj=pod_obj,
            file_name=pod_obj.get_storage_path(storage_type="block"),
            block=True,
        )
        cloned_pvc_obj = pvc.create_pvc_clone(
            self.sc_obj.name,
            pod_obj.pvc.name,
            constants.CSI_RBD_PVC_CLONE_YAML,
            self.proj_obj.namespace,
            volume_mode=constants.VOLUME_MODE_BLOCK,
            access_mode=pod_obj.pvc.access_mode,
        )
        helpers.wait_for_resource_state(cloned_pvc_obj,
                                        constants.STATUS_BOUND)
        cloned_pvc_obj.reload()
        # Carry the parent's md5sum on the clone for the later comparison
        cloned_pvc_obj.md5sum = pod_obj.md5sum_after_io
        cloned_pvc_objs.append(cloned_pvc_obj)
    log.info("Clone of all PVCs created")

    # Create and attach pod to the pvc
    cloned_pod_objs = helpers.create_pods(
        cloned_pvc_objs,
        pod_factory,
        constants.CEPHBLOCKPOOL,
        pods_for_rwx=1,
        status="",
    )

    # Verify the new pods are running
    log.info("Verify the new pods are running")
    for pod_obj in cloned_pod_objs:
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
    log.info("Verified: New pods are running")

    # Verify encryption keys are created for cloned PVCs in Vault
    for pvc_obj in cloned_pvc_objs:
        pv_obj = pvc_obj.backed_pv_obj
        vol_handle = pv_obj.get().get("spec").get("csi").get(
            "volumeHandle")
        cloned_vol_handles.append(vol_handle)
        if kms_provider == constants.VAULT_KMS_PROVIDER:
            if kms.is_key_present_in_path(
                    key=vol_handle, path=self.kms.vault_backend_path):
                log.info(
                    f"Vault: Found key for restore PVC {pvc_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Vault: Key not found for restored PVC {pvc_obj.name}"
                )

    # Verify encrypted device is present and md5sum on all pods
    for vol_handle, pod_obj in zip(cloned_vol_handles, cloned_pod_objs):
        if pod_obj.exec_sh_cmd_on_pod(
                command=f"lsblk | grep {vol_handle} | grep crypt"):
            log.info(f"Encrypted device found in {pod_obj.name}")
        else:
            raise ResourceNotFoundError(
                f"Encrypted device not found in {pod_obj.name}")
        log.info(f"Verifying md5sum on pod {pod_obj.name}")
        pod.verify_data_integrity(
            pod_obj=pod_obj,
            file_name=pod_obj.get_storage_path(storage_type="block"),
            original_md5sum=pod_obj.pvc.md5sum,
            block=True,
        )
        log.info(f"Verified md5sum on pod {pod_obj.name}")

    # Run IO on new pods
    log.info("Starting IO on new pods")
    for pod_obj in cloned_pod_objs:
        pod_obj.run_io(storage_type="block", size="100M", runtime=10)

    # Wait for IO completion on new pods
    log.info("Waiting for IO completion on new pods")
    for pod_obj in cloned_pod_objs:
        pod_obj.get_fio_results()
    log.info("IO completed on new pods.")

    # Delete the restored pods, PVC and snapshots
    log.info("Deleting all pods")
    for pod_obj in cloned_pod_objs + self.pod_objs:
        pod_obj.delete()
        pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
    log.info("Deleting all PVCs")
    for pvc_obj in cloned_pvc_objs + self.pvc_objs:
        pv_obj = pvc_obj.backed_pv_obj
        pvc_obj.delete()
        pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

    if kms_provider == constants.VAULT_KMS_PROVIDER:
        # Verify if the keys for parent and cloned PVCs are deleted from Vault
        if kv_version == "v1" or Version.coerce(
                config.ENV_DATA["ocs_version"]) >= Version.coerce("4.9"):
            log.info(
                "Verify whether the keys for cloned PVCs are deleted from vault"
            )
            for key in cloned_vol_handles + self.vol_handles:
                if not kms.is_key_present_in_path(
                        key=key, path=self.kms.vault_backend_path):
                    log.info(f"Vault: Key deleted for {key}")
                else:
                    raise KMSResourceCleaneupError(
                        f"Vault: Key deletion failed for {key}")
            log.info("All keys from vault were deleted")
def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                         file_size, teardown_factory):
    """
    Write data (60% of PVC capacity) to the PVC created in setup
    Create single clone for an existing pvc,
    Measure clone creation time and speed
    Delete the created clone
    Measure clone deletion time and speed
    Note: by increasing max_num_of_clones value you increase number
          of the clones to be created/deleted

    Args:
        interface_type: storage interface constant; CephFS selects the
            CephFS clone yaml, otherwise the RBD clone yaml is used
        pvc_size: parent PVC size as a string number of GiB (e.g. "25")
        file_size: written data size with a one-char unit suffix
            (e.g. "600M"); the suffix is stripped before fio is invoked
        teardown_factory: fixture registering created clones for cleanup
    """
    # Strip the trailing unit character (e.g. "600M" -> "600") for fio
    file_size_for_io = file_size[:-1]
    performance_lib.write_fio_on_pod(self.pod_obj, file_size_for_io)

    max_num_of_clones = 1
    clone_creation_measures = []
    clones_list = []
    timeout = 18000
    sc_name = self.pvc_obj.backed_sc
    parent_pvc = self.pvc_obj.name
    clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
    namespace = self.pvc_obj.namespace
    if interface_type == constants.CEPHFILESYSTEM:
        clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
    file_size_mb = convert_device_size(file_size, "MB")

    # creating single clone ( or many one by one if max_mum_of_clones > 1)
    logger.info(
        f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
    )
    for i in range(max_num_of_clones):
        logger.info(f"Start creation of clone number {i + 1}.")
        cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                              parent_pvc,
                                              clone_yaml,
                                              namespace,
                                              storage_size=pvc_size + "Gi")
        teardown_factory(cloned_pvc_obj)
        helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND,
                                        timeout)
        cloned_pvc_obj.reload()
        logger.info(
            f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
        )
        clones_list.append(cloned_pvc_obj)
        create_time = helpers.measure_pvc_creation_time(
            interface_type, cloned_pvc_obj.name)
        # NOTE(review): raises ZeroDivisionError if create_time is measured
        # as 0 — confirm measure_pvc_creation_time never returns 0
        creation_speed = int(file_size_mb / create_time)
        logger.info(
            f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
        )
        logger.info(
            f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
        )
        creation_measures = {
            "clone_num": i + 1,
            "time": create_time,
            "speed": creation_speed,
        }
        clone_creation_measures.append(creation_measures)

    # deleting one by one and measuring deletion times and speed for each one of the clones create above
    # in case of single clone will run one time
    clone_deletion_measures = []
    logger.info(
        f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
    )
    for i in range(max_num_of_clones):
        cloned_pvc_obj = clones_list[i]
        # Capture reclaim policy before deleting so we know whether to wait
        # for the backing PV as well
        pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
        cloned_pvc_obj.delete()
        logger.info(
            f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
        )
        cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
        delete_time = helpers.measure_pvc_deletion_time(
            interface_type, cloned_pvc_obj.backed_pv)
        logger.info(
            f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
        )
        deletion_speed = int(file_size_mb / delete_time)
        logger.info(
            f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
        )
        deletion_measures = {
            "clone_num": i + 1,
            "time": delete_time,
            "speed": deletion_speed,
        }
        clone_deletion_measures.append(deletion_measures)

    # Summarize all collected measurements in the log
    logger.info(
        f"Printing clone creation time and speed for {max_num_of_clones} clones "
        f"on {interface_type} PVC of size {pvc_size} GB:")
    for c in clone_creation_measures:
        logger.info(
            f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
        )
        logger.info(
            f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
        )
    logger.info(
        f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
    )
    for d in clone_deletion_measures:
        logger.info(
            f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
        )
        logger.info(
            f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
        )
    logger.info("test_clones_creation_performance finished successfully.")
def test_pvc_clone_performance_multiple_files(
    self,
    pvc_factory,
    interface,
    copies,
    timeout,
):
    """
    Measure clone creation/deletion performance for a PVC holding many
    small files.

    Each kernel (unzipped) is 892M and 61694 files.
    The test creates a pvc and a pod, writes kernel files multiplied by
    number of copies, then creates a number of clone samples, calculates
    creation and deletion times for each one of the clones and calculates
    the average creation and average deletion times. Results are pushed
    to the ES server.

    Args:
        pvc_factory: fixture creating the parent PVC
        interface: storage interface constant; selects access mode and
            clone yaml template
        copies (int): number of kernel-tree copies to write into the PVC
        timeout: timeout (secs) for pod readiness and pod deletion waits
    """
    kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
    download_path = "tmp"

    test_start_time = self.get_time()
    helpers.pull_images(constants.PERF_IMAGE)

    # Download a linux Kernel
    dir_path = os.path.join(os.getcwd(), download_path)
    file_path = os.path.join(dir_path, "file.gz")
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    urllib.request.urlretrieve(kernel_url, file_path)

    # Create a PVC
    accessmode = constants.ACCESS_MODE_RWX
    if interface == constants.CEPHBLOCKPOOL:
        accessmode = constants.ACCESS_MODE_RWO
    pvc_size = "100"
    try:
        pvc_obj = pvc_factory(
            interface=interface,
            access_mode=accessmode,
            status=constants.STATUS_BOUND,
            size=pvc_size,
        )
    except Exception as e:
        logger.error(f"The PVC sample was not created, exception {str(e)}")
        raise PVCNotCreated("PVC did not reach BOUND state.")

    # Create a pod on one node
    logger.info(f"Creating Pod with pvc {pvc_obj.name}")
    try:
        pod_obj = helpers.create_pod(
            interface_type=interface,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.namespace,
            pod_dict_path=constants.PERF_POD_YAML,
        )
    except Exception as e:
        logger.error(
            f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
        )
        raise PodNotCreated("Pod on PVC was not created.")

    # Confirm that pod is running on the selected_nodes
    logger.info("Checking whether pods are running on the selected nodes")
    helpers.wait_for_resource_state(resource=pod_obj,
                                    state=constants.STATUS_RUNNING,
                                    timeout=timeout)

    pod_name = pod_obj.name
    pod_path = "/mnt"

    _ocp = OCP(namespace=pvc_obj.namespace)

    # Copy the downloaded archive into the pod and unpack it
    rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
    _ocp.exec_oc_cmd(rsh_cmd)
    rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
    _ocp.exec_oc_cmd(rsh_cmd)

    # Duplicate the unpacked tree `copies` times, then sync to disk
    for x in range(copies):
        rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
        _ocp.exec_oc_cmd(rsh_cmd)
        rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
        _ocp.exec_oc_cmd(rsh_cmd)
        rsh_cmd = f"exec {pod_name} -- sync"
        _ocp.exec_oc_cmd(rsh_cmd)

    logger.info("Getting the amount of data written to the PVC")
    rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
    # The "Used" column of df -h is the 4th field from the end
    data_written = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
    logger.info(f"The amount of written data is {data_written}")

    rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
    files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
    logger.info(
        f"For {interface} - The number of files written to the pod is {files_written}"
    )

    # delete the pod
    pod_obj.delete(wait=False)
    logger.info("Wait for the pod to be deleted")
    performance_lib.wait_for_resource_bulk_status(
        "pod", 0, pvc_obj.namespace, constants.STATUS_COMPLETED, timeout, 5)
    logger.info("The pod was deleted")

    num_of_clones = 11

    # increasing the timeout since clone creation time is longer than pod attach time
    timeout = 18000

    clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
    if interface == constants.CEPHFILESYSTEM:
        clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

    clone_creation_measures = []
    csi_clone_creation_measures = []
    clone_deletion_measures = []
    csi_clone_deletion_measures = []

    # taking the time, so parsing the provision log will be faster.
    start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

    # Create and immediately delete each clone sample, measuring both
    # end-to-end and CSI-side times
    for i in range(num_of_clones):
        logger.info(f"Start creation of clone number {i + 1}.")
        cloned_pvc_obj = pvc.create_pvc_clone(
            pvc_obj.backed_sc,
            pvc_obj.name,
            clone_yaml,
            pvc_obj.namespace,
            storage_size=pvc_size + "Gi",
        )
        helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND,
                                        timeout)
        cloned_pvc_obj.reload()
        logger.info(
            f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {pvc_obj.name} was created."
        )
        create_time = helpers.measure_pvc_creation_time(
            interface, cloned_pvc_obj.name)
        logger.info(
            f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
        )
        clone_creation_measures.append(create_time)
        csi_clone_creation_measures.append(
            performance_lib.csi_pvc_time_measure(interface, cloned_pvc_obj,
                                                 "create", start_time))

        pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
        cloned_pvc_obj.delete()
        logger.info(
            f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
        )
        cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
        delete_time = helpers.measure_pvc_deletion_time(
            interface, cloned_pvc_obj.backed_pv)
        logger.info(
            f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
        )
        clone_deletion_measures.append(delete_time)
        csi_clone_deletion_measures.append(
            performance_lib.csi_pvc_time_measure(interface, cloned_pvc_obj,
                                                 "delete", start_time))

    # Local cleanup of the downloaded kernel and the parent PVC
    os.remove(file_path)
    os.rmdir(dir_path)
    pvc_obj.delete()

    average_creation_time = statistics.mean(clone_creation_measures)
    logger.info(f"Average creation time is {average_creation_time} secs.")
    average_csi_creation_time = statistics.mean(
        csi_clone_creation_measures)
    logger.info(
        f"Average csi creation time is {average_csi_creation_time} secs.")
    average_deletion_time = statistics.mean(clone_deletion_measures)
    logger.info(f"Average deletion time is {average_deletion_time} secs.")
    average_csi_deletion_time = statistics.mean(
        csi_clone_deletion_measures)
    logger.info(
        f"Average csi deletion time is {average_csi_deletion_time} secs.")

    # Produce ES report
    # Collecting environment information
    self.get_env_info()
    self.results_path = get_full_test_logs_path(cname=self)

    # Initialize the results doc file.
    full_results = self.init_full_results(
        ResultsAnalyse(
            self.uuid,
            self.crd_data,
            self.full_log_path,
            "test_pvc_clone_performance_multiple_files_fullres",
        ))
    full_results.add_key("files_number", files_written)
    test_end_time = self.get_time()
    full_results.add_key("test_time", {
        "start": test_start_time,
        "end": test_end_time
    })
    full_results.add_key("interface", interface)
    full_results.add_key("clones_number", num_of_clones)
    full_results.add_key("pvc_size", pvc_size)
    full_results.add_key("average_clone_creation_time",
                         average_creation_time)
    full_results.add_key("average_csi_clone_creation_time",
                         average_csi_creation_time)
    full_results.add_key("average_clone_deletion_time",
                         average_deletion_time)
    full_results.add_key("average_csi_clone_deletion_time",
                         average_csi_deletion_time)
    full_results.all_results = {
        "clone_creation_time": clone_creation_measures,
        "csi_clone_creation_time": csi_clone_creation_measures,
        "clone_deletion_time": clone_deletion_measures,
        "csi_clone_deletion_time": csi_clone_deletion_measures,
    }

    # Write the test results into the ES server
    if full_results.es_write():
        res_link = full_results.results_link()
        logger.info(f"The Result can be found at : {res_link}")

        # Create text file with results of all subtest (4 - according to the parameters)
        self.results_path = get_full_test_logs_path(
            cname=self, fname="test_pvc_clone_performance_multiple_files")
        self.write_result_to_file(res_link)
def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                         file_size, teardown_factory):
    """
    Write data (60% of PVC capacity) to the PVC created in setup
    Create clones for an existing pvc,
    Measure clones average creation time and speed
    Delete the created clone
    Measure clone average deletion time and speed
    Note: by increasing max_num_of_clones value you increase number
          of the clones to be created/deleted
    Results (per-clone and averages, including CSI-side times) are pushed
    to the ES server.

    Args:
        interface_type: storage interface constant; CephFS selects the
            CephFS clone yaml, otherwise the RBD clone yaml is used
        pvc_size: parent PVC size as a string number of GiB
        file_size: written data size with a one-char unit suffix; the
            suffix is stripped before fio is invoked
        teardown_factory: fixture registering created clones for cleanup
    """
    # Strip the trailing unit character (e.g. "600M" -> "600") for fio
    file_size_for_io = file_size[:-1]
    # NOTE(review): this test uses self.pod_object while the single-clone
    # variant uses self.pod_obj — confirm the attribute name on this class
    performance_lib.write_fio_on_pod(self.pod_object, file_size_for_io)

    max_num_of_clones = 10
    clone_creation_measures = []
    csi_clone_creation_measures = []
    clones_list = []
    timeout = 18000
    sc_name = self.pvc_obj.backed_sc
    parent_pvc = self.pvc_obj.name
    clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
    namespace = self.pvc_obj.namespace
    if interface_type == constants.CEPHFILESYSTEM:
        clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
    file_size_mb = convert_device_size(file_size, "MB")

    logger.info(
        f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
    )

    # taking the time, so parsing the provision log will be faster.
    start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
    for i in range(max_num_of_clones):
        logger.info(f"Start creation of clone number {i + 1}.")
        cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                              parent_pvc,
                                              clone_yaml,
                                              namespace,
                                              storage_size=pvc_size + "Gi")
        teardown_factory(cloned_pvc_obj)
        helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND,
                                        timeout)
        cloned_pvc_obj.reload()
        logger.info(
            f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
        )
        clones_list.append(cloned_pvc_obj)
        create_time = helpers.measure_pvc_creation_time(
            interface_type, cloned_pvc_obj.name)
        creation_speed = int(file_size_mb / create_time)
        logger.info(
            f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
        )
        logger.info(
            f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
        )
        creation_measures = {
            "clone_num": i + 1,
            "time": create_time,
            "speed": creation_speed,
        }
        clone_creation_measures.append(creation_measures)
        csi_clone_creation_measures.append(
            performance_lib.csi_pvc_time_measure(self.interface,
                                                 cloned_pvc_obj, "create",
                                                 start_time))

    # deleting one by one and measuring deletion times and speed for each one of the clones create above
    # in case of single clone will run one time
    clone_deletion_measures = []
    csi_clone_deletion_measures = []
    logger.info(
        f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
    )
    for i in range(max_num_of_clones):
        cloned_pvc_obj = clones_list[i]
        # Capture reclaim policy before deleting so we know whether to wait
        # for the backing PV as well
        pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
        cloned_pvc_obj.delete()
        logger.info(
            f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
        )
        cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
        delete_time = helpers.measure_pvc_deletion_time(
            interface_type, cloned_pvc_obj.backed_pv)
        logger.info(
            f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
        )
        deletion_speed = int(file_size_mb / delete_time)
        logger.info(
            f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
        )
        deletion_measures = {
            "clone_num": i + 1,
            "time": delete_time,
            "speed": deletion_speed,
        }
        clone_deletion_measures.append(deletion_measures)
        csi_clone_deletion_measures.append(
            performance_lib.csi_pvc_time_measure(self.interface,
                                                 cloned_pvc_obj, "delete",
                                                 start_time))

    # Log per-clone measurements and compute averages
    logger.info(
        f"Printing clone creation time and speed for {max_num_of_clones} clones "
        f"on {interface_type} PVC of size {pvc_size} GB:")
    for c in clone_creation_measures:
        logger.info(
            f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
        )
        logger.info(
            f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
        )
    logger.info(
        f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
    )
    creation_time_list = [r["time"] for r in clone_creation_measures]
    creation_speed_list = [r["speed"] for r in clone_creation_measures]
    average_creation_time = statistics.mean(creation_time_list)
    average_csi_creation_time = statistics.mean(
        csi_clone_creation_measures)
    average_creation_speed = statistics.mean(creation_speed_list)
    logger.info(f"Average creation time is {average_creation_time} secs.")
    logger.info(
        f"Average creation speed is {average_creation_speed} Mb/sec.")

    for d in clone_deletion_measures:
        logger.info(
            f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
        )
        logger.info(
            f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
        )
    deletion_time_list = [r["time"] for r in clone_deletion_measures]
    deletion_speed_list = [r["speed"] for r in clone_deletion_measures]
    average_deletion_time = statistics.mean(deletion_time_list)
    average_csi_deletion_time = statistics.mean(
        csi_clone_deletion_measures)
    average_deletion_speed = statistics.mean(deletion_speed_list)
    logger.info(f"Average deletion time is {average_deletion_time} secs.")
    logger.info(
        f"Average deletion speed is {average_deletion_speed} Mb/sec.")
    logger.info("test_clones_creation_performance finished successfully.")

    self.results_path = get_full_test_logs_path(cname=self)

    # Produce ES report
    # Collecting environment information
    self.get_env_info()
    self.full_log_path = get_full_test_logs_path(cname=self)
    self.results_path = get_full_test_logs_path(cname=self)
    self.full_log_path += f"-{self.interface}-{pvc_size}-{file_size}"
    logger.info(f"Logs file path name is : {self.full_log_path}")

    # Initialize the results doc file.
    full_results = self.init_full_results(
        ResultsAnalyse(
            self.uuid,
            self.crd_data,
            self.full_log_path,
            "pvc_clone_performance",
        ))
    full_results.add_key("interface", self.interface)
    full_results.add_key("total_clone_number", max_num_of_clones)
    full_results.add_key("pvc_size", self.pvc_size)
    full_results.add_key("average_clone_creation_time",
                         average_creation_time)
    full_results.add_key("average_csi_clone_creation_time",
                         average_csi_creation_time)
    full_results.add_key("average_clone_deletion_time",
                         average_deletion_time)
    full_results.add_key("average_csi_clone_deletion_time",
                         average_csi_deletion_time)
    full_results.add_key("average_clone_creation_speed",
                         average_creation_speed)
    full_results.add_key("average_clone_deletion_speed",
                         average_deletion_speed)
    full_results.all_results = {
        "clone_creation_time": creation_time_list,
        "csi_clone_creation_time": csi_clone_creation_measures,
        "clone_deletion_time": deletion_time_list,
        "csi_clone_deletion_time": csi_clone_deletion_measures,
        "clone_creation_speed": creation_speed_list,
        "clone_deletion_speed": deletion_speed_list,
    }

    # Write the test results into the ES server
    if full_results.es_write():
        res_link = full_results.results_link()
        logger.info(f"The Result can be found at : {res_link}")

        # Create text file with results of all subtest (8 - according to the parameters)
        self.write_result_to_file(res_link)