def test_bulk_pvc_creation_after_deletion_performance(
        self, teardown_factory):
    """
    Measuring PVC creation time of a bulk of 75% of the initial PVC bulk
    (120) at the same rate, after deleting (serial deletion) 75% of the
    initial PVCs.

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.

    Raises:
        ex.PerformanceException: if the bulk re-creation takes longer than
            the accepted time limit.

    """
    initial_number_of_pvcs = 120
    # Re-create (and first delete) 75% of the initial bulk
    number_of_pvcs = math.ceil(initial_number_of_pvcs * 0.75)
    # Accepted creation time (seconds) for the re-created bulk; was a
    # duplicated hard-coded literal in the check and the messages
    creation_time_limit = 50

    log.info(f"Start creating new {initial_number_of_pvcs} PVCs in a bulk")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        number_of_pvc=initial_number_of_pvcs,
        size=self.pvc_size,
        burst=True,
    )
    for pvc_obj in pvc_objs:
        teardown_factory(pvc_obj)

    # Wait (in parallel) for all of the PVCs to reach Bound state
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    log.info(f"Deleting 75% of the PVCs - {number_of_pvcs} PVCs")
    assert pvc.delete_pvcs(pvc_objs[:number_of_pvcs], True), (
        "Deletion of 75% of PVCs failed")

    log.info(f"Re-creating the {number_of_pvcs} PVCs")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        number_of_pvc=number_of_pvcs,
        size=self.pvc_size,
        burst=True,
    )

    # Measure the bulk creation time from the CSI provisioning logs
    start_time = helpers.get_provision_time(self.interface,
                                            pvc_objs,
                                            status="start")
    end_time = helpers.get_provision_time(self.interface,
                                          pvc_objs,
                                          status="end")
    total_time = (end_time - start_time).total_seconds()
    log.info(f"Deletion time of {number_of_pvcs} is {total_time} seconds.")

    if total_time > creation_time_limit:
        raise ex.PerformanceException(
            f"{number_of_pvcs} PVCs creation (after initial deletion of "
            f"75% of PVCs) time is {total_time} and greater than "
            f"{creation_time_limit} seconds.")
    log.info(f"{number_of_pvcs} PVCs creation time took less than a "
             f"{creation_time_limit} seconds")
def create_pvcs(request):
    """
    Create multiple PVCs as a pytest fixture.

    The number, size, storage class and namespace are taken from the
    requesting test class attributes (``num_of_pvcs``, ``pvc_size``,
    ``sc_obj``, ``namespace``). The created objects are stored on the
    class as ``pvc_objs``.
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete multiple PVCs and validate that every backing PV is gone.
        """
        if hasattr(class_instance, "pvc_objs"):
            # BUGFIX: the original kept only the LAST pvc's backed_pv name
            # (the loop variable leaked), so only one PV deletion was
            # validated. Collect the PV name of every PVC before deletion.
            backed_pv_names = []
            for pvc_obj in class_instance.pvc_objs:
                pvc_obj.reload()
                backed_pv_names.append(pvc_obj.backed_pv)
                pvc_obj.delete()
            for pvc_obj, backed_pv_name in zip(class_instance.pvc_objs,
                                               backed_pv_names):
                pvc_obj.ocp.wait_for_delete(pvc_obj.name)
                helpers.validate_pv_delete(backed_pv_name)

    request.addfinalizer(finalizer)

    class_instance.pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=class_instance.sc_obj.name,
        number_of_pvc=class_instance.num_of_pvcs,
        size=class_instance.pvc_size,
        namespace=class_instance.namespace,
    )
    for pvc_obj in class_instance.pvc_objs:
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
def create_pvcs_and_wait_for_bound(self, msg_prefix, pvcs, pvc_size, burst=True):
    """
    Create PVC(s) - one or more - serially or in parallel, and wait until
    all of them are in `Bound` state.

    Args:
        msg_prefix (str): prefix message for the logging
        pvcs (int): number of PVC(s) to create
        pvc_size (str): The PVC size to create - the unit is part of the
            string e.g: 1Gi
        burst (bool): if more than one PVC will be created - do it in
            parallel or serial

    Return:
        the timestamp when the creation started, for log parsing

    Raise:
        TimeoutExpiredError: if not all PVC(s) get into Bound state within
            2 sec. per PVC

    """
    # Creating PVC(s) for creation time measurement and wait for bound state
    timeout = pvcs * 2
    start_time = self.get_time(time_format="csi")
    log.info(f"{msg_prefix} Start creating new {pvcs} PVCs")

    self.pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=Interface_Info[self.interface]["sc"],
        namespace=self.namespace,
        number_of_pvc=pvcs,
        size=pvc_size,
        burst=burst,
        do_reload=False,
    )

    log.info("Wait for all of the PVCs to be in Bound state")
    performance_lib.wait_for_resource_bulk_status(
        "pvc", pvcs, self.namespace, constants.STATUS_BOUND, timeout, 5)
    # In case of creation failure, wait_for_resource_bulk_status will
    # raise an exception, so at this point the creation succeeded
    log.info("All PVCs was created and in Bound state.")

    # Reload all PVC(s) information
    for pvc_obj in self.pvc_objs:
        pvc_obj.reload()

    return start_time
def test_multiple_pvc_creation_measurement_performance(
        self, teardown_factory):
    """
    Measuring PVC creation time of 120 PVCs in 180 seconds

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.

    Raises:
        ex.PerformanceException: if the bulk creation takes longer than
            `multi_creation_time_limit` seconds.

    """
    number_of_pvcs = 120
    # Accepted creation time (seconds) for the whole bulk
    multi_creation_time_limit = 60
    log.info(f"Start creating new {number_of_pvcs} PVCs")

    pvc_objs = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        number_of_pvc=number_of_pvcs,
        size=self.pvc_size,
        burst=True,
    )
    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    # Measure the bulk creation time from the CSI provisioning logs
    start_time = helpers.get_provision_time(self.interface,
                                            pvc_objs,
                                            status="start")
    end_time = helpers.get_provision_time(self.interface,
                                          pvc_objs,
                                          status="end")
    total_time = (end_time - start_time).total_seconds()
    log.info(f"{number_of_pvcs} PVCs creation time is {total_time} seconds.")

    # BUGFIX: compare against the named limit instead of the duplicated
    # hard-coded literal 60 (the message already referenced the constant)
    if total_time > multi_creation_time_limit:
        raise ex.PerformanceException(
            f"{number_of_pvcs} PVCs creation time is {total_time} and "
            f"greater than {multi_creation_time_limit} seconds")
    log.info(f"{number_of_pvcs} PVCs creation time took {total_time} seconds")
def pvc_bulk_create_and_wait_for_bound(self, bulk_size):
    """
    Create a bulk of PVCs and block until every one of them is Bound.

    The created PVC objects and the directory holding their YAML files
    are stored on the instance as ``pvc_objs`` / ``yaml_creation_dir``.

    Args:
        bulk_size (int): the number of pvcs to create

    """
    self.pvc_objs, self.yaml_creation_dir = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=bulk_size,
        size=self.pvc_size,
        burst=True,
    )
    # Wait for the Bound state and refresh each PVC object, in parallel
    with ThreadPoolExecutor(max_workers=5) as pool:
        for created_pvc in self.pvc_objs:
            pool.submit(
                helpers.wait_for_resource_state,
                created_pvc,
                constants.STATUS_BOUND,
            )
            pool.submit(created_pvc.reload)
def test_multiple_pvc_deletion_measurement_performance(
        self, teardown_factory):
    """
    Measuring PVC deletion time of 120 PVCs in 180 seconds

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.

    Raises:
        ex.PerformanceException: if any PVC deletion time exceeds the
            accepted limit (2 seconds per PVC).

    """
    number_of_pvcs = 120
    pvc_size = "1Gi"
    msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

    log.info(f"{msg_prefix} Start creating new 120 PVCs")
    pvc_objs = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        number_of_pvc=number_of_pvcs,
        size=pvc_size,
        burst=True,
    )
    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    # Write data on every PVC so the deletion is of a non-empty volume
    pod_objs = []
    for pvc_obj in pvc_objs:
        pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
        pod_objs.append(pod_obj)

    # Refresh the PVC objects in parallel so backed_pv is up to date
    threads = list()
    for pvc_obj in pvc_objs:
        process = threading.Thread(target=pvc_obj.reload)
        process.start()
        threads.append(process)
    for process in threads:
        process.join()

    # Collect names needed to fetch deletion time data from the logs.
    # BUGFIX: the original spawned threads with
    # `threading.Thread(target=list.append(...))`, which calls `append`
    # eagerly and passes `None` as the target - those threads did nothing.
    # Building the lists directly is equivalent and correct.
    pvc_name_list = [pvc_obj.name for pvc_obj in pvc_objs]
    pv_name_list = [pvc_obj.backed_pv for pvc_obj in pvc_objs]

    log.info(f"{msg_prefix} Preparing to delete 120 PVC")

    # Delete PVC (and its data-writing pod first)
    for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
        pod_obj.delete(wait=True)
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)

    # Get PVC deletion time
    pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
        interface=self.interface, pv_name_list=pv_name_list)
    log.info(
        f"{msg_prefix} {number_of_pvcs} bulk deletion time is "
        f"{pvc_deletion_time}")

    # accepted deletion time is 2 secs for each PVC
    accepted_pvc_deletion_time = number_of_pvcs * 2
    for del_time in pvc_deletion_time.values():
        if del_time > accepted_pvc_deletion_time:
            raise ex.PerformanceException(
                f"{msg_prefix} {number_of_pvcs} PVCs deletion time is "
                f"{pvc_deletion_time.values()} and is greater than "
                f"{accepted_pvc_deletion_time} seconds")
    log.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
    for name, a_time in pvc_deletion_time.items():
        log.info(f"{name} deletion time is: {a_time} seconds")
def test_bulk_pvc_creation_after_deletion_performance(
        self, teardown_factory):
    """
    Measuring PVC creation time of a bulk of 75% of the initial PVC bulk
    (120) at the same rate, after deleting (serial deletion) 75% of the
    initial PVCs, and sends results to the Elastic Search DB

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.

    Raises:
        ex.PerformanceException: if the bulk re-creation takes longer than
            the accepted time limit.

    """
    initial_number_of_pvcs = 120
    # Re-create (and first delete) 75% of the initial bulk
    number_of_pvcs = math.ceil(initial_number_of_pvcs * 0.75)
    # Accepted creation time (seconds) for the re-created bulk; was a
    # duplicated hard-coded literal in the check and the messages
    creation_time_limit = 50

    log.info(f"Start creating new {initial_number_of_pvcs} PVCs in a bulk")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=initial_number_of_pvcs,
        size=self.pvc_size,
        burst=True,
    )
    for pvc_obj in pvc_objs:
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    log.info(f"Deleting 75% of the PVCs - {number_of_pvcs} PVCs")
    assert pvc.delete_pvcs(pvc_objs[:number_of_pvcs], True), (
        "Deletion of 75% of PVCs failed")

    log.info(f"Re-creating the {number_of_pvcs} PVCs")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=number_of_pvcs,
        size=self.pvc_size,
        burst=True,
    )

    # Measure the bulk creation time from the CSI provisioning logs
    start_time = helpers.get_provision_time(self.interface,
                                            pvc_objs,
                                            status="start")
    end_time = helpers.get_provision_time(self.interface,
                                          pvc_objs,
                                          status="end")
    total_time = (end_time - start_time).total_seconds()
    log.info(
        f"Creation after deletion time of {number_of_pvcs} is "
        f"{total_time} seconds.")

    # Register the re-created PVCs for teardown and wait for Bound state
    for pvc_obj in pvc_objs:
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    if total_time > creation_time_limit:
        raise ex.PerformanceException(
            f"{number_of_pvcs} PVCs creation (after initial deletion of "
            f"75% of PVCs) time is {total_time} and greater than "
            f"{creation_time_limit} seconds.")
    log.info(f"{number_of_pvcs} PVCs creation time took less than a "
             f"{creation_time_limit} seconds")

    # Produce ES report
    # Collecting environment information
    self.get_env_info()

    # Initialize the results doc file.
    full_results = self.init_full_results(
        ResultsAnalyse(
            self.uuid,
            self.crd_data,
            self.full_log_path,
            "bulk_pvc_creation_after_deletion_measurement",
        ))
    full_results.add_key("interface", self.interface)
    full_results.add_key("number_of_pvcs", number_of_pvcs)
    full_results.add_key("pvc_size", self.pvc_size)
    full_results.add_key("creation_after_deletion_time", total_time)

    # Write the test results into the ES server
    full_results.es_write()
def test_bulk_pvc_creation_deletion_measurement_performance(
        self, teardown_factory, bulk_size):
    """
    Measuring PVC creation and deletion time of bulk_size PVCs and sends
    results to the Elastic Search DB

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.
        bulk_size: Size of the bulk to be tested

    Raises:
        ex.PerformanceException: if the bulk creation takes longer than
            half a second per PVC.

    """
    # Accepted creation time: half a second per PVC
    bulk_creation_time_limit = bulk_size / 2
    log.info(f"Start creating new {bulk_size} PVCs")

    pvc_objs, yaml_creation_dir = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=bulk_size,
        size=self.pvc_size,
        burst=True,
    )
    log.info(f"PVC creation dir is {yaml_creation_dir}")

    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    # Measure the bulk creation time from the CSI provisioning logs
    start_time = helpers.get_provision_time(self.interface,
                                            pvc_objs,
                                            status="start")
    end_time = helpers.get_provision_time(self.interface,
                                          pvc_objs,
                                          status="end")
    total_time = (end_time - start_time).total_seconds()
    log.info(f"{bulk_size} Bulk PVCs creation time is {total_time} seconds.")

    if total_time > bulk_creation_time_limit:
        raise ex.PerformanceException(
            f"{bulk_size} Bulk PVCs creation time is {total_time} and "
            f"greater than {bulk_creation_time_limit} seconds")

    pv_names_list = [pvc_obj.backed_pv for pvc_obj in pvc_objs]

    log.info(f"Starting to delete bulk of {bulk_size} PVCs")
    helpers.delete_bulk_pvcs(yaml_creation_dir, pv_names_list,
                             namespace=self.namespace)
    log.info(f"Deletion of bulk of {bulk_size} PVCs successfully completed")

    log_deletion_times = helpers.measure_pv_deletion_time_bulk(
        self.interface, pv_names_list, return_log_times=True)

    # The bulk deletion interval spans from the earliest logged start
    # time to the latest logged end time (min/max replace sorted()[0]
    # and sorted()[-1] - identical result, single pass)
    all_start_times = [a_tuple[0] for a_tuple in log_deletion_times.values()]
    bulk_start_time = min(all_start_times)  # the earliest start time
    start_deletion_time = datetime.datetime.strptime(
        bulk_start_time, helpers.DATE_TIME_FORMAT)

    all_end_times = [a_tuple[1] for a_tuple in log_deletion_times.values()]
    bulk_deletion_time = max(all_end_times)  # the latest end time
    end_deletion_time = datetime.datetime.strptime(
        bulk_deletion_time, helpers.DATE_TIME_FORMAT)

    total_deletion_time = (end_deletion_time -
                           start_deletion_time).total_seconds()
    log.info(
        f"{bulk_size} Bulk PVCs deletion time is "
        f"{total_deletion_time} seconds.")

    # Produce ES report
    # Collecting environment information
    self.get_env_info()

    # Initialize the results doc file.
    full_results = self.init_full_results(
        ResultsAnalyse(
            self.uuid,
            self.crd_data,
            self.full_log_path,
            "bulk_creation_deletion_measurement",
        ))
    full_results.add_key("interface", self.interface)
    full_results.add_key("bulk_size", bulk_size)
    full_results.add_key("pvc_size", self.pvc_size)
    full_results.add_key("bulk_pvc_creation_time", total_time)
    full_results.add_key("bulk_pvc_deletion_time", total_deletion_time)

    # Write the test results into the ES server
    full_results.es_write()
def test_multiple_pvc_deletion_measurement_performance(
        self, teardown_factory):
    """
    Measuring PVC deletion time of 120 PVCs in 180 seconds, and sends the
    results to the Elastic Search server.

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.

    Raises:
        ex.PerformanceException: if any PVC deletion time exceeds the
            accepted limit (2 seconds per PVC).

    """
    number_of_pvcs = 120
    pvc_size = "1Gi"
    msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

    log.info(f"{msg_prefix} Start creating new {number_of_pvcs} PVCs")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=number_of_pvcs,
        size=pvc_size,
        burst=True,
    )
    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)

    # Thick provisioning is much slower - allow a longer Bound timeout
    timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(
                helpers.wait_for_resource_state,
                pvc_obj,
                constants.STATUS_BOUND,
                timeout=timeout,
            )
            executor.submit(pvc_obj.reload)

    # Write data on every PVC so the deletion is of a non-empty volume
    pod_objs = []
    for pvc_obj in pvc_objs:
        pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
        pod_objs.append(pod_obj)

    # Refresh the PVC objects in parallel so backed_pv is up to date
    threads = list()
    for pvc_obj in pvc_objs:
        process = threading.Thread(target=pvc_obj.reload)
        process.start()
        threads.append(process)
    for process in threads:
        process.join()

    # Collect names needed to fetch deletion time data from the logs.
    # BUGFIX: the original spawned threads with
    # `threading.Thread(target=list.append(...))`, which calls `append`
    # eagerly and passes `None` as the target - those threads did nothing.
    # Building the lists directly is equivalent and correct.
    pvc_name_list = [pvc_obj.name for pvc_obj in pvc_objs]
    pv_name_list = [pvc_obj.backed_pv for pvc_obj in pvc_objs]

    log.info(f"{msg_prefix} Preparing to delete 120 PVC")

    # Delete PVC (and its data-writing pod first)
    for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
        pod_obj.delete(wait=True)
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)

    # Get PVC deletion time
    pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
        interface=self.interface, pv_name_list=pv_name_list)
    log.info(
        f"{msg_prefix} {number_of_pvcs} bulk deletion time is "
        f"{pvc_deletion_time}")

    # accepted deletion time is 2 secs for each PVC
    accepted_pvc_deletion_time = number_of_pvcs * 2
    for del_time in pvc_deletion_time.values():
        if del_time > accepted_pvc_deletion_time:
            raise ex.PerformanceException(
                f"{msg_prefix} {number_of_pvcs} PVCs deletion time is "
                f"{pvc_deletion_time.values()} and is greater than "
                f"{accepted_pvc_deletion_time} seconds")
    log.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
    for name, a_time in pvc_deletion_time.items():
        log.info(f"{name} deletion time is: {a_time} seconds")

    # Map the interface to a short storage-class label for the report
    if self.interface == constants.CEPHBLOCKPOOL:
        self.sc = "RBD"
    elif self.interface == constants.CEPHFILESYSTEM:
        self.sc = "CephFS"
    elif self.interface == constants.CEPHBLOCKPOOL_THICK:
        self.sc = "RBD-Thick"
    full_log_path = get_full_test_logs_path(
        cname=self) + f"-{self.sc}-{pvc_size}"
    self.results_path = get_full_test_logs_path(cname=self)
    log.info(f"Logs file path name is : {full_log_path}")

    # Collecting environment information
    self.get_env_info()

    # Initialize the results doc file.
    full_results = self.init_full_results(
        ResultsAnalyse(
            self.uuid,
            self.crd_data,
            full_log_path,
            "pvc_bulk_deletion_fullres",
        ))
    full_results.add_key("interface", self.interface)
    full_results.add_key("bulk_size", number_of_pvcs)
    full_results.add_key("pvc_size", pvc_size)
    full_results.all_results["bulk_deletion_time"] = pvc_deletion_time

    if full_results.es_write():
        res_link = full_results.results_link()
        log.info(f"The Result can be found at : {res_link}")

        # Create text file with results of all subtest (3 - according to the parameters)
        self.write_result_to_file(res_link)
def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size):
    """
    Measures pods attachment time in bulk_size bulk

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.
        bulk_size: Size of the bulk to be tested

    """
    # Getting the test start time
    test_start_time = PASTest.get_time()

    log.info(f"Start creating bulk of new {bulk_size} PVCs")
    pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=self.namespace,
        number_of_pvc=bulk_size,
        size=self.pvc_size,
        burst=True,
    )
    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    # Measure the bulk creation time from the CSI provisioning logs
    start_time = helpers.get_provision_time(self.interface,
                                            pvc_objs,
                                            status="start")
    end_time = helpers.get_provision_time(self.interface,
                                          pvc_objs,
                                          status="end")
    total_time = (end_time - start_time).total_seconds()
    log.info(
        f"{self.interface}: Bulk of {bulk_size} PVCs creation time is "
        f"{total_time} seconds.")

    pvc_names_list = [pvc_obj.name for pvc_obj in pvc_objs]

    log.info(f"{self.interface} : Before pod attach")
    bulk_start_time = time.time()
    pod_data_list = list()
    pod_data_list.extend(
        scale_lib.attach_multiple_pvc_to_pod_dict(
            pvc_list=pvc_names_list,
            namespace=self.namespace,
            pvcs_per_pod=1,
        ))

    # Create kube_job for pod creation.
    # NOTE: the original stored the job through a `lcl = locals()` dict
    # (`lcl[f"pod_kube_{obj_name}"]`) - a fragile anti-pattern. A plain
    # variable behaves identically; the job name string is preserved.
    tmp_path = pathlib.Path(ocsci_log_path())
    pod_kube_job = ObjectConfFile(
        name="pod_kube_obj1",
        obj_dict_list=pod_data_list,
        project=defaults.ROOK_CLUSTER_NAMESPACE,
        tmp_path=tmp_path,
    )
    pod_kube_job.create(namespace=self.namespace)

    log.info("Checking that pods are running")
    # Check all the PODs reached Running state
    pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(
        kube_job_obj=pod_kube_job,
        namespace=self.namespace,
        no_of_pod=len(pod_data_list),
        timeout=180,
    )
    for pod_name in pod_running_list:
        pod_obj = get_pod_obj(pod_name, self.namespace)
        teardown_factory(pod_obj)

    bulk_end_time = time.time()
    bulk_total_time = bulk_end_time - bulk_start_time
    log.info(
        f"Bulk attach time of {len(pod_running_list)} pods is "
        f"{bulk_total_time} seconds")

    # Collecting environment information
    self.get_env_info()

    # Initialize the results doc file.
    full_log_path = get_full_test_logs_path(cname=self)
    full_log_path += f"-{self.sc}"
    full_results = self.init_full_results(
        ResultsAnalyse(self.uuid, self.crd_data, full_log_path))
    full_results.add_key("storageclass", self.sc)
    full_results.add_key("pod_bulk_attach_time", bulk_total_time)
    full_results.add_key("pvc_size", self.pvc_size)
    full_results.add_key("bulk_size", bulk_size)

    # Getting the test end time
    test_end_time = PASTest.get_time()

    # Add the test time to the ES report
    full_results.add_key("test_time", {
        "start": test_start_time,
        "end": test_end_time
    })

    # Write the test results into the ES server
    full_results.es_write()

    # write the ES link to the test results in the test log.
    log.info(f"The result can be found at : {full_results.results_link()}")
def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
    """
    Measuring PVC deletion time of 120 PVCs in 180 seconds

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.

    """
    number_of_pvcs = 120
    pvc_size = "1Gi"

    log.info(f"Start creating new {number_of_pvcs} PVCs")
    pvc_objs = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        number_of_pvc=number_of_pvcs,
        size=pvc_size,
    )
    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(
                helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND
            )
            executor.submit(pvc_obj.reload)

    # Refresh the PVC objects in parallel so backed_pv is up to date
    threads = list()
    for pvc_obj in pvc_objs:
        process = threading.Thread(target=pvc_obj.reload)
        process.start()
        threads.append(process)
    for process in threads:
        process.join()

    # Collect names needed to fetch deletion time data from the logs.
    # BUGFIX: the original spawned threads with
    # `threading.Thread(target=list.append(...))`, which calls `append`
    # eagerly and passes `None` as the target - those threads did nothing.
    # Building the lists directly is equivalent and correct.
    pvc_name_list = [pvc_obj.name for pvc_obj in pvc_objs]
    pv_name_list = [pvc_obj.backed_pv for pvc_obj in pvc_objs]

    log.info("Preparing to delete 120 PVC")

    # Delete PVC
    for obj in pvc_objs:
        obj.delete()
    for obj in pvc_objs:
        obj.ocp.wait_for_delete(obj.name)

    # Get PVC deletion time
    pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
        interface=self.interface, pv_name_list=pv_name_list
    )
    log.info(f"{number_of_pvcs} PVCs deletion time took {pvc_deletion_time}")
def test_bulk_pvc_creation_deletion_measurement_performance(
        self, teardown_factory, bulk_size):
    """
    Measuring PVC creation and deletion time of bulk_size PVCs

    Args:
        teardown_factory: A fixture used when we want a new resource that
            was created during the tests to be removed in the teardown phase.
        bulk_size: Size of the bulk to be tested

    Raises:
        ex.PerformanceException: if the bulk creation takes longer than
            half a second per PVC.

    """
    # Accepted creation time: half a second per PVC
    bulk_creation_time_limit = bulk_size / 2
    log.info(f"Start creating new {bulk_size} PVCs")

    pvc_objs, yaml_creation_dir = helpers.create_multiple_pvcs(
        sc_name=self.sc_obj.name,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        number_of_pvc=bulk_size,
        size=self.pvc_size,
        burst=True,
    )
    log.info(f"PVC creation dir is {yaml_creation_dir}")

    for pvc_obj in pvc_objs:
        pvc_obj.reload()
        teardown_factory(pvc_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        for pvc_obj in pvc_objs:
            executor.submit(helpers.wait_for_resource_state, pvc_obj,
                            constants.STATUS_BOUND)
            executor.submit(pvc_obj.reload)

    # Measure the bulk creation time from the CSI provisioning logs
    start_time = helpers.get_provision_time(self.interface,
                                            pvc_objs,
                                            status="start")
    end_time = helpers.get_provision_time(self.interface,
                                          pvc_objs,
                                          status="end")
    total_time = (end_time - start_time).total_seconds()
    log.info(f"{bulk_size} Bulk PVCs creation time is {total_time} seconds.")

    if total_time > bulk_creation_time_limit:
        raise ex.PerformanceException(
            f"{bulk_size} Bulk PVCs creation time is {total_time} and "
            f"greater than {bulk_creation_time_limit} seconds")

    pv_names_list = [pvc_obj.backed_pv for pvc_obj in pvc_objs]

    log.info(f"Starting to delete bulk of {bulk_size} PVCs")
    helpers.delete_bulk_pvcs(yaml_creation_dir, pv_names_list)
    log.info(f"Deletion of bulk of {bulk_size} PVCs successfully completed")

    log_deletion_times = helpers.measure_pv_deletion_time_bulk(
        self.interface, pv_names_list, return_log_times=True)

    # The bulk deletion interval spans from the earliest logged start
    # time to the latest logged end time (min/max replace sorted()[0]
    # and sorted()[-1] - identical result, single pass)
    all_start_times = [a_tuple[0] for a_tuple in log_deletion_times.values()]
    bulk_start_time = min(all_start_times)  # the earliest start time
    start_deletion_time = datetime.datetime.strptime(
        bulk_start_time, helpers.DATE_TIME_FORMAT)

    all_end_times = [a_tuple[1] for a_tuple in log_deletion_times.values()]
    bulk_deletion_time = max(all_end_times)  # the latest end time
    end_deletion_time = datetime.datetime.strptime(
        bulk_deletion_time, helpers.DATE_TIME_FORMAT)

    total_deletion_time = (end_deletion_time -
                           start_deletion_time).total_seconds()
    log.info(
        f"{bulk_size} Bulk PVCs deletion time is "
        f"{total_deletion_time} seconds.")
def test_bulk_pod_attach_performance(self, interface_type, bulk_size):
    """
    Measures pods attachment time in bulk_size bulk

    Args:
        interface_type (str): The interface type to be tested -
            CephBlockPool / CephFileSystem.
        bulk_size (int): Size of the bulk to be tested

    """
    self.interface = interface_type

    # In dev mode shrink the bulk so the test runs quickly
    if self.dev_mode:
        bulk_size = 3

    # Initialize some variables
    timeout = bulk_size * 5
    pvc_names_list = list()
    pod_data_list = list()

    # Getting the test start time
    test_start_time = self.get_time()
    csi_start_time = self.get_time("csi")

    log.info(f"Start creating bulk of new {bulk_size} PVCs")
    self.pvc_objs, _ = helpers.create_multiple_pvcs(
        sc_name=Interfaces_info[self.interface]["sc"],
        namespace=self.namespace,
        number_of_pvc=bulk_size,
        size=self.pvc_size,
        burst=True,
        do_reload=False,
    )
    log.info("Wait for all of the PVCs to be in Bound state")
    performance_lib.wait_for_resource_bulk_status("pvc", bulk_size,
                                                  self.namespace,
                                                  constants.STATUS_BOUND,
                                                  timeout, 10)
    # In case of creation failure, wait_for_resource_bulk_status will
    # raise an exception, so at this point the creation succeeded
    log.info("All PVCs was created and in Bound state.")

    # Reload all PVC(s) information
    for pvc_obj in self.pvc_objs:
        pvc_obj.reload()
        pvc_names_list.append(pvc_obj.name)
    log.debug(f"The PVCs names are : {pvc_names_list}")

    # Create kube_job for pod creation
    pod_data_list.extend(
        scale_lib.attach_multiple_pvc_to_pod_dict(
            pvc_list=pvc_names_list,
            namespace=self.namespace,
            pvcs_per_pod=1,
        ))
    self.pods_obj = ObjectConfFile(
        name="pod_kube_obj",
        obj_dict_list=pod_data_list,
        project=self.namespace,
        tmp_path=pathlib.Path(ocsci_log_path()),
    )
    log.debug(f"PODs data list is : {json.dumps(pod_data_list, indent=3)}")

    log.info(f"{self.interface} : Before pod attach")
    bulk_start_time = time.time()
    self.pods_obj.create(namespace=self.namespace)

    # Check all the PODs reached Running state
    log.info("Checking that pods are running")
    performance_lib.wait_for_resource_bulk_status("pod", bulk_size,
                                                  self.namespace,
                                                  constants.STATUS_RUNNING,
                                                  timeout, 2)
    log.info("All POD(s) are in Running State.")
    bulk_end_time = time.time()
    bulk_total_time = bulk_end_time - bulk_start_time
    log.info(
        f"Bulk attach time of {bulk_size} pods is {bulk_total_time} seconds")

    # Attach time as measured from the CSI logs
    csi_bulk_total_time = performance_lib.pod_bulk_attach_csi_time(
        self.interface, self.pvc_objs, csi_start_time, self.namespace)

    # Collecting environment information
    self.get_env_info()

    # Initialize the results doc file.
    full_results = self.init_full_results(
        ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path,
                       "pod_bulk_attachtime"))
    full_results.add_key("storageclass",
                         Interfaces_info[self.interface]["name"])
    full_results.add_key("pod_bulk_attach_time", bulk_total_time)
    full_results.add_key("pod_csi_bulk_attach_time", csi_bulk_total_time)
    full_results.add_key("pvc_size", self.pvc_size)
    full_results.add_key("bulk_size", bulk_size)

    # Getting the test end time
    test_end_time = self.get_time()

    # Add the test time to the ES report
    full_results.add_key("test_time", {
        "start": test_start_time,
        "end": test_end_time
    })

    # Write the test results into the ES server
    self.results_path = helpers.get_full_test_logs_path(cname=self)
    if full_results.es_write():
        res_link = full_results.results_link()
        # write the ES link to the test results in the test log.
        log.info(f"The result can be found at : {res_link}")

        # Create text file with results of all subtests (4 - according to the parameters)
        self.write_result_to_file(res_link)