def test_fio_compressed_workload(self, ripsaw, es, storageclass_factory, io_pattern, bs, cmp_ratio):
    """
    This is a basic fio perf test which run on compression enabled volume

    Args:
        ripsaw (obj): ripsaw (benchmark-operator) fixture, used here to
            deploy the operator and to fetch the benchmark UUID
        es (obj): elastic-search fixture; its connection info is backed up
            and the raw benchmark data is copied from it
        storageclass_factory (func): fixture that creates a storage class
            backed by a new RBD pool with compression enabled
        io_pattern (str): the I/O pattern to do - random / sequential
        bs (str): block size to use in the test
        cmp_ratio (int): the expected compression ratio

    """
    # Build a unique log-file path per parameter combination
    self.full_log_path = get_full_test_logs_path(cname=self)
    self.full_log_path += f"-{io_pattern}-{bs}-{cmp_ratio}"
    log.info(f"Logs file path name is : {self.full_log_path}")

    # Deploy the ripsaw (benchmark-operator) into the cluster
    self.ripsaw_deploy(ripsaw)

    # Create a storage class on top of a new RBD pool with aggressive
    # compression, then resolve the name of the pool behind it via oc
    log.info("Creating compressed pool & SC")
    sc_obj = storageclass_factory(
        interface=constants.CEPHBLOCKPOOL,
        new_rbd_pool=True,
        replica=3,
        compression="aggressive",
    )
    sc = sc_obj.name
    pool_name = run_cmd(
        f"oc get sc {sc} -o jsonpath={{'.parameters.pool'}}")

    # Load the fio benchmark custom resource template and patch in the
    # test parameters (block size, prefill block size, expected ratio)
    log.info("Create resource file for fio workload")
    self.fio_cr = templating.load_yaml(
        "ocs_ci/templates/workloads/fio/benchmark_fio_cmp.yaml")
    self.fio_cr["spec"]["workload"]["args"]["bs"] = [bs]
    self.fio_cr["spec"]["workload"]["args"]["prefill_bs"] = bs
    self.fio_cr["spec"]["workload"]["args"]["cmp_ratio"] = cmp_ratio

    # Saving the Original elastic-search IP and PORT - if defined in yaml
    self.es_info_backup(es)

    # Setting the data set to 40% of the total storage capacity
    self.setting_storage_usage()

    self.get_env_info()
    self.fio_cr["spec"]["workload"]["args"]["storageclass"] = sc
    self.setting_io_pattern(io_pattern)

    # Deploy the benchmark and wait for the fio client pod to start
    fio_client_pod = self.deploy_and_wait_for_wl_to_start()

    # Getting the UUID from inside the benchmark pod
    uuid = ripsaw.get_uuid(fio_client_pod)

    # Setting back the original elastic-search information
    self.fio_cr["spec"]["elasticsearch"] = self.backup_es

    # Initialize the results doc file.
    full_results = self.init_full_results(
        FIOResultsAnalyse(uuid, self.fio_cr, self.full_log_path,
                          self.main_es))

    # Setting the global parameters of the test
    full_results.add_key("io_pattern", io_pattern)

    # Block until the workload finishes, then record the time window
    end_time = self.wait_for_wl_to_finish(fio_client_pod)
    full_results.add_key("test_time", {
        "start": self.start_time,
        "end": end_time
    })

    # Copy the raw benchmark data out of the deployed elastic-search
    self.copy_es_data(es)

    # Compare the measured pool compression ratio against the expected one
    log.info("verifying compression ratio")
    ratio = calculate_compression_ratio(pool_name)
    full_results.add_key("cmp_ratio", {
        "expected": cmp_ratio,
        "actual": ratio
    })

    full_results.analyze_results()  # Analyze the results

    # A deviation of up to 5 percentage points from the expected ratio is
    # tolerated; larger deviations are currently only logged at info level.
    # TODO: change the info message to Warning/Error after
    # prefill at ripsaw will be fixed Ripsaw PR - #505
    if (cmp_ratio + 5) < ratio or ratio < (cmp_ratio - 5):
        log.info(f"The compression ratio is {ratio}% "
                 f"while the expected ratio is {cmp_ratio}%")
    else:
        log.info(f"The compression ratio is {ratio}%")

    # Writing the analyzed test results to the Elastic-Search server
    if self.main_es is not None:
        full_results.es_write()
        # Creating full link to the results on the ES server
        log.info(
            f"The Result can be found at : {full_results.results_link()}")

    # Clean up the fio benchmark, then the storage class and its pool
    self.cleanup()
    sc_obj.delete()
    sc_obj.ocp.wait_for_delete(resource_name=sc, timeout=300, sleep=5)
    log.debug(f"Full results is : {full_results.results}")
def test_fio_compressed_workload(self, storageclass_factory, io_pattern, bs, cmp_ratio): """ This is a basic fio perf test which run on compression enabled volume Args: io_pattern (str): the I/O pattern to do - random / sequential bs (str): block size to use in the test cmp_ratio (int): the expected compression ratio """ # Getting the full path for the test logs self.full_log_path = get_full_test_logs_path(cname=self) self.full_log_path += f"-{io_pattern}-{bs}-{cmp_ratio}" log.info(f"Logs file path name is : {self.full_log_path}") log.info("Create resource file for fio workload") self.crd_data = templating.load_yaml( "ocs_ci/templates/workloads/fio/benchmark_fio_cmp.yaml") # Saving the Original elastic-search IP and PORT - if defined in yaml self.es_info_backup(self.es) log.info("Creating compressed pool & SC") sc_obj = storageclass_factory( interface=constants.CEPHBLOCKPOOL, new_rbd_pool=True, replica=3, compression="aggressive", ) sc = sc_obj.name pool_name = run_cmd( f"oc get sc {sc} -o jsonpath={{'.parameters.pool'}}") # Create fio benchmark self.crd_data["spec"]["workload"]["args"]["bs"] = [bs] self.crd_data["spec"]["workload"]["args"]["cmp_ratio"] = cmp_ratio # Setting the data set to 40% of the total storage capacity self.setting_storage_usage() self.crd_data["spec"]["workload"]["args"][ "prefill_bs"] = self.crd_data["spec"]["workload"]["args"]["bs"][0] self.get_env_info() self.crd_data["spec"]["workload"]["args"]["storageclass"] = sc self.setting_io_pattern(io_pattern) self.run() # Initialize the results doc file. 
full_results = self.init_full_results( FIOResultsAnalyse(self.uuid, self.crd_data, self.full_log_path, self.main_es)) # Setting the global parameters of the test full_results.add_key("io_pattern", io_pattern) if isinstance(self.es, ElasticSearch): # Using internal deployed elasticsearch # if self.es: log.info("Getting data from internal ES") if self.main_es: self.copy_es_data(self.es) else: log.info("Dumping data from the Internal ES to tar ball file") self.es.dumping_all_data(self.full_log_path) log.info("verifying compression ratio") ratio = calculate_compression_ratio(pool_name) full_results.add_key("cmp_ratio", { "expected": cmp_ratio, "actual": ratio }) log.debug(f"Full results is : {full_results.results}") full_results.analyze_results(self) # Analyze the results if (cmp_ratio + 5) < ratio or ratio < (cmp_ratio - 5): log.warning(f"The compression ratio is {ratio}% " f"while the expected ratio is {cmp_ratio}%") else: log.info(f"The compression ratio is {ratio}%") full_results.add_key("test_time", { "start": self.start_time, "end": self.end_time }) # Writing the analyzed test results to the Elastic-Search server if full_results.es_write(): log.info( f"The Result can be found at : {full_results.results_link()}") # Clean up fio benchmark self.cleanup() sc_obj.delete() sc_obj.ocp.wait_for_delete(resource_name=sc, timeout=300, sleep=5)