# Imports assumed from the ocs-ci tree; module paths follow the upstream
# layout (verify locally, in particular the location of oc_get_all_obc_names).
import csv
import logging
import os
import time

from ocs_ci.ocs import constants, scale_noobaa_lib
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.ocp import oc_get_all_obc_names  # path assumed; verify locally
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile
from ocs_ci.utility import templating, utils
from ocs_ci.utility.utils import ocsci_log_path

log = logging.getLogger(__name__)

# obc_scaled_data_file is expected to be defined earlier in this module.


def test_scale_obc_post_upgrade():
    """
    Validate that OBCs scaled pre-upgrade are still Bound post upgrade
    """
    # Get info from SCALE_DATA_FILE for validation
    if os.path.exists(obc_scaled_data_file):
        file_data = templating.load_yaml(obc_scaled_data_file)
        namespace = file_data.get("NAMESPACE")
        obc_scale_list = file_data.get("OBC_SCALE_LIST")
    else:
        raise FileNotFoundError(
            f"Scaled data file {obc_scaled_data_file} does not exist"
        )

    # Check OBC status in the current namespace
    obc_bound_list, obc_not_bound_list = scale_noobaa_lib.check_all_obcs_status(
        namespace
    )

    # Check status of OBCs scaled in pre-upgrade
    if len(obc_bound_list) != len(obc_scale_list):
        raise UnexpectedBehaviour(
            f"OBC bound list count mismatch: {len(obc_not_bound_list)} OBCs "
            f"not in Bound state: {obc_not_bound_list}"
        )
    log.info(f"As expected, all {len(obc_bound_list)} OBCs are in Bound state")

    # Check ceph health status
    utils.ceph_health_check()

    # Clean up all scaled OBCs
    scale_noobaa_lib.cleanup(namespace=namespace)
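# The methods below run inside a test class and reference attributes such as
# self.scale_obc_count, self.num_obc_batch, self.sc_name, self.sc_rgw_name and
# self.namespace, which are set up outside this excerpt. A minimal sketch of
# that scaffolding, with illustrative values (the real class name, attribute
# values and fixture wiring may differ), could look like:
#
# class TestScaleOBCCreation:
#     namespace = constants.OPENSHIFT_STORAGE_NAMESPACE  # assumed
#     sc_name = constants.NOOBAA_SC  # assumed
#     sc_rgw_name = constants.DEFAULT_STORAGECLASS_RGW  # assumed
#     scale_obc_count = 500  # illustrative value
#     num_obc_batch = 50  # illustrative value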
def test_scale_mcg_obc_creation(self, tmp_path, timeout=60):
    """
    MCG OBC creation using the Noobaa storage class
    """
    log.info(
        f"Start creating {self.scale_obc_count} "
        f"OBCs in a batch of {self.num_obc_batch}"
    )
    for i in range(int(self.scale_obc_count / self.num_obc_batch)):
        obc_dict_list = (
            scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                no_of_obc=self.num_obc_batch,
                sc_name=self.sc_name,
                namespace=self.namespace,
            )
        )
        # Create job profile
        job_file = ObjectConfFile(
            name="job_profile",
            obj_dict_list=obc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        # Create kube_job
        job_file.create(namespace=self.namespace)
        time.sleep(timeout * 5)

        # Check that all the OBCs reached Bound state
        obc_bound_list = (
            scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                kube_job_obj=job_file,
                namespace=self.namespace,
                no_of_obc=self.num_obc_batch,
            )
        )
        log.info(f"Number of OBCs in Bound state: {len(obc_bound_list)}")

    # Delete all OBCs on the cluster
    scale_noobaa_lib.cleanup(self.namespace)
def test_scale_mcg_rgw_obc_creation(self, tmp_path, timeout=60):
    """
    OBC creation for both the MCG and RGW storage classes
    This test case only runs on vSphere cluster deployments
    """
    log.info(
        f"Start creating {self.scale_obc_count} OBCs in a batch of {self.num_obc_batch}"
    )
    for i in range(int(self.scale_obc_count / self.num_obc_batch)):
        obc_dict_list1 = (
            scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                no_of_obc=int(self.num_obc_batch / 2),
                sc_name=self.sc_name,
                namespace=self.namespace,
            )
        )
        obc_dict_list2 = (
            scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                no_of_obc=int(self.num_obc_batch / 2),
                sc_name=self.sc_rgw_name,
                namespace=self.namespace,
            )
        )
        # Create job profiles
        job_file1 = ObjectConfFile(
            name="job_profile1",
            obj_dict_list=obc_dict_list1,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        job_file2 = ObjectConfFile(
            name="job_profile2",
            obj_dict_list=obc_dict_list2,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        # Create kube_jobs
        job_file1.create(namespace=self.namespace)
        time.sleep(timeout * 3)
        job_file2.create(namespace=self.namespace)
        time.sleep(timeout * 3)

        # Check that all the OBCs reached Bound state
        obc_mcg_bound_list = (
            scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                kube_job_obj=job_file1,
                namespace=self.namespace,
                no_of_obc=int(self.num_obc_batch / 2),
            )
        )
        obc_rgw_bound_list = (
            scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                kube_job_obj=job_file2,
                namespace=self.namespace,
                no_of_obc=int(self.num_obc_batch / 2),
            )
        )
        log.info(
            f"Number of OBCs in Bound state MCG: {len(obc_mcg_bound_list)},"
            f" RGW: {len(obc_rgw_bound_list)}"
        )

    # Delete all OBCs on the cluster
    scale_noobaa_lib.cleanup(self.namespace)
def test_scale_obc_create_delete_time(self, tmp_path):
    """
    Measure MCG OBC creation and deletion time using the Noobaa MCG storage class
    """
    log.info(
        f"Start creating {self.scale_obc_count} "
        f"OBCs in a batch of {self.num_obc_batch}"
    )
    obc_create = dict()
    obc_delete = dict()
    for i in range(int(self.scale_obc_count / self.num_obc_batch)):
        obc_dict_list = (
            scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                no_of_obc=self.num_obc_batch,
                sc_name=constants.NOOBAA_SC,
                namespace=self.namespace,
            )
        )
        # Create job profile
        job_file = ObjectConfFile(
            name="job_profile",
            obj_dict_list=obc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        # Create kube_job
        job_file.create(namespace=self.namespace)

        # Check that all the OBCs reached Bound state
        obc_bound_list = (
            scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                kube_job_obj=job_file,
                namespace=self.namespace,
                no_of_obc=self.num_obc_batch,
            )
        )
        log.info(f"Number of OBCs in Bound state: {len(obc_bound_list)}")

        # Measure obc creation time
        obc_creation_time = scale_noobaa_lib.measure_obc_creation_time(
            obc_name_list=obc_bound_list
        )
        obc_create.update(obc_creation_time)

    # Delete all OBCs in batches; slice size matches the batch step
    # (fixed: the slice previously used a hardcoded 20)
    obc_name_list = list(oc_get_all_obc_names())
    new_list = [
        obc_name_list[i : i + self.num_obc_batch]
        for i in range(0, len(obc_name_list), self.num_obc_batch)
    ]
    for i in range(len(new_list)):
        scale_noobaa_lib.cleanup(self.namespace, obc_count=new_list[i])
        obc_deletion_time = scale_noobaa_lib.measure_obc_deletion_time(
            obc_name_list=new_list[i]
        )
        obc_delete.update(obc_deletion_time)

    # Store obc creation times in a csv file
    log_path = f"{ocsci_log_path()}/obc-creation"
    with open(f"{log_path}-{constants.NOOBAA_SC}.csv", "w") as fd:
        csv_obj = csv.writer(fd)
        for k, v in obc_create.items():
            csv_obj.writerow([k, v])
    log.info(f"OBC creation data is available in {log_path}-{constants.NOOBAA_SC}.csv")

    # Store obc deletion times in a csv file
    # (fixed: this loop previously iterated obc_create instead of obc_delete)
    log_path = f"{ocsci_log_path()}/obc-deletion"
    with open(f"{log_path}-{constants.NOOBAA_SC}.csv", "w") as fd:
        csv_obj = csv.writer(fd)
        for k, v in obc_delete.items():
            csv_obj.writerow([k, v])
    log.info(f"OBC deletion data is available in {log_path}-{constants.NOOBAA_SC}.csv")
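# The two CSV files written above hold one "<obc name>,<seconds>" row per OBC.
# The helper below is a hypothetical sketch, not part of ocs-ci, for
# summarizing either file after a run; it assumes the second column parses as
# a float.
def summarize_obc_times(csv_path):
    """Log min/max/average of per-OBC times from a two-column csv file."""
    with open(csv_path) as fd:
        times = [float(row[1]) for row in csv.reader(fd) if row]
    log.info(
        f"{csv_path}: count={len(times)}, min={min(times):.2f}s, "
        f"max={max(times):.2f}s, avg={sum(times) / len(times):.2f}s"
    )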
def finalizer():
    scale_noobaa_lib.cleanup(constants.OPENSHIFT_STORAGE_NAMESPACE)
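# In a pytest test class this finalizer is typically registered through a
# fixture so that cleanup runs even when a test fails. A minimal sketch of
# that wiring, assuming standard pytest (the fixture name is illustrative):
#
# import pytest
#
# @pytest.fixture(autouse=True)
# def teardown(request):
#     request.addfinalizer(finalizer)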