def setup(
    self,
    storageclass_factory,
    project_factory,
    snapshot_restore_factory,
    pvc_clone_factory,
    create_pvcs_and_pods,
    pvc_create_sc_type,
    restore_sc_type,
):
    """
    Create Storage Class, PVCs and pods
    """
    self.pvc_size = 2

    # Thick provisioning is applicable only for RBD: when either the
    # create or the restore side asks for a thick SC, CephFS PVCs are
    # skipped entirely.
    if "thick" in (pvc_create_sc_type, restore_sc_type):
        sc_by_type = {
            "thin": default_storage_class(constants.CEPHBLOCKPOOL),
            "thick": default_thick_storage_class(),
        }
        access_modes_cephfs = None
        num_of_cephfs_pvc = 0
    else:
        sc_by_type = {
            "thin": default_storage_class(constants.CEPHFILESYSTEM),
            "thick": None,
        }
        access_modes_cephfs = [constants.ACCESS_MODE_RWO]
        num_of_cephfs_pvc = 1

    self.pvc_create_sc = sc_by_type[pvc_create_sc_type]
    self.restore_sc = sc_by_type[restore_sc_type]

    self.pvcs, self.pods = create_pvcs_and_pods(
        pvc_size=self.pvc_size,
        access_modes_rbd=[constants.ACCESS_MODE_RWO],
        access_modes_cephfs=access_modes_cephfs,
        num_of_rbd_pvc=1,
        num_of_cephfs_pvc=num_of_cephfs_pvc,
        sc_rbd=self.pvc_create_sc,
    )
    self.ct_pod = pod.get_ceph_tools_pod()

    if pvc_create_sc_type == "thick":
        # A freshly created thick PVC must already consume its full
        # requested capacity in the pool.
        assert check_rbd_image_used_size(
            pvc_objs=self.pvcs,
            usage_to_compare=f"{self.pvc_size}GiB",
            rbd_pool=constants.DEFAULT_BLOCKPOOL,
            expect_match=True,
        ), "One or more PVCs are not thick provisioned."
def test_delete_provisioner_pod_while_thick_provisioning(
    self,
    pvc_factory,
    pod_factory,
):
    """
    Test to delete RBD provisioner leader pod while creating a PVC using
    thick provision enabled storage class
    """
    pvc_size = 20
    pool_name = default_ceph_block_pool()
    executor = ThreadPoolExecutor(max_workers=1)
    DISRUPTION_OPS.set_resource(
        resource="rbdplugin_provisioner", leader_type="provisioner"
    )

    # Start creation of PVC in the background so the provisioner pod can
    # be deleted while provisioning is still in progress
    pvc_create = executor.submit(
        pvc_factory,
        interface=constants.CEPHBLOCKPOOL,
        project=self.proj_obj,
        storageclass=default_thick_storage_class(),
        size=pvc_size,
        access_mode=constants.ACCESS_MODE_RWO,
        status="",
    )

    # Ensure that the PVC is being created before deleting the rbd
    # provisioner pod
    ret = helpers.wait_for_resource_count_change(
        get_all_pvcs, 0, self.proj_obj.namespace, "increase"
    )
    assert ret, "Wait timeout: PVC is not being created."
    logger.info("PVC creation has started.")
    DISRUPTION_OPS.delete_resource()
    logger.info("Deleted RBD provisioner leader pod.")

    pvc_obj = pvc_create.result()

    # Confirm that the PVC is Bound
    helpers.wait_for_resource_state(
        resource=pvc_obj, state=constants.STATUS_BOUND, timeout=600
    )
    pvc_obj.reload()
    logger.info(f"Verified: PVC {pvc_obj.name} reached Bound state.")
    image_name = pvc_obj.get_rbd_image_name
    pv_obj = pvc_obj.backed_pv_obj

    # Verify thick provision by checking the image used size
    assert check_rbd_image_used_size(
        pvc_objs=[pvc_obj],
        usage_to_compare=f"{pvc_size}GiB",
        rbd_pool=pool_name,
        expect_match=True,
    ), f"PVC {pvc_obj.name} is not thick provisioned.\n PV describe :\n {pv_obj.describe()}"
    logger.info("Verified: The PVC is thick provisioned")

    # Create pod and run IO
    pod_obj = pod_factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=pvc_obj,
        status=constants.STATUS_RUNNING,
    )
    pod_obj.run_io(
        storage_type="fs",
        size=f"{pvc_size-1}G",
        fio_filename=f"{pod_obj.name}_io",
        end_fsync=1,
    )

    # Get IO result
    get_fio_rw_iops(pod_obj)

    logger.info(f"Deleting pod {pod_obj.name}")
    pod_obj.delete()
    # Fix: the original wrote `wait_for_delete(...), "message"` which
    # builds and discards a tuple — the message was dead code and no
    # assertion happened. Assert explicitly instead.
    assert pod_obj.ocp.wait_for_delete(
        pod_obj.name, 180
    ), f"Pod {pod_obj.name} is not deleted"

    # Fetch image id for verification
    image_uid = pvc_obj.image_uuid

    logger.info(f"Deleting PVC {pvc_obj.name}")
    pvc_obj.delete()
    assert pvc_obj.ocp.wait_for_delete(
        pvc_obj.name
    ), f"PVC {pvc_obj.name} is not deleted"
    logger.info(f"Verified: PVC {pvc_obj.name} is deleted.")
    assert pv_obj.ocp.wait_for_delete(
        pv_obj.name
    ), f"PV {pv_obj.name} is not deleted"
    logger.info(f"Verified: PV {pv_obj.name} is deleted.")

    # Verify the rbd image is deleted
    logger.info(f"Wait for the RBD image {image_name} to get deleted")
    assert verify_volume_deleted_in_backend(
        interface=constants.CEPHBLOCKPOOL,
        image_uuid=image_uid,
        pool_name=pool_name,
        timeout=300,
    ), f"Wait timeout - RBD image {image_name} is not deleted"
    logger.info(f"Verified: RBD image {image_name} is deleted")
def test_delete_rbd_pvc_while_thick_provisioning(
    self,
    resource_to_delete,
    pvc_factory,
    pod_factory,
):
    """
    Test to delete RBD PVC while thick provisioning is progressing and
    verify that no stale image is present.
    Based on the value of "resource_to_delete", provisioner pod also
    will be deleted.
    """
    pvc_size = 15
    executor = ThreadPoolExecutor(max_workers=1)

    if resource_to_delete:
        DISRUPTION_OPS.set_resource(
            resource=resource_to_delete, leader_type="provisioner"
        )

    ct_pod = get_ceph_tools_pod()

    # Fix: the initial listing used constants.DEFAULT_BLOCKPOOL while the
    # final listing and backend verification used default_ceph_block_pool().
    # If those ever differ, the stale-image diff would compare two
    # different pools. Use a single pool name throughout.
    pool_name = default_ceph_block_pool()

    # Collect the list of RBD images
    image_list_out_initial = ct_pod.exec_ceph_cmd(
        ceph_cmd=f"rbd ls -p {pool_name}", format=""
    )
    image_list_initial = image_list_out_initial.strip().split()
    log.info(
        f"List of RBD images before creating the PVC {image_list_initial}")

    # Start creation of PVC
    pvc_obj = pvc_factory(
        interface=constants.CEPHBLOCKPOOL,
        project=self.proj_obj,
        storageclass=default_thick_storage_class(),
        size=pvc_size,
        access_mode=constants.ACCESS_MODE_RWO,
        status="",
    )

    # Ensure that the PVC is being created
    ret = wait_for_resource_count_change(
        get_all_pvcs, 0, self.proj_obj.namespace, "increase"
    )
    assert ret, "Wait timeout: PVC is not being created."
    log.info("PVC creation has started.")

    if resource_to_delete:
        log.info(f"Deleting {resource_to_delete} pod.")
        delete_provisioner = executor.submit(DISRUPTION_OPS.delete_resource)

    # Delete PVC while provisioning is presumably still in progress
    log.info(f"Deleting PVC {pvc_obj.name}")
    pvc_obj.delete()
    pvc_obj.ocp.wait_for_delete(pvc_obj.name)
    log.info(f"Verified: PVC {pvc_obj.name} is deleted.")

    if resource_to_delete:
        # Surface any exception raised by the pod deletion thread
        delete_provisioner.result()

    # Collect the list of RBD images
    image_list_out_final = ct_pod.exec_ceph_cmd(
        ceph_cmd=f"rbd ls -p {pool_name}", format=""
    )
    image_list_final = image_list_out_final.strip().split()
    log.info(
        f"List of RBD images after deleting the PVC {image_list_final}")

    stale_images = [
        image for image in image_list_final
        if image not in image_list_initial
    ]

    # Check whether more than one new image is present
    if len(stale_images) > 1:
        raise UnexpectedBehaviour(
            f"Could not verify the test result. Found more than one new rbd image - {stale_images}."
        )

    if stale_images:
        stale_image = stale_images[0].strip()
        # Wait for the image to get deleted
        image_deleted = verify_volume_deleted_in_backend(
            constants.CEPHBLOCKPOOL,
            image_uuid=stale_image.split("csi-vol-")[1],
            pool_name=pool_name,
            timeout=300,
        )
        if not image_deleted:
            # Capture image usage to aid debugging before failing
            du_out = ct_pod.exec_ceph_cmd(
                ceph_cmd=f"rbd du -p {pool_name} {stale_image}",
                format="",
            )
            assert image_deleted, (
                f"Wait timeout: RBD image {stale_image} is not deleted. Check the logs to ensure that"
                f" this is the stale image of the deleted PVC. rbd du output of the image : {du_out}"
            )
        log.info(
            f"Image {stale_image} deleted within the wait time period")
    else:
        log.info("No stale image found")
def test_verify_rbd_thick_pvc_utilization(
    self,
    pvc_factory,
    pod_factory,
):
    """
    Test to verify the storage utilization of RBD thick provisioned PVC
    """
    pvc_size = 15
    replica_size = 3
    file1 = "fio_file1"
    file2 = "fio_file2"
    rbd_pool = default_ceph_block_pool()

    size_before_pvc = fetch_used_size(rbd_pool)
    log.info(
        f"Storage pool used size before creating the PVC is {size_before_pvc}"
    )

    # A thick PVC reserves its whole capacity up front, replicated
    # across OSDs, so this is the pool usage expected until the PVC
    # itself is deleted.
    expected_used = size_before_pvc + (pvc_size * replica_size)

    # Create RBD thick PVC
    pvc_obj = pvc_factory(
        interface=constants.CEPHBLOCKPOOL,
        project=self.proj_obj,
        storageclass=default_thick_storage_class(),
        size=pvc_size,
        access_mode=constants.ACCESS_MODE_RWO,
        status=constants.STATUS_BOUND,
    )
    size_after_pvc = fetch_used_size(rbd_pool, expected_used)
    log.info(
        f"Verified: Storage pool used size after creating the PVC is {size_after_pvc}"
    )

    pod_obj = pod_factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=pvc_obj,
        status=constants.STATUS_RUNNING,
    )

    # Write two 5GB files; after each write the pool usage must stay at
    # the already-reserved value
    for fio_file in (file1, file2):
        pod_obj.run_io(
            storage_type="fs",
            size="5G",
            runtime=60,
            fio_filename=fio_file,
            end_fsync=1,
        )
        pod_obj.get_fio_results()
        # Verify the used size after IO
        fetch_used_size(rbd_pool, expected_used)

    # Delete the files created by fio
    mount_point = pod_obj.get_storage_path()
    rm_cmd = f"rm {path.join(mount_point, file1)} {path.join(mount_point, file2)}"
    pod_obj.exec_cmd_on_pod(command=rm_cmd, out_yaml_format=False)

    # Verify the used size after deleting the files
    fetch_used_size(rbd_pool, expected_used)

    # Delete the pod
    pod_obj.delete()
    pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

    # Delete the PVC
    pvc_obj.delete()
    pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

    # Verify used size after deleting the PVC
    size_after_pvc_delete = fetch_used_size(rbd_pool, size_before_pvc)
    log.info(
        f"Verified: Storage pool used size after deleting the PVC is {size_after_pvc_delete}"
    )