Example #1
    def setup(self, multi_pvc_factory, pod_factory):
        """
        Create resources for the test

        """
        access_modes_cephfs = [
            constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX
        ]
        access_modes_rbd = [
            constants.ACCESS_MODE_RWO,
            f"{constants.ACCESS_MODE_RWO}-Block",
            f"{constants.ACCESS_MODE_RWX}-Block",
        ]

        self.pvcs_cephfs = multi_pvc_factory(
            interface=constants.CEPHFILESYSTEM,
            size=10,
            access_modes=access_modes_cephfs,
            status=constants.STATUS_BOUND,
            num_of_pvc=2,
            timeout=90,
        )

        self.pvcs_rbd = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.pvcs_cephfs[0].project,
            size=10,
            access_modes=access_modes_rbd,
            status=constants.STATUS_BOUND,
            num_of_pvc=3,
            timeout=90,
        )

        pods_cephfs = helpers.create_pods(
            self.pvcs_cephfs,
            pod_factory,
            constants.CEPHFILESYSTEM,
            2,
            constants.STATUS_RUNNING,
        )
        pods_rbd = helpers.create_pods(
            self.pvcs_rbd,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            2,
            constants.STATUS_RUNNING,
        )

        self.pods = pods_cephfs + pods_rbd

        # Set volume mode on PVC objects
        for pvc_obj in self.pvcs_cephfs + self.pvcs_rbd:
            pvc_info = pvc_obj.get()
            setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])
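The `-Block` suffix on the access modes above is a factory convention for requesting `volumeMode: Block` on the PVC. A minimal sketch of how such a suffix could be split into access mode and volume mode (the helper name is hypothetical, not part of the framework):

def split_access_mode(mode):
    """Return (access_mode, volume_mode) for modes like 'ReadWriteOnce-Block'."""
    if mode.endswith("-Block"):
        return mode[: -len("-Block")], "Block"
    return mode, "Filesystem"

assert split_access_mode("ReadWriteMany-Block") == ("ReadWriteMany", "Block")
assert split_access_mode("ReadWriteOnce") == ("ReadWriteOnce", "Filesystem")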
    def setup(self, project_factory, snapshot_restore_factory,
              multi_pvc_factory, pod_factory):
        """
        Create PVCs and pods

        """
        self.pvc_size = 5

        self.pvc_objs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            size=self.pvc_size,
            access_modes=[
                f"{constants.ACCESS_MODE_RWX}-Block",
                f"{constants.ACCESS_MODE_RWO}-Block",
            ],
            status=constants.STATUS_BOUND,
            num_of_pvc=2,
            wait_each=False,
        )

        self.pod_objs = create_pods(
            self.pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status=constants.STATUS_RUNNING,
        )
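`create_pods` with `pods_for_rwx=1` attaches one pod per PVC; larger values fan each RWX PVC out to several pods. A rough sketch of that fan-out, assuming a simplified `access_mode` attribute rather than the helper's real internals:

from types import SimpleNamespace

def fan_out_pods(pvc_objs, make_pod, pods_for_rwx=1):
    # RWX PVCs get 'pods_for_rwx' pods each; every other PVC gets one pod.
    pods = []
    for pvc in pvc_objs:
        count = pods_for_rwx if pvc.access_mode == "ReadWriteMany" else 1
        pods.extend(make_pod(pvc) for _ in range(count))
    return pods

pvcs = [SimpleNamespace(access_mode="ReadWriteMany"),
        SimpleNamespace(access_mode="ReadWriteOnce")]
pods = fan_out_pods(pvcs, make_pod=lambda pvc: f"pod-{pvc.access_mode}",
                    pods_for_rwx=2)
assert pods == ["pod-ReadWriteMany", "pod-ReadWriteMany", "pod-ReadWriteOnce"]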
Example #3
    def test_pvc_disruptive(
        self,
        interface,
        operation_to_disrupt,
        resource_to_delete,
        multi_pvc_factory,
        pod_factory,
    ):
        """
        Base function for PVC disruptive tests.
        Deletion of 'resource_to_delete' will be introduced while
        'operation_to_disrupt' is progressing.
        """
        pod_functions = {
            "mds": partial(pod.get_mds_pods),
            "mon": partial(pod.get_mon_pods),
            "mgr": partial(pod.get_mgr_pods),
            "osd": partial(pod.get_osd_pods),
            "rbdplugin": partial(pod.get_plugin_pods, interface=interface),
            "cephfsplugin": partial(pod.get_plugin_pods, interface=interface),
            "cephfsplugin_provisioner": partial(
                pod.get_cephfsplugin_provisioner_pods),
            "rbdplugin_provisioner": partial(
                pod.get_rbdfsplugin_provisioner_pods),
            "operator": partial(pod.get_operator_pods),
        }

        # Get number of pods of type 'resource_to_delete'
        num_of_resource_to_delete = len(pod_functions[resource_to_delete]())

        num_of_pvc = 12
        namespace = self.proj_obj.namespace

        # Fetch the number of Pods and PVCs
        initial_num_of_pods = len(pod.get_all_pods(namespace=namespace))
        initial_num_of_pvc = len(get_all_pvcs(namespace=namespace)["items"])

        executor = ThreadPoolExecutor(max_workers=(2 * num_of_pvc))

        DISRUPTION_OPS.set_resource(resource=resource_to_delete)

        access_modes = [constants.ACCESS_MODE_RWO]
        if interface == constants.CEPHFILESYSTEM:
            access_modes.append(constants.ACCESS_MODE_RWX)

        # Modify access_modes list to create rbd `block` type volume with
        # RWX access mode. RWX is not supported in non-block type rbd
        if interface == constants.CEPHBLOCKPOOL:
            access_modes.extend([
                f"{constants.ACCESS_MODE_RWO}-Block",
                f"{constants.ACCESS_MODE_RWX}-Block",
            ])

        # Start creation of PVCs
        bulk_pvc_create = executor.submit(
            multi_pvc_factory,
            interface=interface,
            project=self.proj_obj,
            size=5,
            access_modes=access_modes,
            access_modes_selection="distribute_random",
            status=constants.STATUS_BOUND,
            num_of_pvc=num_of_pvc,
            wait_each=False,
            timeout=90,
        )

        if operation_to_disrupt == "create_pvc":
            # Ensure PVCs are being created before deleting the resource
            ret = helpers.wait_for_resource_count_change(
                get_all_pvcs, initial_num_of_pvc, namespace, "increase")
            assert ret, "Wait timeout: PVCs are not being created."
            logger.info("PVCs creation has started.")
            DISRUPTION_OPS.delete_resource()

        pvc_objs = bulk_pvc_create.result()

        # Confirm that PVCs are Bound
        for pvc_obj in pvc_objs:
            helpers.wait_for_resource_state(resource=pvc_obj,
                                            state=constants.STATUS_BOUND,
                                            timeout=120)
            pvc_obj.reload()
        logger.info("Verified: PVCs are Bound.")

        # Start creating pods
        bulk_pod_create = executor.submit(helpers.create_pods, pvc_objs,
                                          pod_factory, interface, 2)

        if operation_to_disrupt == "create_pod":
            # Ensure that pods are being created before deleting the resource
            ret = helpers.wait_for_resource_count_change(
                pod.get_all_pods, initial_num_of_pods, namespace, "increase")
            assert ret, "Wait timeout: Pods are not being created."
            logger.info("Pods creation has started.")
            DISRUPTION_OPS.delete_resource()

        pod_objs = bulk_pod_create.result()

        # Verify pods are Running
        for pod_obj in pod_objs:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING)
            pod_obj.reload()
        logger.info("Verified: All pods are Running.")

        # Do setup on pods for running IO
        logger.info("Setting up pods for running IO.")
        for pod_obj in pod_objs:
            pvc_info = pod_obj.pvc.get()
            if pvc_info["spec"]["volumeMode"] == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on pods to complete
        for pod_obj in pod_objs:
            logger.info(
                f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(180, 2, getattr, pod_obj,
                                         "wl_setup_done"):
                if sample:
                    logger.info(f"Setup for running IO is completed on pod "
                                f"{pod_obj.name}.")
                    break
        logger.info("Setup for running IO is completed on all pods.")

        # Start IO on each pod
        for pod_obj in pod_objs:
            pvc_info = pod_obj.pvc.get()
            if pvc_info["spec"]["volumeMode"] == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            pod_obj.run_io(
                storage_type=storage_type,
                size="1G",
                runtime=10,
                fio_filename=f"{pod_obj.name}_io_file1",
            )
        logger.info("FIO started on all pods.")

        if operation_to_disrupt == "run_io":
            DISRUPTION_OPS.delete_resource()

        logger.info("Fetching FIO results.")
        for pod_obj in pod_objs:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
        logger.info("Verified FIO result on pods.")

        # Delete pods
        for pod_obj in pod_objs:
            pod_obj.delete(wait=True)
        for pod_obj in pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name)

        # Verify that PVCs are reusable by creating new pods
        pod_objs = helpers.create_pods(pvc_objs, pod_factory, interface, 2)

        # Verify new pods are Running
        for pod_obj in pod_objs:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING)
            pod_obj.reload()
        logging.info("Verified: All new pods are Running.")

        # Run IO on each of the new pods
        for pod_obj in pod_objs:
            pvc_info = pod_obj.pvc.get()
            if pvc_info["spec"]["volumeMode"] == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            pod_obj.run_io(
                storage_type=storage_type,
                size="1G",
                runtime=10,
                fio_filename=f"{pod_obj.name}_io_file2",
            )

        logger.info("Fetching FIO results from new pods")
        for pod_obj in pod_objs:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
        logger.info("Verified FIO result on new pods.")

        # Verify number of pods of type 'resource_to_delete'
        final_num_resource_to_delete = len(pod_functions[resource_to_delete]())
        assert final_num_resource_to_delete == num_of_resource_to_delete, (
            f"Total number of {resource_to_delete} pods does not match the "
            f"initial value. Total number of pods before deleting a pod: "
            f"{num_of_resource_to_delete}. Total number of pods present now: "
            f"{final_num_resource_to_delete}")

        # Check ceph status
        ceph_health_check(namespace=config.ENV_DATA["cluster_namespace"])
        logger.info("Ceph cluster health is OK")
Example #4
def test_create_delete_pvcs(multi_pvc_factory, pod_factory, project=None):
    # create the pods for deleting
    # Create rbd pvcs for pods
    pvc_objs_rbd = create_pvcs(multi_pvc_factory,
                               "CephBlockPool",
                               project=project)
    storageclass_rbd = pvc_objs_rbd[0].storageclass

    # Create cephfs pvcs for pods
    pvc_objs_cephfs = create_pvcs(multi_pvc_factory,
                                  "CephFileSystem",
                                  project=project)
    storageclass_cephfs = pvc_objs_cephfs[0].storageclass

    all_pvc_for_pods = pvc_objs_rbd + pvc_objs_cephfs
    # Check pvc status
    for pvc_obj in all_pvc_for_pods:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=1200,  # Timeout given 20 minutes
        )
        pvc_info = pvc_obj.get()
        setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])

    # Create pods
    rbd_pods_to_delete = helpers.create_pods(pvc_objs_rbd, pod_factory,
                                             constants.RBD_INTERFACE)
    cephfs_pods_to_delete = helpers.create_pods(pvc_objs_cephfs, pod_factory,
                                                constants.CEPHFS_INTERFACE)
    pods_to_delete = rbd_pods_to_delete + cephfs_pods_to_delete
    for pod_obj in pods_to_delete:
        helpers.wait_for_resource_state(
            resource=pod_obj,
            state=constants.STATUS_RUNNING,
            timeout=300,  # Timeout given 5 minutes
        )

    log.info(
        f"#### Created the pods for deletion later...pods = {pods_to_delete}")
    # Create PVCs for deleting
    # Create rbd pvcs for deleting
    pvc_objs_rbd = create_pvcs(
        multi_pvc_factory=multi_pvc_factory,
        interface="CephBlockPool",
        project=project,
        status="",
        storageclass=storageclass_rbd,
    )

    # Create cephfs pvcs for deleting
    pvc_objs_cephfs = create_pvcs(
        multi_pvc_factory=multi_pvc_factory,
        interface="CephFileSystem",
        project=project,
        status="",
        storageclass=storageclass_cephfs,
    )

    all_pvc_to_delete = pvc_objs_rbd + pvc_objs_cephfs
    # Check pvc status
    for pvc_obj in all_pvc_to_delete:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=300,  # Timeout given 5 minutes
        )

    log.info(
        f"#### Created the PVCs for deletion later...PVCs={all_pvc_to_delete}")

    # Create PVCs for new pods
    pvc_objs_rbd = create_pvcs(
        multi_pvc_factory=multi_pvc_factory,
        interface="CephBlockPool",
        project=project,
        status="",
        storageclass=storageclass_rbd,
    )

    # Create cephfs pvcs for new pods
    pvc_objs_cephfs = create_pvcs(
        multi_pvc_factory=multi_pvc_factory,
        interface="CephFileSystem",
        project=project,
        status="",
        storageclass=storageclass_cephfs,
    )

    all_pvc_for_new_pods = pvc_objs_rbd + pvc_objs_cephfs
    # Check pvc status
    for pvc_obj in all_pvc_for_new_pods:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=300,  # Timeout given 5 minutes
        )
        pvc_info = pvc_obj.get()
        setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])

    log.info(
        f"#### Created the PVCs required for creating New Pods...{all_pvc_for_new_pods}"
    )

    executor = ThreadPoolExecutor(max_workers=10)
    # Start creating new PVCs
    # Start creating rbd PVCs
    rbd_pvc_executor = executor.submit(
        create_pvcs,
        multi_pvc_factory=multi_pvc_factory,
        interface="CephBlockPool",
        project=project,
        status="",
        storageclass=storageclass_rbd,
    )

    log.info("#### Started creating new RBD PVCs in thread...")
    # Start creating cephfs pvc
    cephfs_pvc_executor = executor.submit(
        create_pvcs,
        multi_pvc_factory=multi_pvc_factory,
        interface="CephFileSystem",
        project=project,
        status="",
        storageclass=storageclass_cephfs,
    )

    log.info("#### Started creating new cephfs PVCs in thread...")
    # Start creating pods
    rbd_pods_create_executor = executor.submit(helpers.create_pods,
                                               pvc_objs_rbd, pod_factory,
                                               constants.RBD_INTERFACE)
    cephfs_pods_create_executor = executor.submit(helpers.create_pods,
                                                  pvc_objs_cephfs, pod_factory,
                                                  constants.CEPHFS_INTERFACE)

    # Start deleting pods
    pods_delete_executor = executor.submit(delete_pods, pods_to_delete)
    log.info(f"### Started deleting the pods_to_delete = {pods_to_delete}")

    # Start deleting PVC
    pvc_delete_executor = executor.submit(delete_pvcs, all_pvc_to_delete)
    log.info(
        f"### Started deleting the all_pvc_to_delete = {all_pvc_to_delete}")

    log.info(
        "Bulk PVC and pod deletion and bulk PVC and pod creation have "
        "started. Waiting for them to complete")

    while not (rbd_pvc_executor.done() and cephfs_pvc_executor.done()
               and rbd_pods_create_executor.done()
               and cephfs_pods_create_executor.done()
               and pods_delete_executor.done() and pvc_delete_executor.done()):
        sleep(10)
        log.info(
            "#### create_delete_pvcs....Waiting for threads to complete...")

    new_rbd_pvcs = rbd_pvc_executor.result()
    new_cephfs_pvcs = cephfs_pvc_executor.result()
    new_pods = (cephfs_pods_create_executor.result()
                + rbd_pods_create_executor.result())

    # Check pvc status
    for pvc_obj in new_rbd_pvcs + new_cephfs_pvcs:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=300,  # Timeout given 5 minutes
        )

    log.info("All new PVCs are bound")

    # Check pods status
    for pod_obj in new_pods:
        helpers.wait_for_resource_state(
            resource=pod_obj,
            state=constants.STATUS_RUNNING,
            timeout=300,  # Timeout given 5 minutes
        )
    log.info("All new pods are running")

    # Check pods are deleted
    for pod_obj in pods_to_delete:
        pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

    log.info("All pods are deleted as expected.")

    # Check PVCs are deleted
    for pvc_obj in all_pvc_to_delete:
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

    log.info("All PVCs are deleted as expected")
    def test_rbd_block_pvc_snapshot(self, snapshot_factory,
                                    snapshot_restore_factory, pod_factory):
        """
        Test to take snapshots of RBD Block VolumeMode PVCs

        """
        # Run IO
        log.info("Find initial md5sum value and run IO on all pods")
        for pod_obj in self.pod_objs:
            # Find initial md5sum
            pod_obj.md5sum_before_io = cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            pod_obj.run_io(
                storage_type="block",
                size=f"{self.pvc_size - 1}G",
                io_direction="write",
                runtime=60,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in self.pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        snap_objs = []

        # Verify md5sum has changed after IO. Create snapshot
        log.info("Verify md5sum has changed after IO and create snapshot from "
                 "all PVCs")
        for pod_obj in self.pod_objs:
            md5sum_after_io = cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            assert (pod_obj.md5sum_before_io != md5sum_after_io
                    ), f"md5sum has not changed after IO on pod {pod_obj.name}"
            log.info(f"Creating snapshot of PVC {pod_obj.pvc.name}")
            snap_obj = snapshot_factory(pod_obj.pvc, wait=False)
            snap_obj.md5sum = md5sum_after_io
            snap_objs.append(snap_obj)
        log.info("Snapshots created")

        # Verify snapshots are ready
        log.info("Verify snapshots are ready")
        for snap_obj in snap_objs:
            snap_obj.ocp.wait_for_resource(
                condition="true",
                resource_name=snap_obj.name,
                column=constants.STATUS_READYTOUSE,
                timeout=180,
            )

        # Delete pods
        log.info("Deleting the pods")
        for pod_obj in self.pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
        log.info("Deleted all the pods")

        # Delete parent PVCs to verify snapshot is independent
        log.info("Deleting parent PVCs")
        for pvc_obj in self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            log.info(f"Deleted PVC {pvc_obj.name}. Verifying whether PV "
                     f"{pv_obj.name} is deleted.")
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)
        log.info("Deleted parent PVCs before restoring snapshot. "
                 "PVs are also deleted.")

        restore_pvc_objs = []

        # Create PVCs out of the snapshots
        log.info("Creating new PVCs from snapshots")
        for snap_obj in snap_objs:
            log.info(f"Creating a PVC from snapshot {snap_obj.name}")
            restore_pvc_obj = snapshot_restore_factory(
                snapshot_obj=snap_obj,
                size=f"{self.pvc_size}Gi",
                volume_mode=snap_obj.parent_volume_mode,
                access_mode=snap_obj.parent_access_mode,
                status="",
            )

            log.info(f"Created PVC {restore_pvc_obj.name} from snapshot "
                     f"{snap_obj.name}")
            restore_pvc_obj.md5sum = snap_obj.md5sum
            restore_pvc_objs.append(restore_pvc_obj)
        log.info("Created new PVCs from all the snapshots")

        # Confirm that the restored PVCs are Bound
        log.info("Verify the restored PVCs are Bound")
        for pvc_obj in restore_pvc_objs:
            wait_for_resource_state(resource=pvc_obj,
                                    state=constants.STATUS_BOUND,
                                    timeout=180)
            pvc_obj.reload()
        log.info("Verified: Restored PVCs are Bound.")

        # Attach the restored PVCs to pods. Attach RWX PVC on two pods
        log.info("Attach the restored PVCs to pods")
        restore_pod_objs = create_pods(
            restore_pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=2,
            status="",
        )

        # Verify the new pods are running
        log.info("Verify the new pods are running")
        for pod_obj in restore_pod_objs:
            wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        log.info("Verified: New pods are running")

        log.info("Verifying md5sum on new pods")
        for pod_obj in restore_pod_objs:
            log.info(f"Verifying md5sum on pod {pod_obj.name}")
            verify_data_integrity(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                original_md5sum=pod_obj.pvc.md5sum,
                block=True,
            )
            log.info(f"Verified md5sum on pod {pod_obj.name}")
        log.info("Verified md5sum on all pods")

        # Run IO on new pods
        log.info("Starting IO on new pods")
        for pod_obj in restore_pod_objs:
            pod_obj.run_io(storage_type="block", size="500M", runtime=15)

        # Wait for IO completion on new pods
        log.info("Waiting for IO completion on new pods")
        for pod_obj in restore_pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on new pods.")
    def test_rbd_pv_encryption_vaulttenantsa(
        self,
        project_factory,
        storageclass_factory,
        multi_pvc_factory,
        pod_factory,
        kv_version,
    ):
        """
        Test to verify creation and deletion of encrypted RBD PVC using vaulttenantsa method

        """
        # Create a project
        proj_obj = project_factory()

        # Create an encryption enabled storageclass for RBD
        sc_obj = storageclass_factory(
            interface=constants.CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.kms.kmsid,
        )

        # Create serviceaccount in the tenant namespace
        self.kms.create_tenant_sa(namespace=proj_obj.namespace)

        # Create role in Vault
        self.kms.create_vault_kube_auth_role(namespace=proj_obj.namespace)

        # Create RBD PVCs with volume mode Block
        pvc_size = 5
        pvc_objs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=proj_obj,
            storageclass=sc_obj,
            size=pvc_size,
            access_modes=[
                f"{constants.ACCESS_MODE_RWX}-Block",
                f"{constants.ACCESS_MODE_RWO}-Block",
            ],
            status=constants.STATUS_BOUND,
            num_of_pvc=3,
            wait_each=False,
        )

        # Create pods
        pod_objs = create_pods(
            pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status=constants.STATUS_RUNNING,
        )

        # Verify if the key is created in Vault
        vol_handles = []
        for pvc_obj in pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            vol_handles.append(vol_handle)

            # Check if the encryption key is created in Vault for each PVC
            if kms.is_key_present_in_path(key=vol_handle,
                                          path=self.kms.vault_backend_path):
                log.info(f"Vault: Found key for {pvc_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Vault: Key not found for {pvc_obj.name}")

        # Verify whether encrypted device is present inside the pod and run IO
        for vol_handle, pod_obj in zip(vol_handles, pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            pod_obj.run_io(
                storage_type="block",
                size=f"{pvc_size - 1}G",
                io_direction="write",
                runtime=60,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        # Delete the pod
        for pod_obj in pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        # Delete the PVC
        for pvc_obj in pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

        # Verify whether the key is deleted in Vault
        for vol_handle in vol_handles:
            if not kms.is_key_present_in_path(
                    key=vol_handle, path=self.kms.vault_backend_path):
                log.info(f"Vault: Key deleted for {vol_handle}")
            else:
                raise KMSResourceCleaneupError(
                    f"Vault: Key deletion failed for {vol_handle}")
Example #7
    def test_daemon_kill_during_pvc_pod_creation_deletion_and_io(
        self, setup_base, multi_pvc_factory, pod_factory
    ):
        """
        Kill ceph daemons while PVCs creation, PVCs deletion, pods creation, pods deletion
        and IO are progressing
        """
        daemons_to_kill = [
            "mgr",
            "mon",
            "osd",
            "mds",
        ]

        (
            pvc_objs,
            pod_objs,
            rwx_pod_objs,
            cephfs_pvc_for_pods,
            rbd_pvc_for_pods,
        ) = setup_base

        num_of_pods_to_delete = 3
        num_of_io_pods = 1
        num_pvc_create_during_disruption = len(
            self.access_modes_cephfs + self.access_modes_rbd
        )

        # Select pods to be deleted
        pods_to_delete = pod_objs[:num_of_pods_to_delete]
        pods_to_delete.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in pods_to_delete
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        # Select pods to run IO
        io_pods = pod_objs[
            num_of_pods_to_delete : num_of_pods_to_delete + num_of_io_pods
        ]
        io_pods.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in io_pods
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        # Select pods which are having PVCs to delete
        pods_for_pvc = pod_objs[num_of_pods_to_delete + num_of_io_pods :]
        pvcs_to_delete = [pod_obj.pvc for pod_obj in pods_for_pvc]
        pods_for_pvc.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in pods_for_pvc
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        io_pods = [
            pod_obj
            for pod_obj in io_pods
            if pod_obj.pvc in select_unique_pvcs([pod_obj.pvc for pod_obj in io_pods])
        ]

        log.info(
            f"{len(pods_to_delete)} pods selected for deletion in which "
            f"{len(pods_to_delete) - num_of_pods_to_delete} pairs of pod "
            f"share same RWX PVC"
        )
        log.info(
            f"{len(io_pods)} pods selected for running IO in which one "
            f"pair of pod share same RWX PVC"
        )
        no_of_rwx_pvcs_delete = len(pods_for_pvc) - len(pvcs_to_delete)
        log.info(
            f"{len(pvcs_to_delete)} PVCs selected for deletion. "
            f"RWO PVCs: {len(pvcs_to_delete) - no_of_rwx_pvcs_delete}, "
            f"RWX PVCs: {no_of_rwx_pvcs_delete}"
        )

        pod_functions = {
            "mds": partial(get_mds_pods),
            "mon": partial(get_mon_pods),
            "mgr": partial(get_mgr_pods),
            "osd": partial(get_osd_pods),
        }

        # Disruption object for each daemon type
        disruption_ops = [disruption_helpers.Disruptions() for _ in daemons_to_kill]

        # Select the resource of each type
        for disruption, pod_type in zip(disruption_ops, daemons_to_kill):
            disruption.set_resource(resource=pod_type)
        executor = ThreadPoolExecutor(
            max_workers=len(pod_objs)
            + len(rwx_pod_objs)
            + len(rbd_pvc_for_pods)
            + len(cephfs_pvc_for_pods)
            + len(daemons_to_kill)
            + num_pvc_create_during_disruption
        )

        # Get number of pods of the type given in daemons_to_kill list
        num_of_resource_pods = [
            len(pod_functions[resource_name]()) for resource_name in daemons_to_kill
        ]

        # Fetch PV names to verify after deletion
        pv_objs = []
        for pvc_obj in pvcs_to_delete:
            pv_objs.append(pvc_obj.backed_pv_obj)

        # Fetch volume details from pods for the purpose of verification
        node_pv_dict = {}
        for pod_obj in pods_to_delete:
            pod_info = pod_obj.get()
            node = pod_info["spec"]["nodeName"]
            pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
            for pvc_obj in pvc_objs:
                if pvc_obj.name == pvc:
                    pv = pvc_obj.backed_pv
                    break
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Fetch image uuid associated with PVCs to be deleted
        pvc_uuid_map = {}
        for pvc_obj in pvcs_to_delete:
            pvc_uuid_map[pvc_obj] = pvc_obj.image_uuid
        log.info("Fetched image uuid associated with each PVC")

        # Do setup on pods for running IO
        log.info("Setting up pods for running IO.")
        for pod_obj in pod_objs + rwx_pod_objs:
            if pod_obj.pvc.get_pvc_vol_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on pods to complete
        for pod_obj in pod_objs + rwx_pod_objs:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod {pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on all pods.")

        # Start IO on pods having PVCs to delete to load data
        pods_for_pvc_io = [
            pod_obj
            for pod_obj in pods_for_pvc
            if pod_obj.pvc
            in select_unique_pvcs([pod_obj.pvc for pod_obj in pods_for_pvc])
        ]
        log.info("Starting IO on pods having PVCs to delete.")
        self.run_io_on_pods(pods_for_pvc_io)
        log.info("IO started on pods having PVCs to delete.")

        log.info("Fetching IO results from the pods having PVCs to delete.")
        for pod_obj in pods_for_pvc_io:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on pods having PVCs to delete.")

        # Delete pods having PVCs to delete.
        assert self.delete_pods(
            pods_for_pvc
        ), "Couldn't delete pods which are having PVCs to delete."
        for pod_obj in pods_for_pvc:
            pod_obj.ocp.wait_for_delete(pod_obj.name)
        log.info("Verified: Deleted pods which are having PVCs to delete.")

        # Select daemon of each type of resource and identify the daemons running on each node
        nodes_and_pids = {}
        for disruption in disruption_ops:
            disruption.select_daemon()
            node_name = disruption.resource_obj[0].pod_data.get("spec").get("nodeName")
            # Create node-daemons dict. Value as string for passing in the 'kill' command
            nodes_and_pids[
                node_name
            ] = f"{nodes_and_pids.get(node_name, '')} {disruption.daemon_pid}"

        # Start IO on pods to be deleted
        pods_to_delete_io = [
            pod_obj
            for pod_obj in pods_to_delete
            if pod_obj.pvc
            in select_unique_pvcs([pod_obj.pvc for pod_obj in pods_to_delete])
        ]
        log.info("Starting IO on selected pods to be deleted.")
        self.run_io_on_pods(pods_to_delete_io)
        log.info("IO started on selected pods to be deleted.")

        # Start creating new pods
        log.info("Start creating new pods.")
        pod_create_rbd = executor.submit(
            helpers.create_pods,
            rbd_pvc_for_pods,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            2,
        )
        pod_create_cephfs = executor.submit(
            helpers.create_pods,
            cephfs_pvc_for_pods,
            pod_factory,
            constants.CEPHFILESYSTEM,
            2,
        )

        # Start creation of new CephFS PVCs.
        log.info("Start creating new CephFS PVCs.")
        pvc_create_cephfs = executor.submit(
            multi_pvc_factory,
            interface=constants.CEPHFILESYSTEM,
            project=self.project,
            storageclass=None,
            size=self.pvc_size,
            access_modes=self.access_modes_cephfs,
            access_modes_selection="distribute_random",
            status="",
            num_of_pvc=len(self.access_modes_cephfs),
            wait_each=False,
        )

        # Start creation of new RBD PVCs
        log.info("Start creating new RBD PVCs.")
        pvc_create_rbd = executor.submit(
            multi_pvc_factory,
            interface=constants.CEPHBLOCKPOOL,
            project=self.project,
            storageclass=None,
            size=self.pvc_size,
            access_modes=self.access_modes_rbd,
            access_modes_selection="distribute_random",
            status="",
            num_of_pvc=len(self.access_modes_rbd),
            wait_each=False,
        )

        # Start deleting PVCs
        pvc_bulk_delete = executor.submit(delete_pvcs, pvcs_to_delete)
        log.info("Started deleting PVCs")

        # Start deleting pods
        pod_bulk_delete = executor.submit(self.delete_pods, pods_to_delete)
        log.info("Started deleting pods")

        # Start IO on IO pods
        self.run_io_on_pods(io_pods)
        log.info("Started IO on IO pods")

        # Wait for 1 second before killing daemons. This is to wait for the create/delete operations to start
        sleep(1)

        # Kill daemons
        node_and_kill_proc = {}
        log.info(f"Killing daemons of {daemons_to_kill}")
        for node_name, pids in nodes_and_pids.items():
            # Command to kill the daemon
            kill_cmd = f"oc debug node/{node_name} -- chroot /host kill -9 {pids}"
            # Create node-kill process map for verifying the result
            node_and_kill_proc[node_name] = executor.submit(run_cmd, kill_cmd)

        # Verify daemon kill process
        for node_name, daemon_kill_proc in node_and_kill_proc.items():
            # Get the type of daemons killed on the particular node
            resources = [
                disruption.resource
                for disruption in disruption_ops
                if disruption.daemon_pid in nodes_and_pids[node_name]
            ]
            # 'daemon_kill_proc' result will be an empty string if the
            # command succeeds
            cmd_out = daemon_kill_proc.result()
            assert isinstance(cmd_out, str) and (not cmd_out), (
                f"Failed to kill {resources} daemons in the node {node_name}. "
                f"Daemon kill command output - {cmd_out}"
            )

        # Wait for new daemon to come up
        for disruption in disruption_ops:
            disruption.check_new_pid()
        log.info("Verified daemons kill")

        pods_deleted = pod_bulk_delete.result()
        assert pods_deleted, "Deletion of pods failed."

        # Verify pods are deleted
        for pod_obj in pods_to_delete:
            pod_obj.ocp.wait_for_delete(pod_obj.name, 300)
        log.info("Verified: Pods are deleted.")

        # Verify that the mount point is removed from nodes after deleting pod
        node_pv_mounted = verify_pv_mounted_on_node(node_pv_dict)
        for node, pvs in node_pv_mounted.items():
            assert not pvs, (
                f"PVs {pvs} are still present on node {node} after "
                f"deleting the pods."
            )
        log.info(
            "Verified: mount points are removed from nodes after deleting the pods"
        )

        pvcs_deleted = pvc_bulk_delete.result()
        assert pvcs_deleted, "Deletion of PVCs failed."

        # Verify PVCs are deleted
        for pvc_obj in pvcs_to_delete:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        log.info("Verified: PVCs are deleted.")

        # Getting result of PVC creation as list of PVC objects
        log.info("Getting the result of CephFS PVC creation process")
        pvc_objs_cephfs_new = pvc_create_cephfs.result()

        log.info("Getting the result of RBD PVC creation process")
        pvc_objs_rbd_new = pvc_create_rbd.result()

        # Set interface argument for reference
        for pvc_obj in pvc_objs_cephfs_new:
            pvc_obj.interface = constants.CEPHFILESYSTEM

        # Set interface argument for reference
        for pvc_obj in pvc_objs_rbd_new:
            pvc_obj.interface = constants.CEPHBLOCKPOOL

        # Confirm PVCs are Bound
        log.info("Verifying the new CephFS and RBD PVCs are Bound")
        for pvc_obj in pvc_objs_cephfs_new + pvc_objs_rbd_new:
            helpers.wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
            )
            pvc_obj.reload()
        log.info("Verified: New CephFS and RBD PVCs are Bound.")

        # Getting result of pods creation as list of Pod objects
        log.info("Getting the result of pods creation process")
        pod_objs_rbd_new = pod_create_rbd.result()
        pod_objs_cephfs_new = pod_create_cephfs.result()

        # Verify new pods are Running
        log.info("Verifying the new pods are Running")
        for pod_obj in pod_objs_rbd_new + pod_objs_cephfs_new:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
            )
            pod_obj.reload()
        log.info("Verified: All new pods are Running.")

        # Verify PVs are deleted
        for pv_obj in pv_objs:
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=300)
        log.info("Verified: PVs are deleted.")

        # Verify PV using ceph toolbox. Image/Subvolume should be deleted.
        pool_name = default_ceph_block_pool()
        for pvc_obj, uuid in pvc_uuid_map.items():
            if pvc_obj.interface == constants.CEPHBLOCKPOOL:
                ret = verify_volume_deleted_in_backend(
                    interface=constants.CEPHBLOCKPOOL,
                    image_uuid=uuid,
                    pool_name=pool_name,
                )
            if pvc_obj.interface == constants.CEPHFILESYSTEM:
                ret = verify_volume_deleted_in_backend(
                    interface=constants.CEPHFILESYSTEM, image_uuid=uuid
                )
            assert (
                ret
            ), f"Volume associated with PVC {pvc_obj.name} still exists in the backend"

        log.info("Fetching IO results from the pods.")
        for pod_obj in io_pods:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
        log.info("Verified IO result on pods.")

        # Verify that the new PVCs are usable by creating new pods
        log.info("Verify that the new PVCs are usable by creating new pods")
        pod_objs_rbd_re = helpers.create_pods(
            pvc_objs_rbd_new, pod_factory, constants.CEPHBLOCKPOOL, 2
        )
        pod_objs_cephfs_re = helpers.create_pods(
            pvc_objs_cephfs_new, pod_factory, constants.CEPHFILESYSTEM, 2
        )

        # Verify pods are Running
        log.info("Verifying the pods are Running")
        for pod_obj in pod_objs_rbd_re + pod_objs_cephfs_re:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
            )
            pod_obj.reload()
        log.info(
            "Successfully created and verified the status of the pods using the new CephFS and RBD PVCs."
        )

        new_pods = (
            pod_objs_rbd_new
            + pod_objs_cephfs_new
            + pod_objs_rbd_re
            + pod_objs_cephfs_re
        )

        # Do setup on the new pods for running IO
        log.info("Setting up the new pods for running IO.")
        for pod_obj in new_pods:
            if pod_obj.pvc.get_pvc_vol_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on the new pods to complete
        for pod_obj in new_pods:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod {pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on the new pods.")

        # Start IO on the new pods
        log.info("Start IO on the new pods")
        self.run_io_on_pods(new_pods)
        log.info("Started IO on the new pods")

        log.info("Fetching IO results from the new pods.")
        for pod_obj in new_pods:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on the new pods.")

        # Verify number of pods of each daemon type
        final_num_resource_name = [
            len(pod_functions[resource_name]()) for resource_name in daemons_to_kill
        ]
        assert final_num_resource_name == num_of_resource_pods, (
            f"Total number of pods of each type does not match the initial "
            f"value. Total number of pods of each type before daemon kill: "
            f"{num_of_resource_pods}. Total number of pods of each type "
            f"present now: {final_num_resource_name}"
        )

        # Check ceph status
        ceph_health_check(namespace=config.ENV_DATA["cluster_namespace"])
        log.info("Ceph cluster health is OK")
    def setup(self, interface, multi_pvc_factory, pod_factory):
        """
        Create PVCs and pods
        """
        access_modes = [constants.ACCESS_MODE_RWO]
        if interface == constants.CEPHFILESYSTEM:
            access_modes.append(constants.ACCESS_MODE_RWX)

        # Modify access_modes list to create rbd `block` type volume with
        # RWX access mode. RWX is not supported in filesystem type rbd
        if interface == constants.CEPHBLOCKPOOL:
            access_modes.extend(
                [
                    f"{constants.ACCESS_MODE_RWO}-Block",
                    f"{constants.ACCESS_MODE_RWX}-Block",
                ]
            )

        pvc_objs = multi_pvc_factory(
            interface=interface,
            project=None,
            storageclass=None,
            size=self.pvc_size,
            access_modes=access_modes,
            status=constants.STATUS_BOUND,
            num_of_pvc=self.num_of_pvcs,
            wait_each=False,
        )

        # Set volume mode on PVC objects
        for pvc_obj in pvc_objs:
            pvc_info = pvc_obj.get()
            setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])

        rwo_pvcs = [
            pvc_obj
            for pvc_obj in pvc_objs
            if (pvc_obj.access_mode == constants.ACCESS_MODE_RWO)
        ]
        rwx_pvcs = [
            pvc_obj
            for pvc_obj in pvc_objs
            if (pvc_obj.access_mode == constants.ACCESS_MODE_RWX)
        ]

        num_of_rwo_pvc = len(rwo_pvcs)
        num_of_rwx_pvc = len(rwx_pvcs)

        block_rwo_pvcs = []
        for pvc_obj in rwo_pvcs[:]:
            if pvc_obj.volume_mode == "Block":
                block_rwo_pvcs.append(pvc_obj)
                rwo_pvcs.remove(pvc_obj)

        log.info(
            f"Created {num_of_rwo_pvc} RWO PVCs in which "
            f"{len(block_rwo_pvcs)} are rbd block type."
        )
        log.info(f"Created {num_of_rwx_pvc} RWX PVCs.")

        # Select 6 PVCs for IO pods
        if block_rwo_pvcs:
            pvc_objs_for_io_pods = rwo_pvcs[0:2] + rwx_pvcs[0:2] + block_rwo_pvcs[0:2]
            pvc_objs_new_pods = rwo_pvcs[2:] + rwx_pvcs[2:] + block_rwo_pvcs[2:]
        else:
            pvc_objs_for_io_pods = rwo_pvcs[0:3] + rwx_pvcs[0:3]
            pvc_objs_new_pods = rwo_pvcs[3:] + rwx_pvcs[3:]

        # Create one pod using each RWO PVC and two pods using each RWX PVC
        # for running IO
        io_pods = helpers.create_pods(pvc_objs_for_io_pods, pod_factory, interface, 2)

        # Wait for pods to be in Running state
        for pod_obj in io_pods:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()
        log.info(f"Created {len(io_pods)} pods for running IO.")

        return pvc_objs, io_pods, pvc_objs_new_pods, access_modes
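The setup above splits the PVCs three ways (filesystem RWO, RWX, and block RWO) by mutating `rwo_pvcs` while iterating over a copy. The same partition can be expressed with comprehensions, sketched here on plain tuples instead of PVC objects:

pvcs = [("ReadWriteOnce", "Filesystem"), ("ReadWriteOnce", "Block"),
        ("ReadWriteMany", "Filesystem")]

rwx_pvcs = [p for p in pvcs if p[0] == "ReadWriteMany"]
block_rwo_pvcs = [p for p in pvcs if p == ("ReadWriteOnce", "Block")]
rwo_pvcs = [p for p in pvcs if p[0] == "ReadWriteOnce" and p[1] != "Block"]

assert len(rwo_pvcs) + len(rwx_pvcs) + len(block_rwo_pvcs) == len(pvcs)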
    def test_daemon_kill_during_pvc_pod_creation_and_io(
        self, interface, resource_name, setup, multi_pvc_factory, pod_factory
    ):
        """
        Kill 'resource_name' daemon while PVCs creation, pods
        creation and IO operation are progressing.
        """
        num_of_new_pvcs = 5
        pvc_objs, io_pods, pvc_objs_new_pods, access_modes = setup
        proj_obj = pvc_objs[0].project
        storageclass = pvc_objs[0].storageclass

        pod_functions = {
            "mds": partial(get_mds_pods),
            "mon": partial(get_mon_pods),
            "mgr": partial(get_mgr_pods),
            "osd": partial(get_osd_pods),
            "rbdplugin": partial(get_plugin_pods, interface=interface),
            "cephfsplugin": partial(get_plugin_pods, interface=interface),
            "cephfsplugin_provisioner": partial(get_cephfsplugin_provisioner_pods),
            "rbdplugin_provisioner": partial(get_rbdfsplugin_provisioner_pods),
            "operator": partial(get_operator_pods),
        }

        executor = ThreadPoolExecutor(max_workers=len(io_pods))

        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource=resource_name)

        # Get number of pods of type 'resource_name'
        resource_pods_num = len(pod_functions[resource_name]())

        # Do setup for running IO on pods
        log.info("Setting up pods for running IO")
        for pod_obj in io_pods:
            if pod_obj.pvc.volume_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on pods to complete
        for pod_obj in io_pods:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(180, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod {pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on pods")

        # Set daemon to be killed
        disruption.select_daemon()

        # Start creating new pods
        log.info("Start creating new pods.")
        bulk_pod_create = executor.submit(
            helpers.create_pods, pvc_objs_new_pods, pod_factory, interface, 2
        )

        # Start creation of new PVCs
        log.info("Start creating new PVCs.")
        bulk_pvc_create = executor.submit(
            multi_pvc_factory,
            interface=interface,
            project=proj_obj,
            storageclass=storageclass,
            size=self.pvc_size,
            access_modes=access_modes,
            access_modes_selection="distribute_random",
            status="",
            num_of_pvc=num_of_new_pvcs,
            wait_each=False,
        )

        # Start IO on each pod
        log.info("Start IO on pods")
        for pod_obj in io_pods:
            if pod_obj.pvc.volume_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            pod_obj.run_io(
                storage_type=storage_type,
                size="1G",
                runtime=10,
                fio_filename=f"{pod_obj.name}_io_file1",
            )
        log.info("IO started on all pods.")

        # Kill daemon
        disruption.kill_daemon()

        # Getting result of PVC creation as list of PVC objects
        pvc_objs_new = bulk_pvc_create.result()

        # Confirm PVCs are Bound
        for pvc_obj in pvc_objs_new:
            helpers.wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
            )
            pvc_obj.reload()
        log.info("Verified: New PVCs are Bound.")

        # Getting result of pods creation as list of Pod objects
        pod_objs_new = bulk_pod_create.result()

        # Verify new pods are Running
        for pod_obj in pod_objs_new:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()
        log.info("Verified: All new pods are Running.")

        # Verify IO
        log.info("Fetching IO results from IO pods.")
        for pod_obj in io_pods:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
            log.info(f"IOPs after FIO on pod {pod_obj.name}:")
            log.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            log.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
        log.info("Verified IO result on IO pods.")

        all_pod_objs = io_pods + pod_objs_new

        # Fetch volume details from pods for the purpose of verification
        node_pv_dict = {}
        for pod in all_pod_objs:
            pod_info = pod.get()
            node = pod_info["spec"]["nodeName"]
            pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
            for pvc_obj in pvc_objs:
                if pvc_obj.name == pvc:
                    pvc_obj.reload()
                    pv = pvc_obj.backed_pv
                    break
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Delete pods
        for pod_obj in all_pod_objs:
            pod_obj.delete(wait=False)

        # Verify pods are deleted
        for pod_obj in all_pod_objs:
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        # Verify number of 'resource_name' type pods
        final_resource_pods_num = len(pod_functions[resource_name]())
        assert final_resource_pods_num == resource_pods_num, (
            f"Total number of {resource_name} pods does not match the "
            f"initial value. Total number of pods before daemon kill: "
            f"{resource_pods_num}. Total number of pods present now: "
            f"{final_resource_pods_num}"
        )

        # Verify volumes are unmapped from nodes after deleting the pods
        node_pv_mounted = helpers.verify_pv_mounted_on_node(node_pv_dict)
        for node, pvs in node_pv_mounted.items():
            assert not pvs, (
                f"PVs {pvs} are still present on node {node} after "
                f"deleting the pods."
            )
        log.info(
            "Verified: mount points are removed from nodes after deleting the pods"
        )

        # Set volume mode on PVC objects
        for pvc_obj in pvc_objs_new:
            pvc_info = pvc_obj.get()
            setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])

        # Verify that PVCs are reusable by creating new pods
        all_pvc_objs = pvc_objs + pvc_objs_new
        pod_objs_re = helpers.create_pods(all_pvc_objs, pod_factory, interface, 2)

        # Verify pods are Running
        for pod_obj in pod_objs_re:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()
        log.info("Successfully created new pods using all PVCs.")

        # Run IO on each of the newly created pods
        for pod_obj in pod_objs_re:
            if pod_obj.pvc.volume_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            pod_obj.run_io(
                storage_type=storage_type,
                size="1G",
                runtime=10,
                fio_filename=f"{pod_obj.name}_io_file2",
            )

        log.info("Fetching IO results from newly created pods")
        for pod_obj in pod_objs_re:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
            log.info(f"IOPs after FIO on pod {pod_obj.name}:")
            log.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            log.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
        log.info("Verified IO result on newly created pods.")
    def setup(
        self,
        kv_version,
        kms_provider,
        use_vault_namespace,
        pv_encryption_kms_setup_factory,
        project_factory,
        multi_pvc_factory,
        pod_factory,
        storageclass_factory,
    ):
        """
        Setup csi-kms-connection-details configmap and create resources for the test

        """

        log.info("Setting up csi-kms-connection-details configmap")
        self.kms = pv_encryption_kms_setup_factory(kv_version,
                                                   use_vault_namespace)
        log.info("csi-kms-connection-details setup successful")

        # Create a project
        self.proj_obj = project_factory()

        # Create an encryption enabled storageclass for RBD
        self.sc_obj = storageclass_factory(
            interface=constants.CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.kms.kmsid,
        )

        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Create ceph-csi-kms-token in the tenant namespace
            self.kms.vault_path_token = self.kms.generate_vault_token()
            self.kms.create_vault_csi_kms_token(
                namespace=self.proj_obj.namespace)

        # Create PVC and Pods
        self.pvc_size = 1
        self.pvc_objs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.proj_obj,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            access_modes=[
                f"{constants.ACCESS_MODE_RWX}-Block",
                f"{constants.ACCESS_MODE_RWO}-Block",
            ],
            status=constants.STATUS_BOUND,
            num_of_pvc=2,
            wait_each=False,
        )

        self.pod_objs = helpers.create_pods(
            self.pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status=constants.STATUS_RUNNING,
        )

        # Verify if the key is created in Vault
        self.vol_handles = []
        for pvc_obj in self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            self.vol_handles.append(vol_handle)

            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=vol_handle, path=self.kms.vault_backend_path):
                    log.info(f"Vault: Found key for {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for {pvc_obj.name}")
    def test_pvc_to_pvc_clone(self, kv_version, kms_provider, pod_factory):
        """
        Test to create a clone from an existing encrypted RBD PVC.
        Verify that the cloned PVC is encrypted and all the data is preserved.

        """

        log.info("Checking for encrypted device and running IO on all pods")
        for vol_handle, pod_obj in zip(self.vol_handles, self.pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")
            log.info(f"File created during IO {pod_obj.name}")
            pod_obj.run_io(
                storage_type="block",
                size="500M",
                io_direction="write",
                runtime=60,
                end_fsync=1,
                direct=1,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in self.pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        cloned_pvc_objs, cloned_vol_handles = [], []

        # Calculate the md5sum value and create clones of existing PVCs
        log.info("Calculate the md5sum after IO and create clones of all PVCs")
        for pod_obj in self.pod_objs:
            pod_obj.md5sum_after_io = pod.cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )

            cloned_pvc_obj = pvc.create_pvc_clone(
                self.sc_obj.name,
                pod_obj.pvc.name,
                constants.CSI_RBD_PVC_CLONE_YAML,
                self.proj_obj.namespace,
                volume_mode=constants.VOLUME_MODE_BLOCK,
                access_mode=pod_obj.pvc.access_mode,
            )
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND)
            cloned_pvc_obj.reload()
            cloned_pvc_obj.md5sum = pod_obj.md5sum_after_io
            cloned_pvc_objs.append(cloned_pvc_obj)
        log.info("Clone of all PVCs created")

        # Create and attach pod to the pvc
        cloned_pod_objs = helpers.create_pods(
            cloned_pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status="",
        )

        # Verify the new pods are running
        log.info("Verify the new pods are running")
        for pod_obj in cloned_pod_objs:
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()
        log.info("Verified: New pods are running")

        # Verify encryption keys are created for cloned PVCs in Vault
        for pvc_obj in cloned_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            cloned_vol_handles.append(vol_handle)

            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=vol_handle, path=self.kms.vault_backend_path):
                    log.info(
                        f"Vault: Found key for cloned PVC {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for cloned PVC {pvc_obj.name}"
                    )

        # Verify encrypted device is present and md5sum on all pods
        for vol_handle, pod_obj in zip(cloned_vol_handles, cloned_pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            log.info(f"Verifying md5sum on pod {pod_obj.name}")
            pod.verify_data_integrity(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                original_md5sum=pod_obj.pvc.md5sum,
                block=True,
            )
            log.info(f"Verified md5sum on pod {pod_obj.name}")

        # Run IO on new pods
        log.info("Starting IO on new pods")
        for pod_obj in cloned_pod_objs:
            pod_obj.run_io(storage_type="block", size="100M", runtime=10)

        # Wait for IO completion on new pods
        log.info("Waiting for IO completion on new pods")
        for pod_obj in cloned_pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on new pods.")

        # Delete the cloned and parent pods and PVCs
        log.info("Deleting all pods")
        for pod_obj in cloned_pod_objs + self.pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        log.info("Deleting all PVCs")
        for pvc_obj in cloned_pvc_objs + self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Verify if the keys for parent and cloned PVCs are deleted from Vault
            if kv_version == "v1" or Version.coerce(
                    config.ENV_DATA["ocs_version"]) >= Version.coerce("4.9"):
                log.info(
                    "Verify whether the keys for cloned PVCs are deleted from Vault"
                )
                for key in cloned_vol_handles + self.vol_handles:
                    if not kms.is_key_present_in_path(
                            key=key, path=self.kms.vault_backend_path):
                        log.info(f"Vault: Key deleted for {key}")
                    else:
                        raise KMSResourceCleaneupError(
                            f"Vault: Key deletion failed for {key}")
                log.info("All keys from vault were deleted")
    def test_rbd_thick_provisioning(self, multi_pvc_factory, pod_factory):
        """
        Test to verify creation of a thick provisioning enabled RBD storage
        class, and PVC creation and consumption using that storage class.

        """
        # Dict represents the PVC size and size of the file used to consume the volume
        pvc_and_file_sizes = {1: "900M", 5: "4G"}

        access_modes_rbd = [
            constants.ACCESS_MODE_RWO,
            f"{constants.ACCESS_MODE_RWO}-Block",
            f"{constants.ACCESS_MODE_RWX}-Block",
        ]

        pvcs = []

        # Create PVCs
        for pvc_size, file_size in pvc_and_file_sizes.items():
            pvc_objs = multi_pvc_factory(
                interface=constants.CEPHBLOCKPOOL,
                storageclass=self.sc_obj,
                size=pvc_size,
                access_modes=access_modes_rbd,
                status=constants.STATUS_BOUND,
                num_of_pvc=3,
                timeout=300,
            )
            for pvc_obj in pvc_objs:
                pvc_obj.io_file_size = file_size
                pvc_obj.storage_type = (
                    constants.WORKLOAD_STORAGE_TYPE_BLOCK
                    if pvc_obj.get()["spec"]["volumeMode"] == constants.VOLUME_MODE_BLOCK
                    else constants.WORKLOAD_STORAGE_TYPE_FS
                )
            pvcs.extend(pvc_objs)

        # Create pods
        pods = helpers.create_pods(
            pvc_objs=pvcs,
            pod_factory=pod_factory,
            interface=constants.CEPHBLOCKPOOL,
            status=constants.STATUS_RUNNING,
        )

        executor = ThreadPoolExecutor(max_workers=len(pods))

        # Do setup for running IO on pods
        log.info("Setting up pods to running IO")
        for pod_obj in pods:
            executor.submit(pod_obj.workload_setup,
                            storage_type=pod_obj.pvc.storage_type)

        # Wait for setup on pods to complete
        for pod_obj in pods:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod {pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on pods")

        # Start IO
        for pod_obj in pods:
            log.info(f"Starting IO on pod {pod_obj.name}.")
            pod_obj.run_io(
                storage_type=pod_obj.pvc.storage_type,
                size=pod_obj.pvc.io_file_size,
                runtime=30,
            )
            log.info(f"IO started on pod {pod_obj.name}.")
        log.info("IO started on pods.")

        log.info("Verifying IO on pods.")
        for pod_obj in pods:
            pod_obj.get_fio_results()
            log.info(f"IO completed on pod {pod_obj.name}.")
        log.info("IO finished on all pods.")
    def setup(self, request, interface, multi_pvc_factory, pod_factory):
        """
        Create PVCs and pods
        """
        if config.ENV_DATA["platform"].lower(
        ) in constants.MANAGED_SERVICE_PLATFORMS:
            # Get the index of current consumer cluster
            self.consumer_cluster_index = config.cur_index

            def teardown():
                # Switching to provider cluster context will be done during the test case.
                # Switch back to consumer cluster context after the test case.
                config.switch_to_consumer(self.consumer_cluster_index)

            request.addfinalizer(teardown)

        access_modes = [constants.ACCESS_MODE_RWO]
        if interface == constants.CEPHFILESYSTEM:
            access_modes.append(constants.ACCESS_MODE_RWX)

        # Extend access_modes to create RWX rbd PVCs with `Block` volume
        # mode, since RWX is not supported for filesystem mode rbd volumes
        if interface == constants.CEPHBLOCKPOOL:
            access_modes.extend([
                f"{constants.ACCESS_MODE_RWO}-Block",
                f"{constants.ACCESS_MODE_RWX}-Block",
            ])

        pvc_objs = multi_pvc_factory(
            interface=interface,
            project=None,
            storageclass=None,
            size=self.pvc_size,
            access_modes=access_modes,
            status=constants.STATUS_BOUND,
            num_of_pvc=self.num_of_pvcs,
            wait_each=False,
        )

        # Set volume mode on PVC objects
        for pvc_obj in pvc_objs:
            pvc_info = pvc_obj.get()
            setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])

        rwo_pvcs = [
            pvc_obj for pvc_obj in pvc_objs
            if (pvc_obj.access_mode == constants.ACCESS_MODE_RWO)
        ]
        rwx_pvcs = [
            pvc_obj for pvc_obj in pvc_objs
            if (pvc_obj.access_mode == constants.ACCESS_MODE_RWX)
        ]

        num_of_rwo_pvc = len(rwo_pvcs)
        num_of_rwx_pvc = len(rwx_pvcs)

        block_rwo_pvcs = []
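        # Iterate over a copy, since matching PVCs are removed from
        # rwo_pvcs inside the loop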
        for pvc_obj in rwo_pvcs[:]:
            if pvc_obj.volume_mode == "Block":
                block_rwo_pvcs.append(pvc_obj)
                rwo_pvcs.remove(pvc_obj)

        log.info(f"Created {num_of_rwo_pvc} RWO PVCs in which "
                 f"{len(block_rwo_pvcs)} are rbd block type.")
        log.info(f"Created {num_of_rwx_pvc} RWX PVCs.")

        # Select 6 PVCs for IO pods
        if block_rwo_pvcs:
            pvc_objs_for_io_pods = rwo_pvcs[0:2] + rwx_pvcs[0:2] + block_rwo_pvcs[0:2]
            pvc_objs_new_pods = rwo_pvcs[2:] + rwx_pvcs[2:] + block_rwo_pvcs[2:]
        else:
            pvc_objs_for_io_pods = rwo_pvcs[0:3] + rwx_pvcs[0:3]
            pvc_objs_new_pods = rwo_pvcs[3:] + rwx_pvcs[3:]

        # Create one pod using each RWO PVC and two pods using each RWX PVC
        # for running IO
        io_pods = helpers.create_pods(pvc_objs_for_io_pods, pod_factory,
                                      interface, 2)

        # Wait for pods to be in Running state
        for pod_obj in io_pods:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING)
            pod_obj.reload()
        log.info(f"Created {len(io_pods)} pods for running IO.")

        return pvc_objs, io_pods, pvc_objs_new_pods, access_modes
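
The setup above splits PVCs by access mode with comprehensions and then peels Block-mode PVCs out of the RWO list in place. The same bookkeeping can be done in one grouping pass; a sketch (group_pvcs and its key format are assumptions, relying on the volume_mode attribute set above):

from collections import defaultdict


def group_pvcs(pvc_objs):
    """Group PVCs by "<access_mode>/<volume_mode>",
    e.g. "ReadWriteOnce/Block"."""
    groups = defaultdict(list)
    for pvc_obj in pvc_objs:
        groups[f"{pvc_obj.access_mode}/{pvc_obj.volume_mode}"].append(pvc_obj)
    return groups
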
    def test_encrypted_rbd_block_pvc_snapshot(
        self,
        kms_provider,
        snapshot_factory,
        snapshot_restore_factory,
        pod_factory,
        kv_version,
    ):
        """
        Test to take snapshots of encrypted RBD Block VolumeMode PVCs

        """

        log.info(
            "Check for encrypted device, find initial md5sum value and run IO on all pods"
        )
        for vol_handle, pod_obj in zip(self.vol_handles, self.pod_objs):

            # Verify whether encrypted device is present inside the pod
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            # Find initial md5sum
            pod_obj.md5sum_before_io = cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            pod_obj.run_io(
                storage_type="block",
                size=f"{self.pvc_size - 1}G",
                io_direction="write",
                runtime=60,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in self.pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        snap_objs, snap_handles = [], []

        # Verify md5sum has changed after IO. Create snapshot
        log.info(
            "Verify md5sum has changed after IO and create snapshot from all PVCs"
        )
        for pod_obj in self.pod_objs:
            md5sum_after_io = cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            assert pod_obj.md5sum_before_io != md5sum_after_io, (
                f"md5sum has not changed after IO on pod {pod_obj.name}"
            )
            log.info(f"Creating snapshot of PVC {pod_obj.pvc.name}")
            snap_obj = snapshot_factory(pod_obj.pvc, wait=False)
            snap_obj.md5sum = md5sum_after_io
            snap_objs.append(snap_obj)
        log.info("Snapshots created")

        # Verify snapshots are ready and verify the encryption keys are created in Vault
        log.info("Verify snapshots are ready")
        for snap_obj in snap_objs:
            snap_obj.ocp.wait_for_resource(
                condition="true",
                resource_name=snap_obj.name,
                column=constants.STATUS_READYTOUSE,
                timeout=180,
            )
            snapshot_content = get_snapshot_content_obj(snap_obj=snap_obj)
            snap_handle = snapshot_content.get().get("status").get(
                "snapshotHandle")
            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=snap_handle, path=self.kms.vault_backend_path):
                    log.info(f"Vault: Found key for snapshot {snap_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for snapshot {snap_obj.name}")
            snap_handles.append(snap_handle)

        # Delete pods
        log.info("Deleting the pods")
        for pod_obj in self.pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
        log.info("Deleted all the pods")

        # Delete parent PVCs to verify snapshot is independent
        log.info("Deleting parent PVCs")
        for pvc_obj in self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            log.info(f"Deleted PVC {pvc_obj.name}. Verifying whether PV "
                     f"{pv_obj.name} is deleted.")
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)
        log.info(
            "All parent PVCs and PVs are deleted before restoring snapshot.")

        restore_pvc_objs, restore_vol_handles = [], []

        # Create PVCs out of the snapshots
        log.info("Creating new PVCs from snapshots")
        for snap_obj in snap_objs:
            log.info(f"Creating a PVC from snapshot {snap_obj.name}")
            restore_pvc_obj = snapshot_restore_factory(
                snapshot_obj=snap_obj,
                storageclass=self.sc_obj.name,
                size=f"{self.pvc_size}Gi",
                volume_mode=snap_obj.parent_volume_mode,
                access_mode=snap_obj.parent_access_mode,
                status="",
            )
            log.info(f"Created PVC {restore_pvc_obj.name} from snapshot "
                     f"{snap_obj.name}")
            restore_pvc_obj.md5sum = snap_obj.md5sum
            restore_pvc_objs.append(restore_pvc_obj)
        log.info("Created new PVCs from all the snapshots")

        # Confirm that the restored PVCs are Bound
        log.info("Verify the restored PVCs are Bound")
        for pvc_obj in restore_pvc_objs:
            wait_for_resource_state(resource=pvc_obj,
                                    state=constants.STATUS_BOUND,
                                    timeout=180)
            pvc_obj.reload()
        log.info("Verified: Restored PVCs are Bound.")

        # Attach the restored PVCs to pods, one pod per PVC
        log.info("Attach the restored PVCs to pods")
        restore_pod_objs = create_pods(
            restore_pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status="",
        )

        # Verify the new pods are running
        log.info("Verify the new pods are running")
        timeout = (
            300 if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM else 60
        )
        for pod_obj in restore_pod_objs:
            wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout)
        log.info("Verified: New pods are running")

        # Verify encryption keys are created for restored PVCs in Vault
        for pvc_obj in restore_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            restore_vol_handles.append(vol_handle)
            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=vol_handle, path=self.kms.vault_backend_path):
                    log.info(
                        f"Vault: Found key for restored PVC {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for restored PVC {pvc_obj.name}"
                    )

        # Verify encrypted device is present and md5sum on all pods
        for vol_handle, pod_obj in zip(restore_vol_handles, restore_pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            log.info(f"Verifying md5sum on pod {pod_obj.name}")
            verify_data_integrity(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                original_md5sum=pod_obj.pvc.md5sum,
                block=True,
            )
            log.info(f"Verified md5sum on pod {pod_obj.name}")

        # Run IO on new pods
        log.info("Starting IO on new pods")
        for pod_obj in restore_pod_objs:
            pod_obj.run_io(storage_type="block", size="500M", runtime=15)

        # Wait for IO completion on new pods
        log.info("Waiting for IO completion on new pods")
        for pod_obj in restore_pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on new pods.")

        # Delete the restored pods, PVC and snapshots
        log.info("Deleting pods using restored PVCs")
        for pod_obj in restore_pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        log.info("Deleting restored PVCs")
        for pvc_obj in restore_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

        log.info("Deleting the snapshots")
        for snap_obj in snap_objs:
            snapcontent_obj = get_snapshot_content_obj(snap_obj=snap_obj)
            snap_obj.delete()
            snapcontent_obj.ocp.wait_for_delete(
                resource_name=snapcontent_obj.name)

        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Verify if keys for PVCs and snapshots are deleted from Vault
            if kv_version == "v1" or Version.coerce(
                    config.ENV_DATA["ocs_version"]) >= Version.coerce("4.9"):
                log.info(
                    "Verify whether the keys for PVCs and snapshots are deleted from Vault"
                )
                for key in self.vol_handles + snap_handles + restore_vol_handles:
                    if not kms.is_key_present_in_path(
                            key=key, path=self.kms.vault_backend_path):
                        log.info(f"Vault: Key deleted for {key}")
                    else:
                        raise KMSResourceCleaneupError(
                            f"Vault: Key deletion failed for {key}")
                log.info("All keys from vault were deleted")