Code example #1
    def test_pvc_delete_and_verify_size_is_returned_to_backend_pool(
        self, pause_and_resume_cluster_load, pvc_factory, pod_factory
    ):
        """
        Test case to verify that the PVC size is returned to the backend pool after the PVC is deleted
        """

        cbp_name = helpers.default_ceph_block_pool()

        # TODO: Get exact value of replica size
        replica_size = 3

        pvc_obj = pvc_factory(
            interface=constants.CEPHBLOCKPOOL, size=10, status=constants.STATUS_BOUND
        )
        pod_obj = pod_factory(
            interface=constants.CEPHBLOCKPOOL,
            pvc=pvc_obj,
            status=constants.STATUS_RUNNING,
        )
        pvc_obj.reload()

        used_before_io = helpers.fetch_used_size(cbp_name)
        logger.info(f"Used before IO {used_before_io}")

        # Write ~6 GiB of data (bs=10M x count=600)
        pod.run_io_and_verify_mount_point(pod_obj, bs="10M", count="600")
        exp_size = used_before_io + (6 * replica_size)
        used_after_io = helpers.fetch_used_size(cbp_name, exp_size)
        logger.info(f"Used space after IO {used_after_io}")

        rbd_image_id = pvc_obj.image_uuid
        pod_obj.delete()
        pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

        verify_pv_not_exists(pvc_obj, cbp_name, rbd_image_id)
        used_after_deleting_pvc = helpers.fetch_used_size(cbp_name, used_before_io)
        logger.info(f"Used after deleting PVC {used_after_deleting_pvc}")
Code example #2
    def test_delete_provisioner_pod_while_thick_provisioning(
        self,
        pvc_factory,
        pod_factory,
    ):
        """
        Test deleting the RBD provisioner leader pod while a PVC is being
        created from a thick-provision-enabled storage class
        """
        pvc_size = 20
        pool_name = default_ceph_block_pool()
        executor = ThreadPoolExecutor(max_workers=1)
        DISRUPTION_OPS.set_resource(resource="rbdplugin_provisioner",
                                    leader_type="provisioner")

        # Start creation of PVC
        pvc_create = executor.submit(
            pvc_factory,
            interface=constants.CEPHBLOCKPOOL,
            project=self.proj_obj,
            storageclass=default_thick_storage_class(),
            size=pvc_size,
            access_mode=constants.ACCESS_MODE_RWO,
            status="",
        )

        # Ensure that the PVC is being created before deleting the rbd provisioner pod
        ret = helpers.wait_for_resource_count_change(get_all_pvcs, 0,
                                                     self.proj_obj.namespace,
                                                     "increase")
        assert ret, "Wait timeout: PVC is not being created."
        logger.info("PVC creation has started.")
        DISRUPTION_OPS.delete_resource()
        logger.info("Deleted RBD provisioner leader pod.")

        pvc_obj = pvc_create.result()

        # Confirm that the PVC is Bound
        helpers.wait_for_resource_state(resource=pvc_obj,
                                        state=constants.STATUS_BOUND,
                                        timeout=600)
        pvc_obj.reload()
        logger.info(f"Verified: PVC {pvc_obj.name} reached Bound state.")
        image_name = pvc_obj.get_rbd_image_name
        pv_obj = pvc_obj.backed_pv_obj

        # Verify thick provision by checking the image used size
        assert check_rbd_image_used_size(
            pvc_objs=[pvc_obj],
            usage_to_compare=f"{pvc_size}GiB",
            rbd_pool=pool_name,
            expect_match=True,
        ), f"PVC {pvc_obj.name} is not thick provisioned.\n PV describe :\n {pv_obj.describe()}"
        logger.info("Verified: The PVC is thick provisioned")

        # Create pod and run IO
        pod_obj = pod_factory(
            interface=constants.CEPHBLOCKPOOL,
            pvc=pvc_obj,
            status=constants.STATUS_RUNNING,
        )
        pod_obj.run_io(
            storage_type="fs",
            size=f"{pvc_size-1}G",
            fio_filename=f"{pod_obj.name}_io",
            end_fsync=1,
        )

        # Get IO result
        get_fio_rw_iops(pod_obj)

        logger.info(f"Deleting pod {pod_obj.name}")
        pod_obj.delete()
        assert pod_obj.ocp.wait_for_delete(
            pod_obj.name, 180
        ), f"Pod {pod_obj.name} is not deleted"

        # Fetch image id for verification
        image_uid = pvc_obj.image_uuid

        logger.info(f"Deleting PVC {pvc_obj.name}")
        pvc_obj.delete()
        assert pvc_obj.ocp.wait_for_delete(
            pvc_obj.name
        ), f"PVC {pvc_obj.name} is not deleted"
        logger.info(f"Verified: PVC {pvc_obj.name} is deleted.")
        assert pv_obj.ocp.wait_for_delete(
            pv_obj.name
        ), f"PV {pv_obj.name} is not deleted"
        logger.info(f"Verified: PV {pv_obj.name} is deleted.")

        # Verify the rbd image is deleted
        logger.info(f"Wait for the RBD image {image_name} to get deleted")
        assert verify_volume_deleted_in_backend(
            interface=constants.CEPHBLOCKPOOL,
            image_uuid=image_uid,
            pool_name=pool_name,
            timeout=300,
        ), f"Wait timeout - RBD image {image_name} is not deleted"
        logger.info(f"Verified: RBD image {image_name} is deleted")
Code example #3
    def test_delete_rbd_pvc_while_thick_provisioning(
        self,
        resource_to_delete,
        pvc_factory,
        pod_factory,
    ):
        """
        Test to delete an RBD PVC while thick provisioning is in progress and verify that no stale image is left behind.
        Depending on the value of "resource_to_delete", the provisioner pod will also be deleted.
        """
        pvc_size = 15
        executor = ThreadPoolExecutor(max_workers=1)

        if resource_to_delete:
            DISRUPTION_OPS.set_resource(resource=resource_to_delete,
                                        leader_type="provisioner")

        ct_pod = get_ceph_tools_pod()

        # Collect the list of RBD images
        image_list_out_initial = ct_pod.exec_ceph_cmd(
            ceph_cmd=f"rbd ls -p {constants.DEFAULT_BLOCKPOOL}", format="")
        image_list_initial = image_list_out_initial.strip().split()
        log.info(
            f"List of RBD images before creating the PVC {image_list_initial}")

        # Start creation of PVC
        pvc_obj = pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.proj_obj,
            storageclass=default_thick_storage_class(),
            size=pvc_size,
            access_mode=constants.ACCESS_MODE_RWO,
            status="",
        )

        # Ensure that the PVC is being created
        ret = wait_for_resource_count_change(get_all_pvcs, 0,
                                             self.proj_obj.namespace,
                                             "increase")
        assert ret, "Wait timeout: PVC is not being created."
        log.info("PVC creation has started.")

        if resource_to_delete:
            log.info(f"Deleting {resource_to_delete} pod.")
            delete_provisioner = executor.submit(
                DISRUPTION_OPS.delete_resource)

        # Delete PVC
        log.info(f"Deleting PVC {pvc_obj.name}")
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        log.info(f"Verified: PVC {pvc_obj.name} is deleted.")

        if resource_to_delete:
            delete_provisioner.result()

        # Collect the list of RBD images
        image_list_out_final = ct_pod.exec_ceph_cmd(
            ceph_cmd=f"rbd ls -p {default_ceph_block_pool()}", format="")
        image_list_final = image_list_out_final.strip().split()
        log.info(
            f"List of RBD images after deleting the PVC {image_list_final}")

        stale_images = [
            image for image in image_list_final
            if image not in image_list_initial
        ]

        # Check whether more than one new image is present
        if len(stale_images) > 1:
            raise UnexpectedBehaviour(
                f"Could not verify the test result. Found more than one new rbd image - {stale_images}."
            )

        if stale_images:
            stale_image = stale_images[0].strip()
            # Wait for the image to get deleted
            image_deleted = verify_volume_deleted_in_backend(
                constants.CEPHBLOCKPOOL,
                image_uuid=stale_image.split("csi-vol-")[1],
                pool_name=default_ceph_block_pool(),
                timeout=300,
            )
            if not image_deleted:
                du_out = ct_pod.exec_ceph_cmd(
                    ceph_cmd=
                    f"rbd du -p {default_ceph_block_pool()} {stale_image}",
                    format="",
                )
            assert image_deleted, (
                f"Wait timeout: RBD image {stale_image} is not deleted. Check the logs to ensure that"
                f" this is the stale image of the deleted PVC. rbd du output of the image : {du_out}"
            )
            log.info(
                f"Image {stale_image} deleted within the wait time period")
        else:
            log.info("No stale image found")
Code example #4
    def test_disruptive_during_pod_pvc_deletion_and_io(
        self, interface, resource_to_delete, setup_base
    ):
        """
        Delete a Ceph/Rook pod while PVC deletion, pod deletion, and IO are
        in progress
        """
        pvc_objs, pod_objs, rwx_pod_objs = setup_base
        namespace = pvc_objs[0].project.namespace

        num_of_pods_to_delete = 3
        num_of_io_pods = 1

        # Select pods to be deleted
        pods_to_delete = pod_objs[:num_of_pods_to_delete]
        pods_to_delete.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in pods_to_delete
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        # Select pods to run IO
        io_pods = pod_objs[
            num_of_pods_to_delete : num_of_pods_to_delete + num_of_io_pods
        ]
        io_pods.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in io_pods
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        # Select pods which are having PVCs to delete
        pods_for_pvc = pod_objs[num_of_pods_to_delete + num_of_io_pods :]
        pvcs_to_delete = [pod_obj.pvc for pod_obj in pods_for_pvc]
        pods_for_pvc.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in pods_for_pvc
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        log.info(
            f"{len(pods_to_delete)} pods selected for deletion in which "
            f"{len(pods_to_delete) - num_of_pods_to_delete} pairs of pod "
            f"share same RWX PVC"
        )
        log.info(
            f"{len(io_pods)} pods selected for running IO in which "
            f"{len(io_pods) - num_of_io_pods} pairs of pod share same "
            f"RWX PVC"
        )
        no_of_rwx_pvcs_delete = len(pods_for_pvc) - len(pvcs_to_delete)
        log.info(
            f"{len(pvcs_to_delete)} PVCs selected for deletion. "
            f"RWO PVCs: {len(pvcs_to_delete) - no_of_rwx_pvcs_delete}, "
            f"RWX PVCs: {no_of_rwx_pvcs_delete}"
        )

        pod_functions = {
            "mds": partial(get_mds_pods),
            "mon": partial(get_mon_pods),
            "mgr": partial(get_mgr_pods),
            "osd": partial(get_osd_pods),
            "rbdplugin": partial(get_plugin_pods, interface=interface),
            "cephfsplugin": partial(get_plugin_pods, interface=interface),
            "cephfsplugin_provisioner": partial(get_cephfsplugin_provisioner_pods),
            "rbdplugin_provisioner": partial(get_rbdfsplugin_provisioner_pods),
            "operator": partial(get_operator_pods),
        }

        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource=resource_to_delete)
        executor = ThreadPoolExecutor(max_workers=len(pod_objs) + len(rwx_pod_objs))

        # Get number of pods of type 'resource_to_delete'
        num_of_resource_to_delete = len(pod_functions[resource_to_delete]())

        # Fetch the number of Pods and PVCs
        initial_num_of_pods = len(get_all_pods(namespace=namespace))
        initial_num_of_pvc = len(get_all_pvcs(namespace=namespace)["items"])

        # Fetch PV names to verify after deletion
        pv_objs = []
        for pvc_obj in pvcs_to_delete:
            pvc_obj.reload()
            pv_objs.append(pvc_obj.backed_pv_obj)

        # Fetch volume details from pods for the purpose of verification
        node_pv_dict = {}
        for pod_obj in pods_to_delete:
            pod_info = pod_obj.get()
            node = pod_info["spec"]["nodeName"]
            pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
            for pvc_obj in pvc_objs:
                if pvc_obj.name == pvc:
                    pvc_obj.reload()
                    pv = pvc_obj.backed_pv
                    break
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Fetch image uuid associated with PVCs to be deleted
        pvc_uuid_map = {}
        for pvc_obj in pvcs_to_delete:
            pvc_uuid_map[pvc_obj.name] = pvc_obj.image_uuid
        log.info("Fetched image uuid associated with each PVC")

        # Do setup on pods for running IO
        log.info("Setting up pods for running IO.")
        for pod_obj in pod_objs + rwx_pod_objs:
            pvc_info = pod_obj.pvc.get()
            if pvc_info["spec"]["volumeMode"] == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on pods to complete
        for pod_obj in pod_objs + rwx_pod_objs:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(180, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod " f"{pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on all pods.")

        # Start IO on pods having PVCs to delete to load data
        log.info("Starting IO on pods having PVCs to delete.")
        self.run_io_on_pods(pods_for_pvc)
        log.info("IO started on pods having PVCs to delete.")

        log.info("Fetching IO results from the pods having PVCs to delete.")
        for pod_obj in pods_for_pvc:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on pods having PVCs to delete.")

        # Delete pods having PVCs to delete.
        delete_pods(pods_for_pvc)
        for pod_obj in pods_for_pvc:
            pod_obj.ocp.wait_for_delete(pod_obj.name)
        log.info("Verified: Deleted pods which are having PVCs to delete.")

        # Start IO on pods to be deleted
        log.info("Starting IO on pods to be deleted.")
        self.run_io_on_pods(pods_to_delete)
        log.info("IO started on pods to be deleted.")

        # Start deleting PVCs
        pvc_bulk_delete = executor.submit(delete_pvcs, pvcs_to_delete)
        log.info("Started deleting PVCs")

        # Start deleting pods
        pod_bulk_delete = executor.submit(delete_pods, pods_to_delete, wait=False)
        log.info("Started deleting pods")

        # Start IO on IO pods
        self.run_io_on_pods(io_pods)
        log.info("Started IO on IO pods")

        # Verify pvc deletion has started
        pvc_deleting = executor.submit(
            wait_for_resource_count_change,
            func_to_use=get_all_pvcs,
            previous_num=initial_num_of_pvc,
            namespace=namespace,
            change_type="decrease",
            min_difference=1,
            timeout=30,
            interval=0.01,
        )

        # Verify pod deletion has started
        pod_deleting = executor.submit(
            wait_for_resource_count_change,
            func_to_use=get_all_pods,
            previous_num=initial_num_of_pods,
            namespace=namespace,
            change_type="decrease",
            min_difference=1,
            timeout=30,
            interval=0.01,
        )

        assert pvc_deleting.result(), "Wait timeout: PVCs are not being deleted."
        log.info("PVCs deletion has started.")

        assert pod_deleting.result(), "Wait timeout: Pods are not being deleted."
        log.info("Pods deletion has started.")

        # Delete pod of type 'resource_to_delete'
        disruption.delete_resource()

        pod_bulk_delete.result()

        # Verify pods are deleted
        for pod_obj in pods_to_delete:
            pod_obj.ocp.wait_for_delete(pod_obj.name, 300)
        log.info("Verified: Pods are deleted.")

        # Verify that the mount point is removed from nodes after deleting pod
        node_pv_mounted = verify_pv_mounted_on_node(node_pv_dict)
        for node, pvs in node_pv_mounted.items():
            assert not pvs, (
                f"PVs {pvs} is still present on node {node} after "
                f"deleting the pods."
            )
        log.info(
            "Verified: mount points are removed from nodes after deleting " "the pods"
        )

        pvcs_deleted = pvc_bulk_delete.result()
        assert pvcs_deleted, "Deletion of PVCs failed."

        # Verify PVCs are deleted
        for pvc_obj in pvcs_to_delete:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        log.info("Verified: PVCs are deleted.")

        # Verify PVs are deleted
        for pv_obj in pv_objs:
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=300)
        log.info("Verified: PVs are deleted.")

        # Verify PV using ceph toolbox. Image/Subvolume should be deleted.
        pool_name = default_ceph_block_pool()
        for pvc_name, uuid in pvc_uuid_map.items():
            if interface == constants.CEPHBLOCKPOOL:
                ret = verify_volume_deleted_in_backend(
                    interface=interface, image_uuid=uuid, pool_name=pool_name
                )
            if interface == constants.CEPHFILESYSTEM:
                ret = verify_volume_deleted_in_backend(
                    interface=interface, image_uuid=uuid
                )
            assert ret, (
                f"Volume associated with PVC {pvc_name} still exists " f"in backend"
            )

        log.info("Fetching IO results from the pods.")
        for pod_obj in io_pods:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
        log.info("Verified IO result on pods.")

        # Verify number of pods of type 'resource_to_delete'
        final_num_resource_to_delete = len(pod_functions[resource_to_delete]())
        assert final_num_resource_to_delete == num_of_resource_to_delete, (
            f"Total number of {resource_to_delete} pods is not matching with "
            f"initial value. Total number of pods before deleting a pod: "
            f"{num_of_resource_to_delete}. Total number of pods present now: "
            f"{final_num_resource_to_delete}"
        )

        # Check ceph status
        ceph_health_check(namespace=config.ENV_DATA["cluster_namespace"])
        log.info("Ceph cluster health is OK")
Code example #5
    def test_daemon_kill_during_pvc_pod_creation_deletion_and_io(
        self, setup_base, multi_pvc_factory, pod_factory
    ):
        """
        Kill Ceph daemons while PVC creation, PVC deletion, pod creation,
        pod deletion, and IO are in progress
        """
        daemons_to_kill = [
            "mgr",
            "mon",
            "osd",
            "mds",
        ]

        (
            pvc_objs,
            pod_objs,
            rwx_pod_objs,
            cephfs_pvc_for_pods,
            rbd_pvc_for_pods,
        ) = setup_base

        num_of_pods_to_delete = 3
        num_of_io_pods = 1
        num_pvc_create_during_disruption = len(
            self.access_modes_cephfs + self.access_modes_rbd
        )

        # Select pods to be deleted
        pods_to_delete = pod_objs[:num_of_pods_to_delete]
        pods_to_delete.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in pods_to_delete
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        # Select pods to run IO
        io_pods = pod_objs[
            num_of_pods_to_delete : num_of_pods_to_delete + num_of_io_pods
        ]
        io_pods.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in io_pods
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        # Select pods which are having PVCs to delete
        pods_for_pvc = pod_objs[num_of_pods_to_delete + num_of_io_pods :]
        pvcs_to_delete = [pod_obj.pvc for pod_obj in pods_for_pvc]
        pods_for_pvc.extend(
            [
                pod
                for pod in rwx_pod_objs
                for pod_obj in pods_for_pvc
                if (pod_obj.pvc == pod.pvc)
            ]
        )

        io_pods = [
            pod_obj
            for pod_obj in io_pods
            if pod_obj.pvc in select_unique_pvcs([pod_obj.pvc for pod_obj in io_pods])
        ]

        log.info(
            f"{len(pods_to_delete)} pods selected for deletion in which "
            f"{len(pods_to_delete) - num_of_pods_to_delete} pairs of pod "
            f"share same RWX PVC"
        )
        log.info(
            f"{len(io_pods)} pods selected for running IO in which one "
            f"pair of pod share same RWX PVC"
        )
        no_of_rwx_pvcs_delete = len(pods_for_pvc) - len(pvcs_to_delete)
        log.info(
            f"{len(pvcs_to_delete)} PVCs selected for deletion. "
            f"RWO PVCs: {len(pvcs_to_delete) - no_of_rwx_pvcs_delete}, "
            f"RWX PVCs: {no_of_rwx_pvcs_delete}"
        )

        pod_functions = {
            "mds": partial(get_mds_pods),
            "mon": partial(get_mon_pods),
            "mgr": partial(get_mgr_pods),
            "osd": partial(get_osd_pods),
        }

        # Disruption object for each daemon type
        disruption_ops = [disruption_helpers.Disruptions() for _ in daemons_to_kill]

        # Select the resource of each type
        for disruption, pod_type in zip(disruption_ops, daemons_to_kill):
            disruption.set_resource(resource=pod_type)
        executor = ThreadPoolExecutor(
            max_workers=len(pod_objs)
            + len(rwx_pod_objs)
            + len(rbd_pvc_for_pods)
            + len(cephfs_pvc_for_pods)
            + len(daemons_to_kill)
            + num_pvc_create_during_disruption
        )

        # Get number of pods of the type given in daemons_to_kill list
        num_of_resource_pods = [
            len(pod_functions[resource_name]()) for resource_name in daemons_to_kill
        ]

        # Fetch PV names to verify after deletion
        pv_objs = []
        for pvc_obj in pvcs_to_delete:
            pv_objs.append(pvc_obj.backed_pv_obj)

        # Fetch volume details from pods for the purpose of verification
        node_pv_dict = {}
        for pod_obj in pods_to_delete:
            pod_info = pod_obj.get()
            node = pod_info["spec"]["nodeName"]
            pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
            for pvc_obj in pvc_objs:
                if pvc_obj.name == pvc:
                    pv = pvc_obj.backed_pv
                    break
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Fetch image uuid associated with PVCs to be deleted
        pvc_uuid_map = {}
        for pvc_obj in pvcs_to_delete:
            pvc_uuid_map[pvc_obj] = pvc_obj.image_uuid
        log.info("Fetched image uuid associated with each PVC")

        # Do setup on pods for running IO
        log.info("Setting up pods for running IO.")
        for pod_obj in pod_objs + rwx_pod_objs:
            if pod_obj.pvc.get_pvc_vol_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on pods to complete
        for pod_obj in pod_objs + rwx_pod_objs:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod " f"{pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on all pods.")

        # Start IO on pods having PVCs to delete to load data
        pods_for_pvc_io = [
            pod_obj
            for pod_obj in pods_for_pvc
            if pod_obj.pvc
            in select_unique_pvcs([pod_obj.pvc for pod_obj in pods_for_pvc])
        ]
        log.info("Starting IO on pods having PVCs to delete.")
        self.run_io_on_pods(pods_for_pvc_io)
        log.info("IO started on pods having PVCs to delete.")

        log.info("Fetching IO results from the pods having PVCs to delete.")
        for pod_obj in pods_for_pvc_io:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on pods having PVCs to delete.")

        # Delete pods having PVCs to delete.
        assert self.delete_pods(
            pods_for_pvc
        ), "Couldn't delete pods which are having PVCs to delete."
        for pod_obj in pods_for_pvc:
            pod_obj.ocp.wait_for_delete(pod_obj.name)
        log.info("Verified: Deleted pods which are having PVCs to delete.")

        # Select daemon of each type of resource and identify the daemons running on each node
        nodes_and_pids = {}
        for disruption in disruption_ops:
            disruption.select_daemon()
            node_name = disruption.resource_obj[0].pod_data.get("spec").get("nodeName")
            # Create node-daemons dict. Value as string for passing in the 'kill' command
            nodes_and_pids[
                node_name
            ] = f"{nodes_and_pids.get(node_name, '')} {disruption.daemon_pid}"

        # Start IO on pods to be deleted
        pods_to_delete_io = [
            pod_obj
            for pod_obj in pods_to_delete
            if pod_obj.pvc
            in select_unique_pvcs([pod_obj.pvc for pod_obj in pods_to_delete])
        ]
        log.info("Starting IO on selected pods to be deleted.")
        self.run_io_on_pods(pods_to_delete_io)
        log.info("IO started on selected pods to be deleted.")

        # Start creating new pods
        log.info("Start creating new pods.")
        pod_create_rbd = executor.submit(
            helpers.create_pods,
            rbd_pvc_for_pods,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            2,
        )
        pod_create_cephfs = executor.submit(
            helpers.create_pods,
            cephfs_pvc_for_pods,
            pod_factory,
            constants.CEPHFILESYSTEM,
            2,
        )

        # Start creation of new CephFS PVCs.
        log.info("Start creating new CephFS PVCs.")
        pvc_create_cephfs = executor.submit(
            multi_pvc_factory,
            interface=constants.CEPHFILESYSTEM,
            project=self.project,
            storageclass=None,
            size=self.pvc_size,
            access_modes=self.access_modes_cephfs,
            access_modes_selection="distribute_random",
            status="",
            num_of_pvc=len(self.access_modes_cephfs),
            wait_each=False,
        )

        # Start creation of new RBD PVCs
        log.info("Start creating new RBD PVCs.")
        pvc_create_rbd = executor.submit(
            multi_pvc_factory,
            interface=constants.CEPHBLOCKPOOL,
            project=self.project,
            storageclass=None,
            size=self.pvc_size,
            access_modes=self.access_modes_rbd,
            access_modes_selection="distribute_random",
            status="",
            num_of_pvc=len(self.access_modes_rbd),
            wait_each=False,
        )

        # Start deleting PVCs
        pvc_bulk_delete = executor.submit(delete_pvcs, pvcs_to_delete)
        log.info("Started deleting PVCs")

        # Start deleting pods
        pod_bulk_delete = executor.submit(self.delete_pods, pods_to_delete)
        log.info("Started deleting pods")

        # Start IO on IO pods
        self.run_io_on_pods(io_pods)
        log.info("Started IO on IO pods")

        # Wait for 1 second before killing daemons. This is to wait for the create/delete operations to start
        sleep(1)

        # Kill daemons
        node_and_kill_proc = {}
        log.info(f"Killing daemons of {daemons_to_kill}")
        for node_name, pids in nodes_and_pids.items():
            # Command to kill the daemon
            kill_cmd = f"oc debug node/{node_name} -- chroot /host kill -9 {pids}"
            # Create node-kill process map for verifying the result
            node_and_kill_proc[node_name] = executor.submit(run_cmd, kill_cmd)

        # Verify daemon kill process
        for node_name, daemon_kill_proc in node_and_kill_proc.items():
            # Get the type of daemons killed on the particular node
            resources = [
                disruption.resource
                for disruption in disruption_ops
                if disruption.daemon_pid in nodes_and_pids[node_name]
            ]
            # 'daemon_kill_proc' result will be an empty string if the command succeeds
            cmd_out = daemon_kill_proc.result()
            assert isinstance(cmd_out, str) and (not cmd_out), (
                f"Failed to kill {resources} daemons on node {node_name}. "
                f"Daemon kill command output - {cmd_out}"
            )

        # Wait for new daemon to come up
        [disruption.check_new_pid() for disruption in disruption_ops]
        log.info("Verified daemons kill")

        pods_deleted = pod_bulk_delete.result()
        assert pods_deleted, "Deletion of pods failed."

        # Verify pods are deleted
        for pod_obj in pods_to_delete:
            pod_obj.ocp.wait_for_delete(pod_obj.name, 300)
        log.info("Verified: Pods are deleted.")

        # Verify that the mount point is removed from nodes after deleting pod
        node_pv_mounted = verify_pv_mounted_on_node(node_pv_dict)
        for node, pvs in node_pv_mounted.items():
            assert not pvs, (
                f"PVs {pvs} is still present on node {node} after "
                f"deleting the pods."
            )
        log.info(
            "Verified: mount points are removed from nodes after deleting " "the pods"
        )

        pvcs_deleted = pvc_bulk_delete.result()
        assert pvcs_deleted, "Deletion of PVCs failed."

        # Verify PVCs are deleted
        for pvc_obj in pvcs_to_delete:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        log.info("Verified: PVCs are deleted.")

        # Getting result of PVC creation as list of PVC objects
        log.info("Getting the result of CephFS PVC creation process")
        pvc_objs_cephfs_new = pvc_create_cephfs.result()

        log.info("Getting the result of RBD PVC creation process")
        pvc_objs_rbd_new = pvc_create_rbd.result()

        # Set interface argument for reference
        for pvc_obj in pvc_objs_cephfs_new:
            pvc_obj.interface = constants.CEPHFILESYSTEM

        # Set interface argument for reference
        for pvc_obj in pvc_objs_rbd_new:
            pvc_obj.interface = constants.CEPHBLOCKPOOL

        # Confirm PVCs are Bound
        log.info("Verifying the new CephFS and RBD PVCs are Bound")
        for pvc_obj in pvc_objs_cephfs_new + pvc_objs_rbd_new:
            helpers.wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
            )
            pvc_obj.reload()
        log.info("Verified: New CephFS and RBD PVCs are Bound.")

        # Getting result of pods creation as list of Pod objects
        log.info("Getting the result of pods creation process")
        pod_objs_rbd_new = pod_create_rbd.result()
        pod_objs_cephfs_new = pod_create_cephfs.result()

        # Verify new pods are Running
        log.info("Verifying the new pods are Running")
        for pod_obj in pod_objs_rbd_new + pod_objs_cephfs_new:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
            )
            pod_obj.reload()
        log.info("Verified: All new pods are Running.")

        # Verify PVs are deleted
        for pv_obj in pv_objs:
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=300)
        log.info("Verified: PVs are deleted.")

        # Verify PV using ceph toolbox. Image/Subvolume should be deleted.
        pool_name = default_ceph_block_pool()
        for pvc_obj, uuid in pvc_uuid_map.items():
            if pvc_obj.interface == constants.CEPHBLOCKPOOL:
                ret = verify_volume_deleted_in_backend(
                    interface=constants.CEPHBLOCKPOOL,
                    image_uuid=uuid,
                    pool_name=pool_name,
                )
            if pvc_obj.interface == constants.CEPHFILESYSTEM:
                ret = verify_volume_deleted_in_backend(
                    interface=constants.CEPHFILESYSTEM, image_uuid=uuid
                )
            assert (
                ret
            ), f"Volume associated with PVC {pvc_obj.name} still exists in the backend"

        log.info("Fetching IO results from the pods.")
        for pod_obj in io_pods:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
        log.info("Verified IO result on pods.")

        # Verify that the new PVCs are usable by creating new pods
        log.info("Verify that the new PVCs are usable by creating new pods")
        pod_objs_rbd_re = helpers.create_pods(
            pvc_objs_rbd_new, pod_factory, constants.CEPHBLOCKPOOL, 2
        )
        pod_objs_cephfs_re = helpers.create_pods(
            pvc_objs_cephfs_new, pod_factory, constants.CEPHFILESYSTEM, 2
        )

        # Verify pods are Running
        log.info("Verifying the pods are Running")
        for pod_obj in pod_objs_rbd_re + pod_objs_cephfs_re:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
            )
            pod_obj.reload()
        log.info(
            "Successfully created and verified the status of the pods using the new CephFS and RBD PVCs."
        )

        new_pods = (
            pod_objs_rbd_new
            + pod_objs_cephfs_new
            + pod_objs_rbd_re
            + pod_objs_cephfs_re
        )

        # Do setup on the new pods for running IO
        log.info("Setting up the new pods for running IO.")
        for pod_obj in new_pods:
            if pod_obj.pvc.get_pvc_vol_mode == "Block":
                storage_type = "block"
            else:
                storage_type = "fs"
            executor.submit(pod_obj.workload_setup, storage_type=storage_type)

        # Wait for setup on the new pods to complete
        for pod_obj in new_pods:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod " f"{pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on the new pods.")

        # Start IO on the new pods
        log.info("Start IO on the new pods")
        self.run_io_on_pods(new_pods)
        log.info("Started IO on the new pods")

        log.info("Fetching IO results from the new pods.")
        for pod_obj in new_pods:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on the new pods.")

        # Verify number of pods of each daemon type
        final_num_resource_name = [
            len(pod_functions[resource_name]()) for resource_name in daemons_to_kill
        ]
        assert final_num_resource_name == num_of_resource_pods, (
            f"Total number of pods of each type is not matching with "
            f"initial value. Total number of pods of each type before daemon kill: "
            f"{num_of_resource_pods}. Total number of pods of each type present now: "
            f"{final_num_resource_name}"
        )

        # Check ceph status
        ceph_health_check(namespace=config.ENV_DATA["cluster_namespace"])
        log.info("Ceph cluster health is OK")
Code example #6
    def test_verify_rbd_thick_pvc_utilization(
        self,
        pvc_factory,
        pod_factory,
    ):
        """
        Test to verify the storage utilization of an RBD thick-provisioned PVC

        """
        pvc_size = 15
        replica_size = 3
        file1 = "fio_file1"
        file2 = "fio_file2"
        rbd_pool = default_ceph_block_pool()

        size_before_pvc = fetch_used_size(rbd_pool)
        log.info(
            f"Storage pool used size before creating the PVC is {size_before_pvc}"
        )

        # Create RBD thick PVC
        pvc_obj = pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.proj_obj,
            storageclass=default_thick_storage_class(),
            size=pvc_size,
            access_mode=constants.ACCESS_MODE_RWO,
            status=constants.STATUS_BOUND,
        )

        size_after_pvc = fetch_used_size(
            rbd_pool, size_before_pvc + (pvc_size * replica_size))
        log.info(
            f"Verified: Storage pool used size after creating the PVC is {size_after_pvc}"
        )

        pod_obj = pod_factory(
            interface=constants.CEPHBLOCKPOOL,
            pvc=pvc_obj,
            status=constants.STATUS_RUNNING,
        )

        # Create 5GB file
        pod_obj.run_io(
            storage_type="fs",
            size="5G",
            runtime=60,
            fio_filename=file1,
            end_fsync=1,
        )
        pod_obj.get_fio_results()

        # Verify the used size after IO
        fetch_used_size(rbd_pool, size_before_pvc + (pvc_size * replica_size))

        # Create another 5GB file
        pod_obj.run_io(
            storage_type="fs",
            size="5G",
            runtime=60,
            fio_filename=file2,
            end_fsync=1,
        )
        pod_obj.get_fio_results()

        # Verify the used size after IO
        fetch_used_size(rbd_pool, size_before_pvc + (pvc_size * replica_size))

        # Delete the files created by fio
        mount_point = pod_obj.get_storage_path()
        rm_cmd = f"rm {path.join(mount_point, file1)} {path.join(mount_point, file2)}"
        pod_obj.exec_cmd_on_pod(command=rm_cmd, out_yaml_format=False)

        # Verify the used size after deleting the files
        fetch_used_size(rbd_pool, size_before_pvc + (pvc_size * replica_size))

        # Delete the pod
        pod_obj.delete()
        pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        # Delete the PVC
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

        # Verify used size after deleting the PVC
        size_after_pvc_delete = fetch_used_size(rbd_pool, size_before_pvc)
        log.info(
            f"Verified: Storage pool used size after deleting the PVC is {size_after_pvc_delete}"
        )
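fetch_used_size-style checks come down to reading the pool's raw usage from `ceph df` and comparing it, in GiB, against an expected value. Below is a hedged sketch using the JSON output of `ceph df` from the Ceph toolbox pod; the field names follow the upstream Ceph JSON layout, and the helper itself is illustrative, not the ocs-ci implementation.

import json
import subprocess


def pool_used_gib(pool_name):
    # Run inside the Ceph toolbox pod (or wrap with `oc rsh <toolbox-pod>`).
    out = subprocess.check_output(["ceph", "df", "--format", "json"], text=True)
    pools = json.loads(out)["pools"]
    stats = next(pool["stats"] for pool in pools if pool["name"] == pool_name)
    return stats["bytes_used"] / 1024 ** 3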
Code example #7
    def disruptive_base(self, interface, operation_to_disrupt,
                        resource_to_delete):
        """
        Base function for disruptive tests.
        Deletion of 'resource_to_delete' will be introduced while
        'operation_to_disrupt' is in progress.
        """
        pod_functions = {
            "mds": partial(get_mds_pods),
            "mon": partial(get_mon_pods),
            "mgr": partial(get_mgr_pods),
            "osd": partial(get_osd_pods),
            "rbdplugin": partial(get_plugin_pods, interface=interface),
            "cephfsplugin": partial(get_plugin_pods, interface=interface),
            "cephfsplugin_provisioner":
            partial(get_cephfsplugin_provisioner_pods),
            "rbdplugin_provisioner": partial(get_rbdfsplugin_provisioner_pods),
            "operator": partial(get_operator_pods),
        }
        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource=resource_to_delete)
        executor = ThreadPoolExecutor(max_workers=1)

        # Get number of pods of type 'resource_to_delete'
        num_of_resource_to_delete = len(pod_functions[resource_to_delete]())

        # Fetch the number of Pods and PVCs
        initial_num_of_pods = len(get_all_pods(namespace=self.namespace))
        initial_num_of_pvc = len(
            get_all_pvcs(namespace=self.namespace)["items"])

        # Fetch PV names
        pv_objs = []
        for pvc_obj in self.pvc_objs:
            pvc_obj.reload()
            pv_objs.append(pvc_obj.backed_pv_obj)

        # Fetch volume details from pods for the purpose of verification
        node_pv_dict = {}
        for pod_obj in self.pod_objs:
            pod_info = pod_obj.get()
            node = pod_info["spec"]["nodeName"]
            pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"][
                "claimName"]
            for pvc_obj in self.pvc_objs:
                if pvc_obj.name == pvc:
                    pvc_obj.reload()
                    pv = pvc_obj.backed_pv
                    break
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Do setup for running IO on pods
        log.info("Setting up pods for running IO")
        for pod_obj in self.pod_objs:
            pvc_info = pod_obj.pvc.get()
            if pvc_info["spec"]["volumeMode"] == "Block":
                pod_obj.pvc.storage_type = "block"
            else:
                pod_obj.pvc.storage_type = "fs"
            pod_obj.workload_setup(storage_type=pod_obj.pvc.storage_type)
        log.info("Setup for running IO is completed on pods")

        # Start IO on each pod. RWX PVC will be used on two pods. So split the
        # size accordingly
        log.info("Starting IO on pods")
        for pod_obj in self.pod_objs:
            if pod_obj.pvc.access_mode == constants.ACCESS_MODE_RWX:
                io_size = int((self.pvc_size - 1) / 2)
            else:
                io_size = self.pvc_size - 1
            pod_obj.run_io(
                storage_type=pod_obj.pvc.storage_type,
                size=f"{io_size}G",
                fio_filename=f"{pod_obj.name}_io",
                end_fsync=1,
            )
        log.info("IO started on all pods.")

        # Start deleting pods
        pod_bulk_delete = executor.submit(delete_pods,
                                          self.pod_objs,
                                          wait=False)

        if operation_to_disrupt == "delete_pods":
            ret = wait_for_resource_count_change(
                get_all_pods,
                initial_num_of_pods,
                self.namespace,
                "decrease",
                timeout=50,
            )
            assert ret, "Wait timeout: Pods are not being deleted."
            log.info("Pods deletion has started.")
            disruption.delete_resource()

        pod_bulk_delete.result()

        # Verify pods are deleted
        for pod_obj in self.pod_objs:
            assert pod_obj.ocp.wait_for_delete(
                pod_obj.name, 180), f"Pod {pod_obj.name} is not deleted"
        log.info("Verified: Pods are deleted.")

        # Verify that the mount point is removed from nodes after deleting pod
        for node, pvs in node_pv_dict.items():
            cmd = f"oc debug nodes/{node} -- df"
            df_on_node = run_cmd(cmd)
            for pv in pvs:
                assert pv not in df_on_node, (
                    f"{pv} is still present on node {node} after "
                    f"deleting the pods.")
        log.info(
            "Verified: mount points are removed from nodes after deleting "
            "the pods")

        # Fetch image uuid associated with PVCs
        pvc_uuid_map = {}
        for pvc_obj in self.pvc_objs:
            pvc_uuid_map[pvc_obj.name] = pvc_obj.image_uuid
        log.info("Fetched image uuid associated with each PVC")

        # Start deleting PVCs
        pvc_bulk_delete = executor.submit(delete_pvcs, self.pvc_objs)

        if operation_to_disrupt == "delete_pvcs":
            ret = wait_for_resource_count_change(get_all_pvcs,
                                                 initial_num_of_pvc,
                                                 self.namespace,
                                                 "decrease",
                                                 timeout=50)
            assert ret, "Wait timeout: PVCs are not being deleted."
            log.info("PVCs deletion has started.")
            disruption.delete_resource()

        pvcs_deleted = pvc_bulk_delete.result()

        assert pvcs_deleted, "Deletion of PVCs failed."

        # Verify PVCs are deleted
        for pvc_obj in self.pvc_objs:
            assert pvc_obj.ocp.wait_for_delete(
                pvc_obj.name), f"PVC {pvc_obj.name} is not deleted"
        log.info("Verified: PVCs are deleted.")

        # Verify PVs are deleted
        for pv_obj in pv_objs:
            assert pv_obj.ocp.wait_for_delete(
                pv_obj.name, 120), f"PV {pv_obj.name} is not deleted"
        log.info("Verified: PVs are deleted.")

        # Verify PV using ceph toolbox. Image/Subvolume should be deleted.
        pool_name = default_ceph_block_pool()
        for pvc_name, uuid in pvc_uuid_map.items():
            if interface == constants.CEPHBLOCKPOOL:
                ret = verify_volume_deleted_in_backend(interface=interface,
                                                       image_uuid=uuid,
                                                       pool_name=pool_name)
            if interface == constants.CEPHFILESYSTEM:
                ret = verify_volume_deleted_in_backend(interface=interface,
                                                       image_uuid=uuid)
            assert ret, (f"Volume associated with PVC {pvc_name} still exists "
                         f"in backend")

        # Verify number of pods of type 'resource_to_delete'
        final_num_resource_to_delete = len(pod_functions[resource_to_delete]())
        assert final_num_resource_to_delete == num_of_resource_to_delete, (
            f"Total number of {resource_to_delete} pods is not matching with "
            f"initial value. Total number of pods before deleting a pod: "
            f"{num_of_resource_to_delete}. Total number of pods present now: "
            f"{final_num_resource_to_delete}")

        # Check ceph status
        ceph_health_check(namespace=config.ENV_DATA["cluster_namespace"])
        log.info("Ceph cluster health is OK")
Code example #8
    def test_multiple_pvc_concurrent_creation_deletion(self, interface,
                                                       multi_pvc_factory):
        """
        Exercise concurrent creation and deletion of PVCs
        """
        proj_obj = self.pvc_objs[0].project

        executor = ThreadPoolExecutor(max_workers=1)

        # Get PVs
        pv_objs = []
        for pvc in self.pvc_objs:
            pv_objs.append(pvc.backed_pv_obj)

        # Fetch image uuid associated with PVCs
        pvc_uuid_map = {}
        for pvc_obj in self.pvc_objs:
            pvc_uuid_map[pvc_obj.name] = pvc_obj.image_uuid
        log.info("Fetched image uuid associated with each PVC")

        # Start deleting 100 PVCs
        log.info("Start deleting PVCs.")
        pvc_delete = executor.submit(delete_pvcs, self.pvc_objs)

        # Create 100 PVCs
        log.info("Start creating new PVCs")
        self.new_pvc_objs = multi_pvc_factory(
            interface=interface,
            project=proj_obj,
            size=self.pvc_size,
            access_modes=self.access_modes,
            status="",
            num_of_pvc=self.num_of_pvcs,
            wait_each=False,
        )

        for pvc_obj in self.new_pvc_objs:
            wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
        log.info(f"Newly created {self.num_of_pvcs} PVCs are in Bound state.")

        # Verify PVCs are deleted
        res = pvc_delete.result()
        assert res, "Deletion of PVCs failed"
        log.info("PVC deletion was successful.")
        for pvc in self.pvc_objs:
            pvc.ocp.wait_for_delete(resource_name=pvc.name)
        log.info(f"Successfully deleted initial {self.num_of_pvcs} PVCs")

        # Verify PVs are deleted
        for pv_obj in pv_objs:
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
        log.info(f"Successfully deleted initial {self.num_of_pvcs} PVs")

        # Verify PV using ceph toolbox. Image/Subvolume should be deleted.
        for pvc_name, uuid in pvc_uuid_map.items():
            pool_name = None
            if interface == constants.CEPHBLOCKPOOL:
                pool_name = default_ceph_block_pool()
            ret = verify_volume_deleted_in_backend(interface=interface,
                                                   image_uuid=uuid,
                                                   pool_name=pool_name)
            assert ret, (f"Volume associated with PVC {pvc_name} still exists "
                         f"in backend")

        # Verify status of nodes
        for node in get_node_objs():
            node_status = node.ocp.get_resource_status(node.name)
            assert (node_status == constants.NODE_READY
                    ), f"Node {node.name} is in {node_status} state."
Code example #9
def uninstall_cluster_logging():
    """
    Uninstall cluster-logging from the cluster.
    Deletes the projects "openshift-logging" and "openshift-operators-redhat"
    """
    # Validating the pods before deleting the instance
    pod_list = get_all_pods(namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)

    for pod in pod_list:
        logger.info(
            f"Pod running in the openshift-logging namespace: {pod.name}")

    # Excluding cluster-logging-operator from pod_list and getting pod names
    pod_names_list = [
        pod.name for pod in pod_list
        if not pod.name.startswith("cluster-logging-operator")
    ]
    pvc_objs = get_all_pvc_objs(
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)

    # Fetch image uuid associated with PVCs to be deleted
    pvc_uuid_map = {}
    for pvc_obj in pvc_objs:
        pvc_uuid_map[pvc_obj.name] = pvc_obj.image_uuid

    # Checking for used space
    cbp_name = default_ceph_block_pool()
    used_space_before_deletion = fetch_used_size(cbp_name)
    logger.info(
        f"Used space before deletion of cluster logging {used_space_before_deletion}"
    )

    # Deleting the clusterlogging instance
    clusterlogging_obj = ocp.OCP(
        kind=constants.CLUSTER_LOGGING,
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)
    assert clusterlogging_obj.delete(resource_name="instance")

    check_pod_vanished(pod_names_list)

    # Collect the backing PV objects before deleting the PVCs
    pv_objs = [pvc_obj.backed_pv_obj for pvc_obj in pvc_objs]

    assert delete_pvcs(pvc_objs=pvc_objs), "PVCs deletion failed"

    for pvc_obj, pv_obj in zip(pvc_objs, pv_objs):
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name, timeout=300)
        pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=300)
    logger.info("Verified: PVCs are deleted.")
    logger.info("Verified: PVs are deleted.")

    for pvc_name, uuid in pvc_uuid_map.items():
        rbd = verify_volume_deleted_in_backend(
            interface=constants.CEPHBLOCKPOOL,
            image_uuid=uuid,
            pool_name=cbp_name)
        assert rbd, f"Volume associated with PVC {pvc_name} still exists " f"in backend"

    # Checking for used space after PVC deletion
    used_space_after_deletion = fetch_used_size(cbp_name, exp_val=30)
    logger.info(
        f"Used space after deletion of cluster logging {used_space_after_deletion}"
    )
    if used_space_after_deletion < used_space_before_deletion:
        logger.info("Expected !!! Space has reclaimed")
    else:
        logger.warning(
            "Unexpected !! No space reclaimed after deletion of PVC")

    # Deleting the RBAC permission set
    rbac_role = ocp.OCP(
        kind=constants.ROLE,
        namespace=constants.OPENSHIFT_OPERATORS_REDHAT_NAMESPACE)
    rbac_role.delete(yaml_file=constants.EO_RBAC_YAML)

    openshift_logging_namespace = ocp.OCP(
        kind=constants.NAMESPACES,
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)
    openshift_operators_redhat_namespace = ocp.OCP(
        kind=constants.NAMESPACES,
        namespace=constants.OPENSHIFT_OPERATORS_REDHAT_NAMESPACE,
    )

    if openshift_operators_redhat_namespace.get():
        assert openshift_operators_redhat_namespace.delete(
            resource_name=constants.OPENSHIFT_OPERATORS_REDHAT_NAMESPACE)
        logger.info(
            "The project openshift-operators-redhat was deleted successfully")

    if openshift_logging_namespace.get():
        assert openshift_logging_namespace.delete(
            resource_name=constants.OPENSHIFT_LOGGING_NAMESPACE)
        logger.info("The namespace openshift-logging got deleted successfully")
Code example #10
    def test_change_reclaim_policy_of_pv(self, interface, reclaim_policy,
                                         pod_factory):
        """
        This test case verifies updating the reclaim policy of PVs
        """
        reclaim_policy_to = "Delete" if reclaim_policy == "Retain" else "Retain"

        # Fetch name of PVs
        pvs = [pvc_obj.backed_pv_obj for pvc_obj in self.pvc_objs]

        # Fetch image uuid associated with PVCs
        pvc_uuid_map = {}
        for pvc_obj in self.pvc_objs:
            pvc_uuid_map[pvc_obj.name] = pvc_obj.image_uuid
        log.info("Fetched image uuid associated with each PVC")

        # Select PVs to change reclaim policy
        changed_pvs = pvs[:5]

        # Run IO on pods
        self.run_and_verify_io(self.pod_objs)
        log.info("Verified IO result on pods.")

        # Change reclaimPolicy to 'reclaim_policy_to'
        for pv_obj in changed_pvs:
            pv_name = pv_obj.name
            patch_param = (f'{{"spec":{{"persistentVolumeReclaimPolicy":'
                           f'"{reclaim_policy_to}"}}}}')
            assert pv_obj.ocp.patch(
                resource_name=pv_name,
                params=patch_param,
                format_type="strategic"), (
                    f"Failed to change persistentVolumeReclaimPolicy of pv "
                    f"{pv_name} to {reclaim_policy_to}")
            log.info(f"Changed persistentVolumeReclaimPolicy of pv {pv_name} "
                     f"to {reclaim_policy_to}")

        retain_pvs = []
        delete_pvs = []

        # Verify reclaim policy of all PVs
        for pv_obj in pvs:
            policy = pv_obj.get().get("spec").get(
                "persistentVolumeReclaimPolicy")
            if policy == "Retain":
                retain_pvs.append(pv_obj)
            else:
                delete_pvs.append(pv_obj)
            if pv_obj in changed_pvs:
                assert policy == reclaim_policy_to, (
                    f"Reclaim policy of {pv_obj.name} is {policy}. "
                    f"It has not changed to {reclaim_policy_to}")
            else:
                assert policy == reclaim_policy, (
                    f"Reclaim policy of {pv_obj.name} is {policy} instead "
                    f"of {reclaim_policy}.")
        log.info("Verified reclaim policy of all PVs")

        # Run IO on pods
        self.run_and_verify_io(self.pod_objs, do_setup=False)
        log.info("Ran and verified IO on pods after changing reclaim policy.")

        # Delete all pods
        log.info("Deleting all pods")
        for pod_obj in self.pod_objs:
            pod_obj.delete()

        # Verify pods are deleted
        for pod_obj in self.pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name, 300)
        log.info("Verified: Pods are deleted.")

        # Create new pods mounting one volume on each pod
        log.info("Creating new pods.")
        new_pod_objs = []
        for pvc_obj in self.pvc_objs:
            new_pod_objs.append(
                pod_factory(interface=interface, pvc=pvc_obj, status=None))
        for pod in new_pod_objs:
            wait_for_resource_state(pod, constants.STATUS_RUNNING)
            pod.reload()

        # Run IO on new pods
        self.run_and_verify_io(new_pod_objs)
        log.info("Ran and verified IO on new pods.")

        # Delete all pods
        log.info("Deleting all new pods.")
        for pod_obj in new_pod_objs:
            pod_obj.delete()

        # Verify pods are deleted
        for pod_obj in new_pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name, 300)
        log.info("Verified: All new pods are deleted.")

        # Delete PVCs
        log.info("Deleting all PVCs.")
        for pvc_obj in self.pvc_objs:
            pvc_obj.delete()

        # Verify PVCs are deleted
        for pvc_obj in self.pvc_objs:
            pvc_obj.ocp.wait_for_delete(pvc_obj.name, 300)
        log.info("Verified: All PVCs are deleted")

        # PVs having reclaim policy 'Delete' will be deleted
        for pv_obj in delete_pvs:
            pv_obj.ocp.wait_for_delete(pv_obj.name, 300)
        log.info(
            "Verified: All PVs having reclaim policy 'Delete' are deleted.")

        # PVs having reclaim policy 'Retain' will be in Released state
        for pv_obj in retain_pvs:
            wait_for_resource_state(resource=pv_obj,
                                    state=constants.STATUS_RELEASED)
        log.info("Verified: All PVs having reclaim policy 'Retain' are "
                 "in 'Released' state.")

        # Change reclaimPolicy to Delete
        for pv_obj in retain_pvs:
            pv_name = pv_obj.name
            patch_param = '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
            assert pv_obj.ocp.patch(
                resource_name=pv_name,
                params=patch_param,
                format_type="strategic"), (
                    f"Failed to change persistentVolumeReclaimPolicy "
                    f"for pv {pv_name} to Delete")
        log.info("Changed reclaim policy of all remaining PVs to Delete")

        # Verify PVs deleted. PVs will be deleted immediately after setting
        # reclaim policy to Delete
        for pv_obj in retain_pvs:
            pv_obj.ocp.wait_for_delete(pv_obj.name, 300)
        log.info(
            "Verified: All remaining PVs are deleted after changing reclaim "
            "policy to Delete.")

        # Verify PV using ceph toolbox. Wait for Image/Subvolume to be deleted.
        pool_name = (default_ceph_block_pool()
                     if interface == constants.CEPHBLOCKPOOL else None)
        for pvc_name, uuid in pvc_uuid_map.items():
            assert verify_volume_deleted_in_backend(
                interface=interface, image_uuid=uuid, pool_name=pool_name
            ), f"Volume associated with PVC {pvc_name} still exists in backend"
        log.info("Verified: Image/Subvolume removed from backend.")