# Example no. 1
    def setup(self, multi_pvc_factory, pod_factory):
        """
        Create the PVCs and pods required by the test.

        Creates two CephFS PVCs (RWO and RWX) and three RBD PVCs (RWO
        filesystem, RWO block and RWX block) in the same project, attaches
        pods to all of them, and records each PVC's volume mode.
        """
        cephfs_modes = [constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX]
        rbd_modes = [
            constants.ACCESS_MODE_RWO,
            f'{constants.ACCESS_MODE_RWO}-Block',
            f'{constants.ACCESS_MODE_RWX}-Block',
        ]

        self.pvcs_cephfs = multi_pvc_factory(
            interface=constants.CEPHFILESYSTEM, size=10,
            access_modes=cephfs_modes, status=constants.STATUS_BOUND,
            num_of_pvc=2, timeout=90
        )

        # Create the RBD PVCs in the same project as the CephFS PVCs
        self.pvcs_rbd = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.pvcs_cephfs[0].project, size=10,
            access_modes=rbd_modes, status=constants.STATUS_BOUND,
            num_of_pvc=3, timeout=90
        )

        cephfs_pods = helpers.create_pods(
            self.pvcs_cephfs, pod_factory, constants.CEPHFILESYSTEM, 2,
            constants.STATUS_RUNNING
        )
        rbd_pods = helpers.create_pods(
            self.pvcs_rbd, pod_factory, constants.CEPHBLOCKPOOL, 2,
            constants.STATUS_RUNNING
        )
        self.pods = cephfs_pods + rbd_pods

        # Remember each PVC's volume mode (Filesystem/Block) for later use
        for pvc_obj in self.pvcs_cephfs + self.pvcs_rbd:
            pvc_obj.volume_mode = pvc_obj.get()['spec']['volumeMode']
    def test_pvc_disruptive(self, interface, operation_to_disrupt,
                            resource_to_delete, multi_pvc_factory,
                            pod_factory):
        """
        Base function for PVC disruptive tests.

        Deletion of 'resource_to_delete' will be introduced while
        'operation_to_disrupt' is progressing.

        Args:
            interface (str): Storage interface
                (CephBlockPool/CephFileSystem constant)
            operation_to_disrupt (str): Stage at which the resource is
                deleted - 'create_pvc', 'create_pod' or 'run_io'
            resource_to_delete (str): Type of ceph/csi pod to delete,
                a key of ``pod_functions`` below
            multi_pvc_factory: Fixture to create multiple PVCs
            pod_factory: Fixture to create pods
        """
        def get_storage_type(pod_obj):
            # fio needs storage_type 'block' for raw-block PVCs and 'fs'
            # for filesystem-mode PVCs
            pvc_info = pod_obj.pvc.get()
            if pvc_info['spec']['volumeMode'] == 'Block':
                return 'block'
            return 'fs'

        pod_functions = {
            'mds': partial(pod.get_mds_pods),
            'mon': partial(pod.get_mon_pods),
            'mgr': partial(pod.get_mgr_pods),
            'osd': partial(pod.get_osd_pods),
            'rbdplugin': partial(pod.get_plugin_pods, interface=interface),
            'cephfsplugin': partial(pod.get_plugin_pods, interface=interface),
            'cephfsplugin_provisioner': partial(
                pod.get_cephfsplugin_provisioner_pods),
            'rbdplugin_provisioner': partial(
                pod.get_rbdfsplugin_provisioner_pods),
            'operator': partial(pod.get_operator_pods)
        }

        # Get number of pods of type 'resource_to_delete'
        num_of_resource_to_delete = len(pod_functions[resource_to_delete]())

        num_of_pvc = 12
        namespace = self.proj_obj.namespace

        # Fetch the number of Pods and PVCs
        initial_num_of_pods = len(pod.get_all_pods(namespace=namespace))
        initial_num_of_pvc = len(get_all_pvcs(namespace=namespace)['items'])

        executor = ThreadPoolExecutor(max_workers=(2 * num_of_pvc))

        DISRUPTION_OPS.set_resource(resource=resource_to_delete)

        access_modes = [constants.ACCESS_MODE_RWO]
        if interface == constants.CEPHFILESYSTEM:
            access_modes.append(constants.ACCESS_MODE_RWX)

        # Modify access_modes list to create rbd `block` type volume with
        # RWX access mode. RWX is not supported in non-block type rbd
        if interface == constants.CEPHBLOCKPOOL:
            access_modes.extend([
                f'{constants.ACCESS_MODE_RWO}-Block',
                f'{constants.ACCESS_MODE_RWX}-Block'
            ])

        # Start creation of PVCs
        bulk_pvc_create = executor.submit(
            multi_pvc_factory,
            interface=interface,
            project=self.proj_obj,
            size=5,
            access_modes=access_modes,
            access_modes_selection='distribute_random',
            status=constants.STATUS_BOUND,
            num_of_pvc=num_of_pvc,
            wait_each=False)

        if operation_to_disrupt == 'create_pvc':
            # Ensure PVCs are being created before deleting the resource
            ret = helpers.wait_for_resource_count_change(
                get_all_pvcs, initial_num_of_pvc, namespace, 'increase')
            assert ret, "Wait timeout: PVCs are not being created."
            logger.info("PVCs creation has started.")
            DISRUPTION_OPS.delete_resource()

        pvc_objs = bulk_pvc_create.result()

        # Confirm that PVCs are Bound
        for pvc_obj in pvc_objs:
            helpers.wait_for_resource_state(resource=pvc_obj,
                                            state=constants.STATUS_BOUND,
                                            timeout=120)
            pvc_obj.reload()
        logger.info("Verified: PVCs are Bound.")

        # Start creating pods
        bulk_pod_create = executor.submit(helpers.create_pods, pvc_objs,
                                          pod_factory, interface, 2)

        if operation_to_disrupt == 'create_pod':
            # Ensure that pods are being created before deleting the resource
            ret = helpers.wait_for_resource_count_change(
                pod.get_all_pods, initial_num_of_pods, namespace, 'increase')
            assert ret, "Wait timeout: Pods are not being created."
            logger.info("Pods creation has started.")
            DISRUPTION_OPS.delete_resource()

        pod_objs = bulk_pod_create.result()

        # Verify pods are Running
        for pod_obj in pod_objs:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING)
            pod_obj.reload()
        logger.info("Verified: All pods are Running.")

        # Do setup on pods for running IO
        logger.info("Setting up pods for running IO.")
        for pod_obj in pod_objs:
            executor.submit(
                pod_obj.workload_setup,
                storage_type=get_storage_type(pod_obj)
            )

        # Wait for setup on pods to complete
        for pod_obj in pod_objs:
            for sample in TimeoutSampler(180, 2, getattr, pod_obj,
                                         'wl_setup_done'):
                if sample:
                    logger.info(f"Setup for running IO is completed on pod "
                                f"{pod_obj.name}.")
                    break
        logger.info("Setup for running IO is completed on all pods.")

        # Start IO on each pod
        for pod_obj in pod_objs:
            pod_obj.run_io(storage_type=get_storage_type(pod_obj),
                           size='1G',
                           runtime=10,
                           fio_filename=f'{pod_obj.name}_io_file1')
        logger.info("FIO started on all pods.")

        if operation_to_disrupt == 'run_io':
            DISRUPTION_OPS.delete_resource()

        logger.info("Fetching FIO results.")
        for pod_obj in pod_objs:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get('jobs')[0].get('error')
            assert err_count == 0, (
                f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}")
        logger.info("Verified FIO result on pods.")

        # Delete pods
        for pod_obj in pod_objs:
            pod_obj.delete(wait=True)
        for pod_obj in pod_objs:
            pod_obj.ocp.wait_for_delete(pod_obj.name)

        # Verify that PVCs are reusable by creating new pods
        pod_objs = helpers.create_pods(pvc_objs, pod_factory, interface, 2)

        # Verify new pods are Running
        for pod_obj in pod_objs:
            helpers.wait_for_resource_state(resource=pod_obj,
                                            state=constants.STATUS_RUNNING)
            pod_obj.reload()
        # Fix: was `logging.info`; use the module-level `logger` like the
        # rest of this test
        logger.info("Verified: All new pods are Running.")

        # Run IO on each of the new pods
        for pod_obj in pod_objs:
            pod_obj.run_io(storage_type=get_storage_type(pod_obj),
                           size='1G',
                           runtime=10,
                           fio_filename=f'{pod_obj.name}_io_file2')

        logger.info("Fetching FIO results from new pods")
        for pod_obj in pod_objs:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get('jobs')[0].get('error')
            assert err_count == 0, (
                f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}")
        logger.info("Verified FIO result on new pods.")

        # Verify number of pods of type 'resource_to_delete'
        final_num_resource_to_delete = len(pod_functions[resource_to_delete]())
        assert final_num_resource_to_delete == num_of_resource_to_delete, (
            f"Total number of {resource_to_delete} pods is not matching with "
            f"initial value. Total number of pods before deleting a pod: "
            f"{num_of_resource_to_delete}. Total number of pods present now: "
            f"{final_num_resource_to_delete}")

        # Check ceph status
        ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'])
        logger.info("Ceph cluster health is OK")
    def setup(self, interface, multi_pvc_factory, pod_factory):
        """
        Create PVCs and attach IO pods to a subset of them.

        Returns:
            tuple: (all PVC objects, IO pods, PVCs reserved for creating
                new pods later, access modes used)
        """
        # RWX is supported for CephFS; for rbd it needs Block volume mode
        access_modes = [constants.ACCESS_MODE_RWO]
        if interface == constants.CEPHFILESYSTEM:
            access_modes.append(constants.ACCESS_MODE_RWX)
        if interface == constants.CEPHBLOCKPOOL:
            access_modes += [
                f'{constants.ACCESS_MODE_RWO}-Block',
                f'{constants.ACCESS_MODE_RWX}-Block',
            ]

        pvc_objs = multi_pvc_factory(
            interface=interface, project=None, storageclass=None,
            size=self.pvc_size, access_modes=access_modes,
            status=constants.STATUS_BOUND, num_of_pvc=self.num_of_pvcs,
            wait_each=False
        )

        # Record each PVC's volume mode (Filesystem/Block)
        for pvc_obj in pvc_objs:
            pvc_obj.volume_mode = pvc_obj.get()['spec']['volumeMode']

        # Partition the PVCs by access mode
        rwo_pvcs = [
            obj for obj in pvc_objs
            if obj.access_mode == constants.ACCESS_MODE_RWO
        ]
        rwx_pvcs = [
            obj for obj in pvc_objs
            if obj.access_mode == constants.ACCESS_MODE_RWX
        ]

        num_of_rwo_pvc = len(rwo_pvcs)
        num_of_rwx_pvc = len(rwx_pvcs)

        # Separate the Block-mode RWO PVCs from the filesystem-mode ones
        block_rwo_pvcs = [
            obj for obj in rwo_pvcs if obj.volume_mode == 'Block'
        ]
        rwo_pvcs = [
            obj for obj in rwo_pvcs if obj.volume_mode != 'Block'
        ]

        log.info(
            f"Created {num_of_rwo_pvc} RWO PVCs in which "
            f"{len(block_rwo_pvcs)} are rbd block type."
        )
        log.info(f"Created {num_of_rwx_pvc} RWX PVCs.")

        # Select 6 PVCs for IO pods; keep the rest for creating new pods
        groups = [rwo_pvcs, rwx_pvcs]
        if block_rwo_pvcs:
            groups.append(block_rwo_pvcs)
            split = 2
        else:
            split = 3
        pvc_objs_for_io_pods = [obj for grp in groups for obj in grp[:split]]
        pvc_objs_new_pods = [obj for grp in groups for obj in grp[split:]]

        # Create one pod using each RWO PVC and two pods using each RWX PVC
        # for running IO
        io_pods = helpers.create_pods(
            pvc_objs_for_io_pods, pod_factory, interface, 2
        )

        # Wait for pods to be in Running state
        for pod_obj in io_pods:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()
        log.info(f"Created {len(io_pods)} pods for running IO.")

        return pvc_objs, io_pods, pvc_objs_new_pods, access_modes
    def test_daemon_kill_during_pvc_pod_creation_and_io(
        self, interface, resource_name, setup, multi_pvc_factory,
        pod_factory
    ):
        """
        Kill 'resource_name' daemon while PVCs creation, pods
        creation and IO operation are progressing.

        Args:
            interface (str): Storage interface
            resource_name (str): Type of ceph/csi pod whose daemon is
                killed, a key of ``pod_functions`` below
            setup: Fixture providing the initial PVCs and IO pods
            multi_pvc_factory: Fixture to create multiple PVCs
            pod_factory: Fixture to create pods
        """
        def get_storage_type(pod_obj):
            # fio needs 'block' for raw-block PVCs and 'fs' otherwise
            return 'block' if pod_obj.pvc.volume_mode == 'Block' else 'fs'

        num_of_new_pvcs = 5
        pvc_objs, io_pods, pvc_objs_new_pods, access_modes = setup
        proj_obj = pvc_objs[0].project
        storageclass = pvc_objs[0].storageclass

        pod_functions = {
            'mds': partial(get_mds_pods), 'mon': partial(get_mon_pods),
            'mgr': partial(get_mgr_pods), 'osd': partial(get_osd_pods),
            'rbdplugin': partial(get_plugin_pods, interface=interface),
            'cephfsplugin': partial(get_plugin_pods, interface=interface),
            'cephfsplugin_provisioner': partial(
                get_cephfsplugin_provisioner_pods),
            'rbdplugin_provisioner': partial(
                get_rbdfsplugin_provisioner_pods),
            'operator': partial(get_operator_pods)
        }

        executor = ThreadPoolExecutor(max_workers=len(io_pods))

        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource=resource_name)

        # Get number of pods of type 'resource_name'
        resource_pods_num = len(pod_functions[resource_name]())

        # Do setup for running IO on pods
        log.info("Setting up pods for running IO")
        for pod_obj in io_pods:
            executor.submit(
                pod_obj.workload_setup,
                storage_type=get_storage_type(pod_obj)
            )

        # Wait for setup on pods to complete
        for pod_obj in io_pods:
            log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
            for sample in TimeoutSampler(
                180, 2, getattr, pod_obj, 'wl_setup_done'
            ):
                if sample:
                    log.info(
                        f"Setup for running IO is completed on pod "
                        f"{pod_obj.name}."
                    )
                    break
        log.info("Setup for running IO is completed on pods")

        # Set daemon to be killed
        disruption.select_daemon()

        # Start creating new pods
        log.info("Start creating new pods.")
        bulk_pod_create = executor.submit(
            helpers.create_pods, pvc_objs_new_pods, pod_factory, interface, 2
        )

        # Start creation of new PVCs
        log.info("Start creating new PVCs.")
        bulk_pvc_create = executor.submit(
            multi_pvc_factory, interface=interface,
            project=proj_obj, storageclass=storageclass, size=self.pvc_size,
            access_modes=access_modes,
            access_modes_selection='distribute_random',
            status="", num_of_pvc=num_of_new_pvcs, wait_each=False
        )

        # Start IO on each pod
        log.info("Start IO on pods")
        for pod_obj in io_pods:
            pod_obj.run_io(
                storage_type=get_storage_type(pod_obj), size='1G', runtime=10,
                fio_filename=f'{pod_obj.name}_io_file1'
            )
        log.info("IO started on all pods.")

        # Kill daemon
        disruption.kill_daemon()

        # Getting result of PVC creation as list of PVC objects
        pvc_objs_new = bulk_pvc_create.result()

        # Confirm PVCs are Bound
        for pvc_obj in pvc_objs_new:
            helpers.wait_for_resource_state(
                resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
            )
            pvc_obj.reload()
        log.info("Verified: New PVCs are Bound.")

        # Getting result of pods creation as list of Pod objects
        pod_objs_new = bulk_pod_create.result()

        # Verify new pods are Running
        for pod_obj in pod_objs_new:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()
        log.info("Verified: All new pods are Running.")

        # Verify IO
        log.info("Fetching IO results from IO pods.")
        for pod_obj in io_pods:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get('jobs')[0].get('error')
            assert err_count == 0, (
                f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
            )
            log.info(f"IOPs after FIO on pod {pod_obj.name}:")
            log.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
            )
            log.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
            )
        log.info("Verified IO result on IO pods.")

        all_pod_objs = io_pods + pod_objs_new

        # Fetch volume details from pods for the purpose of verification.
        # Fix: the loop variable was named 'pod' (shadowing the commonly
        # imported 'pod' helper module); 'pv' is now guarded so a missing
        # PVC match fails loudly instead of reusing a stale value.
        node_pv_dict = {}
        for pod_obj in all_pod_objs:
            pod_info = pod_obj.get()
            node = pod_info['spec']['nodeName']
            pvc_name = (
                pod_info['spec']['volumes'][0]['persistentVolumeClaim']
                ['claimName']
            )
            pv = None
            for pvc_obj in pvc_objs:
                if pvc_obj.name == pvc_name:
                    pvc_obj.reload()
                    pv = pvc_obj.backed_pv
                    break
            assert pv is not None, (
                f"Could not find PVC {pvc_name} used by pod {pod_obj.name}"
            )
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Delete pods
        for pod_obj in all_pod_objs:
            pod_obj.delete(wait=False)

        # Verify pods are deleted
        for pod_obj in all_pod_objs:
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        # Verify number of 'resource_name' type pods
        final_resource_pods_num = len(pod_functions[resource_name]())
        assert final_resource_pods_num == resource_pods_num, (
            f"Total number of {resource_name} pods is not matching with "
            f"initial value. Total number of pods before daemon kill: "
            f"{resource_pods_num}. Total number of pods present now: "
            f"{final_resource_pods_num}"
        )

        # Verify volumes are unmapped from nodes after deleting the pods
        node_pv_mounted = helpers.verify_pv_mounted_on_node(node_pv_dict)
        for node, pvs in node_pv_mounted.items():
            assert not pvs, (
                f"PVs {pvs} is still present on node {node} after "
                f"deleting the pods."
            )
        log.info(
            "Verified: mount points are removed from nodes after deleting "
            "the pods"
        )

        # Set volume mode on PVC objects
        for pvc_obj in pvc_objs_new:
            pvc_obj.volume_mode = pvc_obj.get()['spec']['volumeMode']

        # Verify that PVCs are reusable by creating new pods
        all_pvc_objs = pvc_objs + pvc_objs_new
        pod_objs_re = helpers.create_pods(
            all_pvc_objs, pod_factory, interface, 2
        )

        # Verify pods are Running
        for pod_obj in pod_objs_re:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=constants.STATUS_RUNNING
            )
            pod_obj.reload()
        log.info("Successfully created new pods using all PVCs.")

        # Run IO on each of the newly created pods
        for pod_obj in pod_objs_re:
            pod_obj.run_io(
                storage_type=get_storage_type(pod_obj), size='1G', runtime=10,
                fio_filename=f'{pod_obj.name}_io_file2'
            )

        log.info("Fetching IO results from newly created pods")
        for pod_obj in pod_objs_re:
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get('jobs')[0].get('error')
            assert err_count == 0, (
                f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
            )
            log.info(f"IOPs after FIO on pod {pod_obj.name}:")
            log.info(
                f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
            )
            log.info(
                f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
            )
        log.info("Verified IO result on newly created pods.")
    def operations_base(self, resource_to_delete):
        """
        Delete resource 'resource_to_delete' while PVCs creation, Pods
        creation and IO operation are progressing.
        Verifies PVCs can be re-used by creating new pods.

        Args:
            resource_to_delete (str): Type of ceph pod to delete. One of
                'mds', 'mon', 'mgr' or 'osd'.

        Steps:
        1. Create pods for running IO and verify they are Running.
        2. Start creating more pods.
        3. Start creating new PVCs.
        4. Start IO on pods created in Step 1.
        5. Delete the resource 'resource_to_delete'.
        6. Verify that PVCs created in Step 3 are in Bound state.
        7. Verify that pods created in Step 2 are Running.
        8. Verify IO results.
        9. Delete pods created in Steps 1 and 2.
        10. Verify the total number of 'resource_to_delete' pods.
        11. Verify volumes are unmapped from nodes after deleting pods.
        12. Use all PVCs to create new pods. One PVC for one pod.
        13. Start IO on all pods created in Step 12.
        14. Verify IO results.
        """
        # Separate the available PVCs
        pvc_objs_for_io_pods = self.pvc_objs[0:self.pvc_num_for_io_pods]
        pvc_objs_new_pods = self.pvc_objs[self.pvc_num_for_io_pods:]

        pod_functions = {
            'mds': get_mds_pods,
            'mon': get_mon_pods,
            'mgr': get_mgr_pods,
            'osd': get_osd_pods
        }

        executor = ThreadPoolExecutor(max_workers=2)

        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource=resource_to_delete)

        # Get number of pods
        initial_pods_num = len(pod_functions[resource_to_delete]())

        # Create pods for running IO
        io_pods = helpers.create_pods(pvc_objs_list=pvc_objs_for_io_pods,
                                      interface_type=self.interface,
                                      desired_status=constants.STATUS_RUNNING,
                                      wait=True,
                                      namespace=self.namespace)

        # Updating self.pod_objs for the purpose of teardown
        self.pod_objs.extend(io_pods)

        # Do setup for running IO on pods
        log.info("Setting up pods for running IO")
        for pod_obj in io_pods:
            pod_obj.workload_setup(storage_type='fs')
        log.info("Setup for running IO is completed on pods")

        # Start creating new pods
        log.info("Start creating new pods.")
        bulk_pod_create = executor.submit(helpers.create_pods,
                                          pvc_objs_list=pvc_objs_new_pods,
                                          interface_type=self.interface,
                                          wait=False,
                                          namespace=self.namespace)

        # Start creation of new PVCs
        log.info("Start creating new PVCs.")
        bulk_pvc_create = executor.submit(helpers.create_multiple_pvcs,
                                          sc_name=self.sc_obj.name,
                                          namespace=self.namespace,
                                          number_of_pvc=self.num_of_new_pvcs,
                                          size=self.pvc_size,
                                          wait=False)

        # Start IO on each pod
        log.info("Start IO on pods")
        for pod_obj in io_pods:
            pod_obj.run_io(storage_type='fs', size=f'{self.pvc_size_int - 1}G')
        log.info("IO started on all pods.")

        # Delete the resource
        disruption.delete_resource()

        # Getting result of PVC creation as list of PVC objects
        pvc_objs_new = bulk_pvc_create.result()

        # Updating self.pvc_objs_new for the purpose of teardown
        self.pvc_objs_new.extend(pvc_objs_new)

        # Verify PVCs are Bound
        for pvc_obj in pvc_objs_new:
            assert pvc_obj.ocp.wait_for_resource(
                condition=constants.STATUS_BOUND,
                resource_name=pvc_obj.name,
                timeout=240,
                sleep=10
            ), (f"Wait timeout: PVC {pvc_obj.name} is not in 'Bound' status")
        log.info("Verified: New PVCs are Bound.")

        # Getting result of pods creation as list of Pod objects
        pod_objs_new = bulk_pod_create.result()

        # Updating self.pod_objs for the purpose of teardown
        self.pod_objs.extend(pod_objs_new)

        # Verify new pods are Running.
        # Fix: message previously claimed '120 seconds' while the actual
        # timeout is 240 seconds.
        for pod_obj in pod_objs_new:
            assert pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                resource_name=pod_obj.name,
                timeout=240,
                sleep=10), (
                    f"Wait timeout: Pod {pod_obj.name} is not in 'Running' "
                    f"state even after 240 seconds.")
        log.info("Verified: All pods are Running.")

        # Verify IO
        log.info("Fetching IO results.")
        for pod_obj in io_pods:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on pods.")

        all_pod_objs = io_pods + pod_objs_new

        # Fetch volume details from pods for the purpose of verification.
        # Fix: the loop variable was named 'pod' (shadowing the commonly
        # imported 'pod' helper module); 'pv' is now guarded so a missing
        # PVC match fails loudly instead of reusing a stale value.
        node_pv_dict = {}
        for pod_obj in all_pod_objs:
            pod_info = pod_obj.get()
            node = pod_info['spec']['nodeName']
            pvc_name = pod_info['spec']['volumes'][0][
                'persistentVolumeClaim']['claimName']
            pv = None
            for pvc_obj in self.pvc_objs:
                if pvc_obj.name == pvc_name:
                    pvc_obj.reload()
                    pv = pvc_obj.backed_pv
                    break
            assert pv is not None, (
                f"Could not find PVC {pvc_name} used by pod {pod_obj.name}")
            if node in node_pv_dict:
                node_pv_dict[node].append(pv)
            else:
                node_pv_dict[node] = [pv]

        # Delete pods
        for pod_obj in all_pod_objs:
            pod_obj.delete(wait=False)

        # Verify pods are deleted
        for pod_obj in all_pod_objs:
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        # Updating self.pod_objs for the purpose of teardown
        self.pod_objs.clear()

        # Verify number of 'resource_to_delete' type pods
        final_pods_num = len(pod_functions[resource_to_delete]())
        assert final_pods_num == initial_pods_num, (
            f"Total number of {resource_to_delete} pods is not matching with "
            f"initial value. Total number of pods before deleting a pod: "
            f"{initial_pods_num}. Total number of pods present now: "
            f"{final_pods_num}")

        # Verify volumes are unmapped from nodes after deleting the pods
        for node, pvs in node_pv_dict.items():
            cmd = f'oc debug nodes/{node} -- df'
            df_on_node = run_cmd(cmd)
            for pv in pvs:
                assert pv not in df_on_node, (
                    f"{pv} is still present on node {node} after "
                    f"deleting the pods.")

        # Verify that PVCs are reusable by creating new pods
        all_pvc_objs = self.pvc_objs + pvc_objs_new
        pod_objs_re = helpers.create_pods(
            pvc_objs_list=all_pvc_objs,
            interface_type=self.interface,
            desired_status=constants.STATUS_RUNNING,
            wait=True,
            namespace=self.namespace)
        log.info("Successfully created new pods using all PVCs.")

        # Updating self.pod_objs for the purpose of teardown
        self.pod_objs.extend(pod_objs_re)

        # Run IO on each of the newly created pods
        for pod_obj in pod_objs_re:
            pod_obj.run_io(storage_type='fs',
                           size='100M',
                           runtime=10,
                           fio_filename='fio-file-retest')

        log.info("Fetching IO results from newly created pods")
        for pod_obj in pod_objs_re:
            get_fio_rw_iops(pod_obj)
        log.info("Verified IO result on newly created pods.")
# Example no. 6
def test_create_delete_pvcs(multi_pvc_factory, pod_factory, project=None):
    """
    Exercise bulk create and delete of PVCs and pods running in parallel.

    The test prepares three batches of RBD and CephFS PVCs: one backing pods
    that will later be deleted, one that will itself be deleted, and one
    backing new pods. It then concurrently creates new PVCs and new pods
    while deleting the prepared pods and PVCs, and finally verifies that
    every creation reached the expected state and every deletion completed.

    Args:
        multi_pvc_factory: Fixture which creates multiple PVCs
        pod_factory: Fixture which creates pods
        project: Project to create the resources in. When None, the
            factories choose/create the project.
    """
    # Create rbd pvcs for pods
    pvcs_rbd_for_pods = create_pvcs(multi_pvc_factory,
                                    'CephBlockPool',
                                    project=project)
    # The same storage classes are reused for all subsequent PVC batches
    storageclass_rbd = pvcs_rbd_for_pods[0].storageclass

    # Create cephfs pvcs for pods
    pvcs_cephfs_for_pods = create_pvcs(multi_pvc_factory,
                                       'CephFileSystem',
                                       project=project)
    storageclass_cephfs = pvcs_cephfs_for_pods[0].storageclass

    all_pvc_for_pods = pvcs_rbd_for_pods + pvcs_cephfs_for_pods
    # Check pvc status and record the volume mode of each PVC
    for pvc_obj in all_pvc_for_pods:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=1200  # Timeout of 20 minutes
        )
        pvc_info = pvc_obj.get()
        setattr(pvc_obj, 'volume_mode', pvc_info['spec']['volumeMode'])

    # Create the pods which will be deleted while other resources are
    # being created/deleted in parallel
    rbd_pods_to_delete = helpers.create_pods(pvcs_rbd_for_pods, pod_factory,
                                             constants.RBD_INTERFACE)
    cephfs_pods_to_delete = helpers.create_pods(pvcs_cephfs_for_pods,
                                                pod_factory,
                                                constants.CEPHFS_INTERFACE)
    pods_to_delete = rbd_pods_to_delete + cephfs_pods_to_delete
    for pod_obj in pods_to_delete:
        helpers.wait_for_resource_state(
            resource=pod_obj,
            state=constants.STATUS_RUNNING,
            timeout=300  # Timeout given 5 minutes
        )

    log.info(
        f"#### Created the pods for deletion later...pods = {pods_to_delete}")

    # Create PVCs for deleting
    pvcs_rbd_to_delete = create_pvcs(multi_pvc_factory=multi_pvc_factory,
                                     interface='CephBlockPool',
                                     project=project,
                                     status="",
                                     storageclass=storageclass_rbd)
    pvcs_cephfs_to_delete = create_pvcs(multi_pvc_factory=multi_pvc_factory,
                                        interface='CephFileSystem',
                                        project=project,
                                        status="",
                                        storageclass=storageclass_cephfs)

    all_pvc_to_delete = pvcs_rbd_to_delete + pvcs_cephfs_to_delete
    # Check pvc status
    for pvc_obj in all_pvc_to_delete:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=300  # Timeout given 5 minutes
        )

    log.info(
        f"#### Created the PVCs for deletion later...PVCs={all_pvc_to_delete}")

    # Create PVCs which will back the new pods
    pvcs_rbd_for_new_pods = create_pvcs(multi_pvc_factory=multi_pvc_factory,
                                        interface='CephBlockPool',
                                        project=project,
                                        status="",
                                        storageclass=storageclass_rbd)
    pvcs_cephfs_for_new_pods = create_pvcs(
        multi_pvc_factory=multi_pvc_factory,
        interface='CephFileSystem',
        project=project,
        status="",
        storageclass=storageclass_cephfs)

    all_pvc_for_new_pods = pvcs_rbd_for_new_pods + pvcs_cephfs_for_new_pods
    # Check pvc status and record the volume mode of each PVC
    for pvc_obj in all_pvc_for_new_pods:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=300  # Timeout given 5 minutes
        )
        pvc_info = pvc_obj.get()
        setattr(pvc_obj, 'volume_mode', pvc_info['spec']['volumeMode'])

    log.info(
        f"#### Created the PVCs required for creating New Pods...{all_pvc_for_new_pods}"
    )

    # Use the executor as a context manager so the worker threads are
    # always shut down, even if one of the operations below raises
    with ThreadPoolExecutor(max_workers=10) as executor:
        # Start creating new rbd PVCs
        rbd_pvc_executor = executor.submit(
            create_pvcs,
            multi_pvc_factory=multi_pvc_factory,
            interface='CephBlockPool',
            project=project,
            status="",
            storageclass=storageclass_rbd)
        log.info("#### Started creating new RBD PVCs in thread...")

        # Start creating new cephfs PVCs
        cephfs_pvc_executor = executor.submit(
            create_pvcs,
            multi_pvc_factory=multi_pvc_factory,
            interface='CephFileSystem',
            project=project,
            status="",
            storageclass=storageclass_cephfs)
        log.info("#### Started creating new cephfs PVCs in thread...")

        # Start creating pods on the prepared PVCs
        rbd_pods_create_executor = executor.submit(helpers.create_pods,
                                                   pvcs_rbd_for_new_pods,
                                                   pod_factory,
                                                   constants.RBD_INTERFACE)
        cephfs_pods_create_executor = executor.submit(
            helpers.create_pods, pvcs_cephfs_for_new_pods, pod_factory,
            constants.CEPHFS_INTERFACE)

        # Start deleting pods
        pods_delete_executor = executor.submit(delete_pods, pods_to_delete)
        log.info(
            f"### Started deleting the pods_to_delete = {pods_to_delete}")

        # Start deleting PVC
        pvc_delete_executor = executor.submit(delete_pvcs, all_pvc_to_delete)
        log.info(
            f"### Started deleting the all_pvc_to_delete = {all_pvc_to_delete}")

        log.info(
            "These process are started: Bulk delete PVC, Pods. Bulk create PVC, "
            "Pods. Waiting for its completion")

        futures = [
            rbd_pvc_executor, cephfs_pvc_executor, rbd_pods_create_executor,
            cephfs_pods_create_executor, pods_delete_executor,
            pvc_delete_executor
        ]
        # Poll until all six parallel operations have finished
        while not all(future.done() for future in futures):
            sleep(10)
            log.info(
                "#### create_delete_pvcs....Waiting for threads to complete...")

        # result() re-raises any exception hit inside a worker thread
        new_rbd_pvcs = rbd_pvc_executor.result()
        new_cephfs_pvcs = cephfs_pvc_executor.result()
        new_pods = (cephfs_pods_create_executor.result()
                    + rbd_pods_create_executor.result())

    # Check pvc status
    for pvc_obj in new_rbd_pvcs + new_cephfs_pvcs:
        helpers.wait_for_resource_state(
            resource=pvc_obj,
            state=constants.STATUS_BOUND,
            timeout=300  # Timeout given 5 minutes
        )

    log.info("All new PVCs are bound")

    # Check pods status
    for pod_obj in new_pods:
        helpers.wait_for_resource_state(
            resource=pod_obj,
            state=constants.STATUS_RUNNING,
            timeout=300  # Timeout given 5 minutes
        )
    log.info("All new pods are running")

    # Check pods are deleted
    for pod_obj in pods_to_delete:
        pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

    log.info("All pods are deleted as expected.")

    # Check PVCs are deleted
    for pvc_obj in all_pvc_to_delete:
        pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)

    log.info("All PVCs are deleted as expected")