    # Assumed module-level setup for these excerpted test methods (import
    # paths vary across ocs-ci releases):
    #   import logging, threading, time
    #   from ocs_ci.utility import utils
    #   from tests import helpers
    #   log = logging.getLogger(__name__)
    def test_pv_scale_out_create_delete_pvcs_with_and_without_io(
        self,
        namespace,
        storageclass,
        setup_fixture,
        start_io,
        memory_leak_function,
    ):
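        """
        Scale to `scale_pod_count` pods by creating PVCs and pods in batches
        of `pvc_count_each_itr`, deleting earlier pods/PVCs in a parallel
        thread each iteration, then check worker nodes for memory leaks.
        """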
        pvc_count_each_itr = 10
        scale_pod_count = 120
        size = "10Gi"
        test_run_time = 180
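        # Accumulators for every PVC and pod created across iterations;
        # delete_pod_count feeds the progress log for the parallel deletes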
        self.all_pvc_obj, self.all_pod_obj = [], []
        self.delete_pod_count = 0

        # Identify median memory value for each worker node
        median_dict = helpers.get_memory_leak_median_value()
        log.info(f"Median dict values for memory leak {median_dict}")

        # First iteration: create the initial batch of PVCs and pods
        self.create_pvc_pod(
            self.rbd_sc_obj, self.cephfs_sc_obj, pvc_count_each_itr, size, start_io
        )

        # Iterate until the pod scale target is reached, creating new
        # PVCs/pods while deleting earlier ones in parallel
        while True:
            if scale_pod_count <= len(self.all_pod_obj):
                log.info(f"Created {scale_pod_count} pvc and pods")
                break
            else:
                log.info(
                    f"Creating {pvc_count_each_itr} PVCs/pods and deleting "
                    f"{self.delete_pod_count} pods/PVCs in parallel"
                )
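                # Delete earlier pods/PVCs in one thread while creating a
                # fresh batch in another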
                thread1 = threading.Thread(target=self.delete_pvc_pod, args=())
                thread2 = threading.Thread(
                    target=self.create_pvc_pod,
                    args=(
                        self.rbd_sc_obj,
                        self.cephfs_sc_obj,
                        pvc_count_each_itr,
                        size,
                        start_io,
                    ),
                )
                thread1.start()
                thread2.start()
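            # Wait for this iteration's delete and create threads to finish
            # before re-checking the scale target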
            thread1.join()
            thread2.join()

        # Idle for the test run time so post-scale memory samples accumulate,
        # then compare them against the baseline medians
        time.sleep(test_run_time)
        helpers.memory_leak_analysis(median_dict)
Example #2
    def test_pv_scale_out_create_pvcs_and_respin_ceph_pods(
        self,
        namespace,
        storageclass,
        setup_fixture,
        resource_to_delete,
        memory_leak_function,
    ):
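        """
        Scale to `scale_pod_count` pods by creating PVCs and pods in batches
        while re-spinning Ceph pods (chosen by the resource_to_delete fixture)
        in parallel, then verify Ceph health and check for memory leaks.
        """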
        pvc_count_each_itr = 10
        scale_pod_count = 120
        size = "10Gi"
        test_run_time = 180
        self.all_pvc_obj, self.all_pod_obj = [], []

        # Identify median memory value for each worker node
        median_dict = helpers.get_memory_leak_median_value()
        log.info(f"Median dict values for memory leak {median_dict}")

        # First iteration: create the initial batch of PVCs and pods
        self.create_pvc_pod(
            self.rbd_sc_obj, self.cephfs_sc_obj, pvc_count_each_itr, size
        )
        # Re-spin Ceph pods one at a time in parallel with PVC and pod creation
        while True:
            if scale_pod_count <= len(self.all_pod_obj):
                log.info(f"Create {scale_pod_count} pvc and pods")
                break
            else:
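                # Re-spin one Ceph pod (per resource_to_delete) while a new
                # batch of PVCs and pods is created concurrently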
                thread1 = threading.Thread(
                    target=self.respin_ceph_pod, args=(resource_to_delete,)
                )
                thread2 = threading.Thread(
                    target=self.create_pvc_pod,
                    args=(
                        self.rbd_sc_obj,
                        self.cephfs_sc_obj,
                        pvc_count_each_itr,
                        size,
                    ),
                )
                thread1.start()
                thread2.start()
            thread1.join()
            thread2.join()

        # Idle for the test run time so post-scale memory samples accumulate,
        # then compare them against the baseline medians
        time.sleep(test_run_time)
        assert utils.ceph_health_check(
            delay=180
        ), "Ceph health in bad state after pod respins"
        helpers.memory_leak_analysis(median_dict)
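
    # For orientation only: a minimal, hypothetical sketch of the create_pvc_pod
    # helper both tests call. This is NOT the ocs-ci implementation; the
    # helpers.create_pvc/create_pod signatures and the Pod.run_io call are
    # assumptions here.
    def create_pvc_pod(self, rbd_sc_obj, cephfs_sc_obj, number_of_pvc, size, start_io=False):
        # Create number_of_pvc PVCs per storage class, attach a pod to each,
        # and optionally start I/O on the new pod
        for sc_obj in (rbd_sc_obj, cephfs_sc_obj):
            for _ in range(number_of_pvc):
                pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size=size)  # assumed helper
                self.all_pvc_obj.append(pvc_obj)
                pod_obj = helpers.create_pod(pvc_name=pvc_obj.name)  # assumed helper
                self.all_pod_obj.append(pod_obj)
                if start_io:
                    pod_obj.run_io(storage_type="fs", size="512M")  # assumed I/O call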