    def test_bulk_clone_performance(self, namespace, tmp_path):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring total and csi creation times for bulk of clones

        """
        pvc_count = 50
        vol_size = "5Gi"
        job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
        log.info(f"Start creating {self.interface} {pvc_count} PVC")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        try:
            pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=pvc_count,
                access_mode=constants.ACCESS_MODE_RWO,
                sc_name=sc_name,
                pvc_size=vol_size,
            )

            job_pvc_file = ObjectConfFile(
                name="job_profile_pvc",
                obj_dict_list=pvc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job
            job_pvc_file.create(namespace=self.namespace)

            # Check all the PVC reached Bound state
            pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_pvc_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
            )

            log.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

            # Kube_job to Create pod
            pod_dict_list = scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_bound_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
                start_io=False,
                pod_yaml=constants.NGINX_POD_YAML,
            )
            job_pod_file = ObjectConfFile(
                name="job_profile_pod",
                obj_dict_list=pod_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            job_pod_file.create(namespace=self.namespace)

            # Check all PODs in Running state
            scale_lib.check_all_pod_reached_running_state_in_kube_job(
                kube_job_obj=job_pod_file,
                namespace=self.namespace,
                no_of_pod=len(pod_dict_list),
                timeout=90,
            )
            log.info(f"Number of PODs in Running state {len(pod_dict_list)}")

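            # Fill each PVC with data (60% of its capacity) via fio;
            # run_fio_on_pvcs returns the total data size written, in MB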
            total_files_size = self.run_fio_on_pvcs(vol_size)

            clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
                pvc_dict_list, clone_yaml, sc_name)

            log.info("Created clone dict list")

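            # Capture a CSI-format timestamp before creating the clones;
            # used below by csi_bulk_pvc_time_measure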
            csi_bulk_start_time = self.get_time(time_format="csi")

            job_clone_file = ObjectConfFile(
                name="job_profile_clone",
                obj_dict_list=clone_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job that creates clones
            job_clone_file.create(namespace=self.namespace)

            log.info("Going to check bound status for clones")
            # Check all the clones reached Bound state
            clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_clone_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
                timeout=180,
            )

            log.info(
                f"Number of clones in Bound state {len(clone_bound_list)}")

            clone_objs = []
            all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
            for clone_dict in clone_dict_list:
                name = clone_dict["metadata"]["name"]
                size = clone_dict["spec"]["resources"]["requests"]["storage"]
                log.info(f"Clone {name} of size {size} created")
                for pvc_obj in all_pvc_objs:
                    if pvc_obj.name == name:
                        clone_objs.append(pvc_obj)

            assert len(clone_bound_list) == len(
                clone_objs
            ), "Not all clones reached BOUND state, cannot measure time"
            start_time = helpers.get_provision_time(self.interface,
                                                    clone_objs,
                                                    status="start")
            end_time = helpers.get_provision_time(self.interface,
                                                  clone_objs,
                                                  status="end")
            total_time = (end_time - start_time).total_seconds()
            speed = round(total_files_size / total_time, 2)

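            # Measure the CSI-side creation time for the whole bulk of
            # clones, starting from csi_bulk_start_time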
            csi_creation_time = performance_lib.csi_bulk_pvc_time_measure(
                self.interface, clone_objs, "create", csi_bulk_start_time)

            log.info(
                f"Total creation time = {total_time} secs, csi creation time = {csi_creation_time},"
                f" data size = {total_files_size} MB, speed = {speed} MB/sec "
                f"for {self.interface} clone in bulk of {pvc_count} clones.")

            # Produce ES report
            # Collecting environment information
            self.get_env_info()

            # Initialize the results doc file.
            full_results = self.init_full_results(
                ResultsAnalyse(
                    self.uuid,
                    self.crd_data,
                    self.full_log_path,
                    "bulk_clone_perf_fullres",
                ))

            full_results.add_key("interface", self.interface)
            full_results.add_key("bulk_size", pvc_count)
            full_results.add_key("clone_size", vol_size)
            full_results.add_key("bulk_creation_time", total_time)
            full_results.add_key("bulk_csi_creation_time", csi_creation_time)
            full_results.add_key("data_size(MB)", total_files_size)
            full_results.add_key("speed", speed)
            full_results.add_key("es_results_link",
                                 full_results.results_link())

            # Write the test results into the ES server
            full_results.es_write()
            self.results_path = get_full_test_logs_path(cname=self)
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create a text file with the results of all subtests (3, according to the parameters)
            self.write_result_to_file(res_link)

        # The finally block cleans up the resources created during the test.
        # It runs regardless of whether the try block passed or failed.
        finally:
            # Cleanup activities
            log.info(
                "Cleanup of all the resources created during test execution")
            if job_pod_file:
                job_pod_file.delete(namespace=self.namespace)
                job_pod_file.wait_for_delete(resource_name=job_pod_file.name,
                                             namespace=self.namespace)

            if job_clone_file:
                job_clone_file.delete(namespace=self.namespace)
                job_clone_file.wait_for_delete(
                    resource_name=job_clone_file.name,
                    namespace=self.namespace)

            if job_pvc_file:
                job_pvc_file.delete(namespace=self.namespace)
                job_pvc_file.wait_for_delete(resource_name=job_pvc_file.name,
                                             namespace=self.namespace)

            # Check ceph health status
            utils.ceph_health_check(tries=20)
    def test_bulk_clone_performance(self, namespace, tmp_path, pod_factory):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring time for bulk of clones creation

        """
        pvc_count = 50
        log.info(f"Start creating {self.interface} {pvc_count} PVC")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=pvc_count,
            access_mode=constants.ACCESS_MODE_RWO,
            sc_name=sc_name,
            pvc_size="5Gi",
        )

        job_pvc_file = ObjectConfFile(
            name="job_profile_pvc",
            obj_dict_list=pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_pvc_file.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_pvc_file,
            namespace=self.namespace,
            no_of_pvc=pvc_count,
        )

        logging.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

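        # Attach a pod to each PVC and write data (60% of the PVC capacity);
        # run_fio_on_pvcs returns the total data size written, in MB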
        total_files_size = self.run_fio_on_pvcs(pvc_dict_list, pod_factory)

        clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
            pvc_dict_list, clone_yaml, sc_name)

        logging.info("Created clone dict list")

        job_clone_file = ObjectConfFile(
            name="job_profile_clone",
            obj_dict_list=clone_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job that creates clones
        job_clone_file.create(namespace=self.namespace)

        logging.info("Going to check bound status for clones")
        # Check all the clones reached Bound state
        clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_clone_file,
            namespace=self.namespace,
            no_of_pvc=pvc_count,
            timeout=200,
        )

        log.info(
            f"Number of clones in Bound state {len(clone_bound_list)}")

        clone_objs = []
        all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        for clone_dict in clone_dict_list:
            name = clone_dict["metadata"]["name"]
            size = clone_dict["spec"]["resources"]["requests"]["storage"]
            log.info(f"Clone {name} of size {size} created")
            for pvc_obj in all_pvc_objs:
                if pvc_obj.name == name:
                    clone_objs.append(pvc_obj)

        assert len(clone_bound_list) == len(
            clone_objs
        ), "Not all clones reached BOUND state, cannot measure time"
        start_time = helpers.get_provision_time(self.interface,
                                                clone_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              clone_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        speed = round(total_files_size / total_time, 2)
        log.info(
            f"Total creation time = {total_time} secs, data size = {total_files_size} MB, speed = {speed} MB/sec "
            f"for {self.interface} clone in bulk of {pvc_count} clones.")
Example 3
    def test_bulk_clone_performance(self, tmp_path, interface_iterate):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring total and csi creation times for bulk of clones

        """
        self.interface = interface_iterate
        job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
        log.info(f"Start creating {self.interface} {self.pvc_count} PVC")

        try:
            pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=self.pvc_count,
                access_mode=Interfaces_info[self.interface]["accessmode"],
                sc_name=Interfaces_info[self.interface]["sc_name"],
                pvc_size=self.vol_size,
            )

            job_pvc_file = ObjectConfFile(
                name="job_profile_pvc",
                obj_dict_list=pvc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job
            job_pvc_file.create(namespace=self.namespace)

            # Check all the PVC reached Bound state
            performance_lib.wait_for_resource_bulk_status(
                resource="pvc",
                resource_count=self.pvc_count,
                namespace=self.namespace,
                status=constants.STATUS_BOUND,
                timeout=120,
                sleep_time=5,
            )
            log.info(
                f"All the PVCs ({self.pvc_count}) were created and are in Bound state"
            )

            # Getting the list of the PVC names
            pvc_bound_list = [
                p.name for p in pvc.get_all_pvc_objs(namespace=self.namespace)
            ]

            # Kube_job to Create pod
            log.info(
                "Attaching PODs to the PVCs and filling them with data (60%)")
            pod_dict_list = self.attach_pvcs_to_pod_dict(pvc_bound_list)
            job_pod_file = ObjectConfFile(
                name="job_profile_pod",
                obj_dict_list=pod_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            job_pod_file.create(namespace=self.namespace)

            # Check all PODs are in Completed state
            performance_lib.wait_for_resource_bulk_status(
                resource="pod",
                resource_count=self.pvc_count,
                namespace=self.namespace,
                status=constants.STATUS_COMPLETED,
                timeout=1200,
                sleep_time=30,
            )
            log.info("All the PODs completed writing data to the PVC's")

            clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
                pvc_dict_list,
                Interfaces_info[self.interface]["clone_yaml"],
                Interfaces_info[self.interface]["sc_name"],
            )

            log.info("Created clone dict list")

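            # Capture a CSI-format timestamp before creating the clones;
            # used below by csi_bulk_pvc_time_measure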
            csi_bulk_start_time = self.get_time(time_format="csi")

            job_clone_file = ObjectConfFile(
                name="job_profile_clone",
                obj_dict_list=clone_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job that creates clones
            job_clone_file.create(namespace=self.namespace)

            log.info("Going to check bound status for clones")
            # Check all the clones reached Bound state
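            # Both the original PVCs and their clones live in the namespace,
            # hence the expected Bound count is pvc_count * 2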
            try:
                performance_lib.wait_for_resource_bulk_status(
                    resource="pvc",
                    resource_count=self.pvc_count * 2,
                    namespace=self.namespace,
                    status=constants.STATUS_BOUND,
                    timeout=1200,
                    sleep_time=30,
                )
            except Exception:
                log.error("Failed to create clones for the PVCs")
                raise

            log.info(
                f"All the Clones ({self.pvc_count}) were created and are in Bound state"
            )

            all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
            clone_objs = [
                cl for cl in all_pvc_objs if re.match("clone", cl.name)
            ]
            for clone_dict in clone_dict_list:
                name = clone_dict["metadata"]["name"]
                size = clone_dict["spec"]["resources"]["requests"]["storage"]
                log.info(f"Clone {name} of size {size} created")

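            # Measure the bulk creation time from the earliest provisioning
            # 'start' to the latest 'end' across all clones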
            start_time = get_provision_time(self.interface,
                                            clone_objs,
                                            status="start")
            end_time = get_provision_time(self.interface,
                                          clone_objs,
                                          status="end")
            total_time = (end_time - start_time).total_seconds()
            speed = round(self.total_files_size / total_time, 2)

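            # Measure the CSI-side creation time for the whole bulk of
            # clones, starting from csi_bulk_start_time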
            csi_creation_time = performance_lib.csi_bulk_pvc_time_measure(
                self.interface, clone_objs, "create", csi_bulk_start_time)

            log.info(
                f"Total creation time = {total_time} secs, csi creation time = {csi_creation_time},"
                f" data size = {self.total_files_size} MB, speed = {speed} MB/sec "
                f"for {self.interface} clone in bulk of {self.pvc_count} clones."
            )

            # Produce ES report
            # Collecting environment information
            self.get_env_info()

            # Initialize the results doc file.
            full_results = self.init_full_results(
                ResultsAnalyse(
                    self.uuid,
                    self.crd_data,
                    self.full_log_path,
                    "bulk_clone_perf_fullres",
                ))

            full_results.add_key("interface", self.interface)
            full_results.add_key("bulk_size", self.pvc_count)
            full_results.add_key("clone_size", self.vol_size)
            full_results.add_key("bulk_creation_time", total_time)
            full_results.add_key("bulk_csi_creation_time", csi_creation_time)
            full_results.add_key("data_size(MB)", self.total_files_size)
            full_results.add_key("speed", speed)
            full_results.add_key("es_results_link",
                                 full_results.results_link())

            # Write the test results into the ES server
            full_results.es_write()
            self.results_path = get_full_test_logs_path(cname=self)
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create a text file with the results of all subtests (3, according to the parameters)
            self.write_result_to_file(res_link)

        # The finally block cleans up the resources created during the test.
        # It runs regardless of whether the try block passed or failed.
        finally:
            # Cleanup activities
            log.info(
                "Cleanup of all the resources created during test execution")
            for object_file in [job_pod_file, job_clone_file, job_pvc_file]:
                if object_file:
                    object_file.delete(namespace=self.namespace)
                    try:
                        object_file.wait_for_delete(
                            resource_name=object_file.name,
                            namespace=self.namespace)
                    except Exception:
                        log.error(f"{object_file.name} was not deleted!")

            # Check ceph health status
            utils.ceph_health_check(tries=20)