Example #1
    def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size):
        """
        Measures pods attachment time in bulk_size bulk

        Args:
            teardown_factory: A fixture used when we want a new resource that was created during the tests
                               to be removed in the teardown phase.
            bulk_size: Size of the bulk to be tested
        Returns:

        """
        # Getting the test start time
        test_start_time = PASTest.get_time()

        log.info(f"Start creating bulk of new {bulk_size} PVCs")

        pvc_objs, _ = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=self.namespace,
            number_of_pvc=bulk_size,
            size=self.pvc_size,
            burst=True,
        )

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)
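
        # Wait in parallel for all of the PVCs to reach the Bound state and
        # refresh each PVC object's data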
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(helpers.wait_for_resource_state, pvc_obj,
                                constants.STATUS_BOUND)

                executor.submit(pvc_obj.reload)

        # Total bulk creation time: from the provisioning "start" event to the
        # "end" event reported for the PVC bulk
        start_time = helpers.get_provision_time(self.interface,
                                                pvc_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              pvc_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        log.info(
            f"{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds."
        )

        pvc_names_list = [pvc_obj.name for pvc_obj in pvc_objs]

        log.info(f"{self.interface} : Before pod attach")
        bulk_start_time = time.time()
        pod_data_list = scale_lib.attach_multiple_pvc_to_pod_dict(
            pvc_list=pvc_names_list,
            namespace=self.namespace,
            pvcs_per_pod=1,
        )

        tmp_path = pathlib.Path(ocsci_log_path())
        # Create kube_job for pod creation
        pod_kube_job = ObjectConfFile(
            name="pod_kube_obj1",
            obj_dict_list=pod_data_list,
            project=defaults.ROOK_CLUSTER_NAMESPACE,
            tmp_path=tmp_path,
        )
        pod_kube_job.create(namespace=self.namespace)

        log.info("Checking that pods are running")
        # Check all the PODs reached Running state
        pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(
            kube_job_obj=pod_kube_job,
            namespace=self.namespace,
            no_of_pod=len(pod_data_list),
            timeout=180,
        )
        for pod_name in pod_running_list:
            pod_obj = get_pod_obj(pod_name, self.namespace)
            teardown_factory(pod_obj)

        bulk_end_time = time.time()
        bulk_total_time = bulk_end_time - bulk_start_time
        log.info(
            f"Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds"
        )

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_log_path = get_full_test_logs_path(cname=self)
        full_log_path += f"-{self.sc}"
        full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, full_log_path))

        full_results.add_key("storageclass", self.sc)
        full_results.add_key("pod_bulk_attach_time", bulk_total_time)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("bulk_size", bulk_size)

        # Getting the test end time
        test_end_time = PASTest.get_time()

        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        # Write the test results into the ES server
        full_results.es_write()
        # write the ES link to the test results in the test log.
        log.info(f"The result can be found at : {full_results.results_link()}")

    def test_bulk_clone_performance(self, namespace, tmp_path):
        """
        Creates a number of PVCs in bulk using a kube job
        Writes 60% of the PVC capacity to each of the created PVCs
        Creates one clone per PVC, all together in a bulk
        Measures the total and CSI creation times for the bulk of clones

        """
        pvc_count = 50
        vol_size = "5Gi"
        job_pod_file = job_pvc_file = job_clone_file = None
        log.info(f"Start creating {pvc_count} {self.interface} PVCs")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        try:
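            # Build the PVC manifests for the bulk kube job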
            pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=pvc_count,
                access_mode=constants.ACCESS_MODE_RWO,
                sc_name=sc_name,
                pvc_size=vol_size,
            )

            job_pvc_file = ObjectConfFile(
                name="job_profile_pvc",
                obj_dict_list=pvc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job
            job_pvc_file.create(namespace=self.namespace)

            # Check all the PVC reached Bound state
            pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_pvc_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
            )

            log.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

            # Kube_job to Create pod
            pod_dict_list = scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_bound_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
                start_io=False,
                pod_yaml=constants.NGINX_POD_YAML,
            )
            job_pod_file = ObjectConfFile(
                name="job_profile_pod",
                obj_dict_list=pod_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            job_pod_file.create(namespace=self.namespace)

            # Check all PODs in Running state
            scale_lib.check_all_pod_reached_running_state_in_kube_job(
                kube_job_obj=job_pod_file,
                namespace=self.namespace,
                no_of_pod=len(pod_dict_list),
                timeout=90,
            )
            log.info(f"Number of PODs in Running state {len(pod_dict_list)}")

            # Write data to the PVCs with FIO (60% of each PVC's capacity);
            # returns the total size written, in MB
            total_files_size = self.run_fio_on_pvcs(vol_size)

            clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
                pvc_dict_list, clone_yaml, sc_name)

            log.info("Created clone dict list")

            # CSI-format timestamp taken just before the clone creation; used
            # below for the CSI-side time measurement
            csi_bulk_start_time = self.get_time(time_format="csi")

            job_clone_file = ObjectConfFile(
                name="job_profile_clone",
                obj_dict_list=clone_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job that creates clones
            job_clone_file.create(namespace=self.namespace)

            log.info("Going to check bound status for clones")
            # Check all the clones reached Bound state
            clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_clone_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
                timeout=180,
            )

            log.info(
                f"Number of clones in Bound state {len(clone_bound_list)}")

            clone_objs = []
            all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
            for clone_dict in clone_dict_list:
                name = clone_dict["metadata"]["name"]
                size = clone_dict["spec"]["resources"]["requests"]["storage"]
                log.info(f"Clone {name} of size {size} created")
                for pvc_obj in all_pvc_objs:
                    if pvc_obj.name == name:
                        clone_objs.append(pvc_obj)

            assert len(clone_bound_list) == len(
                clone_objs
            ), "Not all clones reached BOUND state, cannot measure time"
            start_time = helpers.get_provision_time(self.interface,
                                                    clone_objs,
                                                    status="start")
            end_time = helpers.get_provision_time(self.interface,
                                                  clone_objs,
                                                  status="end")
            total_time = (end_time - start_time).total_seconds()
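            # Creation throughput in MB/sec: total data written / total creation time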
            speed = round(total_files_size / total_time, 2)

            csi_creation_time = performance_lib.csi_bulk_pvc_time_measure(
                self.interface, clone_objs, "create", csi_bulk_start_time)

            log.info(
                f"Total creation time = {total_time} secs, csi creation time = {csi_creation_time} secs,"
                f" data size = {total_files_size} MB, speed = {speed} MB/sec "
                f"for a bulk of {pvc_count} {self.interface} clones.")

            # Produce ES report
            # Collecting environment information
            self.get_env_info()

            # Initialize the results doc file.
            full_results = self.init_full_results(
                ResultsAnalyse(
                    self.uuid,
                    self.crd_data,
                    self.full_log_path,
                    "bulk_clone_perf_fullres",
                ))

            full_results.add_key("interface", self.interface)
            full_results.add_key("bulk_size", pvc_count)
            full_results.add_key("clone_size", vol_size)
            full_results.add_key("bulk_creation_time", total_time)
            full_results.add_key("bulk_csi_creation_time", csi_creation_time)
            full_results.add_key("data_size(MB)", total_files_size)
            full_results.add_key("speed", speed)
            full_results.add_key("es_results_link",
                                 full_results.results_link())

            # Write the test results into the ES server
            full_results.es_write()
            self.results_path = get_full_test_logs_path(cname=self)
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create text file with results of all subtest (3 - according to the parameters)
            self.write_result_to_file(res_link)

        # The finally block cleans up the resources created during the test;
        # it runs whether the try block passed or failed.
        finally:
            # Cleanup activities
            log.info(
                "Cleanup of all the resources created during test execution")
            if job_pod_file:
                job_pod_file.delete(namespace=self.namespace)
                job_pod_file.wait_for_delete(resource_name=job_pod_file.name,
                                             namespace=self.namespace)

            if job_clone_file:
                job_clone_file.delete(namespace=self.namespace)
                job_clone_file.wait_for_delete(
                    resource_name=job_clone_file.name,
                    namespace=self.namespace)

            if job_pvc_file:
                job_pvc_file.delete(namespace=self.namespace)
                job_pvc_file.wait_for_delete(resource_name=job_pvc_file.name,
                                             namespace=self.namespace)

            # Check ceph health status
            utils.ceph_health_check(tries=20)
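
Both methods above follow the same kube-job pattern: build a list of resource manifests, wrap them in an ObjectConfFile, create them all at once, then poll until every resource reaches the target state and take the wall-clock delta. Below is a condensed sketch of that pattern, assuming the same imports as the listing above (time, pathlib, ObjectConfFile, scale_lib, ocsci_log_path); the helper name and the default timeout are illustrative:

    # Condensed sketch of the bulk-create-and-time pattern; not part of the
    # original test class. Only calls that appear in the listing above are used.
    def create_pvc_bulk_and_time(self, pvc_dict_list, timeout=180):
        job_file = ObjectConfFile(
            name="bulk_pvc_job",
            obj_dict_list=pvc_dict_list,
            project=self.namespace,
            tmp_path=pathlib.Path(ocsci_log_path()),
        )
        start = time.time()
        job_file.create(namespace=self.namespace)  # one-shot bulk create
        bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file,
            namespace=self.namespace,
            no_of_pvc=len(pvc_dict_list),
            timeout=timeout,
        )
        return job_file, bound_list, time.time() - start
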
Example #3
    def test_bulk_pod_attach_performance(self, interface_type, bulk_size):
        """
        Measures pods attachment time in bulk_size bulk

        Args:
            interface_type (str): The interface type to be tested - CephBlockPool / CephFileSystem.
            bulk_size (int): Size of the bulk to be tested
        Returns:

        """
        self.interface = interface_type

        if self.dev_mode:
            bulk_size = 3

        # Initialize some variables
        timeout = bulk_size * 5
        pvc_names_list = list()
        pod_data_list = list()

        # Getting the test start time
        test_start_time = self.get_time()
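        # CSI-format timestamp, used later for the CSI-side attach time measurement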
        csi_start_time = self.get_time("csi")

        log.info(f"Start creating bulk of new {bulk_size} PVCs")
        self.pvc_objs, _ = helpers.create_multiple_pvcs(
            sc_name=Interfaces_info[self.interface]["sc"],
            namespace=self.namespace,
            number_of_pvc=bulk_size,
            size=self.pvc_size,
            burst=True,
            do_reload=False,
        )
        log.info("Wait for all of the PVCs to be in Bound state")
        performance_lib.wait_for_resource_bulk_status("pvc", bulk_size,
                                                      self.namespace,
                                                      constants.STATUS_BOUND,
                                                      timeout, 10)
        # In case of a creation failure, wait_for_resource_bulk_status will
        # raise an exception, so reaching this point means the creation succeeded
        log.info("All PVCs were created and are in Bound state.")

        # Reload all PVC(s) information
        for pvc_obj in self.pvc_objs:
            pvc_obj.reload()
            pvc_names_list.append(pvc_obj.name)
        log.debug(f"The PVCs names are : {pvc_names_list}")

        # Create kube_job for pod creation
        pod_data_list.extend(
            scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_names_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
            ))
        self.pods_obj = ObjectConfFile(
            name="pod_kube_obj",
            obj_dict_list=pod_data_list,
            project=self.namespace,
            tmp_path=pathlib.Path(ocsci_log_path()),
        )
        log.debug(f"PODs data list is : {json.dumps(pod_data_list, indent=3)}")

        log.info(f"{self.interface} : Before pod attach")
        bulk_start_time = time.time()
        self.pods_obj.create(namespace=self.namespace)
        # Check all the PODs reached Running state
        log.info("Checking that pods are running")
        performance_lib.wait_for_resource_bulk_status("pod", bulk_size,
                                                      self.namespace,
                                                      constants.STATUS_RUNNING,
                                                      timeout, 2)
        log.info("All POD(s) are in Running State.")
        bulk_end_time = time.time()
        bulk_total_time = bulk_end_time - bulk_start_time
        log.info(
            f"Bulk attach time of {bulk_size} pods is {bulk_total_time} seconds"
        )

        # Measure the bulk attach time on the CSI side, anchored at the
        # csi_start_time recorded at the start of the test
        csi_bulk_total_time = performance_lib.pod_bulk_attach_csi_time(
            self.interface, self.pvc_objs, csi_start_time, self.namespace)

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path,
                           "pod_bulk_attachtime"))

        full_results.add_key("storageclass",
                             Interfaces_info[self.interface]["name"])
        full_results.add_key("pod_bulk_attach_time", bulk_total_time)
        full_results.add_key("pod_csi_bulk_attach_time", csi_bulk_total_time)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("bulk_size", bulk_size)

        # Getting the test end time
        test_end_time = self.get_time()

        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        # Write the test results into the ES server
        self.results_path = helpers.get_full_test_logs_path(cname=self)
        if full_results.es_write():
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create text file with results of all subtests (4 - according to the parameters)
            self.write_result_to_file(res_link)
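
The timing logic in Example #3 reduces to a wall-clock delta taken around the bulk pod creation, reported alongside a CSI-side measurement anchored at a pre-recorded CSI timestamp. Below is a hypothetical helper distilled from the calls shown above (same imports assumed; the 2-second poll interval mirrors the listing):

    # Hypothetical helper, not part of the original test class; uses only
    # calls that appear in Example #3.
    def time_bulk_pod_attach(self, pods_obj, bulk_size, timeout):
        start = time.time()
        pods_obj.create(namespace=self.namespace)
        performance_lib.wait_for_resource_bulk_status(
            "pod", bulk_size, self.namespace, constants.STATUS_RUNNING, timeout, 2
        )
        return time.time() - start

In the test itself, this wall-clock figure is reported together with pod_bulk_attach_csi_time, which measures the same window from the CSI side.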