Example #1
def measure_pvc_deletion_time(interface, pvc_objs):
    """
    Measures and Logs PVC Deletion Time of all PVCs.

    Args:
        interface (str) : an interface (RBD or CephFS) to run on.
        pvc_objs (list) : List of PVC objects for which we have to measure the time.

    Logs:
        PVC Deletion Time of all the PVCs.

    """
    accepted_deletion_time = 30
    pv_name_list = list()
    pv_to_pvc = dict()

    # Map each backing PV to its PVC name; the measuring helper keys its
    # results by PV name.
    for pvc_obj in pvc_objs:
        pv_name = pvc_obj.backed_pv
        pv_name_list.append(pv_name)
        pv_to_pvc[pv_name] = pvc_obj.name

    pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
        interface=interface, pv_name_list=pv_name_list)

    for pv_name, deletion_time in pvc_deletion_time.items():
        if deletion_time <= accepted_deletion_time:
            log.info(
                f"PVC {pv_to_pvc[pv_name]} was deleted in {deletion_time} seconds."
            )
        else:
            log.error(
                f"PVC {pv_to_pvc[pv_name]} deletion time is {deletion_time} seconds, "
                f"greater than the accepted {accepted_deletion_time} seconds.")
Example #2
    def test_multiple_pvc_deletion_measurement_performance(
            self, teardown_factory):
        """
        Measuring PVC deletion time of 120 PVCs in 180 seconds

        Args:
            teardown_factory: A fixture that removes, in the teardown phase,
                              resources created during the test.
        Returns:

        """
        number_of_pvcs = 120
        pvc_size = "1Gi"
        msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

        log.info(f"{msg_prefix} Start creating {number_of_pvcs} new PVCs")

        pvc_objs = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            number_of_pvc=number_of_pvcs,
            size=pvc_size,
            burst=True,
        )

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(helpers.wait_for_resource_state, pvc_obj,
                                constants.STATUS_BOUND)

                executor.submit(pvc_obj.reload)

        pod_objs = []
        for pvc_obj in pvc_objs:
            pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
            pod_objs.append(pod_obj)

        # Get pvc_name, require pvc_name to fetch deletion time data from log
        threads = list()
        for pvc_obj in pvc_objs:
            process = threading.Thread(target=pvc_obj.reload)
            process.start()
            threads.append(process)
        for process in threads:
            process.join()

        pvc_name_list, pv_name_list = ([] for i in range(2))
        threads = list()
        for pvc_obj in pvc_objs:
            # Pass the callable and its argument separately so the append runs
            # in the worker thread; calling append() inline would execute it
            # immediately and hand Thread a None target.
            process1 = threading.Thread(
                target=pvc_name_list.append, args=(pvc_obj.name,))
            process2 = threading.Thread(
                target=pv_name_list.append, args=(pvc_obj.backed_pv,))
            process1.start()
            process2.start()
            threads.append(process1)
            threads.append(process2)
        for process in threads:
            process.join()
        log.info(f"{msg_prefix} Preparing to delete {number_of_pvcs} PVCs")

        # Delete PVC
        for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
            pod_obj.delete(wait=True)
            pvc_obj.delete()
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)

        # Get PVC deletion time
        pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=self.interface, pv_name_list=pv_name_list)
        log.info(
            f"{msg_prefix} {number_of_pvcs} PVC bulk deletion times are {pvc_deletion_time}"
        )

        # accepted deletion time is 2 secs for each PVC
        accepted_pvc_deletion_time = number_of_pvcs * 2

        for del_time in pvc_deletion_time.values():
            if del_time > accepted_pvc_deletion_time:
                raise ex.PerformanceException(
                    f"{msg_prefix} PVC deletion time is {del_time} seconds and is "
                    f"greater than {accepted_pvc_deletion_time} seconds")

        logging.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
        for name, a_time in pvc_deletion_time.items():
            logging.info(f"{name} deletion time is: {a_time} seconds")
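
When the PVC and PV names are collected above, the callable and its argument are passed to Thread separately. A minimal stdlib sketch of why the naive form is a pitfall:

import threading

names = []
# list.append("a") runs immediately and returns None, so this thread is
# constructed with target=None and does nothing when started:
t_wrong = threading.Thread(target=names.append("a"))
# Passing the callable and args separately defers the call to the thread:
t_right = threading.Thread(target=names.append, args=("b",))
t_wrong.start()
t_right.start()
t_wrong.join()
t_right.join()
print(names)  # ['a', 'b'] -- only 'b' was appended by a worker thread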
Example #3
    def test_bulk_pvc_creation_deletion_measurement_performance(
            self, teardown_factory, bulk_size):
        """
        Measuring PVC creation and deletion time of bulk_size PVCs
        and sends results to the Elastic Search DB

        Args:
            teardown_factory: A fixture that removes, in the teardown phase,
                              resources created during the test.
            bulk_size: Size of the bulk to be tested
        Returns:

        """
        bulk_creation_time_limit = bulk_size / 2
        log.info(f"Start creating new {bulk_size} PVCs")

        pvc_objs, yaml_creation_dir = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=self.namespace,
            number_of_pvc=bulk_size,
            size=self.pvc_size,
            burst=True,
        )
        logging.info(f"PVC creation dir is {yaml_creation_dir}")

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(helpers.wait_for_resource_state, pvc_obj,
                                constants.STATUS_BOUND)
                executor.submit(pvc_obj.reload)

        start_time = helpers.get_provision_time(self.interface,
                                                pvc_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              pvc_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        logging.info(
            f"{bulk_size} Bulk PVCs creation time is {total_time} seconds.")

        if total_time > bulk_creation_time_limit:
            raise ex.PerformanceException(
                f"{bulk_size} Bulk PVCs creation time is {total_time} seconds and is "
                f"greater than {bulk_creation_time_limit} seconds")

        pv_names_list = []
        for pvc_obj in pvc_objs:
            pv_names_list.append(pvc_obj.backed_pv)

        logging.info(f"Starting to delete bulk of {bulk_size} PVCs")
        helpers.delete_bulk_pvcs(yaml_creation_dir,
                                 pv_names_list,
                                 namespace=self.namespace)
        logging.info(
            f"Deletion of bulk of {bulk_size} PVCs successfully completed")

        log_deletion_times = helpers.measure_pv_deletion_time_bulk(
            self.interface, pv_names_list, return_log_times=True)

        all_start_times = [
            a_tuple[0] for a_tuple in log_deletion_times.values()
        ]
        bulk_start_time = sorted(all_start_times)[0]  # the earliest start time
        start_deletion_time = datetime.datetime.strptime(
            bulk_start_time, helpers.DATE_TIME_FORMAT)

        all_end_times = [a_tuple[1] for a_tuple in log_deletion_times.values()]
        bulk_deletion_time = sorted(all_end_times)[-1]  # the latest end time
        end_deletion_time = datetime.datetime.strptime(
            bulk_deletion_time, helpers.DATE_TIME_FORMAT)

        total_deletion_time = (end_deletion_time -
                               start_deletion_time).total_seconds()
        logging.info(
            f"{bulk_size} Bulk PVCs deletion time is {total_deletion_time} seconds."
        )

        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "bulk_creation_deletion_measurement",
            ))

        full_results.add_key("interface", self.interface)
        full_results.add_key("bulk_size", bulk_size)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("bulk_pvc_creation_time", total_time)
        full_results.add_key("bulk_pvc_deletion_time", total_deletion_time)

        # Write the test results into the ES server
        full_results.es_write()
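
The earliest-start/latest-end computation above defines the bulk deletion window. A stdlib-only sketch with hypothetical log times (helpers.DATE_TIME_FORMAT is assumed to be a strptime-compatible pattern like the one below):

import datetime

DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"  # assumption, for illustration only

log_deletion_times = {
    "pvc-aaa-pv": ("2021-01-01 10:00:00.100", "2021-01-01 10:00:03.500"),
    "pvc-bbb-pv": ("2021-01-01 10:00:01.200", "2021-01-01 10:00:02.900"),
}

starts = [datetime.datetime.strptime(s, DATE_TIME_FORMAT)
          for s, _ in log_deletion_times.values()]
ends = [datetime.datetime.strptime(e, DATE_TIME_FORMAT)
        for _, e in log_deletion_times.values()]

# Measuring from the earliest start to the latest end means overlapping
# per-PV deletions are not double counted.
total = (max(ends) - min(starts)).total_seconds()
print(f"bulk deletion time: {total} seconds")  # -> 3.4 seconds

min()/max() on the parsed datetimes is equivalent to the sorted(...)[0] and sorted(...)[-1] indexing used in the tests.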
Example #4
    def test_multiple_pvc_deletion_measurement_performance(
            self, teardown_factory):
        """
        Measuring PVC deletion time of 120 PVCs in 180 seconds

        Args:
            teardown_factory: A fixture that removes, in the teardown phase,
                              resources created during the test.
        Returns:

        """
        number_of_pvcs = 120
        pvc_size = "1Gi"
        msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

        log.info(f"{msg_prefix} Start creating new {number_of_pvcs} PVCs")

        pvc_objs, _ = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=self.namespace,
            number_of_pvc=number_of_pvcs,
            size=pvc_size,
            burst=True,
        )

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)

        timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(
                    helpers.wait_for_resource_state,
                    pvc_obj,
                    constants.STATUS_BOUND,
                    timeout=timeout,
                )
                executor.submit(pvc_obj.reload)

        pod_objs = []
        for pvc_obj in pvc_objs:
            pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
            pod_objs.append(pod_obj)

        # Get pvc_name, require pvc_name to fetch deletion time data from log
        threads = list()
        for pvc_obj in pvc_objs:
            process = threading.Thread(target=pvc_obj.reload)
            process.start()
            threads.append(process)
        for process in threads:
            process.join()

        pvc_name_list, pv_name_list = ([] for i in range(2))
        threads = list()
        for pvc_obj in pvc_objs:
            # Pass the callable and its argument separately so the append runs
            # in the worker thread; calling append() inline would execute it
            # immediately and hand Thread a None target.
            process1 = threading.Thread(
                target=pvc_name_list.append, args=(pvc_obj.name,))
            process2 = threading.Thread(
                target=pv_name_list.append, args=(pvc_obj.backed_pv,))
            process1.start()
            process2.start()
            threads.append(process1)
            threads.append(process2)
        for process in threads:
            process.join()
        log.info(f"{msg_prefix} Preparing to delete {number_of_pvcs} PVCs")

        # Delete PVC
        for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
            pod_obj.delete(wait=True)
            pvc_obj.delete()
            pvc_obj.ocp.wait_for_delete(pvc_obj.name)

        # Get PVC deletion time
        pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=self.interface, pv_name_list=pv_name_list)
        log.info(
            f"{msg_prefix} {number_of_pvcs} PVC bulk deletion times are {pvc_deletion_time}"
        )

        # accepted deletion time is 2 secs for each PVC
        accepted_pvc_deletion_time = number_of_pvcs * 2

        for del_time in pvc_deletion_time.values():
            if del_time > accepted_pvc_deletion_time:
                raise ex.PerformanceException(
                    f"{msg_prefix} PVC deletion time is {del_time} seconds and is "
                    f"greater than {accepted_pvc_deletion_time} seconds")

        logging.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
        for name, a_time in pvc_deletion_time.items():
            logging.info(f"{name} deletion time is: {a_time} seconds")

        if self.interface == constants.CEPHBLOCKPOOL:
            self.sc = "RBD"
        elif self.interface == constants.CEPHFILESYSTEM:
            self.sc = "CephFS"
        elif self.interface == constants.CEPHBLOCKPOOL_THICK:
            self.sc = "RBD-Thick"

        full_log_path = get_full_test_logs_path(
            cname=self) + f"-{self.sc}-{pvc_size}"
        self.results_path = get_full_test_logs_path(cname=self)
        log.info(f"Logs file path name is : {full_log_path}")

        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                full_log_path,
                "pvc_bulk_deletion_fullres",
            ))

        full_results.add_key("interface", self.interface)
        full_results.add_key("bulk_size", number_of_pvcs)
        full_results.add_key("pvc_size", pvc_size)
        full_results.all_results["bulk_deletion_time"] = pvc_deletion_time

        if full_results.es_write():
            res_link = full_results.results_link()
            log.info(f"The result can be found at: {res_link}")

            # Create text file with results of all subtest (3 - according to the parameters)
            self.write_result_to_file(res_link)
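
The if/elif ladder that picks the storage-class label could equally be a lookup table; a sketch that assumes the ocs-ci interface constants are plain strings (the values shown are assumptions, not verified constants):

SC_LABELS = {
    "CephBlockPool": "RBD",             # assumed value of constants.CEPHBLOCKPOOL
    "CephFileSystem": "CephFS",         # assumed value of constants.CEPHFILESYSTEM
    "CephBlockPoolThick": "RBD-Thick",  # assumed value of the thick-pool constant
}

interface = "CephBlockPool"
print(SC_LABELS.get(interface, "unknown"))  # RBD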
Example #5
    def test_multiple_pvc_creation_deletion_scale(self, namespace, tmp_path,
                                                  access_mode, interface):
        """
        Measuring PVC creation time while scaling PVC
        Measure PVC deletion time after creation test
        """
        scale_pvc_count = scale_lib.get_max_pvc_count()
        log.info(
            f"Start creating {access_mode}-{interface} {scale_pvc_count} PVC")
        if interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
        elif interface == constants.CEPHFS_INTERFACE:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS

        # Get pvc_dict_list, append all the pvc.yaml dict to pvc_dict_list
        pvc_dict_list1 = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=int(scale_pvc_count / 2),
            access_mode=access_mode,
            sc_name=sc_name)
        pvc_dict_list2 = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=int(scale_pvc_count / 2),
            access_mode=access_mode,
            sc_name=sc_name)

        # Two kube_jobs are used to reduce the load; timeouts were observed
        # during the delete of a single kube_job under heavy load.
        job_file1 = ObjectConfFile(
            name="job_profile_1",
            obj_dict_list=pvc_dict_list1,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        job_file2 = ObjectConfFile(
            name="job_profile_2",
            obj_dict_list=pvc_dict_list2,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_file1.create(namespace=self.namespace)
        job_file2.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file1,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )
        pvc_bound_list.extend(
            scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_file2,
                namespace=self.namespace,
                no_of_pvc=int(scale_pvc_count / 2),
            ))

        log.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

        # Get PVC creation time
        pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=interface,
            pvc_name_list=pvc_bound_list,
            wait_time=300,
        )

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        log_path = f"{ocsci_log_path()}/{interface}-{access_mode}"
        with open(f"{log_path}-creation-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in pvc_create_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Create data present in {log_path}-creation-time.csv file")

        # Get pv_name, require pv_name to fetch deletion time data from log
        pv_name_list = list()
        get_kube_job_1 = job_file1.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            pv_name_list.append(
                get_kube_job_1["items"][i]["spec"]["volumeName"])

        get_kube_job_2 = job_file2.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            pv_name_list.append(
                get_kube_job_2["items"][i]["spec"]["volumeName"])

        # Delete kube_job
        job_file1.delete(namespace=self.namespace)
        job_file2.delete(namespace=self.namespace)

        # Adding 1min wait time for PVC deletion logs to be updated
        # Observed failure when we immediately check the logs for pvc delete time
        # https://github.com/red-hat-storage/ocs-ci/issues/3371
        time.sleep(60)

        # Get PVC deletion time
        pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=interface, pv_name_list=pv_name_list)

        # Update result to csv file.
        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        with open(f"{log_path}-deletion-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in pvc_deletion_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Delete data present in {log_path}-deletion-time.csv file")
        end_time = default_timer()
        log.info(f"Elapsed time -- {end_time - self.start_time} seconds")
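
default_timer above comes from the timeit module (the import is not shown in the snippet). A minimal sketch of the elapsed-time bookkeeping, assuming self.start_time was captured the same way when the test began:

from timeit import default_timer
import time

start_time = default_timer()
time.sleep(0.1)  # stand-in for the PVC create/delete work
print(f"Elapsed time -- {default_timer() - start_time:.2f} seconds")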
Example #6
    def test_all_4_type_pvc_creation_deletion_scale(self, namespace, tmp_path):
        """
        Measuring PVC creation time while scaling PVCs of all 4 types.
        A total of 500 PVCs per worker node will be created, i.e. on a
        3-node cluster, 375 of each PVC type.
        Measure PVC deletion time in a scale environment.
        """
        scale_pvc_count = scale_lib.get_max_pvc_count()
        log.info(f"Start creating {scale_pvc_count} PVC of all 4 types")
        cephfs_sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS
        rbd_sc_obj = constants.DEFAULT_STORAGECLASS_RBD

        # Get pvc_dict_list, append all the pvc.yaml dict to pvc_dict_list
        rbd_pvc_dict_list, cephfs_pvc_dict_list = ([] for i in range(2))
        for mode in [constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX]:
            rbd_pvc_dict_list.extend(
                scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                    no_of_pvc=int(scale_pvc_count / 4),
                    access_mode=mode,
                    sc_name=rbd_sc_obj,
                ))
            cephfs_pvc_dict_list.extend(
                scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                    no_of_pvc=int(scale_pvc_count / 4),
                    access_mode=mode,
                    sc_name=cephfs_sc_obj,
                ))

        # There are 2 kube_jobs, one for the CephFS PVCs and one for the RBD PVCs
        job_file_rbd = ObjectConfFile(
            name="rbd_pvc_job",
            obj_dict_list=rbd_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        job_file_cephfs = ObjectConfFile(
            name="cephfs_pvc_job",
            obj_dict_list=cephfs_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_file_rbd.create(namespace=self.namespace)
        job_file_cephfs.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        rbd_pvc_name = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file_rbd,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )
        fs_pvc_name = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file_cephfs,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )

        # Get pvc objs from namespace, which is used to identify backend pv
        rbd_pvc_obj, cephfs_pvc_obj = ([] for i in range(2))
        pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        for pvc_obj in pvc_objs:
            if pvc_obj.backed_sc == constants.DEFAULT_STORAGECLASS_RBD:
                rbd_pvc_obj.append(pvc_obj)
            elif pvc_obj.backed_sc == constants.DEFAULT_STORAGECLASS_CEPHFS:
                cephfs_pvc_obj.append(pvc_obj)

        # Get PVC creation time
        fs_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=constants.CEPHFS_INTERFACE, pvc_name_list=fs_pvc_name)
        rbd_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=constants.CEPHBLOCKPOOL, pvc_name_list=rbd_pvc_name)
        fs_pvc_create_time.update(rbd_pvc_create_time)

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        log_path = f"{ocsci_log_path()}/All-type-PVC"
        with open(f"{log_path}-creation-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in fs_pvc_create_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Create data present in {log_path}-creation-time.csv file")

        # Get pv_name, require pv_name to fetch deletion time data from log
        rbd_pv_list, fs_pv_list = ([] for i in range(2))
        get_rbd_kube_job = job_file_rbd.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            rbd_pv_list.append(
                get_rbd_kube_job["items"][i]["spec"]["volumeName"])

        get_fs_kube_job = job_file_cephfs.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            fs_pv_list.append(
                get_fs_kube_job["items"][i]["spec"]["volumeName"])

        # Delete kube_job
        job_file_rbd.delete(namespace=self.namespace)
        job_file_cephfs.delete(namespace=self.namespace)

        # Adding 1min wait time for PVC deletion logs to be updated
        # Observed failure when we immediately check the logs for pvc delete time
        # https://github.com/red-hat-storage/ocs-ci/issues/3371
        time.sleep(60)

        # Get PV deletion time
        fs_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=constants.CEPHFS_INTERFACE, pv_name_list=fs_pv_list)
        rbd_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=constants.CEPHBLOCKPOOL, pv_name_list=rbd_pv_list)
        fs_pvc_deletion_time.update(rbd_pvc_deletion_time)

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        with open(f"{log_path}-deletion-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in fs_pvc_deletion_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Delete data present in {log_path}-deletion-time.csv file")
        end_time = default_timer()
        log.info(f"Elapsed time -- {end_time - self.start_time} seconds")
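
The four PVC flavors above are the cross product of two access modes and two storage classes. A sketch of the arithmetic from the docstring (the storage-class names are assumptions matching common ODF defaults):

from itertools import product

ACCESS_MODES = ["ReadWriteOnce", "ReadWriteMany"]
STORAGE_CLASSES = ["ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"]
total = 500 * 3  # e.g. 500 PVCs per worker node on a 3-node cluster

per_flavor = total // (len(ACCESS_MODES) * len(STORAGE_CLASSES))  # 375
for mode, sc in product(ACCESS_MODES, STORAGE_CLASSES):
    print(f"create {per_flavor} PVCs with access_mode={mode}, sc={sc}")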
Example #7
    def test_bulk_pvc_creation_deletion_measurement_performance(
        self, storageclass_factory, interface_type, bulk_size
    ):

        """
        Measuring PVC creation and deletion time of bulk_size PVCs
        and sends results to the Elastic Search DB

        Args:
            bulk_size: Size of the bulk to be tested
        Returns:

        """
        self.interface = interface_type
        self.sc_obj = storageclass_factory(self.interface)

        bulk_creation_time_limit = bulk_size / 2

        log.info(f"Start creating new {bulk_size} PVCs")

        # Getting the start time of the test.
        self.test_start_time = self.get_time()

        # Run the Bulk Creation test
        csi_bulk_start_time = self.get_time(time_format="csi")
        self.pvc_bulk_create_and_wait_for_bound(bulk_size)
        log.info(f"PVC creation dir is {self.yaml_creation_dir}")

        total_time = self.get_bulk_creation_time()
        log.info(f"{bulk_size} Bulk PVCs creation time is {total_time} seconds.")
        csi_creation_times = performance_lib.csi_bulk_pvc_time_measure(
            self.interface, self.pvc_objs, "create", csi_bulk_start_time
        )

        if total_time > bulk_creation_time_limit:
            raise ex.PerformanceException(
                f"{bulk_size} Bulk PVCs creation time is {total_time} seconds and is "
                f"greater than {bulk_creation_time_limit} seconds"
            )

        # Run the Bulk Deletion test
        pv_names_list = []
        for pvc_obj in self.pvc_objs:
            pv_names_list.append(pvc_obj.backed_pv)

        log.info(f"Starting to delete bulk of {bulk_size} PVCs")
        helpers.delete_bulk_pvcs(
            self.yaml_creation_dir, pv_names_list, namespace=self.namespace
        )
        log.info(f"Deletion of bulk of {bulk_size} PVCs successfully completed")

        log_deletion_times = helpers.measure_pv_deletion_time_bulk(
            self.interface, pv_names_list, return_log_times=True
        )

        all_start_times = [a_tuple[0] for a_tuple in log_deletion_times.values()]
        bulk_start_time = sorted(all_start_times)[0]  # the earliest start time
        start_deletion_time = datetime.datetime.strptime(
            bulk_start_time, helpers.DATE_TIME_FORMAT
        )

        all_end_times = [a_tuple[1] for a_tuple in log_deletion_times.values()]
        bulk_deletion_time = sorted(all_end_times)[-1]  # the latest end time
        end_deletion_time = datetime.datetime.strptime(
            bulk_deletion_time, helpers.DATE_TIME_FORMAT
        )

        total_deletion_time = (end_deletion_time - start_deletion_time).total_seconds()
        log.info(
            f"{bulk_size} Bulk PVCs deletion time is {total_deletion_time} seconds."
        )

        csi_deletion_times = performance_lib.csi_bulk_pvc_time_measure(
            self.interface, self.pvc_objs, "delete", csi_bulk_start_time
        )
        # Getting the end time of the test
        self.test_end_time = self.get_time()

        # Reset the list of PVCs since they were already deleted and do not
        # need to be deleted again in the teardown phase.
        self.pvc_objs = []

        # Produce ES report
        self.results_path = os.path.join(
            "/",
            *self.results_path,
            "test_bulk_pvc_creation_deletion_measurement_performance",
        )

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "bulk_creation_deletion_measurement",
            )
        )

        # Add the test time to the ES report
        full_results.add_key(
            "test_time", {"start": self.test_start_time, "end": self.test_end_time}
        )
        full_results.add_key("bulk_size", bulk_size)
        full_results.add_key("bulk_pvc_creation_time", total_time)
        full_results.add_key("bulk_pvc_csi_creation_time", csi_creation_times)
        full_results.add_key("bulk_pvc_deletion_time", total_deletion_time)
        full_results.add_key("bulk_pvc_csi_deletion_time", csi_deletion_times)

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            log.info(f"The result can be found at: {res_link}")

            # Create text file with results of all subtest (4 - according to the parameters)
            self.write_result_to_file(res_link)
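
The document shipped to Elasticsearch is essentially a flat key/value record. A plain-dict illustration whose field names mirror the add_key calls above (all values hypothetical):

import json

es_doc = {
    "test_time": {"start": "2022-01-01T10:00:00", "end": "2022-01-01T10:12:34"},
    "bulk_size": 240,
    "bulk_pvc_creation_time": 95.2,       # seconds, hypothetical
    "bulk_pvc_csi_creation_time": 88.7,   # seconds, hypothetical
    "bulk_pvc_deletion_time": 102.4,      # seconds, hypothetical
    "bulk_pvc_csi_deletion_time": 97.1,   # seconds, hypothetical
}
print(json.dumps(es_doc, indent=2))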
Example #8
    def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
        """
        Measuring PVC deletion time of 120 PVCs in 180 seconds

        Args:
            teardown_factory: A fixture that removes, in the teardown phase,
                              resources created during the test.
        Returns:

        """
        number_of_pvcs = 120
        pvc_size = "1Gi"
        log.info(f"Start creating {number_of_pvcs} new PVCs")

        pvc_objs = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            number_of_pvc=number_of_pvcs,
            size=pvc_size,
        )

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(
                    helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND
                )

                executor.submit(pvc_obj.reload)
        # Get pvc_name, require pvc_name to fetch deletion time data from log
        threads = list()
        for pvc_obj in pvc_objs:
            process = threading.Thread(target=pvc_obj.reload)
            process.start()
            threads.append(process)
        for process in threads:
            process.join()

        pvc_name_list, pv_name_list = ([] for i in range(2))
        threads = list()
        for pvc_obj in pvc_objs:
            # Pass the callable and args separately so the appends run in the
            # worker threads rather than immediately at Thread construction.
            process1 = threading.Thread(target=pvc_name_list.append, args=(pvc_obj.name,))
            process2 = threading.Thread(target=pv_name_list.append, args=(pvc_obj.backed_pv,))
            process1.start()
            process2.start()
            threads.append(process1)
            threads.append(process2)
        for process in threads:
            process.join()
        log.info(f"Preparing to delete {number_of_pvcs} PVCs")

        # Delete PVC
        for obj in pvc_objs:
            obj.delete()
        for obj in pvc_objs:
            obj.ocp.wait_for_delete(obj.name)

        # Get PVC deletion time
        pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=self.interface, pv_name_list=pv_name_list
        )
        logging.info(f"{number_of_pvcs} PVCs deletion time took {pvc_deletion_time}")
Example #9
    def test_bulk_pvc_creation_deletion_measurement_performance(
            self, teardown_factory, bulk_size):
        """
        Measuring PVC creation and deletion time of bulk_size PVCs

        Args:
            teardown_factory: A fixture that removes, in the teardown phase,
                              resources created during the test.
            bulk_size: Size of the bulk to be tested
        Returns:

        """
        bulk_creation_time_limit = bulk_size / 2
        log.info(f"Start creating new {bulk_size} PVCs")

        pvc_objs, yaml_creation_dir = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            number_of_pvc=bulk_size,
            size=self.pvc_size,
            burst=True,
        )
        logging.info(f"PVC creation dir is {yaml_creation_dir}")

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(helpers.wait_for_resource_state, pvc_obj,
                                constants.STATUS_BOUND)

                executor.submit(pvc_obj.reload)

        start_time = helpers.get_provision_time(self.interface,
                                                pvc_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              pvc_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        logging.info(
            f"{bulk_size} Bulk PVCs creation time is {total_time} seconds.")

        if total_time > bulk_creation_time_limit:
            raise ex.PerformanceException(
                f"{bulk_size} Bulk PVCs creation time is {total_time} seconds and is "
                f"greater than {bulk_creation_time_limit} seconds")

        pv_names_list = []
        for pvc_obj in pvc_objs:
            pv_names_list.append(pvc_obj.backed_pv)

        logging.info(f"Starting to delete bulk of {bulk_size} PVCs")
        helpers.delete_bulk_pvcs(yaml_creation_dir, pv_names_list)
        logging.info(
            f"Deletion of bulk of {bulk_size} PVCs successfully completed")

        log_deletion_times = helpers.measure_pv_deletion_time_bulk(
            self.interface, pv_names_list, return_log_times=True)

        all_start_times = [
            a_tuple[0] for a_tuple in log_deletion_times.values()
        ]
        bulk_start_time = sorted(all_start_times)[0]  # the earliest start time
        start_deletion_time = datetime.datetime.strptime(
            bulk_start_time, helpers.DATE_TIME_FORMAT)

        all_end_times = [a_tuple[1] for a_tuple in log_deletion_times.values()]
        bulk_deletion_time = sorted(all_end_times)[-1]  # the latest end time
        end_deletion_time = datetime.datetime.strptime(
            bulk_deletion_time, helpers.DATE_TIME_FORMAT)

        total_deletion_time = (end_deletion_time -
                               start_deletion_time).total_seconds()
        logging.info(
            f"{bulk_size} Bulk PVCs deletion time is {total_deletion_time} seconds."
        )
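
As in the earlier bulk tests, the creation limit works out to half a second per PVC. A quick check with a hypothetical bulk size:

bulk_size = 240                           # hypothetical
bulk_creation_time_limit = bulk_size / 2  # 120.0 seconds, i.e. 0.5 s per PVC
print(bulk_creation_time_limit)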