Example #1
    def create_multiple_pvcs_statistics(self, num_of_samples, teardown_factory,
                                        pvc_size):
        """

        Creates number (samples_num) of PVCs, measures creation time for each PVC and returns list of creation times.

         Args:
             num_of_samples: Number of the sampled created PVCs.
             teardown_factory: A fixture used when we want a new resource that was created during the tests.
             pvc_size: Size of the created PVCs.

         Returns:
             List of the creation times of all the created PVCs.

        """
        time_measures = []
        for i in range(num_of_samples):
            log.info(f"Start creation of PVC number {i + 1}.")

            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
            teardown_factory(pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                self.interface, pvc_obj.name)
            log.info(f"PVC created in {create_time} seconds")

            time_measures.append(create_time)
        return time_measures
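
    # Illustrative usage sketch (not part of the original tests): the helper
    # above returns a list of per-PVC creation times, which a caller could
    # summarize with the standard statistics module. The call below assumes
    # this class's pytest fixtures and is hypothetical.
    #
    #     import statistics
    #
    #     times = self.create_multiple_pvcs_statistics(
    #         num_of_samples=5, teardown_factory=teardown_factory, pvc_size=10)
    #     log.info(f"Average PVC creation time: {statistics.mean(times)} sec, "
    #              f"stdev: {statistics.stdev(times)} sec")
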
    def test_pvc_snapshot_performance(self, pvc_size):
        """
        1. Run I/O on a pod file
        2. Calculate md5sum of the file
        3. Take a snapshot of the PVC
        4. Measure the total snapshot creation time and the CSI snapshot creation time
        4. Restore From the snapshot and measure the time
        5. Attach a new pod to it
        6. Verify that the file is present on the new pod also
        7. Verify that the md5sum of the file on the new pod matches
           with the md5sum of the file on the original pod

        This scenario run 3 times and report all the average results of the 3 runs
        and will send them to the ES
        Args:
            pvc_size: the size of the PVC to be tested - parametrize

        """

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = ceph_cluster.get_ceph_capacity()

        log.info(f"Total capacity size is : {ceph_capacity}")
        log.info(f"PVC Size is : {pvc_size}")
        log.info(f"Needed capacity is {int(int(pvc_size) * 5)}")
        if int(ceph_capacity) < int(pvc_size) * 5:
            log.error(
                f"PVC size is {pvc_size}GiB and it is too large for this system"
                f" which has only {ceph_capacity}GiB")
            return
        # Calculating the file size as 25% of the PVC size
        # in the end the PVC will be 75% full
        filesize = self.pvc_obj.size * 0.25
        # Convert the file size to MiB and from float to str
        file_size = f"{int(filesize * 1024)}M"

        all_results = []

        self.results_path = get_full_test_logs_path(cname=self)
        log.info(f"Logs file path name is : {self.full_log_path}")

        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        self.full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_snapshot_perf",
            ))
        self.full_results.add_key("pvc_size", pvc_size + " GiB")
        self.full_results.add_key("interface", self.sc)
        self.full_results.all_results["creation_time"] = []
        self.full_results.all_results["csi_creation_time"] = []
        self.full_results.all_results["creation_speed"] = []
        self.full_results.all_results["restore_time"] = []
        self.full_results.all_results["restore_speed"] = []
        self.full_results.all_results["restore_csi_time"] = []
        for test_num in range(self.tests_numbers):
            test_results = {
                "test_num": test_num + 1,
                "dataset": (test_num + 1) * filesize * 1024,  # size in MiB
                "create": {
                    "time": None,
                    "csi_time": None,
                    "speed": None
                },
                "restore": {
                    "time": None,
                    "speed": None
                },
            }
            log.info(f"Starting test phase number {test_num}")
            # Step 1. Run I/O on a pod file.
            file_name = f"{self.pod_object.name}-{test_num}"
            log.info(f"Starting IO on the POD {self.pod_object.name}")
            # Going to run only write IO to fill the PVC for the snapshot
            self.pod_object.fillup_fs(size=file_size, fio_filename=file_name)

            # Wait for fio to finish
            fio_result = self.pod_object.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"IO error on pod {self.pod_object.name}. FIO result: {fio_result}"
            log.info("IO on the PVC Finished")

            # Verify presence of the file
            file_path = pod.get_file_path(self.pod_object, file_name)
            log.info(f"Actual file path on the pod {file_path}")
            assert pod.check_file_existence(
                self.pod_object, file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {self.pod_object.name}")

            # Step 2. Calculate md5sum of the file.
            orig_md5_sum = pod.cal_md5sum(self.pod_object, file_name)

            # Step 3. Take a snapshot of the PVC and measure the time of creation.
            snap_name = self.pvc_obj.name.replace("pvc-test",
                                                  f"snapshot-test{test_num}")
            log.info(f"Taking snapshot of the PVC {snap_name}")

            # Take the start time (UTC) so parsing the provisioner logs
            # will be faster.
            start_time = datetime.datetime.utcnow().strftime(
                "%Y-%m-%dT%H:%M:%SZ")

            test_results["create"]["time"] = self.measure_create_snapshot_time(
                pvc_name=self.pvc_obj.name,
                snap_name=snap_name,
                namespace=self.pod_object.namespace,
                interface=self.interface,
                start_time=start_time,
            )

            test_results["create"][
                "csi_time"] = performance_lib.measure_csi_snapshot_creation_time(
                    interface=self.interface,
                    snapshot_id=self.snap_uid,
                    start_time=start_time,
                )

            test_results["create"]["speed"] = int(
                test_results["dataset"] / test_results["create"]["time"])
            log.info(
                f' Test {test_num} dataset is {test_results["dataset"]} MiB')
            log.info(
                f"Snapshot name {snap_name} and id {self.snap_uid} creation time is"
                f' : {test_results["create"]["time"]} sec.')
            log.info(
                f"Snapshot name {snap_name} and id {self.snap_uid} csi creation time is"
                f' : {test_results["create"]["csi_time"]} sec.')
            log.info(
                f'Snapshot speed is : {test_results["create"]["speed"]} MB/sec'
            )

            # Step 4. Restore the PVC from the snapshot and measure the time
            # Same Storage class of the original PVC
            sc_name = self.pvc_obj.backed_sc

            # Size should be same as of the original PVC
            pvc_size = str(self.pvc_obj.size) + "Gi"

            # Create pvc out of the snapshot
            # Both, the snapshot and the restore PVC should be in same namespace

            log.info("Restoring from the Snapshot")
            restore_pvc_name = self.pvc_obj.name.replace(
                "pvc-test", f"restore-pvc{test_num}")
            restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML
            if self.interface == constants.CEPHFILESYSTEM:
                restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML

            csi_start_time = self.get_time("csi")
            log.info("Restoring the PVC from Snapshot")
            restore_pvc_obj = pvc.create_restore_pvc(
                sc_name=sc_name,
                snap_name=self.snap_obj.name,
                namespace=self.snap_obj.namespace,
                size=pvc_size,
                pvc_name=restore_pvc_name,
                restore_pvc_yaml=restore_pvc_yaml,
            )
            helpers.wait_for_resource_state(
                restore_pvc_obj,
                constants.STATUS_BOUND,
                timeout=3600  # setting this to 60 min.
                # since restoring can take a long time, and we want it to finish
            )
            restore_pvc_obj.reload()
            log.info("PVC was restored from the snapshot")
            test_results["restore"][
                "time"] = helpers.measure_pvc_creation_time(
                    self.interface, restore_pvc_obj.name)

            test_results["restore"]["speed"] = int(
                test_results["dataset"] / test_results["restore"]["time"])
            log.info(
                f'Snapshot restore time is : {test_results["restore"]["time"]}'
            )
            log.info(
                f'restore speed is : {test_results["restore"]["speed"]} MB/sec'
            )

            test_results["restore"][
                "csi_time"] = performance_lib.csi_pvc_time_measure(
                    self.interface, restore_pvc_obj, "create", csi_start_time)
            log.info(
                f'Snapshot csi restore time is : {test_results["restore"]["csi_time"]}'
            )

            # Step 5. Attach a new pod to the restored PVC
            restore_pod_object = helpers.create_pod(
                interface_type=self.interface,
                pvc_name=restore_pvc_obj.name,
                namespace=self.snap_obj.namespace,
            )

            # Confirm that the pod is running
            helpers.wait_for_resource_state(resource=restore_pod_object,
                                            state=constants.STATUS_RUNNING)
            restore_pod_object.reload()

            # Step 6. Verify that the file is present on the new pod also.
            log.info(f"Checking the existence of {file_name} "
                     f"on restore pod {restore_pod_object.name}")
            assert pod.check_file_existence(
                restore_pod_object,
                file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {restore_pod_object.name}")

            # Step 7. Verify that the md5sum matches
            log.info(
                f"Verifying that md5sum of {file_name} "
                f"on pod {self.pod_object.name} matches with md5sum "
                f"of the same file on restore pod {restore_pod_object.name}")
            assert pod.verify_data_integrity(
                restore_pod_object, file_name,
                orig_md5_sum), "Data integrity check failed"
            log.info("Data integrity check passed, md5sum are same")

            restore_pod_object.delete()
            restore_pvc_obj.delete()

            all_results.append(test_results)

        # clean the environment
        self.pod_object.delete()
        self.pvc_obj.delete()
        self.delete_test_project()

        # logging the test summary, all info in one place for easy log reading
        c_speed, c_runtime, c_csi_runtime, r_speed, r_runtime, r_csi_runtime = (
            0 for i in range(6))

        log.info("Test summary :")
        for tst in all_results:
            c_speed += tst["create"]["speed"]
            c_runtime += tst["create"]["time"]
            c_csi_runtime += tst["create"]["csi_time"]
            r_speed += tst["restore"]["speed"]
            r_runtime += tst["restore"]["time"]
            r_csi_runtime += tst["restore"]["csi_time"]

            self.full_results.all_results["creation_time"].append(
                tst["create"]["time"])
            self.full_results.all_results["csi_creation_time"].append(
                tst["create"]["csi_time"])
            self.full_results.all_results["creation_speed"].append(
                tst["create"]["speed"])
            self.full_results.all_results["restore_time"].append(
                tst["restore"]["time"])
            self.full_results.all_results["restore_speed"].append(
                tst["restore"]["speed"])
            self.full_results.all_results["restore_csi_time"].append(
                tst["restore"]["csi_time"])
            self.full_results.all_results["dataset_inMiB"] = tst["dataset"]
            log.info(
                f"Test {tst['test_num']} results : dataset is {tst['dataset']} MiB. "
                f"Take snapshot time is {tst['create']['time']} "
                f"at {tst['create']['speed']} MiB/Sec "
                f"Restore from snapshot time is {tst['restore']['time']} "
                f"at {tst['restore']['speed']} MiB/Sec ")

        avg_snap_c_time = c_runtime / self.tests_numbers
        avg_snap_csi_c_time = c_csi_runtime / self.tests_numbers
        avg_snap_c_speed = c_speed / self.tests_numbers
        avg_snap_r_time = r_runtime / self.tests_numbers
        avg_snap_r_speed = r_speed / self.tests_numbers
        avg_snap_r_csi_time = r_csi_runtime / self.tests_numbers
        log.info(f" Average snapshot creation time is {avg_snap_c_time} sec.")
        log.info(
            f" Average csi snapshot creation time is {avg_snap_csi_c_time} sec."
        )
        log.info(
            f" Average snapshot creation speed is {avg_snap_c_speed} MiB/sec")
        log.info(f" Average snapshot restore time is {avg_snap_r_time} sec.")
        log.info(
            f" Average snapshot restore speed is {avg_snap_r_speed} MiB/sec")
        log.info(
            f" Average snapshot restore csi time is {avg_snap_r_csi_time} sec."
        )

        self.full_results.add_key("avg_snap_creation_time_insecs",
                                  avg_snap_c_time)
        self.full_results.add_key("avg_snap_csi_creation_time_insecs",
                                  avg_snap_csi_c_time)
        self.full_results.add_key("avg_snap_creation_speed", avg_snap_c_speed)
        self.full_results.add_key("avg_snap_restore_time_insecs",
                                  avg_snap_r_time)
        self.full_results.add_key("avg_snap_restore_speed", avg_snap_r_speed)
        self.full_results.add_key("avg_snap_restore_csi_time_insecs",
                                  avg_snap_r_csi_time)

        # Write the test results into the ES server
        log.info("writing results to elastic search server")
        if self.full_results.es_write():
            res_link = self.full_results.results_link()

            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            self.write_result_to_file(res_link)
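
    # Minimal sketch of what es_write() conceptually does, assuming a plain
    # elasticsearch-py client. ResultsAnalyse wraps this kind of logic; the
    # endpoint and index name below are hypothetical, not the real ones.
    #
    #     from elasticsearch import Elasticsearch
    #
    #     es = Elasticsearch("http://es-server.example.com:9200")
    #     es.index(index="pvc_snapshot_perf",
    #              document={"avg_snap_creation_time_insecs": avg_snap_c_time})
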
    def test_pvc_snapshot_performance(self, teardown_factory, pvc_size):
        """
        1. Run I/O on a pod file.
        2. Calculate md5sum of the file.
        3. Take a snapshot of the PVC and measure the time of creation.
        4. Restore From the snapshot and measure the time
        5. Attach a new pod to it.
        6. Verify that the file is present on the new pod also.
        7. Verify that the md5sum of the file on the new pod matches
           with the md5sum of the file on the original pod.

        This scenario runs 3 times and reports all results
        Args:
            teardown_factory: A fixture to destroy objects
            pvc_size: the size of the PVC to be tested - parametrize

        """

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = ceph_cluster.get_ceph_capacity()

        log.info(f"Total capacity size is : {ceph_capacity}")
        log.info(f"PVC Size is : {pvc_size}")
        log.info(f"Needed capacity is {int(int(pvc_size) * 5)}")
        if int(ceph_capacity) < int(pvc_size) * 5:
            log.error(
                f"PVC size is {pvc_size}GiB and it is too large for this system"
                f" which has only {ceph_capacity}GiB")
            return
        # Calculating the file size as 25% of the PVC size
        # in the end the PVC will be 75% full
        filesize = self.pvc_obj.size * 0.25
        # Convert the file size to MiB and from float to str
        file_size = f"{int(filesize * 1024)}M"

        all_results = []

        for test_num in range(self.tests_numbers):
            test_results = {
                "test_num": test_num + 1,
                "dataset": (test_num + 1) * filesize * 1024,  # size in MiB
                "create": {
                    "time": None,
                    "speed": None
                },
                "restore": {
                    "time": None,
                    "speed": None
                },
            }
            log.info(f"Starting test phase number {test_num}")
            # Step 1. Run I/O on a pod file.
            file_name = f"{self.pod_obj.name}-{test_num}"
            log.info(f"Starting IO on the POD {self.pod_obj.name}")
            # Going to run only write IO to fill the PVC for the snapshot
            self.pod_obj.fillup_fs(size=file_size, fio_filename=file_name)

            # Wait for fio to finish
            fio_result = self.pod_obj.get_fio_results()
            err_count = fio_result.get("jobs")[0].get("error")
            assert (
                err_count == 0
            ), f"IO error on pod {self.pod_obj.name}. FIO result: {fio_result}"
            log.info("IO on the PVC Finished")

            # Verify presence of the file
            file_path = pod.get_file_path(self.pod_obj, file_name)
            log.info(f"Actual file path on the pod {file_path}")
            assert pod.check_file_existence(
                self.pod_obj, file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {self.pod_obj.name}")

            # Step 2. Calculate md5sum of the file.
            orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)

            # Step 3. Take a snapshot of the PVC and measure the time of creation.
            snap_name = self.pvc_obj.name.replace("pvc-test",
                                                  f"snapshot-test{test_num}")
            log.info(f"Taking snapshot of the PVC {snap_name}")

            test_results["create"]["time"] = self.measure_create_snapshot_time(
                pvc_name=self.pvc_obj.name,
                snap_name=snap_name,
                interface=self.interface,
            )
            test_results["create"]["speed"] = int(
                test_results["dataset"] / test_results["create"]["time"])
            log.info(
                f' Test {test_num} dataset is {test_results["dataset"]} MiB')
            log.info(
                f'Snapshot creation time is : {test_results["create"]["time"]} sec.'
            )
            log.info(
                f'Snapshot speed is : {test_results["create"]["speed"]} MB/sec'
            )

            # Step 4. Restore the PVC from the snapshot and measure the time
            # Same Storage class of the original PVC
            sc_name = self.pvc_obj.backed_sc

            # Size should be same as of the original PVC
            pvc_size = str(self.pvc_obj.size) + "Gi"

            # Create pvc out of the snapshot
            # Both, the snapshot and the restore PVC should be in same namespace

            log.info("Restoring from the Snapshot")
            restore_pvc_name = self.pvc_obj.name.replace(
                "pvc-test", f"restore-pvc{test_num}")
            restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML
            if self.interface == constants.CEPHFILESYSTEM:
                restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML

            log.info("Resorting the PVC from Snapshot")
            restore_pvc_obj = pvc.create_restore_pvc(
                sc_name=sc_name,
                snap_name=self.snap_obj.name,
                namespace=self.snap_obj.namespace,
                size=pvc_size,
                pvc_name=restore_pvc_name,
                restore_pvc_yaml=restore_pvc_yaml,
            )
            helpers.wait_for_resource_state(
                restore_pvc_obj,
                constants.STATUS_BOUND,
                timeout=3600  # setting this to 60 min.
                # since restoring can take a long time, and we want it to finish
            )
            teardown_factory(restore_pvc_obj)
            restore_pvc_obj.reload()
            log.info("PVC was restored from the snapshot")
            test_results["restore"][
                "time"] = helpers.measure_pvc_creation_time(
                    self.interface, restore_pvc_obj.name)
            test_results["restore"]["speed"] = int(
                test_results["dataset"] / test_results["restore"]["time"])
            log.info(
                f'Snapshot restore time is : {test_results["restore"]["time"]}'
            )
            log.info(
                f'restore speed is : {test_results["restore"]["speed"]} MB/sec')

            # Step 5. Attach a new pod to the restored PVC
            restore_pod_obj = helpers.create_pod(
                interface_type=self.interface,
                pvc_name=restore_pvc_obj.name,
                namespace=self.snap_obj.namespace,
                pod_dict_path=constants.NGINX_POD_YAML,
            )

            # Confirm that the pod is running
            helpers.wait_for_resource_state(resource=restore_pod_obj,
                                            state=constants.STATUS_RUNNING)
            teardown_factory(restore_pod_obj)
            restore_pod_obj.reload()

            # Step 6. Verify that the file is present on the new pod also.
            log.info(f"Checking the existence of {file_name} "
                     f"on restore pod {restore_pod_obj.name}")
            assert pod.check_file_existence(
                restore_pod_obj, file_path), f"File {file_name} doesn't exist"
            log.info(f"File {file_name} exists in {restore_pod_obj.name}")

            # Step 7. Verify that the md5sum matches
            log.info(f"Verifying that md5sum of {file_name} "
                     f"on pod {self.pod_obj.name} matches with md5sum "
                     f"of the same file on restore pod {restore_pod_obj.name}")
            assert pod.verify_data_integrity(
                restore_pod_obj, file_name,
                orig_md5_sum), "Data integrity check failed"
            log.info("Data integrity check passed, md5sum are same")

            all_results.append(test_results)

        # logging the test summary, all info in one place for easy log reading
        c_speed, c_runtime, r_speed, r_runtime = (0 for i in range(4))
        log.info("Test summary :")
        for tst in all_results:
            c_speed += tst["create"]["speed"]
            c_runtime += tst["create"]["time"]
            r_speed += tst["restore"]["speed"]
            r_runtime += tst["restore"]["time"]
            log.info(
                f"Test {tst['test_num']} results : dataset is {tst['dataset']} MiB. "
                f"Take snapshot time is {tst['create']['time']} "
                f"at {tst['create']['speed']} MiB/Sec "
                f"Restore from snapshot time is {tst['restore']['time']} "
                f"at {tst['restore']['speed']} MiB/Sec ")
        log.info(
            f" Average snapshot creation time is {c_runtime / self.tests_numbers} sec."
        )
        log.info(
            f" Average snapshot creation speed is {c_speed / self.tests_numbers} MiB/sec"
        )
        log.info(
            f" Average snapshot restore time is {r_runtime / self.tests_numbers} sec."
        )
        log.info(
            f" Average snapshot restore speed is {r_speed / self.tests_numbers} MiB/sec"
        )
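
        # Equivalent summary computation (sketch, not in the original test):
        # since all_results holds one dict per run, the same averages can be
        # taken with the statistics module instead of manual accumulation:
        #
        #     import statistics
        #
        #     avg_create = statistics.mean(t["create"]["time"] for t in all_results)
        #     avg_restore = statistics.mean(t["restore"]["time"] for t in all_results)
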
Example #4
    def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                             file_size, teardown_factory):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create a single clone of the existing PVC,
        Measure the clone creation time and speed
        Delete the created clone
        Measure the clone deletion time and speed
        Note: increasing the max_num_of_clones value increases the number of clones to be created/deleted
        """

        file_size_for_io = file_size[:-1]

        performance_lib.write_fio_on_pod(self.pod_obj, file_size_for_io)

        max_num_of_clones = 1
        clone_creation_measures = []
        clones_list = []
        timeout = 18000
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        namespace = self.pvc_obj.namespace
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        file_size_mb = convert_device_size(file_size, "MB")

        # creating a single clone (or many, one by one, if max_num_of_clones > 1)
        logger.info(
            f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                                  parent_pvc,
                                                  clone_yaml,
                                                  namespace,
                                                  storage_size=pvc_size + "Gi")
            teardown_factory(cloned_pvc_obj)
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
            )
            clones_list.append(cloned_pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                interface_type, cloned_pvc_obj.name)
            creation_speed = int(file_size_mb / create_time)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
            )
            creation_measures = {
                "clone_num": i + 1,
                "time": create_time,
                "speed": creation_speed,
            }
            clone_creation_measures.append(creation_measures)

        # deleting the clones created above one by one, measuring the deletion
        # time and speed for each; in the case of a single clone this runs once
        clone_deletion_measures = []

        logger.info(
            f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            cloned_pvc_obj = clones_list[i]
            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface_type, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            deletion_speed = int(file_size_mb / delete_time)
            logger.info(
                f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
            )
            deletion_measures = {
                "clone_num": i + 1,
                "time": delete_time,
                "speed": deletion_speed,
            }
            clone_deletion_measures.append(deletion_measures)

        logger.info(
            f"Printing clone creation time and speed for {max_num_of_clones} clones "
            f"on {interface_type} PVC of size {pvc_size} GB:")

        for c in clone_creation_measures:
            logger.info(
                f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
            )
            logger.info(
                f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
            )

        logger.info(
            f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
        )
        for d in clone_deletion_measures:
            logger.info(
                f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
            )

        logger.info("test_clones_creation_performance finished successfully.")
    def test_pvc_clone_performance_multiple_files(
        self,
        pvc_factory,
        interface,
        copies,
        timeout,
    ):
        """
        Test clone creation and deletion performance on a PVC that holds
        many small files.
        Each kernel tree (unzipped) is 892M and 61694 files.
        The test creates a PVC and a pod, writes the kernel files multiplied
        by the number of copies, then creates a number of clone samples,
        calculates the creation and deletion times for each of the clones,
        and calculates the average creation and average deletion times.
        """
        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"

        test_start_time = self.get_time()
        helpers.pull_images(constants.PERF_IMAGE)
        # Download a linux Kernel

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        # Create a PVC
        accessmode = constants.ACCESS_MODE_RWX
        if interface == constants.CEPHBLOCKPOOL:
            accessmode = constants.ACCESS_MODE_RWO

        pvc_size = "100"
        try:
            pvc_obj = pvc_factory(
                interface=interface,
                access_mode=accessmode,
                status=constants.STATUS_BOUND,
                size=pvc_size,
            )
        except Exception as e:
            logger.error(f"The PVC sample was not created, exception {str(e)}")
            raise PVCNotCreated("PVC did not reach BOUND state.")

        # Create a pod on one node
        logger.info(f"Creating Pod with pvc {pvc_obj.name}")

        try:
            pod_obj = helpers.create_pod(
                interface_type=interface,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                pod_dict_path=constants.PERF_POD_YAML,
            )
        except Exception as e:
            logger.error(
                f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
            )
            raise PodNotCreated("Pod on PVC was not created.")

        # Confirm that the pod is running
        logger.info("Checking whether the pod is running")
        helpers.wait_for_resource_state(resource=pod_obj,
                                        state=constants.STATUS_RUNNING,
                                        timeout=timeout)

        pod_name = pod_obj.name
        pod_path = "/mnt"

        _ocp = OCP(namespace=pvc_obj.namespace)

        rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
        _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
        _ocp.exec_oc_cmd(rsh_cmd)

        for x in range(copies):
            rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- sync"
            _ocp.exec_oc_cmd(rsh_cmd)

        logger.info("Getting the amount of data written to the PVC")
        rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
        data_written = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
        logger.info(f"The amount of written data is {data_written}")

        rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
        files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
        logger.info(
            f"For {interface} - The number of files written to the pod is {files_written}"
        )

        # delete the pod
        pod_obj.delete(wait=False)

        logger.info("Wait for the pod to be deleted")
        performance_lib.wait_for_resource_bulk_status(
            "pod", 0, pvc_obj.namespace, constants.STATUS_COMPLETED, timeout,
            5)
        logger.info("The pod was deleted")

        num_of_clones = 11
        # increasing the timeout since clone creation time is longer than pod attach time
        timeout = 18000

        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        if interface == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        clone_creation_measures = []
        csi_clone_creation_measures = []
        clone_deletion_measures = []
        csi_clone_deletion_measures = []

        # taking the time, so parsing the provision log will be faster.
        start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        for i in range(num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(
                pvc_obj.backed_sc,
                pvc_obj.name,
                clone_yaml,
                pvc_obj.namespace,
                storage_size=pvc_size + "Gi",
            )
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {pvc_obj.name} was created."
            )
            create_time = helpers.measure_pvc_creation_time(
                interface, cloned_pvc_obj.name)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            clone_creation_measures.append(create_time)
            csi_clone_creation_measures.append(
                performance_lib.csi_pvc_time_measure(interface, cloned_pvc_obj,
                                                     "create", start_time))

            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            clone_deletion_measures.append(delete_time)
            csi_clone_deletion_measures.append(
                performance_lib.csi_pvc_time_measure(interface, cloned_pvc_obj,
                                                     "delete", start_time))

        os.remove(file_path)
        os.rmdir(dir_path)
        pvc_obj.delete()

        average_creation_time = statistics.mean(clone_creation_measures)
        logger.info(f"Average creation time is  {average_creation_time} secs.")
        average_csi_creation_time = statistics.mean(
            csi_clone_creation_measures)
        logger.info(
            f"Average csi creation time is  {average_csi_creation_time} secs.")

        average_deletion_time = statistics.mean(clone_deletion_measures)
        logger.info(f"Average deletion time is  {average_deletion_time} secs.")
        average_csi_deletion_time = statistics.mean(
            csi_clone_deletion_measures)
        logger.info(
            f"Average csi deletion time is  {average_csi_deletion_time} secs.")

        # Produce ES report

        # Collecting environment information
        self.get_env_info()
        self.results_path = get_full_test_logs_path(cname=self)
        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "test_pvc_clone_performance_multiple_files_fullres",
            ))

        full_results.add_key("files_number", files_written)

        test_end_time = self.get_time()

        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        full_results.add_key("interface", interface)
        full_results.add_key("clones_number", num_of_clones)
        full_results.add_key("pvc_size", pvc_size)
        full_results.add_key("average_clone_creation_time",
                             average_creation_time)
        full_results.add_key("average_csi_clone_creation_time",
                             average_csi_creation_time)
        full_results.add_key("average_clone_deletion_time",
                             average_deletion_time)
        full_results.add_key("average_csi_clone_deletion_time",
                             average_csi_deletion_time)

        full_results.all_results = {
            "clone_creation_time": clone_creation_measures,
            "csi_clone_creation_time": csi_clone_creation_measures,
            "clone_deletion_time": clone_deletion_measures,
            "csi_clone_deletion_time": csi_clone_deletion_measures,
        }

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (4 - according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pvc_clone_performance_multiple_files")
            self.write_result_to_file(res_link)
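
    # Sketch of the `df -h` parsing above (illustrative): the output has a
    # header line ("Filesystem Size Used Avail Use% Mounted on") followed by
    # one data line, so taking the 4th token from the end of the whole output
    # yields the "Used" column:
    #
    #     out = "Filesystem Size Used Avail Use% Mounted on\n" \
    #           "/dev/rbd0  100G  75G  25G  75% /mnt"
    #     data_written = out.split()[-4]  # -> "75G"
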
    def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                             file_size, teardown_factory):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create clones of the existing PVC,
        Measure the average clone creation time and speed
        Delete the created clones
        Measure the average clone deletion time and speed
        Note: increasing the max_num_of_clones value increases the number of clones to be created/deleted
        """

        file_size_for_io = file_size[:-1]

        performance_lib.write_fio_on_pod(self.pod_object, file_size_for_io)

        max_num_of_clones = 10
        clone_creation_measures = []
        csi_clone_creation_measures = []
        clones_list = []
        timeout = 18000
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        namespace = self.pvc_obj.namespace
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        file_size_mb = convert_device_size(file_size, "MB")

        logger.info(
            f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        # taking the time, so parsing the provision log will be faster.
        start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        for i in range(max_num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                                  parent_pvc,
                                                  clone_yaml,
                                                  namespace,
                                                  storage_size=pvc_size + "Gi")
            teardown_factory(cloned_pvc_obj)
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
            )
            clones_list.append(cloned_pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                interface_type, cloned_pvc_obj.name)
            creation_speed = int(file_size_mb / create_time)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
            )
            creation_measures = {
                "clone_num": i + 1,
                "time": create_time,
                "speed": creation_speed,
            }
            clone_creation_measures.append(creation_measures)
            csi_clone_creation_measures.append(
                performance_lib.csi_pvc_time_measure(self.interface,
                                                     cloned_pvc_obj, "create",
                                                     start_time))

        # deleting the clones created above one by one, measuring the deletion
        # time and speed for each; in the case of a single clone this runs once
        clone_deletion_measures = []
        csi_clone_deletion_measures = []

        logger.info(
            f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            cloned_pvc_obj = clones_list[i]
            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface_type, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            deletion_speed = int(file_size_mb / delete_time)
            logger.info(
                f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
            )
            deletion_measures = {
                "clone_num": i + 1,
                "time": delete_time,
                "speed": deletion_speed,
            }
            clone_deletion_measures.append(deletion_measures)
            csi_clone_deletion_measures.append(
                performance_lib.csi_pvc_time_measure(self.interface,
                                                     cloned_pvc_obj, "delete",
                                                     start_time))

        logger.info(
            f"Printing clone creation time and speed for {max_num_of_clones} clones "
            f"on {interface_type} PVC of size {pvc_size} GB:")
        for c in clone_creation_measures:
            logger.info(
                f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
            )
            logger.info(
                f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
            )
        logger.info(
            f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
        )
        creation_time_list = [r["time"] for r in clone_creation_measures]
        creation_speed_list = [r["speed"] for r in clone_creation_measures]
        average_creation_time = statistics.mean(creation_time_list)
        average_csi_creation_time = statistics.mean(
            csi_clone_creation_measures)
        average_creation_speed = statistics.mean(creation_speed_list)
        logger.info(f"Average creation time is  {average_creation_time} secs.")
        logger.info(
            f"Average creation speed is {average_creation_speed} MB/sec.")

        for d in clone_deletion_measures:
            logger.info(
                f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
            )

        deletion_time_list = [r["time"] for r in clone_deletion_measures]
        deletion_speed_list = [r["speed"] for r in clone_deletion_measures]
        average_deletion_time = statistics.mean(deletion_time_list)
        average_csi_deletion_time = statistics.mean(
            csi_clone_deletion_measures)
        average_deletion_speed = statistics.mean(deletion_speed_list)
        logger.info(f"Average deletion time is  {average_deletion_time} secs.")
        logger.info(
            f"Average deletion speed is {average_deletion_speed} MB/sec.")
        logger.info("test_clones_creation_performance finished successfully.")

        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        self.results_path = get_full_test_logs_path(cname=self)
        self.full_log_path = get_full_test_logs_path(cname=self)
        self.full_log_path += f"-{self.interface}-{pvc_size}-{file_size}"
        logger.info(f"Logs file path name is : {self.full_log_path}")

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_clone_performance",
            ))

        full_results.add_key("interface", self.interface)
        full_results.add_key("total_clone_number", max_num_of_clones)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("average_clone_creation_time",
                             average_creation_time)
        full_results.add_key("average_csi_clone_creation_time",
                             average_csi_creation_time)
        full_results.add_key("average_clone_deletion_time",
                             average_deletion_time)
        full_results.add_key("average_csi_clone_deletion_time",
                             average_csi_deletion_time)
        full_results.add_key("average_clone_creation_speed",
                             average_creation_speed)
        full_results.add_key("average_clone_deletion_speed",
                             average_deletion_speed)

        full_results.all_results = {
            "clone_creation_time": creation_time_list,
            "csi_clone_creation_time": csi_clone_creation_measures,
            "clone_deletion_time": deletion_time_list,
            "csi_clone_deletion_time": csi_clone_deletion_measures,
            "clone_creation_speed": creation_speed_list,
            "clone_deletion_speed": deletion_speed_list,
        }

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (8 - according to the parameters)
            self.write_result_to_file(res_link)