def test_pvc_deletion_measurement_performance(self, teardown_factory, pvc_size):
        """
        Measure the PVC deletion time and verify it is within the supported limits
        """
        logging.info("Start creating new PVC")

        pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
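        # Capture the backing PV name and the reclaim policy before the PVC is
        # deleted; both are needed for the validation and measurement below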
        pv_name = pvc_obj.backed_pv
        pvc_reclaim_policy = pvc_obj.reclaim_policy
        teardown_factory(pvc_obj)
        logging.info("Start deletion of PVC")
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(pvc_obj.backed_pv)
        delete_time = helpers.measure_pvc_deletion_time(self.interface, pv_name)
        # Deletion time for a CephFS PVC is a little over 3 seconds
        accepted_deletion_time = 4 if self.interface == constants.CEPHFILESYSTEM else 3
        logging.info(f"PVC deleted in {delete_time} seconds")
        if delete_time > accepted_deletion_time:
            raise ex.PerformanceException(
                f"PVC deletion time is {delete_time} and is greater than {accepted_deletion_time} seconds"
            )
        push_to_pvc_time_dashboard(self.interface, "1-pvc-deletion", delete_time)
Example #2
    def cleanup(self):
        """
        Clean up

        """
        switch_to_project(BMO_NAME)
        log.info("Deleting postgres pods and configuration")
        if self.pgsql_is_setup:
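            # Clear the cached _is_deleted flag before each delete() call
            # (assumption: delete() is skipped for resources already marked as deleted)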
            self.pgsql_sset._is_deleted = False
            self.pgsql_sset.delete()
            self.pgsql_cmap._is_deleted = False
            self.pgsql_cmap.delete()
            self.pgsql_service._is_deleted = False
            self.pgsql_service.delete()
        log.info("Deleting pgbench pods")
        pods_obj = self.get_pgbench_pods()
        pvcs_obj = self.get_postgres_pvc()
        for pod in pods_obj:
            pod.delete()
            pod.ocp.wait_for_delete(pod.name)
        for pvc in pvcs_obj:
            pvc.delete()
            pvc.ocp.wait_for_delete(pvc.name)
            validate_pv_delete(pvc.backed_pv)
        log.info("Deleting benchmark operator configuration")
        BenchmarkOperator.cleanup(self)
Example #3
    def cleanup(
        self,
        kafka_namespace=constants.AMQ_NAMESPACE,
        tiller_namespace=AMQ_BENCHMARK_NAMESPACE,
    ):
        """
        Clean up function,
        will start to delete from amq cluster operator
        then amq-connector, persistent, bridge, at the end it will delete the created namespace

        Args:
            kafka_namespace (str): Created namespace for amq
            tiller_namespace (str): Created namespace for benchmark

        """
        if self.amq_is_setup:
            if self.messaging:
                self.consumer_pod.delete()
                self.producer_pod.delete()
                self.kafka_user.delete()
                self.kafka_topic.delete()
            if self.benchmark:
                # Delete the helm app
                try:
                    purge_cmd = f"linux-amd64/helm delete benchmark --purge --tiller-namespace {tiller_namespace}"
                    run(purge_cmd, shell=True, cwd=self.dir, check=True)
                except (CommandFailed, CalledProcessError) as cf:
                    log.error("Failed to delete help app")
                    raise cf

                # Delete the pods and namespace created
                self.sa_tiller.delete()
                self.crb_tiller.delete()
                run_cmd(f"oc delete project {tiller_namespace}")
                self.ns_obj.wait_for_delete(resource_name=tiller_namespace)

            self.kafka_persistent.delete()
            self.kafka_connect.delete()
            self.kafka_bridge.delete()
            run_cmd(f"oc delete -f {self.amq_dir}",
                    shell=True,
                    check=True,
                    cwd=self.dir)

            # Collect the PVCs in the kafka namespace before the project is deleted
            ocs_pvc_obj = get_all_pvc_objs(namespace=kafka_namespace)
        else:
            ocs_pvc_obj = []

        run_cmd(f"oc delete project {kafka_namespace}")

        self.ns_obj.wait_for_delete(resource_name=kafka_namespace, timeout=90)
        for pvc in ocs_pvc_obj:
            logging.info(pvc.name)
            validate_pv_delete(pvc.backed_pv)
        # Reset namespace to default
        switch_to_default_rook_cluster_project()
Example #4
 def finalizer():
     """
     Delete multiple PVCs
     """
     if hasattr(class_instance, "pvc_objs"):
         # Collect the backing PV of each PVC before deletion so that every PV
         # (not only the last one) can be validated after the PVCs are gone
         backed_pv_names = []
         for pvc_obj in class_instance.pvc_objs:
             pvc_obj.reload()
             backed_pv_names.append(pvc_obj.backed_pv)
             pvc_obj.delete()
         for pvc_obj, backed_pv_name in zip(class_instance.pvc_objs, backed_pv_names):
             pvc_obj.ocp.wait_for_delete(pvc_obj.name)
             helpers.validate_pv_delete(backed_pv_name)
def verify_pv_not_exists(pvc_obj, cbp_name, rbd_image_id):
    """
    Ensure that the PV does not exist
    """

    # Validate on the Ceph side
    logger.info(f"Verifying PV {pvc_obj.backed_pv} is deleted on the backend")

    status = helpers.verify_volume_deleted_in_backend(
        interface=constants.CEPHBLOCKPOOL, image_uuid=rbd_image_id, pool_name=cbp_name
    )

    if not status:
        raise UnexpectedBehaviour(f"PV {pvc_obj.backed_pv} exists on backend")
    logger.info(
        f"Expected: PV {pvc_obj.backed_pv} "
        f"doesn't exist on backend after deleting PVC"
    )

    # Validate on oc side
    logger.info("Verifying whether PV is deleted")
    try:
        assert helpers.validate_pv_delete(pvc_obj.backed_pv)
    except AssertionError as ecf:
        assert "not found" in str(
            ecf
        ), f"Unexpected: PV {pvc_obj.backed_pv} still exists"
    logger.info("Expected: PV should not be found " "after deleting corresponding PVC")
    def create_and_delete_clones(self):
        # Create the clones one by one and wait until each one is bound
        logger.info(
            f"Start creating {self.number_of_clones} clones on {self.interface} PVC of size {self.pvc_size} GB."
        )
        clones_list = []
        for i in range(self.number_of_clones):
            index = i + 1
            logger.info(f"Start creation of clone number {index}.")
            cloned_pvc_obj = pvc.create_pvc_clone(
                sc_name=self.pvc_obj.backed_sc,
                parent_pvc=self.pvc_obj.name,
                pvc_name=f"clone-pas-test-{index}",
                clone_yaml=Interfaces_info[self.interface]["clone_yaml"],
                namespace=self.namespace,
                storage_size=self.pvc_size + "Gi",
            )
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND,
                                            self.timeout)
            # TODO: add flattening for RBD devices
            cloned_pvc_obj.reload()
            clones_list.append(cloned_pvc_obj)
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {self.pvc_size} pvc {self.pvc_obj.name} was created."
            )

        # Delete the clones one by one and wait for each deletion
        logger.info(
            f"Start deleting {self.number_of_clones} clones on {self.interface} PVC of size {self.pvc_size} GB."
        )
        index = 0
        for clone in clones_list:
            index += 1
            pvc_reclaim_policy = clone.reclaim_policy
            clone.delete()
            logger.info(
                f"Deletion of clone number {index}, the clone name is {clone.name}."
            )
            clone.ocp.wait_for_delete(clone.name, self.timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(clone.backed_pv)

        return clones_list
Example #7
def teardown_fs():
    global CEPHFS_PVC_OBJ, CEPHFS_POD_OBJ
    log.info("deleting cephfs pod")
    CEPHFS_POD_OBJ.delete()
    log.info("deleting cephfs pvc")
    CEPHFS_PVC_OBJ.delete()
    assert helpers.validate_pv_delete(CEPHFS_PVC_OBJ.backed_pv)
    log.info("Deleting CEPHFS Secret")
    CEPHFS_SECRET_OBJ.delete()
    log.info("Deleting CephFS Storageclass")
    CEPHFS_SC_OBJ.delete()
Example #8
def teardown_rbd():
    """
    Tearing down the environment
    Deleting the pod, replicated pool, PVC, storage class and secret of RBD
    """
    global RBD_PVC_OBJ, RBD_POD_OBJ
    log.info("deleting rbd pod")
    RBD_POD_OBJ.delete()
    log.info("Deleting RBD PVC")
    RBD_PVC_OBJ.delete()
    assert helpers.validate_pv_delete(RBD_PVC_OBJ.backed_pv)
    log.info("Deleting CEPH BLOCK POOL")
    RBD_POOL.delete()
    log.info("Deleting RBD Secret")
    RBD_SECRET_OBJ.delete()
    log.info("Deleting RBD Storageclass")
    RBD_SC_OBJ.delete()
Example #9
 def finalizer():
     """
     Delete the resources created during the test
     """
     failed_to_delete = []
     for resource_type in pod, pvc, storageclass:
         for resource in resource_type:
             resource.delete()
             try:
                 resource.ocp.wait_for_delete(resource.name)
             except TimeoutError:
                 failed_to_delete.append(resource)
             if resource.kind == constants.PVC:
                 log.info("Checking whether PV is deleted")
                 assert helpers.validate_pv_delete(resource.backed_pv)
     if failed_to_delete:
         raise ResourceLeftoversException(
             f"Failed to delete resources: {failed_to_delete}"
         )
Example #10
    def test_noobaa_db_backup_and_recovery(
        self,
        pvc_factory,
        pod_factory,
        snapshot_factory,
        bucket_factory,
        rgw_bucket_factory,
    ):
        """
        Test case to verify noobaa backup and recovery

        1. Take snapshot db-noobaa-db-0 PVC and retore it to PVC
        2. Scale down the statefulset noobaa-db
        3. Get the yaml of the current PVC, db-noobaa-db-0 and
           change the parameter persistentVolumeReclaimPolicy to Retain for restored PVC
        4. Delete both PVCs, the PV for the original claim db-noobaa-db-0 will be removed.
           The PV for claim db-noobaa-db-0-snapshot-restore will move to ‘Released’
        5. Edit again restore PV and remove the claimRef section.
           The volume will transition to Available.
        6. Edit the yaml db-noobaa-db-0.yaml and change the setting volumeName to restored PVC.
        7. Scale up the stateful set again and the pod should be running

        """

        # Initialise variable
        self.noobaa_db_sst_name = "noobaa-db-pg"

        # Get noobaa pods before execution
        noobaa_pods = get_noobaa_pods()

        # Get noobaa PVC before execution
        noobaa_pvc_obj = get_pvc_objs(pvc_names=["db-noobaa-db-pg-0"])
        noobaa_pv_name = noobaa_pvc_obj[0].get("spec").get("spec").get(
            "volumeName")

        # Take snapshot db-noobaa-db-0 PVC
        log.info(f"Creating snapshot of the {noobaa_pvc_obj[0].name} PVC")
        snap_obj = snapshot_factory(
            pvc_obj=noobaa_pvc_obj[0],
            wait=True,
            snapshot_name=f"{noobaa_pvc_obj[0].name}-snapshot",
        )
        log.info(
            f"Successfully created snapshot {snap_obj.name}; it is in Ready state"
        )

        # Restore it to PVC
        log.info(f"Restoring snapshot {snap_obj.name} to create new PVC")
        sc_name = noobaa_pvc_obj[0].get().get("spec").get("storageClassName")
        pvc_size = (noobaa_pvc_obj[0].get().get("spec").get("resources").get(
            "requests").get("storage"))
        self.restore_pvc_obj = create_restore_pvc(
            sc_name=sc_name,
            snap_name=snap_obj.name,
            namespace=snap_obj.namespace,
            size=pvc_size,
            pvc_name=f"{snap_obj.name}-restore",
            volume_mode=snap_obj.parent_volume_mode,
            access_mode=snap_obj.parent_access_mode,
        )
        wait_for_resource_state(self.restore_pvc_obj, constants.STATUS_BOUND)
        self.restore_pvc_obj.reload()
        log.info(f"Succeesfuly created PVC {self.restore_pvc_obj.name} "
                 f"from snapshot {snap_obj.name}")

        # Scale down the statefulset noobaa-db
        assert modify_statefulset_replica_count(
            statefulset_name=self.noobaa_db_sst_name, replica_count=0
        ), f"Failed to scale down the statefulset {self.noobaa_db_sst_name}"

        # Get the noobaa-db PVC
        pvc_obj = OCP(kind=constants.PVC,
                      namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
        noobaa_pvc_yaml = pvc_obj.get(resource_name=noobaa_pvc_obj[0].name)

        # Get the restored noobaa PVC and
        # change the parameter persistentVolumeReclaimPolicy to Retain
        restored_noobaa_pvc_obj = get_pvc_objs(
            pvc_names=[f"{snap_obj.name}-restore"])
        restored_noobaa_pv_name = (restored_noobaa_pvc_obj[0].get("spec").get(
            "spec").get("volumeName"))
        pv_obj = OCP(kind=constants.PV,
                     namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
        params = '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
        assert pv_obj.patch(
            resource_name=restored_noobaa_pv_name, params=params), (
                "Failed to change the parameter persistentVolumeReclaimPolicy"
                f" to Retain {restored_noobaa_pv_name}")

        # Delete both PVCs
        delete_pvcs(pvc_objs=[noobaa_pvc_obj[0], restored_noobaa_pvc_obj[0]])

        # Validate original claim db-noobaa-db-0 removed
        assert validate_pv_delete(
            pv_name=noobaa_pv_name
        ), f"PV not deleted, still exist {noobaa_pv_name}"

        # Validate PV for claim db-noobaa-db-0-snapshot-restore is in Released state
        pv_obj.wait_for_resource(condition=constants.STATUS_RELEASED,
                                 resource_name=restored_noobaa_pv_name)

        # Edit the restored PV again and remove the claimRef section
        log.info(
            f"Removing the claimRef section from PV {restored_noobaa_pv_name}")
        params = '[{"op": "remove", "path": "/spec/claimRef"}]'
        pv_obj.patch(resource_name=restored_noobaa_pv_name,
                     params=params,
                     format_type="json")
        log.info(
            f"Successfully removed claimRef section from PV {restored_noobaa_pv_name}"
        )

        # Validate PV is in Available state
        pv_obj.wait_for_resource(condition=constants.STATUS_AVAILABLE,
                                 resource_name=restored_noobaa_pv_name)

        # Edit the yaml db-noobaa-db-0.yaml and change the
        # volumeName setting to the restored PV
        noobaa_pvc_yaml["spec"]["volumeName"] = restored_noobaa_pv_name
        noobaa_pvc_yaml = OCS(**noobaa_pvc_yaml)
        noobaa_pvc_yaml.create()

        # Validate noobaa PVC is in bound state
        pvc_obj.wait_for_resource(
            condition=constants.STATUS_BOUND,
            resource_name=noobaa_pvc_obj[0].name,
            timeout=120,
        )

        # Scale up the statefulset again
        assert modify_statefulset_replica_count(
            statefulset_name=self.noobaa_db_sst_name, replica_count=1
        ), f"Failed to scale up the statefulset {self.noobaa_db_sst_name}"

        # Validate noobaa pod is up and running
        pod_obj = OCP(kind=constants.POD,
                      namespace=defaults.ROOK_CLUSTER_NAMESPACE)
        pod_obj.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_count=len(noobaa_pods),
            selector=constants.NOOBAA_APP_LABEL,
        )

        # Change the parameter persistentVolumeReclaimPolicy to Delete again
        params = '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
        assert pv_obj.patch(
            resource_name=restored_noobaa_pv_name, params=params), (
                "Failed to change the parameter persistentVolumeReclaimPolicy"
                f" to Delete {restored_noobaa_pv_name}")
        log.info(
            "Changed the parameter persistentVolumeReclaimPolicy to Delete again"
        )

        # Verify all storage pods are running
        wait_for_storage_pods()

        # Creating Resources
        log.info("Creating Resources using sanity helpers")
        self.sanity_helpers.create_resources(pvc_factory, pod_factory,
                                             bucket_factory,
                                             rgw_bucket_factory)
        # Deleting Resources
        self.sanity_helpers.delete_resources()

        # Verify everything running fine
        log.info(
            "Verifying that all resources are running and match the expected result")
        self.sanity_helpers.health_check(tries=120)
    def test_pvc_creation_deletion_measurement_performance(
            self, teardown_factory, pvc_size):
        """
        Measuring PVC creation and deletion times for pvc samples
        Verifying that those times are within required limits
        """

        # Getting the full path for the test logs
        self.full_log_path = get_full_test_logs_path(cname=self)
        if self.interface == constants.CEPHBLOCKPOOL:
            self.sc = "RBD"
        if self.interface == constants.CEPHFILESYSTEM:
            self.sc = "CephFS"
        self.full_log_path += f"-{self.sc}-{pvc_size}"
        log.info(f"Logs file path name is : {self.full_log_path}")

        self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())

        self.get_env_info()

        # Initialize the results doc file.
        self.full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path))
        self.full_results.add_key("pvc_size", pvc_size)
        num_of_samples = 5
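        # Accepted creation/deletion times are in seconds; CephFS PVC deletion
        # is allowed more time than RBD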
        accepted_creation_time = 1
        accepted_deletion_time = 2 if self.interface == constants.CEPHFILESYSTEM else 1
        self.full_results.add_key("samples", num_of_samples)

        accepted_creation_deviation_percent = 50
        accepted_deletion_deviation_percent = 50

        creation_time_measures = []
        deletion_time_measures = []
        msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

        for i in range(num_of_samples):
            logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
            start_time = datetime.datetime.utcnow().strftime(
                "%Y-%m-%dT%H:%M:%SZ")
            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

            creation_time = performance_lib.measure_pvc_creation_time(
                self.interface, pvc_obj.name, start_time)

            logging.info(
                f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
            )
            if creation_time > accepted_creation_time:
                raise ex.PerformanceException(
                    f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
                    f"{accepted_creation_time} seconds.")
            creation_time_measures.append(creation_time)

            pv_name = pvc_obj.backed_pv
            pvc_reclaim_policy = pvc_obj.reclaim_policy

            pod_obj = self.write_file_on_pvc(pvc_obj)
            pod_obj.delete(wait=True)
            teardown_factory(pvc_obj)
            logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(pvc_obj.name)
                helpers.validate_pv_delete(pvc_obj.backed_pv)
                deletion_time = helpers.measure_pvc_deletion_time(
                    self.interface, pv_name)
                logging.info(
                    f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
                )
                if deletion_time > accepted_deletion_time:
                    raise ex.PerformanceException(
                        f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
                        f"{accepted_deletion_time} seconds.")
                deletion_time_measures.append(deletion_time)
            else:
                logging.info(
                    f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
                    f" therefore not measuring deletion time for this PVC.")

        creation_average = self.process_time_measurements(
            "creation",
            creation_time_measures,
            accepted_creation_deviation_percent,
            msg_prefix,
        )
        self.full_results.add_key("creation-time", creation_average)
        deletion_average = self.process_time_measurements(
            "deletion",
            deletion_time_measures,
            accepted_deletion_deviation_percent,
            msg_prefix,
        )
        self.full_results.add_key("deletion-time", deletion_average)
        self.full_results.all_results["creation"] = creation_time_measures
        self.full_results.all_results["deletion"] = deletion_time_measures
        self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
        self.full_results.add_key("test_time", {
            "start": self.start_time,
            "end": self.end_time
        })
        self.full_results.es_write()
        log.info(
            f"The result can be found at: {self.full_results.results_link()}")

        if not self.dev_mode:
            # all the results are OK, the test passes, push the results to the codespeed
            push_to_pvc_time_dashboard(self.interface, "1-pvc-creation",
                                       creation_average)
            push_to_pvc_time_dashboard(self.interface, "1-pvc-deletion",
                                       deletion_average)
    def test_pvc_clone_performance_multiple_files(
        self,
        pvc_factory,
        interface,
        copies,
        timeout,
    ):
        """
        Test assign nodeName to a pod using RWX pvc
        Each kernel (unzipped) is 892M and 61694 files
        The test creates a pvc and a pods, writes kernel files multiplied by number of copies
        The test creates number of clones samples, calculates creation and deletion times for each one the clones
        and calculates the average creation and average deletion times
        """
        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"

        test_start_time = self.get_time()
        helpers.pull_images(constants.PERF_IMAGE)
        # Download a linux Kernel

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        # Create a PVC
        accessmode = constants.ACCESS_MODE_RWX
        if interface == constants.CEPHBLOCKPOOL:
            accessmode = constants.ACCESS_MODE_RWO

        pvc_size = "100"
        try:
            pvc_obj = pvc_factory(
                interface=interface,
                access_mode=accessmode,
                status=constants.STATUS_BOUND,
                size=pvc_size,
            )
        except Exception as e:
            logger.error(f"The PVC sample was not created, exception {str(e)}")
            raise PVCNotCreated("PVC did not reach BOUND state.")

        # Create a pod on one node
        logger.info(f"Creating Pod with pvc {pvc_obj.name}")

        try:
            pod_obj = helpers.create_pod(
                interface_type=interface,
                pvc_name=pvc_obj.name,
                namespace=pvc_obj.namespace,
                pod_dict_path=constants.PERF_POD_YAML,
            )
        except Exception as e:
            logger.error(
                f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
            )
            raise PodNotCreated("Pod on PVC was not created.")

        # Verify that the pod is in Running state
        logger.info("Checking whether the pod is running")
        helpers.wait_for_resource_state(resource=pod_obj,
                                        state=constants.STATUS_RUNNING,
                                        timeout=timeout)

        pod_name = pod_obj.name
        pod_path = "/mnt"

        _ocp = OCP(namespace=pvc_obj.namespace)

        rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
        _ocp.exec_oc_cmd(rsh_cmd)

        rsh_cmd = f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
        _ocp.exec_oc_cmd(rsh_cmd)

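        # Duplicate the unpacked kernel tree 'copies' times to multiply the
        # amount of data and the number of files stored on the PVC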
        for x in range(copies):
            rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
            _ocp.exec_oc_cmd(rsh_cmd)
            rsh_cmd = f"exec {pod_name} -- sync"
            _ocp.exec_oc_cmd(rsh_cmd)

        logger.info("Getting the amount of data written to the PVC")
        rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
        data_written = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
        logger.info(f"The amount of written data is {data_written}")

        rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
        files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
        logger.info(
            f"For {interface} - The number of files written to the pod is {files_written}"
        )

        # delete the pod
        pod_obj.delete(wait=False)

        logger.info("Wait for the pod to be deleted")
        performance_lib.wait_for_resource_bulk_status(
            "pod", 0, pvc_obj.namespace, constants.STATUS_COMPLETED, timeout,
            5)
        logger.info("The pod was deleted")

        num_of_clones = 11
        # increasing the timeout since clone creation time is longer than pod attach time
        timeout = 18000

        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        if interface == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        clone_creation_measures = []
        csi_clone_creation_measures = []
        clone_deletion_measures = []
        csi_clone_deletion_measures = []

        # Take the current time so that parsing the provisioning log will be faster
        start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        for i in range(num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(
                pvc_obj.backed_sc,
                pvc_obj.name,
                clone_yaml,
                pvc_obj.namespace,
                storage_size=pvc_size + "Gi",
            )
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {pvc_obj.name} was created."
            )
            create_time = helpers.measure_pvc_creation_time(
                interface, cloned_pvc_obj.name)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            clone_creation_measures.append(create_time)
            csi_clone_creation_measures.append(
                performance_lib.csi_pvc_time_measure(interface, cloned_pvc_obj,
                                                     "create", start_time))

            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            clone_deletion_measures.append(delete_time)
            csi_clone_deletion_measures.append(
                performance_lib.csi_pvc_time_measure(interface, cloned_pvc_obj,
                                                     "delete", start_time))

        os.remove(file_path)
        os.rmdir(dir_path)
        pvc_obj.delete()

        average_creation_time = statistics.mean(clone_creation_measures)
        logger.info(f"Average creation time is {average_creation_time} secs.")
        average_csi_creation_time = statistics.mean(
            csi_clone_creation_measures)
        logger.info(
            f"Average csi creation time is {average_csi_creation_time} secs.")

        average_deletion_time = statistics.mean(clone_deletion_measures)
        logger.info(f"Average deletion time is {average_deletion_time} secs.")
        average_csi_deletion_time = statistics.mean(
            csi_clone_deletion_measures)
        logger.info(
            f"Average csi deletion time is {average_csi_deletion_time} secs.")

        # Produce ES report

        # Collecting environment information
        self.get_env_info()
        self.results_path = get_full_test_logs_path(cname=self)
        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "test_pvc_clone_performance_multiple_files_fullres",
            ))

        full_results.add_key("files_number", files_written)

        test_end_time = self.get_time()

        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        full_results.add_key("interface", interface)
        full_results.add_key("clones_number", num_of_clones)
        full_results.add_key("pvc_size", pvc_size)
        full_results.add_key("average_clone_creation_time",
                             average_creation_time)
        full_results.add_key("average_csi_clone_creation_time",
                             average_csi_creation_time)
        full_results.add_key("average_clone_deletion_time",
                             average_deletion_time)
        full_results.add_key("average_csi_clone_deletion_time",
                             average_csi_deletion_time)

        full_results.all_results = {
            "clone_creation_time": clone_creation_measures,
            "csi_clone_creation_time": csi_clone_creation_measures,
            "clone_deletion_time": clone_deletion_measures,
            "csi_clone_deletion_time": csi_clone_deletion_measures,
        }

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create a text file with the results of all subtests (4, according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pvc_clone_performance_multiple_files")
            self.write_result_to_file(res_link)
Example #13
    def factory(snapshot_factory=snapshot_factory):
        # Get noobaa pods before execution
        noobaa_pods = pod.get_noobaa_pods()

        # Get noobaa PVC before execution
        noobaa_pvc_obj = pvc.get_pvc_objs(pvc_names=["db-noobaa-db-pg-0"])
        noobaa_pv_name = noobaa_pvc_obj[0].get("spec").get("spec").get(
            "volumeName")

        # Take snapshot db-noobaa-db-0 PVC
        logger.info(f"Creating snapshot of the {noobaa_pvc_obj[0].name} PVC")
        snap_obj = snapshot_factory(
            pvc_obj=noobaa_pvc_obj[0],
            wait=True,
            snapshot_name=f"{noobaa_pvc_obj[0].name}-snapshot",
        )
        logger.info(
            f"Successfully created snapshot {snap_obj.name}; it is in Ready state"
        )

        # Restore it to PVC
        logger.info(f"Restoring snapshot {snap_obj.name} to create new PVC")
        sc_name = noobaa_pvc_obj[0].get().get("spec").get("storageClassName")
        pvc_size = (noobaa_pvc_obj[0].get().get("spec").get("resources").get(
            "requests").get("storage"))
        restore_pvc_obj = pvc.create_restore_pvc(
            sc_name=sc_name,
            snap_name=snap_obj.name,
            namespace=snap_obj.namespace,
            size=pvc_size,
            pvc_name=f"{snap_obj.name}-restore",
            volume_mode=snap_obj.parent_volume_mode,
            access_mode=snap_obj.parent_access_mode,
        )
        restore_pvc_objs.append(restore_pvc_obj)
        wait_for_resource_state(restore_pvc_obj, constants.STATUS_BOUND)
        restore_pvc_obj.reload()
        logger.info(f"Succeesfuly created PVC {restore_pvc_obj.name} "
                    f"from snapshot {snap_obj.name}")

        # Scale down the statefulset noobaa-db
        assert modify_statefulset_replica_count(
            statefulset_name=constants.NOOBAA_DB_STATEFULSET, replica_count=0
        ), f"Failed to scale down the statefulset {constants.NOOBAA_DB_STATEFULSET}"

        # Get the noobaa-db PVC
        pvc_obj = OCP(kind=constants.PVC,
                      namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
        noobaa_pvc_yaml = pvc_obj.get(resource_name=noobaa_pvc_obj[0].name)

        # Get the restored noobaa PVC and
        # change the parameter persistentVolumeReclaimPolicy to Retain
        restored_noobaa_pvc_obj = pvc.get_pvc_objs(
            pvc_names=[f"{snap_obj.name}-restore"])
        restored_noobaa_pv_name = (restored_noobaa_pvc_obj[0].get("spec").get(
            "spec").get("volumeName"))
        pv_obj = OCP(kind=constants.PV,
                     namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
        params = '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
        assert pv_obj.patch(
            resource_name=restored_noobaa_pv_name, params=params), (
                "Failed to change the parameter persistentVolumeReclaimPolicy"
                f" to Retain {restored_noobaa_pv_name}")

        # Delete both PVCs
        pvc.delete_pvcs(
            pvc_objs=[noobaa_pvc_obj[0], restored_noobaa_pvc_obj[0]])

        # Validate original claim db-noobaa-db-0 removed
        assert validate_pv_delete(
            pv_name=noobaa_pv_name
        ), f"PV not deleted, still exist {noobaa_pv_name}"

        # Validate PV for claim db-noobaa-db-0-snapshot-restore is in Released state
        pv_obj.wait_for_resource(condition=constants.STATUS_RELEASED,
                                 resource_name=restored_noobaa_pv_name)

        # Edit the restored PV again and remove the claimRef section
        logger.info(
            f"Removing the claimRef section from PV {restored_noobaa_pv_name}")
        params = '[{"op": "remove", "path": "/spec/claimRef"}]'
        pv_obj.patch(resource_name=restored_noobaa_pv_name,
                     params=params,
                     format_type="json")
        logger.info(
            f"Successfully removed claimRef section from PV {restored_noobaa_pv_name}"
        )

        # Validate PV is in Available state
        pv_obj.wait_for_resource(condition=constants.STATUS_AVAILABLE,
                                 resource_name=restored_noobaa_pv_name)

        # Edit the yaml db-noobaa-db-0.yaml and change the
        # volumeName setting to the restored PV
        noobaa_pvc_yaml["spec"]["volumeName"] = restored_noobaa_pv_name
        noobaa_pvc_yaml = OCS(**noobaa_pvc_yaml)
        noobaa_pvc_yaml.create()

        # Validate noobaa PVC is in bound state
        pvc_obj.wait_for_resource(
            condition=constants.STATUS_BOUND,
            resource_name=noobaa_pvc_obj[0].name,
            timeout=120,
        )

        # Scale up the statefulset again
        assert modify_statefulset_replica_count(
            statefulset_name=constants.NOOBAA_DB_STATEFULSET, replica_count=1
        ), f"Failed to scale up the statefulset {constants.NOOBAA_DB_STATEFULSET}"

        # Validate noobaa pod is up and running
        pod_obj = OCP(kind=constants.POD,
                      namespace=defaults.ROOK_CLUSTER_NAMESPACE)
        pod_obj.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_count=len(noobaa_pods),
            selector=constants.NOOBAA_APP_LABEL,
        )

        # Change the parameter persistentVolumeReclaimPolicy to Delete again
        params = '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
        assert pv_obj.patch(
            resource_name=restored_noobaa_pv_name, params=params), (
                "Failed to change the parameter persistentVolumeReclaimPolicy"
                f" to Delete {restored_noobaa_pv_name}")
        logger.info(
            "Changed the parameter persistentVolumeReclaimPolicy to Delete again"
        )
Example #14
    def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                             file_size, teardown_factory):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create single clone for an existing pvc,
        Measure clone creation time and speed
        Delete the created clone
        Measure clone deletion time and speed
        Note: increasing the max_num_of_clones value increases the number of clones to be created/deleted
        """

        file_size_for_io = file_size[:-1]

        performance_lib.write_fio_on_pod(self.pod_obj, file_size_for_io)

        max_num_of_clones = 1
        clone_creation_measures = []
        clones_list = []
        timeout = 18000
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        namespace = self.pvc_obj.namespace
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        file_size_mb = convert_device_size(file_size, "MB")
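        # file_size_mb is used below to derive clone creation/deletion speed in MB/sec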

        # Create a single clone (or several, one by one, if max_num_of_clones > 1)
        logger.info(
            f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                                  parent_pvc,
                                                  clone_yaml,
                                                  namespace,
                                                  storage_size=pvc_size + "Gi")
            teardown_factory(cloned_pvc_obj)
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
            )
            clones_list.append(cloned_pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                interface_type, cloned_pvc_obj.name)
            creation_speed = int(file_size_mb / create_time)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
            )
            creation_measures = {
                "clone_num": i + 1,
                "time": create_time,
                "speed": creation_speed,
            }
            clone_creation_measures.append(creation_measures)

        # Delete the clones one by one, measuring deletion time and speed for each clone created above;
        # with a single clone this loop runs only once
        clone_deletion_measures = []

        logger.info(
            f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            cloned_pvc_obj = clones_list[i]
            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface_type, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            deletion_speed = int(file_size_mb / delete_time)
            logger.info(
                f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
            )
            deletion_measures = {
                "clone_num": i + 1,
                "time": delete_time,
                "speed": deletion_speed,
            }
            clone_deletion_measures.append(deletion_measures)

        logger.info(
            f"Printing clone creation time and speed for {max_num_of_clones} clones "
            f"on {interface_type} PVC of size {pvc_size} GB:")

        for c in clone_creation_measures:
            logger.info(
                f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
            )

        logger.info(
            f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
        )
        for d in clone_deletion_measures:
            logger.info(
                f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
            )

        logger.info("test_clones_creation_performance finished successfully.")
Example #15
    def factory(
        num_of_pvcs=100,
        pvc_size=2,
        bulk=False,
        project=None,
        measure=True,
        delete=True,
        file_name=None,
        fio_percentage=25,
        verify_fio=False,
        expand=False,
    ):
        """
        Args:
            num_of_pvcs (int) : Number of PVCs / PODs we want to create.
            pvc_size (int) : Size of each PVC in GB.
            bulk (bool) : True for bulk operations, False otherwise.
            project (obj) : Project obj inside which the PODs/PVCs are created.
            measure (bool) : True if we want to measure the PVC creation/deletion time and POD to PVC attach time,
                                False otherwise.
            delete (bool) : True if we want to delete PVCs and PODs, False otherwise
            file_name (str) : Name of the file on which FIO is performed.
            fio_percentage (float) : Percentage of PVC space we want to be utilized for FIO.
            verify_fio (bool) : True if we want to verify FIO, False otherwise.
            expand (bool) : True if we want to verify_fio for expansion of PVCs operation, False otherwise.

        """

        if not project:
            project = project_factory("longevity")
        pvc_objs = list()
        executor = ThreadPoolExecutor(max_workers=1)
        start_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
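        # Record the start time; it is passed to measure_pvc_creation_time() below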
        for interface in (constants.CEPHFILESYSTEM, constants.CEPHBLOCKPOOL):
            if interface == constants.CEPHFILESYSTEM:
                access_modes = [
                    constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX
                ]
                num_of_pvc = num_of_pvcs // 2
            else:
                access_modes = [
                    constants.ACCESS_MODE_RWO,
                    constants.ACCESS_MODE_RWO + "-" +
                    constants.VOLUME_MODE_BLOCK,
                    constants.ACCESS_MODE_RWX + "-" +
                    constants.VOLUME_MODE_BLOCK,
                ]
                num_of_pvc = num_of_pvcs - num_of_pvcs // 2

            # Create PVCs
            if num_of_pvc > 0:
                pvc_objs_tmp = multi_pvc_factory(
                    interface=interface,
                    size=pvc_size,
                    project=project,
                    access_modes=access_modes,
                    status=constants.STATUS_BOUND,
                    num_of_pvc=num_of_pvc,
                    wait_each=not bulk,
                )
                log.info("PVC creation was successful.")
                pvc_objs.extend(pvc_objs_tmp)

                if measure:
                    # Measure PVC Creation Time
                    measure_pvc_creation_time(interface, pvc_objs_tmp,
                                              start_time)

            else:
                log.info(
                    f"Num of PVCs of interface - {interface} = {num_of_pvc}. So no PVCs created."
                )

        # PVC and PV Teardown
        for pvc_obj in pvc_objs:
            teardown_factory(pvc_obj)
            teardown_factory(pvc_obj.backed_pv_obj)

        # Create PODs
        pod_objs = list()
        for pvc_obj in pvc_objs:
            if pvc_obj.get_pvc_vol_mode == constants.VOLUME_MODE_BLOCK:
                if not bulk:
                    pod_objs.append(
                        pod_factory(
                            pvc=pvc_obj,
                            raw_block_pv=True,
                            status=constants.STATUS_RUNNING,
                            pod_dict_path=constants.PERF_BLOCK_POD_YAML,
                        ))
                else:
                    pod_objs.append(
                        pod_factory(
                            pvc=pvc_obj,
                            raw_block_pv=True,
                            pod_dict_path=constants.PERF_BLOCK_POD_YAML,
                        ))
            else:
                if not bulk:
                    pod_objs.append(
                        pod_factory(
                            pvc=pvc_obj,
                            status=constants.STATUS_RUNNING,
                            pod_dict_path=constants.PERF_POD_YAML,
                        ))
                else:
                    pod_objs.append(
                        pod_factory(pvc=pvc_obj,
                                    pod_dict_path=constants.PERF_POD_YAML))

            log.info(f"POD {pod_objs[-1].name} creation was successful.")
        log.info("All PODs are created.")

        if bulk:
            for pod_obj in pod_objs:
                executor.submit(
                    helpers.wait_for_resource_state,
                    pod_obj,
                    constants.STATUS_RUNNING,
                    timeout=300,
                )
                log.info(f"POD {pod_obj.name} reached Running State.")

            log.info("All PODs reached Running State.")

        if measure:
            # Measure POD to PVC attach time
            measure_pod_to_pvc_attach_time(pod_objs)

        # POD Teardown
        for pod_obj in pod_objs:
            teardown_factory(pod_obj)

        # Run FIO on PODs
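        # fio_size: fio_percentage of the PVC size, converted from GB to MB
        # (1 GB treated as 1000 MB here)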
        fio_size = int((fio_percentage / 100) * pvc_size * 1000)
        for pod_obj in pod_objs:
            storage_type = ("block" if pod_obj.pvc.get_pvc_vol_mode
                            == constants.VOLUME_MODE_BLOCK else "fs")
            pod_obj.wl_setup_done = True
            pod_obj.wl_obj = workload.WorkLoad(
                "test_workload_fio",
                pod_obj.get_storage_path(storage_type),
                "fio",
                storage_type,
                pod_obj,
                1,
            )
            if not file_name:
                pod_obj.run_io(storage_type, f"{fio_size}M")
            else:
                pod_obj.run_io(
                    storage_type=storage_type,
                    size=f"{fio_size}M",
                    runtime=20,
                    fio_filename=file_name,
                    end_fsync=1,
                )

        if verify_fio:
            log.info(
                f"Waiting for IO to complete on all pods to utilise {fio_percentage}% of the PVC space"
            )

            for pod_obj in pod_objs:
                # Wait for IO to finish
                pod_obj.get_fio_results(3600)
                log.info(f"IO finished on pod {pod_obj.name}")
                is_block = (True if pod_obj.pvc.get_pvc_vol_mode
                            == constants.VOLUME_MODE_BLOCK else False)
                file_name_pod = (file_name
                                 if not is_block else pod_obj.get_storage_path(
                                     storage_type="block"))
                # Verify presence of the file
                file_path = (file_name_pod if is_block else pod.get_file_path(
                    pod_obj, file_name_pod))
                log.info(f"Actual file path on the pod {file_path}")
                assert pod.check_file_existence(
                    pod_obj, file_path), f"File {file_name_pod} does not exist"
                log.info(f"File {file_name_pod} exists in {pod_obj.name}")

                if expand and is_block:
                    # Read IO from block PVCs using dd and calculate md5sum.
                    # This dd command reads the data from the device, writes it to
                    # stdout, and reads md5sum from stdin.
                    pod_obj.pvc.md5sum = pod_obj.exec_sh_cmd_on_pod(
                        command=(f"dd iflag=direct if={file_path} bs=10M "
                                 f"count={fio_size // 10} | md5sum"))
                    log.info(
                        f"md5sum of {file_name_pod}: {pod_obj.pvc.md5sum}")
                else:
                    # Calculate md5sum of the file
                    pod_obj.pvc.md5sum = pod.cal_md5sum(pod_obj, file_name_pod)

        log.info("POD FIO was successful.")

        if delete:
            # Delete PODs
            pod_delete = executor.submit(delete_pods, pod_objs, wait=not bulk)
            pod_delete.result()

            log.info("Verified: Pods are deleted.")

            # Delete PVCs
            pvc_delete = executor.submit(delete_pvcs,
                                         pvc_objs,
                                         concurrent=bulk)
            res = pvc_delete.result()
            if not res:
                raise ex.UnexpectedBehaviour("Deletion of PVCs failed")
            log.info("PVC deletion was successful.")

            # Validate PV Deletion
            for pvc_obj in pvc_objs:
                helpers.validate_pv_delete(pvc_obj.backed_pv)
            log.info("PV deletion was successful.")

            if measure:
                # Measure PVC Deletion Time
                for interface in (constants.CEPHFILESYSTEM,
                                  constants.CEPHBLOCKPOOL):
                    if interface == constants.CEPHFILESYSTEM:
                        measure_pvc_deletion_time(
                            interface,
                            pvc_objs[:num_of_pvcs // 2],
                        )
                    else:
                        measure_pvc_deletion_time(
                            interface,
                            pvc_objs[num_of_pvcs // 2:],
                        )

            log.info(f"Successfully deleted {num_of_pvcs} PVCs")
        else:
            return pvc_objs, pod_objs
Example #16
    def test_pvc_creation_deletion_measurement_performance(
        self, teardown_factory, pvc_size
    ):
        """
        Measuring PVC creation and deletion times for pvc samples
        Verifying that those times are within required limits
        """

        num_of_samples = 5
        accepted_creation_time = 1
        accepted_deletion_time = 2 if self.interface == constants.CEPHFILESYSTEM else 1

        accepted_creation_deviation_percent = 50
        accepted_deletion_deviation_percent = 50

        creation_time_measures = []
        deletion_time_measures = []
        msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

        for i in range(num_of_samples):
            logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
            start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

            creation_time = performance_lib.measure_pvc_creation_time(
                self.interface, pvc_obj.name, start_time
            )

            logging.info(
                f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
            )
            if creation_time > accepted_creation_time:
                raise ex.PerformanceException(
                    f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
                    f"{accepted_creation_time} seconds."
                )
            creation_time_measures.append(creation_time)

            pv_name = pvc_obj.backed_pv
            pvc_reclaim_policy = pvc_obj.reclaim_policy

            pod_obj = self.write_file_on_pvc(pvc_obj)
            pod_obj.delete(wait=True)
            teardown_factory(pvc_obj)
            logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(pvc_obj.name)
                helpers.validate_pv_delete(pvc_obj.backed_pv)
                deletion_time = helpers.measure_pvc_deletion_time(
                    self.interface, pv_name
                )
                logging.info(
                    f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
                )
                if deletion_time > accepted_deletion_time:
                    raise ex.PerformanceException(
                        f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
                        f"{accepted_deletion_time} seconds."
                    )
                deletion_time_measures.append(deletion_time)
            else:
                logging.info(
                    f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
                    f" therefore not measuring deletion time for this PVC."
                )

        creation_average = self.process_time_measurements(
            "creation",
            creation_time_measures,
            accepted_creation_deviation_percent,
            msg_prefix,
        )
        deletion_average = self.process_time_measurements(
            "deletion",
            deletion_time_measures,
            accepted_deletion_deviation_percent,
            msg_prefix,
        )

        # all the results are OK, the test passes, push the results to the codespeed
        push_to_pvc_time_dashboard(self.interface, "1-pvc-creation", creation_average)
        push_to_pvc_time_dashboard(self.interface, "1-pvc-deletion", deletion_average)
    def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                             file_size, teardown_factory):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create clones for an existing pvc,
        Measure clones average creation time and speed
        Delete the created clone
        Measure clone average deletion time and speed
        Note: increasing the max_num_of_clones value increases the number of clones to be created/deleted
        """

        file_size_for_io = file_size[:-1]

        performance_lib.write_fio_on_pod(self.pod_object, file_size_for_io)

        max_num_of_clones = 10
        clone_creation_measures = []
        csi_clone_creation_measures = []
        clones_list = []
        timeout = 18000
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        namespace = self.pvc_obj.namespace
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        file_size_mb = convert_device_size(file_size, "MB")

        logger.info(
            f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        # Take the start time so that parsing the provisioner logs later is faster.
        start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        for i in range(max_num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                                  parent_pvc,
                                                  clone_yaml,
                                                  namespace,
                                                  storage_size=pvc_size + "Gi")
            teardown_factory(cloned_pvc_obj)
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for the {pvc_size} GB PVC {parent_pvc} was created."
            )
            clones_list.append(cloned_pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                interface_type, cloned_pvc_obj.name)
            creation_speed = int(file_size_mb / create_time)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
            )
            creation_measures = {
                "clone_num": i + 1,
                "time": create_time,
                "speed": creation_speed,
            }
            clone_creation_measures.append(creation_measures)
            csi_clone_creation_measures.append(
                performance_lib.csi_pvc_time_measure(self.interface,
                                                     cloned_pvc_obj, "create",
                                                     start_time))

        # Delete the clones created above one by one, measuring the deletion
        # time and speed of each. With a single clone this loop runs only once.
        clone_deletion_measures = []
        csi_clone_deletion_measures = []

        logger.info(
            f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            cloned_pvc_obj = clones_list[i]
            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deleting clone number {i + 1}, the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface_type, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            deletion_speed = int(file_size_mb / delete_time)
            logger.info(
                f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
            )
            deletion_measures = {
                "clone_num": i + 1,
                "time": delete_time,
                "speed": deletion_speed,
            }
            clone_deletion_measures.append(deletion_measures)
            csi_clone_deletion_measures.append(
                performance_lib.csi_pvc_time_measure(self.interface,
                                                     cloned_pvc_obj, "delete",
                                                     start_time))

        logger.info(
            f"Printing clone creation time and speed for {max_num_of_clones} clones "
            f"on {interface_type} PVC of size {pvc_size} GB:")
        for c in clone_creation_measures:
            logger.info(
                f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
            )
            logger.info(
                f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
            )
        creation_time_list = [r["time"] for r in clone_creation_measures]
        creation_speed_list = [r["speed"] for r in clone_creation_measures]
        average_creation_time = statistics.mean(creation_time_list)
        average_csi_creation_time = statistics.mean(
            csi_clone_creation_measures)
        average_creation_speed = statistics.mean(creation_speed_list)
        logger.info(f"Average creation time is {average_creation_time} secs.")
        logger.info(
            f"Average creation speed is {average_creation_speed} MB/sec.")

        logger.info(
            f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
        )
        for d in clone_deletion_measures:
            logger.info(
                f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
            )

        deletion_time_list = [r["time"] for r in clone_deletion_measures]
        deletion_speed_list = [r["speed"] for r in clone_deletion_measures]
        average_deletion_time = statistics.mean(deletion_time_list)
        average_csi_deletion_time = statistics.mean(
            csi_clone_deletion_measures)
        average_deletion_speed = statistics.mean(deletion_speed_list)
        logger.info(f"Average deletion time is  {average_deletion_time} secs.")
        logger.info(
            f"Average deletion speed is  {average_deletion_speed} Mb/sec.")
        logger.info("test_clones_creation_performance finished successfully.")

        self.results_path = get_full_test_logs_path(cname=self)

        # Produce the ES report
        # Collect environment information
        self.get_env_info()

        self.full_log_path = get_full_test_logs_path(cname=self)
        self.full_log_path += f"-{self.interface}-{pvc_size}-{file_size}"
        logger.info(f"Logs file path name is: {self.full_log_path}")

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_clone_performance",
            ))

        full_results.add_key("interface", self.interface)
        full_results.add_key("total_clone_number", max_num_of_clones)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("average_clone_creation_time",
                             average_creation_time)
        full_results.add_key("average_csi_clone_creation_time",
                             average_csi_creation_time)
        full_results.add_key("average_clone_deletion_time",
                             average_deletion_time)
        full_results.add_key("average_csi_clone_deletion_time",
                             average_csi_deletion_time)
        full_results.add_key("average_clone_creation_speed",
                             average_creation_speed)
        full_results.add_key("average_clone_deletion_speed",
                             average_deletion_speed)

        full_results.all_results = {
            "clone_creation_time": creation_time_list,
            "csi_clone_creation_time": csi_clone_creation_measures,
            "clone_deletion_time": deletion_time_list,
            "csi_clone_deletion_time": csi_clone_deletion_measures,
            "clone_creation_speed": creation_speed_list,
            "clone_deletion_speed": deletion_speed_list,
        }

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create a text file with the results of all subtests (8, according to the parameters)
            self.write_result_to_file(res_link)
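
    # A hedged, illustration-only sketch (not part of the test above): the speed
    # figures logged by this test are simply "data size in MB divided by elapsed
    # seconds". The hypothetical helper below mirrors that calculation, with a
    # hand-rolled size parser standing in for ocs-ci's convert_device_size().
    @staticmethod
    def clone_speed_mb_per_sec(file_size, elapsed_seconds):
        """Return MB/sec for a size string such as '600M' or '2G' (assumed format)."""
        units = {"M": 1, "G": 1024, "T": 1024 * 1024}
        size_mb = float(file_size[:-1]) * units[file_size[-1].upper()]
        return int(size_mb / elapsed_seconds)

    # Example: a 600M data set cloned in 12.5 seconds -> 48 MB/sec.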