Example 1
    def create_mutiple_pvcs_statistics(self, num_of_samples, teardown_factory,
                                       pvc_size):
        """

        Creates number (samples_num) of PVCs, measures creation time for each PVC and returns list of creation times.

         Args:
             num_of_samples: Number of the sampled created PVCs.
             teardown_factory: A fixture used when we want a new resource that was created during the tests.
             pvc_size: Size of the created PVCs.

         Returns:
             List of the creation times of all the created PVCs.

        """
        time_measures = []
        for i in range(num_of_samples):
            log.info(f"Start creation of PVC number {i + 1}.")

            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
            teardown_factory(pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                self.interface, pvc_obj.name)
            logging.info(f"PVC created in {create_time} seconds")

            time_measures.append(create_time)
        return time_measures
Example 2
    def test_delete_create_pvc_same_name(self, interface, pvc_factory,
                                         teardown_factory):
        """
        Delete a PVC and create a new PVC with the same name
        """
        # Create a PVC
        pvc_obj1 = pvc_factory(
            interface=interface,
            access_mode=constants.ACCESS_MODE_RWO,
            status=constants.STATUS_BOUND,
        )

        # Delete the PVC
        logger.info(f"Deleting PVC {pvc_obj1.name}")
        pvc_obj1.delete()
        pvc_obj1.ocp.wait_for_delete(pvc_obj1.name)
        logger.info(f"Deleted PVC {pvc_obj1.name}")

        # Create a new PVC with same name
        logger.info(f"Creating new PVC with same name {pvc_obj1.name}")
        pvc_obj2 = helpers.create_pvc(
            sc_name=pvc_obj1.storageclass.name,
            pvc_name=pvc_obj1.name,
            namespace=pvc_obj1.project.namespace,
            do_reload=False,
        )

        teardown_factory(pvc_obj2)

        # Check the new PVC and PV are Bound
        helpers.wait_for_resource_state(resource=pvc_obj2,
                                        state=constants.STATUS_BOUND)
        pv_obj2 = pvc_obj2.backed_pv_obj
        helpers.wait_for_resource_state(resource=pv_obj2,
                                        state=constants.STATUS_BOUND)
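
The delete-and-recreate flow above can be wrapped in a small helper. A minimal sketch, assuming the same helpers module import as the example and a PVC object exposing the attributes used above (name, storageclass, project, ocp):

    def recreate_pvc_with_same_name(pvc_obj, do_reload=False):
        """
        Delete the given PVC, wait for the deletion to complete, then create a
        new PVC with the same name, namespace and storage class (sketch).
        """
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        return helpers.create_pvc(
            sc_name=pvc_obj.storageclass.name,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.project.namespace,
            do_reload=do_reload,
        )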
Example 3
    def create_resource_hsbench(self):
        """
        Create resources for the hsbench benchmark test:
            Create service account
            Create PVC
            Create golang pod

        """

        # Create service account
        self.sa_name = helpers.create_serviceaccount(self.namespace)
        self.sa_name = self.sa_name.name
        helpers.add_scc_policy(sa_name=self.sa_name, namespace=self.namespace)

        # Create test pvc+pod
        log.info(
            f"Create Golang pod to generate S3 workload... {self.namespace}")
        pvc_size = "50Gi"
        self.pod_name = "hsbench-pod"
        self.pvc_obj = helpers.create_pvc(
            sc_name=constants.DEFAULT_STORAGECLASS_RBD,
            namespace=self.namespace,
            size=pvc_size,
        )
        self.pod_obj = helpers.create_pod(
            constants.CEPHBLOCKPOOL,
            namespace=self.namespace,
            pod_name=self.pod_name,
            pvc_name=self.pvc_obj.name,
            sa_name=self.sa_name,
            pod_dict_path=self.pod_dic_path,
            dc_deployment=True,
            deploy_pod_status=constants.STATUS_COMPLETED,
        )
Example 4
def change_registry_backend_to_ocs():
    """
    Function to deploy registry with OCS backend.

    Raises:
        AssertionError: When changing the registry backend to OCS fails

    """
    sc = helpers.default_storage_class(interface_type=constants.CEPHFILESYSTEM)
    pv_obj = helpers.create_pvc(
        sc_name=sc.name,
        pvc_name="registry-cephfs-rwx-pvc",
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        size="100Gi",
        access_mode=constants.ACCESS_MODE_RWX,
    )
    helpers.wait_for_resource_state(pv_obj, "Bound")
    param_cmd = f'[{{"op": "add", "path": "/spec/storage", "value": {{"pvc": {{"claim": "{pv_obj.name}"}}}}}}]'

    run_cmd(f"oc patch {constants.IMAGE_REGISTRY_CONFIG} -p "
            f"'{param_cmd}' --type json")

    # Validate registry pod status
    retry((CommandFailed, UnexpectedBehaviour), tries=3,
          delay=15)(validate_registry_pod_status)()

    # Validate pvc mount in the registry pod
    retry((CommandFailed, UnexpectedBehaviour, AssertionError),
          tries=3,
          delay=15)(validate_pvc_mount_on_registry_pod)()
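
The JSON patch above is built with a hand-formatted f-string; a minimal sketch of building the same patch with json.dumps instead, which avoids quoting mistakes (the helper name here is hypothetical):

import json

def build_registry_storage_patch(claim_name):
    """Return the '--type json' patch body that points the image registry at claim_name."""
    return json.dumps(
        [{"op": "add", "path": "/spec/storage", "value": {"pvc": {"claim": claim_name}}}]
    )

# Usage with the same run_cmd call as above:
# run_cmd(f"oc patch {constants.IMAGE_REGISTRY_CONFIG} -p "
#         f"'{build_registry_storage_patch(pv_obj.name)}' --type json")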
Example 5
    def __init__(self):
        with open(constants.CSI_CEPHFS_POD_YAML, "r") as pod_fd:
            pod_info = yaml.safe_load(pod_fd)
        pvc_name = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"][
            "claimName"]
        self.pod_name = pod_info["metadata"]["name"]
        config.RUN["cli_params"]["teardown"] = True
        self.cephfs_pvc = helpers.create_pvc(
            sc_name=constants.DEFAULT_STORAGECLASS_CEPHFS,
            namespace=config.ENV_DATA["cluster_namespace"],
            pvc_name=pvc_name,
            size=SIZE,
        )
        helpers.wait_for_resource_state(self.cephfs_pvc,
                                        constants.STATUS_BOUND,
                                        timeout=300)
        self.cephfs_pod = helpers.create_pod(
            interface_type=constants.CEPHFILESYSTEM,
            namespace=config.ENV_DATA["cluster_namespace"],
            pvc_name=pvc_name,
            node_name="compute-0",
            pod_name=self.pod_name,
        )
        helpers.wait_for_resource_state(self.cephfs_pod,
                                        constants.STATUS_RUNNING,
                                        timeout=300)
        logging.info("pvc and cephfs pod created")
        self.ocp_obj = ocp.OCP(
            kind=constants.POD,
            namespace=config.ENV_DATA["cluster_namespace"],
        )

        self.test_file_list = add_million_files(self.pod_name, self.ocp_obj)
        logging.info("cephfs test files created")
Example 6
    def test_pvc_deletion_measurement_performance(self, teardown_factory, pvc_size):
        """
        Measure that the PVC deletion time is within the supported limits
        """
        logging.info("Start creating new PVC")

        pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pv_name = pvc_obj.backed_pv
        pvc_reclaim_policy = pvc_obj.reclaim_policy
        teardown_factory(pvc_obj)
        logging.info("Start deletion of PVC")
        pvc_obj.delete()
        pvc_obj.ocp.wait_for_delete(pvc_obj.name)
        if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
            helpers.validate_pv_delete(pvc_obj.backed_pv)
        delete_time = helpers.measure_pvc_deletion_time(self.interface, pv_name)
        # Deletion time for CephFS PVC is a little over 3 seconds
        deletion_time = 4 if self.interface == constants.CEPHFILESYSTEM else 3
        logging.info(f"PVC deleted in {delete_time} seconds")
        if delete_time > deletion_time:
            raise ex.PerformanceException(
                f"PVC deletion time is {delete_time} and greater than {deletion_time} second"
            )
        push_to_pvc_time_dashboard(self.interface, "1-pvc-deletion", delete_time)
Example 7
    def test_ocs_347(self, resources):
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        storageclass.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=self.cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            )
        )
        log.info("Creating a PVC")
        pvc.append(helpers.create_pvc(sc_name=storageclass[0].name))
        for pvc_obj in pvc:
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
        log.info(f"Creating a pod on with pvc {pvc[0].name}")
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvc[0].name,
            pod_dict_path=constants.NGINX_POD_YAML,
        )
        pod.append(pod_obj)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
Example 8
    def test_create_storage_class_with_wrong_provisioner(self, interface):
        """
        Test function which creates Storage Class with
        wrong provisioner and verifies PVC status
        """
        log.info(f"Creating a {interface} storage class")
        if interface == "RBD":
            interface_type = constants.CEPHBLOCKPOOL
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        sc_obj = helpers.create_storage_class(
            interface_type=interface_type,
            interface_name=interface_name,
            secret_name=secret,
            provisioner=constants.AWS_EFS_PROVISIONER,
        )
        log.info(
            f"{interface}Storage class: {sc_obj.name} created successfully")

        # Create PVC
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, do_reload=False)

        # Check PVC status
        pvc_output = pvc_obj.get()
        pvc_status = pvc_output["status"]["phase"]
        log.info(f"Status of PVC {pvc_obj.name} after creation: {pvc_status}")
        log.info(f"Waiting for status '{constants.STATUS_PENDING}' "
                 f"for 20 seconds (it shouldn't change)")

        pvc_obj.ocp.wait_for_resource(
            resource_name=pvc_obj.name,
            condition=constants.STATUS_PENDING,
            timeout=20,
            sleep=5,
        )
        # Check PVC status again after 20 seconds
        pvc_output = pvc_obj.get()
        pvc_status = pvc_output["status"]["phase"]
        assert_msg = (
            f"PVC {pvc_obj.name} is not in {constants.STATUS_PENDING} "
            f"status")
        assert pvc_status == constants.STATUS_PENDING, assert_msg
        log.info(f"Status of {pvc_obj.name} after 20 seconds: {pvc_status}")

        # Delete PVC
        log.info(f"Deleting PVC: {pvc_obj.name}")
        assert pvc_obj.delete()
        log.info(f"PVC {pvc_obj.name} delete successfully")

        # Delete Storage Class
        log.info(f"Deleting Storageclass: {sc_obj.name}")
        assert sc_obj.delete()
        log.info(f"Storage Class: {sc_obj.name} deleted successfully")
Example 9
    def pod_obj_list(
        self,
        interface,
        storageclass_factory,
        pod_factory,
        pvc_factory,
        teardown_factory,
        samples_num,
        pvc_size,
    ):
        """
        Prepare sample pods for the test

        Returns:
            list: Pod instances created for the test

        """
        self.interface = interface
        pod_result_list = []

        self.msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
        self.samples_num = samples_num
        self.pvc_size = pvc_size

        if self.interface == constants.CEPHBLOCKPOOL_THICK:
            self.sc_obj = storageclass_factory(
                interface=constants.CEPHBLOCKPOOL,
                new_rbd_pool=True,
                rbd_thick_provision=True,
            )
        else:
            self.sc_obj = storageclass_factory(self.interface)

        for i in range(samples_num):
            logging.info(
                f"{self.msg_prefix} Start creating PVC number {i + 1}.")
            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=pvc_size)
            teardown_factory(pvc_obj)
            timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
            helpers.wait_for_resource_state(pvc_obj,
                                            constants.STATUS_BOUND,
                                            timeout=timeout)
            pvc_obj.reload()

            logging.info(
                f"{self.msg_prefix} PVC number {i + 1} was successfully created ."
            )

            pod_obj = pod_factory(interface=self.interface,
                                  pvc=pvc_obj,
                                  status=constants.STATUS_RUNNING)
            teardown_factory(pod_obj)
            pod_result_list.append(pod_obj)

        return pod_result_list
Example 10
    def _deploy_es(self):
        """
        Deploying the Elasticsearch server

        """

        # Create a PVC for the Elasticsearch server and wait until it is bound
        log.info("Creating a 10 GiB PVC for the ElasticSearch cluster")
        try:
            self.pvc_obj = create_pvc(
                sc_name=self.args.get("sc") or constants.CEPHBLOCKPOOL_SC,
                namespace=self.namespace,
                pvc_name="elasticsearch-data-quickstart-es-default-0",
                access_mode=constants.ACCESS_MODE_RWO,
                size="10Gi",
            )

            # Make sure the PVC is bound; otherwise log the error and abort
            wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        except ResourceWrongStatusException:
            log.error("The PVC couldn't created")
            return False

        self.pvc_obj.reload()

        log.info("Deploy the ElasticSearch cluster")
        self.ocp.apply(self.crd)

        sample = TimeoutSampler(
            timeout=300,
            sleep=10,
            func=self._pod_is_found,
            pattern="quickstart-es-default",
        )
        if not sample.wait_for_func_status(True):
            log.error("The ElasticSearch pod deployment Failed")
            return False

        self.espod = get_pod_name_by_pattern("quickstart-es-default",
                                             self.namespace)[0]
        log.info(f"The ElasticSearch pod {self.espod} Started")

        es_pod = OCP(kind="pod", namespace=self.namespace)
        log.info("Waiting for ElasticSearch to Run")
        if not es_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                resource_name=self.espod,
                sleep=30,
                timeout=600,
        ):
            log.error("TThe ElasticSearch pod is not running !")
            return False
        else:
            log.info("Elastic Search is ready !!!")
            return True
Example 11
def create_pvc(request):
    """
    Create a Persistent Volume Claim
    """
    class_instance = request.node.cls

    class_instance.pvc_obj = helpers.create_pvc(
        sc_name=class_instance.sc_obj.name, namespace=class_instance.namespace)
    helpers.wait_for_resource_state(class_instance.pvc_obj,
                                    constants.STATUS_BOUND)
    class_instance.pvc_obj.reload()
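
The helper above hangs the created PVC on the requesting test class via request.node.cls; a minimal sketch of how it could be exposed as a pytest fixture (the fixture name and scope are assumptions, not part of the original):

import pytest

@pytest.fixture(scope="class")
def class_pvc(request):
    """
    Create a PVC for the requesting test class; the class is expected to
    provide sc_obj and namespace attributes, as in the example above.
    """
    create_pvc(request)
    return request.node.cls.pvc_obj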
Example 12
    def test_capacity_breakdown_ui(
        self, setup_ui_class, project_name, pod_name, sc_type, teardown_project_factory
    ):
        """
        Test Capacity Breakdown UI

        Args:
            project_name (str): the project name
            pod_name (str): the pod name
            sc_type (str): the storage class type [fs, block]

        """
        project_obj = helpers.create_project(project_name=project_name)
        teardown_project_factory(project_obj)
        logger.info(
            f"Created new pvc sc_name={sc_type} namespace={project_name}, "
            f"size=6Gi, access_mode={constants.ACCESS_MODE_RWO}"
        )
        pvc_obj = helpers.create_pvc(
            sc_name=sc_type,
            namespace=project_name,
            size="6Gi",
            do_reload=False,
            access_mode=constants.ACCESS_MODE_RWO,
        )
        logger.info(
            f"Create new pod. Pod name={pod_name},"
            f"interface_type={constants.CEPHBLOCKPOOL}"
        )
        pod_obj = helpers.create_pod(
            pvc_name=pvc_obj.name,
            namespace=project_obj.namespace,
            interface_type=constants.CEPHBLOCKPOOL,
            pod_name=pod_name,
        )
        logger.info(f"Wait for pod {pod_name} move to Running state")
        helpers.wait_for_resource_state(
            pod_obj, state=constants.STATUS_RUNNING, timeout=300
        )
        logger.info("Run fio workload")
        pod_obj.run_io(
            storage_type=constants.WORKLOAD_STORAGE_TYPE_FS,
            size="4GB",
        )
        fio_result = pod_obj.get_fio_results()
        logger.info("IOPs after FIO:")
        reads = fio_result.get("jobs")[0].get("read").get("iops")
        writes = fio_result.get("jobs")[0].get("write").get("iops")
        logger.info(f"Read: {reads}")
        logger.info(f"Write: {writes}")

        validation_ui_obj = ValidationUI(setup_ui_class)
        assert validation_ui_obj.check_capacity_breakdown(
            project_name=project_name, pod_name=pod_name
        ), "The Project/Pod not created on Capacity Breakdown"
Example 13
    def base_setup(
        self,
        interface_iterate,
        storageclass_factory,
        pvc_size,
    ):
        """
        A setup phase for the test - creating resources

        Args:
            interface_iterate: A fixture to iterate over ceph interfaces
            storageclass_factory: A fixture to create everything needed for a
                storageclass
            pvc_size: The size of the PVC in Gi

        """
        self.interface = interface_iterate
        self.sc_obj = storageclass_factory(self.interface)

        if self.interface == constants.CEPHBLOCKPOOL:
            self.sc = "RBD"
        elif self.interface == constants.CEPHFILESYSTEM:
            self.sc = "CephFS"
        elif self.interface == constants.CEPHBLOCKPOOL_THICK:
            self.sc = "RBD-Thick"

        self.create_test_project()

        self.pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                          size=pvc_size + "Gi",
                                          namespace=self.namespace)
        helpers.wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        self.pvc_obj.reload()

        # Create a pod and attach it to the PVC
        try:
            self.pod_object = helpers.create_pod(
                interface_type=self.interface,
                pvc_name=self.pvc_obj.name,
                namespace=self.namespace,
                pod_dict_path=constants.PERF_POD_YAML,
            )
            helpers.wait_for_resource_state(self.pod_object,
                                            constants.STATUS_RUNNING)
            self.pod_object.reload()
            self.pod_object.workload_setup("fs", jobs=1, fio_installed=True)
        except Exception as e:
            log.error(
                f"Pod on PVC {self.pvc_obj.name} was not created, exception {str(e)}"
            )
            raise ex.PodNotCreated("Pod on PVC was not created.")
Example 14
 def test_basics_rbd(self, test_fixture_rbd):
     """
     Testing basics: secret creation,
     storage class creation, PVC and pod with RBD
     """
     global RBD_PVC_OBJ, RBD_POD_OBJ
     log.info("creating pvc for RBD ")
     pvc_name = helpers.create_unique_resource_name("test-rbd", "pvc")
     RBD_PVC_OBJ = helpers.create_pvc(sc_name=RBD_SC_OBJ.name,
                                      pvc_name=pvc_name)
     helpers.wait_for_resource_state(RBD_PVC_OBJ, constants.STATUS_BOUND)
     RBD_PVC_OBJ.reload()
     if RBD_PVC_OBJ.backed_pv is None:
         RBD_PVC_OBJ.reload()
     RBD_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHBLOCKPOOL, pvc_name=RBD_PVC_OBJ.name)
     helpers.wait_for_resource_state(RBD_POD_OBJ, constants.STATUS_RUNNING)
     RBD_POD_OBJ.reload()
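
The single extra reload above only helps if backed_pv is populated on the second attempt; a more patient variant could poll until the backing PV is reported. A minimal sketch using plain-Python polling (the timeout and sleep values are assumptions):

import time

def wait_for_backed_pv(pvc_obj, timeout=60, sleep=5):
    """Reload the PVC until its backed_pv field is populated, or time out."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if pvc_obj.backed_pv:
            return pvc_obj.backed_pv
        time.sleep(sleep)
        pvc_obj.reload()
    raise TimeoutError(f"PVC {pvc_obj.name} reports no backing PV after {timeout}s")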
Example 15
    def create_jenkins_pvc(self):
        """
        Create a Jenkins PVC in each project

        Returns:
            list: PVC objects created
        """
        pvc_objs = []
        for project in self.projects:
            log.info(f"create jenkins pvc on project {project}")
            pvc_obj = create_pvc(
                pvc_name="dependencies",
                size="10Gi",
                sc_name=constants.DEFAULT_STORAGECLASS_RBD,
                namespace=project,
            )
            pvc_objs.append(pvc_obj)
        return pvc_objs
Example 16
 def create_testing_pvc_and_wait_for_bound(self):
     log.info("Creating PVC for the test")
     try:
         self.pvc_obj = helpers.create_pvc(
             sc_name=self.sc_obj.name,
             pvc_name="pvc-pas-test",
             size=f"{self.pvc_size}Gi",
             namespace=self.namespace,
             # access_mode=Interfaces_info[self.interface]["accessmode"],
         )
     except Exception as e:
         log.exception(f"The PVC was not created, exception [{str(e)}]")
         raise PVCNotCreated("PVC did not reach BOUND state.")
     # Wait for the PVC to be Bound
     performance_lib.wait_for_resource_bulk_status(
         "pvc", 1, self.namespace, constants.STATUS_BOUND, 120, 5
     )
     log.info(f"The PVC {self.pvc_obj.name} was created and in Bound state.")
Example 17
    def run(self):
        """
        Running the test
        """
        for i in range(self.samples_num):

            # Create a PVC to attach a pod to it
            csi_start_time = self.get_time("csi")
            log.info(f"{self.msg_prefix} Start creating PVC number {i + 1}.")
            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=self.pvc_size,
                                         namespace=self.namespace)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
            self.pvc_list.append(pvc_obj)
            log.info(
                f"{self.msg_prefix} PVC number {i + 1} was successfully created ."
            )

            # Create a pod and attach it to the PVC
            try:
                pod_obj = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=self.namespace,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
                helpers.wait_for_resource_state(pod_obj,
                                                constants.STATUS_RUNNING)
                pod_obj.reload()
            except Exception as e:
                log.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise ex.PodNotCreated("Pod on PVC was not created.")
            self.pod_result_list.append(pod_obj)

            # Get the pod start time, including the attach time
            self.start_time_dict_list.append(helpers.pod_start_time(pod_obj))
            self.csi_time_dict_list.append(
                performance_lib.pod_attach_csi_time(self.interface,
                                                    pvc_obj.backed_pv,
                                                    csi_start_time,
                                                    self.namespace)[0])
Example 18
 def test_basics_cephfs(self, test_fixture_cephfs):
     """
     Testing basics: secret creation,
     storage class creation, PVC and pod with CephFS
     """
     global CEPHFS_PVC_OBJ, CEPHFS_POD_OBJ
     log.info("creating pvc for CephFS ")
     pvc_name = helpers.create_unique_resource_name("test-cephfs", "pvc")
     CEPHFS_PVC_OBJ = helpers.create_pvc(sc_name=CEPHFS_SC_OBJ.name,
                                         pvc_name=pvc_name)
     helpers.wait_for_resource_state(CEPHFS_PVC_OBJ, constants.STATUS_BOUND)
     CEPHFS_PVC_OBJ.reload()
     log.info("creating cephfs pod")
     CEPHFS_POD_OBJ = helpers.create_pod(
         interface_type=constants.CEPHFILESYSTEM,
         pvc_name=CEPHFS_PVC_OBJ.name)
     helpers.wait_for_resource_state(CEPHFS_POD_OBJ,
                                     constants.STATUS_RUNNING)
     CEPHFS_POD_OBJ.reload()
Example 19
    def create_resource_hsbench(self):
        """
        Create resources for the hsbench benchmark test:
            Create service account
            Create PVC
            Create golang pod

        """
        # Check for existing rgw pods on cluster
        self.rgw_pod = pod.get_rgw_pods()
        if self.rgw_pod:
            # Create service account
            self.sa_name = helpers.create_serviceaccount(self.namespace)
            self.sa_name = self.sa_name.name
            helpers.add_scc_policy(sa_name=self.sa_name,
                                   namespace=self.namespace)

            # Create test pvc+pod
            log.info(
                f"Create Golang pod to generate S3 workload... {self.namespace}"
            )
            pvc_size = "50Gi"
            node_name = "compute-0"
            self.pod_name = "hsbench-pod"
            self.pvc_obj = helpers.create_pvc(
                sc_name=constants.DEFAULT_STORAGECLASS_RBD,
                namespace=self.namespace,
                size=pvc_size,
            )
            self.pod_obj = helpers.create_pod(
                constants.CEPHBLOCKPOOL,
                namespace=self.namespace,
                pod_name=self.pod_name,
                pvc_name=self.pvc_obj.name,
                node_name=node_name,
                sa_name=self.sa_name,
                pod_dict_path=self.pod_dic_path,
                dc_deployment=True,
                deploy_pod_status=constants.STATUS_COMPLETED,
            )
        else:
            raise UnexpectedBehaviour(
                "This cluster doesn't have RGW pod(s) to perform hsbench")
Example 20
    def _deploy_es(self):
        """
        Deploying the Elasticsearch server

        """

        # Create a PVC for the Elasticsearch server and wait until it is bound
        log.info("Creating a 10 GiB PVC for the ElasticSearch cluster")
        self.pvc_obj = create_pvc(
            sc_name=constants.CEPHBLOCKPOOL_SC,
            namespace=self.namespace,
            pvc_name="elasticsearch-data-quickstart-es-default-0",
            access_mode=constants.ACCESS_MODE_RWO,
            size="10Gi",
        )
        wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        self.pvc_obj.reload()

        log.info("Deploy the ElasticSearch cluster")
        self.ocp.apply(self.crd)

        sample = TimeoutSampler(
            timeout=300,
            sleep=10,
            func=self._pod_is_found,
            pattern="quickstart-es-default",
        )
        if not sample.wait_for_func_status(True):
            self.cleanup()
            raise Exception("The ElasticSearch pod deployment Failed")
        self.espod = get_pod_name_by_pattern("quickstart-es-default",
                                             self.namespace)[0]
        log.info(f"The ElasticSearch pod {self.espod} Started")

        es_pod = OCP(kind="pod", namespace=self.namespace)
        log.info("Waiting for ElasticSearch to Run")
        assert es_pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_name=self.espod,
            sleep=30,
            timeout=600,
        )
        log.info("Elastic Search is ready !!!")
Example 21
    def create_jenkins_pvc(self):
        """
        Create a Jenkins PVC in each project

        Returns:
            list: PVC objects created
        """
        pvc_objs = []
        sc_name = (constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
                   if storagecluster_independent_check() else
                   constants.DEFAULT_STORAGECLASS_RBD)
        for project in self.projects:
            log.info(f"create jenkins pvc on project {project}")
            pvc_obj = create_pvc(
                pvc_name="dependencies",
                size="10Gi",
                sc_name=sc_name,
                namespace=project,
            )
            pvc_objs.append(pvc_obj)
        return pvc_objs
Example 22
    def test_create_multiple_sc_with_same_pool_name(self, interface_type,
                                                    resources):
        """
        This test function does the following:
        * Creates multiple Storage Classes with the same pool name
        * Creates PVCs using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """
        # Unpack resources
        pods, pvcs, storageclasses = resources

        # Create 3 Storage Classes with same pool name
        if interface_type == constants.CEPHBLOCKPOOL:
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        for i in range(3):
            log.info(f"Creating a {interface_type} storage class")
            storageclasses.append(
                helpers.create_storage_class(
                    interface_type=interface_type,
                    interface_name=interface_name,
                    secret_name=secret,
                ))
            log.info(f"{interface_type}StorageClass: {storageclasses[i].name} "
                     f"created successfully")

        # Create PVCs using each SC
        for i in range(3):
            log.info(f"Creating a PVC using {storageclasses[i].name}")
            pvcs.append(helpers.create_pvc(storageclasses[i].name))
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()

        # Create app pod and mount each PVC
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvcs[i].name}")
            pods.append(
                helpers.create_pod(
                    interface_type=interface_type,
                    pvc_name=pvcs[i].name,
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                ))
            for pod in pods:
                helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
                pod.reload()
            log.info(f"{pods[i].name} created successfully and "
                     f"mounted {pvcs[i].name}")

        # Run IO on each app pod for some time
        for pod in pods:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io("fs", size="2G")

        for pod in pods:
            get_fio_rw_iops(pod)
Example 23
    def test_pvc_creation_deletion_measurement_performance(
            self, teardown_factory, pvc_size):
        """
        Measure PVC creation and deletion times for a number of PVC samples
        and verify that those times are within the required limits
        """

        # Getting the full path for the test logs
        self.full_log_path = get_full_test_logs_path(cname=self)
        if self.interface == constants.CEPHBLOCKPOOL:
            self.sc = "RBD"
        if self.interface == constants.CEPHFILESYSTEM:
            self.sc = "CephFS"
        self.full_log_path += f"-{self.sc}-{pvc_size}"
        log.info(f"Logs file path name is : {self.full_log_path}")

        self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())

        self.get_env_info()

        # Initialize the results doc file.
        self.full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path))
        self.full_results.add_key("pvc_size", pvc_size)
        num_of_samples = 5
        accepted_creation_time = 1
        accepted_deletion_time = 2 if self.interface == constants.CEPHFILESYSTEM else 1
        self.full_results.add_key("samples", num_of_samples)

        accepted_creation_deviation_percent = 50
        accepted_deletion_deviation_percent = 50

        creation_time_measures = []
        deletion_time_measures = []
        msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

        for i in range(num_of_samples):
            logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
            start_time = datetime.datetime.utcnow().strftime(
                "%Y-%m-%dT%H:%M:%SZ")
            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name,
                                         size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

            creation_time = performance_lib.measure_pvc_creation_time(
                self.interface, pvc_obj.name, start_time)

            logging.info(
                f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
            )
            if creation_time > accepted_creation_time:
                raise ex.PerformanceException(
                    f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
                    f"{accepted_creation_time} seconds.")
            creation_time_measures.append(creation_time)

            pv_name = pvc_obj.backed_pv
            pvc_reclaim_policy = pvc_obj.reclaim_policy

            pod_obj = self.write_file_on_pvc(pvc_obj)
            pod_obj.delete(wait=True)
            teardown_factory(pvc_obj)
            logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(pvc_obj.name)
                helpers.validate_pv_delete(pvc_obj.backed_pv)
                deletion_time = helpers.measure_pvc_deletion_time(
                    self.interface, pv_name)
                logging.info(
                    f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
                )
                if deletion_time > accepted_deletion_time:
                    raise ex.PerformanceException(
                        f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
                        f"{accepted_deletion_time} seconds.")
                deletion_time_measures.append(deletion_time)
            else:
                logging.info(
                    f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
                    f" therefore not measuring deletion time for this PVC.")

        creation_average = self.process_time_measurements(
            "creation",
            creation_time_measures,
            accepted_creation_deviation_percent,
            msg_prefix,
        )
        self.full_results.add_key("creation-time", creation_average)
        deletion_average = self.process_time_measurements(
            "deletion",
            deletion_time_measures,
            accepted_deletion_deviation_percent,
            msg_prefix,
        )
        self.full_results.add_key("deletion-time", deletion_average)
        self.full_results.all_results["creation"] = creation_time_measures
        self.full_results.all_results["deletion"] = deletion_time_measures
        self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
        self.full_results.add_key("test_time", {
            "start": self.start_time,
            "end": self.end_time
        })
        self.full_results.es_write()
        log.info(
            f"The Result can be found at : {self.full_results.results_link()}")

        if not self.dev_mode:
            # All the results are OK and the test passes; push the results to codespeed
            push_to_pvc_time_dashboard(self.interface, "1-pvc-creation",
                                       creation_average)
            push_to_pvc_time_dashboard(self.interface, "1-pvc-deletion",
                                       deletion_average)
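
process_time_measurements itself is not shown in these examples; a minimal sketch of the kind of check its name and arguments suggest, assuming it averages the samples and compares their relative spread against the accepted deviation percentage (an assumption, not the ocs-ci implementation):

import statistics

def process_time_measurements(action_name, time_measures, accepted_deviation_percent, msg_prefix):
    """
    Return the average of the samples; raise if the relative standard
    deviation exceeds the accepted percentage (sketch).
    """
    average = statistics.mean(time_measures)
    st_deviation = statistics.stdev(time_measures) if len(time_measures) > 1 else 0.0
    deviation_percent = (st_deviation / average) * 100 if average else 0.0
    if deviation_percent > accepted_deviation_percent:
        raise RuntimeError(
            f"{msg_prefix} {action_name} time deviation {deviation_percent:.1f}% "
            f"exceeds the accepted {accepted_deviation_percent}%"
        )
    return average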
Example 24
    def raw_block_pv(self):
        """
        Testing basic creation of app pod with RBD RWX raw block pv support
        """
        worker_nodes = node.get_worker_nodes()
        pvcs = list()
        size_mb = "500Mi"
        size_gb = "10Gi"
        if config.ENV_DATA["platform"].lower(
        ) in constants.MANAGED_SERVICE_PLATFORMS:
            size_tb = str(convert_device_size("50Gi", "TB")) + "Ti"
        else:
            size_tb = "1Ti"
        for size in [size_mb, size_gb, size_tb]:
            pvcs.append(
                helpers.create_pvc(
                    sc_name=self.sc_obj.name,
                    size=size,
                    access_mode=constants.ACCESS_MODE_RWX,
                    namespace=self.namespace,
                    volume_mode="Block",
                ))
        pvc_mb, pvc_gb, pvc_tb = pvcs[0], pvcs[1], pvcs[2]

        for pvc in pvcs:
            helpers.wait_for_resource_state(resource=pvc,
                                            state=constants.STATUS_BOUND,
                                            timeout=120)

        pvs = [pvc.backed_pv_obj for pvc in pvcs]

        pods = list()
        pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
        for pvc in pvc_mb, pvc_gb, pvc_tb:
            for _ in range(3):
                pods.append(
                    helpers.create_pod(
                        interface_type=constants.CEPHBLOCKPOOL,
                        pvc_name=pvc.name,
                        namespace=self.namespace,
                        raw_block_pv=True,
                        pod_dict_path=pod_dict,
                        node_name=random.choice(worker_nodes),
                    ))

        pvc_mb_pods, pvc_gb_pods, pvc_tb_pods = pods[0:3], pods[3:6], pods[6:9]
        for pod in pods:
            helpers.wait_for_resource_state(resource=pod,
                                            state=constants.STATUS_RUNNING,
                                            timeout=120)
        storage_type = "block"

        with ThreadPoolExecutor() as p:
            for pod in pvc_mb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(10,200)}M",
                    invalidate=0,
                )
            for pod in pvc_gb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(1,5)}G",
                    invalidate=0,
                )
            for pod in pvc_tb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(10,15)}G",
                    invalidate=0,
                )

        for pod in pods:
            get_fio_rw_iops(pod)
        return pods, pvcs, pvs
Example 25
    def test_pod_reattach_time_performance(
        self, storageclass_factory, copies, timeout, total_time_limit
    ):
        """
        Test assigning nodeName to a pod using an RWX PVC.
        Each unpacked kernel tree is 892M and contains 61694 files.
        The test creates samples_num PVCs and pods, writes the kernel files
        multiplied by the number of copies, and calculates the average total
        and CSI reattach times and their standard deviation.
        """
        kernel_url = "https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.19.5.tar.gz"
        download_path = "tmp"

        samples_num = 7
        if self.dev_mode:
            samples_num = 3

        test_start_time = PASTest.get_time()
        helpers.pull_images(constants.PERF_IMAGE)
        # Download a linux Kernel

        dir_path = os.path.join(os.getcwd(), download_path)
        file_path = os.path.join(dir_path, "file.gz")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        urllib.request.urlretrieve(kernel_url, file_path)

        worker_nodes_list = node.get_worker_nodes()
        assert len(worker_nodes_list) > 1
        node_one = worker_nodes_list[0]
        node_two = worker_nodes_list[1]

        time_measures, csi_time_measures, files_written_list, data_written_list = (
            [],
            [],
            [],
            [],
        )

        self.sc_obj = storageclass_factory(self.interface)
        for sample_index in range(1, samples_num + 1):

            csi_start_time = self.get_time("csi")

            logger.info(f"Start creating PVC number {sample_index}.")
            pvc_obj = helpers.create_pvc(
                sc_name=self.sc_obj.name, size="100Gi", namespace=self.namespace
            )
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)

            # Create a pod on one node
            logger.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_one}")

            pvc_obj.reload()
            self.pvc_list.append(pvc_obj)

            try:
                pod_obj1 = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=pvc_obj.namespace,
                    node_name=node_one,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
            except Exception as e:
                logger.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise PodNotCreated("Pod on PVC was not created.")

            # Confirm that the pod is running on the selected node
            logger.info("Checking whether the pod is running on the selected node")
            helpers.wait_for_resource_state(
                resource=pod_obj1, state=constants.STATUS_RUNNING, timeout=timeout
            )

            pod_name = pod_obj1.name
            pod_path = "/mnt"

            _ocp = OCP(namespace=pvc_obj.namespace)

            rsh_cmd = f"rsync {dir_path} {pod_name}:{pod_path}"
            _ocp.exec_oc_cmd(rsh_cmd)

            rsh_cmd = (
                f"exec {pod_name} -- tar xvf {pod_path}/tmp/file.gz -C {pod_path}/tmp"
            )
            _ocp.exec_oc_cmd(rsh_cmd)

            for x in range(copies):
                rsh_cmd = f"exec {pod_name} -- mkdir -p {pod_path}/folder{x}"
                _ocp.exec_oc_cmd(rsh_cmd)
                rsh_cmd = (
                    f"exec {pod_name} -- cp -r {pod_path}/tmp {pod_path}/folder{x}"
                )
                _ocp.exec_oc_cmd(rsh_cmd)
                rsh_cmd = f"exec {pod_name} -- sync"
                _ocp.exec_oc_cmd(rsh_cmd)

            logger.info("Getting the amount of data written to the PVC")
            rsh_cmd = f"exec {pod_name} -- df -h {pod_path}"
            data_written_str = _ocp.exec_oc_cmd(rsh_cmd).split()[-4]
            logger.info(f"The amount of written data is {data_written_str}")
            data_written = float(data_written_str[:-1])

            rsh_cmd = f"exec {pod_name} -- find {pod_path} -type f"
            files_written = len(_ocp.exec_oc_cmd(rsh_cmd).split())
            logger.info(
                f"For {self.interface} - The number of files written to the pod is {files_written}"
            )
            files_written_list.append(files_written)
            data_written_list.append(data_written)

            logger.info("Deleting the pod")
            rsh_cmd = f"delete pod {pod_name}"
            _ocp.exec_oc_cmd(rsh_cmd)

            logger.info(f"Creating Pod with pvc {pvc_obj.name} on node {node_two}")

            try:
                pod_obj2 = helpers.create_pod(
                    interface_type=self.interface,
                    pvc_name=pvc_obj.name,
                    namespace=pvc_obj.namespace,
                    node_name=node_two,
                    pod_dict_path=constants.PERF_POD_YAML,
                )
            except Exception as e:
                logger.error(
                    f"Pod on PVC {pvc_obj.name} was not created, exception {str(e)}"
                )
                raise PodNotCreated("Pod on PVC was not created.")

            start_time = time.time()

            pod_name = pod_obj2.name
            helpers.wait_for_resource_state(
                resource=pod_obj2, state=constants.STATUS_RUNNING, timeout=timeout
            )
            end_time = time.time()
            total_time = end_time - start_time
            if total_time > total_time_limit:
                logger.error(
                    f"Pod creation time is {total_time} and greater than {total_time_limit} seconds"
                )
                raise ex.PerformanceException(
                    f"Pod creation time is {total_time} and greater than {total_time_limit} seconds"
                )

            csi_time = performance_lib.pod_attach_csi_time(
                self.interface, pvc_obj.backed_pv, csi_start_time, pvc_obj.namespace
            )[0]
            csi_time_measures.append(csi_time)
            logger.info(
                f"PVC #{pvc_obj.name} pod {pod_name} creation time took {total_time} seconds, "
                f"csi time is {csi_time} seconds"
            )
            time_measures.append(total_time)

            logger.info("Deleting the pod")
            rsh_cmd = f"delete pod {pod_name}"
            _ocp.exec_oc_cmd(rsh_cmd)
            # teardown_factory(pod_obj2)

        average = statistics.mean(time_measures)
        logger.info(
            f"The average time of {self.interface} pod creation on {samples_num} PVCs is {average} seconds"
        )

        st_deviation = statistics.stdev(time_measures)
        logger.info(
            f"The standard deviation of {self.interface} pod creation time on {samples_num} PVCs is {st_deviation}"
        )

        csi_average = statistics.mean(csi_time_measures)
        logger.info(
            f"The average csi time of {self.interface} pod creation on {samples_num} PVCs is {csi_average} seconds"
        )

        csi_st_deviation = statistics.stdev(csi_time_measures)
        logger.info(
            f"The standard deviation of {self.interface} csi pod creation time on {samples_num} PVCs "
            f"is {csi_st_deviation}"
        )

        files_written_average = statistics.mean(files_written_list)
        data_written_average = statistics.mean(data_written_list)

        os.remove(file_path)
        os.rmdir(dir_path)

        # Produce ES report

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pod_reattach_time_fullres",
            )
        )

        full_results.add_key("storageclass", self.sc)
        full_results.add_key("pod_reattach_time", time_measures)
        full_results.add_key("copies_number", copies)
        full_results.add_key("files_number_average", files_written_average)
        full_results.add_key("data_average", data_written_average)
        full_results.add_key("pod_reattach_time_average", average)
        full_results.add_key("pod_reattach_standard_deviation", st_deviation)
        full_results.add_key("pod_csi_reattach_time_average", csi_average)
        full_results.add_key("pod_csi_reattach_standard_deviation", csi_st_deviation)

        test_end_time = PASTest.get_time()

        # Add the test time to the ES report
        full_results.add_key(
            "test_time", {"start": test_start_time, "end": test_end_time}
        )

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (4 - according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pod_reattach_time_performance"
            )
            self.write_result_to_file(res_link)
Example 26
    def test_pvc_creation_deletion_measurement_performance(
        self, teardown_factory, pvc_size
    ):
        """
        Measure PVC creation and deletion times for a number of PVC samples
        and verify that those times are within the required limits
        """

        num_of_samples = 5
        accepted_creation_time = 1
        accepted_deletion_time = 2 if self.interface == constants.CEPHFILESYSTEM else 1

        accepted_creation_deviation_percent = 50
        accepted_deletion_deviation_percent = 50

        creation_time_measures = []
        deletion_time_measures = []
        msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."

        for i in range(num_of_samples):
            logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
            start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
            pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

            creation_time = performance_lib.measure_pvc_creation_time(
                self.interface, pvc_obj.name, start_time
            )

            logging.info(
                f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
            )
            if creation_time > accepted_creation_time:
                raise ex.PerformanceException(
                    f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
                    f"{accepted_creation_time} seconds."
                )
            creation_time_measures.append(creation_time)

            pv_name = pvc_obj.backed_pv
            pvc_reclaim_policy = pvc_obj.reclaim_policy

            pod_obj = self.write_file_on_pvc(pvc_obj)
            pod_obj.delete(wait=True)
            teardown_factory(pvc_obj)
            logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(pvc_obj.name)
                helpers.validate_pv_delete(pvc_obj.backed_pv)
                deletion_time = helpers.measure_pvc_deletion_time(
                    self.interface, pv_name
                )
                logging.info(
                    f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
                )
                if deletion_time > accepted_deletion_time:
                    raise ex.PerformanceException(
                        f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
                        f"{accepted_deletion_time} seconds."
                    )
                deletion_time_measures.append(deletion_time)
            else:
                logging.info(
                    f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
                    f" therefore not measuring deletion time for this PVC."
                )

        creation_average = self.process_time_measurements(
            "creation",
            creation_time_measures,
            accepted_creation_deviation_percent,
            msg_prefix,
        )
        deletion_average = self.process_time_measurements(
            "deletion",
            deletion_time_measures,
            accepted_deletion_deviation_percent,
            msg_prefix,
        )

        # All the results are OK and the test passes; push the results to codespeed
        push_to_pvc_time_dashboard(self.interface, "1-pvc-creation", creation_average)
        push_to_pvc_time_dashboard(self.interface, "1-pvc-deletion", deletion_average)
Example 27
    def test_create_multiple_sc_with_different_pool_name(
            self, teardown_factory):
        """
        This test function does the following:
        * Creates multiple Storage Classes with different pool names
        * Creates PVCs using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """

        # Create 2 storageclasses, each with different pool name
        cbp_list = []
        sc_list = []
        for i in range(2):
            log.info("Creating cephblockpool")
            cbp_obj = helpers.create_ceph_block_pool()
            log.info(f"{cbp_obj.name} created successfully")
            log.info(f"Creating a RBD storage class using {cbp_obj.name}")
            cbp_list.append(cbp_obj)
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            )

            log.info(f"StorageClass: {sc_obj.name} "
                     f"created successfully using {cbp_obj.name}")
            sc_list.append(sc_obj)
            teardown_factory(cbp_obj)
            teardown_factory(sc_obj)

        # Create PVCs using each SC
        pvc_list = []
        for i in range(2):
            log.info(f"Creating a PVC using {sc_list[i].name}")
            pvc_obj = helpers.create_pvc(sc_list[i].name)
            log.info(f"PVC: {pvc_obj.name} created successfully using "
                     f"{sc_list[i].name}")
            pvc_list.append(pvc_obj)
            teardown_factory(pvc_obj)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

        # Create app pod and mount each PVC
        pod_list = []
        for i in range(2):
            log.info(f"Creating an app pod and mount {pvc_list[i].name}")
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL,
                pvc_name=pvc_list[i].name,
            )
            log.info(f"{pod_obj.name} created successfully and "
                     f"mounted {pvc_list[i].name}")
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()

        # Run IO on each app pod for some time
        for pod in pod_list:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io("fs", size="2G")

        for pod in pod_list:
            get_fio_rw_iops(pod)