Code Example #1
    def test_ocs_347(self, resources):
        pod, pvc, storageclass = resources

        log.info("Creating RBD StorageClass")
        storageclass.append(
            helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=self.cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            )
        )
        log.info("Creating a PVC")
        pvc.append(helpers.create_pvc(sc_name=storageclass[0].name))
        for pvc_obj in pvc:
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()
        log.info(f"Creating a pod on with pvc {pvc[0].name}")
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvc[0].name,
            pod_dict_path=constants.NGINX_POD_YAML,
        )
        pod.append(pod_obj)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
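
Cleanup of the objects appended above is left to the `resources` fixture, which is not shown in this example. Below is a minimal sketch of what such a fixture could look like, assuming the `delete()` and `ocp.wait_for_delete()` calls used in the fixture examples further down this page; the fixture name and teardown order are illustrative only.

import pytest

@pytest.fixture()
def resources(request):
    # Hypothetical fixture: hands the test three lists to append created
    # objects to, then deletes them in reverse dependency order on teardown.
    pod, pvc, storageclass = [], [], []

    def finalizer():
        # Pods first, then PVCs, then StorageClasses.
        for resource_list in (pod, pvc, storageclass):
            for obj in resource_list:
                obj.delete()
                obj.ocp.wait_for_delete(obj.name)

    request.addfinalizer(finalizer)
    return pod, pvc, storageclass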
Code Example #2
def create_rbd_storageclass(request):
    """
    Create an RBD storage class
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the RBD storage class
        """
        if class_instance.sc_obj.get():
            class_instance.sc_obj.delete()
            class_instance.sc_obj.ocp.wait_for_delete(
                class_instance.sc_obj.name)

    request.addfinalizer(finalizer)

    if not hasattr(class_instance, "reclaim_policy"):
        class_instance.reclaim_policy = constants.RECLAIM_POLICY_DELETE

    class_instance.sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=class_instance.cbp_obj.name,
        secret_name=class_instance.rbd_secret_obj.name,
        reclaim_policy=class_instance.reclaim_policy,
    )
    assert class_instance.sc_obj, "Failed to create storage class"
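
The fixture stores the StorageClass on the requesting test class through `request.node.cls`, so a consumer only needs to reference the fixture and can then read `self.sc_obj`. A hypothetical consumer, assuming the fixture is registered in a conftest.py and that companion fixtures have already set `cbp_obj` and `rbd_secret_obj` on the class:

import pytest

# `constants` is the same module used throughout these examples.
@pytest.mark.usefixtures("create_rbd_storageclass")
class TestWithRbdStorageClass:
    # Optional class attribute; without it the fixture falls back to
    # constants.RECLAIM_POLICY_DELETE.
    reclaim_policy = constants.RECLAIM_POLICY_DELETE

    def test_sc_created(self):
        # sc_obj was attached to the class by the fixture.
        assert self.sc_obj.name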
Code Example #3
    def test_create_storage_class_with_wrong_provisioner(self, interface):
        """
        Test function which creates a Storage Class with a
        wrong provisioner and verifies the PVC status
        """
        log.info(f"Creating a {interface} storage class")
        if interface == "RBD":
            interface_type = constants.CEPHBLOCKPOOL
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        sc_obj = helpers.create_storage_class(
            interface_type=interface_type,
            interface_name=interface_name,
            secret_name=secret,
            provisioner=constants.AWS_EFS_PROVISIONER,
        )
        log.info(
            f"{interface}Storage class: {sc_obj.name} created successfully")

        # Create PVC
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, do_reload=False)

        # Check PVC status
        pvc_output = pvc_obj.get()
        pvc_status = pvc_output["status"]["phase"]
        log.info(f"Status of PVC {pvc_obj.name} after creation: {pvc_status}")
        log.info(f"Waiting for status '{constants.STATUS_PENDING}' "
                 f"for 20 seconds (it shouldn't change)")

        pvc_obj.ocp.wait_for_resource(
            resource_name=pvc_obj.name,
            condition=constants.STATUS_PENDING,
            timeout=20,
            sleep=5,
        )
        # Check PVC status again after 20 seconds
        pvc_output = pvc_obj.get()
        pvc_status = pvc_output["status"]["phase"]
        assert_msg = (
            f"PVC {pvc_obj.name} is not in {constants.STATUS_PENDING} "
            f"status")
        assert pvc_status == constants.STATUS_PENDING, assert_msg
        log.info(f"Status of {pvc_obj.name} after 20 seconds: {pvc_status}")

        # Delete PVC
        log.info(f"Deleting PVC: {pvc_obj.name}")
        assert pvc_obj.delete()
        log.info(f"PVC {pvc_obj.name} delete successfully")

        # Delete Storage Class
        log.info(f"Deleting Storageclass: {sc_obj.name}")
        assert sc_obj.delete()
        log.info(f"Storage Class: {sc_obj.name} deleted successfully")
Code Example #4
def setup_fs():
    log.info("Creating CEPHFS Secret")
    global CEPHFS_SECRET_OBJ
    CEPHFS_SECRET_OBJ = helpers.create_secret(constants.CEPHFILESYSTEM)

    global CEPHFS_SC_OBJ
    log.info("Creating CephFS Storage class ")
    CEPHFS_SC_OBJ = helpers.create_storage_class(
        constants.CEPHFILESYSTEM,
        helpers.get_cephfs_data_pool_name(),
        CEPHFS_SECRET_OBJ.name,
    )
Code Example #5
    def create_new_pool_and_sc(self, secret_factory):

        self.pool_name = (
            f"pas-test-pool-{Interfaces_info[self.interface]['name'].lower()}")
        secret = secret_factory(interface=self.interface)
        self.create_new_pool(self.pool_name)
        # Creating new StorageClass (pool) for the test.
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface,
            interface_name=self.pool_name,
            secret_name=secret.name,
            sc_name=self.pool_name,
            fs_name=self.pool_name,
        )
        logger.info(f"The new SC is : {self.sc_obj.name}")
Code Example #6
def setup_rbd():
    """
    Set up the environment:
    create a replicated pool, secret, and storage class for RBD
    """
    log.info("Creating CephBlockPool")
    global RBD_POOL
    RBD_POOL = helpers.create_ceph_block_pool()
    global RBD_SECRET_OBJ
    RBD_SECRET_OBJ = helpers.create_secret(constants.CEPHBLOCKPOOL)
    global RBD_SC_OBJ
    log.info("Creating RBD Storage class ")
    RBD_SC_OBJ = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=RBD_POOL.name,
        secret_name=RBD_SECRET_OBJ.name,
    )
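
No teardown is shown for `setup_rbd()`. A sketch of a matching `teardown_rbd()`, assuming the `delete()`/`ocp.wait_for_delete()` pattern from the fixture examples above and deleting in reverse creation order; this is illustrative, not the module's actual teardown code.

def teardown_rbd():
    """
    Illustrative teardown for setup_rbd(): remove the StorageClass, secret,
    and CephBlockPool created above, in reverse creation order.
    """
    log.info("Deleting RBD Storage class")
    RBD_SC_OBJ.delete()
    RBD_SC_OBJ.ocp.wait_for_delete(RBD_SC_OBJ.name)

    log.info("Deleting RBD secret")
    RBD_SECRET_OBJ.delete()

    log.info("Deleting CephBlockPool")
    RBD_POOL.delete()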
Code Example #7
def create_cephfs_storageclass(request):
    """
    Create a CephFS storage class
    """
    class_instance = request.node.cls

    def finalizer():
        """
        Delete the CephFS storage class
        """
        if class_instance.sc_obj.get():
            class_instance.sc_obj.delete()
            class_instance.sc_obj.ocp.wait_for_delete(
                class_instance.sc_obj.name)

    request.addfinalizer(finalizer)

    class_instance.sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHFILESYSTEM,
        interface_name=helpers.get_cephfs_data_pool_name(),
        secret_name=class_instance.cephfs_secret_obj.name,
    )
    assert class_instance.sc_obj, "Failed to create storage class"
Code Example #8
    def test_create_multiple_sc_with_same_pool_name(self, interface_type,
                                                    resources):
        """
        This test function does the following:
        * Creates multiple Storage Classes with the same pool name
        * Creates a PVC using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """
        # Unpack resources
        pods, pvcs, storageclasses = resources

        # Create 3 Storage Classes with same pool name
        if interface_type == constants.CEPHBLOCKPOOL:
            secret = self.rbd_secret_obj.name
            interface_name = self.cbp_obj.name
        else:
            interface_type = constants.CEPHFILESYSTEM
            secret = self.cephfs_secret_obj.name
            interface_name = helpers.get_cephfs_data_pool_name()
        for i in range(3):
            log.info(f"Creating a {interface_type} storage class")
            storageclasses.append(
                helpers.create_storage_class(
                    interface_type=interface_type,
                    interface_name=interface_name,
                    secret_name=secret,
                ))
            log.info(f"{interface_type}StorageClass: {storageclasses[i].name} "
                     f"created successfully")

        # Create PVCs using each SC
        for i in range(3):
            log.info(f"Creating a PVC using {storageclasses[i].name}")
            pvcs.append(helpers.create_pvc(storageclasses[i].name))
        for pvc in pvcs:
            helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND)
            pvc.reload()

        # Create app pod and mount each PVC
        for i in range(3):
            log.info(f"Creating an app pod and mount {pvcs[i].name}")
            pods.append(
                helpers.create_pod(
                    interface_type=interface_type,
                    pvc_name=pvcs[i].name,
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                ))
            for pod in pods:
                helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
                pod.reload()
            log.info(f"{pods[i].name} created successfully and "
                     f"mounted {pvcs[i].name}")

        # Run IO on each app pod for some time
        for pod in pods:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io("fs", size="2G")

        for pod in pods:
            get_fio_rw_iops(pod)
Code Example #9
    def test_pvc_multiple_snapshot_performance(
        self,
        pvc_factory,
        pod_factory,
        secret_factory,
        interface_type,
        snap_number,
    ):
        """
        1. Create a PVC
           The size depends on the storage capacity, but is not less than 1 GiB;
           it will use ~75% of the storage capacity (minimum capacity: 1 TiB)
        2. Fill the PVC with 80% of data
        3. Take a snapshot of the PVC and measure the total and CSI creation times
        4. Re-write the data on the PVC
        5. Take a snapshot of the PVC and measure the total and CSI creation times
        6. Repeat steps 4-5 for the number of snapshots we want to take: 512
           (this is run by an outside script to keep memory consumption low)
        7. Print all information

        Raises:
            StorageNotSufficientException: in case of not enough capacity

        """

        # Getting the full path for the test logs
        self.results_path = get_full_test_logs_path(cname=self)
        self.full_log_path = f"{self.results_path}-{interface_type}-{snap_number}"
        log.info(f"Logs file path name is : {self.full_log_path}")
        log.info(f"Reslut path is : {self.results_path}")

        self.full_teardown = True
        self.num_of_snaps = snap_number
        if self.dev_mode:
            self.num_of_snaps = 2

        log.info(
            f"Going to create {self.num_of_snaps} {interface_type} snapshots")

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        self.need_capacity = int((self.num_of_snaps + 2) * 1.35)

        # Test will run only on system with enough capacity
        if self.capacity_to_use < self.need_capacity:
            err_msg = (f"The system have only {self.ceph_capacity} GiB, "
                       f"we want to use only {self.capacity_to_use} GiB, "
                       f"and we need {self.need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        self.pvc_size = int(self.capacity_to_use / (self.num_of_snaps + 2))
        if self.dev_mode:
            self.pvc_size = 5

        self.interface = interface_type
        self.sc_name = "pas-testing-rbd"
        pool_name = self.sc_name
        if self.interface == constants.CEPHFILESYSTEM:
            self.sc_name = "pas-testing-cephfs"
            pool_name = f"{self.sc_name}-data0"

        # Creating new storage pool
        self.create_new_pool(self.sc_name)

        # Creating new StorageClass (pool) for the test.
        secret = secret_factory(interface=self.interface)
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface,
            interface_name=pool_name,
            secret_name=secret.name,
            sc_name=self.sc_name,
            fs_name=self.sc_name,
        )
        log.info(f"The new SC is : {self.sc_obj.name}")
        log.debug(f"All SC data is {json.dumps(self.sc_obj.data, indent=3)}")

        # Create new VolumeSnapshotClass
        self.snap_class = self.create_snapshotclass(self.interface)

        # Create new PVC
        log.info(f"Creating {self.pvc_size} GiB PVC of {interface_type}")
        self.pvc_obj = pvc_factory(
            interface=self.interface,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            status=constants.STATUS_BOUND,
            project=self.proj,
        )

        # Create a pod which will attach to the new PVC
        log.info("Creating a pod")
        self.pod_obj = pod_factory(
            interface=self.interface,
            pvc=self.pvc_obj,
            status=constants.STATUS_RUNNING,
            pod_dict_path=constants.PERF_POD_YAML,
        )

        # Calculating the file size as 80% of the PVC size
        self.filesize = self.pvc_obj.size * 0.80
        # Change the file size to MB for the FIO function
        self.file_size = f"{int(self.filesize * constants.GB2MB)}M"
        self.file_name = self.pod_obj.name

        log.info(
            f"Total capacity size is : {self.ceph_capacity} GiB, "
            f"Going to use {self.need_capacity} GiB, "
            f"With {self.num_of_snaps} Snapshots to {self.pvc_size} GiB PVC. "
            f"File size to be written is : {self.file_size} "
            f"with the name of {self.file_name}")

        # Reading basic snapshot yaml file
        self.snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML
        self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
        self.fs_type = "cephfs"
        if interface_type == constants.CEPHBLOCKPOOL:
            self.snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML
            self.fs_type = "rbd"
            self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
        with open(self.snap_yaml, "r") as stream:
            try:
                self.snap_templ = yaml.safe_load(stream)
                self.snap_templ["spec"]["volumeSnapshotClassName"] = self.sc
                self.snap_templ["spec"]["source"][
                    "persistentVolumeClaimName"] = self.pvc_obj.name
            except yaml.YAMLError as exc:
                log.error(f"Can not read template yaml file {exc}")
        log.debug(
            f"Snapshot yaml file : {self.snap_yaml} "
            f"Content of snapshot yaml file {json.dumps(self.snap_templ, indent=4)}"
        )

        self.build_fio_command()
        self.start_time = self.get_time()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path,
                           "multiple_snapshots"))
        full_results.all_results = self.run()
        self.end_time = self.get_time()
        full_results.add_key(
            "avg_creation_time",
            f"{float(self.total_creation_time / self.num_of_snaps):.2f}",
        )
        full_results.add_key(
            "avg_csi_creation_time",
            f"{float(self.total_csi_creation_time / self.num_of_snaps):.2f}",
        )
        full_results.add_key(
            "avg_creation_speed",
            f"{float(self.total_creation_speed / self.num_of_snaps):.2f}",
        )
        full_results.add_key("test_time", {
            "start": self.start_time,
            "end": self.end_time
        })

        # Writing the analyzed test results to the Elastic-Search server
        if full_results.es_write():
            res_link = full_results.results_link()
            log.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtests (2 - according to the parameters)
            self.write_result_to_file(res_link)
Code Example #10
    def test_create_multiple_sc_with_different_pool_name(
            self, teardown_factory):
        """
        This test function does the following:
        * Creates multiple Storage Classes with different pool names
        * Creates a PVC using each Storage Class
        * Mounts each PVC to an app pod
        * Runs IO on each app pod
        """

        # Create 2 storageclasses, each with different pool name
        cbp_list = []
        sc_list = []
        for i in range(2):
            log.info("Creating cephblockpool")
            cbp_obj = helpers.create_ceph_block_pool()
            log.info(f"{cbp_obj.name} created successfully")
            log.info(f"Creating a RBD storage class using {cbp_obj.name}")
            cbp_list.append(cbp_obj)
            sc_obj = helpers.create_storage_class(
                interface_type=constants.CEPHBLOCKPOOL,
                interface_name=cbp_obj.name,
                secret_name=self.rbd_secret_obj.name,
            )

            log.info(f"StorageClass: {sc_obj.name} "
                     f"created successfully using {cbp_obj.name}")
            sc_list.append(sc_obj)
            teardown_factory(cbp_obj)
            teardown_factory(sc_obj)

        # Create PVCs using each SC
        pvc_list = []
        for i in range(2):
            log.info(f"Creating a PVC using {sc_list[i].name}")
            pvc_obj = helpers.create_pvc(sc_list[i].name)
            log.info(f"PVC: {pvc_obj.name} created successfully using "
                     f"{sc_list[i].name}")
            pvc_list.append(pvc_obj)
            teardown_factory(pvc_obj)
            helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
            pvc_obj.reload()

        # Create app pod and mount each PVC
        pod_list = []
        for i in range(2):
            log.info(f"Creating an app pod and mount {pvc_list[i].name}")
            pod_obj = helpers.create_pod(
                interface_type=constants.CEPHBLOCKPOOL,
                pvc_name=pvc_list[i].name,
            )
            log.info(f"{pod_obj.name} created successfully and "
                     f"mounted {pvc_list[i].name}")
            pod_list.append(pod_obj)
            teardown_factory(pod_obj)
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()

        # Run IO on each app pod for some time
        for pod in pod_list:
            log.info(f"Running FIO on {pod.name}")
            pod.run_io("fs", size="2G")

        for pod in pod_list:
            get_fio_rw_iops(pod)
Code Example #11
    def test_pvc_multiple_snapshot_performance(
        self,
        pvc_factory,
        pod_factory,
        secret_factory,
        interface_type,
        snap_number,
    ):
        """
        1. Create a PVC
           The size depends on the storage capacity, but is not less than 1 GiB;
           it will use ~75% of the storage capacity (minimum capacity: 1 TiB)
        2. Fill the PVC with 80% of data
        3. Take a snapshot of the PVC and measure the creation time
        4. Re-write the data on the PVC
        5. Take a snapshot of the PVC and measure the creation time
        6. Repeat steps 4-5 for the number of snapshots we want to take: 512
           (this is run by an outside script to keep memory consumption low)
        7. Print all information

        Raises:
            StorageNotSufficientException: in case of not enough capacity

        """

        self.num_of_snaps = snap_number
        if self.dev_mode:
            self.num_of_snaps = 2

        log.info(
            f"Going to Create {self.num_of_snaps} {interface_type} snapshots")

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        self.need_capacity = int((self.num_of_snaps + 2) * 1.35)

        # Test will run only on system with enough capacity
        if self.capacity_to_use < self.need_capacity:
            err_msg = (f"The system have only {self.ceph_capacity} GiB, "
                       f"we want to use only {self.capacity_to_use} GiB, "
                       f"and we need {self.need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        self.pvc_size = int(self.capacity_to_use / (self.num_of_snaps + 2))
        if self.dev_mode:
            self.pvc_size = 5

        self.interface = interface_type
        self.sc_name = "pas-testing-rbd"
        pool_name = self.sc_name
        if self.interface == constants.CEPHFILESYSTEM:
            self.sc_name = "pas-testing-cephfs"
            pool_name = f"{self.sc_name}-data0"

        # Creating new storage pool
        self.create_new_pool(self.sc_name)

        # Creating new StorageClass (pool) for the test.
        secret = secret_factory(interface=self.interface)
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface,
            interface_name=pool_name,
            secret_name=secret.name,
            sc_name=self.sc_name,
            fs_name=self.sc_name,
        )
        log.info(f"The new SC is : {self.sc_obj.name}")
        log.debug(f"All Sc data is {json.dumps(self.sc_obj.data, indent=3)}")

        # Create new VolumeSnapshotClass
        self.snap_class = self.create_snapshotclass(self.interface)

        # Create new PVC
        log.info(f"Creating {self.pvc_size} GiB PVC of {interface_type}")
        self.pvc_obj = pvc_factory(
            interface=self.interface,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            status=constants.STATUS_BOUND,
            project=self.proj,
        )

        # Create a pod which will attach to the new PVC
        log.info("Creating a pod")
        self.pod_obj = pod_factory(
            interface=self.interface,
            pvc=self.pvc_obj,
            status=constants.STATUS_RUNNING,
            pod_dict_path=constants.PERF_POD_YAML,
        )

        # Calculating the file size as 80% of the PVC size
        self.filesize = self.pvc_obj.size * 0.80
        # Change the file size to MB for the FIO function
        self.file_size = f"{int(self.filesize * constants.GB2MB)}M"
        self.file_name = self.pod_obj.name

        log.info(
            f"Total capacity size is : {self.ceph_capacity} GiB, "
            f"Going to use {self.need_capacity} GiB, "
            f"With {self.num_of_snaps} Snapshots to {self.pvc_size} GiB PVC. "
            f"File size to be written is : {self.file_size} "
            f"with the name of {self.file_name}")

        # Reading basic snapshot yaml file
        self.snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML
        self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
        self.fs_type = "cephfs"
        if interface_type == constants.CEPHBLOCKPOOL:
            self.snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML
            self.fs_type = "rbd"
            self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
        with open(self.snap_yaml, "r") as stream:
            try:
                self.snap_templ = yaml.safe_load(stream)
                self.snap_templ["spec"]["volumeSnapshotClassName"] = self.sc
                self.snap_templ["spec"]["source"][
                    "persistentVolumeClaimName"] = self.pvc_obj.name
            except yaml.YAMLError as exc:
                log.error(f"Can not read template yaml file {exc}")
        log.debug(
            f"Snapshot yaml file : {self.snap_yaml} "
            f"Content of snapshot yaml file {json.dumps(self.snap_templ, indent=4)}"
        )

        self.get_log_names()
        self.build_fio_command()

        self.run()
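
The snapshot loop itself runs inside `self.run()` (driven by an outside script) and does not appear on this page. A rough sketch of the idea, reusing the template prepared above; `create_snapshot_from_dict` and the timing are hypothetical stand-ins rather than ocs-ci APIs.

import copy
import time

def take_snapshots(snap_templ, num_of_snaps, create_snapshot_from_dict):
    """
    Illustrative loop: create num_of_snaps snapshots from the prepared
    VolumeSnapshot template and record per-snapshot creation times.
    create_snapshot_from_dict is a hypothetical callable that submits the
    manifest to the cluster and blocks until the snapshot is ready.
    """
    creation_times = []
    for snap_num in range(1, num_of_snaps + 1):
        body = copy.deepcopy(snap_templ)
        body["metadata"]["name"] = f"snapshot-{snap_num}"
        start = time.time()
        create_snapshot_from_dict(body)
        creation_times.append(time.time() - start)
    return creation_times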