Beispiel #1
0
    def __init__(self):
        """
        Cluster object initializer; this object needs to be initialized
        after cluster deployment. However it is harmless to do anywhere.

        Loads the CephCluster CR (and CephFilesystem CR, when present) from
        the configured cluster namespace, wraps them in OCS objects, and
        performs an initial ``scan_cluster()`` to populate pod lists/counts.
        """
        # cluster_name is name of cluster in rook of type CephCluster
        # Hoist the namespace lookup: every OCP wrapper below uses the same one.
        cluster_namespace = config.ENV_DATA['cluster_namespace']

        self.POD = ocp.OCP(kind='Pod', namespace=cluster_namespace)
        self.CEPHCLUSTER = ocp.OCP(
            kind='CephCluster', namespace=cluster_namespace)
        self.CEPHFS = ocp.OCP(kind='CephFilesystem',
                              namespace=cluster_namespace)
        self.DEP = ocp.OCP(kind='Deployment',
                           namespace=cluster_namespace)

        # First item is assumed to be the (single) CephCluster CR.
        self.cluster_resource_config = self.CEPHCLUSTER.get().get('items')[0]
        try:
            self.cephfs_config = self.CEPHFS.get().get('items')[0]
        except IndexError as e:
            # No CephFilesystem CR deployed — tolerated; scan_cluster()
            # retries loading it on each call.
            logging.warning(e)
            logging.warning("No CephFS found")
            self.cephfs_config = None

        self._cluster_name = (
            self.cluster_resource_config.get('metadata').get('name'))
        self._namespace = (
            self.cluster_resource_config.get('metadata').get('namespace'))

        # We are not invoking ocs.create() here
        # assuming cluster creation is done somewhere after deployment
        # So just load ocs with existing cluster details
        self.cluster = ocs.OCS(**self.cluster_resource_config)
        if self.cephfs_config:
            self.cephfs = ocs.OCS(**self.cephfs_config)
        else:
            self.cephfs = None

        # Label selectors used to find the ceph daemon pods.
        self.mon_selector = constant.MON_APP_LABEL
        self.mds_selector = constant.MDS_APP_LABEL
        self.tool_selector = constant.TOOL_APP_LABEL
        self.mgr_selector = constant.MGR_APP_LABEL
        self.osd_selector = constant.OSD_APP_LABEL
        # Pod object caches, (re)populated by scan_cluster().
        self.mons = []
        self._ceph_pods = []
        self.mdss = []
        self.mgrs = []
        self.osds = []
        self.toolbox = None
        self.mds_count = 0
        self.mon_count = 0
        self.mgr_count = 0
        self.osd_count = 0

        self.scan_cluster()
        logging.info(f"Number of mons = {self.mon_count}")
        logging.info(f"Number of mds = {self.mds_count}")

        self.used_space = 0
Beispiel #2
0
    def scan_cluster(self):
        """
        Get accurate info on current state of pods

        Refreshes the cached pod object lists (mons, mdss, mgrs, osds,
        toolbox) and their counts, reloads the cluster CR, and lazily loads
        the CephFS CR if it was not available before.
        """
        self._ceph_pods = pod.get_all_pods(self._namespace)
        # TODO: Workaround for BZ1748325:
        # Keep only mon pods that are in Running state.
        # Build a fresh list instead of appending to self.mons — appending
        # would accumulate duplicate mon entries across repeated
        # scan_cluster() calls.
        running_mons = []
        for mon in pod.get_mon_pods(self.mon_selector, self.namespace):
            if mon.ocp.get_resource_status(
                    mon.name) == constant.STATUS_RUNNING:
                running_mons.append(mon)
        self.mons = running_mons
        # TODO: End of workaround for BZ1748325
        self.mdss = pod.get_mds_pods(self.mds_selector, self.namespace)
        self.mgrs = pod.get_mgr_pods(self.mgr_selector, self.namespace)
        self.osds = pod.get_osd_pods(self.osd_selector, self.namespace)
        self.toolbox = pod.get_ceph_tools_pod()

        # set port attrib on mon pods
        self.mons = list(map(self.set_port, self.mons))
        self.cluster.reload()
        if self.cephfs:
            self.cephfs.reload()
        else:
            # CephFS CR was absent at init time; try to load it now.
            try:
                self.cephfs_config = self.CEPHFS.get().get('items')[0]
                self.cephfs = ocs.OCS(**self.cephfs_config)
                self.cephfs.reload()
            except IndexError as e:
                logging.warning(e)
                logging.warning("No CephFS found")

        self.mon_count = len(self.mons)
        self.mds_count = len(self.mdss)
        self.mgr_count = len(self.mgrs)
        self.osd_count = len(self.osds)
Beispiel #3
0
    def create_snapshotclass(self, interface):
        """
        Creates own volumesnapshotclass

        Args:
            interface (str): Interface type used

        Returns:
            ocs_obj (obj): Snapshotclass obj instances

        """
        # CephFS gets its own template/name; anything else is treated as RBD.
        is_cephfs = interface == constants.CEPHFILESYSTEM
        template = (
            constants.CSI_CEPHFS_SNAPSHOTCLASS_YAML
            if is_cephfs
            else constants.CSI_RBD_SNAPSHOTCLASS_YAML
        )
        snapclass_name = "cephfssnapshotclass" if is_cephfs else "rbdsnapshotclass"

        snapshotclass_data = templating.load_yaml(template)
        snapshotclass_data["metadata"]["name"] = snapclass_name
        ocs_obj = ocs.OCS(**snapshotclass_data)
        created_snapclass = ocs_obj.create(do_reload=True)
        assert created_snapclass, f"Failed to create snapshot class {snapclass_name}"
        return ocs_obj
Beispiel #4
0
    def create_snapshotclass(self, interface):
        """
        Creates own VolumeSnapshotClass

        Args:
            interface (str): Interface type used

        Returns:
            ocs_obj (obj): SnapshotClass obj instances

        Raises:
            Exception: if the VolumeSnapshotClass could not be created.

        """
        if interface == constants.CEPHFILESYSTEM:
            snapclass_name = "pas-test-cephfs-snapshot-class"
        else:
            snapclass_name = "pas-test-rbd-snapshot-class"

        # Pick the YAML template that matches the interface.
        yaml_files = {
            constants.CEPHBLOCKPOOL: constants.CSI_RBD_SNAPSHOTCLASS_YAML,
            constants.CEPHFILESYSTEM: constants.CSI_CEPHFS_SNAPSHOTCLASS_YAML,
        }
        snapshotclass_data = templating.load_yaml(yaml_files[interface])

        snapshotclass_data["metadata"]["name"] = snapclass_name
        ocs_obj = ocs.OCS(**snapshotclass_data)
        log.info(f"Creating new snapshot class : {snapclass_name}")
        try:
            created_snapclass = ocs_obj.create(do_reload=True)
            log.debug(created_snapclass)
        except Exception as ex:
            err_msg = f"Failed to create new snapshot class : {snapclass_name} [{ex}]"
            log.error(err_msg)
            # Chain the original exception so the root cause is preserved
            # in the traceback instead of being flattened into the message.
            raise Exception(err_msg) from ex
        return ocs_obj
Beispiel #5
0
    def scan_cluster(self):
        """
        Get accurate info on current state of pods
        """
        ns = self.namespace
        self._ceph_pods = pod.get_all_pods(self._namespace)
        self.mons = pod.get_mon_pods(self.mon_selector, ns)
        self.mdss = pod.get_mds_pods(self.mds_selector, ns)
        self.mgrs = pod.get_mgr_pods(self.mgr_selector, ns)
        self.osds = pod.get_osd_pods(self.osd_selector, ns)
        self.toolbox = pod.get_ceph_tools_pod()

        # set port attrib on mon pods
        self.mons = [self.set_port(mon) for mon in self.mons]
        self.cluster.reload()
        if not self.cephfs:
            # CephFS CR was not available earlier; try loading it now.
            try:
                self.cephfs_config = self.CEPHFS.get().get('items')[0]
                self.cephfs = ocs.OCS(**self.cephfs_config)
                self.cephfs.reload()
            except IndexError as e:
                logging.warning(e)
                logging.warning("No CephFS found")
        else:
            self.cephfs.reload()

        self.mon_count = len(self.mons)
        self.mds_count = len(self.mdss)
        self.mgr_count = len(self.mgrs)
        self.osd_count = len(self.osds)
Beispiel #6
0
    def __init__(self):
        """Set up OCP wrappers, wait for the cluster CR, and load it as OCS."""
        namespace = config.ENV_DATA["cluster_namespace"]
        self.POD = ocp.OCP(kind="Pod", namespace=namespace)
        self.CEPHCLUSTER = ocp.OCP(kind="CephCluster", namespace=namespace)

        self.wait_for_cluster_cr()
        metadata = self.cluster_resource.get("metadata")
        self._cluster_name = metadata.get("name")
        self._namespace = metadata.get("namespace")
        self.cluster = ocs.OCS(**self.cluster_resource)
        self.wait_for_nooba_cr()
Beispiel #7
0
    def __init__(self):
        """Set up OCP wrappers, wait for the cluster CR, and load it as OCS."""
        ns = config.ENV_DATA['cluster_namespace']
        self.POD = ocp.OCP(kind='Pod', namespace=ns)
        self.CEPHCLUSTER = ocp.OCP(kind='CephCluster', namespace=ns)

        self.wait_for_cluster_cr()
        cr_metadata = self.cluster_resource.get('metadata')
        self._cluster_name = cr_metadata.get('name')
        self._namespace = cr_metadata.get('namespace')
        self.cluster = ocs.OCS(**self.cluster_resource)
        self.wait_for_nooba_cr()
    def test_pvc_clone_performance_multiple_files(
        self,
        secret_factory,
        interface,
        copies,
        timeout,
    ):
        """
        Test assign nodeName to a pod using RWX pvc
        Each kernel (unzipped) is 892M and 61694 files
        The test creates a pvc and a pods, writes kernel files multiplied by number of copies
        The test creates number of clones samples, calculates creation and deletion times for each one the clones
        and calculates the average creation and average deletion times
        """

        # Initialize some variables
        self.interface = interface
        self.timeout = timeout
        self.pvc_size = "100"
        # In dev mode, shrink the PVC and the data set so the test runs fast.
        if self.dev_mode:
            self.pvc_size = "10"
            copies = 1
        self.results_path = get_full_test_logs_path(cname=self)
        # Initialize the results doc file.
        full_results = self.init_full_results(
            ClonesResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "test_pvc_clone_performance_multiple_files",
            ))
        # Filled in below by parsing the writer pod's log output.
        files_written = ""
        data_written = ""

        test_start_time = self.get_time()

        # Create new pool and sc only for RBD; for CephFS use the default
        if self.interface == constants.CEPHBLOCKPOOL:
            # Creating new pool to run the test on it
            self.create_new_pool_and_sc(secret_factory)
        else:
            self.sc_obj = ocs.OCS(
                kind="StorageCluster",
                metadata={
                    "namespace": self.namespace,
                    "name": Interfaces_info[self.interface]["sc"],
                },
            )
            self.pool_name = "ocs-storagecluster-cephfilesystem"
        # Create a PVC
        self.create_pvc_and_wait_for_bound()
        # Create a POD that writes `copies` kernel trees into the PVC
        self.create_pod_and_wait_for_completion(
            command=["/opt/multiple_files.sh"],
            command_args=[f"{copies}", "/mnt"],
        )

        # Get the number of files and total written data from the pod
        # (the write script prints both as the last token of its log lines)
        for line in self.pod_object.ocp.get_logs(
                name=self.pod_object.name).split("\n"):
            if "Number Of Files" in line:
                files_written = line.split(" ")[-1]
            if "Total Data" in line:
                data_written = line.split(" ")[-1]
        logger.info("Getting the amount of data written to the PVC")
        logger.info(f"The amount of written data is {data_written}")
        logger.info(
            f"For {self.interface} - The number of files written to the pod is {int(files_written):,}"
        )

        # increasing the timeout since clone creation time is longer than pod attach time
        self.timeout = 18000

        # taking the time, so parsing the provision log will be faster.
        start_time = self.get_time("csi")
        clones_list = self.create_and_delete_clones()

        # Measure Creation / Deletion time of all clones
        results_times = performance_lib.get_pvc_provision_times(
            interface=self.interface,
            pvc_name=clones_list,
            start_time=start_time,
        )

        test_end_time = self.get_time()

        logger.info(
            f"Printing clone creation and deletion times for {self.number_of_clones} clones "
            f"on {self.interface} PVC of size {self.pvc_size} GB:")
        # Produce ES report
        full_results.analyse_results(results_times, speed=False)
        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })
        full_results.add_key("clones_number", self.number_of_clones)
        full_results.add_key("files_number", files_written)
        full_results.all_results = results_times
        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (2 - according to the parameters)
            self.results_path = get_full_test_logs_path(
                cname=self, fname="test_pvc_clone_performance_multiple_files")
            self.write_result_to_file(res_link)
    def test_clone_create_delete_performance(self, secret_factory,
                                             interface_type, pvc_size,
                                             file_size):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create clones for an existing pvc,
        Measure clones average creation time and speed
        Delete the created clone
        Measure clone average deletion time and speed
        Note: by increasing max_num_of_clones value you increase number of the clones to be created/deleted
        """

        # Initialize some variables
        self.interface = interface_type
        self.timeout = 18000
        self.pvc_size = pvc_size
        self.results_path = get_full_test_logs_path(cname=self)
        file_size_mb = convert_device_size(file_size, "MB")
        # Initialize the results doc file.
        full_results = self.init_full_results(
            ClonesResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_clone_performance",
            ))

        test_start_time = self.get_time()

        # Create new pool and sc only for RBD; for CephFS use the default
        if self.interface == constants.CEPHBLOCKPOOL:
            # Creating new pool to run the test on it
            self.create_new_pool_and_sc(secret_factory)
        else:
            self.sc_obj = ocs.OCS(
                kind="StorageCluster",
                metadata={
                    "namespace": self.namespace,
                    "name": Interfaces_info[self.interface]["sc"],
                },
            )
            self.pool_name = "ocs-storagecluster-cephfilesystem"
        # Create a PVC
        self.create_pvc_and_wait_for_bound()
        # Create a POD and fill the PVC with data
        self.create_pod_and_wait_for_completion(filesize=f"{file_size_mb}M")
        # taking the time, so parsing the provision log will be faster.
        start_time = self.get_time("csi")
        self.clones_list = self.create_and_delete_clones()

        # Measure Creation / Deletion time of all clones
        results_times = performance_lib.get_pvc_provision_times(
            interface=self.interface,
            pvc_name=self.clones_list,
            start_time=start_time,
        )

        test_end_time = self.get_time()

        logger.info(
            f"Printing clone creation time and speed for {self.number_of_clones} clones "
            f"on {self.interface} PVC of size {self.pvc_size} GB:")
        # Produce ES report; speed is only reported for CephFS clones.
        speed = self.interface == constants.CEPHFILESYSTEM
        full_results.analyse_results(results_times,
                                     total_data=file_size_mb,
                                     speed=speed)
        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })
        full_results.add_key("total_clone_number", self.number_of_clones)
        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (8 - according to the parameters)
            self.write_result_to_file(res_link)
Beispiel #10
0
    def test_pvc_multiple_clone_performance(
        self,
        interface_iterate,
        secret_factory,
    ):
        """
        1. Creating PVC
           PVC size is calculated in the test and depends on the storage capacity, but not less then 1 GiB
           it will use ~75% capacity of the Storage, Min storage capacity 1 TiB
        2. Fill the PVC with 70% of data
        3. Take a clone of the PVC and measure Total time and speed of creation of each clone
            by reading start creation and end creation times from relevant logs
        4. Measure CSI time for creation of each clone
        5. Repeat the previous steps number of times (maximal num_of_clones is 512)
        6. Print and push to the ES all the measured statistics for all the clones.

        Raises:
            StorageNotSufficientException: in case of not enough capacity on the cluster
            exceptions.BenchmarkTestFailed: if not all requested clones were created

        """

        log.info(
            f"Total capacity size is : {self.ceph_capacity} GiB, "
            f"Going to use {self.need_capacity} GiB, "
            f"With {self.num_of_clones} clones to {self.pvc_size} GiB PVC. "
            f"File size to be written is : {self.file_size} ")

        self.interface = interface_iterate

        # Create new pool and sc only for RBD; for CephFS use the default
        if self.interface == constants.CEPHBLOCKPOOL:
            # Creating new pool to run the test on it
            self.create_new_pool_and_sc(secret_factory)
        else:
            # use the default ceph filesystem pool
            self.sc_obj = ocs.OCS(
                kind="StorageCluster",
                metadata={
                    "namespace": self.namespace,
                    "name": Interfaces_info[self.interface]["sc"],
                },
            )

        # Create a PVC
        self.create_testing_pvc_and_wait_for_bound()

        # Create a POD
        self.create_testing_pod_and_wait_for_completion(
            filesize=self.file_size)

        # Running the test
        creation_time_list = []
        creation_speed_list = []
        csi_creation_time_list = []
        self.cloned_obj_list = []
        for test_num in range(1, self.num_of_clones + 1):
            log.info(f"Starting test number {test_num}")
            try:
                cloned_obj, ct, csi_ct = self.create_clone(test_num)
            except Exception as e:
                # Stop at the first failure; the completeness check below
                # will turn this into a BenchmarkTestFailed.
                log.error(f"Failed to create clone number {test_num} : [{e}]")
                break
            self.cloned_obj_list.append(cloned_obj)
            speed = self.filesize / ct
            creation_time_list.append(ct)
            creation_speed_list.append(speed)
            csi_creation_time_list.append(csi_ct)

        # Log the per-clone results
        for i, val in enumerate(self.cloned_obj_list):
            log.info(f"The Results for clone number {i+1} ({val}) :")
            log.info(
                f"  Creation time is     : {creation_time_list[i]:,.3f} secs.")
            log.info(
                f"  Csi Creation time is : {csi_creation_time_list[i]:,.3f} secs."
            )
            log.info(
                f"  Creation speed is    : {creation_speed_list[i]:,.3f} MB/sec."
            )

        # Fail before averaging: statistics.mean() raises StatisticsError on
        # an empty list (e.g. when the very first clone creation failed),
        # which would mask the intended BenchmarkTestFailed.
        if len(self.cloned_obj_list) != self.num_of_clones:
            log.error("Not all clones created.")
            raise exceptions.BenchmarkTestFailed("Not all clones created.")

        average_creation_time = statistics.mean(creation_time_list)
        average_creation_speed = statistics.mean(creation_speed_list)
        average_csi_creation_time = statistics.mean(csi_creation_time_list)

        log.info("The Average results are :")
        log.info(
            f"  Average creation time is     : {average_creation_time:,.3f} secs."
        )
        log.info(
            f"  Average csi creation time is : {average_csi_creation_time:,.3f} secs."
        )
        log.info(
            f"  Average creation speed is    : {average_creation_speed:,.3f} MB/sec."
        )

        self.results_path = get_full_test_logs_path(cname=self)
        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_multiple_clone_measurement",
            ))

        full_results.add_key("multi_clone_creation_time", creation_time_list)
        full_results.add_key("multi_clone_creation_time_average",
                             average_creation_time)
        full_results.add_key("multi_clone_creation_speed", creation_speed_list)
        full_results.add_key("multi_clone_creation_speed_average",
                             average_creation_speed)
        full_results.add_key("multi_clone_csi_creation_time",
                             csi_creation_time_list)
        full_results.add_key("multi_clone_csi_creation_time_average",
                             average_csi_creation_time)

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            log.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (4 - according to the parameters)
            self.write_result_to_file(res_link)