Beispiel #1
0
def check_nodes_specs(min_memory, min_cpu):
    """
    Check that the cluster worker nodes meet the required minimum CPU and memory

    Args:
        min_memory (int): The required minimum memory in bytes
        min_cpu (int): The required minimum number of vCPUs

    Returns:
        bool: True if all nodes meet the required minimum specs, False otherwise

    """
    nodes = get_nodes()
    # Fetch each node's data exactly once: the original called node.get()
    # three times per node, and each call may issue a fresh API request.
    node_data_list = [node.get() for node in nodes]
    log.info(f"Checking following nodes with worker selector (assuming that "
             f"this is ran in CI and there are no worker nodes without OCS):\n"
             f"{[data.get('metadata').get('name') for data in node_data_list]}")
    for data in node_data_list:
        real_cpu = int(data["status"]["capacity"]["cpu"])
        # Capacity memory comes back as a k8s quantity string; normalize to bytes
        # so it is comparable with min_memory.
        real_memory = convert_device_size(
            data["status"]["capacity"]["memory"], "B")
        if real_cpu < min_cpu or real_memory < min_memory:
            log.warning(
                f"Node {data.get('metadata').get('name')} specs don't meet "
                f" the minimum required specs.\n The requirements are: "
                f"{min_cpu} CPUs and {min_memory} Memory\nThe node has: {real_cpu} "
                f"CPUs and {real_memory} Memory")
            return False
    log.info(f"Cluster worker nodes meet the minimum requirements of "
             f"{min_cpu} CPUs and {min_memory} Memory")
    return True
Beispiel #2
0
    def size(self):
        """
        Return the size of this PVC (spec.resources.requests.storage).

        Returns:
            int: PVC size in GB
        """
        spec = self.data.get('spec')
        requests = spec.get('resources').get('requests')
        unformatted_size = requests.get('storage')
        return convert_device_size(unformatted_size, 'GB')
Beispiel #3
0
    def size(self):
        """
        Return the size of this PVC (spec.resources.requests.storage).

        Returns:
            int: PVC size in GB
        """
        resources = self.data.get("spec").get("resources")
        raw_size = resources.get("requests").get("storage")
        return convert_device_size(raw_size, "GB")
Beispiel #4
0
def get_pv_size(pv_obj):
    """
    Get the size of a pv object

    Args:
        pv_obj (dict): A dictionary that represent the pv object

    Returns:
        int: The size of the pv object

    """
    capacity = pv_obj.get("spec").get("capacity")
    return convert_device_size(capacity.get("storage"), "GB")
Beispiel #5
0
def get_osd_pods_memory_sum():
    """
    Get the sum of memory of all OSD pods. This is used to determine the size
    needed for a PVC so when IO will be running over it the OSDs cache will be filled

    Returns:
        int: The sum of the OSD pods memory in GB

    """
    osd_pods = pod.get_osd_pods()
    # All OSD pods share the same memory spec, so read it from the first pod
    # and multiply by the pod count.
    mem_size_str = osd_pods[0].get_memory().get('osd')
    per_pod_gb = convert_device_size(
        unformatted_size=mem_size_str, units_to_covert_to='GB')
    return len(osd_pods) * per_pod_gb
 def get_mon_db_size_in_kb(self):
     """
     Get mon db size and returns the size in KB
     The output of 'du -sh' command contains the size of the directory and its path as string
     e.g. "67M\t/var/lib/ceph/mon/ceph-c/store.db"
     The size is extracted by splitting the string with '\t'.
     The size format for example: 1K, 234M, 2G
     For uniformity, this test uses KB
     """
     size = self.selected_mon_pod_obj.exec_cmd_on_pod(
         f"du -sh /var/lib/ceph/mon/ceph-{self.selected_mon_pod}/store.db",
         out_yaml_format=False,
     )
     size = re.split("\t+", size)
     # re.split always returns at least one element, so the original
     # 'len(size) > 0' check could never fail. Require both the size and
     # the path fields so empty or garbled 'du' output is caught here
     # instead of failing inside convert_device_size.
     assert len(size) > 1, f"Failed to get mon-{self.selected_mon_pod} db size"
     size = size[0]
     # Append "i" to turn the du suffix into a binary-unit suffix
     # (e.g. "67M" -> "67Mi") before converting to KB.
     return convert_device_size(size + "i", "KB")
    def test_clone_create_delete_performance(self, secret_factory,
                                             interface_type, pvc_size,
                                             file_size):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create clones for an existing pvc,
        Measure clones average creation time and speed
        Delete the created clone
        Measure clone average deletion time and speed
        Note: by increasing max_num_of_clones value you increase number of the clones to be created/deleted

        Args:
            secret_factory: fixture used when creating the new RBD pool/SC
            interface_type: storage interface constant (RBD or CephFS)
            pvc_size: size of the PVC to create (presumably GB -- confirm
                against the test parametrization)
            file_size: size of the file to write (a unit-suffixed string,
                converted to MB below)
        """

        # Initialize some variabels
        self.interface = interface_type
        self.timeout = 18000
        self.pvc_size = pvc_size
        self.results_path = get_full_test_logs_path(cname=self)
        file_size_mb = convert_device_size(file_size, "MB")
        # Initialize the results doc file.
        full_results = self.init_full_results(
            ClonesResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_clone_performance",
            ))

        test_start_time = self.get_time()

        # Create new pool and sc only for RBD, for CepgFS use thr default
        if self.interface == constants.CEPHBLOCKPOOL:
            # Creating new pool to run the test on it
            self.create_new_pool_and_sc(secret_factory)
        else:
            # For CephFS, reuse the default storage cluster SC and pool.
            self.sc_obj = ocs.OCS(
                kind="StorageCluster",
                metadata={
                    "namespace": self.namespace,
                    "name": Interfaces_info[self.interface]["sc"],
                },
            )
            self.pool_name = "ocs-storagecluster-cephfilesystem"
        # Create a PVC
        self.create_pvc_and_wait_for_bound()
        # Create a POD
        self.create_pod_and_wait_for_completion(filesize=f"{file_size_mb}M")
        # taking the time, so parsing the provision log will be faster.
        start_time = self.get_time("csi")
        self.clones_list = self.create_and_delete_clones()

        # Mesure Creation / Deletion time of all clones
        results_times = performance_lib.get_pvc_provision_times(
            interface=self.interface,
            pvc_name=self.clones_list,
            start_time=start_time,
        )

        test_end_time = self.get_time()

        logger.info(
            f"Printing clone creation time and speed for {self.number_of_clones} clones "
            f"on {self.interface} PVC of size {self.pvc_size} GB:")
        # Produce ES report
        # Speed is only reported for CephFS -- presumably because RBD clone
        # time does not scale with data size; confirm with the analyser.
        speed = True if self.interface == constants.CEPHFILESYSTEM else False
        full_results.analyse_results(results_times,
                                     total_data=file_size_mb,
                                     speed=speed)
        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })
        full_results.add_key("total_clone_number", self.number_of_clones)
        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (8 - according to the parameters)
            self.write_result_to_file(res_link)
Beispiel #8
0
    def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                             file_size, teardown_factory):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create single clone for an existing pvc,
        Measure clone creation time and speed
        Delete the created clone
        Measure clone deletion time and speed
        Note: by increasing max_num_of_clones value you increase number of the clones to be created/deleted

        Args:
            interface_type: storage interface constant (RBD or CephFS)
            pvc_size: size of the parent PVC (string, GB -- confirm against
                the test parametrization; it is concatenated with "Gi" below)
            file_size: unit-suffixed size string of the data written, e.g. "600M"
            teardown_factory: fixture that registers clones for cleanup
        """

        # Strip the unit suffix -- write_fio_on_pod expects a bare number.
        file_size_for_io = file_size[:-1]

        performance_lib.write_fio_on_pod(self.pod_obj, file_size_for_io)

        max_num_of_clones = 1
        clone_creation_measures = []
        clones_list = []
        timeout = 18000
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        namespace = self.pvc_obj.namespace
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        # Speeds below are computed as data size (MB) / elapsed seconds.
        file_size_mb = convert_device_size(file_size, "MB")

        # creating single clone ( or many one by one if max_mum_of_clones > 1)
        logger.info(
            f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                                  parent_pvc,
                                                  clone_yaml,
                                                  namespace,
                                                  storage_size=pvc_size + "Gi")
            teardown_factory(cloned_pvc_obj)
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
            )
            clones_list.append(cloned_pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                interface_type, cloned_pvc_obj.name)
            creation_speed = int(file_size_mb / create_time)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
            )
            creation_measures = {
                "clone_num": i + 1,
                "time": create_time,
                "speed": creation_speed,
            }
            clone_creation_measures.append(creation_measures)

        # deleting one by one and measuring deletion times and speed for each one of the clones create above
        # in case of single clone will run one time
        clone_deletion_measures = []

        logger.info(
            f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            cloned_pvc_obj = clones_list[i]
            # Capture the reclaim policy before deleting: it decides whether
            # we must also wait for the backing PV to go away.
            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface_type, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            deletion_speed = int(file_size_mb / delete_time)
            logger.info(
                f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
            )
            deletion_measures = {
                "clone_num": i + 1,
                "time": delete_time,
                "speed": deletion_speed,
            }
            clone_deletion_measures.append(deletion_measures)

        # Summary logging: dump all per-clone creation then deletion measures.
        logger.info(
            f"Printing clone creation time and speed for {max_num_of_clones} clones "
            f"on {interface_type} PVC of size {pvc_size} GB:")

        for c in clone_creation_measures:
            logger.info(
                f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
            )
            logger.info(
                f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
            )

        logger.info(
            f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
        )
        for d in clone_deletion_measures:
            logger.info(
                f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
            )

        logger.info("test_clones_creation_performance finished successfully.")
Beispiel #9
0
    def raw_block_pv(self):
        """
        Testing basic creation of app pod with RBD RWX raw block pv support

        Creates three raw-block RWX PVCs (MB-, GB- and TB-scale), attaches
        three pods to each, runs fio on every pod concurrently and collects
        IOPS results.

        Returns:
            tuple: (pods, pvcs, pvs) -- the created pod objects, PVC objects
                and their backing PV objects
        """
        worker_nodes = node.get_worker_nodes()
        pvcs = list()
        size_mb = "500Mi"
        size_gb = "10Gi"
        # Managed-service platforms cap PVC size, so scale the "TB" PVC down
        # (50Gi expressed in Ti); elsewhere use a real 1Ti PVC.
        if config.ENV_DATA["platform"].lower(
        ) in constants.MANAGED_SERVICE_PLATFORMS:
            size_tb = str(convert_device_size("50Gi", "TB")) + "Ti"
        else:
            size_tb = "1Ti"
        for size in [size_mb, size_gb, size_tb]:
            pvcs.append(
                helpers.create_pvc(
                    sc_name=self.sc_obj.name,
                    size=size,
                    access_mode=constants.ACCESS_MODE_RWX,
                    namespace=self.namespace,
                    volume_mode="Block",
                ))
        pvc_mb, pvc_gb, pvc_tb = pvcs[0], pvcs[1], pvcs[2]

        for pvc in pvcs:
            helpers.wait_for_resource_state(resource=pvc,
                                            state=constants.STATUS_BOUND,
                                            timeout=120)

        pvs = [pvc.backed_pv_obj for pvc in pvcs]

        # Attach 3 pods to each PVC (RWX allows multiple attachments),
        # spreading them over random worker nodes.
        pods = list()
        pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
        for pvc in pvc_mb, pvc_gb, pvc_tb:
            for _ in range(3):
                pods.append(
                    helpers.create_pod(
                        interface_type=constants.CEPHBLOCKPOOL,
                        pvc_name=pvc.name,
                        namespace=self.namespace,
                        raw_block_pv=True,
                        pod_dict_path=pod_dict,
                        node_name=random.choice(worker_nodes),
                    ))

        # Pods were appended in PVC order, so slice them back per PVC.
        pvc_mb_pods, pvc_gb_pods, pvc_tb_pods = pods[0:3], pods[3:6], pods[6:9]
        for pod in pods:
            helpers.wait_for_resource_state(resource=pod,
                                            state=constants.STATUS_RUNNING,
                                            timeout=120)
        storage_type = "block"

        # Run fio on all 9 pods concurrently; IO size is randomized and
        # scaled to the PVC size class (MB / GB / TB).
        with ThreadPoolExecutor() as p:
            for pod in pvc_mb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(10,200)}M",
                    invalidate=0,
                )
            for pod in pvc_gb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(1,5)}G",
                    invalidate=0,
                )
            for pod in pvc_tb_pods:
                log.info(f"running io on pod {pod.name}")
                p.submit(
                    pod.run_io,
                    storage_type=storage_type,
                    size=f"{random.randint(10,15)}G",
                    invalidate=0,
                )

        for pod in pods:
            get_fio_rw_iops(pod)
        return pods, pvcs, pvs
    def test_clone_create_delete_performance(self, interface_type, pvc_size,
                                             file_size, teardown_factory):
        """
        Write data (60% of PVC capacity) to the PVC created in setup
        Create clones for an existing pvc,
        Measure clones average creation time and speed
        Delete the created clone
        Measure clone average deletion time and speed
        Note: by increasing max_num_of_clones value you increase number of the clones to be created/deleted

        Args:
            interface_type: storage interface constant (RBD or CephFS)
            pvc_size: size of the parent PVC (string, GB -- confirm against
                the test parametrization; it is concatenated with "Gi" below)
            file_size: unit-suffixed size string of the data written, e.g. "600M"
            teardown_factory: fixture that registers clones for cleanup
        """

        # Strip the unit suffix -- write_fio_on_pod expects a bare number.
        file_size_for_io = file_size[:-1]

        performance_lib.write_fio_on_pod(self.pod_object, file_size_for_io)

        max_num_of_clones = 10
        clone_creation_measures = []
        csi_clone_creation_measures = []
        clones_list = []
        timeout = 18000
        sc_name = self.pvc_obj.backed_sc
        parent_pvc = self.pvc_obj.name
        clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        namespace = self.pvc_obj.namespace
        if interface_type == constants.CEPHFILESYSTEM:
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
        # Speeds below are computed as data size (MB) / elapsed seconds.
        file_size_mb = convert_device_size(file_size, "MB")

        logger.info(
            f"Start creating {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        # taking the time, so parsing the provision log will be faster.
        start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        for i in range(max_num_of_clones):
            logger.info(f"Start creation of clone number {i + 1}.")
            cloned_pvc_obj = pvc.create_pvc_clone(sc_name,
                                                  parent_pvc,
                                                  clone_yaml,
                                                  namespace,
                                                  storage_size=pvc_size + "Gi")
            teardown_factory(cloned_pvc_obj)
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND, timeout)

            cloned_pvc_obj.reload()
            logger.info(
                f"Clone with name {cloned_pvc_obj.name} for {pvc_size} pvc {parent_pvc} was created."
            )
            clones_list.append(cloned_pvc_obj)
            create_time = helpers.measure_pvc_creation_time(
                interface_type, cloned_pvc_obj.name)
            creation_speed = int(file_size_mb / create_time)
            logger.info(
                f"Clone number {i+1} creation time is {create_time} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {i+1} creation speed is {creation_speed} MB/sec for {pvc_size} GB pvc."
            )
            creation_measures = {
                "clone_num": i + 1,
                "time": create_time,
                "speed": creation_speed,
            }
            clone_creation_measures.append(creation_measures)
            # Also collect CSI-side creation time (parsed from the CSI
            # provision logs after start_time).
            csi_clone_creation_measures.append(
                performance_lib.csi_pvc_time_measure(self.interface,
                                                     cloned_pvc_obj, "create",
                                                     start_time))

        # deleting one by one and measuring deletion times and speed for each one of the clones create above
        # in case of single clone will run one time
        clone_deletion_measures = []
        csi_clone_deletion_measures = []

        logger.info(
            f"Start deleting {max_num_of_clones} clones on {interface_type} PVC of size {pvc_size} GB."
        )

        for i in range(max_num_of_clones):
            cloned_pvc_obj = clones_list[i]
            # Capture the reclaim policy before deleting: it decides whether
            # we must also wait for the backing PV to go away.
            pvc_reclaim_policy = cloned_pvc_obj.reclaim_policy
            cloned_pvc_obj.delete()
            logger.info(
                f"Deletion of clone number {i + 1} , the clone name is {cloned_pvc_obj.name}."
            )
            cloned_pvc_obj.ocp.wait_for_delete(cloned_pvc_obj.name, timeout)
            if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(cloned_pvc_obj.backed_pv)
            delete_time = helpers.measure_pvc_deletion_time(
                interface_type, cloned_pvc_obj.backed_pv)
            logger.info(
                f"Clone number {i + 1} deletion time is {delete_time} secs for {pvc_size} GB pvc."
            )

            deletion_speed = int(file_size_mb / delete_time)
            logger.info(
                f"Clone number {i+1} deletion speed is {deletion_speed} MB/sec for {pvc_size} GB pvc."
            )
            deletion_measures = {
                "clone_num": i + 1,
                "time": delete_time,
                "speed": deletion_speed,
            }
            clone_deletion_measures.append(deletion_measures)
            # CSI-side deletion time, parsed from the same log window.
            csi_clone_deletion_measures.append(
                performance_lib.csi_pvc_time_measure(self.interface,
                                                     cloned_pvc_obj, "delete",
                                                     start_time))

        logger.info(
            f"Printing clone creation time and speed for {max_num_of_clones} clones "
            f"on {interface_type} PVC of size {pvc_size} GB:")
        for c in clone_creation_measures:
            logger.info(
                f"Clone number {c['clone_num']} creation time is {c['time']} secs for {pvc_size} GB pvc ."
            )
            logger.info(
                f"Clone number {c['clone_num']} creation speed is {c['speed']} MB/sec for {pvc_size} GB pvc."
            )
        logger.info(
            f"Clone deletion time and speed for {interface_type} PVC of size {pvc_size} GB are:"
        )
        # Aggregate creation statistics for the ES report.
        creation_time_list = [r["time"] for r in clone_creation_measures]
        creation_speed_list = [r["speed"] for r in clone_creation_measures]
        average_creation_time = statistics.mean(creation_time_list)
        average_csi_creation_time = statistics.mean(
            csi_clone_creation_measures)
        average_creation_speed = statistics.mean(creation_speed_list)
        logger.info(f"Average creation time is  {average_creation_time} secs.")
        logger.info(
            f"Average creation speed is  {average_creation_speed} Mb/sec.")

        for d in clone_deletion_measures:
            logger.info(
                f"Clone number {d['clone_num']} deletion time is {d['time']} secs for {pvc_size} GB pvc."
            )
            logger.info(
                f"Clone number {d['clone_num']} deletion speed is {d['speed']} MB/sec for {pvc_size} GB pvc."
            )

        # Aggregate deletion statistics for the ES report.
        deletion_time_list = [r["time"] for r in clone_deletion_measures]
        deletion_speed_list = [r["speed"] for r in clone_deletion_measures]
        average_deletion_time = statistics.mean(deletion_time_list)
        average_csi_deletion_time = statistics.mean(
            csi_clone_deletion_measures)
        average_deletion_speed = statistics.mean(deletion_speed_list)
        logger.info(f"Average deletion time is  {average_deletion_time} secs.")
        logger.info(
            f"Average deletion speed is  {average_deletion_speed} Mb/sec.")
        logger.info("test_clones_creation_performance finished successfully.")

        self.results_path = get_full_test_logs_path(cname=self)
        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        self.full_log_path = get_full_test_logs_path(cname=self)
        self.results_path = get_full_test_logs_path(cname=self)
        self.full_log_path += f"-{self.interface}-{pvc_size}-{file_size}"
        logger.info(f"Logs file path name is : {self.full_log_path}")

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_clone_performance",
            ))

        full_results.add_key("interface", self.interface)
        full_results.add_key("total_clone_number", max_num_of_clones)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("average_clone_creation_time",
                             average_creation_time)
        full_results.add_key("average_csi_clone_creation_time",
                             average_csi_creation_time)
        full_results.add_key("average_clone_deletion_time",
                             average_deletion_time)
        full_results.add_key("average_csi_clone_deletion_time",
                             average_csi_deletion_time)
        full_results.add_key("average_clone_creation_speed",
                             average_creation_speed)
        full_results.add_key("average_clone_deletion_speed",
                             average_deletion_speed)

        full_results.all_results = {
            "clone_creation_time": creation_time_list,
            "csi_clone_creation_time": csi_clone_creation_measures,
            "clone_deletion_time": deletion_time_list,
            "csi_clone_deletion_time": csi_clone_deletion_measures,
            "clone_creation_speed": creation_speed_list,
            "clone_deletion_speed": deletion_speed_list,
        }

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            logger.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtest (8 - according to the parameters)
            self.write_result_to_file(res_link)