def test_scale_pvcs_pods_pre_upgrade():
    """
    Function to scale PVCs and PODs
    """

    # Scale 1500+ PVCs and PODs in the cluster
    fioscale = FioPodScale(kind=constants.DEPLOYMENTCONFIG,
                           node_selector=constants.SCALE_NODE_SELECTOR)
    kube_pod_obj_list, kube_pvc_obj_list = fioscale.create_scale_pods(
        scale_count=SCALE_COUNT, pvc_per_pod_count=PVCS_PER_POD)

    namespace = fioscale.namespace
    scale_round_up_count = SCALE_COUNT + 20
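    # The +20 round-up pads the expected totals; the per-kube-job counts checked
    # below (scale_round_up_count / 40 pods and / 4 PVCs) assume the scaled pods
    # and PVCs are split evenly across the kube job objects returned above, with
    # each pod attaching PVCS_PER_POD PVCs.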

    # Get PVCs and PODs count and list
    pod_running_list, pvc_bound_list = ([], [])
    for pod_objs in kube_pod_obj_list:
        pod_running_list.extend(
            scale_lib.check_all_pod_reached_running_state_in_kube_job(
                kube_job_obj=pod_objs,
                namespace=namespace,
                no_of_pod=int(scale_round_up_count / 40),
            ))
    for pvc_objs in kube_pvc_obj_list:
        pvc_bound_list.extend(
            scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=pvc_objs,
                namespace=namespace,
                no_of_pvc=int(scale_round_up_count / 4),
            ))

    logging.info(f"Running PODs count {len(pod_running_list)} & "
                 f"Bound PVCs count {len(pvc_bound_list)} "
                 f"in namespace {fioscale.namespace}")

    # Write namespace, PVC and POD data in a SCALE_DATA_FILE which
    # will be used during post_upgrade validation tests
    with open(SCALE_DATA_FILE, "a+") as w_obj:
        w_obj.write("# Scale Data File\n")
        w_obj.write(f"NAMESPACE: {namespace}\n")
        w_obj.write(f"POD_SCALE_LIST: {pod_running_list}\n")
        w_obj.write(f"PVC_SCALE_LIST: {pvc_bound_list}\n")

    # Check ceph health status
    utils.ceph_health_check(tries=30)
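
# A minimal sketch (not part of the test above) of how a post-upgrade validation
# test might read SCALE_DATA_FILE back; the helper name and parsing details are
# assumptions based only on the key/value format written above.
import ast


def read_scale_data_file(path=SCALE_DATA_FILE):
    """Parse the '# Scale Data File' key/value lines into a dict."""
    data = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            key, _, value = line.partition(": ")
            try:
                # Lists such as POD_SCALE_LIST/PVC_SCALE_LIST are written as reprs
                data[key] = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                # Plain strings such as NAMESPACE
                data[key] = value
    return data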
    def test_bulk_clone_performance(self, namespace, tmp_path, pod_factory):
        """
        Creates number of PVCs in a bulk using kube job
        Write 60% of PVC capacity to each one of the created PVCs
        Creates 1 clone per each PVC altogether in a bulk
        Measuring time for bulk of clones creation

        """
        pvc_count = 50
        log.info(f"Start creating {self.interface} {pvc_count} PVC")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=pvc_count,
            access_mode=constants.ACCESS_MODE_RWO,
            sc_name=sc_name,
            pvc_size="5Gi",
        )

        job_pvc_file = ObjectConfFile(
            name="job_profile_pvc",
            obj_dict_list=pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_pvc_file.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_pvc_file,
            namespace=self.namespace,
            no_of_pvc=pvc_count,
        )

        logging.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

        total_files_size = self.run_fio_on_pvcs(pvc_dict_list, pod_factory)
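        # run_fio_on_pvcs() fills each PVC to roughly 60% of its capacity (per the
        # docstring above); the aggregate MB written is returned and later used to
        # compute the clone-creation throughput.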

        clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
            pvc_dict_list, clone_yaml, sc_name)
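        # Each generated clone manifest is expected to be a PVC whose
        # spec.dataSource references its parent PVC (standard Kubernetes CSI
        # volume cloning), so exactly one clone is requested per source PVC.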

        logging.info("Created clone dict list")

        job_clone_file = ObjectConfFile(
            name="job_profile_clone",
            obj_dict_list=clone_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job that creates clones
        job_clone_file.create(namespace=self.namespace)

        logging.info("Going to check bound status for clones")
        # Check all the clones reached Bound state
        clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_clone_file,
            namespace=self.namespace,
            no_of_pvc=pvc_count,
            timeout=200,
        )

        logging.info(
            f"Number of clones in Bound state {len(clone_bound_list)}")

        clone_objs = []
        all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        for clone_yaml in clone_dict_list:
            name = clone_yaml["metadata"]["name"]
            size = clone_yaml["spec"]["resources"]["requests"]["storage"]
            logging.info(f"Clone {name} of size {size} created")
            for pvc_obj in all_pvc_objs:
                if pvc_obj.name == name:
                    clone_objs.append(pvc_obj)

        assert len(clone_bound_list) == len(
            clone_objs
        ), "Not all clones reached BOUND state, cannot measure time"
        start_time = helpers.get_provision_time(self.interface,
                                                clone_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              clone_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        speed = round(total_files_size / total_time, 2)
        logging.info(
            f"Total creation time = {total_time} secs, data size = {total_files_size} MB, speed = {speed} MB/sec "
            f"for {self.interface} clone in bulk of {pvc_count} clones.")

    def test_bulk_clone_performance(self, namespace, tmp_path):
        """
        Creates a bulk of PVCs using a kube job
        Writes data equal to 60% of each PVC's capacity
        Creates one clone per PVC, all together in a bulk
        Measures the total and CSI creation times for the bulk of clones

        """
        pvc_count = 50
        vol_size = "5Gi"
        job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
        log.info(f"Start creating {self.interface} {pvc_count} PVC")
        if self.interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
        elif self.interface == constants.CEPHFILESYSTEM:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML

        try:
            pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=pvc_count,
                access_mode=constants.ACCESS_MODE_RWO,
                sc_name=sc_name,
                pvc_size=vol_size,
            )

            job_pvc_file = ObjectConfFile(
                name="job_profile_pvc",
                obj_dict_list=pvc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job
            job_pvc_file.create(namespace=self.namespace)

            # Check all the PVC reached Bound state
            pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_pvc_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
            )

            log.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

            # Kube_job to Create pod
            pod_dict_list = scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_bound_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
                start_io=False,
                pod_yaml=constants.NGINX_POD_YAML,
            )
            job_pod_file = ObjectConfFile(
                name="job_profile_pod",
                obj_dict_list=pod_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            job_pod_file.create(namespace=self.namespace)

            # Check all PODs in Running state
            scale_lib.check_all_pod_reached_running_state_in_kube_job(
                kube_job_obj=job_pod_file,
                namespace=self.namespace,
                no_of_pod=len(pod_dict_list),
                timeout=90,
            )
            log.info(f"Number of PODs in Running state {len(pod_dict_list)}")

            total_files_size = self.run_fio_on_pvcs(vol_size)

            clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
                pvc_dict_list, clone_yaml, sc_name)

            log.info("Created clone dict list")

            csi_bulk_start_time = self.get_time(time_format="csi")
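            # The timestamp above is taken in the CSI log time format just before
            # the clones are created; performance_lib.csi_bulk_pvc_time_measure()
            # later uses it to derive how long the CSI layer spent creating the
            # bulk of clones (reported separately from the end-to-end time).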

            job_clone_file = ObjectConfFile(
                name="job_profile_clone",
                obj_dict_list=clone_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )

            # Create kube_job that creates clones
            job_clone_file.create(namespace=self.namespace)

            log.info("Going to check bound status for clones")
            # Check all the clones reached Bound state
            clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_clone_file,
                namespace=self.namespace,
                no_of_pvc=pvc_count,
                timeout=180,
            )

            log.info(
                f"Number of clones in Bound state {len(clone_bound_list)}")

            clone_objs = []
            all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
            for clone_yaml in clone_dict_list:
                name = clone_yaml["metadata"]["name"]
                size = clone_yaml["spec"]["resources"]["requests"]["storage"]
                log.info(f"Clone {name} of size {size} created")
                for pvc_obj in all_pvc_objs:
                    if pvc_obj.name == name:
                        clone_objs.append(pvc_obj)

            assert len(clone_bound_list) == len(
                clone_objs
            ), "Not all clones reached BOUND state, cannot measure time"
            start_time = helpers.get_provision_time(self.interface,
                                                    clone_objs,
                                                    status="start")
            end_time = helpers.get_provision_time(self.interface,
                                                  clone_objs,
                                                  status="end")
            total_time = (end_time - start_time).total_seconds()
            speed = round(total_files_size / total_time, 2)

            csi_creation_time = performance_lib.csi_bulk_pvc_time_measure(
                self.interface, clone_objs, "create", csi_bulk_start_time)

            log.info(
                f"Total creation time = {total_time} secs, csi creation time = {csi_creation_time},"
                f" data size = {total_files_size} MB, speed = {speed} MB/sec "
                f"for {self.interface} clone in bulk of {pvc_count} clones.")

            # Produce ES report
            # Collecting environment information
            self.get_env_info()

            # Initialize the results doc file.
            full_results = self.init_full_results(
                ResultsAnalyse(
                    self.uuid,
                    self.crd_data,
                    self.full_log_path,
                    "bulk_clone_perf_fullres",
                ))

            full_results.add_key("interface", self.interface)
            full_results.add_key("bulk_size", pvc_count)
            full_results.add_key("clone_size", vol_size)
            full_results.add_key("bulk_creation_time", total_time)
            full_results.add_key("bulk_csi_creation_time", csi_creation_time)
            full_results.add_key("data_size(MB)", total_files_size)
            full_results.add_key("speed", speed)
            full_results.add_key("es_results_link",
                                 full_results.results_link())

            # Write the test results into the ES server
            full_results.es_write()
            self.results_path = get_full_test_logs_path(cname=self)
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create a text file with the results of all subtests (3, according to the parameters)
            self.write_result_to_file(res_link)

        # The finally block cleans up the resources created during the test;
        # it runs irrespective of whether the try block passed or failed.
        finally:
            # Cleanup activities
            log.info(
                "Cleanup of all the resources created during test execution")
            if job_pod_file:
                job_pod_file.delete(namespace=self.namespace)
                job_pod_file.wait_for_delete(resource_name=job_pod_file.name,
                                             namespace=self.namespace)

            if job_clone_file:
                job_clone_file.delete(namespace=self.namespace)
                job_clone_file.wait_for_delete(
                    resource_name=job_clone_file.name,
                    namespace=self.namespace)

            if job_pvc_file:
                job_pvc_file.delete(namespace=self.namespace)
                job_pvc_file.wait_for_delete(resource_name=job_pvc_file.name,
                                             namespace=self.namespace)

            # Check ceph health status
            utils.ceph_health_check(tries=20)

    def test_multiple_pvc_creation_deletion_scale(self, namespace, tmp_path,
                                                  access_mode, interface):
        """
        Measures PVC creation time while scaling PVCs,
        then measures PVC deletion time after the creation test
        """
        scale_pvc_count = scale_lib.get_max_pvc_count()
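        # get_max_pvc_count() sizes the scale run from the cluster itself,
        # roughly 500 PVCs per worker node (see the all-4-types test below).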
        log.info(
            f"Start creating {access_mode}-{interface} {scale_pvc_count} PVC")
        if interface == constants.CEPHBLOCKPOOL:
            sc_name = constants.DEFAULT_STORAGECLASS_RBD
        elif interface == constants.CEPHFS_INTERFACE:
            sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS

        # Get pvc_dict_list, append all the pvc.yaml dict to pvc_dict_list
        pvc_dict_list1 = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=int(scale_pvc_count / 2),
            access_mode=access_mode,
            sc_name=sc_name)
        pvc_dict_list2 = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
            no_of_pvc=int(scale_pvc_count / 2),
            access_mode=access_mode,
            sc_name=sc_name)

        # Two kube_jobs are used to reduce the load; timeout problems were
        # observed when deleting a single kube_job under heavy load.
        job_file1 = ObjectConfFile(
            name="job_profile_1",
            obj_dict_list=pvc_dict_list1,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        job_file2 = ObjectConfFile(
            name="job_profile_2",
            obj_dict_list=pvc_dict_list2,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_file1.create(namespace=self.namespace)
        job_file2.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file1,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )
        pvc_bound_list.extend(
            scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                kube_job_obj=job_file2,
                namespace=self.namespace,
                no_of_pvc=int(scale_pvc_count / 2),
            ))

        log.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")

        # Get PVC creation time
        pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=interface,
            pvc_name_list=pvc_bound_list,
            wait_time=300,
        )

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        log_path = f"{ocsci_log_path()}/{interface}-{access_mode}"
        with open(f"{log_path}-creation-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in pvc_create_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Create data present in {log_path}-creation-time.csv file")

        # Get PV names; they are needed to fetch deletion time data from the
        # logs after the PVCs themselves are deleted
        pv_name_list = list()
        get_kube_job_1 = job_file1.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            pv_name_list.append(
                get_kube_job_1["items"][i]["spec"]["volumeName"])

        get_kube_job_2 = job_file2.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            pv_name_list.append(
                get_kube_job_2["items"][i]["spec"]["volumeName"])

        # Delete kube_job
        job_file1.delete(namespace=self.namespace)
        job_file2.delete(namespace=self.namespace)

        # Adding 1min wait time for PVC deletion logs to be updated
        # Observed failure when we immediately check the logs for pvc delete time
        # https://github.com/red-hat-storage/ocs-ci/issues/3371
        time.sleep(60)

        # Get PVC deletion time
        pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=interface, pv_name_list=pv_name_list)

        # Update result to csv file.
        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        with open(f"{log_path}-deletion-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in pvc_deletion_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Delete data present in {log_path}-deletion-time.csv file")
        end_time = default_timer()
        log.info(f"Elapsed time -- {end_time - self.start_time} seconds")

    def test_all_4_type_pvc_creation_deletion_scale(self, namespace, tmp_path):
        """
        Measures PVC creation time while scaling PVCs of all 4 types.
        A total of 500 PVCs per worker node is created, i.e. 375 of each
        PVC type on a 3-worker cluster.
        Also measures PVC deletion time in the scale environment
        """
        scale_pvc_count = scale_lib.get_max_pvc_count()
        log.info(f"Start creating {scale_pvc_count} PVC of all 4 types")
        cephfs_sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS
        rbd_sc_obj = constants.DEFAULT_STORAGECLASS_RBD

        # Get pvc_dict_list, append all the pvc.yaml dict to pvc_dict_list
        rbd_pvc_dict_list, cephfs_pvc_dict_list = ([] for i in range(2))
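        # 4 PVC types = {RBD, CephFS} x {RWO, RWX}; each type gets a quarter of
        # the total scale count.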
        for mode in [constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX]:
            rbd_pvc_dict_list.extend(
                scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                    no_of_pvc=int(scale_pvc_count / 4),
                    access_mode=mode,
                    sc_name=rbd_sc_obj,
                ))
            cephfs_pvc_dict_list.extend(
                scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
                    no_of_pvc=int(scale_pvc_count / 4),
                    access_mode=mode,
                    sc_name=cephfs_sc_obj,
                ))

        # Two kube_jobs are used, one for CephFS PVCs and one for RBD PVCs
        job_file_rbd = ObjectConfFile(
            name="rbd_pvc_job",
            obj_dict_list=rbd_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        job_file_cephfs = ObjectConfFile(
            name="cephfs_pvc_job",
            obj_dict_list=cephfs_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job
        job_file_rbd.create(namespace=self.namespace)
        job_file_cephfs.create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        rbd_pvc_name = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file_rbd,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )
        fs_pvc_name = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=job_file_cephfs,
            namespace=self.namespace,
            no_of_pvc=int(scale_pvc_count / 2),
        )

        # Get pvc objs from namespace, which is used to identify backend pv
        rbd_pvc_obj, cephfs_pvc_obj = ([] for i in range(2))
        pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
        for pvc_obj in pvc_objs:
            if pvc_obj.backed_sc == constants.DEFAULT_STORAGECLASS_RBD:
                rbd_pvc_obj.append(pvc_obj)
            elif pvc_obj.backed_sc == constants.DEFAULT_STORAGECLASS_CEPHFS:
                cephfs_pvc_obj.append(pvc_obj)

        # Get PVC creation time
        fs_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=constants.CEPHFS_INTERFACE, pvc_name_list=fs_pvc_name)
        rbd_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
            interface=constants.CEPHBLOCKPOOL, pvc_name_list=rbd_pvc_name)
        fs_pvc_create_time.update(rbd_pvc_create_time)
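        # Merge the CephFS and RBD timings so a single CSV covers all 4 PVC types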

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        log_path = f"{ocsci_log_path()}/All-type-PVC"
        with open(f"{log_path}-creation-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in fs_pvc_create_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Create data present in {log_path}-creation-time.csv file")

        # Get PV names; they are needed to fetch deletion time data from the
        # logs after the PVCs themselves are deleted
        rbd_pv_list, fs_pv_list = ([] for i in range(2))
        get_rbd_kube_job = job_file_rbd.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            rbd_pv_list.append(
                get_rbd_kube_job["items"][i]["spec"]["volumeName"])

        get_fs_kube_job = job_file_cephfs.get(namespace=self.namespace)
        for i in range(int(scale_pvc_count / 2)):
            fs_pv_list.append(
                get_fs_kube_job["items"][i]["spec"]["volumeName"])

        # Delete kube_job
        job_file_rbd.delete(namespace=self.namespace)
        job_file_cephfs.delete(namespace=self.namespace)

        # Adding 1min wait time for PVC deletion logs to be updated
        # Observed failure when we immediately check the logs for pvc delete time
        # https://github.com/red-hat-storage/ocs-ci/issues/3371
        time.sleep(60)

        # Get PV deletion time
        fs_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=constants.CEPHFS_INTERFACE, pv_name_list=fs_pv_list)
        rbd_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
            interface=constants.CEPHBLOCKPOOL, pv_name_list=rbd_pv_list)
        fs_pvc_deletion_time.update(rbd_pvc_deletion_time)

        # TODO: Update below code with google API, to record value in spreadsheet
        # TODO: For now observing Google API limit to write more than 100 writes
        with open(f"{log_path}-deletion-time.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in fs_pvc_deletion_time.items():
                csv_obj.writerow([k, v])
        log.info(f"Delete data present in {log_path}-deletion-time.csv file")
        end_time = default_timer()
        log.info(f"Elapsed time -- {end_time - self.start_time} seconds")

    def test_scale_pvcs_pods(self):
        """
        Scale 6000 PVCs and PODs in a cluster with 12 worker nodes
        """

        scale_count = 6000
        pvcs_per_pod = 20

        try:
            # Scale
            fioscale = FioPodScale(
                kind=constants.DEPLOYMENTCONFIG,
                node_selector=constants.SCALE_NODE_SELECTOR,
            )
            kube_pod_obj_list, kube_pvc_obj_list = fioscale.create_scale_pods(
                scale_count=scale_count, pvc_per_pod_count=pvcs_per_pod)

            namespace = fioscale.namespace
            scale_round_up_count = scale_count + 80
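            # The +80 round-up pads the expected totals; the per-kube-job counts
            # checked below (scale_round_up_count / 160 pods and / 16 PVCs) assume
            # the scaled pods and PVCs are split evenly across the kube job
            # objects, with each pod attaching pvcs_per_pod PVCs.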

            # Get PVCs and PODs count and list
            pod_running_list, pvc_bound_list = ([], [])
            for pod_objs in kube_pod_obj_list:
                pod_running_list.extend(
                    scale_lib.check_all_pod_reached_running_state_in_kube_job(
                        kube_job_obj=pod_objs,
                        namespace=namespace,
                        no_of_pod=int(scale_round_up_count / 160),
                    ))
            for pvc_objs in kube_pvc_obj_list:
                pvc_bound_list.extend(
                    scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                        kube_job_obj=pvc_objs,
                        namespace=namespace,
                        no_of_pvc=int(scale_round_up_count / 16),
                    ))

            logging.info(f"Running PODs count {len(pod_running_list)} & "
                         f"Bound PVCs count {len(pvc_bound_list)} "
                         f"in namespace {fioscale.namespace}")

            # Get kube obj files in the list to update in scale_data_file
            pod_obj_file_list, pvc_obj_file_list = ([], [])
            files = os.listdir(ocsci_log_path())
            for f in files:
                if "pod" in f:
                    pod_obj_file_list.append(f)
                elif "pvc" in f:
                    pvc_obj_file_list.append(f)

            # Write namespace, PVC and POD data in a SCALE_DATA_FILE which
            # will be used during post_upgrade validation tests
            with open(SCALE_DATA_FILE, "a+") as w_obj:
                w_obj.write(str("# Scale Data File\n"))
                w_obj.write(str(f"NAMESPACE: {namespace}\n"))
                w_obj.write(str(f"POD_SCALE_LIST: {pod_running_list}\n"))
                w_obj.write(str(f"PVC_SCALE_LIST: {pvc_bound_list}\n"))
                w_obj.write(str(f"POD_OBJ_FILE_LIST: {pod_obj_file_list}\n"))
                w_obj.write(str(f"PVC_OBJ_FILE_LIST: {pvc_obj_file_list}\n"))

            # Check ceph health status
            utils.ceph_health_check(tries=30)

        except UnexpectedBehaviour:
            TestAddNode.skip_all = True
            logging.info(
                "Cluster is not in expected state, unexpected behaviour")
            raise