Example #1
0
    def test_scale_mcg_rgw_obc_creation(self, tmp_path, timeout=60):
        """
        Create OBCs in bulk against both the MCG and RGW storage classes.

        This test case only runs on vSphere cluster deployment. OBCs are
        created in batches; each batch is split evenly between the two
        storage classes, submitted as kube jobs, then checked for the
        Bound state. All OBCs are cleaned up at the end.
        """

        log.info(
            f"Start creating  {self.scale_obc_count} OBC in a batch of {self.num_obc_batch}"
        )
        half_batch = int(self.num_obc_batch / 2)
        for _ in range(int(self.scale_obc_count / self.num_obc_batch)):
            mcg_obc_dicts = (
                scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                    no_of_obc=half_batch,
                    sc_name=self.sc_name,
                    namespace=self.namespace,
                ))
            rgw_obc_dicts = (
                scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                    no_of_obc=half_batch,
                    sc_name=self.sc_rgw_name,
                    namespace=self.namespace,
                ))
            # Build one kube-job profile per storage class
            mcg_job = ObjectConfFile(
                name="job_profile1",
                obj_dict_list=mcg_obc_dicts,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            rgw_job = ObjectConfFile(
                name="job_profile2",
                obj_dict_list=rgw_obc_dicts,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            # Submit both kube jobs, pausing after each so OBC
            # provisioning has time to settle
            for kube_job in (mcg_job, rgw_job):
                kube_job.create(namespace=self.namespace)
                time.sleep(timeout * 3)

            # Verify every OBC in each job reached the Bound state
            mcg_bound = (
                scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                    kube_job_obj=mcg_job,
                    namespace=self.namespace,
                    no_of_obc=half_batch,
                ))
            rgw_bound = (
                scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                    kube_job_obj=rgw_job,
                    namespace=self.namespace,
                    no_of_obc=half_batch,
                ))
            log.info(
                f"Number of OBCs in Bound state MCG: {len(mcg_bound)},"
                f" RGW: {len(rgw_bound)}")

        # Delete obc on cluster
        scale_noobaa_lib.cleanup(self.namespace)
Example #2
0
    def test_scale_mcg_obc_creation(self, tmp_path, timeout=60):
        """
        Create OBCs in bulk using the Noobaa (MCG) storage class.

        OBCs are created in batches via kube jobs; each batch is
        verified to reach the Bound state, and all OBCs are deleted at
        the end of the test.
        """

        log.info(f"Start creating  {self.scale_obc_count} "
                 f"OBC in a batch of {self.num_obc_batch}")
        batch_count = int(self.scale_obc_count / self.num_obc_batch)
        for _ in range(batch_count):
            obc_dicts = (
                scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                    no_of_obc=self.num_obc_batch,
                    sc_name=self.sc_name,
                    namespace=self.namespace,
                ))
            # Bundle the OBC manifests into a single kube-job profile
            profile = ObjectConfFile(
                name="job_profile",
                obj_dict_list=obc_dicts,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            # Submit the job and give OBC provisioning time to settle
            profile.create(namespace=self.namespace)
            time.sleep(timeout * 5)

            # Verify every OBC in this batch reached the Bound state
            bound = (
                scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                    kube_job_obj=profile,
                    namespace=self.namespace,
                    no_of_obc=self.num_obc_batch,
                ))
            log.info(f"Number of PVCs in Bound state {len(bound)}")
        # Delete obc on cluster
        scale_noobaa_lib.cleanup(self.namespace)
    def test_scale_obc_creation_noobaa_pod_respin(self, tmp_path, pod_name,
                                                  sc_name, mcg_job_factory):
        """
        Create OBCs at scale, restart the node hosting the given noobaa
        pod, and verify all OBCs remain in the Bound state afterwards.

        This test case only runs on vSphere cluster deployment.

        Args:
            tmp_path: pytest tmp_path fixture for the kube-job files
            pod_name: noobaa pod whose hosting node gets restarted
            sc_name: storage class used for the OBC creation
            mcg_job_factory: fixture that spawns FIO jobs against MCG

        """

        # Create OBCs with FIO running using mcg_job_factory().
        # Keep the returned job objects referenced for the duration of
        # the test. (Previously done with exec() assigning job0, job1,
        # ... locals — an exec anti-pattern; a list is equivalent.)
        io_jobs = [mcg_job_factory() for _ in range(self.scale_obc_count_io)]

        log.info(f"Start creating  {self.scale_obc_count} "
                 f"OBC in a batch of {self.num_obc_batch}")
        for i in range(int(self.scale_obc_count / self.num_obc_batch)):
            obc_dict_list = (
                scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                    no_of_obc=self.num_obc_batch,
                    sc_name=sc_name,
                    namespace=self.namespace,
                ))
            # Create job profile
            job_file = ObjectConfFile(
                name="job_profile",
                obj_dict_list=obc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            # Create kube_job
            job_file.create(namespace=self.namespace)

            # Check all the OBCs reached Bound state
            obc_bound_list = (
                scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                    kube_job_obj=job_file,
                    namespace=self.namespace,
                    no_of_obc=self.num_obc_batch,
                ))
            log.info(f"Number of OBCs in Bound state: {len(obc_bound_list)}")

        # Reset node which noobaa pods is running on
        # And validate noobaa pods are re-spinned and in running state
        scale_noobaa_lib.noobaa_running_node_restart(pod_name=pod_name)

        # Verify all OBCs are in Bound state after node restart
        log.info("Verify all OBCs are in Bound state after node restart.....")
        obc_status_list = scale_noobaa_lib.check_all_obcs_status(
            namespace=self.namespace)
        log.info(f"Number of OBCs in Bound state after node reset: "
                 f"{len(obc_status_list[0])}")
        assert (len(obc_status_list[0]) == self.scale_obc_count
                ), "Not all OBCs in Bound state"
        # io_jobs kept alive until here so the FIO fixtures persist
        del io_jobs
Example #4
0
def test_scale_obc_pre_upgrade(tmp_path, timeout=60):
    """
    Create scaled MCG OBC using Noobaa storage class before upgrade
    Save scaled obc data in a file for post upgrade validation
    """
    scaled_obcs = []
    log.info(f"Start creating  {scale_obc_count} " f"OBC in a batch of {num_obc_batch}")
    total_batches = int(scale_obc_count / num_obc_batch)
    for _ in range(total_batches):
        obc_dicts = scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
            no_of_obc=num_obc_batch,
            sc_name=sc_name,
            namespace=namespace,
        )
        # Bundle the OBC manifests into a single kube-job profile
        profile = ObjectConfFile(
            name="job_profile",
            obj_dict_list=obc_dicts,
            project=namespace,
            tmp_path=tmp_path,
        )
        # Submit the job and give OBC provisioning time to settle
        profile.create(namespace=namespace)
        time.sleep(timeout * 5)

        # Collect the OBCs of this batch that reached the Bound state
        bound = scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
            kube_job_obj=profile,
            namespace=namespace,
            no_of_obc=num_obc_batch,
        )
        scaled_obcs.extend(bound)

    log.info(
        f"Number of OBCs in scaled list: {len(scaled_obcs)}",
    )

    # Write namespace, OBC data in a OBC_SCALE_DATA_FILE which
    # will be used during post_upgrade validation tests
    with open(obc_scaled_data_file, "a+") as scale_file:
        scale_file.write("# Scale Data File\n")
        scale_file.write(f"NAMESPACE: {namespace}\n")
        scale_file.write(f"OBC_SCALE_LIST: {scaled_obcs}\n")
    def test_scale_obc_create_delete_time(self, tmp_path):
        """
        MCG OBC creation and deletion using Noobaa MCG storage class.

        Creates OBCs in batches, measures per-OBC creation time, then
        deletes them in batches measuring per-OBC deletion time, and
        stores both measurements in CSV files under the ocs-ci log path.

        Args:
            tmp_path: pytest tmp_path fixture for the kube-job files

        """

        log.info(f"Start creating  {self.scale_obc_count} "
                 f"OBCs in a batch of {self.num_obc_batch}")
        obc_create = dict()
        obc_delete = dict()
        for i in range(int(self.scale_obc_count / self.num_obc_batch)):
            obc_dict_list = (
                scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
                    no_of_obc=self.num_obc_batch,
                    sc_name=constants.NOOBAA_SC,
                    namespace=self.namespace,
                ))
            # Create job profile
            job_file = ObjectConfFile(
                name="job_profile",
                obj_dict_list=obc_dict_list,
                project=self.namespace,
                tmp_path=tmp_path,
            )
            # Create kube_job
            job_file.create(namespace=self.namespace)

            # Check all the OBCs to reach Bound state
            obc_bound_list = (
                scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
                    kube_job_obj=job_file,
                    namespace=self.namespace,
                    no_of_obc=self.num_obc_batch,
                ))
            log.info(f"Number of OBCs in Bound state {len(obc_bound_list)}")

            # Measure obc creation and deletion time
            obc_creation_time = scale_noobaa_lib.measure_obc_creation_time(
                obc_name_list=obc_bound_list)
            obc_create.update(obc_creation_time)

        # Delete all obcs in a batch
        # BUGFIX: the slice width was hard-coded to 20 while the range
        # stepped by num_obc_batch, producing overlapping or incomplete
        # chunks whenever num_obc_batch != 20.
        obc_name_list = list(oc_get_all_obc_names())
        new_list = [
            obc_name_list[i:i + self.num_obc_batch]
            for i in range(0, len(obc_name_list), self.num_obc_batch)
        ]

        for batch in new_list:
            scale_noobaa_lib.cleanup(self.namespace, obc_count=batch)
            obc_deletion_time = scale_noobaa_lib.measure_obc_deletion_time(
                obc_name_list=batch)
            obc_delete.update(obc_deletion_time)

        # Store obc creation time on csv file
        log_path = f"{ocsci_log_path()}/obc-creation"
        with open(f"{log_path}-{constants.NOOBAA_SC}.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in obc_create.items():
                csv_obj.writerow([k, v])
        log.info(
            f"OBC creation data present in {log_path}-{constants.NOOBAA_SC}.csv"
        )

        # Store obc deletion time on csv file
        # BUGFIX: this loop previously iterated obc_create, so the
        # deletion CSV duplicated creation times; use obc_delete.
        log_path = f"{ocsci_log_path()}/obc-deletion"
        with open(f"{log_path}-{constants.NOOBAA_SC}.csv", "w") as fd:
            csv_obj = csv.writer(fd)
            for k, v in obc_delete.items():
                csv_obj.writerow([k, v])
        log.info(
            f"OBC deletion data present in {log_path}-{constants.NOOBAA_SC}.csv"
        )