Example #1
    def setup(self, request, project_factory, pvc_clone_factory,
              create_pvcs_and_pods):
        """
        Create PVCs and pods

        """
        if config.ENV_DATA["platform"].lower(
        ) in constants.MANAGED_SERVICE_PLATFORMS:
            # Get the index of current cluster
            initial_cluster_index = config.cur_index
            # Get the index of the provider cluster. provider_index will be used
            # as a flag to decide whether switching to the provider cluster
            # context is required.
            self.provider_index = config.get_provider_index()
            # Get the index of the first consumer cluster
            self.consumer_index = config.get_consumer_indexes_list()[0]

            def finalizer():
                # The test case switches to the provider cluster context.
                # Switch back to the initial cluster context after the test.
                config.switch_ctx(initial_cluster_index)

            request.addfinalizer(finalizer)

        self.pvc_size = 3
        self.pvcs, self.pods = create_pvcs_and_pods(pvc_size=self.pvc_size,
                                                    num_of_rbd_pvc=6,
                                                    num_of_cephfs_pvc=4)
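
Here self.provider_index doubles as a flag. A minimal sketch, assuming a typical placement in the test body (not shown in this excerpt), of how it gets consumed before the finalizer restores the original context:

        # Assumed test-body usage of the setup above: switch to the provider
        # context only when a provider index was resolved; the finalizer
        # registered in setup switches back afterwards.
        if self.provider_index is not None:
            config.switch_ctx(self.provider_index)
            # ... provider-side verification steps would run here ...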
Example #2
    def test_automated_recovery_from_failed_nodes_reactive_ms(
        self,
        nodes,
        failure,
    ):
        """
        Three test cases are verified while IO runs in the background:
            A) Automated recovery from stopped worker node
            B) Automated recovery from termination of a worker node
            C) Automated recovery from unscheduling and rescheduling a worker node.
        """
        self.create_resources()

        config.switch_to_provider()
        log.info("Start executing the node test function on the provider...")
        FAILURE_TYPE_FUNC_CALL_DICT[failure](nodes)

        # Verification steps after the automated recovery.
        assert check_pods_after_node_replacement(), "Not all the pods are running"
        assert (
            verify_worker_nodes_security_groups()
        ), "Not all the worker node security groups are set correctly"

        log.info("Checking that the ceph health is ok on the provider")
        ceph_health_check()

        log.info("Checking that the ceph health is ok on the consumers")
        consumer_indexes = config.get_consumer_indexes_list()
        for i in consumer_indexes:
            config.switch_ctx(i)
            ceph_health_check()
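
FAILURE_TYPE_FUNC_CALL_DICT is referenced above but not defined in this excerpt. A hypothetical reconstruction of its shape, with placeholder helpers named after the three cases in the docstring (none of these names are confirmed ocs-ci API):

def stop_worker_node(nodes):
    """Placeholder: stop (power off) one worker node."""


def terminate_worker_node(nodes):
    """Placeholder: terminate one worker node instance."""


def unschedule_reschedule_worker_node(nodes):
    """Placeholder: cordon a worker node, then uncordon it."""


# Hypothetical dispatch table: each value takes the platform `nodes` object
# and injects one failure type on a worker node.
FAILURE_TYPE_FUNC_CALL_DICT = {
    "shutdown": stop_worker_node,
    "terminate": terminate_worker_node,
    "unschedule_reschedule": unschedule_reschedule_worker_node,
}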
Example #3

    def test_create_scale_pods_and_pvcs_with_ms_consumers(
            self, create_scale_pods_and_pvcs_using_kube_job_on_ms_consumers):
        """
        Test create scale pods and PVCs using a kube job with MS consumers
        """
        self.orig_index = config.cur_index
        self.consumer_i_per_fio_scale = (
            create_scale_pods_and_pvcs_using_kube_job_on_ms_consumers(
                scale_count=self.scale_count,
                pvc_per_pod_count=self.pvc_per_pod_count,
            ))
        assert config.cur_index == self.orig_index, "The current index has changed"

        config.switch_to_provider()
        time_to_wait_for_io_running = 120
        log.info(f"Wait {time_to_wait_for_io_running} seconds for checking "
                 f"that the IO running as expected")
        sleep(time_to_wait_for_io_running)
        ceph_health_check()

        log.info("Checking the Ceph Health on the consumers")
        consumer_indexes = config.get_consumer_indexes_list()
        for i in consumer_indexes:
            config.switch_ctx(i)
            ceph_health_check()

        self.check_scale_pods_and_pvcs_created_on_consumers()
        log.info(
            "The scale pods and PVCs were created successfully using a kube "
            "job with MS consumers"
        )
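
The save-index / switch / restore dance recurs throughout these examples. A small helper, shown as an illustrative sketch rather than existing project code (the import path is assumed from the surrounding project layout), makes the pattern explicit:

from contextlib import contextmanager

from ocs_ci.framework import config  # import path assumed


@contextmanager
def switched_ctx(cluster_index):
    """Switch to the given cluster context; restore the original on exit."""
    original_index = config.cur_index
    config.switch_ctx(cluster_index)
    try:
        yield
    finally:
        config.switch_ctx(original_index)

With this helper, the consumer health-check loops above reduce to "with switched_ctx(i): ceph_health_check()" and no longer need a finalizer to restore the context.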
Example #4
    def create_resources(self):
        """
        Create resources on the consumers and run IO

        """
        if is_ms_consumer_cluster():
            consumer_indexes = [config.cur_index]
        else:
            consumer_indexes = config.get_consumer_indexes_list()

        self.create_pods_and_pvcs_factory(consumer_indexes=consumer_indexes)
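
This matches the self.create_resources() call at the start of Example #2's test body: on an MS consumer cluster it scopes resource creation to the current cluster only, while in any other context it fans out to every consumer listed in the config.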
Example #5

    def setup(self, resource_to_delete, create_pvcs_and_pods):
        """
        Create PVCs and pods

        """
        if (
            config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS
        ) and (resource_to_delete in ["mds", "mon", "mgr", "osd"]):
            # Get the index of current cluster
            self.initial_cluster_index = config.cur_index
            # Get the index of the first consumer cluster
            self.consumer_index = config.get_consumer_indexes_list()[0]
            # Get the index of the provider cluster. provider_index will act as
            # the flag that decides whether switching to the provider is required.
            self.provider_index = config.get_provider_index()
        self.pvcs, self.pods = create_pvcs_and_pods(pvc_size=10, pods_for_rwx=2)
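
Unlike Example #1, this setup stores self.initial_cluster_index without registering a finalizer in the excerpt shown. A sketch of the matching teardown, following the restore pattern of Examples #1 and #6 (its existence and placement here are assumptions):

    def teardown(self):
        # Restore the context captured in setup so subsequent tests start
        # from the same cluster, mirroring the finalizers in Examples #1 and #6.
        config.switch_ctx(self.initial_cluster_index)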
Example #6
    def setup(self, request, create_pvcs_and_pods):
        """
        Prepare pods for the test and add finalizer.

        """
        self.provider_cluster_index = config.get_provider_index()
        self.consumer_indexes = config.get_consumer_indexes_list()
        if config.ENV_DATA["platform"].lower(
        ) in constants.MANAGED_SERVICE_PLATFORMS:
            # Get the index of current cluster
            initial_cluster_index = config.cur_index

            def teardown():
                # Deleting the ocs-operator pod on a consumer cluster triggers a
                # respin of the rook-ceph-tools pod. The test case patches the
                # rook-ceph-tools pod after the ocs-operator pod respin, but if
                # that automatic respin is delayed by a few seconds, the patching
                # step in the test case will not run. Patching again at the end
                # of the test ensures that the rook-ceph-tools pod on the
                # consumers can run ceph commands.
                for consumer_index in self.consumer_indexes:
                    config.switch_ctx(consumer_index)
                    patch_consumer_toolbox()
                # The test case switches between cluster contexts.
                # Switch back to the initial cluster context after the test.
                config.switch_ctx(initial_cluster_index)

            request.addfinalizer(teardown)

        self.io_pods = list()
        for cluster_index in self.consumer_indexes:
            config.switch_ctx(cluster_index)
            consumer_cluster_kubeconfig = os.path.join(
                config.clusters[cluster_index].ENV_DATA["cluster_path"],
                config.clusters[cluster_index].RUN.get("kubeconfig_location"),
            )
            pvcs, io_pods = create_pvcs_and_pods(
                pvc_size=self.pvc_size,
                replica_count=1,
                pod_dict_path=constants.PERF_POD_YAML,
            )
            for pvc_obj in pvcs:
                pvc_obj.ocp.cluster_kubeconfig = consumer_cluster_kubeconfig
            for io_pod in io_pods:
                io_pod.ocp.cluster_kubeconfig = consumer_cluster_kubeconfig
            pvcs[0].project.cluster_kubeconfig = consumer_cluster_kubeconfig
            self.io_pods.extend(io_pods)
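
Assigning cluster_kubeconfig on each PVC, pod, and project object appears to pin that object's oc operations to the consumer cluster it was created on, so later test steps can act on these objects even after config.switch_ctx has moved the active context elsewhere.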
Example #7
    def setup(self):
        if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
            # Get the index of the first consumer cluster
            self.consumer_cluster_index = config.get_consumer_indexes_list()[0]
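
A one-line sketch of the assumed continuation, not shown in this excerpt:

        # Assumed follow-up: switch to the consumer context captured above
        # before running consumer-side steps.
        config.switch_ctx(self.consumer_cluster_index)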