Code example #1
    def external_post_deploy_validation(self):
        """
        This function validates successful deployment of OCS
        in external mode; some of the steps overlap with
        converged mode

        """
        cephcluster = CephClusterExternal()
        cephcluster.cluster_health_check(timeout=300)
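The core of the snippet above can also be exercised on its own. A minimal sketch, assuming CephClusterExternal is importable from ocs_ci.ocs.cluster (the import path is an assumption, not shown in the excerpt):

from ocs_ci.ocs.cluster import CephClusterExternal  # assumed import path

# Build a handle to the externally deployed Ceph cluster and verify its health;
# cluster_health_check() raises if the cluster does not become healthy in time.
cephcluster = CephClusterExternal()
cephcluster.cluster_health_check(timeout=300)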
Code example #2
File: sanity_helpers.py  Project: romayalon/ocs-ci
    def __init__(self):
        """
        Initializer for the Sanity class - init CephClusterExternal() in order
        to set the cluster status before starting the tests
        """
        self.pvc_objs = list()
        self.pod_objs = list()
        self.ceph_cluster = CephClusterExternal()
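A typical way to use an initializer like this is through a pytest fixture. A minimal sketch, assuming the class shown above is named SanityExternalCluster and exposes a health_check() method (class name, method, and import path are assumptions based on ocs-ci's Sanity helpers):

import pytest

from ocs_ci.helpers.sanity_helpers import SanityExternalCluster  # assumed class name and path


@pytest.fixture()
def sanity_helpers():
    # Instantiating the helper records the external cluster state before the test runs
    return SanityExternalCluster()


def test_cluster_stays_healthy(sanity_helpers):
    # Re-check Ceph health after the test's actions (illustrative only)
    sanity_helpers.health_check()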
Code example #3
    def setup(
        self,
        request,
        scenario,
        num_of_nodes,
        num_of_fail_nodes,
        disrupt_provisioner,
        project_factory,
        multi_pvc_factory,
        dc_pod_factory,
    ):
        """
        Identify the nodes and start DeploymentConfig based app pods using
        PVC with ReadWriteOnce (RWO) access mode on selected nodes

        Args:
            scenario (str): Scenario of app pods running on OCS or dedicated nodes
                (eg., 'colocated', 'dedicated')
            num_of_nodes (int): number of nodes required for running test
            num_of_fail_nodes (int): number of nodes to make unresponsive during test
            disrupt_provisioner (bool): True to disrupt the leader provisioner
                pods if not running on selected nodes, else False
            project_factory: A fixture to create a new project
            multi_pvc_factory: A fixture to create a set of new PVCs
            dc_pod_factory: A fixture to create DeploymentConfig based pods

        Returns:
            tuple: Parameters used in the test cases

        """
        ocs_nodes, non_ocs_nodes = self.identify_and_add_nodes(
            scenario, num_of_nodes)
        test_nodes = ocs_nodes if (scenario == "colocated") else non_ocs_nodes
        logger.info(f"Using nodes {test_nodes} for running test")

        def finalizer():
            helpers.remove_label_from_worker_node(node_list=test_nodes,
                                                  label_key="nodetype")

            # Check ceph health
            ceph_health_check(tries=40)

        request.addfinalizer(finalizer)

        project = project_factory()

        if helpers.storagecluster_independent_check():
            ceph_cluster = CephClusterExternal()
        else:
            ceph_cluster = CephCluster()
            # Wait for mon pods to reach expected count
            # Bug 1778273 - [RFE]: Configure 5 MONs for OCS cluster with 5 or more nodes
            # This wait is required for some of the previous OCS versions (< 4.5)
            current_mon_count = int(
                ceph_cluster.CEPHCLUSTER.get_resource(resource_name="",
                                                      column="MONCOUNT"))
            assert ceph_cluster.POD.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.MON_APP_LABEL,
                resource_count=current_mon_count,
                timeout=900,
            )
            ceph_cluster.mons = []
            ceph_cluster.scan_cluster()

        # Select nodes for running app pods and inducing network failure later
        app_pod_nodes = self.select_nodes_for_app_pods(scenario, ceph_cluster,
                                                       ocs_nodes,
                                                       non_ocs_nodes,
                                                       num_of_fail_nodes)

        # Create multiple RBD and CephFS backed PVCs with RWO accessmode
        num_of_pvcs = self.num_of_app_pods_per_node * num_of_fail_nodes
        rbd_pvcs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=project,
            size=self.pvc_size,
            access_modes=[constants.ACCESS_MODE_RWO],
            num_of_pvc=num_of_pvcs,
        )
        cephfs_pvcs = multi_pvc_factory(
            interface=constants.CEPHFILESYSTEM,
            project=project,
            size=self.pvc_size,
            access_modes=[constants.ACCESS_MODE_RWO],
            num_of_pvc=num_of_pvcs,
        )

        # Create deploymentconfig based pods
        dc_pods = []
        # Start app-pods on selected node(s)
        for node_name in app_pod_nodes:
            logger.info(f"Starting app pods on the node {node_name}")
            helpers.label_worker_node(node_list=[node_name],
                                      label_key="nodetype",
                                      label_value="app-pod")

            for num in range(self.num_of_app_pods_per_node):
                dc_pods.append(
                    dc_pod_factory(
                        interface=constants.CEPHBLOCKPOOL,
                        pvc=rbd_pvcs.pop(0),
                        node_selector={"nodetype": "app-pod"},
                    ))
                assert pod.verify_node_name(
                    dc_pods[-1], node_name
                ), f"Pod {dc_pods[-1].name} is not running on labeled node {node_name}"
                dc_pods.append(
                    dc_pod_factory(
                        interface=constants.CEPHFILESYSTEM,
                        pvc=cephfs_pvcs.pop(0),
                        node_selector={"nodetype": "app-pod"},
                    ))
                assert pod.verify_node_name(
                    dc_pods[-1], node_name
                ), f"Pod {dc_pods[-1].name} is not running on labeled node {node_name}"
            helpers.remove_label_from_worker_node(node_list=[node_name],
                                                  label_key="nodetype")

        # Label other test nodes to be able to run app pods later
        helpers.label_worker_node(node_list=test_nodes,
                                  label_key="nodetype",
                                  label_value="app-pod")

        # Get ceph mon,osd pods running on selected node if colocated scenario
        # and extra OCS nodes are present
        # Recovery steps for MON and OSDS not required from OCS 4.4 onwards
        # Refer to BZ 1830015 and BZ 1835908
        ceph_pods = []
        if float(config.ENV_DATA["ocs_version"]) < 4.4 and (
                scenario == "colocated" and len(test_nodes) > 3):
            pods_to_check = ceph_cluster.osds
            # Skip mon pods if mon_count is 5 as there may not be enough nodes
            # for all mons to run after multiple node failures
            if ceph_cluster.mon_count == 3:
                pods_to_check.extend(ceph_cluster.mons)
            for pod_obj in pods_to_check:
                if pod.get_pod_node(pod_obj).name in app_pod_nodes[0]:
                    ceph_pods.append(pod_obj)
            logger.info(
                f"Colocated Mon, OSD pods: {[pod_obj.name for pod_obj in ceph_pods]}"
            )

        disruptor = []
        if disrupt_provisioner:
            disruptor = self.disrupt_plugin_provisioner_pods(app_pod_nodes)

        return ceph_cluster, dc_pods, ceph_pods, app_pod_nodes, test_nodes, disruptor
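
The tuple returned here is meant to be unpacked by the test that consumes this fixture. A hypothetical sketch of that consumption (the test name and trailing steps are illustrative only):

    def test_node_network_failure(self, setup):
        # Unpack the fixture's return value in the same order it is assembled above
        ceph_cluster, dc_pods, ceph_pods, app_pod_nodes, test_nodes, disruptor = setup
        # ... induce the failure on the selected nodes and verify dc_pods recover ...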