def test_gluster_block_provisioning_with_valid_ha_count(self, hacount):
        """Verify gluster-block provisioning succeeds for a valid 'hacount'
           storage-class parameter value.
        """
        # TODO(vamahaja): Add check for CRS version
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS version check "
                          "is not implemented")

        # Multipath validation only applies when more than one path is
        # requested, and needs an OCS build newer than 3.9.
        if hacount > 1:
            if get_openshift_storage_version() <= "3.9":
                self.skipTest("Skipping this test case as multipath "
                              "validation is not supported in OCS 3.9")

        # Provision a storage class and PVC carrying the requested hacount
        self.create_sc_with_parameter('glusterblock',
                                      success=True,
                                      parameter={'hacount': str(hacount)})

        # The block volume's reported 'HA' value must match what was asked for
        self.validate_gluster_block_volume_info(self.assertEqual, 'HA',
                                                hacount)

        # TODO: need more info on hacount=1 for multipath validation hence
        #       skipping multipath validation
        if hacount > 1:
            self.validate_multipath_info(hacount)
    def test_gluster_block_provisioning_with_invalid_ha_count(self):
        """Verify gluster-block provisioning when the requested 'hacount'
           exceeds the number of available gluster nodes.
        """
        # TODO(vamahaja): Add check for CRS version
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS version check "
                          "is not implemented")

        if get_openshift_storage_version() <= "3.9":
            self.skipTest("Skipping this test case as multipath validation "
                          "is not supported in OCS 3.9")

        # Ask for one more replica path than there are gluster nodes
        node_count = get_amount_of_gluster_nodes(self.ocp_master_node[0])
        excessive_hacount = node_count + 1

        # Storage class and PVC creation is still expected to succeed
        self.create_sc_with_parameter(
            'glusterblock', success=True,
            parameter={'hacount': str(excessive_hacount)})

        # The effective 'HA' value is the node count, not the oversized
        # request; multipath info must agree with that effective value
        self.validate_gluster_block_volume_info(
            self.assertEqual, 'HA', node_count)
        self.validate_multipath_info(node_count)
    # (removed scraping artifact "Exemple #3 / 0" — non-code separator text)
    def test_target_side_failures_brick_failure_on_block_hosting_volume(self):
        """Target side failures - Brick failure on block hosting volume.

        Restarts two brick processes of the block hosting volume, then
        verifies gluster block services report their expected states and
        that pvc/pv/heketi-block/gluster-block counts stay consistent.
        """
        skip_msg = ("Skipping this test case due to bugs "
                    "BZ-1634745, BZ-1635736, BZ-1636477, BZ-1641668")

        # TODO(vamahaja): Add check for CRS version
        if not self.is_containerized_gluster():
            # Fixed typo in skip message: "impleted" -> "implemented"
            self.skipTest(skip_msg + " and not implemented CRS version check")

        if get_openshift_storage_version() < "3.11.2":
            self.skipTest(skip_msg)

        self.deploy_and_verify_resouces()

        # get block hosting volume from pvc name
        block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
            self.pvc_name)

        # restart 2 brick processes of the block hosting volume; at least
        # 3 hosting nodes are required for the test to be meaningful
        g_nodes = get_gluster_vol_hosting_nodes(block_hosting_vol)
        self.assertGreater(len(g_nodes), 2)
        restart_gluster_vol_brick_processes(self.oc_node, block_hosting_vol,
                                            g_nodes[:2])

        # check that all glusterfs services settle in their expected state
        # NOTE(review): SERVICE_TARGET is expected to be 'exited' rather
        # than 'running' — presumably a oneshot unit; confirm if changed
        for g_node in g_nodes:
            for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
                state = "exited" if service == SERVICE_TARGET else "running"
                self.assertTrue(
                    wait_for_service_status_on_gluster_pod_or_node(
                        self.oc_node, service, 'active', state, g_node))

        # validates pvc, pv, heketi block and gluster block count after
        # service restarts
        self.validate_volumes_and_blocks()
    # (removed scraping artifact "Exemple #4 / 0" — non-code separator text)
    def test_start_stop_block_volume_service(self):
        """Validate block hosting volume by start/stop operation

           Perform stop/start operation on block hosting volume when
           IO's and provisioning are going on
        """
        skip_msg = ("Skipping this test case due to bugs "
                    "BZ-1634745, BZ-1635736, BZ-1636477, BZ-1641668")

        # TODO(vamahaja): Add check for CRS version
        if not self.is_containerized_gluster():
            # Fixed typo in skip message: "impleted" -> "implemented"
            self.skipTest(skip_msg + " and not implemented CRS version check")

        if get_openshift_storage_version() < "3.11.2":
            self.skipTest(skip_msg)

        self.deploy_and_verify_resouces()

        # get block hosting volume from pvc name
        block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
            self.pvc_name)

        # restart the block hosting volume and wait until heal completes
        self.restart_block_hosting_volume_wait_for_heal(block_hosting_vol)

        # validates pvc, pv, heketi block and gluster block count after
        # service restarts
        self.validate_volumes_and_blocks()
    def test_validate_heketi_node_add_with_db_check(self):
        """Verify that adding a node bumps the node total in the heketi db."""
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS version check "
                          "is not implemented")

        # BZ-1732831 breaks this scenario on builds older than OCS 3.11.4
        if (openshift_storage_version.get_openshift_storage_version() <
                "3.11.4"):
            self.skipTest(
                "This test case is not supported for < OCS 3.11.4 builds due "
                "to bug BZ-1732831")

        # Snapshot the node total recorded in the heketi db before the add
        db_before = heketi_ops.heketi_db_check(
            self.heketi_client_node, self.heketi_server_url, json=True)
        nodes_before = db_before['nodes']['total']

        # Add node to a valid cluster id
        self.heketi_node_add_with_valid_cluster()

        # The db must now account for exactly one additional node
        db_after = heketi_ops.heketi_db_check(
            self.heketi_client_node, self.heketi_server_url, json=True)
        nodes_after = db_after['nodes']['total']
        msg = ("Initial node count {} and final node count {} in heketi db is"
               " not as expected".format(nodes_before, nodes_after))
        self.assertEqual(nodes_before + 1, nodes_after, msg)
    def test_pv_resize_no_free_space(self):
        """Validate PVC resize fails if there is no free space available"""
        # BZ-1653567: this scenario is unreliable on builds before OCS 3.11.5
        if get_openshift_storage_version() < "3.11.5":
            self.skipTest(
                "This test case is not supported for < OCS 3.11.5 builds due "
                "to bug BZ-1653567")

        # Attempt to expand the PVC past the available capacity
        self._pv_resize(exceed_free_space=True)
    def test_pv_resize_no_free_space(self):
        """Validate PVC resize fails if there is no free space available"""
        # NOTE(review): a method with this exact name appears earlier in the
        # file; within one class the later definition wins — confirm intended.
        # CRS deployments are skipped until a version check is implemented
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS version check "
                          "is not implemented")

        # BZ-1653567: resize-beyond-capacity misbehaves before OCS 3.11.5
        if get_openshift_storage_version() < "3.11.5":
            self.skipTest(
                "This test case is not supported for < OCS 3.11.5 builds due "
                "to bug BZ-1653567")

        # Attempt to expand the PVC past the available capacity
        self._pv_resize(exceed_free_space=True)
    def test_heketi_node_add_with_valid_cluster(self):
        """Test heketi node add operation with valid cluster id"""
        # CRS deployments lack a version check implementation, so skip them
        if not self.is_containerized_gluster():
            self.skipTest("Skipping this test case as CRS version check "
                          "is not implemented")

        # BZ-1732831 breaks node-add on builds older than OCS 3.11.4
        too_old = (
            openshift_storage_version.get_openshift_storage_version()
            < "3.11.4")
        if too_old:
            self.skipTest(
                "This test case is not supported for < OCS 3.11.4 builds due "
                "to bug BZ-1732831")

        # Delegate the actual add-and-verify flow to the shared helper
        self.heketi_node_add_with_valid_cluster()
    def setUp(self):
        """Prepare node/pod handles, a storage class and three PVCs with IO."""
        super(TestNodeRestart, self).setUp()
        self.oc_node = self.ocp_master_node[0]

        pod_details = get_ocp_gluster_pod_details(self.oc_node)
        self.gluster_pod_list = [pod["pod_name"] for pod in pod_details]
        if not self.gluster_pod_list:
            # No gluster pods means a standalone (non-containerized) setup
            self.skipTest("Standalone Gluster is not supported by this test.")
        self.gluster_pod_name = self.gluster_pod_list[0]

        # BZ-1635736 affects OCS builds older than 3.11.1
        if get_openshift_storage_version() < "3.11.1":
            self.skipTest("Skipping this test case due to bug BZ-1635736")

        self.sc_name = self.create_storage_class()

        self.pvc_names = self._create_volumes_with_io(3)
    # (removed scraping artifact "Exemple #10 / 0" — non-code separator text)
    def test_restart_services_provision_volume_and_run_io(self, service):
        """Restart the given gluster service, then validate volumes.

        :param service: gluster service name to restart on one of the nodes
                        hosting the block hosting volume
        """
        skip_msg = ("Skipping this test case due to bugs "
                    "BZ-1634745, BZ-1635736, BZ-1636477, BZ-1641668")

        # TODO(vamahaja): Add check for CRS version
        if not self.is_containerized_gluster():
            self.skipTest(skip_msg + " and not implemented CRS version check")

        if get_openshift_storage_version() < "3.11.2":
            self.skipTest(skip_msg)

        self.deploy_and_verify_resouces()

        block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
            self.pvc_name)
        g_nodes = get_gluster_vol_hosting_nodes(block_hosting_vol)
        self.assertGreater(len(g_nodes), 2)

        # restart the requested glusterfs service on one hosting node
        restart_service_on_gluster_pod_or_node(self.oc_node, service,
                                               g_nodes[0])

        # wait for deployed user pod to be in Running state after restarting
        # service
        wait_for_pod_be_ready(self.oc_node,
                              self.pod_name,
                              timeout=60,
                              wait_step=5)

        # checks if all glusterfs services are in their expected state.
        # Fix: the loop previously reused the name 'service', shadowing and
        # clobbering the method parameter; use 'g_service' instead.
        for g_node in g_nodes:
            for g_service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
                state = "exited" if g_service == SERVICE_TARGET else "running"
                self.assertTrue(
                    wait_for_service_status_on_gluster_pod_or_node(
                        self.oc_node, g_service, 'active', state, g_node))

        # validates pvc, pv, heketi block and gluster block count after
        # service restarts
        self.validate_volumes_and_blocks()
    def test_heketi_node_add_with_valid_cluster(self):
        """Test heketi node add operation with valid cluster id.

        Adds a new node to the first heketi cluster, then verifies that:
          * the node appears in the heketi node list,
          * gluster reports every peer as connected,
          * the heketi db endpoint object can be patched with the new
            volume endpoints and lists the added node's storage IP.
        """
        # BZ-1732831: node add is broken on builds older than OCS 3.11.4
        if (openshift_storage_version.get_openshift_storage_version()
                < "3.11.4"):
            self.skipTest(
                "This test case is not supported for < OCS 3.11.4 builds due "
                "to bug BZ-1732831")

        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        ocp_node = self.ocp_master_node[0]

        # Get heketi endpoints (of the first volume) before adding the node
        h_volume_ids = heketi_ops.heketi_volume_list(
            h_client, h_server, json=True)
        h_endpoints_before_new_node = heketi_ops.heketi_volume_endpoint_patch(
            h_client, h_server, h_volume_ids["volumes"][0])

        # Add the node to the first cluster reported by heketi
        cluster_info = heketi_ops.heketi_cluster_list(
            h_client, h_server, json=True)
        storage_hostname, storage_ip = self.add_heketi_node_to_cluster(
            cluster_info["clusters"][0])

        # Get heketi nodes and validate for newly added node.
        # The search leaves 'node_hostname' as None when no node matches,
        # which makes the assertTrue below fail with 'err_msg'.
        h_node_ids = heketi_ops.heketi_node_list(h_client, h_server, json=True)
        for h_node_id in h_node_ids:
            node_hostname = heketi_ops.heketi_node_info(
                h_client, h_server, h_node_id, json=True)
            if node_hostname["hostnames"]["manage"][0] == storage_hostname:
                break
            node_hostname = None

        err_msg = ("Newly added heketi node %s not found in heketi node "
                   "list %s" % (storage_hostname, h_node_ids))
        self.assertTrue(node_hostname, err_msg)

        # Check gluster peer status for newly added node
        if self.is_containerized_gluster():
            # Containerized gluster: run the peer query inside the gluster
            # pod scheduled on the newly added host
            gluster_pods = openshift_ops.get_ocp_gluster_pod_details(ocp_node)
            gluster_pod = [
                gluster_pod["pod_name"]
                for gluster_pod in gluster_pods
                if gluster_pod["pod_hostname"] == storage_hostname][0]

            gluster_peer_status = peer_ops.get_peer_status(
                podcmd.Pod(ocp_node, gluster_pod))
        else:
            # Standalone gluster: query the new host directly
            gluster_peer_status = peer_ops.get_peer_status(
                storage_hostname)
        # Every configured gluster server should appear as a peer
        self.assertEqual(
            len(gluster_peer_status), len(self.gluster_servers))

        # "connected" == 1 means the peer is reachable
        err_msg = "Expected peer status is 1 and actual is %s"
        for peer in gluster_peer_status:
            peer_status = int(peer["connected"])
            self.assertEqual(peer_status, 1, err_msg % peer_status)

        # Get heketi endpoints after adding node
        h_endpoints_after_new_node = heketi_ops.heketi_volume_endpoint_patch(
            h_client, h_server, h_volume_ids["volumes"][0])

        # Get the openshift endpoints object referenced by the heketi dc and
        # patch it with the post-add heketi endpoints; restore the pre-add
        # endpoints on cleanup
        heketi_db_endpoint = openshift_ops.oc_get_custom_resource(
            ocp_node, "dc", name=self.heketi_dc_name,
            custom=".:spec.template.spec.volumes[*].glusterfs.endpoints")[0]
        openshift_ops.oc_patch(
            ocp_node, "ep", heketi_db_endpoint, h_endpoints_after_new_node)
        self.addCleanup(
            openshift_ops.oc_patch, ocp_node, "ep", heketi_db_endpoint,
            h_endpoints_before_new_node)
        ep_addresses = openshift_ops.oc_get_custom_resource(
            ocp_node, "ep", name=heketi_db_endpoint,
            custom=".:subsets[*].addresses[*].ip")[0].split(",")

        err_msg = "Hostname %s not present in endpoints %s" % (
            storage_ip, ep_addresses)
        self.assertIn(storage_ip, ep_addresses, err_msg)