def test_targetcli_weak_permissions_config_files(self):
        """Validate permissions on config files"""

        ocp_node = self.ocp_master_node[0]
        gluster_node = self.gluster_servers[0]
        dir_perm_before, dir_perm_after = "drwxrwxrwx.", "drw-------."
        file_perm_before, file_perm_after = "-rwxrwxrwx.", "-rw-------."
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        cmd = "chmod -R 777 /etc/target/"

        # Weaken the permissions on '/etc/target' and '/etc/target/backup'
        cmd_run_on_gluster_pod_or_node(ocp_node, cmd, gluster_node)
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, gluster_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, gluster_node)

        self._validate_permission(ocp_node, gluster_node, dir_perm_before,
                                  file_perm_before)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service,
                                                   gluster_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, gluster_node)

        # Permission on '/etc/target' should be changed to default
        self._validate_permission(ocp_node, gluster_node, dir_perm_after,
                                  file_perm_after)
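
The test above relies on a `_validate_permission` helper that is not shown. A minimal sketch of what such a helper might look like, assuming `cmd_run_on_gluster_pod_or_node` returns the raw command output as a string; only the helper name and the paths come from the test, the rest is illustrative:

    def _validate_permission(self, ocp_node, gluster_node, dir_perm,
                             file_perm):
        # Compare the permission string of '/etc/target' and of each file
        # under '/etc/target/backup' with the expected values.
        dir_out = cmd_run_on_gluster_pod_or_node(
            ocp_node, "ls -ld /etc/target", gluster_node)
        self.assertEqual(
            dir_out.split()[0], dir_perm,
            "Unexpected permission on '/etc/target'")

        files_out = cmd_run_on_gluster_pod_or_node(
            ocp_node, "ls -l /etc/target/backup", gluster_node)
        for line in files_out.splitlines():
            if line.startswith("-"):
                self.assertEqual(
                    line.split()[0], file_perm,
                    "Unexpected permission on a file in '/etc/target/backup'")
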
    def test_restart_services_provision_volume_and_run_io(self, service):
        """Restart gluster service then validate volumes"""
        block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
            self.pvc_name)
        g_nodes = get_gluster_vol_hosting_nodes(block_hosting_vol)
        self.assertGreater(len(g_nodes), 2)

        # restarts glusterfs service
        restart_service_on_gluster_pod_or_node(
            self.oc_node, service, g_nodes[0])

        # wait for deployed user pod to be in Running state after restarting
        # service
        wait_for_pod_be_ready(
            self.oc_node, self.pod_name, timeout=60, wait_step=5)

        # Check that all gluster block services reach the expected state
        for g_node in g_nodes:
            for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
                status = "exited" if service == SERVICE_TARGET else "running"
                self.assertTrue(wait_for_service_status_on_gluster_pod_or_node(
                    self.oc_node, service, 'active', status, g_node))

        # validates pvc, pv, heketi block and gluster block count after
        # service restarts
        self.validate_volumes_and_blocks()
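
The `service` argument to the example above implies the test is parameterized. One plausible way to drive it, assuming the `ddt` package and the service-name constants used in the test; the class name and decorator placement are illustrative, not taken from the source:

import ddt


@ddt.ddt
class TestRestartGlusterServices(GlusterBlockBaseClass):

    @ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
    def test_restart_services_provision_volume_and_run_io(self, service):
        """Restart gluster service then validate volumes"""
        # body as in the example above
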
Example #4
    def test_restart_services_provision_volume_and_run_io(self, service):
        """Restart gluster service then validate volumes"""
        skip_msg = ("Skipping this test case due to bugs "
                    "BZ-1634745, BZ-1635736, BZ-1636477, BZ-1641668")

        # TODO(vamahaja): Add check for CRS version
        if not self.is_containerized_gluster():
            self.skipTest(skip_msg + " and not implemented CRS version check")

        if get_openshift_storage_version() < "3.11.2":
            self.skipTest(skip_msg)

        self.deploy_and_verify_resouces()

        block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
            self.pvc_name)
        g_nodes = get_gluster_vol_hosting_nodes(block_hosting_vol)
        self.assertGreater(len(g_nodes), 2)

        # restarts glusterfs service
        restart_service_on_gluster_pod_or_node(self.oc_node, service,
                                               g_nodes[0])

        # wait for deployed user pod to be in Running state after restarting
        # service
        wait_for_pod_be_ready(self.oc_node,
                              self.pod_name,
                              timeout=60,
                              wait_step=5)

        # Check that all gluster block services reach the expected state
        for g_node in g_nodes:
            for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
                state = "exited" if service == SERVICE_TARGET else "running"
                self.assertTrue(
                    wait_for_service_status_on_gluster_pod_or_node(
                        self.oc_node, service, 'active', state, g_node))

        # validates pvc, pv, heketi block and gluster block count after
        # service restarts
        self.validate_volumes_and_blocks()
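
The tests expect `gluster-block-target` to settle in the 'exited' sub-state while `tcmu-runner` and `gluster-blockd` stay 'running', which is what a oneshot-style unit versus long-running daemons would report. A rough sketch of how such a status poll could be built on top of `systemctl`; this is purely illustrative and not the library's implementation of `wait_for_service_status_on_gluster_pod_or_node` (`run_cmd` is a stand-in for whatever command runner is available):

import time


def wait_for_service_state(run_cmd, service, active_state, sub_state,
                           timeout=120, interval=5):
    # Poll 'systemctl show' until the unit reports the expected
    # ActiveState (e.g. 'active') and SubState ('running' or 'exited').
    cmd = ("systemctl show %s --property=ActiveState --property=SubState"
           % service)
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = run_cmd(cmd)
        props = dict(
            line.split("=", 1) for line in out.splitlines() if "=" in line)
        if (props.get("ActiveState") == active_state
                and props.get("SubState") == sub_state):
            return True
        time.sleep(interval)
    return False
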
Example #5
    def test_heketi_manual_cleanup_operation_in_bhv(self):
        """Validate heketi db cleanup will resolve the mismatch
           in the free size of the block hosting volume with failed
           block device create operations.
        """
        bhv_size_before, bhv_size_after, vol_count = [], [], 5
        ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes id list
        node_id_list = heketi_node_list(h_node, h_url)

        # Disable 4th and other nodes
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_node, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_url, node_id)

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        default_bhv_size = get_default_block_hosting_volume_size(
            h_node, self.heketi_dc_name)
        if default_bhv_size < h_volume_size:
            h_volume_name = "autotest-{}".format(utils.get_random_str())
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                free_space_available,
                raise_on_cleanup_error=False,
                block=True,
                json=True)
            free_space_available -= (
                int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
            h_volume_size = int(free_space_available / vol_count)

        # Get BHV list
        h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
        self.assertTrue(h_bhv_list, "Failed to get the BHV list")

        # Get BHV size
        for bhv in h_bhv_list:
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bhv_vol_size_before = vol_info.get("freesize")
            bhv_size_before.append(bhv_vol_size_before)

        # Kill Tcmu-runner service
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)

        # Register cleanups to restart the services and verify their status
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, g_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, g_node)

        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        # Create stale block volumes asynchronously
        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    heketi_blockvolume_create(h_node,
                                              h_url,
                                              h_volume_size,
                                              json=True)

        # Wait for pending operation to get generated
        self._check_for_pending_operations(h_node, h_url)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service, g_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, g_node)

        # Cleanup pending operation
        heketi_server_operation_cleanup(h_node, h_url)

        # Wait for the pending operations to get cleaned up
        for w in waiter.Waiter(timeout=120, interval=10):
            # Get BHV size on every poll, starting from a fresh list
            bhv_size_after = []
            for bhv in h_bhv_list:
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
                bhv_vol_size_after = vol_info.get("freesize")
                bhv_size_after.append(bhv_vol_size_after)

            if set(bhv_size_before) == set(bhv_size_after):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to validate BHV size. Actual: {}, "
                "Expected: {}".format(set(bhv_size_before),
                                      set(bhv_size_after)))
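
The stale-volume step above works by swapping the synchronous command runner for an asynchronous one, so `heketi_blockvolume_create` fires the CLI command and returns before it completes, leaving a pending operation in the heketi DB; `json.loads` is patched to an identity function because there is no JSON output to parse yet. A stripped-down illustration of the same pattern with a toy runner (the function names below are illustrative, not the library's):

from unittest import mock
import json


def create_volume(runner):
    # Normally runs the CLI synchronously and parses its JSON output.
    out = runner("heketi-cli blockvolume create --size=1 --json")
    return json.loads(out)


def fake_async_runner(cmd):
    # Fire-and-forget stand-in: kick the command off and return immediately.
    return "<async handle for: %s>" % cmd


with mock.patch.object(json, "loads", side_effect=lambda j: j):
    result = create_volume(fake_async_runner)
    # 'result' is just the async handle; the real create is still in flight.
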
    def test_targetcli_when_block_hosting_volume_down(self):
        """Validate no inconsistencies occur in targetcli when block volumes
           are created with one block hosting volume down."""
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        cmd = ("targetcli ls | egrep '%s' || echo unavailable")
        error_msg = ("targetcli has inconsistencies when block devices are "
                     "created with one block hosting volume %s is down")

        # Delete BHV which has no BV or fill it completely
        bhv_list = get_block_hosting_volume_list(h_node, h_server).keys()
        for bhv in bhv_list:
            bhv_info = heketi_volume_info(h_node, h_server, bhv, json=True)
            if not bhv_info["blockinfo"].get("blockvolume", []):
                heketi_volume_delete(h_node, h_server, bhv)
                continue
            free_size = bhv_info["blockinfo"].get("freesize", 0)
            if free_size:
                bv = heketi_blockvolume_create(
                    h_node, h_server, free_size, json=True)
                self.addCleanup(
                    heketi_blockvolume_delete, h_node, h_server, bv["id"])

        # Create BV
        bv = heketi_blockvolume_create(h_node, h_server, 2, json=True)
        self.addCleanup(heketi_blockvolume_delete, h_node, h_server, bv["id"])

        # Bring down BHV
        bhv_name = get_block_hosting_volume_name(h_node, h_server, bv["id"])
        ret, out, err = volume_stop("auto_get_gluster_endpoint", bhv_name)
        if ret != 0:
            err_msg = "Failed to stop gluster volume %s. error: %s" % (
                bhv_name, err)
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        self.addCleanup(podcmd.GlustoPod()(volume_start),
                        "auto_get_gluster_endpoint", bhv_name)

        ocp_node = self.ocp_master_node[0]
        gluster_block_svc = "gluster-block-target"
        self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                        ocp_node,
                        gluster_block_svc,
                        "active",
                        "exited",
                        gluster_node=self.gluster_servers[0])
        self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                        gluster_block_svc, self.gluster_servers[0])
        for condition in ("continue", "break"):
            restart_service_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                gluster_node=self.gluster_servers[0])
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                "active",
                "exited",
                gluster_node=self.gluster_servers[0])

            targetcli = cmd_run_on_gluster_pod_or_node(ocp_node,
                                                       cmd % bv["id"],
                                                       self.gluster_servers[0])
            if condition == "continue":
                self.assertEqual(targetcli, "unavailable",
                                 error_msg % bhv_name)
            else:
                self.assertNotEqual(targetcli, "unavailable",
                                    error_msg % bhv_name)
                break

            # Bring up the same BHV
            ret, out, err = volume_start("auto_get_gluster_endpoint", bhv_name)
            if ret != 0:
                err = "Failed to start gluster volume %s on %s. error: %s" % (
                    bhv_name, h_node, err)
                raise exceptions.ExecutionError(err)
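
Both `volume_stop` and `volume_start` above are handled with the same return-code check. If that pattern grows, a small helper keeps the assertions uniform; a minimal sketch assuming the `(ret, out, err)` tuple convention shown above (the helper name is hypothetical):

def assert_volume_op(op_name, ret, err, vol_name):
    # Fail loudly when a gluster volume operation returns a non-zero code.
    if ret != 0:
        err_msg = "Failed to %s gluster volume %s. error: %s" % (
            op_name, vol_name, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

# Usage:
# ret, out, err = volume_stop("auto_get_gluster_endpoint", bhv_name)
# assert_volume_op("stop", ret, err, bhv_name)
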