Example #1
    def test_delete_heketi_volume(self):
        """
        Method to test heketi volume deletion and whether it
        frees up used space after deletion
        """

        volume_info = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      10,
                                                      json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        volume_info["id"],
                        raise_on_error=False)

        free_space_after_creation = self.get_free_space_summary_devices()

        heketi_ops.heketi_volume_delete(self.heketi_client_node,
                                        self.heketi_server_url,
                                        volume_info["id"])

        free_space_after_deletion = self.get_free_space_summary_devices()

        self.assertTrue(
            free_space_after_deletion > free_space_after_creation,
            "Free space is not reclaimed after deletion "
            "of %s" % volume_info["id"])
Example #2
    def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
        """Validate heketi metrics VolumeCount after volume deletion"""

        vol_list = []

        for i in range(3):
            # Create volume
            vol = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       1,
                                       json=True)

            self.assertTrue(vol)

            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            vol['id'],
                            raise_on_error=False)

            volume_list = heketi_volume_list(self.heketi_client_node,
                                             self.heketi_server_url)

            self.assertIn(vol['id'], volume_list)
            vol_list.append(vol)

        for vol in vol_list:
            # delete volume
            heketi_volume_delete(self.heketi_client_node,
                                 self.heketi_server_url, vol['id'])
            volume_list = heketi_volume_list(self.heketi_client_node,
                                             self.heketi_server_url)
            self.assertNotIn(vol['id'], volume_list)
            self.verify_volume_count()
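
verify_volume_count is another suite helper not shown in these snippets. A plausible sketch, assuming a get_heketi_metrics helper that fetches heketi's /metrics endpoint and parses it into a dict keyed by metric name (both the helper and the metric layout are assumptions):

    def verify_volume_count(self):
        """Check the VolumeCount metric against the volume list (sketch)."""
        metrics = get_heketi_metrics(
            self.heketi_client_node, self.heketi_server_url)
        # Assume heketi reports one volume-count sample per cluster
        metric_count = sum(
            m['value'] for m in metrics['heketi_volumes_count'])
        vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)
        self.assertEqual(metric_count, len(vol_list['volumes']))
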
Example #3
    def test_create_vol_and_retrieve_topology_info(self):
        volume_names = []
        volume_ids = []

        # Create 3 volumes; the 2nd one is later expanded, making it
        # distributed-replicated
        g.log.info("Creating 3 volumes")
        for i in range(3):
            out = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       self.volume_size,
                                       json=True)
            g.log.info("Heketi volume %s successfully created" % out)
            volume_names.append(out["name"])
            volume_ids.append(out["bricks"][0]["volume"])
            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            volume_ids[i],
                            raise_on_error=(i == 2))
        heketi_volume_expand(self.heketi_client_node, self.heketi_server_url,
                             volume_ids[1], 1)

        # Check if the volumes are shown in the heketi topology
        topology_volumes = get_heketi_volume_and_brick_count_list(
            self.heketi_client_node, self.heketi_server_url)
        existing_volumes = [v for v, _ in topology_volumes]
        for v in volume_names:
            self.assertIn(v, existing_volumes)
        for v, b_count in topology_volumes:
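            # A replica-3 volume starts with 3 bricks; the expanded 2nd
            # volume gains at least one more replica set, hence 6 bricks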
            expected_bricks_count = 6 if v == volume_names[1] else 3
            self.assertGreaterEqual(
                b_count, expected_bricks_count,
                'Brick count of the %s volume is %s; expected it to be '
                'greater than or equal to %s' %
                (v, b_count, expected_bricks_count))

        # Delete first 2 volumes and verify their deletion in the topology
        for vol_id in volume_ids[:2]:
            g.log.info("Deleting volume %s" % vol_id)
            heketi_volume_delete(self.heketi_client_node,
                                 self.heketi_server_url, vol_id)
        topology_volumes = get_heketi_volume_and_brick_count_list(
            self.heketi_client_node, self.heketi_server_url)
        existing_volumes = [v for v, _ in topology_volumes]
        for vol_name in volume_names[:2]:
            self.assertNotIn(
                vol_name, existing_volumes,
                ("volume %s shown in the heketi topology after deletion"
                 "\nTopology info:\n%s" % (vol_name, existing_volumes)))

        # Check the existence of the third volume
        self.assertIn(
            volume_names[2], existing_volumes, "volume %s not "
            "shown in the heketi topology\nTopology info:"
            "\n%s" % (volume_names[2], existing_volumes))
        g.log.info("Successfully verified the topology info")
Example #4
    def test_restart_heketi_pod(self):
        """Validate restarting heketi pod"""

        # create heketi volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1,
                                        json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        vol_info['id'],
                        raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete the heketi pod (the DC restarts it)
        oc_delete(self.ocp_master_node[0],
                  'pod',
                  heketi_pod_name,
                  collect_logs=self.heketi_logs_before_delete)
        wait_for_resource_absence(self.ocp_master_node[0], 'pod',
                                  heketi_pod_name)

        # get new heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url)

        # compare the topology
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(
            new_topo_info, topo_info, "topology info is not the same, "
            "difference - %s" % diff(topo_info, new_topo_info))

        # create new volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2,
                                        json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_info['id'])
    def test_heketi_node_states_enable_disable(self):
        """Test node enable and disable functionality
        """
        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        node_list = heketi_ops.heketi_node_list(h_client, h_server)
        online_hosts = []
        for node_id in node_list:
            node_info = heketi_ops.heketi_node_info(h_client,
                                                    h_server,
                                                    node_id,
                                                    json=True)
            if node_info["state"] == "online":
                online_hosts.append(node_info)

        if len(online_hosts) < 3:
            self.skipTest(
                "This test requires at least 3 online heketi nodes")

        # Disable all nodes except the first three, if more are online
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            heketi_ops.heketi_node_disable(h_client, h_server, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable, h_client, h_server,
                            node_id)

        # Create volume when 3 nodes are online
        vol_size = 1
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_server,
                                                   vol_size,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_client, h_server,
                        vol_info['id'])

        node_id = online_hosts[0]['id']
        try:
            heketi_ops.heketi_node_disable(h_client, h_server, node_id)

            # Try to create a volume; creation should fail
            with self.assertRaises(AssertionError):
                heketi_volume = heketi_ops.heketi_volume_create(
                    h_client, h_server, vol_size)
                self.addCleanup(heketi_ops.heketi_volume_delete, h_client,
                                h_server, heketi_volume["id"])
        finally:
            # Enable heketi node
            heketi_ops.heketi_node_enable(h_client, h_server, node_id)

        # Create volume when heketi node is enabled
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_server,
                                                   vol_size,
                                                   json=True)
        heketi_ops.heketi_volume_delete(h_client, h_server, vol_info['id'])
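
Note that unittest runs addCleanup callbacks in LIFO order: the volume-delete cleanup registered last executes first, and the node re-enable callbacks registered earlier run afterwards, so the disabled nodes are restored only after the test volume is gone.
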
Example #6
    def _cleanup_heketi_volumes(self, existing_volumes):
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        for volume in new_volumes:
            h_vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
            if h_vol_info.get("block"):
                for block_vol in (
                        h_vol_info.get("blockinfo").get("blockvolume")):
                    heketi_blockvolume_delete(h_node, h_url, block_vol)
            heketi_volume_delete(h_node, h_url, volume, raise_on_error=False)
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [
            r':.metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a pod and attach the PVC to it.
        try:
            dc_name = oc_create_app_dc_with_io(
                self.node, self.pvc_name, image=self.io_container_image_cirros)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_block_host_volume_delete_without_block_volumes(self):
        """Validate deletion of empty block hosting volume"""
        block_host_create_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True,
            block=True)

        block_hosting_vol_id = block_host_create_info["id"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, block_hosting_vol_id, raise_on_error=False)

        heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url,
            block_hosting_vol_id, json=True)
    def test_restart_heketi_pod(self):
        """Validate restarting heketi pod"""

        # create heketi volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'], raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete the heketi pod (the DC restarts it)
        oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)
        wait_for_resource_absence(self.ocp_master_node[0],
                                  'pod', heketi_pod_name)

        # get new heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0],
                              heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url
        )

        # compare the topology
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(new_topo_info, topo_info,
                         "topology info is not the same, "
                         "difference - %s" % diff(topo_info, new_topo_info))

        # create new volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
        heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, vol_info['id'])
    def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
        """Validate brick paths after creation and deletion of a volume."""

        # Create heketi volume
        vol = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   size=1,
                                   json=True)
        self.assertTrue(vol, "Failed to create 1Gb heketi volume")
        vol_id = vol["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        vol_id,
                        raise_on_error=False)

        # Gather brick paths
        brick_paths = [p['path'] for p in vol["bricks"]]

        # Make sure that volume's brick paths exist in the fstab files
        self._find_bricks(brick_paths, present=True)

        # Delete heketi volume
        out = heketi_volume_delete(self.heketi_client_node,
                                   self.heketi_server_url, vol_id)
        self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)

        # Make sure that volume's brick paths are absent in the fstab file
        self._find_bricks(brick_paths, present=False)
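
The _find_bricks helper is not shown in these snippets. A sketch of the idea, assuming bricks are recorded in heketi's fstab file on the gluster nodes and reusing the cmd_run_on_gluster_pod_or_node helper that appears in a later example (the fstab path and matching logic are assumptions):

    def _find_bricks(self, brick_paths, present):
        """Assert each brick path is (or is not) in any node's fstab."""
        ocp_node = self.ocp_master_node[0]
        fstab = "/var/lib/heketi/fstab"
        for brick_path in brick_paths:
            found = False
            for g_node in self.gluster_servers:
                cmd = "grep -qF '%s' %s && echo present || echo absent" % (
                    brick_path, fstab)
                out = cmd_run_on_gluster_pod_or_node(ocp_node, cmd, g_node)
                # A brick lives on a single node, so one match is enough
                found = found or out.strip() == "present"
            self.assertEqual(
                found, present,
                "Brick path %s expected %sin fstab" % (
                    brick_path, "" if present else "not "))
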
    def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
        """Validate brick paths after creation and deletion of a volume."""

        # Create heketi volume
        vol = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, size=1, json=True)
        self.assertTrue(vol, "Failed to create 1Gb heketi volume")
        vol_id = vol["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete,
            self.heketi_client_node, self.heketi_server_url, vol_id,
            raise_on_error=False)

        # Gather brick paths
        brick_paths = [p['path'] for p in vol["bricks"]]

        # Make sure that volume's brick paths exist in the fstab files
        self._find_bricks(brick_paths, present=True)

        # Delete heketi volume
        out = heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, vol_id)
        self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)

        # Make sure that volume's brick paths are absent in the fstab file
        self._find_bricks(brick_paths, present=False)
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a pod and attach the PVC to it.
        try:
            dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        heketi_volume_delete(self.heketi_client_node,
                             self.heketi_server_url, vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_block_host_volume_delete_without_block_volumes(self):
        """Validate deletion of empty block hosting volume"""
        block_host_create_info = heketi_volume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      1,
                                                      json=True,
                                                      block=True)

        block_hosting_vol_id = block_host_create_info["id"]
        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        block_hosting_vol_id,
                        raise_on_error=False)

        heketi_volume_delete(self.heketi_client_node,
                             self.heketi_server_url,
                             block_hosting_vol_id,
                             json=True)
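
Passing block=True to heketi_volume_create provisions a block hosting volume (BHV) rather than a plain file volume; its JSON output carries a blockinfo section whose freesize and reservedsize fields later examples use to size the block volumes placed on it.
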
Example #14
    def test_delete_heketi_volume(self):
        """
        Method to test heketi volume deletion and whether it
        frees up used space after deletion
        """

        creation_output_dict = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 10, json=True)

        volume_id = creation_output_dict["name"].strip().split("_")[1]
        free_space_after_creation = self.get_free_space_summary_devices()

        heketi_ops.heketi_volume_delete(self.heketi_client_node,
                                        self.heketi_server_url, volume_id)

        free_space_after_deletion = self.get_free_space_summary_devices()

        self.assertTrue(
            free_space_after_deletion > free_space_after_creation,
            "Free space is not reclaimed after deletion of %s" % volume_id)
    def test_delete_heketi_volume(self):
        """
        Method to test heketi volume deletion and whether it
        frees up used space after deletion
        """

        creation_output_dict = heketi_ops.heketi_volume_create(
            self.heketi_client_node,
            self.heketi_server_url, 10, json=True)

        volume_id = creation_output_dict["name"].strip().split("_")[1]
        free_space_after_creation = self.get_free_space_summary_devices()

        heketi_ops.heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, volume_id)

        free_space_after_deletion = self.get_free_space_summary_devices()

        self.assertTrue(
            free_space_after_deletion > free_space_after_creation,
            "Free space is not reclaimed after deletion of %s" % volume_id)
    def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
        """Validate heketi metrics VolumeCount after volume deletion"""

        vol_list = []

        for i in range(3):
            # Create volume
            vol = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url, 1, json=True)

            self.assertTrue(vol)

            self.addCleanup(
                heketi_volume_delete,
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'],
                raise_on_error=False)

            volume_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)

            self.assertIn(vol['id'], volume_list)
            vol_list.append(vol)

        for vol in vol_list:
            # delete volume
            heketi_volume_delete(
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'])
            volume_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)
            self.assertNotIn(vol['id'], volume_list)
            self.verify_volume_count()
Example #17
    def _cleanup_heketi_volumes(self, existing_volumes):
        """Cleanup created BHV and BV"""

        volumes = heketi_ops.heketi_volume_list(self.h_node,
                                                self.h_server,
                                                json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        for volume in new_volumes:
            h_vol_info = heketi_ops.heketi_volume_info(self.h_node,
                                                       self.h_server,
                                                       volume,
                                                       json=True)
            if h_vol_info.get("block"):
                for block_vol in (
                        h_vol_info.get("blockinfo").get("blockvolume")):
                    heketi_ops.heketi_blockvolume_delete(self.h_node,
                                                         self.h_server,
                                                         block_vol,
                                                         raise_on_error=False)
            heketi_ops.heketi_volume_delete(self.h_node,
                                            self.h_server,
                                            volume,
                                            raise_on_error=False)
    def test_delete_heketidb_volume(self):
        """
        Method to test heketidb volume deletion via heketi-cli
        """
        heketidbexists = False
        msg = "Error: Cannot delete volume containing the Heketi database"

        for i in range(0, 2):
            volume_info = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                10, json=True)

            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, volume_info["id"])

        volume_list_info = heketi_ops.heketi_volume_list(
            self.heketi_client_node,
            self.heketi_server_url, json=True)

        if volume_list_info["volumes"] == []:
            raise ExecutionError("Heketi volume list empty")

        for volume_id in volume_list_info["volumes"]:
            volume_info = heketi_ops.heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                volume_id, json=True)

            if volume_info["name"] == "heketidbstorage":
                heketidbexists = True
                delete_ret, delete_output, delete_error = (
                    heketi_ops.heketi_volume_delete(
                        self.heketi_client_node,
                        self.heketi_server_url, volume_id,
                        raw_cli_output=True))

                self.assertNotEqual(delete_ret, 0, "Return code not 0")
                self.assertEqual(
                    delete_error.strip(), msg,
                    "Invalid reason for heketidb deletion failure")

        if not heketidbexists:
            raise ExecutionError(
                "heketidbstorage volume not found in the list of volumes")
    def test_volume_expansion_no_free_space(self):
        """Validate volume expansion when there is no free space"""

        vol_size, expand_size, additional_devices_attached = None, 10, {}
        h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest("At least 3 heketi nodes are required.")

        # Disable 4th and other nodes
        for node_id in heketi_node_id_list[3:]:
            heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

        # Prepare first 3 nodes
        smallest_size = None
        err_msg = ''
        for node_id in heketi_node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)

            # Disable second and other devices
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_id)
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if (smallest_size is None or
                    devices[0]["storage"]["free"] < smallest_size):
                smallest_size = devices[0]["storage"]["free"]
            for device in node_info["devices"][1:]:
                heketi_ops.heketi_device_disable(
                    h_node, h_server_url, device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    h_node, h_server_url, device["id"])

            # Gather info about additional devices
            additional_device_name = None
            for gluster_server in self.gluster_servers:
                gluster_server_data = self.gluster_servers_info[gluster_server]
                g_manage = gluster_server_data["manage"]
                g_storage = gluster_server_data["storage"]
                if not (g_manage in node_info["hostnames"]["manage"] or
                        g_storage in node_info["hostnames"]["storage"]):
                    continue
                additional_device_name = ((
                    gluster_server_data.get("additional_devices") or [''])[0])
                break

            if not additional_device_name:
                err_msg += ("No 'additional_devices' are configured for "
                            "'%s' node, which has following hostnames and "
                            "IP addresses: %s.\n" % (
                                node_id,
                                ', '.join(node_info["hostnames"]["manage"] +
                                          node_info["hostnames"]["storage"])))
                continue

            heketi_ops.heketi_device_add(
                h_node, h_server_url, additional_device_name, node_id)
            additional_devices_attached.update(
                {node_id: additional_device_name})

        # Schedule cleanup of the added devices
        for node_id in additional_devices_attached.keys():
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)
            for device in node_info["devices"]:
                if device["name"] != additional_devices_attached[node_id]:
                    continue
                self.addCleanup(self.detach_devices_attached, device["id"])
                break
            else:
                self.fail("Could not find ID for added device on "
                          "'%s' node." % node_id)

        if err_msg:
            self.skipTest(err_msg)

        # Temporarily disable the new devices
        self.disable_devices(additional_devices_attached)

        # Create volume and save info about it
        vol_size = int(smallest_size / (1024**2)) - 1
        creation_info = heketi_ops.heketi_volume_create(
            h_node, h_server_url, vol_size, json=True)
        volume_name, volume_id = creation_info["name"], creation_info["id"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete,
            h_node, h_server_url, volume_id, raise_on_error=False)

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        free_space_before_expansion = self.get_devices_summary_free_space()

        # Try to expand the volume without enough device space
        self.assertRaises(
            ExecutionError, heketi_ops.heketi_volume_expand,
            h_node, h_server_url, volume_id, expand_size)

        # Enable new devices to be able to expand our volume
        self.enable_devices(additional_devices_attached)

        # Expand volume and validate results
        heketi_ops.heketi_volume_expand(
            h_node, h_server_url, volume_id, expand_size, json=True)
        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_before_expansion, free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        self.assertGreater(
            volume_info_after_expansion["size"],
            volume_info_before_expansion["size"],
            "Size of %s not increased" % volume_id)
        self.assertGreater(
            num_of_bricks_after_expansion, num_of_bricks_before_expansion)
        self.assertEqual(
            num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

        # Delete volume and validate release of the used space
        heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
        free_space_after_deletion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_after_deletion, free_space_after_expansion,
            "Free space not reclaimed after deletion of volume %s" % volume_id)
Example #20
    def test_verify_delete_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of blockvolumes/volumes and bricks in heketi
           db during blockvolume/volume delete operation.
        """
        # Create large volumes to observe the pending operations
        vol_count, volume_ids, async_obj = 10, [], []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Verify file/block volume pending operations before creation
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_bricks_before = h_db_check_before.get("bricks")
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Respin the heketi pod to flush any stale pending db operations
        if (h_db_check_bricks_before.get("pending")
                or h_db_check_vol_before.get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Create file/block volumes
        for _ in range(vol_count):
            vol_id = eval("heketi_{}volume_create".format(vol_type))(
                h_node, h_url, h_volume_size, json=True).get("id")
            volume_ids.append(vol_id)
            self.addCleanup(eval("heketi_{}volume_delete".format(vol_type)),
                            h_node,
                            h_url,
                            vol_id,
                            raise_on_error=False)

        def run_async(cmd, hostname, raise_on_error=True):
            async_op = g.run_async(host=hostname, command=cmd)
            async_obj.append(async_op)
            return async_op

        bhv_list = []
        for vol_id in volume_ids:
            # Get BHV ids to delete in case of block volumes
            if vol_type:
                vol_info = (heketi_blockvolume_info(h_node,
                                                    h_url,
                                                    vol_id,
                                                    json=True))
                if not vol_info.get("blockhostingvolume") in bhv_list:
                    bhv_list.append(vol_info.get("blockhostingvolume"))

            # Temporarily replace the synchronous command runner with
            # run_async inside heketi_volume_delete and
            # heketi_blockvolume_delete so they execute in the background.
            with mock.patch.object(command, 'cmd_run', side_effect=run_async):
                eval("heketi_{}volume_delete".format(vol_type))(h_node, h_url,
                                                                vol_id)

        # Wait for pending operations to be generated
        for w in waiter.Waiter(timeout=30, interval=3):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No pending operations found during {}volume deletion: "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify brick pending operations during deletion
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one pending brick operation")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting the pending brick count to be a multiple of 3 "
                "but found {}".format(h_db_check_bricks.get("pending")))

        # Verify file/block volume pending operation during delete
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            h_db_check_bricks = h_db_check.get("bricks")
            if ((not h_db_check_bricks.get("pending"))
                    and (not h_db_check_vol.get("pending"))):
                break
        if w.expired:
            raise AssertionError(
                "Failed to delete {}volumes after 120 secs".format(vol_type))

        # Check that all background processes have exited
        for obj in async_obj:
            ret, out, err = obj.async_communicate()
            self.assertFalse(
                ret, "Failed to delete {}volume due to error: {}".format(
                    vol_type, err))

        # Delete BHV created during block volume creation
        if vol_type:
            for bhv_id in bhv_list:
                heketi_volume_delete(h_node, h_url, bhv_id)

        # Verify bricks and volume pending operations
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_bricks_after = h_db_check_after.get("bricks")
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        act_brick_count = h_db_check_bricks_after.get("pending")
        act_vol_count = h_db_check_vol_after.get("pending")

        # Verify bricks pending operation after delete
        err_msg = "{} operations are pending for {} after {}volume deletion"
        if not vol_type:
            self.assertFalse(
                act_brick_count,
                err_msg.format(act_brick_count, "brick", vol_type))

        # Verify file/block volume pending operations after delete
        self.assertFalse(act_vol_count,
                         err_msg.format(act_vol_count, "volume", vol_type))

        act_brick_count = h_db_check_bricks_after.get("total")
        act_vol_count = h_db_check_vol_after.get("total")
        exp_brick_count = h_db_check_bricks_before.get("total")
        exp_vol_count = h_db_check_vol_before.get("total")
        err_msg = "Actual {} and expected {} {} counts are not matched"

        # Verify if initial and final file/block volumes are same
        self.assertEqual(
            act_vol_count, exp_vol_count,
            err_msg.format(act_vol_count, exp_vol_count, "volume"))

        # Verify if initial and final bricks are same
        self.assertEqual(
            act_brick_count, exp_brick_count,
            err_msg.format(act_brick_count, exp_brick_count, "brick"))
Example #21
    def test_heketi_server_stale_operations_during_heketi_pod_reboot(self):
        """
        Validate failed/stale entries in the heketi db and perform
        a cleanup of those entries
        """
        volume_id_list, async_obj, ocp_node = [], [], self.ocp_master_node[0]
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        for i in range(0, 8):
            volume_info = heketi_ops.heketi_volume_create(h_node,
                                                          h_server,
                                                          1,
                                                          json=True)
            volume_id_list.append(volume_info["id"])
            self.addCleanup(heketi_ops.heketi_volume_delete,
                            h_node,
                            h_server,
                            volume_info["id"],
                            raise_on_error=False)

        def run_async(cmd, hostname, raise_on_error=True):
            async_op = g.run_async(host=hostname, command=cmd)
            async_obj.append(async_op)
            return async_op

        # Temporarily replace the synchronous command runner with run_async
        # so heketi_volume_delete executes in the background.
        for vol_id in volume_id_list:
            with mock.patch.object(command, 'cmd_run', side_effect=run_async):
                heketi_ops.heketi_volume_delete(h_node, h_server, vol_id)

        # Restart heketi pod and check pod is running
        heketi_pod_name = openshift_ops.get_pod_name_from_dc(
            ocp_node, self.heketi_dc_name)
        openshift_ops.oc_delete(ocp_node,
                                'pod',
                                heketi_pod_name,
                                collect_logs=self.heketi_logs_before_delete)
        self.addCleanup(self._heketi_pod_delete_cleanup, ocp_node)
        openshift_ops.wait_for_resource_absence(ocp_node, 'pod',
                                                heketi_pod_name)
        heketi_pod_name = openshift_ops.get_pod_name_from_dc(
            ocp_node, self.heketi_dc_name)
        openshift_ops.wait_for_pod_be_ready(ocp_node, heketi_pod_name)
        self.assertTrue(heketi_ops.hello_heketi(h_node, h_server),
                        "Heketi server {} is not alive".format(h_server))

        # Wait for pending operations to be generated
        for w in waiter.Waiter(timeout=30, interval=3):
            h_db_check = heketi_ops.heketi_db_check(h_node, h_server)
            h_db_check_vol = h_db_check.get("volumes")
            h_db_check_bricks = h_db_check.get("bricks")
            if ((h_db_check_vol.get("pending"))
                    and (h_db_check_bricks.get("pending"))):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No pending operations found during volume deletion. "
                "Volumes: {}, bricks: {}".format(
                    h_db_check_vol.get("pending"),
                    h_db_check_bricks.get("pending")))

        # Verify the pending brick count is a multiple of 3
        self.assertFalse(
            h_db_check_bricks.get("pending") % 3,
            "Expecting the pending brick count to be a multiple of 3 but "
            "found {}".format(h_db_check_bricks.get("pending")))

        # Verify and wait for pending operations to complete
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_ops.heketi_db_check(h_node, h_server)
            h_db_check_vol = h_db_check.get("volumes")
            h_db_check_bricks = h_db_check.get("bricks")
            if ((not h_db_check_bricks.get("pending"))
                    and (not h_db_check_vol.get("pending"))):
                break
        if w.expired:
            raise AssertionError("Failed to delete volumes after 120 secs")
    def test_targetcli_when_block_hosting_volume_down(self):
        """Validate no inconsistencies occur in targetcli when block volumes
           are created with one block hosting volume down."""
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        cmd = ("targetcli ls | egrep '%s' || echo unavailable")
        error_msg = ("targetcli has inconsistencies when block devices are "
                     "created with one block hosting volume %s is down")

        # Delete each BHV that has no BV, or fill its free block space
        bhv_list = get_block_hosting_volume_list(h_node, h_server).keys()
        for bhv in bhv_list:
            bhv_info = heketi_volume_info(h_node, h_server, bhv, json=True)
            if not bhv_info["blockinfo"].get("blockvolume", []):
                heketi_volume_delete(h_node, h_server, bhv)
                continue
            free_size = bhv_info["blockinfo"].get("freesize", 0)
            if free_size:
                # Consume the remaining block space so no new BV lands here
                bv = heketi_blockvolume_create(h_node, h_server, free_size,
                                               json=True)
                self.addCleanup(heketi_blockvolume_delete, h_node, h_server,
                                bv["id"])

        # Create BV
        bv = heketi_blockvolume_create(h_node, h_server, 2, json=True)
        self.addCleanup(heketi_blockvolume_delete, h_node, h_server, bv["id"])

        # Bring down BHV
        bhv_name = get_block_hosting_volume_name(h_node, h_server, bv["id"])
        ret, out, err = volume_stop("auto_get_gluster_endpoint", bhv_name)
        if ret != 0:
            err_msg = "Failed to stop gluster volume %s. error: %s" % (
                bhv_name, err)
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        self.addCleanup(podcmd.GlustoPod()(volume_start),
                        "auto_get_gluster_endpoint", bhv_name)

        ocp_node = self.ocp_master_node[0]
        gluster_block_svc = "gluster-block-target"
        self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                        ocp_node,
                        gluster_block_svc,
                        "active",
                        "exited",
                        gluster_node=self.gluster_servers[0])
        self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                        gluster_block_svc, self.gluster_servers[0])
        for condition in ("continue", "break"):
            restart_service_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                gluster_node=self.gluster_servers[0])
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                "active",
                "exited",
                gluster_node=self.gluster_servers[0])

            targetcli = cmd_run_on_gluster_pod_or_node(ocp_node,
                                                       cmd % bv["id"],
                                                       self.gluster_servers[0])
            if condition == "continue":
                self.assertEqual(targetcli, "unavailable",
                                 error_msg % bhv_name)
            else:
                self.assertNotEqual(targetcli, "unavailable",
                                    error_msg % bhv_name)
                break

            # Bring up the same BHV
            ret, out, err = volume_start("auto_get_gluster_endpoint", bhv_name)
            if ret != 0:
                err = "Failed to start gluster volume %s on %s. error: %s" % (
                    bhv_name, h_node, err)
                raise exceptions.ExecutionError(err)
    def test_create_max_num_blockhostingvolumes(self):
        num_of_bv = 10
        new_bhv_list, bv_list, g_nodes = [], [], []
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skip the test case since number of"
                          "online nodes is less than 3.")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        # Get existing list of BHV's
        existing_bhv_list = get_block_hosting_volume_list(
            self.heketi_client_node, self.heketi_server_url)

        # Skip the test if available space is less than default_bhv_size
        if free_space_available < default_bhv_size:
            self.skipTest("Skip the test case since free_space_available %s"
                          "is less than space_required_for_bhv %s ." %
                          (free_space_available, default_bhv_size))

        # Create BHV's
        while free_space_available > default_bhv_size:
            block_host_create_info = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url,
                default_bhv_size,
                json=True,
                block=True)
            if block_host_create_info["id"] not in existing_bhv_list.keys():
                new_bhv_list.append(block_host_create_info["id"])
            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_host_create_info["id"],
                            raise_on_error=False)
            block_vol_size = int(
                block_host_create_info["blockinfo"]["freesize"] / num_of_bv)

            # Create specified number of BV's in BHV's created
            for i in range(0, num_of_bv):
                block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      block_vol_size,
                                                      json=True,
                                                      ha=3,
                                                      auth=True)
                self.addCleanup(heketi_blockvolume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                block_vol["id"],
                                raise_on_error=False)
                bv_list.append(block_vol["id"])
            free_space_available = int(free_space_available - default_bhv_size)

        # Get gluster node ips
        h_nodes_ids = heketi_node_list(self.heketi_client_node,
                                       self.heketi_server_url)
        for h_node in h_nodes_ids[:2]:
            g_node = heketi_node_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      h_node,
                                      json=True)
            g_nodes.append(g_node['hostnames']['manage'][0])

        # Check that gluster-related services and heketi did not crash
        services = (("glusterd", "running"), ("gluster-blockd", "running"),
                    ("tcmu-runner", "running"), ("gluster-block-target",
                                                 "exited"))
        for g_node in g_nodes:
            for service, state in services:
                wait_for_service_status_on_gluster_pod_or_node(
                    self.ocp_client[0],
                    service,
                    'active',
                    state,
                    g_node,
                    raise_on_error=False)
            out = hello_heketi(self.heketi_client_node, self.heketi_server_url)
            self.assertTrue(
                out, "Heketi server %s is not alive" % self.heketi_server_url)

        # Delete all the BHV's and BV's created
        for bv_volume in bv_list:
            heketi_blockvolume_delete(self.heketi_client_node,
                                      self.heketi_server_url, bv_volume)

        # Check if any blockvolume exists in heketi & gluster
        for bhv_volume in new_bhv_list[:]:
            heketi_vol_info = heketi_volume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 bhv_volume,
                                                 json=True)
            self.assertNotIn("blockvolume",
                             heketi_vol_info["blockinfo"].keys())
            gluster_vol_info = get_block_list('auto_get_gluster_endpoint',
                                              volname="vol_%s" % bhv_volume)
            self.assertIsNotNone(gluster_vol_info,
                                 "Failed to get volume info %s" % bhv_volume)
            new_bhv_list.remove(bhv_volume)
            for blockvol in gluster_vol_info:
                self.assertNotIn("blockvol_", blockvol)
                heketi_volume_delete(self.heketi_client_node,
                                     self.heketi_server_url, bhv_volume)

        # Check if all blockhosting volumes are deleted from heketi
        self.assertFalse(new_bhv_list)
Example #24
    def test_volume_expansion_no_free_space(self):
        """Validate volume expansion when there is no free space"""

        vol_size, expand_size, additional_devices_attached = None, 10, {}
        h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest("At least 3 heketi nodes are required.")

        # Disable 4th and other nodes
        for node_id in heketi_node_id_list[3:]:
            heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

        # Prepare first 3 nodes
        smallest_size = None
        err_msg = ''
        for node_id in heketi_node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)

            # Disable second and other devices
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_id)
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if (smallest_size is None
                    or devices[0]["storage"]["free"] < smallest_size):
                smallest_size = devices[0]["storage"]["free"]
            for device in node_info["devices"][1:]:
                heketi_ops.heketi_device_disable(
                    h_node, h_server_url, device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    h_node, h_server_url, device["id"])

            # Gather info about additional devices
            additional_device_name = None
            for gluster_server in self.gluster_servers:
                gluster_server_data = self.gluster_servers_info[gluster_server]
                g_manage = gluster_server_data["manage"]
                g_storage = gluster_server_data["storage"]
                if not (g_manage in node_info["hostnames"]["manage"]
                        or g_storage in node_info["hostnames"]["storage"]):
                    continue
                additional_device_name = ((
                    gluster_server_data.get("additional_devices") or [''])[0])
                break

            if not additional_device_name:
                err_msg += ("No 'additional_devices' are configured for "
                            "'%s' node, which has following hostnames and "
                            "IP addresses: %s.\n" % (
                                node_id,
                                ', '.join(
                                    node_info["hostnames"]["manage"]
                                    + node_info["hostnames"]["storage"])))
                continue

            heketi_ops.heketi_device_add(
                h_node, h_server_url, additional_device_name, node_id)
            additional_devices_attached.update(
                {node_id: additional_device_name})

        # Schedule cleanup of the added devices
        for node_id in additional_devices_attached.keys():
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)
            for device in node_info["devices"]:
                if device["name"] != additional_devices_attached[node_id]:
                    continue
                self.addCleanup(self.detach_devices_attached, device["id"])
                break
            else:
                self.fail("Could not find ID for added device on "
                          "'%s' node." % node_id)

        if err_msg:
            self.skipTest(err_msg)

        # Temporarily disable the new devices
        self.disable_devices(additional_devices_attached)

        # Create volume and save info about it
        vol_size = int(smallest_size / (1024**2)) - 1
        creation_info = heketi_ops.heketi_volume_create(
            h_node, h_server_url, vol_size, json=True)
        volume_name, volume_id = creation_info["name"], creation_info["id"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete,
            h_node, h_server_url, volume_id, raise_on_error=False)

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        free_space_before_expansion = self.get_devices_summary_free_space()

        # Try to expand volume with not enough device space
        self.assertRaises(
            AssertionError, heketi_ops.heketi_volume_expand,
            h_node, h_server_url, volume_id, expand_size)

        # Enable new devices to be able to expand our volume
        self.enable_devices(additional_devices_attached)

        # Expand volume and validate results
        heketi_ops.heketi_volume_expand(
            h_node, h_server_url, volume_id, expand_size, json=True)
        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_before_expansion, free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        self.assertGreater(
            volume_info_after_expansion["size"],
            volume_info_before_expansion["size"],
            "Size of %s not increased" % volume_id)
        self.assertGreater(
            num_of_bricks_after_expansion, num_of_bricks_before_expansion)
        self.assertEqual(
            num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

        # Delete volume and validate release of the used space
        heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
        free_space_after_deletion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_after_deletion, free_space_after_expansion,
            "Free space not reclaimed after deletion of volume %s" % volume_id)

    def test_volume_expansion_rebalance_brick(self):
        """Validate volume expansion with brick and check rebalance"""
        creation_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 10, json=True)

        self.assertNotEqual(creation_info, False, "Volume creation failed")

        volume_name = creation_info["name"]
        volume_id = creation_info["id"]

        free_space_after_creation = self.get_devices_summary_free_space()

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            self.heketi_client_node,
            self.heketi_server_url,
            volume_id, json=True)

        self.assertNotEqual(volume_info_before_expansion, False,
                            "Volume info for %s failed" % volume_id)

        heketi_vol_info_size_before_expansion = (
            volume_info_before_expansion["size"])

        self.get_brick_and_volume_status(volume_name)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)

        expansion_info = heketi_ops.heketi_volume_expand(
            self.heketi_client_node,
            self.heketi_server_url,
            volume_id, 5)

        self.assertNotEqual(expansion_info, False,
                            "Volume expansion of %s failed" % volume_id)

        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertTrue(
            free_space_after_creation > free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)

        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            self.heketi_client_node,
            self.heketi_server_url,
            volume_id, json=True)

        self.assertNotEqual(volume_info_after_expansion, False,
                            "Volume info failed for %s" % volume_id)

        heketi_vol_info_size_after_expansion = (
            volume_info_after_expansion["size"])

        difference_size = (heketi_vol_info_size_after_expansion -
                           heketi_vol_info_size_before_expansion)

        self.assertTrue(
            difference_size > 0,
            "Size not increased after expansion of %s" % volume_id)

        self.get_brick_and_volume_status(volume_name)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)

        num_of_bricks_added = (num_of_bricks_after_expansion -
                               num_of_bricks_before_expansion)

        self.assertEqual(
            num_of_bricks_added, 3,
            "Number of bricks added is not 3 for %s" % volume_id)

        self.get_rebalance_status(volume_name)

        deletion_info = heketi_ops.heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url,
            volume_id, json=True)

        self.assertNotEqual(deletion_info, False,
                            "Deletion of volume %s failed" % volume_id)

        free_space_after_deletion = self.get_devices_summary_free_space()

        self.assertTrue(
            free_space_after_deletion > free_space_after_expansion,
            "Free space is not reclaimed after volume deletion of %s"
            % volume_id)
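The brick-count assertions above encode that a replica-3 volume grows by whole replica sets: the number of bricks added is positive, divisible by 3, and here exactly 3. A small worked check with hypothetical counts:

REPLICA = 3  # replica count used by these tests

def bricks_added(before, after):
    added = after - before
    # expansion of a replica-3 volume adds whole replica sets
    assert added > 0 and added % REPLICA == 0, added
    return added

# e.g. 3 bricks before expansion, 6 after => one new replica set of 3
print(bricks_added(3, 6))  # -> 3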

    def test_set_heketi_vol_size_and_brick_amount_limits(self):
        """Validate heketi volume size and brick amount limits."""
        # Get Heketi secret name
        cmd_get_heketi_secret_name = (
            "oc get dc -n %s %s -o jsonpath='{.spec.template.spec.volumes"
            "[?(@.name==\"config\")].secret.secretName}'" %
            (self.storage_project_name, self.heketi_dc_name))
        heketi_secret_name = self.cmd_run(cmd_get_heketi_secret_name)

        # Read Heketi secret data
        self.node = self.ocp_master_node[0]
        heketi_secret_data_str_base64 = oc_get_custom_resource(
            self.node,
            "secret",
            ":.data.'heketi\.json'",  # noqa
            name=heketi_secret_name)[0]
        heketi_secret_data_str = self.cmd_run("echo %s | base64 -d" %
                                              heketi_secret_data_str_base64)
        heketi_secret_data = json.loads(heketi_secret_data_str)

        # Update Heketi secret data
        brick_min_size_gb, brick_max_size_gb = 2, 4
        heketi_secret_data["glusterfs"].update({
            "brick_min_size_gb": brick_min_size_gb,
            "brick_max_size_gb": brick_max_size_gb,
            "max_bricks_per_volume": 3,
        })
        heketi_secret_data_patched = json.dumps(heketi_secret_data)
        heketi_secret_data_str_encoded = self.cmd_run(
            "echo '%s' |base64" % heketi_secret_data_patched).replace(
                '\n', '')
        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        try:
            # Patch Heketi secret, keeping '%s' as a placeholder for the
            # base64-encoded data so the command can be reused on revert
            cmd_patch_heketi_secret = (
                'oc patch secret -n %s %s -p '
                '"{\\"data\\": {\\"heketi.json\\": \\"%s\\"}}"') % (
                    self.storage_project_name, heketi_secret_name, "%s")
            self.cmd_run(cmd_patch_heketi_secret %
                         heketi_secret_data_str_encoded)

            # Recreate the Heketi pod to make it reuse updated configuration
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 0)
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 1)

            # Try to create too small and too big volumes
            # It must fail because allowed range is not satisfied
            for gb in (brick_min_size_gb - 1, brick_max_size_gb + 1):
                try:
                    vol_1 = heketi_volume_create(h_client,
                                                 h_server,
                                                 size=gb,
                                                 json=True)
                except AssertionError:
                    pass
                else:
                    self.addCleanup(heketi_volume_delete, h_client, h_server,
                                    vol_1['id'])
                    self.assertFalse(
                        vol_1,
                        "Volume '%s' got unexpectedly created. Heketi server "
                        "configuration haven't made required effect." %
                        (vol_1.get('id', 'failed_to_get_heketi_vol_id')))

            # Create the smallest allowed volume
            vol_2 = heketi_volume_create(h_client,
                                         h_server,
                                         size=brick_min_size_gb,
                                         json=True)
            self.addCleanup(heketi_volume_delete, h_client, h_server,
                            vol_2['id'])

            # Try to expand volume, it must fail due to the brick amount limit
            self.assertRaises(AssertionError, heketi_volume_expand, h_client,
                              h_server, vol_2['id'], 2)

            # Create the largest allowed volume
            vol_3 = heketi_volume_create(h_client,
                                         h_server,
                                         size=brick_max_size_gb,
                                         json=True)
            heketi_volume_delete(h_client, h_server, vol_3['id'])
        finally:
            # Revert the Heketi configuration back
            self.cmd_run(cmd_patch_heketi_secret %
                         heketi_secret_data_str_base64)
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 0)
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 1)

        # Create volume less than the old minimum limit
        vol_4 = heketi_volume_create(h_client,
                                     h_server,
                                     size=(brick_min_size_gb - 1),
                                     json=True)
        self.addCleanup(heketi_volume_delete, h_client, h_server, vol_4['id'])

        # Create volume bigger than the old maximum limit and expand it
        vol_5 = heketi_volume_create(h_client,
                                     h_server,
                                     size=(brick_max_size_gb + 1),
                                     json=True)
        self.addCleanup(heketi_volume_delete, h_client, h_server, vol_5['id'])
        heketi_volume_expand(h_client, h_server, vol_5['id'], 2)
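The secret patch above round-trips heketi.json through the shell (`base64 -d` to read it, `base64` to write it back). A minimal sketch of the same round-trip in pure Python, using hypothetical config values that mirror the limits set above:

import base64
import json

secret = {"glusterfs": {"brick_min_size_gb": 2,
                        "brick_max_size_gb": 4,
                        "max_bricks_per_volume": 3}}

# encode the JSON as the `oc patch secret` data field expects
encoded = base64.b64encode(json.dumps(secret).encode("utf-8")).decode()

# decode it back, as `base64 -d` does, and verify the round-trip
decoded = json.loads(base64.b64decode(encoded).decode("utf-8"))
assert decoded == secret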
Example #27
    def _create_distributed_replica_vol(self, validate_cleanup, block=False):
        """Create a dist-rep volume and optionally validate its cleanup."""
        # Create distributed vol
        vol_size_gb = self._get_vol_size()
        heketi_url = self.heketi_server_url
        h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        try:
            heketi_vol = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                vol_size_gb,
                json=True,
                raise_on_cleanup_error=False,
                block=block)
        except AssertionError as e:
            # NOTE: rare situation when we need to decrease the size of a
            #       volume while still expecting it to be distributed.
            if not ('more required' in str(e) and
                    ('Insufficient suitable allocatable extents for '
                     'logical volume' in str(e))):
                raise
            g.log.info("Failed to create distributed '%s'Gb volume. "
                       "Trying to create another one, smaller by 1Gb." %
                       vol_size_gb)

            vol_size_gb -= 1
            heketi_vol = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                vol_size_gb,
                json=True,
                raise_on_cleanup_error=False,
                block=block)
        g.log.info("Successfully created distributed volume.")

        vol_name = heketi_vol['name']
        vol_id = heketi_vol["bricks"][0]["volume"]

        # Get gluster volume info
        g.log.info("Get gluster volume '%s' info" % vol_name)
        gluster_vol = get_volume_info('auto_get_gluster_endpoint',
                                      volname=vol_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume '%s' info" % vol_name)
        g.log.info("Successfully got volume '%s' info" % vol_name)
        gluster_vol = gluster_vol[vol_name]
        self.assertEqual(
            gluster_vol["typeStr"], "Distributed-Replicate",
            "'%s' gluster vol isn't a Distributed-Replicate volume" % vol_name)

        # Check amount of bricks
        brick_amount = len(gluster_vol['bricks']['brick'])
        self.assertEqual(
            brick_amount % 3, 0,
            "Brick amount is expected to be divisible by 3. "
            "Actual amount is '%s'" % brick_amount)
        self.assertGreater(
            brick_amount, 3, "Brick amount is expected to be bigger than 3. "
            "Actual amount is '%s'." % brick_amount)

        # Validate heketi's cleanup of the dist-rep volume only when
        # requested; otherwise just return the volume id
        if not validate_cleanup:
            return vol_id

        # Get the free space after creating heketi volume
        free_space_after_creating_vol = self._get_free_space()

        # Delete heketi volume
        g.log.info("Deleting heketi volume '%s'" % vol_id)
        volume_deleted = heketi_volume_delete(self.heketi_client_node,
                                              heketi_url, vol_id)
        self.assertTrue(volume_deleted,
                        "Failed to delete heketi volume '%s'" % vol_id)
        g.log.info("Heketi volume '%s' has successfully been deleted" % vol_id)

        # Check the heketi volume list
        g.log.info("List heketi volumes")
        heketi_volumes = heketi_volume_list(self.heketi_client_node,
                                            self.heketi_server_url,
                                            json=True)
        self.assertTrue(heketi_volumes, "Failed to list heketi volumes")
        g.log.info("Heketi volumes have successfully been listed")
        heketi_volumes = heketi_volumes.get('volumes', heketi_volumes)
        self.assertNotIn(vol_id, heketi_volumes)
        self.assertNotIn(vol_name, heketi_volumes)

        # Check the gluster volume list
        g.log.info("Get the gluster volume list")
        gluster_volumes = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(gluster_volumes, "Unable to get Gluster volume list")

        g.log.info("Successfully got Gluster volume list" % gluster_volumes)
        self.assertNotIn(vol_id, gluster_volumes)
        self.assertNotIn(vol_name, gluster_volumes)

        # Get the free space after deleting the heketi volume
        free_space_after_deleting_vol = self._get_free_space()

        # Compare the free space before and after deleting the volume
        g.log.info("Comparing the free space before and after deleting volume")
        self.assertLessEqual(free_space_after_creating_vol + (3 * vol_size_gb),
                             free_space_after_deleting_vol)
        g.log.info(
            "Volume successfully deleted and space is reallocated. "
            "Free space after creating volume %s. "
            "Free space after deleting volume %s." %
            (free_space_after_creating_vol, free_space_after_deleting_vol))
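The final assertion above encodes the space-accounting rule: a replica-3 volume of N Gb occupies 3*N Gb of raw device space, so deleting it must free at least that much. A minimal check with hypothetical numbers:

def check_space_reclaimed(free_after_create, free_after_delete,
                          vol_size_gb, replica=3):
    # deleting a replica-3 volume should return at least
    # replica * size of raw space to the devices
    assert free_after_create + replica * vol_size_gb <= free_after_delete

# e.g. 94 Gb free while a 2 Gb volume exists, 100 Gb free after deletion
check_space_reclaimed(94, 100, 2)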
Example #29
    def test_volume_creation_of_size_greater_than_the_device_size(self):
        """Validate creation of a volume of size greater than the size of a
        device.
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Remove existing BHV to calculate freespace
        bhv_list = heketi_ops.get_block_hosting_volume_list(h_node, h_url)
        if bhv_list:
            for bhv in bhv_list:
                bhv_info = heketi_ops.heketi_volume_info(h_node,
                                                         h_url,
                                                         bhv,
                                                         json=True)
                if bhv_info['blockinfo'].get('blockvolume') is None:
                    heketi_ops.heketi_volume_delete(h_node, h_url, bhv)

        topology = heketi_ops.heketi_topology_info(h_node, h_url, json=True)
        nodes_free_space, nodes_ips = [], []
        selected_nodes, selected_devices = [], []
        cluster = topology['clusters'][0]
        node_count = len(cluster['nodes'])
        msg = ("At least 3 Nodes are required in cluster. "
               "But only %s Nodes are present." % node_count)
        if node_count < 3:
            self.skipTest(msg)

        online_nodes_count = 0
        for node in cluster['nodes']:
            nodes_ips.append(node['hostnames']['storage'][0])

            if node['state'] != 'online':
                continue

            online_nodes_count += 1

            # Disable nodes beyond the 3rd online node
            if online_nodes_count > 3:
                heketi_ops.heketi_node_disable(h_node, h_url, node['id'])
                self.addCleanup(heketi_ops.heketi_node_enable, h_node, h_url,
                                node['id'])
                continue

            selected_nodes.append(node['id'])

            device_count = len(node['devices'])
            msg = ("At least 2 Devices are required on each Node."
                   "But only %s Devices are present." % device_count)
            if device_count < 2:
                self.skipTest(msg)

            sel_devices, online_devices_count, free_space = [], 0, 0
            for device in node['devices']:
                if device['state'] != 'online':
                    continue

                online_devices_count += 1

                # Disable devices beyond the 2nd online device
                if online_devices_count > 2:
                    heketi_ops.heketi_device_disable(h_node, h_url,
                                                     device['id'])
                    self.addCleanup(heketi_ops.heketi_device_enable, h_node,
                                    h_url, device['id'])
                    continue

                sel_devices.append(device['id'])
                free_space += int(device['storage']['free'] / (1024**2))

            selected_devices.append(sel_devices)
            nodes_free_space.append(free_space)

            msg = ("At least 2 online Devices are required on each Node. "
                   "But only %s Devices are online on Node: %s." %
                   (online_devices_count, node['id']))
            if online_devices_count < 2:
                self.skipTest(msg)

        msg = ("At least 3 online Nodes are required in cluster. "
               "But only %s Nodes are online in Cluster: %s." %
               (online_nodes_count, cluster['id']))
        if online_nodes_count < 3:
            self.skipTest(msg)

        # Select node with minimum free space
        min_free_size = min(nodes_free_space)
        index = nodes_free_space.index(min_free_size)

        # Get max device size from selected node
        device_size = 0
        for device in selected_devices[index]:
            device_info = heketi_ops.heketi_device_info(h_node,
                                                        h_url,
                                                        device,
                                                        json=True)
            device_size = max(device_size,
                              (int(device_info['storage']['total'] /
                                   (1024**2))))

        vol_size = device_size + 1

        if vol_size >= min_free_size:
            self.skipTest('Required free space %s is not available' % vol_size)

        # Create heketi volume with device size + 1
        vol_info = self.create_heketi_volume_with_name_and_wait(
            name="volume_size_greater_than_device_size",
            size=vol_size,
            json=True)

        # Get gluster server IP's from heketi volume info
        glusterfs_servers = heketi_ops.get_vol_file_servers_and_hosts(
            h_node, h_url, vol_info['id'])

        # Verify gluster server IP's in heketi volume info
        msg = ("gluster IP's '%s' does not match with IP's '%s' found in "
               "heketi volume info" %
               (nodes_ips, glusterfs_servers['vol_servers']))
        self.assertEqual(set(glusterfs_servers['vol_servers']), set(nodes_ips),
                         msg)

        vol_name = vol_info['name']
        gluster_v_info = self.get_gluster_vol_info(vol_name)

        # Verify replica count in gluster v info
        msg = "Volume %s is replica %s instead of replica 3" % (
            vol_name, gluster_v_info['replicaCount'])
        self.assertEqual('3', gluster_v_info['replicaCount'], msg)

        # Verify distCount in gluster v info
        msg = "Volume %s has distCount %s instead of the expected %s" % (
            vol_name, int(gluster_v_info['distCount']),
            int(gluster_v_info['brickCount']) // 3)
        self.assertEqual(
            int(gluster_v_info['brickCount']) // 3,
            int(gluster_v_info['distCount']), msg)

        # Verify bricks count in gluster v info
        msg = (
            "Volume %s does not have bricks count multiple of 3. It has %s" %
            (vol_name, gluster_v_info['brickCount']))
        self.assertFalse(int(gluster_v_info['brickCount']) % 3, msg)
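The last three assertions encode the volume-shape invariants for a replica-3 distributed-replicate volume: brickCount is a multiple of 3, and distCount equals brickCount // 3. A small sketch of those invariants with hypothetical counts:

def check_vol_shape(brick_count, dist_count, replica=3):
    # bricks come in whole replica sets ...
    assert brick_count % replica == 0, brick_count
    # ... and the distribute count is the number of those sets
    assert dist_count == brick_count // replica, (dist_count, brick_count)

# e.g. 12 bricks => 4 distribute subvolumes of replica 3
check_vol_shape(12, 4)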