def test_heketi_block_volume_create_with_size_more_than_bhv_free_space(
            self):
        """ Test to create heketi block volume of size greater than
            free space in BHV so that it will create a new BHV.
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        default_bhv_size = get_default_block_hosting_volume_size(
            self.node, self.heketi_dc_name)
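        # The 2% reserve below is assumed to match the space heketi holds
        # back on a BHV, so a PVC of (default BHV size - reserve) is
        # expected to exhaust one fresh BHV.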
        reserve_size = math.ceil(default_bhv_size * 0.02)
        bhv_list, pvc_size = [], (default_bhv_size - reserve_size)

        # Get existing BHV list
        bhv_list = list(get_block_hosting_volume_list(h_node, h_url).keys())
        for vol in bhv_list:
            info = heketi_volume_info(h_node, h_url, vol, json=True)
            if info['blockinfo']['freesize'] >= pvc_size:
                self.skipTest(
                    "Skip test case since there is at least one BHV with free"
                    " space {} greater than or equal to the requested size"
                    " {}".format(info['blockinfo']['freesize'], pvc_size))

        # Verify that there is enough free space for two BHVs
        self.verify_free_space(2 * (default_bhv_size + 1))

        sc_name = self.create_storage_class()

        self._dynamic_provisioning_block_with_bhv_cleanup(
            sc_name, pvc_size, bhv_list)
        self._dynamic_provisioning_block_with_bhv_cleanup(
            sc_name, pvc_size, bhv_list)
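
        # Each of the two requests above needs a full BHV worth of block
        # space, so heketi should have provisioned two new BHVs.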
        bhv_post = len(get_block_hosting_volume_list(h_node, h_url))
        err_msg = ("New BHVs were not created to satisfy the block PV requests"
                   " No. of BHV before the test : {} \n"
                   " No. of BHV after the test : {}".format(
                       len(bhv_list), bhv_post))
        self.assertEqual(bhv_post, (len(bhv_list) + 2), err_msg)

    def validate_block_volumes_count(self, h_node, h_server, node_ip):

        # Get the list of block volumes using heketi
        h_blockvol_list = heketi_blockvolume_list(
            h_node, h_server, json=True)

        # Aggregate the gluster block list across all existing BHVs
        bhv_list = list(
            get_block_hosting_volume_list(h_node, h_server).keys())
        gluster_block_list = []
        for vol in bhv_list:
            bhv_info = heketi_volume_info(h_node, h_server, vol, json=True)
            bhv_name = bhv_info['name']
            block_list = get_block_list(node_ip, volname=bhv_name)
            self.assertIsNotNone(
                block_list,
                "Failed to get gluster block list for volume"
                " {}".format(bhv_name))
            gluster_block_list.extend(block_list)

        # Heketi's block volume count must match what gluster reports
        self.assertEqual(
            len(h_blockvol_list['blockvolumes']), len(gluster_block_list),
            "Block volume count mismatch. Expected: '{}',"
            " Actual: '{}'".format(
                len(h_blockvol_list['blockvolumes']),
                len(gluster_block_list)))

    def test_heketi_manual_cleanup_operation_in_bhv(self):
        """Validate heketi db cleanup will resolve the mismatch
           in the free size of the block hosting volume with failed
           block device create operations.
        """
        bhv_size_before, bhv_size_after, vol_count = [], [], 5
        ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes id list
        node_id_list = heketi_node_list(h_node, h_url)

        # Disable the 4th and subsequent nodes
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_node, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_url, node_id)

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
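        # Cap volumes at 50 GB each to bound runtime; with little free
        # space, fall back to 1 GB volumes and fewer of them.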
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        default_bhv_size = get_default_block_hosting_volume_size(
            h_node, self.heketi_dc_name)
        if default_bhv_size < h_volume_size:
            h_volume_name = "autotest-{}".format(utils.get_random_str())
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                free_space_available,
                raise_on_cleanup_error=False,
                block=True,
                json=True)
            free_space_available -= (
                int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
            h_volume_size = int(free_space_available / vol_count)

        # Get BHV list
        h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
        self.assertTrue(h_bhv_list, "Failed to get the BHV list")

        # Get BHV size
        for bhv in h_bhv_list:
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bhv_vol_size_before = vol_info.get("freesize")
            bhv_size_before.append(bhv_vol_size_before)

        # Kill the tcmu-runner service so that in-flight block-device
        # create operations fail and leave pending entries behind
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)

        # Register cleanups to restart the services after the test
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, g_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, g_node)

        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)
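
        # Patching json.loads with the identity function and command.cmd_run
        # with the async runner above turns each heketi_blockvolume_create
        # call into a fire-and-forget request, so the creates race with the
        # killed tcmu-runner and leave stale pending operations in heketi.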

        # Create stale block volumes asynchronously
        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    heketi_blockvolume_create(h_node,
                                              h_url,
                                              h_volume_size,
                                              json=True)

        # Wait for pending operations to be generated
        self._check_for_pending_operations(h_node, h_url)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service, g_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, g_node)

        # Cleanup pending operation
        heketi_server_operation_cleanup(h_node, h_url)

        # Wait for the pending operations to get cleaned up
        for w in waiter.Waiter(timeout=120, interval=10):
            # Get BHV size after the cleanup
            bhv_size_after = []
            for bhv in h_bhv_list:
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
                bhv_size_after.append(vol_info.get("freesize"))

            if set(bhv_size_before) == set(bhv_size_after):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to validate BHV free size. Expected: {},"
                " Actual: {}".format(set(bhv_size_before),
                                     set(bhv_size_after)))

    def test_expansion_of_block_hosting_volume_using_heketi(self):
        """Verify that after expanding block hosting volume we are able to
        consume the expanded space"""

        h_node = self.heketi_client_node
        h_url = self.heketi_server_url
        bvols_in_bhv = set([])
        bvols_pv = set([])

        BHVS = get_block_hosting_volume_list(h_node, h_url)

        free_BHVS_count = 0
        for vol in BHVS.keys():
            info = heketi_volume_info(h_node, h_url, vol, json=True)
            if info['blockinfo']['freesize'] > 0:
                free_BHVS_count += 1
            if free_BHVS_count > 1:
                self.skipTest("Skip test case because there is more than one"
                              " Block Hosting Volume with free space")

        # Create a block volume of 1 GB
        bvol_info = heketi_blockvolume_create(h_node, h_url, 1, json=True)
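        # heketi picks (or provisions) a BHV for the new block volume and
        # reports its id in the 'blockhostingvolume' field used below.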

        expand_size = 20
        try:
            self.verify_free_space(expand_size)
            bhv = bvol_info['blockhostingvolume']
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols_in_bhv.update(vol_info['blockinfo']['blockvolume'])
        finally:
            # cleanup BHV if there is only one block volume inside it
            if len(bvols_in_bhv) == 1:
                self.addCleanup(heketi_volume_delete,
                                h_node,
                                h_url,
                                bhv,
                                json=True)
            self.addCleanup(heketi_blockvolume_delete, h_node, h_url,
                            bvol_info['id'])

        size = vol_info['size']
        free_size = vol_info['blockinfo']['freesize']
        bvol_count = int(free_size / expand_size)
        bricks = vol_info['bricks']

        # Create PVCs to fill the BHV
        pvcs = self.create_and_wait_for_pvcs(
            pvc_size=(expand_size if bvol_count else free_size),
            pvc_amount=(bvol_count or 1),
            timeout=300)

        vol_expand = True

        for i in range(2):
            # get the vol ids from pvcs
            for pvc in pvcs:
                pv = get_pv_name_from_pvc(self.node, pvc)
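                # A block PV records its heketi block volume id in the
                # 'gluster.org/volume-id' annotation.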
                custom = r':.metadata.annotations."gluster\.org\/volume-id"'
                bvol_id = oc_get_custom_resource(self.node, 'pv', custom, pv)
                bvols_pv.add(bvol_id[0])

            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols = vol_info['blockinfo']['blockvolume']
            bvols_in_bhv.update(bvols)
            self.assertEqual(bvols_pv, (bvols_in_bhv & bvols_pv))

            # Expand BHV and verify bricks and size of BHV
            if vol_expand:
                vol_expand = False
                heketi_volume_expand(h_node,
                                     h_url,
                                     bhv,
                                     expand_size,
                                     json=True)
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)

                self.assertEqual(size + expand_size, vol_info['size'])
                self.assertFalse(len(vol_info['bricks']) % 3)
                self.assertLess(len(bricks), len(vol_info['bricks']))

                # create more PVCs in expanded BHV
                pvcs = self.create_and_wait_for_pvcs(
                    pvc_size=(expand_size - 1), pvc_amount=1)

    def test_targetcli_when_block_hosting_volume_down(self):
        """Validate no inconsistencies occur in targetcli when block volumes
           are created with one block hosting volume down."""
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        cmd = ("targetcli ls | egrep '%s' || echo unavailable")
        error_msg = ("targetcli has inconsistencies when block devices are "
                     "created with one block hosting volume %s is down")

        # Delete any BHV that has no BVs; otherwise fill it completely
        bhv_list = get_block_hosting_volume_list(h_node, h_server).keys()
        for bhv in bhv_list:
            bhv_info = heketi_volume_info(h_node, h_server, bhv, json=True)
            if not bhv_info["blockinfo"].get("blockvolume", []):
                heketi_volume_delete(h_node, h_server, bhv)
                continue
            free_size = bhv_info["blockinfo"].get("freesize", 0)
            if free_size:
                bv = heketi_blockvolume_create(h_node,
                                               h_server,
                                               free_size,
                                               json=True)
                self.addCleanup(heketi_blockvolume_delete, h_node, h_server,
                                bv["id"])

        # Create BV
        bv = heketi_blockvolume_create(h_node, h_server, 2, json=True)
        self.addCleanup(heketi_blockvolume_delete, h_node, h_server, bv["id"])

        # Bring down BHV
        bhv_name = get_block_hosting_volume_name(h_node, h_server, bv["id"])
        ret, out, err = volume_stop("auto_get_gluster_endpoint", bhv_name)
        if ret != 0:
            err_msg = "Failed to stop gluster volume %s. error: %s" % (
                bhv_name, err)
            g.log.error(err_msg)
            raise AssertionError(err_msg)
        self.addCleanup(podcmd.GlustoPod()(volume_start),
                        "auto_get_gluster_endpoint", bhv_name)

        ocp_node = self.ocp_master_node[0]
        gluster_block_svc = "gluster-block-target"
        self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                        ocp_node,
                        gluster_block_svc,
                        "active",
                        "exited",
                        gluster_node=self.gluster_servers[0])
        self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                        gluster_block_svc, self.gluster_servers[0])
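
        # First pass ('continue'): with the BHV down, the block device must
        # be absent from targetcli. The BHV is then started again and the
        # second pass ('break') asserts that the entry reappears.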
        for condition in ("continue", "break"):
            restart_service_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                gluster_node=self.gluster_servers[0])
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node,
                gluster_block_svc,
                "active",
                "exited",
                gluster_node=self.gluster_servers[0])

            targetcli = cmd_run_on_gluster_pod_or_node(ocp_node,
                                                       cmd % bv["id"],
                                                       self.gluster_servers[0])
            if condition == "continue":
                self.assertEqual(targetcli, "unavailable",
                                 error_msg % bhv_name)
            else:
                self.assertNotEqual(targetcli, "unavailable",
                                    error_msg % bhv_name)
                break

            # Bring up the same BHV
            ret, out, err = volume_start("auto_get_gluster_endpoint", bhv_name)
            if ret != 0:
                err = "Failed to start gluster volume %s on %s. error: %s" % (
                    bhv_name, h_node, err)
                raise exceptions.ExecutionError(err)

    def test_create_max_num_blockhostingvolumes(self):
        """Validate creation of the maximum number of BHVs and BVs that the
           cluster's free space allows.
        """
        num_of_bv = 10
        new_bhv_list, bv_list, g_nodes = [], [], []
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skip the test case since the number of"
                          " online nodes is less than 3.")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        # Get the existing list of BHVs
        existing_bhv_list = get_block_hosting_volume_list(
            self.heketi_client_node, self.heketi_server_url)

        # Skip the test if available space is less than default_bhv_size
        if free_space_available < default_bhv_size:
            self.skipTest("Skip the test case since free_space_available %s"
                          "is less than space_required_for_bhv %s ." %
                          (free_space_available, default_bhv_size))

        # Create BHVs
        while free_space_available > default_bhv_size:
            block_host_create_info = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url,
                default_bhv_size,
                json=True,
                block=True)
            if block_host_create_info["id"] not in existing_bhv_list.keys():
                new_bhv_list.append(block_host_create_info["id"])
            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_host_create_info["id"],
                            raise_on_error=False)
            block_vol_size = int(
                block_host_create_info["blockinfo"]["freesize"] / num_of_bv)

            # Create the specified number of BVs in the created BHVs
            for i in range(0, num_of_bv):
                block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      block_vol_size,
                                                      json=True,
                                                      ha=3,
                                                      auth=True)
                self.addCleanup(heketi_blockvolume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                block_vol["id"],
                                raise_on_error=False)
                bv_list.append(block_vol["id"])
            free_space_available = int(free_space_available - default_bhv_size)

        # Get gluster node ips
        h_nodes_ids = heketi_node_list(self.heketi_client_node,
                                       self.heketi_server_url)
        for h_node in h_nodes_ids[:2]:
            g_node = heketi_node_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      h_node,
                                      json=True)
            g_nodes.append(g_node['hostnames']['manage'][0])

        # Check if there is no crash in gluster related services & heketi
        services = (("glusterd", "running"), ("gluster-blockd", "running"),
                    ("tcmu-runner", "running"), ("gluster-block-target",
                                                 "exited"))
        for g_node in g_nodes:
            for service, state in services:
                wait_for_service_status_on_gluster_pod_or_node(
                    self.ocp_client[0],
                    service,
                    'active',
                    state,
                    g_node,
                    raise_on_error=False)
            out = hello_heketi(self.heketi_client_node, self.heketi_server_url)
            self.assertTrue(
                out, "Heketi server %s is not alive" % self.heketi_server_url)

        # Delete all the BVs created
        for bv_volume in bv_list:
            heketi_blockvolume_delete(self.heketi_client_node,
                                      self.heketi_server_url, bv_volume)

        # Check if any blockvolume exist in heketi & gluster
        for bhv_volume in new_bhv_list[:]:
            heketi_vol_info = heketi_volume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 bhv_volume,
                                                 json=True)
            self.assertNotIn("blockvolume",
                             heketi_vol_info["blockinfo"].keys())
            gluster_vol_info = get_block_list('auto_get_gluster_endpoint',
                                              volname="vol_%s" % bhv_volume)
            self.assertIsNotNone(
                gluster_vol_info,
                "Failed to get block list for volume vol_%s" % bhv_volume)
            new_bhv_list.remove(bhv_volume)
            for blockvol in gluster_vol_info:
                self.assertNotIn("blockvol_", blockvol)
            heketi_volume_delete(self.heketi_client_node,
                                 self.heketi_server_url, bhv_volume)

        # Check if all blockhosting volumes are deleted from heketi
        self.assertFalse(new_bhv_list)

    def test_volume_creation_of_size_greater_than_the_device_size(self):
        """Validate creation of a volume of size greater than the size of a
        device.
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Remove BHVs without BVs so that the free space calculation is
        # accurate
        bhv_list = heketi_ops.get_block_hosting_volume_list(h_node, h_url)
        if bhv_list:
            for bhv in bhv_list:
                bhv_info = heketi_ops.heketi_volume_info(h_node,
                                                         h_url,
                                                         bhv,
                                                         json=True)
                if bhv_info['blockinfo'].get('blockvolume') is None:
                    heketi_ops.heketi_volume_delete(h_node, h_url, bhv)

        topology = heketi_ops.heketi_topology_info(h_node, h_url, json=True)
        nodes_free_space, nodes_ips = [], []
        selected_nodes, selected_devices = [], []
        cluster = topology['clusters'][0]
        node_count = len(cluster['nodes'])
        msg = ("At least 3 Nodes are required in cluster. "
               "But only %s Nodes are present." % node_count)
        if node_count < 3:
            self.skipTest(msg)

        online_nodes_count = 0
        for node in cluster['nodes']:
            nodes_ips.append(node['hostnames']['storage'][0])

            if node['state'] != 'online':
                continue

            online_nodes_count += 1

            # Disable nodes after the 3rd online node
            if online_nodes_count > 3:
                heketi_ops.heketi_node_disable(h_node, h_url, node['id'])
                self.addCleanup(heketi_ops.heketi_node_enable, h_node, h_url,
                                node['id'])
                continue

            selected_nodes.append(node['id'])

            device_count = len(node['devices'])
            msg = ("At least 2 Devices are required on each Node."
                   "But only %s Devices are present." % device_count)
            if device_count < 2:
                self.skipTest(msg)

            sel_devices, online_devices_count, free_space = [], 0, 0
            for device in node['devices']:
                if device['state'] != 'online':
                    continue

                online_devices_count += 1

                # Disable devices after the 2nd online device
                if online_devices_count > 2:
                    heketi_ops.heketi_device_disable(h_node, h_url,
                                                     device['id'])
                    self.addCleanup(heketi_ops.heketi_device_enable, h_node,
                                    h_url, device['id'])
                    continue

                sel_devices.append(device['id'])
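                # Assumption: heketi reports device storage in KiB, so
                # dividing by 1024**2 converts it to GiB.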
                free_space += int(device['storage']['free'] / (1024**2))

            selected_devices.append(sel_devices)
            nodes_free_space.append(free_space)

            msg = ("At least 2 online Devices are required on each Node. "
                   "But only %s Devices are online on Node: %s." %
                   (online_devices_count, node['id']))
            if online_devices_count < 2:
                self.skipTest(msg)

        msg = ("At least 3 online Nodes are required in cluster. "
               "But only %s Nodes are online in Cluster: %s." %
               (online_nodes_count, cluster['id']))
        if online_nodes_count < 3:
            self.skipTest(msg)

        # Select node with minimum free space
        min_free_size = min(nodes_free_space)
        index = nodes_free_space.index(min_free_size)

        # Get max device size from selected node
        device_size = 0
        for device in selected_devices[index]:
            device_info = heketi_ops.heketi_device_info(h_node,
                                                        h_url,
                                                        device,
                                                        json=True)
            device_size = max(device_size,
                              (int(device_info['storage']['total'] /
                                   (1024**2))))

        vol_size = device_size + 1
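        # vol_size exceeds the largest single device, so heketi must place
        # the volume's bricks across multiple devices.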

        if vol_size >= min_free_size:
            self.skipTest('Required free space %s is not available' % vol_size)

        # Create heketi volume with device size + 1
        vol_info = self.create_heketi_volume_with_name_and_wait(
            name="volume_size_greater_than_device_size",
            size=vol_size,
            json=True)

        # Get gluster server IPs from heketi volume info
        glusterfs_servers = heketi_ops.get_vol_file_servers_and_hosts(
            h_node, h_url, vol_info['id'])

        # Verify gluster server IPs in heketi volume info
        msg = ("Gluster server IPs '%s' do not match the IPs '%s' found in"
               " heketi volume info" %
               (nodes_ips, glusterfs_servers['vol_servers']))
        self.assertEqual(set(glusterfs_servers['vol_servers']), set(nodes_ips),
                         msg)

        vol_name = vol_info['name']
        gluster_v_info = self.get_gluster_vol_info(vol_name)

        # Verify replica count in gluster v info
        msg = "Volume %s is replica %s instead of replica 3" % (
            vol_name, gluster_v_info['replicaCount'])
        self.assertEqual('3', gluster_v_info['replicaCount'], msg)

        # Verify distCount in gluster v info
        msg = "Volume %s distCount is %s instead of distCount as 3" % (
            vol_name, int(gluster_v_info['distCount']))
        self.assertEqual(
            int(gluster_v_info['brickCount']) // 3,
            int(gluster_v_info['distCount']), msg)

        # Verify bricks count in gluster v info
        msg = (
            "Volume %s does not have bricks count multiple of 3. It has %s" %
            (vol_name, gluster_v_info['brickCount']))
        self.assertFalse(int(gluster_v_info['brickCount']) % 3, msg)

    def test_dev_path_block_volume_delete(self):
        """Validate device path name changes the deletion of
           already existing file volumes
        """

        pvc_size, pvc_amount = 2, 5
        pvc_names, gluster_block_list, vol_details = [], [], []

        # Fetch BHV list
        h_bhv_list_before = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()

        # Create storage class
        sc_name = self.create_storage_class()

        # Register cleanup to delete the created BHVs and BVs on failures
        self.addCleanup(self._cleanup_heketi_volumes, h_bhv_list_before)

        # Create PVCs
        for i in range(0, pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(self.node,
                                                   sc_name,
                                                   pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(openshift_ops.wait_for_resource_absence, self.node,
                            'pvc', pvc_name)
            self.addCleanup(openshift_ops.oc_delete,
                            self.node,
                            'pvc',
                            pvc_name,
                            raise_on_absence=False)

        # Wait for PVCs to be bound
        openshift_ops.wait_for_pvc_be_bound(self.node, pvc_names)

        # Get volume name list
        for pvc_name in pvc_names:
            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
            volume_name = openshift_ops.get_vol_names_from_pv(self.node,
                                                              pv_name,
                                                              vol_type='block')
            vol_details.append(volume_name)

        # Get BHV list after BV creation
        h_bhv_list_after = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()
        self.assertTrue(h_bhv_list_after, "Failed to get the BHV list")

        # Validate the BV count
        self.validate_block_volumes_count(self.h_node, self.h_server,
                                          self.node_ip)

        # Collect pvs info, detach and reattach disks, then collect again
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(self.vm_name, self.node_hostname,
                                    self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Compare pvs info before and after
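        # After the detach/attach cycle the device order is expected to shift
        # by one, so entry i of 'before' is compared with entry i+1 of
        # 'after': UUID and VG name must survive the /dev path change.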
        for (path, uuid, vg_name), (_path, _uuid,
                                    _vg_name) in zip(pvs_info_before[:-1],
                                                     pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))

        # Delete the created PVCs
        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)

        # Wait for the PVCs to be deleted
        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)

        # Get the gluster block list for each BHV
        for bhv_name in h_bhv_list_after:
            b_list = block_libs.get_block_list(self.node_ip, volname=bhv_name)
            self.assertIsNotNone(
                b_list, "Failed to get gluster block list for volume"
                " {}".format(bhv_name))
            gluster_block_list.extend(b_list)

        # Get list of block volumes using heketi
        h_blockvol_list = heketi_ops.heketi_blockvolume_list(self.h_node,
                                                             self.h_server,
                                                             json=True)

        # Validate that the deleted volumes are no longer present
        for vol in vol_details:
            self.assertNotIn(vol, gluster_block_list,
                             "Failed to delete volume {}".format(vol))
            self.assertNotIn(vol, h_blockvol_list['blockvolumes'],
                             "Failed to delete blockvolume '{}'".format(vol))