def test_100gb_block_pvc_create_and_delete_twice(self):
    """Validate creation and deletion of blockvoume of size 100GB"""
    # Sizes in GB: the PVC, its block hosting volume, and the total
    # cluster space the whole scenario needs.
    pvc_size, hosting_vol_size, required_space = 100, 103, 309
    heketi_node = self.heketi_client_node
    heketi_url = self.heketi_server_url
    prefix = 'autotest-pvc-{}'.format(utils.get_random_str(size=5))

    # Skip test if required free space is not available
    free_space = get_total_free_space(heketi_node, heketi_url)[0]
    if free_space < required_space:
        self.skipTest("Available free space {} is less than the required "
                      "free space {}".format(free_space, required_space))

    # Create a block hosting volume large enough for the 100GB block PVC
    hosting_vol_id = heketi_volume_create(
        heketi_node, heketi_url, hosting_vol_size,
        block=True, json=True)['id']
    self.addCleanup(
        heketi_volume_delete, heketi_node, heketi_url, hosting_vol_id)

    # Create and delete the PVC twice against the same hosting volume
    for _ in range(2):
        pvc_name = self.create_and_wait_for_pvc(
            pvc_size=pvc_size, pvc_name_prefix=prefix)
        match_pvc_and_pv(self.node, prefix)

        # Delete the PVC and wait until it is really gone
        oc_delete(self.node, 'pvc', pvc_name)
        wait_for_resource_absence(self.node, 'pvc', pvc_name)
 def test_validate_gluster_voloptions_blockhostvolume(self):
     """Validate gluster volume options which are set for
        block hosting volume"""
     # Gluster options heketi is expected to set on every BHV it creates
     options_to_validate = (
         ('performance.quick-read', 'off'),
         ('performance.read-ahead', 'off'),
         ('performance.io-cache', 'off'),
         ('performance.stat-prefetch', 'off'),
         ('performance.open-behind', 'off'),
         ('performance.readdir-ahead', 'off'),
         ('performance.strict-o-direct', 'on'),
         ('network.remote-dio', 'disable'),
         ('cluster.eager-lock', 'enable'),
         ('cluster.quorum-type', 'auto'),
         ('cluster.data-self-heal-algorithm', 'full'),
         ('cluster.locking-scheme', 'granular'),
         ('cluster.shd-max-threads', '8'),
         ('cluster.shd-wait-qlength', '10000'),
         ('features.shard', 'on'),
         ('features.shard-block-size', '64MB'),
         ('user.cifs', 'off'),
         ('server.allow-insecure', 'on'),
     )
     free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                self.heketi_server_url)
     if nodenum < 3:
         # Bug fix: added the missing space between "of" and "online"
         # (adjacent string literals concatenated to "number ofonline")
         self.skipTest("Skip the test case since number of "
                       "online nodes is less than 3.")
     free_space_available = int(free_space / nodenum)
     default_bhv_size = get_default_block_hosting_volume_size(
         self.heketi_client_node, self.heketi_dc_name)
     if free_space_available < default_bhv_size:
         # Bug fix: added the missing space between "%s" and "is"
         self.skipTest("Skip the test case since free_space_available %s "
                       "is less than the default_bhv_size %s ." %
                       (free_space_available, default_bhv_size))
     # Create a BHV of the default size and register it for cleanup
     block_host_create_info = heketi_volume_create(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   default_bhv_size,
                                                   json=True,
                                                   block=True)
     self.addCleanup(heketi_volume_delete,
                     self.heketi_client_node,
                     self.heketi_server_url,
                     block_host_create_info["id"],
                     raise_on_error=True)
     bhv_name = block_host_create_info["name"]
     # Read the options straight from gluster and compare each one
     vol_info = get_volume_info('auto_get_gluster_endpoint',
                                volname=bhv_name)
     self.assertTrue(vol_info, "Failed to get volume info %s" % bhv_name)
     self.assertIn("options", vol_info[bhv_name].keys())
     for k, v in options_to_validate:
         self.assertIn(k, vol_info[bhv_name]["options"].keys())
         self.assertEqual(v, vol_info[bhv_name]["options"][k])
# Example 3
    def test_heketi_volume_create_mutiple_sizes(self):
        """Validate creation of heketi volumes with different sizes."""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        sizes, required_space = [15, 50, 100], 495

        # Not enough free space on the cluster -> skip
        available_space = get_total_free_space(h_node, h_url)[0]
        if required_space > available_space:
            self.skipTest("Required space {} greater than the available space "
                          "{}".format(required_space, available_space))

        # One heketi volume per requested size, each removed on cleanup
        for vol_size in sizes:
            created_id = heketi_volume_create(
                h_node, h_url, vol_size, json=True)['id']
            self.addCleanup(heketi_volume_delete, h_node, h_url, created_id)
# Example 4
    def test_heketi_volume_create_mutiple_sizes(self):
        """Validate creation of heketi volumes with different sizes."""
        name_prefix = "autotest-{}".format(utils.get_random_str())
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        sizes, required_space = [15, 50, 100], 495

        # Skip when the cluster lacks the space for all three volumes
        available_space = get_total_free_space(h_node, h_url)[0]
        if required_space > available_space:
            self.skipTest("Required space {} greater than the available space "
                          "{}".format(required_space, available_space))

        # Create one named volume per size and wait for each to come up
        for vol_size in sizes:
            vol_name = "{}-{}".format(name_prefix, vol_size)
            self.create_heketi_volume_with_name_and_wait(
                vol_name, vol_size, json=True)
    def test_block_host_volume_delete_block_volume_delete(self):
        """Validate block volume and BHV removal using heketi"""
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            # Bug fix: added the missing space between "nodes" and "online"
            # (adjacent string literals concatenated to "nodesonline")
            self.skipTest("Skipping the test since number of nodes "
                          "online are less than 3")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        if free_space_available < default_bhv_size:
            # Bug fix: added the missing space between "%s" and "is"
            self.skipTest("Skipping the test since free_space_available %s "
                          "is less than the default_bhv_size %s" %
                          (free_space_available, default_bhv_size))
        # Create a named BHV of the default size
        h_volume_name = ("autotests-heketi-volume-%s" % utils.get_random_str())
        block_host_create_info = self.create_heketi_volume_with_name_and_wait(
            h_volume_name, default_bhv_size, json=True, block=True)

        block_vol_size = block_host_create_info["blockinfo"]["freesize"]
        block_hosting_vol_id = block_host_create_info["id"]
        block_vol_info = {"blockhostingvolume": "init_value"}
        # Keep creating full-size block volumes until one lands on the BHV
        # created above (heketi may place earlier ones on other BHVs).
        # NOTE(review): this assumes a volume eventually lands on our BHV;
        # otherwise the loop would not terminate -- confirm.
        while (block_vol_info['blockhostingvolume'] != block_hosting_vol_id):
            block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  block_vol_size,
                                                  json=True,
                                                  ha=3,
                                                  auth=True)
            self.addCleanup(heketi_blockvolume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_vol["id"],
                            raise_on_error=True)
            block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                     self.heketi_server_url,
                                                     block_vol["id"],
                                                     json=True)
        # The BHV must list the block volume that was placed on it
        bhv_info = heketi_volume_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      block_hosting_vol_id,
                                      json=True)
        self.assertIn(block_vol_info["id"],
                      bhv_info["blockinfo"]["blockvolume"])
# Example 6
    def test_heketi_manual_cleanup_operation_in_bhv(self):
        """Validate heketi db cleanup will resolve the mismatch
           in the free size of the block hosting volume with failed
           block device create operations.
        """
        bhv_size_before, bhv_size_after, vol_count = [], [], 5
        ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes id list
        node_id_list = heketi_node_list(h_node, h_url)

        # Disable 4th and other nodes
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_node, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_url, node_id)

        # Calculate heketi volume size (capped at 50 per volume)
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            # Not enough space: fall back to size-1 volumes, fewer of them
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        default_bhv_size = get_default_block_hosting_volume_size(
            h_node, self.heketi_dc_name)
        if default_bhv_size < h_volume_size:
            h_volume_name = "autotest-{}".format(utils.get_random_str())
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                free_space_available,
                raise_on_cleanup_error=False,
                block=True,
                json=True)
            # Subtract the BHV's reserved size (+1 margin) and recompute
            # the per-volume size that fits on the new BHV
            free_space_available -= (
                int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
            h_volume_size = int(free_space_available / vol_count)

        # Get BHV list
        h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
        self.assertTrue(h_bhv_list, "Failed to get the BHV list")

        # Record each BHV's free size before the failed create operations
        for bhv in h_bhv_list:
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bhv_size_before.append(vol_info.get("freesize"))

        # Kill tcmu-runner service so the block device creations will fail
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)

        # Make sure the services are restarted and healthy on cleanup
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, g_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, g_node)

        # Async replacement for command.cmd_run so blockvolume creation
        # runs in the background; json.loads is also bypassed because the
        # output is not json at that moment.
        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        # Create stale block volumes in async
        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    heketi_blockvolume_create(h_node,
                                              h_url,
                                              h_volume_size,
                                              json=True)

        # Wait for pending operation to get generated
        self._check_for_pending_operations(h_node, h_url)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service, g_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, g_node)

        # Cleanup pending operation
        heketi_server_operation_cleanup(h_node, h_url)

        # wait for pending operation to get cleaned up
        for w in waiter.Waiter(timeout=120, interval=10):
            # Bug fix: rebuild the "after" size list on every retry.
            # Previously entries kept accumulating across iterations, so
            # stale values could keep set(before) != set(after) forever
            # even after the sizes had recovered.
            bhv_size_after = []
            for bhv in h_bhv_list:
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
                bhv_size_after.append(vol_info.get("freesize"))

            if (set(bhv_size_before) == set(bhv_size_after)):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to Validate volume size Actual:{},"
                " Expected:{}".format(set(bhv_size_before),
                                      set(bhv_size_after)))
# Example 7
    def test_verify_create_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of file/block volumes in db during
           volumes creation from heketi side

        vol_type: "" for file volumes or "block" for block volumes; it
        is interpolated into helper names such as
        "heketi_{}volume_create". (presumably supplied by a
        ddt/parametrize decorator outside this view -- TODO confirm.)
        """
        # Create large volumes to observe the pending operations
        vol_count, h_vol_creation_async_op = 3, []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Verify file/block volumes pending operation before creation,
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Delete heketi pod to clean db operations
        # (only when stale pending volume/brick operations already exist)
        if (h_db_check_vol_before.get("pending")
                or h_db_check_before.get("bricks").get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size (capped at 30 per volume)
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 30:
                h_volume_size = 30
        else:
            # Not enough space: fall back to size-1 volumes, fewer of them
            h_volume_size, vol_count = 1, free_space_available

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                # Subtract the BHV's reserved size (+1 margin) and
                # recompute the per-volume size that fits on the new BHV
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Temporary replace g.run with g.async_run in heketi_blockvolume_create
        # func to be able to run it in background.Also, avoid parsing the
        # output as it won't be json at that moment. Parse it after reading
        # the async operation results.
        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    # eval() dispatches to heketi_volume_create or
                    # heketi_blockvolume_create depending on vol_type
                    h_vol_creation_async_op.append(
                        eval("heketi_{}volume_create".format(vol_type))(
                            h_node, h_url, h_volume_size, json=True))

        # Check for pending operations
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No any pending operations found during {}volumes creation "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify bricks pending operation during creation
        # (file volumes only; pending bricks are expected in replica-3
        # multiples -- hence the "% 3" check below)
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one bricks pending count")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting bricks pending count to be multiple of 3 but "
                "found {}".format(h_db_check_bricks.get("pending")))

        # Wait for all counts of pending operations to be zero
        for w in waiter.Waiter(timeout=300, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if not h_db_check_vol.get("pending"):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Expecting no pending operations after 300 sec but "
                "found {} operation".format(h_db_check_vol.get("pending")))

        # Get heketi server DB details
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        h_db_check_bricks_after = h_db_check_after.get("bricks")

        # Verify if initial and final file/block volumes are same
        act_vol_count = h_db_check_vol_after.get("total")
        exp_vol_count = h_db_check_vol_before.get("total") + vol_count
        err_msg = (
            "Actual {} and expected {} {}volume counts are not matched".format(
                act_vol_count, exp_vol_count, vol_type))
        self.assertEqual(act_vol_count, exp_vol_count, err_msg)

        # Verify if initial and final bricks are same for file volume
        volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        exp_brick_count = 0
        for volume in new_volumes:
            vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
            exp_brick_count += len(vol_info.get("bricks"))

        err_msg = "Actual {} and expected {} bricks counts are not matched"
        act_brick_count = h_db_check_bricks_after.get("total")
        self.assertEqual(act_brick_count, exp_brick_count,
                         err_msg.format(act_brick_count, exp_brick_count))
# Example 8
    def test_verify_delete_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of blockvolumes/volumes and bricks in heketi
           db during blockvolume/volume delete operation.

        vol_type: "" for file volumes or "block" for block volumes; it
        is interpolated into helper names such as
        "heketi_{}volume_delete". (presumably supplied by a
        ddt/parametrize decorator outside this view -- TODO confirm.)
        """
        # Create a large volumes to observe the pending operation
        vol_count, volume_ids, async_obj = 10, [], []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Verify file/block volumes pending operation before creation,
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_bricks_before = h_db_check_before.get("bricks")
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Delete heketi pod to clean db operations
        # (only when stale pending brick/volume operations already exist)
        if (h_db_check_bricks_before.get("pending")
                or h_db_check_vol_before.get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size (capped at 50 per volume)
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            # Not enough space: fall back to size-1 volumes, fewer of them
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                # Subtract the BHV's reserved size (+1 margin) and
                # recompute the per-volume size that fits on the new BHV
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Create file/block volumes
        for _ in range(vol_count):
            # eval() dispatches to the file or block create/delete helper
            vol_id = eval("heketi_{}volume_create".format(vol_type))(
                h_node, h_url, h_volume_size, json=True).get("id")
            volume_ids.append(vol_id)
            self.addCleanup(eval("heketi_{}volume_delete".format(vol_type)),
                            h_node,
                            h_url,
                            vol_id,
                            raise_on_error=False)

        # Async command runner; handles are kept in async_obj so exit
        # codes can be checked after the deletes complete
        def run_async(cmd, hostname, raise_on_error=True):
            async_op = g.run_async(host=hostname, command=cmd)
            async_obj.append(async_op)
            return async_op

        bhv_list = []
        for vol_id in volume_ids:
            # Get BHV ids to delete in case of block volumes
            if vol_type:
                vol_info = (heketi_blockvolume_info(h_node,
                                                    h_url,
                                                    vol_id,
                                                    json=True))
                if not vol_info.get("blockhostingvolume") in bhv_list:
                    bhv_list.append(vol_info.get("blockhostingvolume"))

            # Temporary replace g.run with g.async_run in heketi_volume_delete
            # and heketi_blockvolume_delete func to be able to run it in
            # background.
            with mock.patch.object(command, 'cmd_run', side_effect=run_async):
                eval("heketi_{}volume_delete".format(vol_type))(h_node, h_url,
                                                                vol_id)

        # Wait for pending operations to get generate
        for w in waiter.Waiter(timeout=30, interval=3):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No any pending operations found during {}volumes deletion "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify bricks pending operation during creation
        # (file volumes only; pending bricks are expected in replica-3
        # multiples -- hence the "% 3" check below)
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one bricks pending count")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting bricks pending count to be multiple of 3 but "
                "found {}".format(h_db_check_bricks.get("pending")))

        # Verify file/block volume pending operation during delete
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            h_db_check_bricks = h_db_check.get("bricks")
            if ((not h_db_check_bricks.get("pending"))
                    and (not h_db_check_vol.get("pending"))):
                break
        if w.expired:
            raise AssertionError(
                "Failed to delete {}volumes after 120 secs".format(vol_type))

        # Check that all background processes got exited
        for obj in async_obj:
            ret, out, err = obj.async_communicate()
            self.assertFalse(
                ret, "Failed to delete {}volume due to error: {}".format(
                    vol_type, err))

        # Delete BHV created during block volume creation
        if vol_type:
            for bhv_id in bhv_list:
                heketi_volume_delete(h_node, h_url, bhv_id)

        # Verify bricks and volume pending operations
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_bricks_after = h_db_check_after.get("bricks")
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        act_brick_count = h_db_check_bricks_after.get("pending")
        act_vol_count = h_db_check_vol_after.get("pending")

        # Verify bricks pending operation after delete
        err_msg = "{} operations are pending for {} after {}volume deletion"
        if not vol_type:
            self.assertFalse(
                act_brick_count,
                err_msg.format(act_brick_count, "brick", vol_type))

        # Verify file/bock volumes pending operation after delete
        self.assertFalse(act_vol_count,
                         err_msg.format(act_vol_count, "volume", vol_type))

        act_brick_count = h_db_check_bricks_after.get("total")
        act_vol_count = h_db_check_vol_after.get("total")
        exp_brick_count = h_db_check_bricks_before.get("total")
        exp_vol_count = h_db_check_vol_before.get("total")
        err_msg = "Actual {} and expected {} {} counts are not matched"

        # Verify if initial and final file/block volumes are same
        self.assertEqual(
            act_vol_count, exp_vol_count,
            err_msg.format(act_vol_count, exp_vol_count, "volume"))

        # Verify if initial and final bricks are same
        self.assertEqual(
            act_brick_count, exp_brick_count,
            err_msg.format(act_brick_count, exp_brick_count, "brick"))
    def test_create_max_num_blockhostingvolumes(self):
        """Create as many BHVs as free space allows, fill each with block
           volumes, verify gluster/heketi services stay healthy, then
           delete everything and verify nothing is left behind.
        """
        num_of_bv = 10
        new_bhv_list, bv_list, g_nodes = [], [], []
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            # Bug fix: added the missing space between "of" and "online"
            # (adjacent string literals concatenated to "number ofonline")
            self.skipTest("Skip the test case since number of "
                          "online nodes is less than 3.")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        # Get existing list of BHV's
        existing_bhv_list = get_block_hosting_volume_list(
            self.heketi_client_node, self.heketi_server_url)

        # Skip the test if available space is less than default_bhv_size
        if free_space_available < default_bhv_size:
            # Bug fix: added the missing space between "%s" and "is"
            self.skipTest("Skip the test case since free_space_available %s "
                          "is less than space_required_for_bhv %s ." %
                          (free_space_available, default_bhv_size))

        # Create BHV's until the free space is exhausted
        while free_space_available > default_bhv_size:
            block_host_create_info = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url,
                default_bhv_size,
                json=True,
                block=True)
            # Track only the BHVs created by this test
            if block_host_create_info["id"] not in existing_bhv_list.keys():
                new_bhv_list.append(block_host_create_info["id"])
            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_host_create_info["id"],
                            raise_on_error=False)
            block_vol_size = int(
                block_host_create_info["blockinfo"]["freesize"] / num_of_bv)

            # Create specified number of BV's in BHV's created
            for i in range(0, num_of_bv):
                block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      block_vol_size,
                                                      json=True,
                                                      ha=3,
                                                      auth=True)
                self.addCleanup(heketi_blockvolume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                block_vol["id"],
                                raise_on_error=False)
                bv_list.append(block_vol["id"])
            free_space_available = int(free_space_available - default_bhv_size)

        # Get gluster node ips of the first two heketi nodes
        h_nodes_ids = heketi_node_list(self.heketi_client_node,
                                       self.heketi_server_url)
        for h_node in h_nodes_ids[:2]:
            g_node = heketi_node_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      h_node,
                                      json=True)
            g_nodes.append(g_node['hostnames']['manage'][0])

        # Check if there is no crash in gluster related services & heketi
        services = (("glusterd", "running"), ("gluster-blockd", "running"),
                    ("tcmu-runner", "running"), ("gluster-block-target",
                                                 "exited"))
        for g_node in g_nodes:
            for service, state in services:
                wait_for_service_status_on_gluster_pod_or_node(
                    self.ocp_client[0],
                    service,
                    'active',
                    state,
                    g_node,
                    raise_on_error=False)
            out = hello_heketi(self.heketi_client_node, self.heketi_server_url)
            self.assertTrue(
                out, "Heketi server %s is not alive" % self.heketi_server_url)

        # Delete all the BHV's and BV's created
        for bv_volume in bv_list:
            heketi_blockvolume_delete(self.heketi_client_node,
                                      self.heketi_server_url, bv_volume)

        # Check if any blockvolume exist in heketi & gluster
        # (iterate a copy because the list is mutated inside the loop)
        for bhv_volume in new_bhv_list[:]:
            heketi_vol_info = heketi_volume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 bhv_volume,
                                                 json=True)
            self.assertNotIn("blockvolume",
                             heketi_vol_info["blockinfo"].keys())
            gluster_vol_info = get_block_list('auto_get_gluster_endpoint',
                                              volname="vol_%s" % bhv_volume)
            self.assertIsNotNone(gluster_vol_info,
                                 "Failed to get volume info %s" % bhv_volume)
            new_bhv_list.remove(bhv_volume)
            for blockvol in gluster_vol_info:
                self.assertNotIn("blockvol_", blockvol)
                heketi_volume_delete(self.heketi_client_node,
                                     self.heketi_server_url, bhv_volume)

        # Check if all blockhosting volumes are deleted from heketi
        self.assertFalse(new_bhv_list)