def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
                                  block_volume):
    """Returns block hosting volume name of given block volume

    Args:
        heketi_client_node (str): Node on which cmd has to be executed.
        heketi_server_url (str): Heketi server url
        block_volume (str): Block volume for which the block hosting
                            volume name is returned
    Returns:
        str : Name of the block hosting volume for given block volume
    """
    block_vol_info = heketi_blockvolume_info(
        heketi_client_node, heketi_server_url, block_volume
    )

    for line in block_vol_info.splitlines():
        block_hosting_vol_match = re.search(
            "^Block Hosting Volume: (.*)$", line
        )

        if not block_hosting_vol_match:
            continue

        gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
        for vol in gluster_vol_list:
            if block_hosting_vol_match.group(1).strip() in vol:
                return vol
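
A minimal usage sketch for the helper above; the hostnames, URL and block
volume id are hypothetical placeholders, not values from any example on
this page:

# Hypothetical call; node, URL and volume id are placeholders.
bhv_name = get_block_hosting_volume_name(
    "heketi-client.example.com", "http://heketi.example.com:8080",
    "my-block-volume-id")
if bhv_name:
    print("Block hosting volume: %s" % bhv_name)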
    def test_create_blockvolume_with_different_auth_values(self, auth_value):
        """To validate block volume creation with different auth values"""
        # Block volume create with auth enabled
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              auth=auth_value,
                                              json=True)
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, block_vol["id"])

        # Verify username and password are present
        block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 block_vol["id"],
                                                 json=True)
        assertion_func = (self.assertNotEqual
                          if auth_value else self.assertEqual)
        assertion_msg_part = "not " if auth_value else ""
        assertion_func(block_vol_info["blockvolume"]["username"], "",
                       "Username is %spresent in %s" % (
                           assertion_msg_part, block_vol["id"]))
        assertion_func(block_vol_info["blockvolume"]["password"], "",
                       "Password is %spresent in %s" % (
                           assertion_msg_part, block_vol["id"]))
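
The auth_value parameter above is presumably supplied by a parametrized
test runner; a sketch assuming the ddt library (an assumption, not
confirmed by this snippet):

import ddt

@ddt.ddt
class TestBlockVolumeAuth(BaseClass):  # BaseClass is hypothetical
    @ddt.data(True, False)
    def test_create_blockvolume_with_different_auth_values(self, auth_value):
        """To validate block volume creation with different auth values"""
        ...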
    def validate_multipath_info(self, hacount):
        """validates multipath command on the pod node

        Args:
            hacount (int): hacount for which multipath to be checked
        """
        # create pod using pvc created
        dc_name = oc_create_app_dc_with_io(
            self.ocp_master_node[0],
            self.pvc_name,
            image=self.io_container_image_cirros)
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
                        dc_name, 0)

        wait_for_pod_be_ready(self.ocp_master_node[0],
                              pod_name,
                              timeout=120,
                              wait_step=3)

        # Get pod info
        pod_info = oc_get_pods(self.ocp_master_node[0],
                               selector='deploymentconfig=%s' % dc_name)
        node = pod_info[pod_name]['node']

        # Find iqn from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.ocp_master_node[0], 'pv', custom,
                                        pv_name)[0]
        vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                           self.heketi_server_url,
                                           vol_id,
                                           json=True)
        iqn = vol_info['blockvolume']['iqn']

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn).keys()
        self.assertEqual(hacount, len(devices))

        # Validate mpath
        mpaths = set()
        for device in devices:
            mpaths.add(get_mpath_name_from_device_name(node, device))
        self.assertEqual(1, len(mpaths))
        validate_multipath_pod(self.ocp_master_node[0], pod_name, hacount,
                               list(mpaths)[0])
Example #5
    def _block_vol_expand_common_offline_vs_online(self, is_online_expand):
        node = self.ocp_master_node[0]
        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        version = heketi_version.get_heketi_version(h_node)
        if version < '9.0.0-14':
            self.skipTest("heketi-client package {} does not support "
                          "blockvolume expand".format(version.v_str))

        pvc_name = self.create_and_wait_for_pvc()
        dc_name = self.create_dc_with_pvc(pvc_name)
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # get block volume id
        custom = r":.metadata.annotations.'gluster\.org\/volume-id'"
        bvol_id = oc_get_custom_resource(node, 'pv', custom, pv_name)
        self.assertNotEqual(bvol_id[0], "<none>",
                            "volume name not found from pv {}".format(pv_name))
        bvol_info = heketi_blockvolume_info(h_node,
                                            h_server,
                                            bvol_id[0],
                                            json=True)

        # verify required blockhostingvolume free size
        bhv_id = bvol_info["blockhostingvolume"]
        bhv_info = heketi_volume_info(h_node, h_server, bhv_id, json=True)
        if bhv_info["blockinfo"]["freesize"] < 1:
            self.skipTest("blockhostingvolume doesn't have required freespace")

        if not is_online_expand:
            scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=0)

        # expand block volume and verify usable size
        bvol_info = heketi_blockvolume_expand(h_node,
                                              h_server,
                                              bvol_id[0],
                                              2,
                                              json=True)
        self.assertEqual(bvol_info["size"], 2,
                         "Block volume expand does not works")
        self.assertEqual(
            bvol_info["size"], bvol_info["usablesize"],
            "block volume size is not equal to the usablesize: {}".format(
                bvol_info))

        return pvc_name, dc_name, bvol_info
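
The common helper above would typically be driven by two thin wrappers,
one per mode; a sketch (the test method names are assumptions):

    def test_block_vol_offline_expand(self):
        # Hypothetical wrapper: app pod is scaled down inside the helper
        self._block_vol_expand_common_offline_vs_online(False)

    def test_block_vol_online_expand(self):
        # Hypothetical wrapper: app pod keeps running during the expand
        self._block_vol_expand_common_offline_vs_online(True)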
    def test_block_volume_create_with_name(self):
        """Validate creation of block volume with name"""
        vol_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              name=vol_name,
                                              json=True)
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, block_vol["id"])

        # check volume name through heketi-cli
        block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 block_vol["id"],
                                                 json=True)
        self.assertEqual(block_vol_info["name"], vol_name,
                         "Block volume names do not match: %s vs %s" % (
                             block_vol_info["name"], vol_name))
    def test_block_host_volume_delete_block_volume_delete(self):
        """Validate block volume and BHV removal using heketi"""
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skipping the test since number of nodes"
                          "online are less than 3")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        if free_space_available < default_bhv_size:
            self.skipTest("Skipping the test since free_space_available %s"
                          "is less than the default_bhv_size %s" %
                          (free_space_available, default_bhv_size))
        h_volume_name = ("autotests-heketi-volume-%s" % utils.get_random_str())
        block_host_create_info = self.create_heketi_volume_with_name_and_wait(
            h_volume_name, default_bhv_size, json=True, block=True)

        block_vol_size = block_host_create_info["blockinfo"]["freesize"]
        block_hosting_vol_id = block_host_create_info["id"]
        block_vol_info = {"blockhostingvolume": "init_value"}
        while block_vol_info['blockhostingvolume'] != block_hosting_vol_id:
            block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  block_vol_size,
                                                  json=True,
                                                  ha=3,
                                                  auth=True)
            self.addCleanup(heketi_blockvolume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_vol["id"],
                            raise_on_error=True)
            block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                     self.heketi_server_url,
                                                     block_vol["id"],
                                                     json=True)
        bhv_info = heketi_volume_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      block_hosting_vol_id,
                                      json=True)
        self.assertIn(block_vol_info["id"],
                      bhv_info["blockinfo"]["blockvolume"])
    def _dynamic_provisioning_block_with_bhv_cleanup(
            self, sc_name, pvc_size, bhv_list):
        """Dynamic provisioning for glusterblock with BHV cleanup"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        pvc_name = oc_create_pvc(self.node, sc_name, pvc_size=pvc_size)
        try:
            verify_pvc_status_is_bound(self.node, pvc_name)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
            bvol_id = oc_get_custom_resource(
                self.node, 'pv', custom, pv_name)[0]
            bhv_id = heketi_blockvolume_info(
                h_node, h_url, bvol_id, json=True)['blockhostingvolume']
            if bhv_id not in bhv_list:
                self.addCleanup(
                    heketi_volume_delete, h_node, h_url, bhv_id)
        finally:
            self.addCleanup(
                wait_for_resource_absence, self.node, 'pvc', pvc_name)
            self.addCleanup(
                oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=True)
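
A sketch of how this helper might be invoked; the test name is
hypothetical, and create_storage_class is assumed to return the
storage-class name as it does elsewhere on this page:

    def test_dynamic_provisioning_with_bhv_cleanup(self):
        # Hypothetical caller: an empty list means every BHV found for
        # the new PVC is considered freshly created and gets cleaned up.
        sc_name = self.create_storage_class()
        self._dynamic_provisioning_block_with_bhv_cleanup(
            sc_name, 1, bhv_list=[])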
def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
                                  block_volume, gluster_node=None,
                                  ocp_client_node=None):
    """Returns block hosting volume name of given block volume

    Args:
        heketi_client_node (str): Node on which cmd has to be executed.
        heketi_server_url (str): Heketi server url
        block_volume (str): Block volume for which the block hosting
                            volume name is returned
    Kwargs:
        gluster_node (str): gluster node/pod ip where gluster command can be
                            run
        ocp_client_node (str): OCP client node where oc commands can be run
    Returns:
        str : Name of the block hosting volume for given block volume
    """
    block_vol_info = heketi_blockvolume_info(
        heketi_client_node, heketi_server_url, block_volume
    )

    for line in block_vol_info.splitlines():
        block_hosting_vol_match = re.search(
            "^Block Hosting Volume: (.*)$", line
        )

        if not block_hosting_vol_match:
            continue

        if gluster_node and ocp_client_node:
            cmd = 'gluster volume list'
            gluster_vol_list = cmd_run_on_gluster_pod_or_node(
                ocp_client_node, cmd, gluster_node).split('\n')
        else:
            gluster_vol_list = get_volume_list('auto_get_gluster_endpoint')

        for vol in gluster_vol_list:
            if block_hosting_vol_match.group(1).strip() in vol:
                return vol
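
Where gluster endpoint auto-detection is not available, the kwargs
variant above can be pointed at an explicit gluster node through an OCP
client node; all hostnames below are placeholders:

# Hypothetical call with explicit gluster/OCP nodes.
bhv_name = get_block_hosting_volume_name(
    "heketi-client.example.com", "http://heketi.example.com:8080",
    "my-block-volume-id", gluster_node="10.70.46.10",
    ocp_client_node="ocp-master.example.com")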
    def initiator_side_failures(self):

        # get storage ips of glusterfs pods
        keys = self.gluster_servers
        gluster_ips = []
        for key in keys:
            gluster_ips.append(self.gluster_servers_info[key]['storage'])
        gluster_ips.sort()

        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # find iqn and hacount from volume info
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']
        hacount = int(self.sc['hacount'])

        # create app pod
        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)

        # When we have to verify iscsi login devices & mpaths, we run it twice
        for i in range(2):

            # get node hostname from pod info
            pod_info = oc_get_pods(
                self.node, selector='deploymentconfig=%s' % dc_name)
            node = pod_info[pod_name]['node']

            # get the iscsi sessions info from the node
            iscsi = get_iscsi_session(node, iqn)
            self.assertEqual(hacount, len(iscsi))
            iscsi.sort()
            self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))

            # get the paths info from the node
            devices = get_iscsi_block_devices_by_path(node, iqn).keys()
            self.assertEqual(hacount, len(devices))

            # get mpath names and verify that only one mpath is there
            mpaths = set()
            for device in devices:
                mpaths.add(get_mpath_name_from_device_name(node, device))
            self.assertEqual(1, len(mpaths))

            validate_multipath_pod(
                self.node, pod_name, hacount, mpath=list(mpaths)[0])

            # When we have to verify iscsi session logout, we run only once
            if i == 1:
                break

            # make the node where the pod is running unschedulable
            oc_adm_manage_node(
                self.node, '--schedulable=false', nodes=[node])

            # re-enable scheduling on that node during cleanup
            self.addCleanup(
                oc_adm_manage_node, self.node, '--schedulable=true',
                nodes=[node])

            # delete the pod so it gets respun on some other node
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

            # wait for pod to come up
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # get the iscsi session from the previous node to verify logout
            iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
            self.assertFalse(iscsi)
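
A sketch of a thin test method delegating to the helper above (the test
name is an assumption):

    def test_initiator_side_failures(self):
        # Hypothetical wrapper around the reusable helper
        self.initiator_side_failures()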
    def verify_iscsi_sessions_and_multipath(
            self, pvc_name, rname, rtype='dc', heketi_server_url=None,
            is_registry_gluster=False):
        if not heketi_server_url:
            heketi_server_url = self.heketi_server_url

        # Get storage ips of glusterfs pods
        keys = (list(g.config['gluster_registry_servers'].keys()) if
                is_registry_gluster else self.gluster_servers)
        servers_info = (g.config['gluster_registry_servers'] if
                        is_registry_gluster else self.gluster_servers_info)
        gluster_ips = []
        for key in keys:
            gluster_ips.append(servers_info[key]['storage'])
        gluster_ips.sort()

        # Find iqn and hacount from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(
            self.ocp_client[0], 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']
        hacount = int(vol_info['hacount'])

        # Find node on which pod is running
        if rtype == 'dc':
            pod_name = get_pod_name_from_dc(self.ocp_client[0], rname)
            pod_info = oc_get_pods(
                self.ocp_client[0], selector='deploymentconfig=%s' % rname)
        elif rtype == 'pod':
            pod_info = oc_get_pods(self.ocp_client[0], name=rname)
            pod_name = rname
        elif rtype == 'rc':
            pod_name = get_pod_name_from_rc(self.ocp_client[0], rname)
            pod_info = oc_get_pods(
                self.ocp_client[0], selector='name=%s' % rname)
        else:
            raise NameError("Value of rtype should be 'dc', 'rc' or 'pod'")

        node = pod_info[pod_name]['node']

        # Get the iscsi sessions info from the node
        iscsi = get_iscsi_session(node, iqn)
        msg = ('Only %s iscsi sessions are present on node %s, expected %s.'
               % (len(iscsi), node, hacount))
        self.assertEqual(hacount, len(iscsi), msg)
        iscsi.sort()
        msg = ("Only gluster Nodes %s were expected in iscsi sessions, "
               "but got other Nodes %s on Node %s" % (
                   gluster_ips, iscsi, node))
        self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)), msg)

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn)
        msg = ("Only %s devices are present on Node %s, expected %s" % (
            devices, node, hacount,))
        self.assertEqual(hacount, len(devices), msg)

        # Get mpath names and verify that only one mpath is there
        mpaths = set()
        for device in devices.keys():
            mpaths.add(get_mpath_name_from_device_name(node, device))
        msg = ("Only one mpath was expected on Node %s, but got %s" % (
            node, mpaths))
        self.assertEqual(1, len(mpaths), msg)

        validate_multipath_pod(
            self.ocp_client[0], pod_name, hacount, mpath=list(mpaths)[0])

        return iqn, hacount, node
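
A sketch of a typical call site for the verifier above; pvc_name and
dc_name are assumed to come from the usual create helpers:

        # Hypothetical call site inside a test method
        iqn, hacount, node = self.verify_iscsi_sessions_and_multipath(
            self.pvc_name, dc_name, rtype='dc')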
Example #14
    def test_verify_delete_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of blockvolumes/volumes and bricks in heketi
           db during blockvolume/volume delete operation.
        """
        # Create large volumes to observe the pending operations
        vol_count, volume_ids, async_obj = 10, [], []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Verify file/block volume pending operations before creation
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_bricks_before = h_db_check_before.get("bricks")
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Delete heketi pod to clean db operations
        if (h_db_check_bricks_before.get("pending")
                or h_db_check_vol_before.get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Create file/block volumes
        for _ in range(vol_count):
            vol_id = eval("heketi_{}volume_create".format(vol_type))(
                h_node, h_url, h_volume_size, json=True).get("id")
            volume_ids.append(vol_id)
            self.addCleanup(eval("heketi_{}volume_delete".format(vol_type)),
                            h_node,
                            h_url,
                            vol_id,
                            raise_on_error=False)

        def run_async(cmd, hostname, raise_on_error=True):
            async_op = g.run_async(host=hostname, command=cmd)
            async_obj.append(async_op)
            return async_op

        bhv_list = []
        for vol_id in volume_ids:
            # Get BHV ids to delete in case of block volumes
            if vol_type:
                vol_info = (heketi_blockvolume_info(h_node,
                                                    h_url,
                                                    vol_id,
                                                    json=True))
                if vol_info.get("blockhostingvolume") not in bhv_list:
                    bhv_list.append(vol_info.get("blockhostingvolume"))

            # Temporarily patch command.cmd_run with the async runner so
            # heketi_volume_delete / heketi_blockvolume_delete execute in
            # the background.
            with mock.patch.object(command, 'cmd_run', side_effect=run_async):
                eval("heketi_{}volume_delete".format(vol_type))(h_node, h_url,
                                                                vol_id)

        # Wait for pending operations to be generated
        for w in waiter.Waiter(timeout=30, interval=3):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No any pending operations found during {}volumes deletion "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify bricks pending operations during deletion
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one bricks pending count")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting bricks pending count to be multiple of 3 but "
                "found {}".format(h_db_check_bricks.get("pending")))

        # Verify file/block volume pending operation during delete
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            h_db_check_bricks = h_db_check.get("bricks")
            if ((not h_db_check_bricks.get("pending"))
                    and (not h_db_check_vol.get("pending"))):
                break
        if w.expired:
            raise AssertionError(
                "Failed to delete {}volumes after 120 secs".format(vol_type))

        # Check that all background processes got exited
        for obj in async_obj:
            ret, out, err = obj.async_communicate()
            self.assertFalse(
                ret, "Failed to delete {}volume due to error: {}".format(
                    vol_type, err))

        # Delete BHV created during block volume creation
        if vol_type:
            for bhv_id in bhv_list:
                heketi_volume_delete(h_node, h_url, bhv_id)

        # Verify bricks and volume pending operations
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_bricks_after = h_db_check_after.get("bricks")
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        act_brick_count = h_db_check_bricks_after.get("pending")
        act_vol_count = h_db_check_vol_after.get("pending")

        # Verify bricks pending operation after delete
        err_msg = "{} operations are pending for {} after {}volume deletion"
        if not vol_type:
            self.assertFalse(
                act_brick_count,
                err_msg.format(act_brick_count, "brick", vol_type))

        # Verify file/block volume pending operations after delete
        self.assertFalse(act_vol_count,
                         err_msg.format(act_vol_count, "volume", vol_type))

        act_brick_count = h_db_check_bricks_after.get("total")
        act_vol_count = h_db_check_vol_after.get("total")
        exp_brick_count = h_db_check_bricks_before.get("total")
        exp_vol_count = h_db_check_vol_before.get("total")
        err_msg = "Actual {} and expected {} {} counts are not matched"

        # Verify if initial and final file/block volumes are same
        self.assertEqual(
            act_vol_count, exp_vol_count,
            err_msg.format(act_vol_count, exp_vol_count, "volume"))

        # Verify if initial and final bricks are same
        self.assertEqual(
            act_brick_count, exp_brick_count,
            err_msg.format(act_brick_count, exp_brick_count, "brick"))