def test_to_check_deletion_of_node(self):
        """Validate deletion of a node which contains devices"""

        # Create Heketi volume to make sure we have devices with usages
        heketi_url = self.heketi_server_url
        vol = heketi_volume_create(self.heketi_client_node,
                                   heketi_url,
                                   1,
                                   json=True)
        self.assertTrue(vol, "Failed to create heketi volume.")
        g.log.info("Heketi volume successfully created")
        volume_id = vol["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        # Pick up suitable node
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        for node_id in node_ids:
            node_info = heketi_node_info(self.heketi_client_node,
                                         heketi_url,
                                         node_id,
                                         json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                if device['storage']['used']:
                    break
            else:
                continue
            break
        else:
            self.fail("Failed to find online node with online device which "
                      "has some usage.")

        # Try to delete the node by its ID
        g.log.info("Trying to delete the node which contains devices in it. "
                   "Expecting failure.")
        self.assertRaises(ExecutionError, heketi_node_delete,
                          self.heketi_client_node, heketi_url, node_id)

        # Make sure our node hasn't been deleted
        g.log.info("Listing heketi node list")
        node_list = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_list, "Failed to list heketi nodes")
        self.assertIn(node_id, node_list)
        node_info = heketi_node_info(self.heketi_client_node,
                                     heketi_url,
                                     node_id,
                                     json=True)
        self.assertEqual(node_info['state'].lower(), 'online')
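
    # A minimal sketch (not part of the original suite) of the node search
    # above, assuming the same heketi_node_info wrapper: return the first
    # online node that owns an online device with non-zero usage.
    def _find_used_online_node_sketch(self, node_ids):
        for node_id in node_ids:
            info = heketi_node_info(self.heketi_client_node,
                                    self.heketi_server_url,
                                    node_id, json=True)
            if info['state'].lower() != 'online':
                continue
            for device in info['devices']:
                if (device['state'].lower() == 'online'
                        and device['storage']['used']):
                    return node_id
        return None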
    def enable_disable_devices(self, additional_devices_attached, enable=True):
        """
        Method to enable and disable devices
        """
        op = 'enable' if enable else 'disable'
        for node_id in additional_devices_attached.keys():
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)

            if not enable:
                self.assertNotEqual(node_info, False,
                                    "Node info for node %s failed" % node_id)

            for device in node_info["devices"]:
                if device["name"] == additional_devices_attached[node_id]:
                    out = getattr(heketi_ops, 'heketi_device_%s' % op)(
                        self.heketi_client_node,
                        self.heketi_server_url,
                        device["id"],
                        json=True)
                    if out is False:
                        g.log.info("Device %s could not be %sd" %
                                   (device["id"], op))
                    else:
                        g.log.info("Device %s %sd" % (device["id"], op))
    def get_devices_summary_free_space(self):
        """
        Calculates minimum free space per device and
        returns total free space across all devices
        """

        free_spaces = []

        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in heketi_node_id_list:
            node_info_dict = heketi_ops.heketi_node_info(
                self.heketi_client_node,
                self.heketi_server_url,
                node_id,
                json=True)
            total_free_space = 0
            for device in node_info_dict["devices"]:
                total_free_space += device["storage"]["free"]
            free_spaces.append(total_free_space)

        total_free_space = sum(free_spaces) / (1024**2)
        total_free_space = int(math.floor(total_free_space))

        return total_free_space
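
    # Unit note (an assumption inferred from the 1024**2 divisor above):
    # Heketi reports device 'storage' values in KiB, so summing 'free' and
    # dividing by 1024**2 yields whole GiB.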
    def setUp(self):
        super(TestArbiterVolumeCreateExpandDelete, self).setUp()
        self.node = self.ocp_master_node[0]

        # Mark one of the Heketi nodes as arbiter-supported if none of
        # existent nodes or devices already enabled to support it.
        self.heketi_server_url = self.sc.get('resturl')
        arbiter_tags = ('required', 'supported')
        arbiter_already_supported = False

        self.node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in self.node_id_list[::-1]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                arbiter_already_supported = True
                break
            for device in node_info['devices'][::-1]:
                if device.get('tags', {}).get('arbiter') in arbiter_tags:
                    arbiter_already_supported = True
                    break
            else:
                continue
            break
        if not arbiter_already_supported:
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      self.node_id_list[0],
                                                      'supported')
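
    # A minimal sketch, under assumptions, of the tagging helper called above
    # (its real definition lives elsewhere in the suite): apply an 'arbiter'
    # tag through heketi_ops and register a cleanup restoring the previous
    # value. set_arbiter_tag/rm_arbiter_tag are assumed heketi_ops helpers.
    def _set_arbiter_tag_with_further_revert_sketch(
            self, node, server_url, source, source_id, tag_value,
            revert_to=None):
        # Apply the new tag value ('supported', 'required' or 'disabled')
        heketi_ops.set_arbiter_tag(node, server_url, source, source_id,
                                   tag_value)
        if revert_to is None:
            # No previous tag existed: drop ours at teardown
            self.addCleanup(heketi_ops.rm_arbiter_tag, node, server_url,
                            source, source_id)
        else:
            # Restore the previous tag value at teardown
            self.addCleanup(heketi_ops.set_arbiter_tag, node, server_url,
                            source, source_id, revert_to)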
    def test_expand_arbiter_volume_according_to_avg_file_size(
            self, avg_file_size, expected_brick_size, vol_expand=True):
        """Validate expansion of arbiter volume with diff avg file size"""
        data_hosts = []
        arbiter_hosts = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if i < 2:
                data_hosts.append(node_info['hostnames']['storage'][0])
            else:
                arbiter_hosts.append(node_info['hostnames']['storage'][0])
            self.assertEqual(node_info['tags']['arbiter'],
                             'disabled' if i < 2 else 'required')

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True,
                                  arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_expanded = False

        for i in range(2):
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            bricks = (
                self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                    vol_info,
                    arbiter_bricks=(2 if vol_expanded else 1),
                    data_bricks=(4 if vol_expanded else 2)))

            # verify arbiter bricks lie on arbiter hosts
            for brick in bricks['arbiter_list']:
                ip, brick_name = brick['name'].split(':')
                self.assertIn(ip, arbiter_hosts)
                # verify the size of arbiter brick
                cmd = "df -h %s --output=size | tail -1" % brick_name
                out = cmd_run_on_gluster_pod_or_node(self.node, cmd, ip)
                self.assertEqual(out, expected_brick_size)
            # verify that data bricks lie on data hosts
            for brick in bricks['data_list']:
                self.assertIn(brick['name'].split(':')[0], data_hosts)

            if vol_expanded or not vol_expand:
                break
            # Expand PVC and verify the size
            pvc_size = 2
            resize_pvc(self.node, self.pvc_name, pvc_size)
            verify_pvc_size(self.node, self.pvc_name, pvc_size)
            vol_expanded = True
    def get_node_info(self, node_id):
        """
        Get node information from node_id.

        :param node_id: str node ID
        :return node_info: dict node information
        """
        node_info = heketi_node_info(self.heketi_client_node,
                                     self.heketi_server_url,
                                     node_id,
                                     json=True)
        self.assertNotEqual(node_info, False,
                            "Node info on %s failed" % node_id)
        return node_info
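
    # Hypothetical usage of the wrapper above (the node ID is a placeholder):
    #     info = self.get_node_info("8cd7b1a05a8a")
    #     storage_ips = info["hostnames"]["storage"]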
Example #7
    def _get_free_space(self):
        """Get free space in each heketi device"""
        free_spaces = []
        heketi_node_id_list = heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        for node_id in heketi_node_id_list:
            node_info_dict = heketi_node_info(self.heketi_client_node,
                                              self.heketi_server_url,
                                              node_id, json=True)
            total_free_space = 0
            for device in node_info_dict["devices"]:
                total_free_space += device["storage"]["free"]
            free_spaces.append(total_free_space)
        total_free_space = int(math.floor(sum(free_spaces) / (1024**2)))
        return total_free_space
Example #8
    def _get_vol_size(self):
        # Get available free space disabling redundant nodes
        min_free_space_gb = 5
        heketi_url = self.heketi_server_url
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_ids:
            node_info = heketi_node_info(
                self.heketi_client_node, heketi_url, node_id, json=True)
            if (node_info['state'].lower() != 'online' or
                    not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_node_disable(
                    self.heketi_client_node, heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(
                    heketi_node_enable,
                    self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if free_space < min_free_space:
                    out = heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(
                        heketi_device_enable,
                        self.heketi_client_node, heketi_url, device['id'])
                    continue
                if node_id not in nodes:
                    nodes[node_id] = []
                nodes[node_id].append(device['storage']['free'])

        # Skip test if nodes requirements are not met
        if (len(nodes) < 3 or
                not all(map((lambda _list: len(_list) > 1), nodes.values()))):
            raise self.skipTest(
                "Could not find 3 online nodes with, "
                "at least, 2 online devices having free space "
                "bigger than %dGb." % min_free_space_gb)

        # Calculate size of a potential distributed vol
        vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
        return vol_size_gb
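
    # Worked sketch (hypothetical sizes, in KiB) of the sizing rule above:
    # take the biggest free device per node, take the minimum across nodes,
    # floor to GiB and add 1 so the volume cannot fit on the smaller disks.
    def _vol_size_example(self):
        nodes = {'n1': [10 * 1024**2, 6 * 1024**2],
                 'n2': [8 * 1024**2, 7 * 1024**2],
                 'n3': [9 * 1024**2, 5 * 1024**2]}
        vol_size_gb = int(min(map(max, nodes.values())) / (1024 ** 2)) + 1
        assert vol_size_gb == 9  # min(10, 8, 9) GiB + 1
        return vol_size_gb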
Example #9
    def get_online_nodes_disable_redundant(self):
        """
        Find online nodes and disable n-3 nodes and return
        list of online nodes
        """
        node_list = heketi_node_list(self.heketi_client_node,
                                     self.heketi_server_url)
        self.assertTrue(node_list, "Failed to list heketi nodes")
        g.log.info("Successfully got the list of nodes")
        # Fetch online nodes from node list
        online_hosts = []

        for node in node_list:
            node_info = heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node, json=True)
            if node_info["state"] == "online":
                online_hosts.append(node_info)

        # Skip test if online node count is less than 3
        if len(online_hosts) < 3:
            raise self.skipTest(
                "This test can run only if there are at least 3 online hosts")
        # if we have n nodes, disable n-3 nodes
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            g.log.info("going to disable node id %s", node_id)
            heketi_node_disable(self.heketi_client_node,
                                self.heketi_server_url,
                                node_id)
            self.addCleanup(heketi_node_enable,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            node_id)

        for host in online_hosts[1:3]:
            found_online = False
            for device in host["devices"]:
                if device["state"].strip().lower() == "online":
                    found_online = True
                    break
            if not found_online:
                self.skipTest(("no device online on node %s" % host["id"]))

        return online_hosts
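
    # Usage note: callers iterate the returned node-info dicts, e.g.
    # test_device_remove_operation() below does
    #     online_hosts = self.get_online_nodes_disable_redundant()
    #     for host in online_hosts[0:3]: ...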
    def test_to_get_list_of_nodes(self):
        """
        Listing all nodes and compare the
        node listed in previous step
        """

        # List all nodes
        ip = []
        g.log.info("Listing the node id")
        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        g.log.info("Successfully listed the node")

        if not heketi_node_id_list:
            raise ExecutionError("Node list is empty")

        for node_id in heketi_node_id_list:
            g.log.info("Retrieve the node info")
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            self.assertTrue(node_info, ("Failed to " "retrieve the node info"))
            g.log.info("Successfully retrieved the node info %s" % node_id)
            ip.append(node_info["hostnames"]["storage"])

        # Compare the node listed in previous step
        hostname = []

        g.log.info("Get the pool list")
        list_of_pools = get_pool_list('auto_get_gluster_endpoint')
        self.assertTrue(list_of_pools, ("Failed to get the "
                                        "pool list from gluster pods/nodes"))
        g.log.info("Successfully got the pool list from gluster pods/nodes")
        for pool in list_of_pools:
            hostname.append(pool["hostname"])

        if len(heketi_node_id_list) != len(list_of_pools):
            raise ExecutionError("Heketi node list %s is not equal "
                                 "to gluster pool list %s" % (ip, hostname))
        g.log.info("The node IPs from node info are %s\n"
                   "and the pool list from gluster pods/nodes is %s"
                   % (ip, hostname))
    def get_free_space_summary_devices(self):
        """
        Calculates free space across all devices
        """

        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        total_free_space = 0
        for node_id in heketi_node_id_list:
            node_info_dict = heketi_ops.heketi_node_info(
                self.heketi_client_node,
                self.heketi_server_url,
                node_id,
                json=True)
            for device in node_info_dict["devices"]:
                total_free_space += (device["storage"]["free"] / (1024**2))

        return total_free_space
    def test_to_retrieve_node_info(self):
        """
        List and retrieve node related info
        """

        # List all nodes
        g.log.info("Listing the node id")
        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
        g.log.info("Successfully listed the node")

        for node_id in heketi_node_id_list:
            g.log.info("Retrieve the node info")
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            self.assertTrue(node_info, ("Failed to " "retrieve the node info"))
            g.log.info("Successfully retrieved the node info %s" % node_id)
Example #13
    def test_heketi_with_device_removal_insuff_space(self):
        """Validate heketi with device removal insufficient space"""

        # Disable 4+ nodes and 3+ devices on the first 3 nodes
        min_free_space_gb = 5
        min_free_space = min_free_space_gb * 1024**2
        heketi_url = self.heketi_server_url
        heketi_node = self.heketi_client_node
        nodes = {}

        node_ids = heketi_node_list(heketi_node, heketi_url)
        self.assertTrue(node_ids)
        for node_id in node_ids:
            node_info = heketi_node_info(
                heketi_node, heketi_url, node_id, json=True)
            if (node_info["state"].lower() != "online" or
                    not node_info["devices"]):
                continue
            if len(nodes) > 2:
                heketi_node_disable(heketi_node, heketi_url, node_id)
                self.addCleanup(
                    heketi_node_enable, heketi_node, heketi_url, node_id)
                continue
            for device in node_info["devices"]:
                if device["state"].lower() != "online":
                    continue
                free_space = device["storage"]["free"]
                if node_id not in nodes:
                    nodes[node_id] = []
                if (free_space < min_free_space or len(nodes[node_id]) > 1):
                    heketi_device_disable(
                        heketi_node, heketi_url, device["id"])
                    self.addCleanup(
                        heketi_device_enable,
                        heketi_node, heketi_url, device["id"])
                    continue
                nodes[node_id].append({
                    "device_id": device["id"], "free": free_space})

        # Skip test if nodes requirements are not met
        if (len(nodes) < 3 or
                not all(map((lambda _list: len(_list) > 1), nodes.values()))):
            raise self.skipTest(
                "Could not find 3 online nodes with 2 online devices "
                "having free space bigger than %dGb." % min_free_space_gb)

        # Calculate size of a potential distributed vol
        if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
            index = 0
        else:
            index = 1
        vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024 ** 2)) + 1
        device_id = nodes[node_ids[0]][index]["device_id"]

        # Create volume with such size that we consume space more than
        # size of smaller disks
        try:
            heketi_vol = heketi_volume_create(
                heketi_node, heketi_url, vol_size_gb, json=True)
        except Exception as e:
            g.log.warning(
                "Got following error trying to create '%s'Gb vol: %s" % (
                    vol_size_gb, e))
            vol_size_gb -= 1
            heketi_vol = heketi_volume_create(
                heketi_node, heketi_url, vol_size_gb, json=True)
        self.addCleanup(self.delete_volumes, heketi_vol["bricks"][0]["volume"])

        # Try to 'remove' bigger Heketi disk expecting error,
        # because there is no space on smaller disk to relocate bricks to
        heketi_device_disable(heketi_node, heketi_url, device_id)
        self.addCleanup(
            heketi_device_enable, heketi_node, heketi_url, device_id)
        try:
            self.assertRaises(
                ExecutionError, heketi_device_remove,
                heketi_node, heketi_url, device_id)
        except Exception:
            self.addCleanup(
                heketi_device_disable, heketi_node, heketi_url, device_id)
            raise
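
        # Note on the cleanup ordering above: addCleanup runs LIFO, so if the
        # 'remove' unexpectedly succeeds, the extra 'disable' cleanup fires
        # before the earlier 'enable' one and the device ends up enabled.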
Example #14
    def test_device_remove_operation(self, delete_device):
        """Validate remove/delete device using heketi-cli"""

        gluster_server_0 = list(g.config["gluster_servers"].values())[0]
        try:
            device_name = gluster_server_0["additional_devices"][0]
        except (KeyError, IndexError):
            self.skipTest(
                "Additional disk is not specified for node with following "
                "hostnames and IP addresses: %s, %s." % (
                    gluster_server_0.get('manage', '?'),
                    gluster_server_0.get('storage', '?')))
        manage_hostname = gluster_server_0["manage"]

        # Get node ID of the Gluster hostname
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url, json=True)
        self.assertTrue(
            topo_info["clusters"][0]["nodes"],
            "Cluster info command returned empty list of nodes.")

        node_id = None
        for node in topo_info["clusters"][0]["nodes"]:
            if manage_hostname == node['hostnames']["manage"][0]:
                node_id = node["id"]
                break
        self.assertNotEqual(
            node_id, None,
            "No information about node_id for %s" % manage_hostname)

        # Iterate chosen node devices and pick the smallest online one.
        lowest_device_size = lowest_device_id = None
        online_hosts = self.get_online_nodes_disable_redundant()
        for host in online_hosts[0:3]:
            if node_id != host["id"]:
                continue
            for device in host["devices"]:
                if device["state"].strip().lower() != "online":
                    continue
                if (lowest_device_size is None or
                        device["storage"]["total"] < lowest_device_size):
                    lowest_device_size = device["storage"]["total"]
                    lowest_device_id = device["id"]
                    lowest_device_name = device["name"]
        if lowest_device_id is None:
            self.skipTest(
                "Didn't find suitable device for disablement on '%s' node." % (
                    node_id))

        # Create volume
        vol_size = 1
        vol_info = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, vol_size,
            json=True)
        self.assertTrue(vol_info, (
            "Failed to create heketi volume of size %d" % vol_size))
        self.addCleanup(self.delete_volumes, vol_info['id'])

        # Add extra device, then remember it's ID and size
        heketi_device_add(self.heketi_client_node, self.heketi_server_url,
                          device_name, node_id)
        node_info_after_addition = heketi_node_info(
            self.heketi_client_node, self.heketi_server_url, node_id,
            json=True)
        for device in node_info_after_addition["devices"]:
            if device["name"] != device_name:
                continue
            device_id_new = device["id"]
            device_size_new = device["storage"]["total"]
        self.addCleanup(heketi_device_delete, self.heketi_client_node,
                        self.heketi_server_url, device_id_new)
        self.addCleanup(heketi_device_remove, self.heketi_client_node,
                        self.heketi_server_url, device_id_new)
        self.addCleanup(heketi_device_disable, self.heketi_client_node,
                        self.heketi_server_url, device_id_new)
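
        # Note: addCleanup runs LIFO, so at teardown the cleanups above fire
        # as disable -> remove -> delete, which is the order Heketi requires
        # when detaching a device.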

        if lowest_device_size > device_size_new:
            skip_msg = ("Skip test case, because newly added disk %s is "
                        "smaller than device which we want to remove %s." % (
                            device_size_new, lowest_device_size))
            self.skipTest(skip_msg)

        g.log.info("Removing device id %s" % lowest_device_id)
        ret, out, err = heketi_device_remove(
            self.heketi_client_node, self.heketi_server_url,
            lowest_device_id, raw_cli_output=True)
        if ret == 0:
            self.addCleanup(heketi_device_enable, self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)
            self.addCleanup(heketi_device_disable, self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)
        self.assertNotEqual(ret, 0, (
            "Device removal did not fail. ret: %s, out: %s, err: %s." % (
                ret, out, err)))
        g.log.info("Device removal failed as expected, err- %s", err)

        # Need to disable device before removing
        heketi_device_disable(
            self.heketi_client_node, self.heketi_server_url,
            lowest_device_id)
        if not delete_device:
            self.addCleanup(heketi_device_enable, self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)

        # Remove device from Heketi
        try:
            heketi_device_remove(
                self.heketi_client_node, self.heketi_server_url,
                lowest_device_id)
        except Exception:
            if delete_device:
                self.addCleanup(heketi_device_enable, self.heketi_client_node,
                                self.heketi_server_url, lowest_device_id)
            raise
        if not delete_device:
            self.addCleanup(heketi_device_disable, self.heketi_client_node,
                            self.heketi_server_url, lowest_device_id)

        if delete_device:
            try:
                heketi_device_delete(
                    self.heketi_client_node, self.heketi_server_url,
                    lowest_device_id)
            except Exception:
                self.addCleanup(heketi_device_enable, self.heketi_client_node,
                                self.heketi_server_url, lowest_device_id)
                self.addCleanup(heketi_device_disable, self.heketi_client_node,
                                self.heketi_server_url, lowest_device_id)
                raise
            self.addCleanup(
                heketi_device_add,
                self.heketi_client_node, self.heketi_server_url,
                lowest_device_name, node_id)

        # Create volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url, vol_size,
                                        json=True)
        self.assertTrue(vol_info, (
                "Failed to create heketi volume of size %d" % vol_size))
        self.addCleanup(self.delete_volumes, vol_info['id'])

        if delete_device:
            return

        # Check that none of volume's bricks is present on the device
        present = self.check_any_of_bricks_present_in_device(
            vol_info['bricks'], lowest_device_id)
        self.assertFalse(
            present,
            "Some of the '%s' volume's bricks are present on the removed "
            "'%s' device." % (vol_info['id'], lowest_device_id))
    def test_volume_expansion_no_free_space(self):
        """Validate volume expansion when there is no free space"""

        vol_size, expand_size, additional_devices_attached = None, 10, {}
        h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest("3 Heketi nodes are required.")

        # Disable 4th and other nodes
        for node_id in heketi_node_id_list[3:]:
            heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable, h_node,
                            h_server_url, node_id)

        # Prepare first 3 nodes
        smallest_size = None
        err_msg = ''
        for node_id in heketi_node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_server_url,
                                                    node_id,
                                                    json=True)

            # Disable second and other devices
            devices = node_info["devices"]
            self.assertTrue(devices,
                            "Node '%s' does not have devices." % node_id)
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if (smallest_size is None
                    or devices[0]["storage"]["free"] < smallest_size):
                smallest_size = devices[0]["storage"]["free"]
            for device in node_info["devices"][1:]:
                heketi_ops.heketi_device_disable(h_node, h_server_url,
                                                 device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable, h_node,
                                h_server_url, device["id"])

            # Gather info about additional devices
            additional_device_name = None
            for gluster_server in self.gluster_servers:
                gluster_server_data = self.gluster_servers_info[gluster_server]
                g_manage = gluster_server_data["manage"]
                g_storage = gluster_server_data["storage"]
                if not (g_manage in node_info["hostnames"]["manage"]
                        or g_storage in node_info["hostnames"]["storage"]):
                    continue
                additional_device_name = ((
                    gluster_server_data.get("additional_devices") or [''])[0])
                break

            if not additional_device_name:
                err_msg += (
                    "No 'additional_devices' are configured for "
                    "'%s' node, which has following hostnames and "
                    "IP addresses: %s.\n" %
                    (node_id, ', '.join(node_info["hostnames"]["manage"] +
                                        node_info["hostnames"]["storage"])))
                continue

            heketi_ops.heketi_device_add(h_node, h_server_url,
                                         additional_device_name, node_id)
            additional_devices_attached.update(
                {node_id: additional_device_name})

        # Schedule cleanup of the added devices
        for node_id in additional_devices_attached.keys():
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_server_url,
                                                    node_id,
                                                    json=True)
            for device in node_info["devices"]:
                if device["name"] != additional_devices_attached[node_id]:
                    continue
                self.addCleanup(self.detach_devices_attached, device["id"])
                break
            else:
                self.fail("Could not find ID for added device on "
                          "'%s' node." % node_id)

        if err_msg:
            self.skipTest(err_msg)

        # Temporarily disable the new devices
        self.disable_devices(additional_devices_attached)

        # Create volume and save info about it
        vol_size = int(smallest_size / (1024**2)) - 1
        creation_info = heketi_ops.heketi_volume_create(h_node,
                                                        h_server_url,
                                                        vol_size,
                                                        json=True)
        volume_name, volume_id = creation_info["name"], creation_info["id"]
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        h_node,
                        h_server_url,
                        volume_id,
                        raise_on_error=False)

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        free_space_before_expansion = self.get_devices_summary_free_space()

        # Try to expand volume with not enough device space
        self.assertRaises(ExecutionError, heketi_ops.heketi_volume_expand,
                          h_node, h_server_url, volume_id, expand_size)

        # Enable new devices to be able to expand our volume
        self.enable_devices(additional_devices_attached)

        # Expand volume and validate results
        heketi_ops.heketi_volume_expand(h_node,
                                        h_server_url,
                                        volume_id,
                                        expand_size,
                                        json=True)
        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_before_expansion, free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        self.assertGreater(volume_info_after_expansion["size"],
                           volume_info_before_expansion["size"],
                           "Size of %s not increased" % volume_id)
        self.assertGreater(num_of_bricks_after_expansion,
                           num_of_bricks_before_expansion)
        self.assertEqual(
            num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

        # Delete volume and validate release of the used space
        heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
        free_space_after_deletion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_after_deletion, free_space_after_expansion,
            "Free space not reclaimed after deletion of volume %s" % volume_id)
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            raise self.skipTest("Could not find 3 online nodes with, "
                                "at least, 1 online device having free space "
                                "bigger than %dGb." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attached PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
Example #17
    def test_volume_creation_no_free_devices(self):
        """Validate heketi error is returned when no free devices available"""
        node, server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(node, server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(node,
                                                    server_url,
                                                    node_id,
                                                    json=True)
            node_info_list.append(node_info)

        # Disable 4th and other nodes
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(node, server_url, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable, node, server_url,
                            node_id)

        # Disable second and other devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(node, server_url,
                                                       device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable, node,
                                server_url, device["id"])

        # Calculate common available space
        available_spaces = [
            int(node_info["devices"][0]["storage"]["free"])
            for n in node_info_list[0:3]
        ]
        min_space_gb = int(min(available_spaces) / 1024**2)
        self.assertGreater(min_space_gb, 3, "Not enough available free space.")

        # Create first small volume
        vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        vol["id"])

        # Try to create second volume getting "no free space" error
        try:
            vol_fail = heketi_ops.heketi_volume_create(node,
                                                       server_url,
                                                       min_space_gb,
                                                       json=True)
        except exceptions.ExecutionError:
            g.log.info("Volume was not created as expected.")
        else:
            self.addCleanup(heketi_ops.heketi_volume_delete,
                            self.heketi_client_node, self.heketi_server_url,
                            vol_fail["bricks"][0]["volume"])
            self.assertFalse(
                vol_fail,
                "Volume should have not been created. Out: %s" % vol_fail)
    def test_create_volumes_enabling_and_disabling_heketi_devices(self):
        """Validate enable/disable of heketi device"""

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   self.heketi_server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            node_info_list.append(node_info)

        # Disable 4th and other nodes
        if len(node_id_list) > 3:
            for node_id in node_id_list[3:]:
                heketi_ops.heketi_node_disable(self.heketi_client_node,
                                               self.heketi_server_url, node_id)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node,
                                self.heketi_server_url, node_id)

        # Disable second and other devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                                       self.heketi_server_url,
                                                       device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable,
                                self.heketi_client_node,
                                self.heketi_server_url, device["id"])

        # Create heketi volume
        out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        self.assertTrue(out, "Failed to create heketi volume of size 1")
        g.log.info("Successfully created heketi volume of size 1")
        device_id = out["bricks"][0]["device"]
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        out["bricks"][0]["volume"])

        # Disable device
        g.log.info("Disabling '%s' device" % device_id)
        out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                               self.heketi_server_url,
                                               device_id)
        self.assertTrue(out, "Failed to disable the device %s" % device_id)
        g.log.info("Successfully disabled device %s" % device_id)

        try:
            # Get device info
            g.log.info("Retrieving '%s' device info" % device_id)
            out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                                self.heketi_server_url,
                                                device_id,
                                                json=True)
            self.assertTrue(out, "Failed to get device info %s" % device_id)
            g.log.info("Successfully retrieved device info %s" % device_id)
            name = out["name"]
            if out["state"].lower().strip() != "offline":
                raise exceptions.ExecutionError(
                    "Device %s is not in offline state." % name)
            g.log.info("Device %s is now offine" % name)

            # Try to create heketi volume
            g.log.info("Creating heketi volume: Expected to fail.")
            try:
                out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      1,
                                                      json=True)
            except exceptions.ExecutionError:
                g.log.info("Volume was not created as expected.")
            else:
                self.addCleanup(heketi_ops.heketi_volume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                out["bricks"][0]["volume"])
                msg = "Volume unexpectedly created. Out: %s" % out
                assert False, msg
        finally:
            # Enable the device back
            g.log.info("Enable '%s' device back." % device_id)
            out = heketi_ops.heketi_device_enable(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  device_id)
            self.assertTrue(out, "Failed to enable the device %s" % device_id)
            g.log.info("Successfully enabled device %s" % device_id)

        # Get device info
        out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                            self.heketi_server_url,
                                            device_id,
                                            json=True)
        self.assertTrue(out, ("Failed to get device info %s" % device_id))
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"] != "online":
            raise exceptions.ExecutionError(
                "Device %s is not in online state." % name)

        # Create heketi volume of size 1
        out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        self.assertTrue(out, "Failed to create volume of size 1")
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        out["bricks"][0]["volume"])
        g.log.info("Successfully created volume of size 1")
        name = out["name"]

        # Get gluster volume info
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
        g.log.info("Successfully got the '%s' volume info." % name)
    def test_create_arbiter_vol_with_more_than_one_brick_set(self):
        """Validate volume creation using heketi for more than six brick set"""

        # Set arbiter:disabled tag to the data devices and get their info
        data_nodes = []
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)

            if len(node_info['devices']) < 2:
                self.skipTest("Nodes are expected to have at least 2 devices")
            if not all([
                    int(d['storage']['free']) > (3 * 1024**2)
                    for d in node_info['devices'][0:2]
            ]):
                self.skipTest(
                    "Devices are expected to have more than 3Gb of free space")
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'disabled',
                    device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled',
                node_info.get('tags', {}).get('arbiter'))

            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'required',
                node_info.get('tags', {}).get('arbiter'))
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], 'required',
                    device.get('tags', {}).get('arbiter'))

        # Take the biggest disk on each of the 2 data nodes; the smaller of
        # those two values bounds the target vol calculation.
        for i, node_info in enumerate(data_nodes):
            biggest_disk_free_space = 0
            for device in node_info['devices'][0:2]:
                free = int(device['storage']['free'])
                if free > biggest_disk_free_space:
                    biggest_disk_free_space = free
            data_nodes[i]['biggest_free_space'] = biggest_disk_free_space
        target_vol_size_kb = 1 + min(
            [n['biggest_free_space'] for n in data_nodes])

        # Check that all the data devices have at least half the required size
        all_big_enough = True
        for node_info in data_nodes:
            for device in node_info['devices'][0:2]:
                if float(device['storage']['free']) < (target_vol_size_kb / 2):
                    all_big_enough = False
                    break

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create helper arbiter vol if not all the data devices have
        # half of required free space.
        if not all_big_enough:
            helper_vol_size_kb, target_vol_size_kb = 0, 0
            smaller_device_id = None
            for node_info in data_nodes:
                devices = node_info['devices']
                if (devices[0]['storage']['free'] >
                        devices[1]['storage']['free']):
                    smaller_device_id = devices[1]['id']
                    smaller_device = devices[1]['storage']['free']
                    bigger_device = devices[0]['storage']['free']
                else:
                    smaller_device_id = devices[0]['id']
                    smaller_device = devices[0]['storage']['free']
                    bigger_device = devices[1]['storage']['free']
                diff = bigger_device - (2 * smaller_device) + 1
                if diff > helper_vol_size_kb:
                    helper_vol_size_kb = diff
                    target_vol_size_kb = bigger_device - diff
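
            # Worked example (values in KiB): with bigger = 10 * 1024**2 and
            # smaller = 4 * 1024**2, diff comes to 2 * 1024**2 + 1 (the helper
            # vol) and target = 2 * smaller - 1, so the smaller device again
            # holds at least half of the target volume.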

            # Disable smaller device and create helper vol on bigger one
            # to reduce its size, then enable smaller device back.
            try:
                out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                                       self.heketi_server_url,
                                                       smaller_device_id)
                self.assertTrue(out)
                self.create_and_wait_for_pvc(
                    int(helper_vol_size_kb / 1024.0**2) + 1)
            finally:
                out = heketi_ops.heketi_device_enable(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      smaller_device_id)
                self.assertTrue(out)

        # Create target arbiter volume
        self.create_and_wait_for_pvc(int(target_vol_size_kb / 1024.0**2))

        # Get gluster volume info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)
    def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
            self, node_tags):
        """Validate exapnsion of arbiter volume with defferent tags

           This test case is going to run two tests:
                1. If value is True it is going to set tags
                   on nodes and run test
                2. If value is False it is going to set tags
                   on devices and run test
        """

        data_nodes = []
        arbiter_nodes = []

        # Set 'arbiter:disabled' tag on the first two nodes (data) and
        # 'arbiter:required' on the rest (arbiter)
        for i, node_id in enumerate(self.node_id_list):
            if node_tags:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'node',
                    node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)

            if not node_tags:
                for device in node_info['devices']:
                    self._set_arbiter_tag_with_further_revert(
                        self.heketi_client_node, self.heketi_server_url,
                        'device', device['id'],
                        'disabled' if i < 2 else 'required')
                    device_info = heketi_ops.heketi_device_info(
                        self.heketi_client_node,
                        self.heketi_server_url,
                        device['id'],
                        json=True)
                    self.assertEqual(device_info['tags']['arbiter'],
                                     'disabled' if i < 2 else 'required')

            node = {
                'id': node_id,
                'host': node_info['hostnames']['storage'][0]
            }
            if node_tags:
                self.assertEqual(node_info['tags']['arbiter'],
                                 'disabled' if i < 2 else 'required')
            if i < 2:
                data_nodes.append(node)
            else:
                arbiter_nodes.append(node)

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info)

        arbiter_hosts = [obj['host'] for obj in arbiter_nodes]
        data_hosts = [obj['host'] for obj in data_nodes]

        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)

        # Expand PVC and verify the size
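        # (expansion is expected to add a second replica subvolume, doubling
        # the brick counts checked below)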
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)

        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)

    def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
        """Validate reuse of volume space after deletion of PVCs"""
        min_storage_gb = 10

        # Set arbiter:disabled tags to the first 2 nodes
        data_nodes = []
        biggest_disks = []
        self.assertGreater(len(self.node_id_list), 2)
        for node_id in self.node_id_list[0:2]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            biggest_disk_free_space = 0
            for device in node_info['devices']:
                disk_free_space = int(device['storage']['free'])
                if disk_free_space < (min_storage_gb * 1024**2):
                    self.skipTest("Devices are expected to have more than "
                                  "%s GiB of free space" % min_storage_gb)
                if disk_free_space > biggest_disk_free_space:
                    biggest_disk_free_space = disk_free_space
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'disabled',
                    revert_to=device.get('tags', {}).get('arbiter'))
            biggest_disks.append(biggest_disk_free_space)
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      node_id,
                                                      'disabled',
                                                      revert_to=node_info.get(
                                                          'tags',
                                                          {}).get('arbiter'))
            data_nodes.append(node_info)

        # Set arbiter:required tag to all other nodes and their devices
        arbiter_nodes = []
        for node_id in self.node_id_list[2:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'],
                    'required',
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      node_id,
                                                      'required',
                                                      revert_to=node_info.get(
                                                          'tags',
                                                          {}).get('arbiter'))
            arbiter_nodes.append(node_info)

        # Calculate size and amount of volumes to be created
        pvc_size = int(min(biggest_disks) / 1024**2)
        pvc_amount = max([len(n['devices']) for n in data_nodes]) + 1
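        # 'pvc_size' roughly fills one device, and 'pvc_amount' exceeds the
        # device count of the most populated data node, so the loop below
        # can only succeed if space freed by deleted volumes is reused.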

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True)

        # Create and delete 3 small volumes concurrently
        pvc_names = []
        for i in range(3):
            pvc_name = oc_create_pvc(self.node,
                                     self.sc_name,
                                     pvc_name_prefix='arbiter-pvc',
                                     pvc_size=int(pvc_size / 3))
            pvc_names.append(pvc_name)
        caught_exception = None
        for pvc_name in pvc_names:
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception as err:
                # Register cleanups for all the PVCs before re-raising
                for pvc_to_clean in pvc_names:
                    self.addCleanup(wait_for_resource_absence, self.node,
                                    'pvc', pvc_to_clean)
                for pvc_to_clean in pvc_names:
                    self.addCleanup(oc_delete, self.node, 'pvc', pvc_to_clean)
                caught_exception = err
        if caught_exception:
            raise caught_exception
        for pvc_name in pvc_names:
            oc_delete(self.node, 'pvc', pvc_name)
        for pvc_name in pvc_names:
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Create and delete big volumes in a loop
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(self.node,
                                     self.sc_name,
                                     pvc_name_prefix='arbiter-pvc',
                                     pvc_size=pvc_size)
            try:
                verify_pvc_status_is_bound(self.node, pvc_name)
            except Exception:
                self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                                pvc_name)
                self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
                raise
            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)

    # Assumes ddt-style parametrization, as with the expansion test above.
    @ddt.data(True, False)
    def test_arbiter_required_tag_on_node_or_devices_other_disabled(
            self, node_with_tag):
        """Validate arbiter volume creation with required node or device tag"""

        pvc_amount = 3

        # Get Heketi nodes info
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   self.heketi_server_url)

        # Set arbiter:required tags
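        # (on the node itself when 'node_with_tag' is True, otherwise on
        # each of its devices; the disabled tags below follow the same rule)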
        arbiter_node = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   node_id_list[0],
                                                   json=True)
        arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node,
            self.heketi_server_url,
            'node',
            node_id_list[0], ('required' if node_with_tag else None),
            revert_to=arbiter_node.get('tags', {}).get('arbiter'))
        for device in arbiter_node['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node,
                self.heketi_server_url,
                'device',
                device['id'], (None if node_with_tag else 'required'),
                revert_to=device.get('tags', {}).get('arbiter'))

        # Set arbiter:disabled tags
        data_nodes, data_nodes_ip_addresses = [], []
        for node_id in node_id_list[1:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if not any([
                    int(d['storage']['free']) > (pvc_amount * 1024**2)
                    for d in node_info['devices']
            ]):
                self.skipTest("At least one device on node %s is expected "
                              "to have more than %s GiB of free space" % (
                                  node_id, pvc_amount))
            data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'], (None if node_with_tag else 'disabled'),
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node,
                self.heketi_server_url,
                'node',
                node_id, ('disabled' if node_with_tag else None),
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Create PVCs and check that their bricks are correctly located
        self.create_storage_class(is_arbiter_vol=True)
        for i in range(pvc_amount):
            self.create_and_wait_for_pvc(1)

            # Get gluster volume info
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            arbiter_bricks, data_bricks = [], []
            for brick in vol_info['bricks']['brick']:
                if int(brick["isArbiter"]) == 1:
                    arbiter_bricks.append(brick["name"])
                else:
                    data_bricks.append(brick["name"])

            # Verify that all the arbiter bricks are located on
            # arbiter:required node and data bricks on all other nodes only.
            for arbiter_brick in arbiter_bricks:
                self.assertIn(
                    arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
            for data_brick in data_bricks:
                self.assertIn(
                    data_brick.split(':')[0], data_nodes_ip_addresses)