Example #1
    def test_to_check_deletion_of_node(self):
        """Validate deletion of a node which contains devices"""

        # Create Heketi volume to make sure we have devices with usages
        heketi_url = self.heketi_server_url
        vol = heketi_volume_create(self.heketi_client_node,
                                   heketi_url,
                                   1,
                                   json=True)
        self.assertTrue(vol, "Failed to create heketi volume.")
        g.log.info("Heketi volume successfully created")
        volume_id = vol["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        # Pick up suitable node
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        for node_id in node_ids:
            node_info = heketi_node_info(self.heketi_client_node,
                                         heketi_url,
                                         node_id,
                                         json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                if device['storage']['used']:
                    node_id = node_info['id']
                    break
            else:
                continue
            # Found an online node with a used online device
            break
        else:
            self.fail("Failed to find online node with online device which "
                      "has some usages.")

        # Try to delete the node by its ID
        g.log.info("Trying to delete the node which contains devices in it. "
                   "Expecting failure.")
        self.assertRaises(AssertionError, heketi_node_delete,
                          self.heketi_client_node, heketi_url, node_id)

        # Make sure our node hasn't been deleted
        g.log.info("Listing heketi node list")
        node_list = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_list, ("Failed to list heketi nodes"))
        self.assertIn(node_id, node_list)
        node_info = heketi_node_info(self.heketi_client_node,
                                     heketi_url,
                                     node_id,
                                     json=True)
        self.assertEqual(node_info['state'].lower(), 'online')
    def test_to_check_deletion_of_node(self):
        """Validate deletion of a node which contains devices"""

        # Create Heketi volume to make sure we have devices with usages
        heketi_url = self.heketi_server_url
        vol = heketi_volume_create(
            self.heketi_client_node, heketi_url, 1, json=True)
        self.assertTrue(vol, "Failed to create heketi volume.")
        g.log.info("Heketi volume successfully created")
        volume_id = vol["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        # Pick up suitable node
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        for node_id in node_ids:
            node_info = heketi_node_info(
                self.heketi_client_node, heketi_url, node_id, json=True)
            if (node_info['state'].lower() != 'online' or
                    not node_info['devices']):
                continue
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                if device['storage']['used']:
                    node_id = node_info['id']
                    break
            else:
                continue
            # Found an online node with a used online device
            break
        else:
            self.fail(
                "Failed to find online node with online device which "
                "has some usages.")

        # Try to delete the node by its ID
        g.log.info("Trying to delete the node which contains devices in it. "
                   "Expecting failure.")
        self.assertRaises(
            ExecutionError,
            heketi_node_delete,
            self.heketi_client_node, heketi_url, node_id)

        # Make sure our node hasn't been deleted
        g.log.info("Listing heketi node list")
        node_list = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_list, ("Failed to list heketi nodes"))
        self.assertIn(node_id, node_list)
        node_info = heketi_node_info(
            self.heketi_client_node, heketi_url, node_id, json=True)
        self.assertEqual(node_info['state'].lower(), 'online')
    def get_devices_summary_free_space(self):
        """
        Sums free space across every device on each node and
        returns the total free space of all devices, in GiB
        """

        free_spaces = []

        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in heketi_node_id_list:
            node_info_dict = heketi_ops.heketi_node_info(
                self.heketi_client_node,
                self.heketi_server_url,
                node_id,
                json=True)
            total_free_space = 0
            for device in node_info_dict["devices"]:
                total_free_space += device["storage"]["free"]
            free_spaces.append(total_free_space)

        total_free_space = sum(free_spaces) / (1024**2)
        total_free_space = int(math.floor(total_free_space))

        return total_free_space
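
Note on units: heketi reports device storage figures in KiB, so dividing the summed "free" values by 1024**2 converts them to GiB before flooring. A minimal standalone sketch of the same conversion (the sample payload is hypothetical):

    import math

    def total_free_gib(node_infos):
        """Sum 'free' KiB across all devices and floor to whole GiB."""
        free_kib = sum(device["storage"]["free"]
                       for node in node_infos
                       for device in node["devices"])
        return int(math.floor(free_kib / (1024 ** 2)))

    nodes = [{"devices": [{"storage": {"free": 3 * 1024 ** 2}}]}]
    assert total_free_gib(nodes) == 3  # 3 GiB worth of KiB
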
    def test_heketi_node_list(self):
        """Test node list operation
        """
        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        # List heketi nodes
        node_ips = []
        heketi_node_id_list = heketi_ops.heketi_node_list(h_client, h_server)

        for node_id in heketi_node_id_list:
            node_info = heketi_ops.heketi_node_info(
                h_client, h_server, node_id, json=True)
            node_ips.append(node_info["hostnames"]["storage"])

        # Compare the node listed in previous step
        hostnames = []
        list_of_pools = peer_ops.get_pool_list('auto_get_gluster_endpoint')
        self.assertTrue(
            list_of_pools,
            "Failed to get the pool list from gluster pods/nodes")
        for pool in list_of_pools:
            hostnames.append(pool["hostname"])
        self.assertEqual(
            len(heketi_node_id_list), len(list_of_pools),
            "Heketi volume list %s is not equal to gluster volume list %s"
            % (node_ips, hostnames))
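
The assertion above only compares counts. A hedged sketch of a stricter check, reusing the variables built in this test (node_info["hostnames"]["storage"] is a list, so node_ips is a list of lists and has to be flattened; it assumes the gluster pool list reports the same storage addresses):

    heketi_ips = {ip for ips in node_ips for ip in ips}
    gluster_hosts = set(hostnames)
    self.assertEqual(
        heketi_ips, gluster_hosts,
        "Heketi node IPs %s do not match gluster pool hosts %s"
        % (sorted(heketi_ips), sorted(gluster_hosts)))
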
    def test_brick_evict_on_three_node_with_one_down(self):
        """Test brick evict basic functionality and verify brick evict
        will fail after node down if nodes are three"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Disable node if more than 3
        node_list = heketi_ops.heketi_node_list(h_node, h_server)
        if len(node_list) > 3:
            for node_id in node_list[3:]:
                heketi_ops.heketi_node_disable(h_node, h_server, node_id)
                self.addCleanup(heketi_ops.heketi_node_enable, h_node,
                                h_server, node_id)

        # Create heketi volume
        vol_info = heketi_ops.heketi_volume_create(h_node,
                                                   h_server,
                                                   1,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_node, h_server,
                        vol_info.get('id'))

        # Get node on which heketi pod is scheduled
        heketi_pod = openshift_ops.get_pod_name_from_dc(
            self.ocp_client, self.heketi_dc_name)
        heketi_node = openshift_ops.oc_get_custom_resource(
            self.ocp_client, 'pod', '.:spec.nodeName', heketi_pod)[0]

        # Get hostnames of the nodes that were disabled above
        host_list = []
        for node_id in node_list[3:]:
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_server,
                                                    node_id,
                                                    json=True)
            host_list.append(node_info.get('hostnames').get('manage')[0])

        # Get brick id and glusterfs node which is not heketi node
        for node in vol_info.get('bricks', {}):
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_server,
                                                    node.get('node'),
                                                    json=True)
            hostname = node_info.get('hostnames').get('manage')[0]
            if (hostname != heketi_node) and (hostname not in host_list):
                brick_id = node.get('id')
                break

        self._power_off_node_and_wait_node_to_be_not_ready(hostname)

        # Perform brick evict operation
        try:
            heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)
        except AssertionError as e:
            if ('No Replacement was found' not in six.text_type(e)):
                raise
    def get_ready_for_node_add(self, hostname):
        self.configure_node_to_run_gluster(hostname)

        h_nodes = heketi_ops.heketi_node_list(self.h_node, self.h_url)

        # Disable nodes except first two nodes
        for node_id in h_nodes[2:]:
            heketi_ops.heketi_node_disable(self.h_node, self.h_url, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable, self.h_node,
                            self.h_url, node_id)
    def test_heketi_node_states_enable_disable(self):
        """Test node enable and disable functionality
        """
        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        node_list = heketi_ops.heketi_node_list(h_client, h_server)
        online_hosts = []
        for node_id in node_list:
            node_info = heketi_ops.heketi_node_info(h_client,
                                                    h_server,
                                                    node_id,
                                                    json=True)
            if node_info["state"] == "online":
                online_hosts.append(node_info)

        if len(online_hosts) < 3:
            raise self.skipTest(
                "This test can run only if online hosts are more than 2")

        # Disable all but the first 3 online nodes (n-3 of n nodes)
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            heketi_ops.heketi_node_disable(h_client, h_server, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable, h_client, h_server,
                            node_id)

        # Create volume when 3 nodes are online
        vol_size = 1
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_server,
                                                   vol_size,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_client, h_server,
                        vol_info['id'])

        node_id = online_hosts[0]['id']
        try:
            heketi_ops.heketi_node_disable(h_client, h_server, node_id)

            # Try to create a volume, volume creation should fail
            with self.assertRaises(AssertionError):
                heketi_volume = heketi_ops.heketi_volume_create(
                    h_client, h_server, vol_size)
                self.addCleanup(heketi_ops.heketi_volume_delete, h_client,
                                h_server, heketi_volume["id"])
        finally:
            # Enable heketi node
            heketi_ops.heketi_node_enable(h_client, h_server, node_id)

        # Create volume when heketi node is enabled
        vol_info = heketi_ops.heketi_volume_create(h_client,
                                                   h_server,
                                                   vol_size,
                                                   json=True)
        heketi_ops.heketi_volume_delete(h_client, h_server, vol_info['id'])
Example #8
    def _get_online_nodes(self):
        node_ids = heketi_ops.heketi_node_list(self.h_client, self.h_server)
        online_nodes = []
        for node_id in node_ids:
            node_info = heketi_ops.heketi_node_info(
                self.h_client, self.h_server, node_id, json=True)
            if (node_info["state"] == "online"
                    and node_info['cluster'] == self.cluster_id):
                online_nodes.append(
                    (node_info["zone"], node_info['hostnames']['storage']))
        return online_nodes
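
The helper returns (zone, storage_hostnames) pairs. A usage sketch that groups the online nodes by zone, e.g. to check that enough zones exist before testing zone-aware placement (the two-zone requirement is an illustrative assumption):

    from collections import defaultdict

    zones = defaultdict(list)
    for zone, hostnames in self._get_online_nodes():
        zones[zone].extend(hostnames)
    self.assertGreaterEqual(
        len(zones), 2, "need online nodes in at least two zones")
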
    def test_brick_multiplex_pids_with_diff_vol_option_values(self):
        """Test Brick Pid's should be same when values of vol options are diff
        """
        h_client, h_url = self.heketi_client_node, self.heketi_server_url
        # Disable heketi nodes except first three nodes
        h_nodes_list = heketi_node_list(h_client, h_url)
        for node_id in h_nodes_list[3:]:
            heketi_node_disable(h_client, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_client, h_url, node_id)

        # Create storage class with diff volumeoptions
        sc1 = self.create_storage_class(volumeoptions='user.heketi.abc 1')
        sc2 = self.create_storage_class(volumeoptions='user.heketi.abc 2')
        # Create PVC's with above SC
        pvc1 = self.create_and_wait_for_pvcs(sc_name=sc1)
        pvc2 = self.create_and_wait_for_pvcs(sc_name=sc2)

        # Get vol info and status
        vol_info1 = get_gluster_vol_info_by_pvc_name(self.node, pvc1[0])
        vol_info2 = get_gluster_vol_info_by_pvc_name(self.node, pvc2[0])
        vol_status1 = get_gluster_vol_status(vol_info1['gluster_vol_id'])
        vol_status2 = get_gluster_vol_status(vol_info2['gluster_vol_id'])

        # Verify vol options
        err_msg = ('Volume option "user.heketi.abc %s" does not match for '
                   'volume %s in gluster vol info')
        self.assertEqual(
            vol_info1['options']['user.heketi.abc'], '1',
            err_msg % (1, vol_info1['gluster_vol_id']))
        self.assertEqual(
            vol_info2['options']['user.heketi.abc'], '2',
            err_msg % (2, vol_info2['gluster_vol_id']))

        # Get the PID's and match them
        pids1 = set()
        for brick in vol_info1['bricks']['brick']:
            host, bname = brick['name'].split(":")
            pids1.add(vol_status1[host][bname]['pid'])

        pids2 = set()
        for brick in vol_info2['bricks']['brick']:
            host, bname = brick['name'].split(":")
            pids2.add(vol_status2[host][bname]['pid'])

        err_msg = ('PIDs of volumes %s and %s are expected to be the '
                   'same, but got different PIDs "%s" and "%s".' %
                   (vol_info1['gluster_vol_id'], vol_info2['gluster_vol_id'],
                    pids1, pids2))
        self.assertEqual(pids1, pids2, err_msg)
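
With brick multiplexing enabled, all bricks on a host are served by one shared glusterfsd process, which is why the two PID sets are expected to be equal. A sketch of an even stricter per-host check under that assumption (one gluster process per host):

    pids_by_host = {}
    for brick in vol_info1['bricks']['brick']:
        host, bname = brick['name'].split(":")
        pids_by_host.setdefault(host, set()).add(
            vol_status1[host][bname]['pid'])
    for host, pids in pids_by_host.items():
        self.assertEqual(
            len(pids), 1,
            "host %s serves bricks from %d processes" % (host, len(pids)))
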
Example #10
    def _get_vol_size(self):
        # Get available free space disabling redundant nodes
        min_free_space_gb = 5
        heketi_url = self.heketi_server_url
        node_ids = heketi_node_list(self.heketi_client_node, heketi_url)
        self.assertTrue(node_ids)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_ids:
            node_info = heketi_node_info(self.heketi_client_node,
                                         heketi_url,
                                         node_id,
                                         json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_node_disable(self.heketi_client_node, heketi_url,
                                          node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_node_enable, self.heketi_client_node,
                                heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if free_space < min_free_space:
                    out = heketi_device_disable(self.heketi_client_node,
                                                heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                if node_id not in nodes:
                    nodes[node_id] = []
                nodes[node_id].append(device['storage']['free'])

        # Skip test if nodes requirements are not met
        if (len(nodes) < 3
                or not all(map(
                    (lambda _list: len(_list) > 1), nodes.values()))):
            raise self.skipTest("Could not find 3 online nodes with, "
                                "at least, 2 online devices having free space "
                                "bigger than %dGb." % min_free_space_gb)

        # Calculate size of a potential distributed vol
        vol_size_gb = int(min(map(max, nodes.values())) / (1024**2)) + 1
        return vol_size_gb
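
A note on the size heuristic above: every replica of a brick must fit on a single device, so min(max(free per device)) across the three nodes is the largest size a single-brick-set volume could have, and adding 1 GiB forces heketi to distribute the volume over at least two brick sets. A worked example with hypothetical free-space figures (GiB per device):

    nodes = {'n1': [10, 8], 'n2': [12, 9], 'n3': [11, 7]}
    vol_size_gb = min(map(max, nodes.values())) + 1
    assert vol_size_gb == 11  # larger than n1's biggest device (10 GiB)
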
    def _get_free_space(self):
        """Get the total free space across all heketi devices, in GiB"""
        free_spaces = []
        heketi_node_id_list = heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        for node_id in heketi_node_id_list:
            node_info_dict = heketi_node_info(self.heketi_client_node,
                                              self.heketi_server_url,
                                              node_id, json=True)
            total_free_space = 0
            for device in node_info_dict["devices"]:
                total_free_space += device["storage"]["free"]
            free_spaces.append(total_free_space)
        total_free_space = int(math.floor(sum(free_spaces) / (1024**2)))
        return total_free_space
    def test_compare_node_count_with_db_check_info(self):
        """Validate nodes count using heketi db check"""

        # Check heketi db
        db_result = heketi_ops.heketi_db_check(self.heketi_client_node,
                                               self.heketi_server_url)
        db_nodes_count = db_result["nodes"]["total"]
        nodes_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 json=True)
        calculated_nodes_count = len(nodes_list)
        self.assertEqual(
            calculated_nodes_count, db_nodes_count,
            "Nodes count from 'DB check' (%s) doesn't match calculated nodes "
            "count (%s)." % (db_nodes_count, calculated_nodes_count))
    def test_heketi_node_info(self):
        """Test heketi node info operation
        """
        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        # List heketi node
        heketi_node_id_list = heketi_ops.heketi_node_list(h_client, h_server)
        self.assertTrue(heketi_node_id_list, "Node Id list is empty.")

        for node_id in heketi_node_id_list:
            node_info = heketi_ops.heketi_node_info(
                h_client, h_server, node_id, json=True)
            self.assertTrue(node_info, "Failed to retrieve the node info")
            self.assertEqual(
                node_info["id"], node_id,
                "Failed to match node ID. Exp: %s, Act: %s" % (
                    node_id, node_info["id"]))
Example #14
    def get_online_nodes_disable_redundant(self):
        """
        Find online nodes and disable n-3 nodes and return
        list of online nodes
        """
        node_list = heketi_node_list(self.heketi_client_node,
                                     self.heketi_server_url)
        self.assertTrue(node_list, "Failed to list heketi nodes")
        g.log.info("Successfully got the list of nodes")
        # Fetch online nodes from node list
        online_hosts = []

        for node in node_list:
            node_info = heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node, json=True)
            if node_info["state"] == "online":
                online_hosts.append(node_info)

        # Skip test if online node count is less than 3
        if len(online_hosts) < 3:
            raise self.skipTest(
                "This test can run only if online hosts are more than 2")
        # if we have n nodes, disable n-3 nodes
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            g.log.info("going to disable node id %s", node_id)
            heketi_node_disable(self.heketi_client_node,
                                self.heketi_server_url,
                                node_id)
            self.addCleanup(heketi_node_enable,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            node_id)

        for host in online_hosts[1:3]:
            found_online = False
            for device in host["devices"]:
                if device["state"].strip().lower() == "online":
                    found_online = True
                    break
            if not found_online:
                self.skipTest("no device online on node %s" % host["id"])

        return online_hosts
    def _available_disk_free_space(self):
        min_free_space_gb = 3
        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if len(nodes) > 2:
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)

            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            raise self.skipTest("Could not find 3 online nodes with, "
                                "at least, 1 online device having free space "
                                "bigger than %dGb." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))
        return available_size_gb
    def test_to_get_list_of_nodes(self):
        """
        Listing all nodes and compare the
        node listed in previous step
        """

        # List all nodes
        ip = []
        g.log.info("Listing the node id")
        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        g.log.info("Successfully listed the node")

        if (len(heketi_node_id_list) == 0):
            raise ExecutionError("Node list empty")

        for node_id in heketi_node_id_list:
            g.log.info("Retrieve the node info")
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            self.assertTrue(node_info, ("Failed to "
                            "retrieve the node info"))
            g.log.info("Successfully retrieved the node info %s" % node_id)
            ip.append(node_info["hostnames"]["storage"])

        # Compare the node listed in previous step
        hostname = []

        g.log.info("Get the pool list")
        list_of_pools = get_pool_list('auto_get_gluster_endpoint')
        self.assertTrue(list_of_pools, ("Failed to get the "
                        "pool list from gluster pods/nodes"))
        g.log.info("Successfully got the pool list from gluster pods/nodes")
        for pool in list_of_pools:
            hostname.append(pool["hostname"])

        if (len(heketi_node_id_list) != len(list_of_pools)):
            raise ExecutionError(
                "Heketi volume list %s is not equal "
                "to gluster volume list %s" % ((ip), (hostname)))
        g.log.info("The node IP's from node info and list"
                   " is : %s/n and pool list from gluster"
                   " pods/nodes is %s" % ((ip), (hostname)))
    def test_to_get_list_of_nodes(self):
        """
        Listing all nodes and compare the
        node listed in previous step
        """

        # List all nodes
        ip = []
        g.log.info("Listing the node id")
        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        g.log.info("Successfully listed the node")

        if (len(heketi_node_id_list) == 0):
            raise ExecutionError("Node list empty")

        for node_id in heketi_node_id_list:
            g.log.info("Retrieve the node info")
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            self.assertTrue(node_info, ("Failed to " "retrieve the node info"))
            g.log.info("Successfully retrieved the node info %s" % node_id)
            ip.append(node_info["hostnames"]["storage"])

        # Compare the node listed in previous step
        hostname = []

        g.log.info("Get the pool list")
        list_of_pools = get_pool_list('auto_get_gluster_endpoint')
        self.assertTrue(list_of_pools, ("Failed to get the "
                                        "pool list from gluster pods/nodes"))
        g.log.info("Successfully got the pool list from gluster pods/nodes")
        for pool in list_of_pools:
            hostname.append(pool["hostname"])

        if (len(heketi_node_id_list) != len(list_of_pools)):
            raise ExecutionError("Heketi volume list %s is not equal "
                                 "to gluster volume list %s" % ((ip),
                                                                (hostname)))
        g.log.info("The node IP's from node info and list"
                   " is : %s/n and pool list from gluster"
                   " pods/nodes is %s" % ((ip), (hostname)))
    def get_free_space_summary_devices(self):
        """
        Calculates free space across all devices
        """

        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        total_free_space = 0
        for node_id in heketi_node_id_list:
            node_info_dict = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            for device in node_info_dict["devices"]:
                total_free_space += (device["storage"]
                                     ["free"] / (1024 ** 2))

        return total_free_space
Example #19
    def _get_bricks_counts_and_device_name(self):
        """Fetch bricks count and device name from all the nodes"""
        h_client, h_url = self.heketi_client_node, self.heketi_server_url

        # Fetch bricks on the devices
        h_nodes = heketi_ops.heketi_node_list(h_client, h_url)

        node_details = {}
        for h_node in h_nodes:
            h_node_info = heketi_ops.heketi_node_info(h_client,
                                                      h_url,
                                                      h_node,
                                                      json=True)
            node_details[h_node] = [[], []]
            for device in h_node_info['devices']:
                node_details[h_node][0].append(len(device['bricks']))
                node_details[h_node][1].append(device['id'])
        return node_details
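
The helper returns a dict mapping each node id to a pair of parallel lists: per-device brick counts and the matching device ids. A small usage sketch under that assumption, finding the busiest device on every node:

    node_details = self._get_bricks_counts_and_device_name()
    for node_id, (brick_counts, device_ids) in node_details.items():
        busiest = device_ids[brick_counts.index(max(brick_counts))]
        g.log.info("node %s: device %s holds the most bricks (%s)",
                   node_id, busiest, max(brick_counts))
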
Example #20
    def setUp(self):
        super(TestArbiterVolumeCreateExpandDelete, self).setUp()
        self.node = self.ocp_master_node[0]
        if openshift_version.get_openshift_version() < "3.9":
            self.skipTest("Arbiter feature cannot be used on OCP older "
                          "than 3.9, because 'volumeoptions' for Heketi "
                          "is not supported there.")
        version = heketi_version.get_heketi_version(self.heketi_client_node)
        if version < '6.0.0-11':
            self.skipTest("heketi-client package %s does not support arbiter "
                          "functionality" % version.v_str)

        # Mark one of the Heketi nodes as arbiter-supported if none of
        # existent nodes or devices already enabled to support it.
        self.heketi_server_url = self.sc.get('resturl')
        arbiter_tags = ('required', 'supported')
        arbiter_already_supported = False

        self.node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in self.node_id_list[::-1]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                arbiter_already_supported = True
                break
            for device in node_info['devices'][::-1]:
                if device.get('tags', {}).get('arbiter') in arbiter_tags:
                    arbiter_already_supported = True
                    break
            else:
                continue
            break
        if not arbiter_already_supported:
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      self.node_id_list[0],
                                                      'supported')
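
For reference, the same "supported" tag could also be applied with the generic set_tags/rm_tags helpers shown in the tag-matching example further down this page (a sketch; it assumes those helpers accept an 'arbiter:supported' key:value string the same way they accept 'tier:it' there):

    set_tags(self.heketi_client_node, self.heketi_server_url,
             'node', self.node_id_list[0], 'arbiter:supported')
    self.addCleanup(rm_tags, self.heketi_client_node,
                    self.heketi_server_url, 'node',
                    self.node_id_list[0], 'arbiter')
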
    def test_to_retrieve_node_info(self):
        """
        List and retrieve node related info
        """

        # List all nodes
        g.log.info("Listing the node id")
        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
        g.log.info("Successfully listed the node")

        for node_id in heketi_node_id_list:
            g.log.info("Retrieve the node info")
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            self.assertTrue(node_info, ("Failed to " "retrieve the node info"))
            g.log.info("Successfully retrieved the node info %s" % node_id)
    def test_to_retrieve_node_info(self):
        """
        List and retrieve node related info
        """

        # List all nodes
        g.log.info("Listing the node id")
        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
        g.log.info("Successfully listed the node")

        for node_id in heketi_node_id_list:
            g.log.info("Retrieve the node info")
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            self.assertTrue(node_info, ("Failed to "
                            "retrieve the node info"))
            g.log.info("Successfully retrieved the node info %s" % node_id)
Example #23
    def setUp(self):
        super(TestDevPathMapping, self).setUp()
        self.node = self.ocp_master_node[0]
        self.h_node, self.h_server = (self.heketi_client_node,
                                      self.heketi_server_url)
        h_nodes_list = heketi_ops.heketi_node_list(self.h_node, self.h_server)
        h_node_count = len(h_nodes_list)
        if h_node_count < 3:
            self.skipTest(
                "At least 3 nodes are required, found {}".format(h_node_count))

        # Disable 4th and other nodes
        for node_id in h_nodes_list[3:]:
            self.addCleanup(heketi_ops.heketi_node_enable, self.h_node,
                            self.h_server, node_id)
            heketi_ops.heketi_node_disable(self.h_node, self.h_server, node_id)

        h_info = heketi_ops.heketi_node_info(self.h_node,
                                             self.h_server,
                                             h_nodes_list[0],
                                             json=True)
        self.assertTrue(
            h_info, "Failed to get the heketi node info for node id"
            " {}".format(h_nodes_list[0]))

        self.node_ip = h_info['hostnames']['storage'][0]
        self.node_hostname = h_info["hostnames"]["manage"][0]
        self.vm_name = node_ops.find_vm_name_by_ip_or_hostname(
            self.node_hostname)
        self.devices_list = [device['name'] for device in h_info["devices"]]

        # Get list of additional devices for one of the Gluster nodes
        for gluster_server in list(g.config["gluster_servers"].values()):
            if gluster_server['storage'] == self.node_ip:
                additional_device = gluster_server.get("additional_devices")
                if additional_device:
                    self.devices_list.extend(additional_device)

        # sort the devices list
        self.devices_list.sort()
Example #24
    def setUp(self):
        super(TestHeketiBrickEvict, self).setUp()

        version = heketi_version.get_heketi_version(self.heketi_client_node)
        if version < '9.0.0-14':
            self.skipTest(
                "heketi-client package {} does not support brick evict".format(
                    version.v_str))

        node_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                self.heketi_server_url)

        if len(node_list) > 3:
            return

        for node_id in node_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if len(node_info["devices"]) < 2:
                self.skipTest("does not have extra device/node to evict brick")
    def setUp(self):
        super(TestArbiterVolumeCreateExpandDelete, self).setUp()
        self.node = self.ocp_master_node[0]
        if get_openshift_version() < "3.9":
            self.skipTest("Arbiter feature cannot be used on OCP older "
                          "than 3.9, because 'volumeoptions' for Heketi "
                          "is not supported there.")
        version = heketi_version.get_heketi_version(self.heketi_client_node)
        if version < '6.0.0-11':
            self.skipTest("heketi-client package %s does not support arbiter "
                          "functionality" % version.v_str)

        # Mark one of the Heketi nodes as arbiter-supported if none of
        # existent nodes or devices already enabled to support it.
        self.heketi_server_url = self.sc.get('resturl')
        arbiter_tags = ('required', 'supported')
        arbiter_already_supported = False

        self.node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in self.node_id_list[::-1]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                arbiter_already_supported = True
                break
            for device in node_info['devices'][::-1]:
                if device.get('tags', {}).get('arbiter') in arbiter_tags:
                    arbiter_already_supported = True
                    break
            else:
                continue
            break
        if not arbiter_already_supported:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url,
                'node', self.node_id_list[0], 'supported')
    def test_volume_create_as_tag_matching_rule(self):
        """Validate settags operation only on one device in the cluster"""

        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        # Set tag on any one device in cluster
        node_list = heketi_node_list(h_node, h_server, json=True)
        node_info = heketi_node_info(h_node, h_server, node_list[0], json=True)
        device_id = node_info.get('devices', {})[0].get('id')
        set_tags(h_node, h_server, 'device', device_id, "tier:it")
        self.addCleanup(rm_tags, h_node, h_server, 'device', device_id, 'tier')

        # Volume creation should fail
        try:
            heketi_volume_create(
                h_node,
                h_server,
                2,
                gluster_volume_options="user.heketi.device-tag-match tier=it")
        except AssertionError as e:
            if ("Failed to allocate new volume" not in six.text_type(e)):
                raise
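
The creation above is expected to fail because "user.heketi.device-tag-match tier=it" restricts brick placement to devices tagged tier:it, and only one device carries the tag while a replica-3 volume needs three. A sketch of the success path, assuming three device ids were tagged first (tagged_device_ids is hypothetical, and it assumes heketi_volume_create accepts json=True together with gluster_volume_options):

    for device_id in tagged_device_ids:
        set_tags(h_node, h_server, 'device', device_id, "tier:it")
        self.addCleanup(
            rm_tags, h_node, h_server, 'device', device_id, 'tier')
    vol = heketi_volume_create(
        h_node, h_server, 2, json=True,
        gluster_volume_options="user.heketi.device-tag-match tier=it")
    self.addCleanup(heketi_volume_delete, h_node, h_server, vol['id'])
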
    def verify_free_space(self, free_space):
        # verify free space on nodes otherwise skip test case
        node_list = heketi_node_list(self.heketi_client_node,
                                     self.heketi_server_url)
        self.assertTrue(node_list)

        free_nodes = 0
        for node in node_list:
            node_info = heketi_node_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         node,
                                         json=True)

            if node_info['state'] != 'online':
                continue

            free_size = 0
            self.assertTrue(node_info['devices'])

            for device in node_info['devices']:
                if device['state'] != 'online':
                    continue
                # convert size kb into gb
                device_f_size = device['storage']['free'] / 1048576
                free_size += device_f_size

                if free_size > free_space:
                    free_nodes += 1
                    break

            if free_nodes >= 3:
                break

        if free_nodes < 3:
            self.skipTest("skip test case because required free space is "
                          "not available for creating BHV of size %s /n"
                          "only %s free space is available" %
                          (free_space, free_size))
    def verify_free_space(self, free_space):
        # verify free space on nodes otherwise skip test case
        node_list = heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        self.assertTrue(node_list)

        free_nodes = 0
        for node in node_list:
            node_info = heketi_node_info(
                self.heketi_client_node, self.heketi_server_url, node,
                json=True)

            if node_info['state'] != 'online':
                continue

            free_size = 0
            self.assertTrue(node_info['devices'])

            for device in node_info['devices']:
                if device['state'] != 'online':
                    continue
                # convert size kb into gb
                device_f_size = device['storage']['free'] / 1048576
                free_size += device_f_size

                if free_size > free_space:
                    free_nodes += 1
                    break

            if free_nodes >= 3:
                break

        if free_nodes < 3:
            self.skipTest("skip test case because required free space is "
                          "not available for creating BHV of size %s /n"
                          "only %s free space is available"
                          % (free_space, free_size))
    def get_devices_summary_free_space(self):
        """
        Sums free space across every device on each node and
        returns the total free space of all devices, in GiB
        """

        free_spaces = []

        heketi_node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in heketi_node_id_list:
            node_info_dict = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            total_free_space = 0
            for device in node_info_dict["devices"]:
                total_free_space += device["storage"]["free"]
            free_spaces.append(total_free_space)

        total_free_space = sum(free_spaces) / (1024 ** 2)
        total_free_space = int(math.floor(total_free_space))

        return total_free_space
Example #30
    def _get_bricks_and_device_details(self):
        """Fetch bricks count and device id list from the node where dev path
        operation is performed
        """

        h_client, h_url = self.heketi_client_node, self.heketi_server_url
        h_node_details = []

        # Fetch bricks on the devices
        h_nodes = heketi_ops.heketi_node_list(h_client, h_url)
        for h_node in h_nodes:
            h_node_info = heketi_ops.heketi_node_info(h_client,
                                                      h_url,
                                                      h_node,
                                                      json=True)
            h_node_hostname = h_node_info.get("hostnames").get("manage")[0]

            # Fetch bricks count and device list
            if h_node_hostname == self.node_hostname:
                h_node_details = [
                    [device['id'], len(device['bricks']), device['name']]
                    for device in h_node_info['devices']]
                return h_node_details, h_node
Example #31
    def test_arbiter_required_tag_on_node_or_devices_other_disabled(
            self, r_node_tag, d_node_tag, r_device_tag, d_device_tag):
        """Validate arbiter vol creation with node or device tag"""

        pvc_amount = 3

        # Get Heketi nodes info
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   self.heketi_server_url)

        # Disable n-3 nodes
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(self.heketi_client_node,
                                           self.heketi_server_url, node_id)
            self.addCleanup(heketi_ops.heketi_node_enable,
                            self.heketi_client_node, self.heketi_server_url,
                            node_id)

        # Set arbiter:required tags
        arbiter_node = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   node_id_list[0],
                                                   json=True)
        arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node,
            self.heketi_server_url,
            'node',
            node_id_list[0], ('required' if r_node_tag else None),
            revert_to=arbiter_node.get('tags', {}).get('arbiter'))
        for device in arbiter_node['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node,
                self.heketi_server_url,
                'device',
                device['id'], ('required' if r_device_tag else None),
                revert_to=device.get('tags', {}).get('arbiter'))

        # Set arbiter:disabled tags
        data_nodes_ip_addresses = []
        for node_id in node_id_list[1:]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if not any([
                    int(d['storage']['free']) > (pvc_amount * 1024**2)
                    for d in node_info['devices']
            ]):
                self.skipTest("Devices are expected to have more than "
                              "%sGb of free space" % pvc_amount)
            data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    'device',
                    device['id'], ('disabled' if d_device_tag else None),
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node,
                self.heketi_server_url,
                'node',
                node_id, ('disabled' if d_node_tag else None),
                revert_to=node_info.get('tags', {}).get('arbiter'))

        # Create PVCs and check that their bricks are correctly located
        self.create_storage_class(is_arbiter_vol=True)
        for i in range(pvc_amount):
            self.create_and_wait_for_pvc(1)

            # Get gluster volume info
            vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            arbiter_bricks, data_bricks = [], []
            for brick in vol_info['bricks']['brick']:
                if int(brick["isArbiter"]) == 1:
                    arbiter_bricks.append(brick["name"])
                else:
                    data_bricks.append(brick["name"])

            # Verify that all the arbiter bricks are located on
            # arbiter:required node and data bricks on all other nodes only.
            for arbiter_brick in arbiter_bricks:
                self.assertIn(
                    arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
            for data_brick in data_bricks:
                self.assertIn(
                    data_brick.split(':')[0], data_nodes_ip_addresses)
    def test_arbiter_required_tag_on_node_or_devices_other_disabled(
            self, node_with_tag):
        """Validate arbiter vol creation with required node or device tag"""

        pvc_amount = 3

        # Get Heketi nodes info
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        # Set arbiter:required tags
        arbiter_node = heketi_ops.heketi_node_info(
            self.heketi_client_node, self.heketi_server_url, node_id_list[0],
            json=True)
        arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
        self._set_arbiter_tag_with_further_revert(
            self.heketi_client_node, self.heketi_server_url, 'node',
            node_id_list[0], ('required' if node_with_tag else None),
            revert_to=arbiter_node.get('tags', {}).get('arbiter'))
        for device in arbiter_node['devices']:
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'device',
                device['id'], (None if node_with_tag else 'required'),
                revert_to=device.get('tags', {}).get('arbiter'))

        # Set arbiter:disabled tags
        data_nodes, data_nodes_ip_addresses = [], []
        for node_id in node_id_list[1:]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
                        for d in node_info['devices']]):
                self.skipTest(
                    "Devices are expected to have more than "
                    "%sGb of free space" % pvc_amount)
            data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
            for device in node_info['devices']:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'device',
                    device['id'], (None if node_with_tag else 'disabled'),
                    revert_to=device.get('tags', {}).get('arbiter'))
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, ('disabled' if node_with_tag else None),
                revert_to=node_info.get('tags', {}).get('arbiter'))
            data_nodes.append(node_info)

        # Create PVCs and check that their bricks are correctly located
        self.create_storage_class(is_arbiter_vol=True)
        for i in range(pvc_amount):
            self.create_and_wait_for_pvc(1)

            # Get gluster volume info
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            arbiter_bricks, data_bricks = [], []
            for brick in vol_info['bricks']['brick']:
                if int(brick["isArbiter"]) == 1:
                    arbiter_bricks.append(brick["name"])
                else:
                    data_bricks.append(brick["name"])

            # Verify that all the arbiter bricks are located on
            # arbiter:required node and data bricks on all other nodes only.
            for arbiter_brick in arbiter_bricks:
                self.assertIn(
                    arbiter_brick.split(':')[0], arbiter_nodes_ip_addresses)
            for data_brick in data_bricks:
                self.assertIn(
                    data_brick.split(':')[0], data_nodes_ip_addresses)
    def test_volume_expansion_no_free_space(self):
        """Validate volume expansion when there is no free space"""

        vol_size, expand_size, additional_devices_attached = None, 10, {}
        h_node, h_server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_server_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest("3 Heketi nodes are required.")

        # Disable 4th and other nodes
        for node_id in heketi_node_id_list[3:]:
            heketi_ops.heketi_node_disable(h_node, h_server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, h_node, h_server_url, node_id)

        # Prepare first 3 nodes
        smallest_size = None
        err_msg = ''
        for node_id in heketi_node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)

            # Disable second and other devices
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_id)
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if (smallest_size is None or
                    devices[0]["storage"]["free"] < smallest_size):
                smallest_size = devices[0]["storage"]["free"]
            for device in node_info["devices"][1:]:
                heketi_ops.heketi_device_disable(
                    h_node, h_server_url, device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    h_node, h_server_url, device["id"])

            # Gather info about additional devices
            additional_device_name = None
            for gluster_server in self.gluster_servers:
                gluster_server_data = self.gluster_servers_info[gluster_server]
                g_manage = gluster_server_data["manage"]
                g_storage = gluster_server_data["storage"]
                if not (g_manage in node_info["hostnames"]["manage"] or
                        g_storage in node_info["hostnames"]["storage"]):
                    continue
                additional_device_name = ((
                    gluster_server_data.get("additional_devices") or [''])[0])
                break

            if not additional_device_name:
                err_msg += ("No 'additional_devices' are configured for "
                            "'%s' node, which has following hostnames and "
                            "IP addresses: %s.\n" % (
                                node_id,
                                ', '.join(node_info["hostnames"]["manage"] +
                                          node_info["hostnames"]["storage"])))
                continue

            heketi_ops.heketi_device_add(
                h_node, h_server_url, additional_device_name, node_id)
            additional_devices_attached.update(
                {node_id: additional_device_name})

        # Schedule cleanup of the added devices
        for node_id in additional_devices_attached.keys():
            node_info = heketi_ops.heketi_node_info(
                h_node, h_server_url, node_id, json=True)
            for device in node_info["devices"]:
                if device["name"] != additional_devices_attached[node_id]:
                    continue
                self.addCleanup(self.detach_devices_attached, device["id"])
                break
            else:
                self.fail("Could not find ID for added device on "
                          "'%s' node." % node_id)

        if err_msg:
            self.skipTest(err_msg)

        # Temporary disable new devices
        self.disable_devices(additional_devices_attached)

        # Create volume and save info about it
        vol_size = int(smallest_size / (1024**2)) - 1
        creation_info = heketi_ops.heketi_volume_create(
            h_node, h_server_url, vol_size, json=True)
        volume_name, volume_id = creation_info["name"], creation_info["id"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete,
            h_node, h_server_url, volume_id, raise_on_error=False)

        volume_info_before_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        free_space_before_expansion = self.get_devices_summary_free_space()

        # Try to expand volume with not enough device space
        self.assertRaises(
            ExecutionError, heketi_ops.heketi_volume_expand,
            h_node, h_server_url, volume_id, expand_size)

        # Enable new devices to be able to expand our volume
        self.enable_devices(additional_devices_attached)

        # Expand volume and validate results
        heketi_ops.heketi_volume_expand(
            h_node, h_server_url, volume_id, expand_size, json=True)
        free_space_after_expansion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_before_expansion, free_space_after_expansion,
            "Free space not consumed after expansion of %s" % volume_id)
        num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
        self.get_brick_and_volume_status(volume_name)
        volume_info_after_expansion = heketi_ops.heketi_volume_info(
            h_node, h_server_url, volume_id, json=True)
        self.assertGreater(
            volume_info_after_expansion["size"],
            volume_info_before_expansion["size"],
            "Size of %s not increased" % volume_id)
        self.assertGreater(
            num_of_bricks_after_expansion, num_of_bricks_before_expansion)
        self.assertEqual(
            num_of_bricks_after_expansion % num_of_bricks_before_expansion, 0)

        # Delete volume and validate release of the used space
        heketi_ops.heketi_volume_delete(h_node, h_server_url, volume_id)
        free_space_after_deletion = self.get_devices_summary_free_space()
        self.assertGreater(
            free_space_after_deletion, free_space_after_expansion,
            "Free space not reclaimed after deletion of volume %s" % volume_id)
Example #34
    def test_create_volumes_enabling_and_disabling_heketi_devices(self):
        """Validate enable/disable of heketi device"""

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   self.heketi_server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            node_info_list.append(node_info)

        # Disable the 4th and any remaining nodes
        if len(node_id_list) > 3:
            for node_id in node_id_list[3:]:
                heketi_ops.heketi_node_disable(self.heketi_client_node,
                                               self.heketi_server_url, node_id)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node,
                                self.heketi_server_url, node_id)

        # Disable the 2nd and any remaining devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                                       self.heketi_server_url,
                                                       device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable,
                                self.heketi_client_node,
                                self.heketi_server_url, device["id"])

        # Create heketi volume
        out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        self.assertTrue(out, "Failed to create heketi volume of size 1")
        g.log.info("Successfully created heketi volume of size 1")
        device_id = out["bricks"][0]["device"]
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        out["bricks"][0]["volume"])

        # Disable device
        g.log.info("Disabling '%s' device" % device_id)
        out = heketi_ops.heketi_device_disable(self.heketi_client_node,
                                               self.heketi_server_url,
                                               device_id)
        self.assertTrue(out, "Failed to disable the device %s" % device_id)
        g.log.info("Successfully disabled device %s" % device_id)

        try:
            # Get device info
            g.log.info("Retrieving '%s' device info" % device_id)
            out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                                self.heketi_server_url,
                                                device_id,
                                                json=True)
            self.assertTrue(out, "Failed to get device info %s" % device_id)
            g.log.info("Successfully retrieved device info %s" % device_id)
            name = out["name"]
            self.assertEqual(out["state"].lower().strip(), "offline",
                             "Device %s is not in offline state." % name)
            g.log.info("Device %s is now offine" % name)

            # Try to create heketi volume
            g.log.info("Creating heketi volume: Expected to fail.")
            try:
                out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      1,
                                                      json=True)
            except AssertionError:
                g.log.info("Volume was not created as expected.")
            else:
                self.addCleanup(heketi_ops.heketi_volume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                out["bricks"][0]["volume"])
                msg = "Volume unexpectedly created. Out: %s" % out
                assert False, msg
        finally:
            # Enable the device back
            g.log.info("Enable '%s' device back." % device_id)
            out = heketi_ops.heketi_device_enable(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  device_id)
            self.assertTrue(out, "Failed to enable the device %s" % device_id)
            g.log.info("Successfully enabled device %s" % device_id)

        # Get device info
        out = heketi_ops.heketi_device_info(self.heketi_client_node,
                                            self.heketi_server_url,
                                            device_id,
                                            json=True)
        self.assertTrue(out, ("Failed to get device info %s" % device_id))
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        self.assertEqual(out["state"], "online",
                         "Device %s is not in online state." % name)

        # Create heketi volume of size 1
        out = heketi_ops.heketi_volume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              json=True)
        self.assertTrue(out, "Failed to create volume of size 1")
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        out["bricks"][0]["volume"])
        g.log.info("Successfully created volume of size 1")
        name = out["name"]

        # Get gluster volume info
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
        g.log.info("Successfully got the '%s' volume info." % name)
    def test_dynamic_provisioning_glusterfile_gluster_pod_or_node_failure(
            self):
        """Create glusterblock PVC when gluster pod or node is down."""
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        self.create_storage_class()

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create app POD with attached volume
        pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod', pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', pod_name)

        # Wait for the app POD to be up and running
        wait_for_pod_be_ready(self.node, pod_name, timeout=60, wait_step=2)

        # Run IO in background
        io_cmd = "oc rsh %s dd if=/dev/urandom of=%s bs=1000K count=900" % (
            pod_name, datafile_path)
        async_io = g.run_async(self.node, io_cmd, "root")

        # Check for containerized Gluster
        if self.is_containerized_gluster():
            # Pick up one of the hosts which stores PV brick (4+ nodes case)
            gluster_pod_data = get_gluster_pod_names_by_pvc_name(
                self.node, pvc_name)[0]

            # Delete the glusterfs POD from the chosen host and wait for
            # a new one to spawn
            oc_delete(self.node, 'pod', gluster_pod_data["pod_name"])
            cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
                   "grep -v Terminating | awk '{print $1}'") % (
                       gluster_pod_data["pod_hostname"])
            for w in Waiter(600, 15):
                new_gluster_pod_name = self.cmd_run(cmd)
                if new_gluster_pod_name:
                    break
            if w.expired:
                error_msg = "exceeded timeout, new gluster pod not created"
                g.log.error(error_msg)
                raise AssertionError(error_msg)
            g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
            wait_for_pod_be_ready(self.node, new_gluster_pod_name)
        else:
            pvc_hosting_node_ip = get_gluster_host_ips_by_pvc_name(
                self.node, pvc_name)[0]
            heketi_nodes = heketi_node_list(self.heketi_client_node,
                                            self.heketi_server_url)
            node_ip_for_reboot = None
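            # Match the PVC hosting node IP against heketi storage hostnames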
            for heketi_node in heketi_nodes:
                heketi_node_ip = heketi_node_info(
                    self.heketi_client_node,
                    self.heketi_server_url,
                    heketi_node,
                    json=True)["hostnames"]["storage"][0]
                if heketi_node_ip == pvc_hosting_node_ip:
                    node_ip_for_reboot = heketi_node_ip
                    break

            if not node_ip_for_reboot:
                raise AssertionError(
                    "Gluster node IP %s not matched with any heketi node: %s"
                    % (pvc_hosting_node_ip, ', '.join(heketi_nodes)))

            node_reboot_by_command(node_ip_for_reboot)

        # Check that async IO was not interrupted
        ret, out, err = async_io.async_communicate()
        self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
    def test_pv_resize_device_disabled(self):
        """Validate resize after disabling all devices except one"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Expansion size and the path where the volume is mounted
        expand_size, dir_path = 7, "/mnt"

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest(
                "At-least 3 gluster nodes are required to execute test case")

        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

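        # Fill the 2Gb PVC: a 1G write fits; the following 3G write must fail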
        self._write_file(pod_name, "file1", "1G", dir_path)

        with self.assertRaises(AssertionError):
            self._write_file(pod_name, "file2", "3G", dir_path)

        # On each of the first 3 nodes, disable all devices except the first.
        for node_id in heketi_node_id_list[:3]:
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_url,
                                                    node_id,
                                                    json=True)
            self.assertTrue(node_info, "Failed to get node info")
            devices = node_info.get("devices", None)
            self.assertTrue(devices,
                            "Node {} does not have devices".format(node_id))
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Skipping test as it expects to first device to"
                              " be enabled")
            for device in devices[1:]:
                heketi_ops.heketi_device_disable(h_node, h_url, device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable, h_node, h_url,
                                device["id"])

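        # Capture mount utilization before the resize for later comparison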
        usedsize_before_resize = self._get_mount_size(pod_name, dir_path)

        # Resize pvc
        resize_pvc(self.node, pvc_name, expand_size)
        verify_pvc_size(self.node, pvc_name, expand_size)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
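        # Brick count of the replicated volume must stay a multiple of 3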
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)

        # Re-enable the disabled devices, re-fetching node info for each node
        for node_id in heketi_node_id_list[:3]:
            node_info = heketi_ops.heketi_node_info(
                h_node, h_url, node_id, json=True)
            for device in node_info["devices"][1:]:
                heketi_ops.heketi_device_enable(h_node, h_url, device["id"])

        self._write_file(pod_name, "file3", "3G", dir_path)

        usedsize_after_resize = self._get_mount_size(pod_name, dir_path)
        self.assertGreater(
            int(usedsize_before_resize.strip('%')),
            int(usedsize_after_resize.strip('%')),
            "Mount utilization before resize ({}) should be greater "
            "than after ({})".format(
                usedsize_before_resize, usedsize_after_resize))

        self._write_file(pod_name, "file4", "1024", dir_path)

        # Validate dist-rep volume with 6 bricks after pv resize
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(
            6, len(vol_info['bricks']['brick']),
            "Expected bricks count is 6, but actual brick count is {}".format(
                len(vol_info['bricks']['brick'])))
    def test_node_state(self):
        """
        Test node enable and disable functionality.

        If we have 4 gluster servers and disable 1 of the 4 nodes in heketi
        and then create a volume, the volume creation should succeed.

        If we disable 2 of the 4 nodes via heketi-cli and create a volume,
        the volume creation should fail.

        If we enable one gluster server back and create a volume,
        the volume creation should succeed.
        """
        g.log.info("Disable node in heketi")
        node_list = heketi_node_list(self.heketi_client_node,
                                     self.heketi_server_url)
        self.assertTrue(node_list, "Failed to list heketi nodes")
        g.log.info("Successfully got the list of nodes")
        online_hosts = self.get_online_nodes(node_list)

        if len(online_hosts) < 3:
            self.skipTest(
                "This test requires at least 3 online gluster nodes")
        # if we have n nodes, disable n-3 nodes
        for node_info in online_hosts[3:]:
            node_id = node_info["id"]
            g.log.info("going to disable node id %s", node_id)
            self.disable_node(node_id)
            self.addCleanup(self.enable_node, node_id)

        vol_size = 1
        # create volume when 3 nodes are online
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url, vol_size,
                                        json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])

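        # Disable one more node, leaving only 2 online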
        node_id = online_hosts[0]['id']
        g.log.info("going to disable node id %s", node_id)
        self.disable_node(node_id)
        self.addCleanup(self.enable_node, node_id)

        # try to create a volume, volume creation should fail
        ret, out, err = heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url,
            vol_size, raw_cli_output=True)
        if ret == 0:
            out_json = json.loads(out)
            self.addCleanup(
                heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, out_json["id"])
        self.assertNotEqual(ret, 0,
                            ("Volume creation did not fail. ret: %s, "
                             "out: %s, err: %s" % (ret, out, err)))

        g.log.info("Volume creation failed as expected, err- %s", err)
        # enable node
        self.enable_node(node_id)

        # create volume when node is enabled
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url, vol_size,
                                        json=True)
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'])
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space, disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, heketi_url, node_id, json=True)
            if (node_info['state'].lower() != 'online' or
                    not node_info['devices']):
                continue
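            # Disable extra nodes once 3 candidates have been gathered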
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(
                    self.heketi_client_node, heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_node_enable,
                    self.heketi_client_node, heketi_url, node_id)
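            # Keep one sufficiently-free device per node; disable the rest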
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(
                        heketi_ops.heketi_device_enable,
                        self.heketi_client_node, heketi_url, device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            self.skipTest(
                "Could not find 3 online nodes, each with at least 1 online "
                "device having more than %dGb of free space."
                % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attached PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node, obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = (
                "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(
                self.node, obj_name=pvc_name,
                event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0, "Failed to write data on the expanded PVC")
Example #39
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space, disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
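            # Disable extra nodes once 3 candidates have been gathered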
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
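            # Keep one sufficiently-free device per node; disable the rest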
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            self.skipTest("Could not find 3 online nodes, each with at "
                          "least 1 online device having more than %dGb "
                          "of free space." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attached PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
Example #40
    def test_heketi_device_removal_with_insuff_space(self):
        """Validate heketi with device removal insufficient space"""

        # Keep 3 nodes with 2 devices each; disable all other nodes and devices
        min_free_space_gb = 5
        min_free_space = min_free_space_gb * 1024**2
        heketi_url = self.heketi_server_url
        heketi_node = self.heketi_client_node
        nodes = {}

        node_ids = heketi_node_list(heketi_node, heketi_url)
        self.assertTrue(node_ids)
        for node_id in node_ids:
            node_info = heketi_node_info(heketi_node,
                                         heketi_url,
                                         node_id,
                                         json=True)
            if (node_info["state"].lower() != "online"
                    or not node_info["devices"]):
                continue
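            # Keep only the first 3 suitable nodes enabled; disable the rest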
            if len(nodes) > 2:
                heketi_node_disable(heketi_node, heketi_url, node_id)
                self.addCleanup(heketi_node_enable, heketi_node, heketi_url,
                                node_id)
                continue
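            # Track up to 2 sufficiently-free devices per node; disable the others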
            for device in node_info["devices"]:
                if device["state"].lower() != "online":
                    continue
                free_space = device["storage"]["free"]
                if node_id not in nodes:
                    nodes[node_id] = []
                if (free_space < min_free_space or len(nodes[node_id]) > 1):
                    heketi_device_disable(heketi_node, heketi_url,
                                          device["id"])
                    self.addCleanup(heketi_device_enable, heketi_node,
                                    heketi_url, device["id"])
                    continue
                nodes[node_id].append({
                    "device_id": device["id"],
                    "free": free_space
                })

        # Skip test if nodes requirements are not met
        if (len(nodes) < 3
                or not all(map((lambda _l: len(_l) > 1), nodes.values()))):
            self.skipTest(
                "Could not find 3 online nodes, each with 2 online devices "
                "having more than %dGb of free space." % min_free_space_gb)

        # Calculate size of a potential distributed vol
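        # Pick the larger of the first node's two devices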
        if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
            index = 0
        else:
            index = 1
        vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024**2)) + 1
        device_id = nodes[node_ids[0]][index]["device_id"]

        # Create a volume sized to consume more space than
        # the smaller disk provides
        h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        try:
            self.create_heketi_volume_with_name_and_wait(h_volume_name,
                                                         vol_size_gb,
                                                         json=True)
        except Exception as e:
            # NOTE: rare situation when we need to decrease size of a volume.
            g.log.info("Failed to create '%s'Gb volume. Trying to create "
                       "another one, smaller by 1Gb." % vol_size_gb)

            if not ('more required' in str(e) and
                    ('Insufficient suitable allocatable extents for '
                     'logical volume' in str(e))):
                raise

            vol_size_gb -= 1
            self.create_heketi_volume_with_name_and_wait(h_volume_name,
                                                         vol_size_gb,
                                                         json=True)

        # Try to 'remove' the bigger Heketi disk expecting an error,
        # because there is no space on the smaller disk to relocate bricks to
        heketi_device_disable(heketi_node, heketi_url, device_id)
        self.addCleanup(heketi_device_enable, heketi_node, heketi_url,
                        device_id)
        try:
            self.assertRaises(AssertionError, heketi_device_remove,
                              heketi_node, heketi_url, device_id)
        except Exception:
            self.addCleanup(heketi_device_disable, heketi_node, heketi_url,
                            device_id)
            raise
    def test_create_volumes_enabling_and_disabling_heketi_devices(self):
        """Validate enable/disable of heketi device"""

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            node_info_list.append(node_info)

        # Disable the 4th and any remaining nodes
        if len(node_id_list) > 3:
            for node_id in node_id_list[3:]:
                heketi_ops.heketi_node_disable(
                    self.heketi_client_node, self.heketi_server_url, node_id)
                self.addCleanup(
                    heketi_ops.heketi_node_enable, self.heketi_client_node,
                    self.heketi_server_url, node_id)

        # Disable the 2nd and any remaining devices on the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in node_info["devices"][1:]:
                out = heketi_ops.heketi_device_disable(
                    self.heketi_client_node, self.heketi_server_url,
                    device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    self.heketi_client_node, self.heketi_server_url,
                    device["id"])

        # Create heketi volume
        out = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(out, "Failed to create heketi volume of size 1")
        g.log.info("Successfully created heketi volume of size 1")
        device_id = out["bricks"][0]["device"]
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["bricks"][0]["volume"])

        # Disable device
        g.log.info("Disabling '%s' device" % device_id)
        out = heketi_ops.heketi_device_disable(
            self.heketi_client_node, self.heketi_server_url, device_id)
        self.assertTrue(out, "Failed to disable the device %s" % device_id)
        g.log.info("Successfully disabled device %s" % device_id)

        try:
            # Get device info
            g.log.info("Retrieving '%s' device info" % device_id)
            out = heketi_ops.heketi_device_info(
                self.heketi_client_node, self.heketi_server_url,
                device_id, json=True)
            self.assertTrue(out, "Failed to get device info %s" % device_id)
            g.log.info("Successfully retrieved device info %s" % device_id)
            name = out["name"]
            if out["state"].lower().strip() != "offline":
                raise exceptions.ExecutionError(
                    "Device %s is not in offline state." % name)
            g.log.info("Device %s is now offine" % name)

            # Try to create heketi volume
            g.log.info("Creating heketi volume: Expected to fail.")
            try:
                out = heketi_ops.heketi_volume_create(
                    self.heketi_client_node, self.heketi_server_url, 1,
                    json=True)
            except exceptions.ExecutionError:
                g.log.info("Volume was not created as expected.")
            else:
                self.addCleanup(
                    heketi_ops.heketi_volume_delete, self.heketi_client_node,
                    self.heketi_server_url, out["bricks"][0]["volume"])
                msg = "Volume unexpectedly created. Out: %s" % out
                assert False, msg
        finally:
            # Enable the device back
            g.log.info("Enable '%s' device back." % device_id)
            out = heketi_ops.heketi_device_enable(
                self.heketi_client_node, self.heketi_server_url, device_id)
            self.assertTrue(out, "Failed to enable the device %s" % device_id)
            g.log.info("Successfully enabled device %s" % device_id)

        # Get device info
        out = heketi_ops.heketi_device_info(
            self.heketi_client_node, self.heketi_server_url, device_id,
            json=True)
        self.assertTrue(out, ("Failed to get device info %s" % device_id))
        g.log.info("Successfully retrieved device info %s" % device_id)
        name = out["name"]
        if out["state"] != "online":
            raise exceptions.ExecutionError(
                "Device %s is not in online state." % name)

        # Create heketi volume of size 1
        out = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(out, "Failed to create volume of size 1")
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, out["bricks"][0]["volume"])
        g.log.info("Successfully created volume of size 1")
        name = out["name"]

        # Get gluster volume info
        vol_info = get_volume_info('auto_get_gluster_endpoint', volname=name)
        self.assertTrue(vol_info, "Failed to get '%s' volume info." % name)
        g.log.info("Successfully got the '%s' volume info." % name)