    def test_try_to_create_sc_with_duplicated_name(self):
        """Verify SC creation fails with duplicate name"""
        sc_name = "test-sc-duplicated-name-" + utils.get_random_str()
        self.create_storage_class(sc_name=sc_name)
        self.create_and_wait_for_pvc(sc_name=sc_name)
        with self.assertRaises(AssertionError):
            self.create_storage_class(sc_name=sc_name)
    def setUp(self):
        """Deploys, verifies and adds to the cleanup method the resources
           required by the test cases
        """
        super(GlusterStabilityTestSetup, self).setUp()
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
    def test_100gb_block_pvc_create_and_delete_twice(self):
        """Validate creation and deletion of a block volume of size 100GB"""
        # Define the PVC size, BHV size and free space required for a 100GB
        # block PVC
        size, bhv_size, required_space = 100, 103, 309
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        prefix = 'autotest-pvc-{}'.format(utils.get_random_str(size=5))

        # Skip test if required free space is not available
        free_space = get_total_free_space(self.heketi_client_node,
                                          self.heketi_server_url)[0]
        if free_space < required_space:
            self.skipTest("Available free space {} is less than the required "
                          "free space {}".format(free_space, required_space))

        # Create block hosting volume of 103GB required for 100GB block PVC
        bhv = heketi_volume_create(h_node,
                                   h_url,
                                   bhv_size,
                                   block=True,
                                   json=True)['id']
        self.addCleanup(heketi_volume_delete, h_node, h_url, bhv)

        for _ in range(2):
            # Create PVC of size 100GB
            pvc_name = self.create_and_wait_for_pvc(pvc_size=size,
                                                    pvc_name_prefix=prefix)
            match_pvc_and_pv(self.node, prefix)

            # Delete the PVC
            oc_delete(self.node, 'pvc', pvc_name)
            wait_for_resource_absence(self.node, 'pvc', pvc_name)
    def test_arbiter_volume_delete_using_pvc(self):
        """Test arbiter volume deletion using a PVC when the volume is not
           mounted on an app pod
        """
        prefix = "autotest-%s" % utils.get_random_str()

        # Create sc with gluster arbiter info
        sc_name = self.create_storage_class(vol_name_prefix=prefix,
                                            is_arbiter_vol=True)

        # Create PVC and wait for it to be in 'Bound' state
        pvc_name = self.create_and_wait_for_pvc(pvc_name_prefix=prefix,
                                                sc_name=sc_name)

        # Get vol info
        gluster_vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)

        # Verify arbiter volume properties
        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            gluster_vol_info)

        # Get volume ID
        gluster_vol_id = gluster_vol_info["gluster_vol_id"]

        # Delete the pvc
        openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
        openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Check the heketi volume list after the PVC is deleted
        g.log.info("List heketi volumes")
        heketi_volumes = heketi_ops.heketi_volume_list(self.heketi_client_node,
                                                       self.heketi_server_url)

        err_msg = "Failed to delete heketi volume by prefix %s" % prefix
        self.assertNotIn(prefix, heketi_volumes, err_msg)

        # Verify that the gluster volume is absent
        get_gluster_vol_info = volume_ops.get_volume_info(
            "auto_get_gluster_endpoint", gluster_vol_id)
        err_msg = "Failed to delete gluster volume %s" % gluster_vol_id
        self.assertFalse(get_gluster_vol_info, err_msg)

        # Verify that the bricks and LVs are absent
        for brick in gluster_vol_info['bricks']['brick']:
            gluster_node_ip, brick_name = brick["name"].split(":")

            with self.assertRaises(exceptions.ExecutionError):
                cmd = "df %s" % brick_name
                openshift_ops.cmd_run_on_gluster_pod_or_node(
                    self.node, cmd, gluster_node_ip)

            with self.assertRaises(exceptions.ExecutionError):
                lv_match = re.search(BRICK_REGEX, brick["name"])
                if lv_match:
                    cmd = "lvs %s" % lv_match.group(2).strip()
                    openshift_ops.cmd_run_on_gluster_pod_or_node(
                        self.node, cmd, gluster_node_ip)
    def setUp(self):
        """Deploys, verifies and adds to the cleanup method the resources
           required by the test cases
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2', self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # use a PVC size of 1 by default
        self.pvcsize = 1

        # create 10 PVCs by default
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces())

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation, its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(self.oc_node,
                                             self.dc_name,
                                             timeout=180,
                                             wait_step=3)
        wait_for_pod_be_ready(self.oc_node,
                              self.pod_name,
                              timeout=180,
                              wait_step=3)
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create PVCs to test
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(self.oc_node,
                                          self.sc_name,
                                          pvc_name_prefix=self.prefix,
                                          pvc_size=self.pvcsize)
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(wait_for_resource_absence,
                            self.oc_node,
                            "pvc",
                            test_pvc_name,
                            timeout=600,
                            interval=10)

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
    def test_dynamic_provisioning_block_vol_with_custom_prefix(self):
        """Verify creation of block volume with custom prefix
        """
        node = self.ocp_master_node[0]
        prefix = "autotest-{}".format(utils.get_random_str())

        # cmd to get available space
        cmd_get_free_space = "df -h | grep '/mnt'| awk '{{print $4}}'"

        # cmd to create a 100M file
        cmd_run_io = 'dd if=/dev/zero of=/mnt/testfile bs=1024 count=102400'

        # Create sc with prefix
        sc_name = self.create_storage_class(sc_name_prefix=prefix,
                                            create_vol_name_prefix=True,
                                            vol_name_prefix=prefix)

        # Create pvc and wait for it to be in bound state
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)

        # Verify blockvolume list with prefix
        h_block_vol = heketi_blockvolume_list_by_name_prefix(
            self.heketi_client_node, self.heketi_server_url, prefix)
        self.assertIsNotNone(
            h_block_vol,
            "Failed to find blockvolume with prefix {}".format(prefix))
        self.assertTrue(
            h_block_vol[0][2].startswith(prefix),
            "Failed to create blockvolume with the prefix {}".format(prefix))

        # Create app pod
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        err_msg = ("Failed to get the free space for the mount point of the "
                   "app pod {} with error {}")
        # Get free space of app pod before IO run
        _, free_space_before, err = oc_rsh(node, pod_name, cmd_get_free_space)
        self.assertTrue(free_space_before, err_msg.format(pod_name, err))

        # Running IO on the app pod
        ret, _, err = oc_rsh(node, pod_name, cmd_run_io)
        self.assertFalse(
            ret, "Failed to run the IO with the error msg {}".format(err))

        # Get free space of app pod after IO run
        _, free_space_after, err = oc_rsh(node, pod_name, cmd_get_free_space)
        self.assertTrue(free_space_after, err_msg.format(pod_name, err))
        self.assertGreaterEqual(
            free_space_before, free_space_after,
            "Expected free space before IO ({}) to be greater than or equal"
            " to free space after IO ({}) as a 100M file was created".format(
                free_space_before, free_space_after))
    def setUp(self):
        """Deploys, verifies and adds to the cleanup method the resources
           required by the test cases
        """
        self.oc_node = self.ocp_master_node[0]
        self.prefix = "autotest-%s" % utils.get_random_str()
        _storage_class = self.storage_classes.get(
            'storage_class2',
            self.storage_classes.get('block_storage_class'))
        self.provisioner = _storage_class["provisioner"]
        self.restsecretnamespace = _storage_class["restsecretnamespace"]
        self.restuser = _storage_class["restuser"]
        self.resturl = _storage_class["resturl"]

        # use a PVC size of 1 by default
        self.pvcsize = 1

        # create 10 PVCs by default
        self.pvccount = 10

        # create gluster block storage class, PVC and user app pod
        self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
            self.deploy_resouces()
        )

        # verify storage class
        oc_get_yaml(self.oc_node, "sc", self.sc_name)

        # verify pod creation, its state, and get the pod name
        self.pod_name = get_pod_name_from_dc(
            self.oc_node, self.dc_name, timeout=180, wait_step=3
        )
        wait_for_pod_be_ready(
            self.oc_node, self.pod_name, timeout=180, wait_step=3
        )
        verify_pvc_status_is_bound(self.oc_node, self.pvc_name)

        # create PVCs to test
        self.pvc_list = []
        for pvc in range(self.pvccount):
            test_pvc_name = oc_create_pvc(
                self.oc_node, self.sc_name,
                pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
            )
            self.pvc_list.append(test_pvc_name)
            self.addCleanup(
                wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
                timeout=600, interval=10
            )

        for pvc_name in self.pvc_list:
            self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
    def test_heketi_node_add_with_invalid_cluster(self):
        """Test heketi node add operation with invalid cluster id"""
        storage_hostname, cluster_id = None, utils.get_random_str(size=33)
        try:
            storage_hostname, _ = self.add_heketi_node_to_cluster(cluster_id)
        except AssertionError as e:
            err_msg = ("validation failed: cluster: %s is not a valid UUID" %
                       cluster_id)
            if err_msg not in six.text_type(e):
                raise

        err_msg = ("Unexpectedly node %s got added to cluster %s" % (
            storage_hostname, cluster_id))
        self.assertFalse(storage_hostname, err_msg)
    def test_block_provisioner_on_multiple_clusters(self):
        """Check block provisioner and verify on multiple clusters
        """
        # Skip test if registry project is not present
        self.registry_sc = self.storage_classes.get(
            'registry_block_storage_class')
        if not self.registry_sc:
            self.skipTest("Config file doesn't have key "
                          "openshift.dynamic_provisioning.storage_classes")

        self._registry_heketi_server_url = self.registry_sc.get('resturl')
        self._registry_project_name = self.registry_sc.get(
            'restsecretnamespace')
        if not (self._registry_heketi_server_url
                and self._registry_project_name):
            self.skipTest(
                "Config file doesn't have key "
                "'storage_classes.registry_block_storage_class.resturl' or "
                "'storage_classes.registry_block_storage_class"
                ".restsecretnamespace'")

        size = 1
        prefix = 'autotest-pvc-{}'.format(utils.get_random_str(size=5))

        # Create PVC in default namespace and verify multipath
        pvc_name = self.create_and_wait_for_pvc(pvc_size=size,
                                                pvc_name_prefix=prefix)
        _, pod_name = self.create_dc_with_pvc(pvc_name)
        match_pvc_and_pv(self.node, prefix)
        self.verify_iscsi_sessions_and_multipath(pvc_name,
                                                 pod_name,
                                                 rtype='pod')

        # Change project namespace
        self.addCleanup(switch_oc_project, self.node,
                        self.storage_project_name)
        switch_oc_project(self.node, self._registry_project_name)

        # Create PVC in registry namespace and verify multipath
        self.sc_name = self.create_storage_class(glusterfs_registry=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=self.sc_name,
                                                pvc_size=size,
                                                pvc_name_prefix=prefix)
        _, pod_name = self.create_dc_with_pvc(pvc_name)
        self.verify_iscsi_sessions_and_multipath(
            pvc_name,
            pod_name,
            rtype='pod',
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)
    def test_heketi_volume_mount(self):
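        """Verify that a heketi volume can be mounted via glusterfs-fuse and
           that data written through the mount point lands on the gluster
           bricks
        """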
        self.node = self.ocp_master_node[0]
        try:
            cmd_run('rpm -q glusterfs-fuse', self.node)
        except AssertionError:
            self.skipTest("glusterfs-fuse package is not present on node %s" %
                          self.node)

        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Create volume
        vol_info = heketi_volume_create(h_node, h_url, 2, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, vol_info['id'])

        mount_point = vol_info['mount']['glusterfs']['device']
        mount_dir = '/mnt/dir-%s' % utils.get_random_str()
        mount_cmd = 'mount -t glusterfs %s %s' % (mount_point, mount_dir)

        # Create directory to mount volume
        cmd_run('mkdir %s' % mount_dir, self.node)
        self.addCleanup(cmd_run, 'rm -rf %s' % mount_dir, self.node)

        # Mount volume
        cmd_run(mount_cmd, self.node)
        self.addCleanup(cmd_run, 'umount %s' % mount_dir, self.node)

        # IO command to make sure the mount point works
        _file = 'file'
        run_io_cmd = ('dd if=/dev/urandom of=%s/%s bs=4k count=1000' %
                      (mount_dir, _file))

        # Run the IO and verify the size of the volume
        cmd_run(run_io_cmd, self.node)
        size = cmd_run('df -kh --output=size %s | tail -1' % mount_dir,
                       self.node).strip()
        self.assertEqual('2.0G', size)

        # Verify file on gluster vol bricks
        for brick in vol_info['bricks']:
            node_id = brick['node']
            node_info = heketi_node_info(h_node, h_url, node_id, json=True)
            brick_host = node_info['hostnames']['storage'][0]
            cmd_run_on_gluster_pod_or_node(self.node,
                                           'ls %s/%s' % (brick['path'], _file),
                                           brick_host)
    def test_heketi_volume_create_mutiple_sizes(self):
        """Validate creation of heketi volumes with different sizes"""
        prefix = "autotest-{}".format(utils.get_random_str())
        sizes, required_space = [15, 50, 100], 495
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Skip test if space is not available
        available_space = get_total_free_space(h_node, h_url)[0]
        if required_space > available_space:
            self.skipTest("Required space {} greater than the available space "
                          "{}".format(required_space, available_space))

        # Create three volumes, each with a different size
        for size in sizes:
            self.create_heketi_volume_with_name_and_wait(
                "{}-{}".format(prefix, size), size, json=True)
    def test_block_host_volume_delete_block_volume_delete(self):
        """Validate block volume and BHV removal using heketi"""
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skipping the test since the number of nodes "
                          "online is less than 3")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        if free_space_available < default_bhv_size:
            self.skipTest("Skipping the test since free_space_available %s "
                          "is less than the default_bhv_size %s" %
                          (free_space_available, default_bhv_size))
        h_volume_name = ("autotests-heketi-volume-%s" % utils.get_random_str())
        block_host_create_info = self.create_heketi_volume_with_name_and_wait(
            h_volume_name, default_bhv_size, json=True, block=True)

        block_vol_size = block_host_create_info["blockinfo"]["freesize"]
        block_hosting_vol_id = block_host_create_info["id"]
        block_vol_info = {"blockhostingvolume": "init_value"}
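        # Keep creating block volumes until one of them lands on the newly
        # created block hosting volume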
        while (block_vol_info['blockhostingvolume'] != block_hosting_vol_id):
            block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                  self.heketi_server_url,
                                                  block_vol_size,
                                                  json=True,
                                                  ha=3,
                                                  auth=True)
            self.addCleanup(heketi_blockvolume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            block_vol["id"],
                            raise_on_error=True)
            block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                     self.heketi_server_url,
                                                     block_vol["id"],
                                                     json=True)
        bhv_info = heketi_volume_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      block_hosting_vol_id,
                                      json=True)
        self.assertIn(block_vol_info["id"],
                      bhv_info["blockinfo"]["blockvolume"])
    def test_block_volume_create_with_name(self):
        """Validate creation of block volume with name"""
        vol_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                              self.heketi_server_url,
                                              1,
                                              name=vol_name,
                                              json=True)
        self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
                        self.heketi_server_url, block_vol["id"])

        # check volume name through heketi-cli
        block_vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 block_vol["id"],
                                                 json=True)
        self.assertEqual(block_vol_info["name"], vol_name,
                         "Block volume names are not the same: %s and %s" %
                         (block_vol_info["name"], vol_name))
    def test_replica_volume_expand(self):
        """
        Test expansion of a replica volume
        """
        h_node, h_server = self.heketi_client_node, self.heketi_server_url
        volume_name = ("autotests-heketi-volume-{}".format(
            utils.get_random_str()))
        volume_size = 10
        creation_info = self.create_heketi_volume_with_name_and_wait(
            volume_name, volume_size, json=True, raise_on_cleanup_error=False)
        volume_id = creation_info["id"]
        volume_info = heketi_ops.heketi_volume_info(h_node,
                                                    h_server,
                                                    volume_id,
                                                    json=True)

        # Get gluster volume info
        gluster_vol = volume_ops.get_volume_info('auto_get_gluster_endpoint',
                                                 volname=volume_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume {} info".format(volume_name))
        vol_name = gluster_vol[volume_name]
        self.assertEqual(
            vol_name['replicaCount'], "3",
            "Replica count for volume {} is {}, expected 3".format(
                volume_name, vol_name['replicaCount']))

        expand_size = 5
        heketi_ops.heketi_volume_expand(h_node, h_server, volume_id,
                                        expand_size)
        volume_info = heketi_ops.heketi_volume_info(h_node,
                                                    h_server,
                                                    volume_id,
                                                    json=True)
        expected_size = volume_size + expand_size
        self.assertEqual(
            volume_info['size'], expected_size,
            "Volume Expansion failed, Expected Size: {}, Actual "
            "Size: {}".format(str(expected_size), str(volume_info['size'])))

        self.get_brick_and_volume_status(volume_name)
        self.get_rebalance_status(volume_name)
    def test_create_volume_with_same_name(self):
        """Test create two volumes with the same name and verify that 2nd one
        is failing with the appropriate error.
        """
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        vol_name = "autovol-%s" % utils.get_random_str()

        # Create volume for the first time
        vol_info = heketi_ops.heketi_volume_create(h_node,
                                                   h_url,
                                                   size=1,
                                                   name=vol_name,
                                                   json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete, h_node, h_url,
                        vol_info['id'])

        vol_info_new = None
        try:
            # Try to create volume for 2nd time
            vol_info_new = heketi_ops.heketi_volume_create(h_node,
                                                           h_url,
                                                           size=1,
                                                           name=vol_name,
                                                           json=True)
            self.addCleanup(heketi_ops.heketi_volume_delete, h_node, h_url,
                            vol_info_new['id'])
        except AssertionError as err:
            # Verify msg in error
            msg = "Volume name '%s' already in use" % vol_name
            if msg not in six.text_type(err):
                raise

        # Raise exception if there is no error raised by heketi
        msg = ('Volumes %s and %s were unexpectedly created with the same '
               'name.' % (vol_info, vol_info_new))
        self.assertFalse(vol_info_new, msg)
    def test_heketi_manual_cleanup_operation_in_bhv(self):
        """Validate heketi db cleanup will resolve the mismatch
           in the free size of the block hosting volume with failed
           block device create operations.
        """
        bhv_size_before, bhv_size_after, vol_count = [], [], 5
        ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes id list
        node_id_list = heketi_node_list(h_node, h_url)

        # Disable the 4th and subsequent nodes
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_node, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_url, node_id)

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        default_bhv_size = get_default_block_hosting_volume_size(
            h_node, self.heketi_dc_name)
        if default_bhv_size < h_volume_size:
            h_volume_name = "autotest-{}".format(utils.get_random_str())
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                free_space_available,
                raise_on_cleanup_error=False,
                block=True,
                json=True)
            free_space_available -= (
                int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
            h_volume_size = int(free_space_available / vol_count)

        # Get BHV list
        h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
        self.assertTrue(h_bhv_list, "Failed to get the BHV list")

        # Get BHV size
        for bhv in h_bhv_list:
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bhv_vol_size_before = vol_info.get("freesize")
            bhv_size_before.append(bhv_vol_size_before)

        # Kill Tcmu-runner service
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, g_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, g_node)

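        # Temporarily replace command.cmd_run with g.run_async (via the
        # mock.patch below) so heketi_blockvolume_create runs in the
        # background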
        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        # Create stale block volumes asynchronously
        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    heketi_blockvolume_create(h_node,
                                              h_url,
                                              h_volume_size,
                                              json=True)

        # Wait for pending operations to be generated
        self._check_for_pending_operations(h_node, h_url)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service, g_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, g_node)

        # Cleanup pending operation
        heketi_server_operation_cleanup(h_node, h_url)

        # Wait for pending operations to be cleaned up
        for w in waiter.Waiter(timeout=120, interval=10):
            # Get BHV size
            for bhv in h_bhv_list:
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
                bhv_vol_size_after = vol_info.get("freesize")
                bhv_size_after.append(bhv_vol_size_after)

            if (set(bhv_size_before) == set(bhv_size_after)):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to validate the BHV free size. Actual: {}, "
                "Expected: {}".format(set(bhv_size_after),
                                      set(bhv_size_before)))
    def test_verify_create_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of file/block volumes in db during
           volumes creation from heketi side
        """
        # Create large volumes to observe the pending operations
        vol_count, h_vol_creation_async_op = 3, []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Check for pending file/block volume operations before creation
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Delete heketi pod to clean db operations
        if (h_db_check_vol_before.get("pending")
                or h_db_check_before.get("bricks").get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 30:
                h_volume_size = 30
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Temporarily replace g.run with g.run_async in the heketi volume
        # create functions to be able to run them in the background. Also,
        # avoid parsing the output as it won't be JSON at that moment; parse
        # it after reading the async operation results.
        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        for count in range(vol_count):
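            # eval() resolves to heketi_volume_create or
            # heketi_blockvolume_create depending on vol_type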
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    h_vol_creation_async_op.append(
                        eval("heketi_{}volume_create".format(vol_type))(
                            h_node, h_url, h_volume_size, json=True))

        # Check for pending operations
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No pending operations found during {}volume creation "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify pending brick operations during creation
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one pending brick operation")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting the pending brick count to be a multiple of 3 but "
                "found {}".format(h_db_check_bricks.get("pending")))

        # Wait for all counts of pending operations to be zero
        for w in waiter.Waiter(timeout=300, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if not h_db_check_vol.get("pending"):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Expecting no pending operations after 300 sec but "
                "found {} operation".format(h_db_check_vol.get("pending")))

        # Get heketi server DB details
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        h_db_check_bricks_after = h_db_check_after.get("bricks")

        # Verify if initial and final file/block volumes are same
        act_vol_count = h_db_check_vol_after.get("total")
        exp_vol_count = h_db_check_vol_before.get("total") + vol_count
        err_msg = (
            "Actual {} and expected {} {}volume counts do not match".format(
                act_vol_count, exp_vol_count, vol_type))
        self.assertEqual(act_vol_count, exp_vol_count, err_msg)

        # Verify if initial and final bricks are same for file volume
        volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        exp_brick_count = 0
        for volume in new_volumes:
            vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
            exp_brick_count += len(vol_info.get("bricks"))

        err_msg = "Actual {} and expected {} brick counts do not match"
        act_brick_count = h_db_check_bricks_after.get("total")
        self.assertEqual(act_brick_count, exp_brick_count,
                         err_msg.format(act_brick_count, exp_brick_count))
    def test_verify_delete_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of blockvolumes/volumes and bricks in heketi
           db during blockvolume/volume delete operation.
        """
        # Create large volumes to observe the pending operations
        vol_count, volume_ids, async_obj = 10, [], []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Check for pending file/block volume operations before creation
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_bricks_before = h_db_check_before.get("bricks")
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Delete heketi pod to clean db operations
        if (h_db_check_bricks_before.get("pending")
                or h_db_check_vol_before.get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Create file/block volumes
        for _ in range(vol_count):
            vol_id = eval("heketi_{}volume_create".format(vol_type))(
                h_node, h_url, h_volume_size, json=True).get("id")
            volume_ids.append(vol_id)
            self.addCleanup(eval("heketi_{}volume_delete".format(vol_type)),
                            h_node,
                            h_url,
                            vol_id,
                            raise_on_error=False)

        def run_async(cmd, hostname, raise_on_error=True):
            async_op = g.run_async(host=hostname, command=cmd)
            async_obj.append(async_op)
            return async_op

        bhv_list = []
        for vol_id in volume_ids:
            # Get BHV ids to delete in case of block volumes
            if vol_type:
                vol_info = (heketi_blockvolume_info(h_node,
                                                    h_url,
                                                    vol_id,
                                                    json=True))
                if vol_info.get("blockhostingvolume") not in bhv_list:
                    bhv_list.append(vol_info.get("blockhostingvolume"))

            # Temporarily replace g.run with g.run_async in the
            # heketi_volume_delete and heketi_blockvolume_delete functions to
            # be able to run them in the background.
            with mock.patch.object(command, 'cmd_run', side_effect=run_async):
                eval("heketi_{}volume_delete".format(vol_type))(h_node, h_url,
                                                                vol_id)

        # Wait for pending operations to be generated
        for w in waiter.Waiter(timeout=30, interval=3):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No pending operations found during {}volume deletion "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify pending brick operations during deletion
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one pending brick operation")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting the pending brick count to be a multiple of 3 but "
                "found {}".format(h_db_check_bricks.get("pending")))

        # Verify pending file/block volume operations during delete
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            h_db_check_bricks = h_db_check.get("bricks")
            if ((not h_db_check_bricks.get("pending"))
                    and (not h_db_check_vol.get("pending"))):
                break
        if w.expired:
            raise AssertionError(
                "Failed to delete {}volumes after 120 secs".format(vol_type))

        # Check that all background processes have exited
        for obj in async_obj:
            ret, out, err = obj.async_communicate()
            self.assertFalse(
                ret, "Failed to delete {}volume due to error: {}".format(
                    vol_type, err))

        # Delete BHV created during block volume creation
        if vol_type:
            for bhv_id in bhv_list:
                heketi_volume_delete(h_node, h_url, bhv_id)

        # Verify bricks and volume pending operations
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_bricks_after = h_db_check_after.get("bricks")
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        act_brick_count = h_db_check_bricks_after.get("pending")
        act_vol_count = h_db_check_vol_after.get("pending")

        # Verify pending brick operations after delete
        err_msg = "{} operations are pending for {} after {}volume deletion"
        if not vol_type:
            self.assertFalse(
                act_brick_count,
                err_msg.format(act_brick_count, "brick", vol_type))

        # Verify pending file/block volume operations after delete
        self.assertFalse(act_vol_count,
                         err_msg.format(act_vol_count, "volume", vol_type))

        act_brick_count = h_db_check_bricks_after.get("total")
        act_vol_count = h_db_check_vol_after.get("total")
        exp_brick_count = h_db_check_bricks_before.get("total")
        exp_vol_count = h_db_check_vol_before.get("total")
        err_msg = "Actual {} and expected {} {} counts do not match"

        # Verify if initial and final file/block volumes are same
        self.assertEqual(
            act_vol_count, exp_vol_count,
            err_msg.format(act_vol_count, exp_vol_count, "volume"))

        # Verify if initial and final bricks are same
        self.assertEqual(
            act_brick_count, exp_brick_count,
            err_msg.format(act_brick_count, exp_brick_count, "brick"))
    def setUp(self):
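        """Initialize the OCP master node, heketi client and server info and
           a random prefix for the test
        """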
        super(TestHeketiZones, self).setUp()
        self.node = self.ocp_master_node[0]
        self.h_client = self.heketi_client_node
        self.h_server = self.heketi_server_url
        self.prefix = "autotests-{}".format(utils.get_random_str())
    def _create_distributed_replica_vol(self, validate_cleanup, block=False):
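        """Create a distributed-replicate heketi volume and, when
           validate_cleanup is True, verify that deleting it removes the
           volume from heketi and gluster and frees the space
        """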

        # Create distributed vol
        vol_size_gb = self._get_vol_size()
        heketi_url = self.heketi_server_url
        h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        try:
            heketi_vol = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                vol_size_gb,
                json=True,
                raise_on_cleanup_error=False,
                block=block)
        except AssertionError as e:
            # NOTE: rare situation when we need to decrease the size of the
            #       volume and still expect it to be distributed.
            g.log.info("Failed to create distributed '%s'Gb volume. "
                       "Trying to create another one, 1Gb smaller." %
                       vol_size_gb)
            if not ('more required' in str(e) and
                    ('Insufficient suitable allocatable extents for '
                     'logical volume' in str(e))):
                raise

            vol_size_gb -= 1
            heketi_vol = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                vol_size_gb,
                json=True,
                raise_on_cleanup_error=False,
                block=block)
        g.log.info("Successfully created distributed volume.")

        vol_name = heketi_vol['name']
        vol_id = heketi_vol["bricks"][0]["volume"]

        # Get gluster volume info
        g.log.info("Get gluster volume '%s' info" % vol_name)
        gluster_vol = get_volume_info('auto_get_gluster_endpoint',
                                      volname=vol_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume '%s' info" % vol_name)
        g.log.info("Successfully got volume '%s' info" % vol_name)
        gluster_vol = gluster_vol[vol_name]
        self.assertEqual(
            gluster_vol["typeStr"], "Distributed-Replicate",
            "'%s' gluster vol isn't a Distributed-Replicate volume" % vol_name)

        # Check amount of bricks
        brick_amount = len(gluster_vol['bricks']['brick'])
        self.assertEqual(
            brick_amount % 3, 0,
            "Brick amount is expected to be divisible by 3. "
            "Actual amount is '%s'" % brick_amount)
        self.assertGreater(
            brick_amount, 3, "Brick amount is expected to be bigger than 3. "
            "Actual amount is '%s'." % brick_amount)

        # Run additional checks to validate that deleting a dist-rep volume
        # is handled by heketi; otherwise just return the volume id
        if not validate_cleanup:
            return vol_id

        # Get the free space after creating heketi volume
        free_space_after_creating_vol = self._get_free_space()

        # Delete heketi volume
        g.log.info("Deleting heketi volume '%s'" % vol_id)
        volume_deleted = heketi_volume_delete(self.heketi_client_node,
                                              heketi_url, vol_id)
        self.assertTrue(volume_deleted,
                        "Failed to delete heketi volume '%s'" % vol_id)
        g.log.info("Heketi volume '%s' has successfully been deleted" % vol_id)

        # Check the heketi volume list
        g.log.info("List heketi volumes")
        heketi_volumes = heketi_volume_list(self.heketi_client_node,
                                            self.heketi_server_url,
                                            json=True)
        self.assertTrue(heketi_volumes, "Failed to list heketi volumes")
        g.log.info("Heketi volumes have successfully been listed")
        heketi_volumes = heketi_volumes.get('volumes', heketi_volumes)
        self.assertNotIn(vol_id, heketi_volumes)
        self.assertNotIn(vol_name, heketi_volumes)

        # Check the gluster volume list
        g.log.info("Get the gluster volume list")
        gluster_volumes = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(gluster_volumes, "Unable to get Gluster volume list")

        g.log.info(
            "Successfully got Gluster volume list %s" % gluster_volumes)
        self.assertNotIn(vol_id, gluster_volumes)
        self.assertNotIn(vol_name, gluster_volumes)

        # Get the free space after deleting the heketi volume
        free_space_after_deleting_vol = self._get_free_space()

        # Compare the free space before and after deleting the volume
        g.log.info("Comparing the free space before and after deleting volume")
        self.assertLessEqual(free_space_after_creating_vol + (3 * vol_size_gb),
                             free_space_after_deleting_vol)
        g.log.info(
            "Volume successfully deleted and space is reallocated. "
            "Free space after creating volume %s. "
            "Free space after deleting volume %s." %
            (free_space_after_creating_vol, free_space_after_deleting_vol))
    def test_create_max_num_blockhostingvolumes(self):
        """Create block volumes to test the maximum number of block volumes
        in a block hosting volume
        """
        prefix = "autotest-{}".format(utils.get_random_str())

        num_of_bv = 10
        new_bhv_list, bv_list, g_nodes = [], [], []
        free_space, nodenum = get_total_free_space(self.heketi_client_node,
                                                   self.heketi_server_url)
        if nodenum < 3:
            self.skipTest("Skip the test case since the number of "
                          "online nodes is less than 3.")
        free_space_available = int(free_space / nodenum)
        default_bhv_size = get_default_block_hosting_volume_size(
            self.heketi_client_node, self.heketi_dc_name)
        # Get the existing list of BHVs
        existing_bhv_list = get_block_hosting_volume_list(
            self.heketi_client_node, self.heketi_server_url)

        # Skip the test if available space is less than default_bhv_size
        if free_space_available < default_bhv_size:
            self.skipTest("Skip the test case since free_space_available %s "
                          "is less than space_required_for_bhv %s." %
                          (free_space_available, default_bhv_size))

        # Create BHV's
        count = 1
        while free_space_available > default_bhv_size:
            bhv_name = "{}-bhv-{}".format(prefix, count)
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                bhv_name,
                default_bhv_size,
                raise_on_cleanup_error=False,
                json=True,
                block=True)
            if bhv_info["id"] not in existing_bhv_list.keys():
                new_bhv_list.append(bhv_info["id"])

            free_size = bhv_info["blockinfo"]["freesize"]
            if free_size > num_of_bv:
                block_vol_size = int(free_size / num_of_bv)
            else:
                block_vol_size, num_of_bv = 1, free_size

            # Create specified number of BV's in BHV's created
            for i in range(0, num_of_bv):
                block_vol = heketi_blockvolume_create(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      block_vol_size,
                                                      json=True,
                                                      ha=3,
                                                      auth=True)
                self.addCleanup(heketi_blockvolume_delete,
                                self.heketi_client_node,
                                self.heketi_server_url,
                                block_vol["id"],
                                raise_on_error=False)
                bv_list.append(block_vol["id"])
            free_space_available = int(free_space_available - default_bhv_size)
            count += 1

        # Get gluster nodes' management hostnames
        h_nodes_ids = heketi_node_list(self.heketi_client_node,
                                       self.heketi_server_url)
        for h_node in h_nodes_ids[:2]:
            g_node = heketi_node_info(self.heketi_client_node,
                                      self.heketi_server_url,
                                      h_node,
                                      json=True)
            g_nodes.append(g_node['hostnames']['manage'][0])

        # Check if there is no crash in gluster related services & heketi
        services = (("glusterd", "running"), ("gluster-blockd", "running"),
                    ("tcmu-runner", "running"), ("gluster-block-target",
                                                 "exited"))
        for g_node in g_nodes:
            for service, state in services:
                wait_for_service_status_on_gluster_pod_or_node(
                    self.ocp_client[0],
                    service,
                    'active',
                    state,
                    g_node,
                    raise_on_error=False)
            out = hello_heketi(self.heketi_client_node, self.heketi_server_url)
            self.assertTrue(
                out, "Heketi server %s is not alive" % self.heketi_server_url)

        # Delete all the block volumes created
        for bv_volume in bv_list:
            heketi_blockvolume_delete(self.heketi_client_node,
                                      self.heketi_server_url, bv_volume)

        # Check if any block volumes exist in heketi and gluster
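        # Iterate over a copy of the list since entries are removed inside
        # the loop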
        for bhv_volume in new_bhv_list[:]:
            heketi_vol_info = heketi_volume_info(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 bhv_volume,
                                                 json=True)
            self.assertNotIn("blockvolume",
                             heketi_vol_info["blockinfo"].keys())
            gluster_vol_info = get_block_list('auto_get_gluster_endpoint',
                                              volname="vol_%s" % bhv_volume)
            self.assertIsNotNone(gluster_vol_info,
                                 "Failed to get volume info %s" % bhv_volume)
            new_bhv_list.remove(bhv_volume)
            for blockvol in gluster_vol_info:
                self.assertNotIn("blockvol_", blockvol)
                heketi_volume_delete(self.heketi_client_node,
                                     self.heketi_server_url, bhv_volume)

        # Check if all blockhosting volumes are deleted from heketi
        self.assertFalse(new_bhv_list)
    def test_prometheus_volume_metrics_on_node_reboot(self):
        """Validate volume metrics using prometheus before and after node
        reboot"""

        # Prefix used for pod names throughout the test
        prefix = "autotest-{}".format(utils.get_random_str())

        # Create I/O pod with PVC
        pvc_name = self.create_and_wait_for_pvc()
        pod_name = openshift_ops.oc_create_tiny_pod_with_volume(
            self._master, pvc_name, prefix,
            image=self.io_container_image_cirros)
        self.addCleanup(openshift_ops.oc_delete, self._master, 'pod', pod_name,
                        raise_on_absence=False)
        openshift_ops.wait_for_pod_be_ready(
            self._master, pod_name, timeout=60, wait_step=5)

        # Write data on the volume and wait for 2 minutes; the sleep is
        # needed for prometheus to get the exact values of the metrics
        ret, _, err = openshift_ops.oc_rsh(
            self._master, pod_name, "touch /mnt/file{1..1000}")
        self.assertEqual(
            ret, 0, "Failed to create files in the app pod "
                    "with {}".format(err))
        time.sleep(120)

        # Fetch the metrics and store in initial_metrics as dictionary
        initial_metrics = self._get_and_manipulate_metric_data(
            self.metrics, pvc_name)
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)

        # Get the hostname to reboot where the pod is running
        pod_info = openshift_ops.oc_get_pods(self._master, name=pod_name)
        node_for_reboot = pod_info[pod_name]['node']

        # Get the vm name by the hostname
        vm_name = node_ops.find_vm_name_by_ip_or_hostname(node_for_reboot)

        # Power the VM off and on, depending on whether it is a gluster node
        if node_for_reboot in self.gluster_servers:
            self.power_off_gluster_node_vm(vm_name, node_for_reboot)
            self.power_on_gluster_node_vm(vm_name, node_for_reboot)
        else:
            self.power_off_vm(vm_name)
            self.power_on_vm(vm_name)
            openshift_ops.wait_for_ocp_node_be_ready(
                self._master, node_for_reboot)

        # Create the new pod and validate the prometheus metrics
        pod_name = openshift_ops.oc_create_tiny_pod_with_volume(
            self._master, pvc_name, prefix)
        self.addCleanup(openshift_ops.oc_delete, self._master, 'pod', pod_name)

        # Wait for the pod to be up and running and for prometheus to
        # refresh its data
        openshift_ops.wait_for_pod_be_ready(
            self._master, pod_name, timeout=60, wait_step=5)
        time.sleep(120)

        # Fetch the metrics, store them in final_metrics as a dictionary and
        # validate them against initial_metrics
        final_metrics = self._get_and_manipulate_metric_data(
            self.metrics, pvc_name)
        self.assertEqual(dict(initial_metrics), dict(final_metrics),
                         "Metrics are different post node reboot")
Exemple #23
0
    def test_heketi_device_removal_with_insuff_space(self):
        """Validate heketi with device removal insufficient space"""

        # Keep only the first 3 online nodes with 2 online devices each;
        # disable all other nodes and any extra or undersized devices
        min_free_space_gb = 5
        min_free_space = min_free_space_gb * 1024**2
        heketi_url = self.heketi_server_url
        heketi_node = self.heketi_client_node
        nodes = {}

        node_ids = heketi_node_list(heketi_node, heketi_url)
        self.assertTrue(node_ids)
        for node_id in node_ids:
            node_info = heketi_node_info(heketi_node,
                                         heketi_url,
                                         node_id,
                                         json=True)
            if (node_info["state"].lower() != "online"
                    or not node_info["devices"]):
                continue
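            # Once 3 suitable nodes are collected, disable the remaining ones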
            if len(nodes) > 2:
                heketi_node_disable(heketi_node, heketi_url, node_id)
                self.addCleanup(heketi_node_enable, heketi_node, heketi_url,
                                node_id)
                continue
            for device in node_info["devices"]:
                if device["state"].lower() != "online":
                    continue
                free_space = device["storage"]["free"]
                if node_id not in nodes:
                    nodes[node_id] = []
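                # Disable devices that are too small or beyond the 2 devices
                # kept per node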
                if (free_space < min_free_space or len(nodes[node_id]) > 1):
                    heketi_device_disable(heketi_node, heketi_url,
                                          device["id"])
                    self.addCleanup(heketi_device_enable, heketi_node,
                                    heketi_url, device["id"])
                    continue
                nodes[node_id].append({
                    "device_id": device["id"],
                    "free": free_space
                })

        # Skip test if node requirements are not met
        if (len(nodes) < 3
                or not all(map((lambda _l: len(_l) > 1), nodes.values()))):
            raise self.skipTest(
                "Could not find 3 online nodes with 2 online devices "
                "having free space bigger than %dGb." % min_free_space_gb)

        # Calculate size of a potential distributed vol
        if nodes[node_ids[0]][0]["free"] > nodes[node_ids[0]][1]["free"]:
            index = 0
        else:
            index = 1
        vol_size_gb = int(nodes[node_ids[0]][index]["free"] / (1024**2)) + 1
        device_id = nodes[node_ids[0]][index]["device_id"]
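        # The bigger of the two devices will be the removal target later on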

        # Create volume with such size that we consume space more than
        # size of smaller disks
        h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        try:
            self.create_heketi_volume_with_name_and_wait(h_volume_name,
                                                         vol_size_gb,
                                                         json=True)
        except Exception as e:
            # NOTE: rare situation when we need to decrease size of a volume.
            g.log.info("Failed to create '%s'Gb volume. "
                       "Trying to create another one, smaller by 1Gb.",
                       vol_size_gb)

            if not ('more required' in str(e) and
                    ('Insufficient suitable allocatable extents for '
                     'logical volume' in str(e))):
                raise

            vol_size_gb -= 1
            self.create_heketi_volume_with_name_and_wait(h_volume_name,
                                                         vol_size_gb,
                                                         json=True)

        # Try to 'remove' the bigger Heketi device expecting an error,
        # because there is no space on the smaller device to relocate bricks to
        heketi_device_disable(heketi_node, heketi_url, device_id)
        self.addCleanup(heketi_device_enable, heketi_node, heketi_url,
                        device_id)
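        # Heketi allows removing a device only after it has been disabled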
        try:
            self.assertRaises(AssertionError, heketi_device_remove,
                              heketi_node, heketi_url, device_id)
        except Exception:
            self.addCleanup(heketi_device_disable, heketi_node, heketi_url,
                            device_id)
            raise

    def test_metrics_workload_on_prometheus(self):
        """Validate metrics workload on prometheus"""

        # Skip test if the prometheus pods are not present
        openshift_ops.switch_oc_project(
            self._master, self._prometheus_project_name)
        prometheus_pods = openshift_ops.oc_get_pods(
            self._master, selector=self._prometheus_resources_selector)
        if not prometheus_pods:
            self.skipTest(
                "Skipping test as prometheus pod is not present")

        if not self.registry_sc:
            self.skipTest(
                "Skipping test as registry storage details are not provided")
        self._registry_project = self.registry_sc.get(
            'restsecretnamespace')
        self.prefix = "autotest-{}".format(utils.get_random_str())

        # Get one of the prometheus pod names and its respective pvc name
        prometheus_pod = list(prometheus_pods.keys())[0]
        pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
        pvc_name = openshift_ops.oc_get_custom_resource(
            self._master, "pod", pvc_custom, prometheus_pod)[0]
        self.assertTrue(
            pvc_name, "Failed to get PVC name for prometheus"
            " pod {}".format(prometheus_pod))
        self.verify_iscsi_sessions_and_multipath(
            pvc_name, prometheus_pod, rtype='pod',
            heketi_server_url=self._registry_heketi_server_url,
            is_registry_gluster=True)

        # Try to fetch metric from the prometheus pod
        self._fetch_metric_from_promtheus_pod(
            metric='kube_persistentvolumeclaim_info')

        # Create storage class
        openshift_ops.switch_oc_project(
            self._master, self._registry_project)
        self.sc_name = self.create_storage_class(
            vol_name_prefix=self.prefix, glusterfs_registry=True)
        self.addCleanup(openshift_ops.switch_oc_project,
                        self._master, self._registry_project)

        # Create PVCs and app pods
        pvc_size, pvc_count, batch_count = 1, 5, 5
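        # Create PVCs in 5 batches of 5 and attach app DCs that write data
        # to each volume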
        for _ in range(batch_count):
            test_pvc_names = self.create_and_wait_for_pvcs(
                pvc_size, pvc_name_prefix=self.prefix,
                pvc_amount=pvc_count, sc_name=self.sc_name, timeout=600,
                wait_step=10)
            self.create_dcs_with_pvc(
                test_pvc_names, timeout=600, wait_step=5,
                dc_name_prefix="autotests-dc-with-app-io",
                space_to_use=1048576)

        # Check from the prometheus pod for the PVC space usage
        openshift_ops.switch_oc_project(
            self._master, self._prometheus_project_name)
        mount_path = "/prometheus"
        cmd = "oc exec {0} -- df -PT {1} | grep {1}".format(
            prometheus_pod, mount_path)
        out = self.cmd_run(cmd)
        self.assertTrue(out, "Failed to get info about mounted volume. "
                             "Output is empty.")

        # Try to fetch metric from prometheus pod
        self._fetch_metric_from_promtheus_pod(
            metric='kube_persistentvolumeclaim_info')
        self._fetch_metric_from_promtheus_pod(
            metric='kube_pod_spec_volumes_persistentvolumeclaims_info')
        self.addCleanup(openshift_ops.switch_oc_project,
                        self._master, self._registry_project)