Example #1
    def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
        """Validate heketi metrics VolumeCount after volume deletion"""

        vol_list = []

        for i in range(3):
            # Create volume
            vol = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       1,
                                       json=True)

            self.assertTrue(vol)

            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            vol['id'],
                            raise_on_error=False)

            volume_list = heketi_volume_list(self.heketi_client_node,
                                             self.heketi_server_url)

            self.assertIn(vol['id'], volume_list)
            vol_list.append(vol)

        for vol in vol_list:
            # Delete volume
            heketi_volume_delete(self.heketi_client_node,
                                 self.heketi_server_url, vol['id'])
            volume_list = heketi_volume_list(self.heketi_client_node,
                                             self.heketi_server_url)
            self.assertNotIn(vol['id'], volume_list)
            self.verify_volume_count()
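
Note: verify_volume_count() above is a helper defined elsewhere in the test class and is not shown here. As a rough, self-contained sketch of the kind of check it performs, the snippet below compares a volume count taken from scraped heketi metrics against the number of volumes heketi lists; the metric name "heketi_volume_count" and the data shapes are assumptions for illustration, not the suite's actual API.

def volume_count_matches(metrics, heketi_volume_ids):
    """Return True if the summed 'heketi_volume_count' metric equals
    the number of volumes heketi reports."""
    samples = metrics.get("heketi_volume_count", [])
    metric_total = sum(sample["value"] for sample in samples)
    return metric_total == len(heketi_volume_ids)

# Hypothetical data, for demonstration only.
metrics = {"heketi_volume_count": [{"cluster": "c1", "value": 2}]}
assert volume_count_matches(metrics, ["vol-a", "vol-b"])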
Example #2
    def test_delete_heketidb_volume(self):
        """Method to test heketidb volume deletion via heketi-cli."""
        for i in range(0, 2):
            volume_info = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url, 10, json=True)
            self.addCleanup(heketi_ops.heketi_volume_delete,
                            self.heketi_client_node, self.heketi_server_url,
                            volume_info["id"])

        volume_list_info = heketi_ops.heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)

        self.assertTrue(volume_list_info["volumes"],
                        "Heketi volume list empty.")

        for volume_id in volume_list_info["volumes"]:
            volume_info = heketi_ops.heketi_volume_info(
                self.heketi_client_node,
                self.heketi_server_url,
                volume_id,
                json=True)

            if volume_info["name"] == "heketidbstorage":
                self.assertRaises(AssertionError,
                                  heketi_ops.heketi_volume_delete,
                                  self.heketi_client_node,
                                  self.heketi_server_url, volume_id)
                return
        raise ExecutionError(
            "Warning: heketidbstorage doesn't exist in list of volumes")
Example #3
    def test_blockvolume_create_no_free_space(self):
        """Validate error is returned when free capacity is exhausted"""

        # Create first small blockvolume
        blockvol1 = heketi_blockvolume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(blockvol1, "Failed to create block volume.")
        self.addCleanup(
            heketi_blockvolume_delete, self.heketi_client_node,
            self.heketi_server_url, blockvol1['id'])

        # Get info about block hosting volumes
        file_volumes = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)
        self.assertTrue(file_volumes)
        self.assertIn("volumes", file_volumes)
        self.assertTrue(file_volumes["volumes"])
        max_block_hosting_vol_size, file_volumes_debug_info = 0, []
        for vol_id in file_volumes["volumes"]:
            vol = heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                vol_id, json=True)
            current_block_hosting_vol_size = vol.get('size', 0)
            if current_block_hosting_vol_size > max_block_hosting_vol_size:
                max_block_hosting_vol_size = current_block_hosting_vol_size
            if current_block_hosting_vol_size:
                file_volumes_debug_info.append(six.text_type({
                    'id': vol.get('id', '?'),
                    'name': vol.get('name', '?'),
                    'size': current_block_hosting_vol_size,
                    'blockinfo': vol.get('blockinfo', '?'),
                }))
        self.assertGreater(max_block_hosting_vol_size, 0)

        # Try to create blockvolume with size bigger than available
        too_big_vol_size = max_block_hosting_vol_size + 1
        try:
            blockvol2 = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                too_big_vol_size, json=True)
        except ExecutionError:
            return

        if blockvol2 and blockvol2.get('id'):
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, blockvol2['id'])
        block_hosting_vol = heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url,
            blockvol2.get('blockhostingvolume'), json=True)
        self.assertGreater(
            block_hosting_vol.get('size', -2), blockvol2.get('size', -1),
            ("Block volume unexpectedly was created. "
             "Calculated 'max free size' is '%s'.\nBlock volume info is: %s \n"
             "File volume info, which hosts block volume: \n%s,"
             "Block hosting volumes which were considered: \n%s" % (
                 max_block_hosting_vol_size, blockvol2, block_hosting_vol,
                 '\n'.join(file_volumes_debug_info))))
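
The sizing logic above relies on the fact that a block volume must fit inside a single block hosting volume, so requesting one GiB more than the largest existing hosting volume guarantees rejection. In miniature, with toy sizes:

# Toy sizes (GiB) of existing block hosting volumes.
hosting_vol_sizes = [10, 25, 15]
too_big_vol_size = max(hosting_vol_sizes) + 1
# No single hosting volume can accommodate the request.
assert all(too_big_vol_size > size for size in hosting_vol_sizes)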
Example #4
    def test_arbiter_volume_delete_using_pvc(self):
        """Test Arbiter volume delete using pvc when volume is not mounted
           on app pod
        """
        prefix = "autotest-%s" % utils.get_random_str()

        # Create sc with gluster arbiter info
        sc_name = self.create_storage_class(vol_name_prefix=prefix,
                                            is_arbiter_vol=True)

        # Create PVC and wait for it to be in 'Bound' state
        pvc_name = self.create_and_wait_for_pvc(pvc_name_prefix=prefix,
                                                sc_name=sc_name)

        # Get vol info
        gluster_vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)

        # Verify arbiter volume properties
        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            gluster_vol_info)

        # Get volume ID
        gluster_vol_id = gluster_vol_info["gluster_vol_id"]

        # Delete the pvc
        openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
        openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)

        # Check the heketi volume list after the PVC is deleted
        g.log.info("List heketi volumes")
        heketi_volumes = heketi_ops.heketi_volume_list(self.heketi_client_node,
                                                       self.heketi_server_url)

        err_msg = "Failed to delete heketi volume by prefix %s" % prefix
        self.assertNotIn(prefix, heketi_volumes, err_msg)

        # Verify the gluster volume is absent
        get_gluster_vol_info = volume_ops.get_volume_info(
            "auto_get_gluster_endpoint", gluster_vol_id)
        err_msg = "Failed to delete gluster volume %s" % gluster_vol_id
        self.assertFalse(get_gluster_vol_info, err_msg)

        # Verify that the bricks and LVs were removed
        for brick in gluster_vol_info['bricks']['brick']:
            gluster_node_ip, brick_name = brick["name"].split(":")

            cmd = "df %s" % brick_name
            with self.assertRaises(exceptions.ExecutionError):
                openshift_ops.cmd_run_on_gluster_pod_or_node(
                    self.node, cmd, gluster_node_ip)

            # Compute the LV path outside the assertRaises block so a
            # non-matching brick name cannot masquerade as a passing check
            lv_match = re.search(BRICK_REGEX, brick["name"])
            if lv_match:
                cmd = "lvs %s" % lv_match.group(2).strip()
                with self.assertRaises(exceptions.ExecutionError):
                    openshift_ops.cmd_run_on_gluster_pod_or_node(
                        self.node, cmd, gluster_node_ip)
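
BRICK_REGEX is a constant imported from the suite and not shown here. Assuming heketi-managed brick paths of the form <ip>:/var/lib/heketi/mounts/vg_<id>/brick_<id>/brick, a hypothetical pattern with the group layout the test relies on (group(2) yielding the fragment passed to lvs) could look like this:

import re

# Hypothetical stand-in for the suite's BRICK_REGEX constant.
BRICK_REGEX = r"^(.*):/var/lib/heketi/mounts/(.*)/brick$"

brick = "10.0.0.1:/var/lib/heketi/mounts/vg_abc123/brick_def456/brick"
match = re.search(BRICK_REGEX, brick)
assert match and match.group(2) == "vg_abc123/brick_def456"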
Example #5
    def test_blockvolume_create_no_free_space(self):
        """Validate error is returned when free capacity is exhausted"""

        # Create first small blockvolume
        blockvol1 = heketi_blockvolume_create(
            self.heketi_client_node, self.heketi_server_url, 1, json=True)
        self.assertTrue(blockvol1, "Failed to create block volume.")
        self.addCleanup(
            heketi_blockvolume_delete, self.heketi_client_node,
            self.heketi_server_url, blockvol1['id'])

        # Get info about block hosting volumes
        file_volumes = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url, json=True)
        self.assertTrue(file_volumes)
        self.assertIn("volumes", file_volumes)
        self.assertTrue(file_volumes["volumes"])
        max_block_hosting_vol_size, file_volumes_debug_info = 0, []
        for vol_id in file_volumes["volumes"]:
            vol = heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                vol_id, json=True)
            current_block_hosting_vol_size = vol.get('size', 0)
            if current_block_hosting_vol_size > max_block_hosting_vol_size:
                max_block_hosting_vol_size = current_block_hosting_vol_size
            if current_block_hosting_vol_size:
                file_volumes_debug_info.append(six.text_type({
                    'id': vol.get('id', '?'),
                    'name': vol.get('name', '?'),
                    'size': current_block_hosting_vol_size,
                    'blockinfo': vol.get('blockinfo', '?'),
                }))
        self.assertGreater(max_block_hosting_vol_size, 0)

        # Try to create blockvolume with size bigger than available
        too_big_vol_size = max_block_hosting_vol_size + 1
        try:
            blockvol2 = heketi_blockvolume_create(
                self.heketi_client_node, self.heketi_server_url,
                too_big_vol_size, json=True)
        except AssertionError:
            return

        if blockvol2 and blockvol2.get('id'):
            self.addCleanup(
                heketi_blockvolume_delete, self.heketi_client_node,
                self.heketi_server_url, blockvol2['id'])
        block_hosting_vol = heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url,
            blockvol2.get('blockhostingvolume'), json=True)
        self.assertGreater(
            block_hosting_vol.get('size', -2), blockvol2.get('size', -1),
            ("Block volume unexpectedly was created. "
             "Calculated 'max free size' is '%s'.\nBlock volume info is: %s \n"
             "File volume info, which hosts block volume: \n%s,"
             "Block hosting volumes which were considered: \n%s" % (
                 max_block_hosting_vol_size, blockvol2, block_hosting_vol,
                 '\n'.join(file_volumes_debug_info))))
Example #6
    def test_volume_create_and_list_volume(self):
        """Validate heketi and gluster volume list"""
        g.log.info("List gluster volumes before Heketi volume creation")
        existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))

        g.log.info("List heketi volumes before volume creation")
        existing_h_vol_list = heketi_volume_list(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size,
                                   json=True)
        g.log.info("Heketi volume successfully created" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        g.log.info("List heketi volumes after volume creation")
        h_vol_list = heketi_volume_list(self.heketi_client_node,
                                        self.heketi_server_url,
                                        json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("List gluster volumes after Heketi volume creation")
        g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(g_vol_list, ("Unable to get volumes list"))
        g.log.info("Successfully got the volumes list")

        # Perform checks
        self.assertEqual(
            len(existing_g_vol_list) + 1, len(g_vol_list),
            "Expected creation of only one volume in Gluster creating "
            "Heketi volume. Here is lists before and after volume creation: "
            "%s \n%s" % (existing_g_vol_list, g_vol_list))
        self.assertEqual(
            len(existing_h_vol_list) + 1, len(h_vol_list),
            "Expected creation of only one volume in Heketi. Here is lists "
            "of Heketi volumes before and after volume creation: %s\n%s" %
            (existing_h_vol_list, h_vol_list))
Example #7
    def test_volume_create_and_list_volume(self):
        """Validate heketi and gluster volume list"""
        g.log.info("List gluster volumes before Heketi volume creation")
        existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))

        g.log.info("List heketi volumes before volume creation")
        existing_h_vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        g.log.info("Heketi volume successfully created" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        g.log.info("List heketi volumes after volume creation")
        h_vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("List gluster volumes after Heketi volume creation")
        g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(g_vol_list, ("Unable to get volumes list"))
        g.log.info("Successfully got the volumes list")

        # Perform checks
        self.assertEqual(
            len(existing_g_vol_list) + 1, len(g_vol_list),
            "Expected creation of only one volume in Gluster creating "
            "Heketi volume. Here is lists before and after volume creation: "
            "%s \n%s" % (existing_g_vol_list, g_vol_list))
        self.assertEqual(
            len(existing_h_vol_list) + 1, len(h_vol_list),
            "Expected creation of only one volume in Heketi. Here is lists "
            "of Heketi volumes before and after volume creation: %s\n%s" % (
                existing_h_vol_list, h_vol_list))
Example #8
    def validate_file_volumes_count(self, h_node, h_server, node_ip):

        # Check that the volume counts from heketi and gluster match
        heketi_topology_info(h_node, h_server, json=True)
        h_volume_list = heketi_volume_list(h_node, h_server, json=True)
        vol_list = get_volume_list(node_ip)
        self.assertIsNotNone(
            vol_list, "Failed to get volumes list")
        self.assertEqual(
            len(h_volume_list['volumes']), len(vol_list),
            "Failed to verify volume count. Expected: '{}', "
            "actual: '{}'".format(
                len(h_volume_list['volumes']), len(vol_list)))
Example #9
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # Get the name of the PV bound to the PVC
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [
            r':.metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create DC with a pod and the PVC attached to it
        dc_name = pod_name = None
        try:
            dc_name = oc_create_app_dc_with_io(
                self.node, self.pvc_name, image=self.io_container_image_cirros)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            # Guard against NameError if DC or pod creation failed above
            if dc_name:
                scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
                oc_delete(self.node, 'dc', dc_name)
            if pod_name:
                wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
Example #10
    def _cleanup_heketi_volumes(self, existing_volumes):
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        for volume in new_volumes:
            h_vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
            if h_vol_info.get("block"):
                for block_vol in (
                        h_vol_info.get("blockinfo").get("blockvolume")):
                    heketi_blockvolume_delete(h_node, h_url, block_vol)
            heketi_volume_delete(h_node, h_url, volume, raise_on_error=False)
Example #11
    def test_heketi_authentication_with_user_credentials(self):
        """Heketi command authentication with invalid and valid credentials"""

        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        err_msg = "Error: Invalid JWT token: Token missing iss claim"

        # Run heketi commands with invalid credentials
        for each_cmd in ("volume list", "topology info"):
            cmd = "timeout 120 heketi-cli -s  {} {}".format(
                self.heketi_server_url, each_cmd)
            ret, _, err = g.run(h_client, cmd)
            self.assertTrue(ret, "Command execution with invalid credentials"
                                 " should not succeed")
            self.assertEqual(
                err_msg, err.strip(), "Error is different from the command"
                                      " execution {}".format(err.strip()))

        # Run heketi commands with valid credentials
        kwar = {'json_arg': True, 'secret': self.heketi_cli_key,
                'user': self.heketi_cli_user}
        heketi_ops.heketi_volume_list(h_client, h_server, **kwar)
        heketi_ops.heketi_topology_info(h_client, h_server, **kwar)
Example #12
    def test_to_check_deletion_of_cluster(self):
        """Validate deletion of cluster with volumes"""
        # List heketi volumes
        g.log.info("List heketi volumes")
        volumes = heketi_volume_list(self.heketi_client_node,
                                     self.heketi_server_url,
                                     json=True)
        if (len(volumes["volumes"]) == 0):
            g.log.info("Creating heketi volume")
            out = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       self.volume_size,
                                       json=True)
            self.assertTrue(out, ("Failed to create heketi "
                                  "volume of size %s" % self.volume_size))
            g.log.info("Heketi volume successfully created" % out)
            volume_id = out["bricks"][0]["volume"]
            self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                            self.heketi_server_url, volume_id)

        # List heketi clusters
        g.log.info("Listing heketi clusters")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi clusters"))
        g.log.info("All heketi clusters successfully listed")
        cluster_id = out["clusters"][0]

        # Deleting a heketi cluster
        g.log.info("Trying to delete a heketi cluster"
                   " which contains volumes and/or nodes:"
                   " Expected to fail")
        self.assertRaises(
            AssertionError,
            heketi_cluster_delete,
            self.heketi_client_node,
            self.heketi_server_url,
            cluster_id,
        )
        g.log.info("Expected result: Unable to delete cluster %s"
                   " because it contains volumes "
                   " and/or nodes" % cluster_id)

        # To confirm deletion failed, check heketi cluster list
        g.log.info("Listing heketi cluster list")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi cluster"))
        g.log.info("All heketi cluster successfully listed")
Example #13
    def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
        """Validate heketi metrics VolumeCount after volume deletion"""

        vol_list = []

        for i in range(3):
            # Create volume
            vol = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url, 1, json=True)

            self.assertTrue(vol)

            self.addCleanup(
                heketi_volume_delete,
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'],
                raise_on_error=False)

            volume_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)

            self.assertIn(vol['id'], volume_list)
            vol_list.append(vol)

        for vol in vol_list:
            # Delete volume
            heketi_volume_delete(
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'])
            volume_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)
            self.assertNotIn(vol['id'], volume_list)
            self.verify_volume_count()
Example #14
    def test_to_check_deletion_of_cluster(self):
        """Validate deletion of cluster with volumes"""
        # List heketi volumes
        g.log.info("List heketi volumes")
        volumes = heketi_volume_list(self.heketi_client_node,
                                     self.heketi_server_url,
                                     json=True)
        if (len(volumes["volumes"]) == 0):
            g.log.info("Creating heketi volume")
            out = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       self.volume_size, json=True)
            self.assertTrue(out, ("Failed to create heketi "
                            "volume of size %s" % self.volume_size))
            g.log.info("Heketi volume successfully created" % out)
            volume_id = out["bricks"][0]["volume"]
            self.addCleanup(
                heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, volume_id)

        # List heketi clusters
        g.log.info("Listing heketi clusters")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi clusters"))
        g.log.info("All heketi clusters successfully listed")
        cluster_id = out["clusters"][0]

        # Deleting a heketi cluster
        g.log.info("Trying to delete a heketi cluster"
                   " which contains volumes and/or nodes:"
                   " Expected to fail")
        self.assertRaises(
            ExecutionError,
            heketi_cluster_delete,
            self.heketi_client_node, self.heketi_server_url, cluster_id,
        )
        g.log.info("Expected result: Unable to delete cluster %s"
                   " because it contains volumes "
                   " and/or nodes" % cluster_id)

        # To confirm deletion failed, check heketi cluster list
        g.log.info("Listing heketi cluster list")
        out = heketi_cluster_list(self.heketi_client_node,
                                  self.heketi_server_url,
                                  json=True)
        self.assertTrue(out, ("Failed to list heketi cluster"))
        g.log.info("All heketi cluster successfully listed")
Example #15
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # Get the name of the PV bound to the PVC
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create DC with a pod and the PVC attached to it
        dc_name = pod_name = None
        try:
            dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            # Guard against NameError if DC or pod creation failed above
            if dc_name:
                scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
                oc_delete(self.node, 'dc', dc_name)
            if pod_name:
                wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        heketi_volume_delete(self.heketi_client_node,
                             self.heketi_server_url, vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
Example #16
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        scale_dc_pod_amount_and_wait(self.ocp_master_node[0],
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.ocp_master_node[0],
                        self.heketi_dc_name,
                        pod_amount=1)

        # Verify that metrics are not accessible when the heketi pod is down
        with self.assertRaises(AssertionError):
            get_heketi_metrics(self.heketi_client_node,
                               self.heketi_server_url,
                               prometheus_format=True)

        scale_dc_pod_amount_and_wait(self.ocp_master_node[0],
                                     self.heketi_dc_name,
                                     pod_amount=1)

        pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                        self.heketi_dc_name,
                                        self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        for i in range(3):
            vol = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       1,
                                       json=True)

            self.assertTrue(vol)

            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            vol['id'],
                            raise_on_error=False)

            vol_list = heketi_volume_list(self.heketi_client_node,
                                          self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        self.verify_heketi_metrics_with_topology_info()
Example #17
def _heketi_vols(ocp_node, url):
    # Unfortunately, getting json from heketi-cli only returns the ids.
    # To get a mapping of ids and volume names without a lot of
    # back and forth between the test and the ocp_node, we end up
    # having to scrape the output of 'volume list'.
    # TODO: This probably should be made into a utility function
    out = heketi_volume_list(ocp_node, url, json=False)
    res = []
    for line in out.splitlines():
        if not line.startswith('Id:'):
            continue
        row = {}
        for section in line.split():
            if ':' in section:
                key, value = section.split(':', 1)
                row[key.lower()] = value.strip()
        res.append(row)
    return res
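
To illustrate the scraping above: heketi-cli volume list prints one volume per line in Key:value pairs, so the parser builds one dict per "Id:" line. A quick self-contained check of the same parsing logic (the sample line uses made-up ids):

sample_line = "Id:abc123 Cluster:def456 Name:vol_abc123"
row = {}
for section in sample_line.split():
    if ':' in section:
        key, value = section.split(':', 1)
        row[key.lower()] = value.strip()
assert row == {'id': 'abc123', 'cluster': 'def456', 'name': 'vol_abc123'}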
Example #18
    def create_heketi_volume_with_name_and_wait(self,
                                                name,
                                                size,
                                                raise_on_cleanup_error=True,
                                                timeout=600,
                                                wait_step=10,
                                                **kwargs):
        json = kwargs.get("json", False)

        try:
            h_volume_info = heketi_volume_create(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 size,
                                                 name=name,
                                                 **kwargs)
        except Exception as e:
            # Re-raise capacity errors immediately; on any other failure the
            # volume may still show up, so poll for it below.
            if ('more required' in six.text_type(e)
                    or ('Failed to allocate new volume' in six.text_type(e))):
                raise

            for w in Waiter(timeout, wait_step):
                h_volumes = heketi_volume_list(self.heketi_client_node,
                                               self.heketi_server_url)
                h_volume_match = re.search(HEKETI_VOLUME_REGEX % name,
                                           h_volumes)
                if h_volume_match:
                    h_volume_info = heketi_volume_info(self.heketi_client_node,
                                                       self.heketi_server_url,
                                                       h_volume_match.group(1),
                                                       json=json)
                    break

            if w.expired:
                g.log.info(
                    "Heketi volume with name %s not created in %s sec"
                    % (name, timeout))
                raise

        self.addCleanup(heketi_volume_delete,
                        self.heketi_client_node,
                        self.heketi_server_url,
                        h_volume_info["id"],
                        raise_on_error=raise_on_cleanup_error)

        return h_volume_info
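
HEKETI_VOLUME_REGEX above is a suite constant: a pattern with a %s slot for the volume name whose first group captures the volume id from heketi-cli volume list output. A hypothetical equivalent, showing how the lookup uses it:

import re

# Hypothetical stand-in for the suite's HEKETI_VOLUME_REGEX constant.
HEKETI_VOLUME_REGEX = r"Id:(\S+)\s+Cluster:\S+\s+Name:%s"

listing = "Id:abc123 Cluster:def456 Name:myvol"
match = re.search(HEKETI_VOLUME_REGEX % "myvol", listing)
assert match and match.group(1) == "abc123"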
Example #19
    def test_delete_heketidb_volume(self):
        """
        Method to test heketidb volume deletion via heketi-cli
        """
        heketidbexists = False
        msg = "Error: Cannot delete volume containing the Heketi database"

        for i in range(0, 2):
            volume_info = heketi_ops.heketi_volume_create(
                self.heketi_client_node, self.heketi_server_url,
                10, json=True)

            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, volume_info["id"])

        volume_list_info = heketi_ops.heketi_volume_list(
            self.heketi_client_node,
            self.heketi_server_url, json=True)

        if volume_list_info["volumes"] == []:
            raise ExecutionError("Heketi volume list empty")

        for volume_id in volume_list_info["volumes"]:
            volume_info = heketi_ops.heketi_volume_info(
                self.heketi_client_node, self.heketi_server_url,
                volume_id, json=True)

            if volume_info["name"] == "heketidbstorage":
                heketidbexists = True
                delete_ret, delete_output, delete_error = (
                    heketi_ops.heketi_volume_delete(
                        self.heketi_client_node,
                        self.heketi_server_url, volume_id,
                        raw_cli_output=True))

                self.assertNotEqual(delete_ret, 0, "Return code not 0")
                self.assertEqual(
                    delete_error.strip(), msg,
                    "Invalid reason for heketidb deletion failure")

        if not heketidbexists:
            raise ExecutionError(
                "Warning: heketidbstorage doesn't exist in list of volumes")
Example #20
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
            self.heketi_dc_name, pod_amount=1)

        # Verify that metrics are not accessible when the heketi pod is down
        with self.assertRaises(exceptions.ExecutionError):
            get_heketi_metrics(
                self.heketi_client_node,
                self.heketi_server_url,
                prometheus_format=True)

        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)

        pod_name = get_pod_name_from_dc(
            self.ocp_master_node[0], self.heketi_dc_name, self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        for i in range(3):
            vol = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url, 1, json=True)

            self.assertTrue(vol)

            self.addCleanup(
                heketi_volume_delete,
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'],
                raise_on_error=False)

            vol_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        self.verify_heketi_metrics_with_topology_info()
Example #21
    def _cleanup_heketi_volumes(self, existing_volumes):
        """Cleanup created BHV and BV"""

        volumes = heketi_ops.heketi_volume_list(self.h_node,
                                                self.h_server,
                                                json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes))
        for volume in new_volumes:
            h_vol_info = heketi_ops.heketi_volume_info(self.h_node,
                                                       self.h_server,
                                                       volume,
                                                       json=True)
            if h_vol_info.get("block"):
                for block_vol in (
                        h_vol_info.get("blockinfo").get("blockvolume")):
                    heketi_ops.heketi_blockvolume_delete(self.h_node,
                                                         self.h_server,
                                                         block_vol,
                                                         raise_on_error=False)
            heketi_ops.heketi_volume_delete(self.h_node,
                                            self.h_server,
                                            volume,
                                            raise_on_error=False)
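
The helper deletes block volumes before their hosting file volume because heketi refuses to remove a block hosting volume that still contains block volumes. A toy model of that ordering constraint:

# Toy model: a hosting volume can only be removed once it is empty.
hosting_vol = {"id": "bhv-1", "blockinfo": {"blockvolume": ["bv-1", "bv-2"]}}

def delete_block_volume(bhv, block_id):
    bhv["blockinfo"]["blockvolume"].remove(block_id)

def delete_hosting_volume(bhv):
    if bhv["blockinfo"]["blockvolume"]:
        raise RuntimeError("hosting volume still has block volumes")
    return True

for block_vol in list(hosting_vol["blockinfo"]["blockvolume"]):
    delete_block_volume(hosting_vol, block_vol)
assert delete_hosting_volume(hosting_vol)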
Example #22
    def _create_distributed_replica_vol(self, validate_cleanup, block=False):

        # Create distributed vol
        vol_size_gb = self._get_vol_size()
        heketi_url = self.heketi_server_url
        h_volume_name = "autotests-heketi-volume-%s" % utils.get_random_str()
        try:
            heketi_vol = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                vol_size_gb,
                json=True,
                raise_on_cleanup_error=False,
                block=block)
        except AssertionError as e:
            # NOTE: rare situation when we need to decrease the size of a
            #       volume which we expect to be distributed.
            if not ('more required' in str(e) and
                    ('Insufficient suitable allocatable extents for '
                     'logical volume' in str(e))):
                raise
            g.log.info("Failed to create distributed '%s'Gb volume. "
                       "Trying to create another one, smaller by 1Gb."
                       % vol_size_gb)

            vol_size_gb -= 1
            heketi_vol = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                vol_size_gb,
                json=True,
                raise_on_cleanup_error=False,
                block=block)
        g.log.info("Successfully created distributed volume.")

        vol_name = heketi_vol['name']
        vol_id = heketi_vol["bricks"][0]["volume"]

        # Get gluster volume info
        g.log.info("Get gluster volume '%s' info" % vol_name)
        gluster_vol = get_volume_info('auto_get_gluster_endpoint',
                                      volname=vol_name)
        self.assertTrue(gluster_vol,
                        "Failed to get volume '%s' info" % vol_name)
        g.log.info("Successfully got volume '%s' info" % vol_name)
        gluster_vol = gluster_vol[vol_name]
        self.assertEqual(
            gluster_vol["typeStr"], "Distributed-Replicate",
            "'%s' gluster vol isn't a Distributed-Replicate volume" % vol_name)

        # Check amount of bricks
        brick_amount = len(gluster_vol['bricks']['brick'])
        self.assertEqual(
            brick_amount % 3, 0,
            "Brick amount is expected to be divisible by 3. "
            "Actual amount is '%s'" % brick_amount)
        self.assertGreater(
            brick_amount, 3, "Brick amount is expected to be bigger than 3. "
            "Actual amount is '%s'." % brick_amount)

        # If cleanup validation was not requested, return the volume ID;
        # otherwise validate that heketi handles dist-rep volume deletion
        if not validate_cleanup:
            return vol_id

        # Get the free space after creating heketi volume
        free_space_after_creating_vol = self._get_free_space()

        # Delete heketi volume
        g.log.info("Deleting heketi volume '%s'" % vol_id)
        volume_deleted = heketi_volume_delete(self.heketi_client_node,
                                              heketi_url, vol_id)
        self.assertTrue(volume_deleted,
                        "Failed to delete heketi volume '%s'" % vol_id)
        g.log.info("Heketi volume '%s' has successfully been deleted" % vol_id)

        # Check the heketi volume list
        g.log.info("List heketi volumes")
        heketi_volumes = heketi_volume_list(self.heketi_client_node,
                                            self.heketi_server_url,
                                            json=True)
        self.assertTrue(heketi_volumes, "Failed to list heketi volumes")
        g.log.info("Heketi volumes have successfully been listed")
        heketi_volumes = heketi_volumes.get('volumes', heketi_volumes)
        self.assertNotIn(vol_id, heketi_volumes)
        self.assertNotIn(vol_name, heketi_volumes)

        # Check the gluster volume list
        g.log.info("Get the gluster volume list")
        gluster_volumes = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(gluster_volumes, "Unable to get Gluster volume list")

        g.log.info("Successfully got Gluster volume list" % gluster_volumes)
        self.assertNotIn(vol_id, gluster_volumes)
        self.assertNotIn(vol_name, gluster_volumes)

        # Get the used space after deleting heketi volume
        free_space_after_deleting_vol = self._get_free_space()

        # Compare the free space before and after deleting the volume
        g.log.info("Comparing the free space before and after deleting volume")
        self.assertLessEqual(free_space_after_creating_vol + (3 * vol_size_gb),
                             free_space_after_deleting_vol)
        g.log.info(
            "Volume successfully deleted and space is reallocated. "
            "Free space after creating volume %s. "
            "Free space after deleting volume %s." %
            (free_space_after_creating_vol, free_space_after_deleting_vol))
Example #23
    def test_heketi_manual_cleanup_operation_in_bhv(self):
        """Validate heketi db cleanup will resolve the mismatch
           in the free size of the block hosting volume with failed
           block device create operations.
        """
        bhv_size_before, bhv_size_after, vol_count = [], [], 5
        ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes id list
        node_id_list = heketi_node_list(h_node, h_url)

        # Disable the 4th and subsequent nodes
        for node_id in node_id_list[3:]:
            heketi_node_disable(h_node, h_url, node_id)
            self.addCleanup(heketi_node_enable, h_node, h_url, node_id)

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        default_bhv_size = get_default_block_hosting_volume_size(
            h_node, self.heketi_dc_name)
        if default_bhv_size < h_volume_size:
            h_volume_name = "autotest-{}".format(utils.get_random_str())
            bhv_info = self.create_heketi_volume_with_name_and_wait(
                h_volume_name,
                free_space_available,
                raise_on_cleanup_error=False,
                block=True,
                json=True)
            free_space_available -= (
                int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
            h_volume_size = int(free_space_available / vol_count)

        # Get BHV list
        h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
        self.assertTrue(h_bhv_list, "Failed to get the BHV list")

        # Get BHV size
        for bhv in h_bhv_list:
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bhv_vol_size_before = vol_info.get("freesize")
            bhv_size_before.append(bhv_vol_size_before)

        # Kill Tcmu-runner service
        services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
        kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)

        # Register cleanups to restart the services and verify their status
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            self.addCleanup(wait_for_service_status_on_gluster_pod_or_node,
                            ocp_node, service, 'active', state, g_node)
            self.addCleanup(restart_service_on_gluster_pod_or_node, ocp_node,
                            service, g_node)

        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        # Create stale block volumes asynchronously
        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    heketi_blockvolume_create(h_node,
                                              h_url,
                                              h_volume_size,
                                              json=True)

        # Wait for pending operations to be generated
        self._check_for_pending_operations(h_node, h_url)

        # Restart the services
        for service in services:
            state = ('exited'
                     if service == 'gluster-block-target' else 'running')
            restart_service_on_gluster_pod_or_node(ocp_node, service, g_node)
            wait_for_service_status_on_gluster_pod_or_node(
                ocp_node, service, 'active', state, g_node)

        # Cleanup pending operation
        heketi_server_operation_cleanup(h_node, h_url)

        # Wait for the pending operations to get cleaned up
        for w in waiter.Waiter(timeout=120, interval=10):
            # Get BHV size, resetting the list so stale readings from
            # earlier iterations cannot skew the comparison
            bhv_size_after = []
            for bhv in h_bhv_list:
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
                bhv_vol_size_after = vol_info.get("freesize")
                bhv_size_after.append(bhv_vol_size_after)

            if (set(bhv_size_before) == set(bhv_size_after)):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Failed to validate volume size. Actual: {}, "
                "expected: {}".format(set(bhv_size_after),
                                      set(bhv_size_before)))
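
The "for w in waiter.Waiter(...)" / "if w.expired" idiom above recurs throughout these tests: the waiter yields itself until the timeout elapses, the loop body breaks on success, and "expired" afterwards distinguishes timeout from success. A minimal self-contained sketch of that contract (not the glusto implementation):

import time

class Waiter(object):
    """Minimal poller: iterate until the timeout elapses, sleeping
    `interval` seconds between attempts."""

    def __init__(self, timeout=60, interval=1):
        self.deadline = time.time() + timeout
        self.interval = interval
        self.expired = False
        self._started = False

    def __iter__(self):
        return self

    def __next__(self):
        if self._started:
            time.sleep(self.interval)
        self._started = True
        if time.time() >= self.deadline:
            self.expired = True
            raise StopIteration
        return self

def condition_met():
    return True  # stand-in for the real check, e.g. BHV sizes matching

for w in Waiter(timeout=2, interval=0.5):
    if condition_met():
        break
if w.expired:
    raise RuntimeError("condition not met before timeout")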
Example #24
    def test_verify_create_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of file/block volumes in db during
           volumes creation from heketi side
        """
        # Create large volumes to observe the pending operations
        vol_count, h_vol_creation_async_op = 3, []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Verify file/block volume pending operations before creation
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Delete heketi pod to clean db operations
        if (h_db_check_vol_before.get("pending")
                or h_db_check_before.get("bricks").get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 30:
                h_volume_size = 30
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Temporarily replace g.run with g.run_async in
        # heketi_blockvolume_create so it can run in the background. Also,
        # avoid parsing the output, as it won't be json at that moment;
        # parse it after reading the async operation results.
        def run_async(cmd, hostname, raise_on_error=True):
            return g.run_async(host=hostname, command=cmd)

        for count in range(vol_count):
            with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
                with mock.patch.object(command,
                                       'cmd_run',
                                       side_effect=run_async):
                    h_vol_creation_async_op.append(
                        eval("heketi_{}volume_create".format(vol_type))(
                            h_node, h_url, h_volume_size, json=True))

        # Check for pending operations
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No pending operations found during {}volume creation: "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify pending brick operations during creation
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expecting at least one pending brick operation")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expecting pending brick count to be a multiple of 3 but "
                "found {}".format(h_db_check_bricks.get("pending")))

        # Wait for all counts of pending operations to be zero
        for w in waiter.Waiter(timeout=300, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if not h_db_check_vol.get("pending"):
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "Expecting no pending operations after 300 sec but "
                "found {} operation(s)".format(h_db_check_vol.get("pending")))

        # Get heketi server DB details
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        h_db_check_bricks_after = h_db_check_after.get("bricks")

        # Verify if initial and final file/block volumes are same
        act_vol_count = h_db_check_vol_after.get("total")
        exp_vol_count = h_db_check_vol_before.get("total") + vol_count
        err_msg = (
            "Actual {} and expected {} {}volume counts do not match".format(
                act_vol_count, exp_vol_count, vol_type))
        self.assertEqual(act_vol_count, exp_vol_count, err_msg)

        # Verify if initial and final bricks are same for file volume
        volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
        new_volumes = list(set(volumes) - set(existing_volumes.get("volumes")))
        exp_brick_count = 0
        for volume in new_volumes:
            vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
            exp_brick_count += len(vol_info.get("bricks"))

        err_msg = "Actual {} and expected {} bricks counts are not matched"
        act_brick_count = h_db_check_bricks_after.get("total")
        self.assertEqual(act_brick_count, exp_brick_count,
                         err_msg.format(act_brick_count, exp_brick_count))
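
The mock.patch.object(command, 'cmd_run', side_effect=run_async) trick above swaps the library's synchronous command runner for an async launcher so several heketi CLI calls can be left in flight, which is what produces the pending db entries. A self-contained toy version of the same patching technique (the command namespace and functions here are invented for illustration):

import types
from unittest import mock

# Toy stand-in for the suite's `command` module.
command = types.SimpleNamespace(
    cmd_run=lambda cmd: "ran: %s" % cmd)  # normally blocks until done

def create_volume(size):
    # Stand-in for library code that calls the synchronous runner.
    return command.cmd_run("volume create --size=%d" % size)

launched = []

def run_async(cmd):
    # Pretend the command was started in the background.
    launched.append(cmd)
    return "async-handle"

# While patched, create_volume() no longer blocks on the command.
with mock.patch.object(command, 'cmd_run', side_effect=run_async):
    handle = create_volume(5)

assert handle == "async-handle"
assert launched == ["volume create --size=5"]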
Example #25
    def test_verify_delete_heketi_volumes_pending_entries_in_db(
            self, vol_type):
        """Verify pending entries of blockvolumes/volumes and bricks in heketi
           db during blockvolume/volume delete operation.
        """
        # Create a large volumes to observe the pending operation
        vol_count, volume_ids, async_obj = 10, [], []
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Verify file/block volume pending operations before creation
        h_db_check_before = heketi_db_check(h_node, h_url)
        h_db_check_bricks_before = h_db_check_before.get("bricks")
        h_db_check_vol_before = (h_db_check_before.get(
            "{}volumes".format(vol_type)))

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_node, h_url, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Delete heketi pod to clean db operations
        if (h_db_check_bricks_before.get("pending")
                or h_db_check_vol_before.get("pending")):
            self._respin_heketi_pod()

        # Calculate heketi volume size
        free_space, nodenum = get_total_free_space(h_node, h_url)
        free_space_available = int(free_space / nodenum)
        if free_space_available > vol_count:
            h_volume_size = int(free_space_available / vol_count)
            if h_volume_size > 50:
                h_volume_size = 50
        else:
            h_volume_size, vol_count = 1, free_space_available

        # Create BHV in case blockvolume size is greater than default BHV size
        if vol_type:
            default_bhv_size = get_default_block_hosting_volume_size(
                h_node, self.heketi_dc_name)
            if default_bhv_size < h_volume_size:
                h_volume_name = "autotest-{}".format(utils.get_random_str())
                bhv_info = self.create_heketi_volume_with_name_and_wait(
                    h_volume_name,
                    free_space_available,
                    raise_on_cleanup_error=False,
                    block=True,
                    json=True)
                free_space_available -= (
                    int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
                h_volume_size = int(free_space_available / vol_count)

        # Create file/block volumes
        for _ in range(vol_count):
            vol_id = eval("heketi_{}volume_create".format(vol_type))(
                h_node, h_url, h_volume_size, json=True).get("id")
            volume_ids.append(vol_id)
            self.addCleanup(eval("heketi_{}volume_delete".format(vol_type)),
                            h_node,
                            h_url,
                            vol_id,
                            raise_on_error=False)

        def run_async(cmd, hostname, raise_on_error=True):
            async_op = g.run_async(host=hostname, command=cmd)
            async_obj.append(async_op)
            return async_op
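
        # run_async mirrors the (cmd, hostname, ...) signature that
        # command.cmd_run is invoked with, so mock.patch below can
        # substitute it transparently; the async handles collected in
        # async_obj are joined later to verify every background delete.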

        bhv_list = []
        for vol_id in volume_ids:
            # Get BHV ids to delete in case of block volumes
            if vol_type:
                vol_info = (heketi_blockvolume_info(h_node,
                                                    h_url,
                                                    vol_id,
                                                    json=True))
                if vol_info.get("blockhostingvolume") not in bhv_list:
                    bhv_list.append(vol_info.get("blockhostingvolume"))

            # Temporarily replace the synchronous cmd_run with run_async
            # inside heketi_volume_delete / heketi_blockvolume_delete so
            # the delete command runs in the background.
            with mock.patch.object(command, 'cmd_run', side_effect=run_async):
                eval("heketi_{}volume_delete".format(vol_type))(h_node, h_url,
                                                                vol_id)

        # Wait for pending operations to be generated
        for w in waiter.Waiter(timeout=30, interval=3):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            if h_db_check_vol.get("pending"):
                h_db_check_bricks = h_db_check.get("bricks")
                break
        if w.expired:
            raise exceptions.ExecutionError(
                "No pending operations found during {}volume deletion: "
                "{}".format(vol_type, h_db_check_vol.get("pending")))

        # Verify brick pending operations during deletion
        if not vol_type:
            self.assertTrue(h_db_check_bricks.get("pending"),
                            "Expected at least one pending brick")
            self.assertFalse(
                h_db_check_bricks.get("pending") % 3,
                "Expected pending brick count to be a multiple of 3, "
                "but found {}".format(h_db_check_bricks.get("pending")))

        # Wait for file/block volume pending operations to complete
        for w in waiter.Waiter(timeout=120, interval=10):
            h_db_check = heketi_db_check(h_node, h_url)
            h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
            h_db_check_bricks = h_db_check.get("bricks")
            if ((not h_db_check_bricks.get("pending"))
                    and (not h_db_check_vol.get("pending"))):
                break
        if w.expired:
            raise AssertionError(
                "Failed to delete {}volumes after 120 secs".format(vol_type))

        # Check that all background processes have exited
        for obj in async_obj:
            ret, out, err = obj.async_communicate()
            self.assertFalse(
                ret, "Failed to delete {}volume due to error: {}".format(
                    vol_type, err))

        # Delete BHVs created during block volume creation
        if vol_type:
            for bhv_id in bhv_list:
                heketi_volume_delete(h_node, h_url, bhv_id)

        # Verify bricks and volume pending operations
        h_db_check_after = heketi_db_check(h_node, h_url)
        h_db_check_bricks_after = h_db_check_after.get("bricks")
        h_db_check_vol_after = (h_db_check_after.get(
            "{}volumes".format(vol_type)))
        act_brick_count = h_db_check_bricks_after.get("pending")
        act_vol_count = h_db_check_vol_after.get("pending")

        # Verify brick pending operations after deletion
        err_msg = "{} operations are pending for {} after {}volume deletion"
        if not vol_type:
            self.assertFalse(
                act_brick_count,
                err_msg.format(act_brick_count, "brick", vol_type))

        # Verify file/block volume pending operations after deletion
        self.assertFalse(act_vol_count,
                         err_msg.format(act_vol_count, "volume", vol_type))

        act_brick_count = h_db_check_bricks_after.get("total")
        act_vol_count = h_db_check_vol_after.get("total")
        exp_brick_count = h_db_check_bricks_before.get("total")
        exp_vol_count = h_db_check_vol_before.get("total")
        err_msg = "Actual {} and expected {} {} counts are not matched"

        # Verify if initial and final file/block volumes are same
        self.assertEqual(
            act_vol_count, exp_vol_count,
            err_msg.format(act_vol_count, exp_vol_count, "volume"))

        # Verify if initial and final bricks are same
        self.assertEqual(
            act_brick_count, exp_brick_count,
            err_msg.format(act_brick_count, exp_brick_count, "brick"))
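
Both polling loops above follow the same Waiter idiom; here is a minimal
standalone sketch of it, assuming waiter.Waiter yields until the timeout
elapses and then exposes an `expired` flag, as used throughout these
tests:

    # Poll heketi's db until pending brick operations drain (sketch).
    for w in waiter.Waiter(timeout=120, interval=10):
        h_db_check = heketi_db_check(h_node, h_url)
        if not h_db_check.get("bricks").get("pending"):
            break
    if w.expired:
        raise AssertionError("Pending bricks did not drain in 120 secs")
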
    def test_heketi_device_replacement_in_node(self):
        """Validate device replacement operation on single node"""

        h_client, h_server = self.heketi_client_node, self.heketi_server_url

        gluster_server_0 = {}
        try:
            gluster_server_0 = list(g.config["gluster_servers"].values())[0]
            manage_hostname = gluster_server_0["manage"]
            add_device_name = gluster_server_0["additional_devices"][0]
        except (KeyError, IndexError):
            self.skipTest(
                "Additional disk is not specified for node with following "
                "hostnames and IP addresses: {}, {}".format(
                    gluster_server_0.get('manage', '?'),
                    gluster_server_0.get('storage', '?')))

        # Get existing heketi volume list
        existing_volumes = heketi_volume_list(h_client, h_server, json=True)

        # Add cleanup function to clean stale volumes created during test
        self.addCleanup(self._cleanup_heketi_volumes,
                        existing_volumes.get("volumes"))

        # Get nodes info
        node_id_list = heketi_node_list(h_client, h_server)

        # Disable the 4th and any subsequent nodes
        if len(node_id_list) > 3:
            for node_id in node_id_list[3:]:
                heketi_node_disable(h_client, h_server, node_id)
                self.addCleanup(heketi_node_enable, h_client, h_server,
                                node_id)

        # Create volumes when 3 nodes are online
        vol_size, vol_count = 2, 4
        for _ in range(vol_count):
            vol_info = heketi_blockvolume_create(h_client,
                                                 h_server,
                                                 vol_size,
                                                 json=True)
            self.addCleanup(heketi_blockvolume_delete, h_client, h_server,
                            vol_info['id'])

        # Get node ID of the Gluster hostname
        topology_info = heketi_topology_info(h_client, h_server, json=True)
        self.assertIsNotNone(topology_info, "Failed to get topology info")
        self.assertIn("clusters", topology_info.keys(),
                      "Failed to get cluster "
                      "details from topology info")
        node_list = topology_info["clusters"][0]["nodes"]
        self.assertTrue(node_list,
                        "Cluster info command returned empty list of nodes")

        node_id = None
        for node in node_list:
            if manage_hostname == node['hostnames']["manage"][0]:
                node_id = node["id"]
                break
        self.assertTrue(
            node_id, "Failed to get node ID for hostname '{}'".format(
                manage_hostname))

        # Add an extra device, then remember its ID and size
        device_id_new, device_size_new = self._add_heketi_device(
            add_device_name, node_id)

        # Select an existing device on the node (not the new one) to remove
        device_name, device_id = None, None
        node_info_after_addition = heketi_node_info(h_client,
                                                    h_server,
                                                    node_id,
                                                    json=True)
        for device in node_info_after_addition["devices"]:
            if (device["name"] != add_device_name
                    and device["storage"]["total"] == device_size_new):
                device_name = device["name"]
                device_id = device["id"]
                break
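        # (Matching on device["storage"]["total"] picks an existing device
        # with the same capacity as the new one, so the node keeps the
        # same usable size after replacement.)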

        self.assertIsNotNone(device_name, "Failed to get device name")
        self.assertIsNotNone(device_id, "Failed to get device id")
        self.addCleanup(heketi_device_enable,
                        h_client,
                        h_server,
                        device_id,
                        raise_on_error=False)
        self.addCleanup(heketi_device_add,
                        h_client,
                        h_server,
                        device_name,
                        node_id,
                        raise_on_error=False)
        heketi_device_disable(h_client, h_server, device_id)
        heketi_device_remove(h_client, h_server, device_id)
        heketi_device_delete(h_client, h_server, device_id)
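
The disable/remove/delete sequence above is the standard heketi device
replacement flow; a minimal helper sketch built from the same calls (the
helper name itself is hypothetical):

    def replace_heketi_device(h_client, h_server, device_id):
        # Heketi requires a device to be disabled (no new bricks) and
        # removed (existing bricks migrated to peers) before deletion.
        heketi_device_disable(h_client, h_server, device_id)
        heketi_device_remove(h_client, h_server, device_id)
        heketi_device_delete(h_client, h_server, device_id)
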
    def test_heketi_node_add_with_valid_cluster(self):
        """Test heketi node add operation with valid cluster id"""
        if (openshift_storage_version.get_openshift_storage_version()
                < "3.11.4"):
            self.skipTest(
                "This test case is not supported for < OCS 3.11.4 builds due "
                "to bug BZ-1732831")

        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        ocp_node = self.ocp_master_node[0]

        # Get heketi endpoints before adding node
        h_volume_ids = heketi_ops.heketi_volume_list(
            h_client, h_server, json=True)
        h_endpoints_before_new_node = heketi_ops.heketi_volume_endpoint_patch(
            h_client, h_server, h_volume_ids["volumes"][0])

        cluster_info = heketi_ops.heketi_cluster_list(
            h_client, h_server, json=True)
        storage_hostname, storage_ip = self.add_heketi_node_to_cluster(
            cluster_info["clusters"][0])

        # Get heketi nodes and validate the newly added node
        h_node_ids = heketi_ops.heketi_node_list(h_client, h_server, json=True)
        for h_node_id in h_node_ids:
            node_hostname = heketi_ops.heketi_node_info(
                h_client, h_server, h_node_id, json=True)
            if node_hostname["hostnames"]["manage"][0] == storage_hostname:
                break
            node_hostname = None

        err_msg = ("Newly added heketi node %s not found in heketi node "
                   "list %s" % (storage_hostname, h_node_ids))
        self.assertTrue(node_hostname, err_msg)

        # Check gluster peer status for newly added node
        if self.is_containerized_gluster():
            gluster_pods = openshift_ops.get_ocp_gluster_pod_details(ocp_node)
            gluster_pod = [
                gluster_pod["pod_name"]
                for gluster_pod in gluster_pods
                if gluster_pod["pod_hostname"] == storage_hostname][0]

            gluster_peer_status = peer_ops.get_peer_status(
                podcmd.Pod(ocp_node, gluster_pod))
        else:
            gluster_peer_status = peer_ops.get_peer_status(
                storage_hostname)
        self.assertEqual(
            len(gluster_peer_status), len(self.gluster_servers))

        err_msg = "Expected peer status is 1 and actual is %s"
        for peer in gluster_peer_status:
            peer_status = int(peer["connected"])
            self.assertEqual(peer_status, 1, err_msg % peer_status)
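        # ("connected" is 1 when the peer is reachable and part of the
        # trusted storage pool; any other value indicates a disconnected
        # or rejected peer.)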

        # Get heketi endpoints after adding node
        h_endpoints_after_new_node = heketi_ops.heketi_volume_endpoint_patch(
            h_client, h_server, h_volume_ids["volumes"][0])

        # Get openshift endpoints and patch them with heketi endpoints
        heketi_db_endpoint = openshift_ops.oc_get_custom_resource(
            ocp_node, "dc", name=self.heketi_dc_name,
            custom=".:spec.template.spec.volumes[*].glusterfs.endpoints")[0]
        openshift_ops.oc_patch(
            ocp_node, "ep", heketi_db_endpoint, h_endpoints_after_new_node)
        self.addCleanup(
            openshift_ops.oc_patch, ocp_node, "ep", heketi_db_endpoint,
            h_endpoints_before_new_node)
        ep_addresses = openshift_ops.oc_get_custom_resource(
            ocp_node, "ep", name=heketi_db_endpoint,
            custom=".:subsets[*].addresses[*].ip")[0].split(",")

        err_msg = "Hostname %s not present in endpoints %s" % (
            storage_ip, ep_addresses)
        self.assertIn(storage_ip, ep_addresses, err_msg)
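
One detail worth noting about the cleanup above: unittest runs addCleanup
callbacks in LIFO order, so registering the revert-patch immediately after
the patch guarantees the original heketi endpoints are restored even if a
later assertion fails.

    # LIFO demo (plain unittest semantics, names illustrative):
    #   self.addCleanup(undo_step_one)   # runs second
    #   self.addCleanup(undo_step_two)   # runs first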