def test_recreate_app_pod_with_attached_block_pv(self):
        """Validate app pod attached block device I/O after restart"""
        datafile_path = '/mnt/temporary_test_file'

        # Create a DC with a POD and the PVC attached to it
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Write data
        write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
        self.cmd_run(write_cmd % (pod_name, datafile_path))

        # Recreate app POD
        scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
        scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
        new_pod_name = get_pod_name_from_dc(self.node, dc_name)

        # Check presence of the previously written file
        check_existing_file_cmd = (
            "oc exec %s -- ls %s" % (new_pod_name, datafile_path))
        out = self.cmd_run(check_existing_file_cmd)
        self.assertIn(datafile_path, out)

        # Perform I/O on the new POD
        self.cmd_run(write_cmd % (new_pod_name, datafile_path))
    def test_block_vol_offline_expand(self):
        """Test blockvol expansion while PVC is not in use"""
        node = self.ocp_master_node[0]

        pvc_name, dc_name, bvol_info = (
            self._block_vol_expand_common_offline_vs_online(False))

        # create the offline block volume expand job and wait for it to complete
        jobname = oc_create_offline_block_volume_expand_job(node, pvc_name)
        self.addCleanup(oc_delete, node, 'job', jobname)
        for w in waiter.Waiter(300, 5):
            if is_job_complete(node, jobname):
                break
        if w.expired:
            raise AssertionError(
                "block expand job {} is not completed".format(jobname))

        # verify the expanded size at the mount point
        scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=1)
        pod_name = get_pod_name_from_dc(node, dc_name[0])
        ret, size, _ = oc_rsh(
            node, pod_name,
            'df -kh /mnt | sed "/Filesystem/d" | awk \'{print $2}\' '
            '| sed "s/G//"')
        self.assertFalse(ret, "Failed to get size from client side")
        self.assertEqual(
            int(float(size)), bvol_info["size"], "new size is not "
            "reflected at mount point after block volume expand")
    def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
        """Validate retain policy for gluster-block after PVC deletion"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)

        try:
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
            oc_delete(self.node, 'dc', dc_name)

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)

        custom = [
            r':.metadata.annotations."gluster\.org\/volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]
        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        # check the reclaim policy of the PV
        self.assertEqual(reclaim_policy, 'Retain')

        # delete the pvc
        oc_delete(self.node, 'pvc', self.pvc_name)

        # verify the PVC does not get removed within the timeout
        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        # get the block volume list and verify the volume is retained
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertIn(vol_id, blocklist)

        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_id)
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertNotIn(vol_id, blocklist)
        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [
            r':.metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a POD and the PVC attached to it.
        try:
            dc_name = oc_create_app_dc_with_io(
                self.node, self.pvc_name, image=self.io_container_image_cirros)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a POD and the PVC attached to it.
        try:
            dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        heketi_volume_delete(self.heketi_client_node,
                             self.heketi_server_url, vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def _block_vol_expand_common_offline_vs_online(self, is_online_expand):
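        """Expand the block volume behind a newly created PVC to 2 GB, scaling
        the app POD down first when 'is_online_expand' is False. Returns
        pvc_name, dc_name and the expanded block volume info."""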
        node = self.ocp_master_node[0]
        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        version = heketi_version.get_heketi_version(h_node)
        if version < '9.0.0-14':
            self.skipTest("heketi-client package {} does not support "
                          "blockvolume expand".format(version.v_str))

        pvc_name = self.create_and_wait_for_pvc()
        dc_name = self.create_dc_with_pvc(pvc_name)
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # get block volume id
        custom = r":.metadata.annotations.'gluster\.org\/volume-id'"
        bvol_id = oc_get_custom_resource(node, 'pv', custom, pv_name)
        self.assertNotEqual(bvol_id[0], "<none>",
                            "volume name not found from pv {}".format(pv_name))
        bvol_info = heketi_blockvolume_info(h_node,
                                            h_server,
                                            bvol_id[0],
                                            json=True)

        # verify required blockhostingvolume free size
        bhv_id = bvol_info["blockhostingvolume"]
        bhv_info = heketi_volume_info(h_node, h_server, bhv_id, json=True)
        if bhv_info["blockinfo"]["freesize"] < 1:
            self.skipTest("blockhostingvolume doesn't have required freespace")

        if not is_online_expand:
            scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=0)

        # expand block volume and verify usable size
        bvol_info = heketi_blockvolume_expand(h_node,
                                              h_server,
                                              bvol_id[0],
                                              2,
                                              json=True)
        self.assertEqual(bvol_info["size"], 2,
                         "Block volume expand does not works")
        self.assertEqual(
            bvol_info["size"], bvol_info["usablesize"],
            "block volume size is not equal to the usablesize: {}".format(
                bvol_info))

        return pvc_name, dc_name, bvol_info
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        scale_dc_pod_amount_and_wait(self.ocp_master_node[0],
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.ocp_master_node[0],
                        self.heketi_dc_name,
                        pod_amount=1)

        # verify that metrics are not accessible when the heketi pod is down
        with self.assertRaises(AssertionError):
            get_heketi_metrics(self.heketi_client_node,
                               self.heketi_server_url,
                               prometheus_format=True)

        scale_dc_pod_amount_and_wait(self.ocp_master_node[0],
                                     self.heketi_dc_name,
                                     pod_amount=1)

        pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                        self.heketi_dc_name,
                                        self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        for i in range(3):
            vol = heketi_volume_create(self.heketi_client_node,
                                       self.heketi_server_url,
                                       1,
                                       json=True)

            self.assertTrue(vol)

            self.addCleanup(heketi_volume_delete,
                            self.heketi_client_node,
                            self.heketi_server_url,
                            vol['id'],
                            raise_on_error=False)

            vol_list = heketi_volume_list(self.heketi_client_node,
                                          self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        self.verify_heketi_metrics_with_topology_info()
    def test_heketi_metrics_heketipod_failure(self):
        """Validate heketi metrics after heketi pod failure"""
        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=0)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
            self.heketi_dc_name, pod_amount=1)

        # verify that metrics are not accessible when the heketi pod is down
        with self.assertRaises(exceptions.ExecutionError):
            get_heketi_metrics(
                self.heketi_client_node,
                self.heketi_server_url,
                prometheus_format=True)

        scale_dc_pod_amount_and_wait(
            self.ocp_master_node[0], self.heketi_dc_name, pod_amount=1)

        pod_name = get_pod_name_from_dc(
            self.ocp_master_node[0], self.heketi_dc_name, self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, wait_step=5)

        for i in range(3):
            vol = heketi_volume_create(
                self.heketi_client_node,
                self.heketi_server_url, 1, json=True)

            self.assertTrue(vol)

            self.addCleanup(
                heketi_volume_delete,
                self.heketi_client_node,
                self.heketi_server_url,
                vol['id'],
                raise_on_error=False)

            vol_list = heketi_volume_list(
                self.heketi_client_node,
                self.heketi_server_url)

            self.assertIn(vol['id'], vol_list)

        self.verify_heketi_metrics_with_topology_info()
    def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
        """Validate deletion of PVC's when heketi is down"""

        # Create storage class, secret and PVCs
        self.create_storage_class()
        self.pvc_name_list = self.create_and_wait_for_pvcs(
            1, 'pvc-heketi-down', 3)

        # remove heketi-pod
        scale_dc_pod_amount_and_wait(self.ocp_client[0], self.heketi_dc_name,
                                     0, self.storage_project_name)
        try:
            # delete pvc
            for pvc in self.pvc_name_list:
                oc_delete(self.ocp_client[0], 'pvc', pvc)
            for pvc in self.pvc_name_list:
                with self.assertRaises(ExecutionError):
                    wait_for_resource_absence(self.ocp_client[0],
                                              'pvc',
                                              pvc,
                                              interval=3,
                                              timeout=30)
        finally:
            # bring back heketi-pod
            scale_dc_pod_amount_and_wait(self.ocp_client[0],
                                         self.heketi_dc_name, 1,
                                         self.storage_project_name)

        # verify the PVCs are deleted
        for pvc in self.pvc_name_list:
            wait_for_resource_absence(self.ocp_client[0],
                                      'pvc',
                                      pvc,
                                      interval=1,
                                      timeout=120)

        # create a new PVC
        self.create_and_wait_for_pvc()
    def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
        """Validate PVC deletion when heketi is down"""

        # Create Secret, SC and PVCs
        self.create_storage_class()
        self.pvc_name_list = self.create_and_wait_for_pvcs(
            1, 'pvc-heketi-down', 3)

        # remove heketi-pod
        scale_dc_pod_amount_and_wait(self.ocp_client[0],
                                     self.heketi_dc_name,
                                     0,
                                     self.storage_project_name)
        try:
            # delete pvc
            for pvc in self.pvc_name_list:
                oc_delete(self.ocp_client[0], 'pvc', pvc)
            for pvc in self.pvc_name_list:
                with self.assertRaises(ExecutionError):
                    wait_for_resource_absence(
                       self.ocp_client[0], 'pvc', pvc,
                       interval=3, timeout=30)
        finally:
            # bring back heketi-pod
            scale_dc_pod_amount_and_wait(self.ocp_client[0],
                                         self.heketi_dc_name,
                                         1,
                                         self.storage_project_name)

        # verify the PVCs are deleted
        for pvc in self.pvc_name_list:
            wait_for_resource_absence(self.ocp_client[0], 'pvc',
                                      pvc,
                                      interval=1, timeout=120)

        # create a new PVC
        self.create_and_wait_for_pvc()
    def test_pv_resize_when_heketi_down(self):
        """Create a PVC and try to expand it when heketi is down, It should
        fail. After heketi is up, expand PVC should work.
        """
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        custom = (r':metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi-volume-id"')
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]

        h_vol_info = heketi_ops.heketi_volume_info(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   vol_id,
                                                   json=True)

        # Bring the heketi POD down
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.node,
                        self.heketi_dc_name,
                        pod_amount=1)

        cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)

        resize_pvc(self.node, pvc_name, 2)
        wait_for_events(self.node,
                        pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_type='Warning',
                        event_reason='VolumeResizeFailed')

        # Verify volume was not expanded
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
        self.assertEqual(len(vol_info['bricks']['brick']),
                         len(h_vol_info['bricks']))

        # Bring the heketi POD up
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=1)

        # Verify volume expansion
        verify_pvc_size(self.node, pvc_name, 2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)
        self.assertLess(len(h_vol_info['bricks']),
                        len(vol_info['bricks']['brick']))

        # Wait for remount after expansion
        for w in waiter.Waiter(timeout=30, interval=5):
            ret, out, err = oc_rsh(self.node, pod_name,
                                   "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(ret,
                             'Failed with err: %s and Output: %s' % (err, out))
            if out.strip() == '2.0G':
                break

        # Write data to confirm more space is available than before
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)

        # Verify pod is running
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)
    def _set_heketi_zones(self, unique_zones_amount=1):
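        """Reassign node zones in the Heketi DB so that exactly
        'unique_zones_amount' distinct zones are in use, re-import the DB and
        recreate the Heketi POD; the original DB is restored on cleanup."""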
        h = heketi_ops.cmd_run_on_heketi_pod
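        # Find the mount point of the Heketi DB inside the Heketi POD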
        heketi_db_dir = h("mount | grep heketidbstorage | awk '{print $3}'")

        # Copy the Heketi DB file to a backup file to be able to export it
        h("cp %s/heketi.db heketi_export.db" % heketi_db_dir)

        # Export the Heketi DB to a json file
        h("rm -f heketi_db_export.json")
        h("heketi db export --dbfile=heketi_export.db "
          "--jsonfile=heketi_db_export.json")
        h("rm heketi_export.db")

        # Read json file info
        heketi_db_original_str = h("cat heketi_db_export.json")
        heketi_db_data = json.loads(heketi_db_original_str)

        # Process the Heketi DB data
        node_count = len([
            n for n in heketi_db_data['nodeentries'].values()
            if (n['State'] == 'online'
                and n['Info']['cluster'] == self.cluster_id)
        ])
        if node_count < unique_zones_amount:
            self.skipTest(
                "Not enough amount of online nodes (%s) to set '%s' unique "
                "zones." % (node_count, unique_zones_amount))
        zones = itertools.cycle(range(unique_zones_amount))
        for n in heketi_db_data['nodeentries'].values():
            if (n['State'] == 'online'
                    and n['Info']['cluster'] == self.cluster_id):
                n['Info']['zone'] = next(zones) + 1

        # Schedule revert-back of the Heketi DB
        cmd_put_data_to_a_json_file = (
            "bash -ec \"\"\"echo '%s' > heketi_db_import.json\"\"\"")
        cmd_remove_heketi_import_db_file = "rm -f heketi_import.db"
        cmd_import_json_file = (
            "heketi db import --jsonfile=heketi_db_import.json "
            "--dbfile=heketi_import.db")
        cmd_apply_imported_data = ("mv -f heketi_import.db %s/heketi.db" %
                                   heketi_db_dir)
        cmd_remove_heketi_import_json_files = (
            "rm -f heketi_db_export.json heketi_db_import.json")

        for i in (1, 0):
            self.addCleanup(openshift_ops.scale_dc_pod_amount_and_wait,
                            self.node,
                            self.heketi_dc_name,
                            i,
                            wait_step=3)
        for cmd in (cmd_remove_heketi_import_json_files,
                    cmd_apply_imported_data, cmd_import_json_file,
                    cmd_remove_heketi_import_db_file,
                    cmd_put_data_to_a_json_file %
                    (heketi_db_original_str.replace('"', '\\"'))):
            self.addCleanup(h, cmd)

        # Import the Heketi DB data
        heketi_db_updated_str = json.dumps(heketi_db_data).replace('"', '\\"')
        for cmd in (cmd_put_data_to_a_json_file % heketi_db_updated_str,
                    cmd_remove_heketi_import_db_file, cmd_import_json_file,
                    cmd_apply_imported_data,
                    cmd_remove_heketi_import_json_files):
            h(cmd)

        # Apply changes by recreating the Heketi POD
        openshift_ops.scale_dc_pod_amount_and_wait(self.node,
                                                   self.heketi_dc_name,
                                                   0,
                                                   wait_step=3)
        openshift_ops.scale_dc_pod_amount_and_wait(self.node,
                                                   self.heketi_dc_name,
                                                   1,
                                                   wait_step=3)

        return heketi_db_data
    def test_set_heketi_vol_size_and_brick_amount_limits(self):
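        """Validate the 'brick_min_size_gb', 'brick_max_size_gb' and
        'max_bricks_per_volume' limits applied via the Heketi config secret"""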
        # Get Heketi secret name
        cmd_get_heketi_secret_name = (
            "oc get dc -n %s %s -o jsonpath='{.spec.template.spec.volumes"
            "[?(@.name==\"config\")].secret.secretName}'" %
            (self.storage_project_name, self.heketi_dc_name))
        heketi_secret_name = self.cmd_run(cmd_get_heketi_secret_name)

        # Read Heketi secret data
        self.node = self.ocp_master_node[0]
        heketi_secret_data_str_base64 = oc_get_custom_resource(
            self.node,
            "secret",
            ":.data.'heketi\.json'",  # noqa
            name=heketi_secret_name)[0]
        heketi_secret_data_str = self.cmd_run("echo %s | base64 -d" %
                                              heketi_secret_data_str_base64)
        heketi_secret_data = json.loads(heketi_secret_data_str)

        # Update Heketi secret data
        brick_min_size_gb, brick_max_size_gb = 2, 4
        heketi_secret_data["glusterfs"].update({
            "brick_min_size_gb": brick_min_size_gb,
            "brick_max_size_gb": brick_max_size_gb,
            "max_bricks_per_volume": 3,
        })
        heketi_secret_data_patched = json.dumps(heketi_secret_data)
        heketi_secret_data_str_encoded = self.cmd_run(
            "echo '%s' |base64" % heketi_secret_data_patched).replace(
                '\n', '')
        h_client, h_server = self.heketi_client_node, self.heketi_server_url
        try:
            # Patch Heketi secret
            cmd_patch_heketi_secret = (
                'oc patch secret -n %s %s -p '
                '"{\\"data\\": {\\"heketi.json\\": \\"%s\\"}}"') % (
                    self.storage_project_name, heketi_secret_name, "%s")
            self.cmd_run(cmd_patch_heketi_secret %
                         heketi_secret_data_str_encoded)

            # Recreate the Heketi pod so it picks up the updated configuration
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 0)
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 1)

            # Try to create volumes that are too small and too big;
            # both must fail because they fall outside the allowed range
            for gb in (brick_min_size_gb - 1, brick_max_size_gb + 1):
                try:
                    vol_1 = heketi_volume_create(h_client,
                                                 h_server,
                                                 size=gb,
                                                 json=True)
                except AssertionError:
                    pass
                else:
                    self.addCleanup(heketi_volume_delete, h_client, h_server,
                                    vol_1['id'])
                    self.assertFalse(
                        vol_1,
                        "Volume '%s' got unexpectedly created. Heketi server "
                        "configuration haven't made required effect." %
                        (vol_1.get('id', 'failed_to_get_heketi_vol_id')))

            # Create the smallest allowed volume
            vol_2 = heketi_volume_create(h_client,
                                         h_server,
                                         size=brick_min_size_gb,
                                         json=True)
            self.addCleanup(heketi_volume_delete, h_client, h_server,
                            vol_2['id'])

            # Try to expand volume, it must fail due to the brick amount limit
            self.assertRaises(AssertionError, heketi_volume_expand, h_client,
                              h_server, vol_2['id'], 2)

            # Create the largest allowed volume
            vol_3 = heketi_volume_create(h_client,
                                         h_server,
                                         size=brick_max_size_gb,
                                         json=True)
            heketi_volume_delete(h_client, h_server, vol_3['id'])
        finally:
            # Revert the Heketi configuration back
            self.cmd_run(cmd_patch_heketi_secret %
                         heketi_secret_data_str_base64)
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 0)
            scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 1)

        # Create volume less than the old minimum limit
        vol_4 = heketi_volume_create(h_client,
                                     h_server,
                                     size=(brick_min_size_gb - 1),
                                     json=True)
        self.addCleanup(heketi_volume_delete, h_client, h_server, vol_4['id'])

        # Create volume bigger than the old maximum limit and expand it
        vol_5 = heketi_volume_create(h_client,
                                     h_server,
                                     size=(brick_max_size_gb + 1),
                                     json=True)
        self.addCleanup(heketi_volume_delete, h_client, h_server, vol_5['id'])
        heketi_volume_expand(h_client, h_server, vol_5['id'], 2)