def test_node_failure_pv_mounted(self):
        """Test node failure when PV is mounted with app pods running"""
        filepath = "/mnt/file_for_testing_volume.log"
        pvc_name = self.create_and_wait_for_pvc()

        dc_and_pod_names = self.create_dcs_with_pvc(pvc_name)
        dc_name, pod_name = dc_and_pod_names[pvc_name]

        mount_point = "df -kh /mnt -P | tail -1 | awk '{{print $1}}'"
        pod_cmd = "oc exec {} -- {}".format(pod_name, mount_point)
        hostname = command.cmd_run(pod_cmd, hostname=self.node)
        hostname = hostname.split(":")[0]

        vm_name = find_vm_name_by_ip_or_hostname(hostname)
        self.addCleanup(power_on_vm_by_name, vm_name)
        power_off_vm_by_name(vm_name)

        cmd = "dd if=/dev/urandom of={} bs=1K count=100".format(filepath)
        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(
            ret, "Failed to execute command {} on {} with error {}".format(
                cmd, self.node, err))

        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(
            ret, "Failed to execute command {} on {} with error {}".format(
                cmd, self.node, err))
    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
Example #3
    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
Example #4
    def test_logging_es_pod_pvc_all_freespace_utilization(self):
        """Validate logging by utilizing all the free space of block PVC bound
           to elsaticsearch pod"""

        # Fetch pod and validate iscsi and multipath
        es_pod, _ = self._get_es_pod_and_verify_iscsi_sessions()

        # Get the available free space
        mount_point = '/elasticsearch/persistent'
        cmd_free_space = (
            "df -kh {} | awk '{{print $4}}' | tail -1".format(mount_point))
        old_available_space = openshift_ops.oc_rsh(self._master, es_pod,
                                                   cmd_free_space)[1]

        # Fill all the available space
        file_name = '{}/file'.format(mount_point)
        cmd_fill_space = ("fallocate -l {} {}".format(old_available_space,
                                                      file_name))
        with self.assertRaises(AssertionError):
            openshift_ops.oc_rsh(self._master, es_pod, cmd_fill_space)

        # Cleanup the filled space
        cmd_remove_file = 'rm {}'.format(file_name)
        self.addCleanup(openshift_ops.oc_rsh, self._master, es_pod,
                        cmd_remove_file)
Example #5
    def test_pv_resize_with_prefix_for_name_and_size(
            self, create_vol_name_prefix=False, valid_size=True):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # A valid resize should apply; an invalid resize must not change size
        if valid_size:
            cmd = ("dd if=/dev/urandom of=%sfile2 "
                   "bs=100K count=10000") % dir_path
            with self.assertRaises(AssertionError):
                ret, out, err = oc_rsh(node, pod_name, cmd)
                msg = ("Command '%s' was expected to fail on '%s' node. "
                       "But it returned following: ret is '%s', err is '%s' "
                       "and out is '%s'" % (cmd, node, ret, err, out))
                raise ExecutionError(msg)
            pvc_size = 2
            resize_pvc(node, pvc_name, pvc_size)
            verify_pvc_size(node, pvc_name, pvc_size)
            verify_pv_size(node, pv_name, pvc_size)
        else:
            invalid_pvc_size = 'ten'
            with self.assertRaises(AssertionError):
                resize_pvc(node, pvc_name, invalid_pvc_size)
            verify_pvc_size(node, pvc_name, 1)
            verify_pv_size(node, pv_name, 1)

        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
Example #6
    def test_metrics_cassandra_pod_pvc_all_freespace_utilization(self):
        """Validate metrics by utilizing all the free space of block PVC bound
           to cassandra pod"""

        # Validate iscsi and multipath
        hawkular_cassandra, _, _, _, _ = (
            self.verify_cassandra_pod_multipath_and_iscsi())

        # Get the available free space
        mount_point = '/cassandra_data'
        cmd_free_space = (
            "df -kh {} | awk '{{print $4}}' | tail -1".format(mount_point))
        old_available_space = oc_rsh(self.master, hawkular_cassandra,
                                     cmd_free_space)[1]

        # Fill all the available space
        file_name = '{}/file'.format(mount_point)
        cmd_fill_space = ("fallocate -l {} {}".format(old_available_space,
                                                      file_name))
        with self.assertRaises(AssertionError):
            oc_rsh(self.master, hawkular_cassandra, cmd_fill_space)

        # Cleanup the filled space
        cmd_remove_file = 'rm {}'.format(file_name)
        self.addCleanup(oc_rsh, self.master, hawkular_cassandra,
                        cmd_remove_file)
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb = 1

        available_size_gb = self._available_disk_free_space()

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attached PVC to it
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            exceed_size = available_size_gb + 10

            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, exceed_size)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
Example #8
    def test_kill_bhv_fsd_while_es_pod_running(self):
        """Validate killing of bhv fsd won't effect es pod io's"""

        # Fetch pod and PVC names and validate iscsi and multipath
        es_pod, pvc_name = self._get_es_pod_and_verify_iscsi_sessions()

        # Get the bhv name
        gluster_node = list(self._registry_servers_info.keys())[0]
        openshift_ops.switch_oc_project(self._master,
                                        self._registry_project_name)
        bhv_name = self.get_block_hosting_volume_by_pvc_name(
            pvc_name,
            heketi_server_url=self._registry_heketi_server_url,
            gluster_node=gluster_node)

        # Get the PID of one of the bhv's bricks
        gluster_volume_status = gluster_ops.get_gluster_vol_status(bhv_name)
        pid = None
        for g_node, g_node_data in gluster_volume_status.items():
            if g_node != gluster_node:
                continue
            for process_name, process_data in g_node_data.items():
                if not process_name.startswith("/var"):
                    continue
                pid = process_data["pid"]
                # When a brick is down, its PID is returned as -1,
                # which is unexpected here. So, add an appropriate assertion.
                self.assertNotEqual(
                    pid, "-1", "Got unexpected PID (-1) for '{}' gluster vol "
                    "on '{}' node.".format(bhv_name, gluster_node))
                break
            self.assertTrue(
                pid, "Could not find 'pid' in Gluster vol data for '{}' "
                "Gluster node. Data: {}".format(gluster_node,
                                                gluster_volume_status))
            break

        # Kill gluster vol brick process using found pid
        cmd_kill = "kill -9 {}".format(pid)
        cmd_start_vol = "gluster v start {} force".format(bhv_name)
        openshift_ops.cmd_run_on_gluster_pod_or_node(self._master, cmd_kill,
                                                     gluster_node)
        self.addCleanup(openshift_ops.cmd_run_on_gluster_pod_or_node,
                        self._master, cmd_start_vol, gluster_node)
        self.addCleanup(openshift_ops.switch_oc_project, self._master,
                        self._registry_project_name)

        # Run I/O on ES pod
        openshift_ops.switch_oc_project(self._master,
                                        self._logging_project_name)
        file_name = '/elasticsearch/persistent/file1'
        cmd_run_io = 'dd if=/dev/urandom of={} bs=4k count=10000'.format(
            file_name)
        cmd_remove_file = 'rm {}'.format(file_name)
        openshift_ops.oc_rsh(self._master, es_pod, cmd_run_io)
        self.addCleanup(openshift_ops.oc_rsh, self._master, es_pod,
                        cmd_remove_file)
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'],
                self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        with self.assertRaises(AssertionError):
            ret, out, err = oc_rsh(node, pod_name, cmd)
            msg = ("Command '%s' was expected to fail on '%s' node. "
                   "But it returned following: ret is '%s', err is '%s' "
                   "and out is '%s'" % (cmd, node, ret, err, out))
            raise ExecutionError(msg)

        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
Example #10
    def test_dynamic_provisioning_block_vol_with_custom_prefix(self):
        """Verify creation of block volume with custom prefix
        """
        node = self.ocp_master_node[0]
        prefix = "autotest-{}".format(utils.get_random_str())

        # cmd to get available space
        cmd_get_free_space = "df -h | grep '/mnt'| awk '{{print $4}}'"

        # cmd to create a 100M file
        cmd_run_io = 'dd if=/dev/zero of=/mnt/testfile bs=1024 count=102400'

        # Create sc with prefix
        sc_name = self.create_storage_class(sc_name_prefix=prefix,
                                            create_vol_name_prefix=True,
                                            vol_name_prefix=prefix)

        # Create pvc and wait for it to be in bound state
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)

        # Verify blockvolume list with prefix
        h_block_vol = heketi_blockvolume_list_by_name_prefix(
            self.heketi_client_node, self.heketi_server_url, prefix)
        self.assertIsNotNone(
            h_block_vol,
            "Failed to find blockvolume with prefix {}".format(prefix))
        self.assertTrue(
            h_block_vol[0][2].startswith(prefix),
            "Failed to create blockvolume with the prefix {}".format(prefix))

        # Create app pod
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        err_msg = ("Failed to get the free space for the mount point of the "
                   "app pod {} with error {}")
        # Get free space of app pod before IO run
        _, free_space_before, err = oc_rsh(node, pod_name, cmd_get_free_space)
        self.assertTrue(free_space_before, err_msg.format(pod_name, err))

        # Running IO on the app pod
        ret, _, err = oc_rsh(node, pod_name, cmd_run_io)
        self.assertFalse(
            ret, "Failed to run the Io with the error msg {}".format(err))

        # Get free space of app pod after IO run
        _, free_space_after, err = oc_rsh(node, pod_name, cmd_get_free_space)
        self.assertTrue(free_space_after, err_msg.format(pod_name, err))
        self.assertGreaterEqual(
            free_space_before, free_space_after,
            "Expected free space before IO ({}) to be greater than or equal "
            "to free space after IO ({}), since a 100M file was "
            "created".format(free_space_before, free_space_after))
Example #11
    def _check_fstab_and_df_entries(self, first_cmd, second_cmd):
        # matches output of "df --out=target" and entries in fstab
        # and vice-versa as per commands given in first_cmd and
        # second_cmd
        err_msg = "failed to execute command: %s with error: %s"

        ret, out, err = oc_rsh(self.oc_node, self.gluster_pod_name, first_cmd)
        self.assertEqual(ret, 0, err_msg % (first_cmd, err))

        for mnt_path in (out.strip()).split("\n"):
            ret, out, err = oc_rsh(self.oc_node, self.gluster_pod_name,
                                   second_cmd % mnt_path)
            self.assertEqual(ret, 0, err_msg % (second_cmd, err))
    def _check_fstab_and_df_entries(self, first_cmd, second_cmd):
        # matches output of "df --out=target" and entries in fstab
        # and vice-versa as per commands given in first_cmd and
        # second_cmd
        err_msg = "failed to execute command: %s with error: %s"

        ret, out, err = oc_rsh(self.oc_node, self.gluster_pod_name, first_cmd)
        self.assertEqual(ret, 0, err_msg % (first_cmd, err))

        for mnt_path in (out.strip()).split("\n"):
            ret, out, err = oc_rsh(
                self.oc_node, self.gluster_pod_name, second_cmd % mnt_path
            )
            self.assertEqual(ret, 0, err_msg % (second_cmd, err))
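A possible way to drive the helper above (the shell commands and the /var/lib/heketi paths are illustrative assumptions, not taken from the original suite): cross-check brick mounts reported inside the gluster pod against its fstab, and vice versa.

        # Hypothetical cross-check, assuming brick mounts live under
        # /var/lib/heketi and are tracked in /var/lib/heketi/fstab:
        # every mounted target must have an fstab entry ...
        self._check_fstab_and_df_entries(
            "df --output=target | sed 1d | grep /var/lib/heketi",
            "grep '%s' /var/lib/heketi/fstab")
        # ... and every fstab mount point must currently be mounted
        self._check_fstab_and_df_entries(
            "cut -d ' ' -f 2 /var/lib/heketi/fstab",
            "grep -w '%s' /proc/mounts")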
    def test_storage_class_mandatory_params_glusterfile(self):
        """Validate storage-class creation with mandatory parameters"""

        # create secret
        self.secret_name = oc_create_secret(
            self.node,
            namespace=self.sc.get('secretnamespace', 'default'),
            data_key=self.heketi_cli_key,
            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
        self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)

        # create storage class with mandatory parameters only
        sc_name = oc_create_sc(self.node,
                               provisioner='kubernetes.io/glusterfs',
                               resturl=self.sc['resturl'],
                               restuser=self.sc['restuser'],
                               secretnamespace=self.sc['secretnamespace'],
                               secretname=self.secret_name)
        self.addCleanup(oc_delete, self.node, 'sc', sc_name)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_sc.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "ls -lrt %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "rm -rf %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
Example #14
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertNotEqual(
            ret, 0, "IO command '%s' on %s was expected to fail "
            "but succeeded" % (cmd, node))
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
    def test_storage_class_mandatory_params_glusterfile(self):
        """Validate storage-class creation with mandatory parameters"""

        # create secret
        self.secret_name = oc_create_secret(
            self.node,
            namespace=self.sc.get('secretnamespace', 'default'),
            data_key=self.heketi_cli_key,
            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
        self.addCleanup(
            oc_delete, self.node, 'secret', self.secret_name)

        # create storage class with mandatory parameters only
        sc_name = oc_create_sc(
            self.node, provisioner='kubernetes.io/glusterfs',
            resturl=self.sc['resturl'], restuser=self.sc['restuser'],
            secretnamespace=self.sc['secretnamespace'],
            secretname=self.secret_name
        )
        self.addCleanup(oc_delete, self.node, 'sc', sc_name)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_sc.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "ls -lrt %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))

        cmd = "rm -rf %s" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
Example #16
    def test_lvm_script_and_wrapper_environments(self):
        """Validate lvm script present on glusterfs pods
           lvm wrapper environment is present on heketi pod"""

        # Check script /usr/sbin/exec-on-host is present in pod
        if self.is_containerized_gluster():
            cmd = "ls -lrt {}".format(ENV_VALUE)
            ret, out, err = openshift_ops.oc_rsh(
                self.oc_node, self.pod_name[0]['pod_name'], cmd)
            self.assertFalse(
                ret, "failed to execute command {} on pod {} with error:"
                " {}".format(cmd, self.pod_name[0]['pod_name'], err))
            self.assertIn(ENV_VALUE, out)

        # Get a value associated with HEKETI_LVM_WRAPPER
        custom = (r'":spec.containers[*].env[?(@.name==\"{}\")]'
                  r'.value"'.format(ENV_NAME))
        env_var_value = openshift_ops.oc_get_custom_resource(
            self.oc_node, "pod", custom, self.h_pod_name)

        # Check value /usr/sbin/exec-on-host is present in converged mode
        # and absent in independent mode deployment
        err_msg = "Heketi LVM environment {} match failed".format(ENV_VALUE)
        if self.is_containerized_gluster():
            self.assertEqual(env_var_value[0], ENV_VALUE, err_msg)
        else:
            self.assertIsNotNone(env_var_value[0], err_msg)
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                self.ocp_client[0], 'pv', custom, pv_name)

            # check if the pv_volume_name is present in heketi
            # Check if volume name is "vol_"+volumeid or not
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketidb' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with POD and attached PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                    self.ocp_client[0], 'pv', custom, pv_name)

            # check if the pv_volume_name is present in heketi
            # Check if volume name is "vol_"+volumeid or not
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketidb' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
    def test_pvc_deletion_while_pod_is_running(self):
        """Validate PVC deletion while pod is running"""
        if get_openshift_version() <= "3.9":
            self.skipTest(
                "PVC deletion while pod is running is not supported"
                " in OCP older than 3.9")

        # Create DC with POD and attached PVC to it
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Delete PVC
        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        # Make sure we are able to work with files on the mounted volume
        # after deleting pvc.
        filepath = "/mnt/file_for_testing_volume.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
    def _write_file(self, pod_name, filename, filesize, mnt_path):
        # write file
        cmd = ('dd if=/dev/zero of={}/{} bs={} count=1'.format(
            mnt_path, filename, filesize))
        ret, _, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(ret, 'Failed to write file due to err {}'.format(err))
        wait_for_pod_be_ready(self.node, pod_name)
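A short usage sketch for the helper above (the PVC size, file name and file size are illustrative assumptions): write a fixed amount of data to the PVC mount and rely on the helper's built-in readiness check.

        # Illustrative only: names and sizes are assumptions
        pvc_name = self.create_and_wait_for_pvc(pvc_size=1)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
        self._write_file(pod_name, 'autotest_file', '500M', '/mnt')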
Example #21
    def test_block_vol_offline_expand(self):
        """Test blockvol expansion while PVC is not in use"""
        node = self.ocp_master_node[0]

        pvc_name, dc_name, bvol_info = (
            self._block_vol_expand_common_offline_vs_online(False))

        # create and wait for job to be completed
        jobname = oc_create_offline_block_volume_expand_job(node, pvc_name)
        self.addCleanup(oc_delete, node, 'job', jobname)
        for w in waiter.Waiter(300, 5):
            if is_job_complete(node, jobname):
                break
        if w.expired:
            raise AssertionError(
                "block expand job {} is not completed".format(jobname))

        # verify expand size
        scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=1)
        pod_name = get_pod_name_from_dc(node, dc_name[0])
        ret, size, _ = oc_rsh(
            node, pod_name,
            'df -kh /mnt | sed "/Filesystem/d" | awk \'{print $2}\' '
            '| sed "s/G//"')
        self.assertFalse(ret, "Failed to get size from client side")
        self.assertEqual(
            int(float(size)), bvol_info["size"], "new size is not "
            "reflected at mount point after block volume expand")
    def test_pvc_deletion_while_pod_is_running(self):
        """Validate PVC deletion while pod is running"""
        if get_openshift_version() <= "3.9":
            self.skipTest("PVC deletion while pod is running is not supported"
                          " in OCP older than 3.9")

        # Create DC with POD and attached PVC to it
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Delete PVC
        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        # Make sure we are able to work with files on the mounted volume
        # after deleting pvc.
        filepath = "/mnt/file_for_testing_volume.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
Example #23
    def _get_mount_size(self, pod_name, mnt_path):
        cmd = ("df -h | grep {} | tail -1 | awk '{{print $4}}'".format(
            mnt_path))
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertFalse(ret, 'Failed to get size due to err {} in {}'.format(
            err, mnt_path))

        return out
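One way the helper above might be used (the resize to 2 GiB and the surrounding names are assumed to exist in a calling test): compare the free space reported at the mount point before and after a PVC expansion.

        # Illustrative comparison around a PVC expansion
        size_before = self._get_mount_size(pod_name, '/mnt')
        resize_pvc(self.node, pvc_name, 2)
        verify_pvc_size(self.node, pvc_name, 2)
        size_after = self._get_mount_size(pod_name, '/mnt')
        self.assertNotEqual(
            size_before, size_after,
            'Available space at /mnt did not change after PVC expansion')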
Example #24
    def test_lvm_script_executable_on_host(self):
        """Validate lvm script is executable on host instead
           of container"""

        # Skip the TC if independent mode deployment
        if not self.is_containerized_gluster():
            self.skipTest(
                "Skipping this test as LVM script is not available in "
                "independent mode deployment")

        pod_name = self.pod_name[0]['pod_name']
        gluster_pod_label = "glusterfs=storage-pod"

        # Remove LVM binaries to validate that the /usr/sbin/exec-on-host
        # script executes LVM commands on the host instead of in the pod
        cmd = "rm /usr/sbin/lvm"
        ret, _, err = openshift_ops.oc_rsh(self.oc_node, pod_name, cmd)
        self.addCleanup(
            openshift_ops.wait_for_pods_be_ready, self.oc_node,
            len(self.gluster_servers), gluster_pod_label)
        self.addCleanup(
            openshift_ops.wait_for_resource_absence, self.oc_node, "pod",
            pod_name)
        self.addCleanup(
            openshift_ops.oc_delete, self.oc_node, "pod", pod_name)
        err_msg = (
            "failed to execute command {} on pod {} with error: {}"
            "".format(cmd, pod_name, err))
        self.assertFalse(ret, err_msg)

        # Validate LVM command is not executable in pod
        cmd = "oc rsh {} lvs".format(pod_name)
        stdout = command.cmd_run(cmd, self.oc_node, raise_on_error=False)
        self.assertIn(
            'exec: \\"lvs\\": executable file not found in $PATH', stdout)

        # Run LVM command with /usr/sbin/exec-on-host
        cmd = "{} lvs".format(ENV_VALUE)
        ret, out, err = openshift_ops.oc_rsh(self.oc_node, pod_name, cmd)
        err_msg = (
            "failed to execute command {} on pod {} with error: {}"
            "".format(cmd, pod_name, err))
        self.assertFalse(ret, err_msg)
        self.assertIn("VG", out)
    def _create_volumes_with_io(self, pvc_cnt, timeout=120, wait_step=3):
        pvc_names = self.create_and_wait_for_pvcs(
            pvc_amount=pvc_cnt, sc_name=self.sc_name,
            timeout=timeout, wait_step=wait_step
        )
        err_msg = "failed to execute command %s on pod %s with error: %s"
        for pvc_name in pvc_names:
            dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

            # Make sure we are able to work with files
            # on the mounted volume
            filepath = "/mnt/file_for_testing_io.log"
            cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
            ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
            self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))

            cmd = "ls -lrt %s" % filepath
            ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
            self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))

        return pvc_names
Example #26
    def _create_volumes_with_io(self, pvc_cnt, timeout=120, wait_step=3):
        pvc_names = self.create_and_wait_for_pvcs(pvc_amount=pvc_cnt,
                                                  sc_name=self.sc_name,
                                                  timeout=timeout,
                                                  wait_step=wait_step)
        err_msg = "failed to execute command %s on pod %s with error: %s"
        for pvc_name in pvc_names:
            dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

            # Make sure we are able to work with files
            # on the mounted volume
            filepath = "/mnt/file_for_testing_io.log"
            cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
            ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
            self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))

            cmd = "ls -lrt %s" % filepath
            ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
            self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))

        return pvc_names
Example #27
    def test_run_workload_with_logging(self):
        """Validate logs are being generated aifter running workload"""

        # Get the size of used space of logs
        es_pod = openshift_ops.get_pod_name_from_dc(
            self._master, self._logging_es_dc)
        mount_point = "/elasticsearch/persistent"
        cmd_space_check = ('df -kh --output=used {} | sed "/Used/d" |'
                           'sed "s/G//"'.format(mount_point))
        ret, initial_used_percent, err = openshift_ops.oc_rsh(
            self._master, es_pod, cmd_space_check)
        err_msg = "Failed to fetch the size of used space, error {}"
        self.assertFalse(ret, err_msg.format(err))

        # Create 20 pvcs and app pods with io
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)
        pvc_count, batch_count = 5, 4
        for _ in range(batch_count):
            pvcs = self.create_and_wait_for_pvcs(pvc_amount=pvc_count)
            self.create_dcs_with_pvc(pvcs)
        self.addCleanup(
            openshift_ops.switch_oc_project,
            self._master, self.storage_project_name)

        # Get and verify the final used size of used space of logs
        openshift_ops.switch_oc_project(
            self._master, self._logging_project_name)
        for w in waiter.Waiter(600, 30):
            ret, final_used_percent, err = openshift_ops.oc_rsh(
                self._master, es_pod, cmd_space_check)
            self.assertFalse(ret, err_msg.format(err))
            if int(initial_used_percent) < int(final_used_percent):
                break
        if w.expired:
            raise AssertionError(
                "Initial used space {} for logs is not less than final "
                "used space {}".format(
                    initial_used_percent, final_used_percent))
    def _fetch_metric_from_promtheus_pod(self, metric):
        """Fetch metric from prometheus pod using api call"""
        prometheus_pods = list(openshift_ops.oc_get_pods(
            self._master, selector=self._prometheus_resources_selector).keys())
        fetch_metric_cmd = ("curl 'http://localhost:9090/api/v1/query"
                            "?query={}'".format(metric))
        ret, metric_data, _ = openshift_ops.oc_rsh(
            self._master, prometheus_pods[0], fetch_metric_cmd)
        metric_result = json.loads(metric_data)["data"]["result"]
        if (not metric_result) or ret:
            raise exceptions.ExecutionError(
                "Failed to fetch data for metric {}, output {}".format(
                    metric, metric_result))
        return metric_result
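A hedged sketch of calling the helper above: query one of the kubelet volume-stats metrics used elsewhere in these tests and index the standard Prometheus instant-query result by PVC label (the result layout follows the Prometheus HTTP API; the dict comprehension itself is an assumption, not part of the original suite).

        # Sketch: map PVC name -> used bytes from the Prometheus result
        metric_result = self._fetch_metric_from_promtheus_pod(
            'kubelet_volume_stats_used_bytes')
        used_bytes_by_pvc = {
            entry['metric'].get('persistentvolumeclaim'): entry['value'][1]
            for entry in metric_result}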
Example #29
    def test_run_workload_with_metrics(self):
        """Validate if logs are being generated after running workload"""

        # Get the size of used space of logs
        cassandra_pod = get_pod_name_from_rc(
            self.master, self.metrics_rc_hawkular_cassandra)
        mount_point = "/cassandra_data"
        cmd_space_check = ('df -k --output=used {} | sed "/Used/d" |'
                           'sed "s/G//"'.format(mount_point))
        ret, initial_used_percent, err = oc_rsh(self.master, cassandra_pod,
                                                cmd_space_check)
        err_msg = "Failed to fetch the size of used space, error {}"
        self.assertFalse(ret, err_msg.format(err))

        # Create 20 PVCs and app pods with IO
        switch_oc_project(self.master, self.storage_project_name)
        pvc_count, batch_count = 5, 4
        for _ in range(batch_count):
            pvcs = self.create_and_wait_for_pvcs(pvc_amount=pvc_count)
            self.create_dcs_with_pvc(pvcs)
        self.addCleanup(switch_oc_project, self.master,
                        self.storage_project_name)

        # Get and verify the final size of used space for metrics data
        switch_oc_project(self.master, self.metrics_project_name)
        for w in waiter.Waiter(600, 30):
            ret, final_used_percent, err = oc_rsh(self.master, cassandra_pod,
                                                  cmd_space_check)
            self.assertFalse(ret, err_msg.format(err))
            if int(initial_used_percent) < int(final_used_percent):
                break
        if w.expired:
            raise AssertionError(
                "Initial used space {} for logs is not less than final "
                "used space {}".format(initial_used_percent,
                                       final_used_percent))
    def test_prometheus_basic_validation(self):
        """ Validate basic volume metrics using prometheus """

        # Fetch the metrics and storing initial_metrics as dictionary
        pvc_name, pod_name, initial_metrics = self._fetch_initial_metrics(
            volume_expansion=False)

        # Create 1000 files and fetch the metrics to verify the data is updated
        self._perform_io_and_fetch_metrics(pod_name=pod_name,
                                           pvc_name=pvc_name,
                                           filename="filename1",
                                           dirname="dirname1",
                                           metric_data=initial_metrics,
                                           operation="create")

        # Write IO of half the volume size and validate from the
        # prometheus pod that the size change is reflected
        size_to_write = int(
            initial_metrics['kubelet_volume_stats_capacity_bytes']) // 2
        openshift_ops.switch_oc_project(self._master,
                                        self.storage_project_name)
        cmd = ("dd if=/dev/urandom of=/mnt/large_file bs={} count=1024".format(
            size_to_write // 1024))
        ret, _, err = openshift_ops.oc_rsh(self._master, pod_name, cmd)
        self.assertFalse(ret, 'Failed to write file due to err {}'.format(err))

        # Fetching the metrics and validating the data change is reflected
        for w in waiter.Waiter(120, 10):
            half_io_metrics = self._get_and_manipulate_metric_data(
                ['kubelet_volume_stats_used_bytes'], pvc_name)
            if bool(half_io_metrics) and (int(
                    half_io_metrics['kubelet_volume_stats_used_bytes']) >
                                          size_to_write):
                break
        if w.expired:
            raise AssertionError(
                "After Data is written on the pvc, metrics like inodes used "
                "and bytes used are not reflected in the prometheus")

        # Delete the files from the volume and wait for the
        # updated details reflected in prometheus
        self._perform_io_and_fetch_metrics(pod_name=pod_name,
                                           pvc_name=pvc_name,
                                           filename="filename1",
                                           dirname="dirname1",
                                           metric_data=half_io_metrics,
                                           operation="delete")
Example #31
    def test_block_vol_online_expand(self):
        """Test blockvol expansion while PVC is in use"""
        node = self.ocp_master_node[0]

        pvc_name, dc_name, bvol_info = (
            self._block_vol_expand_common_offline_vs_online(True))

        # get pod hostname
        iqn, _, pod_hostname = self.verify_iscsi_sessions_and_multipath(
            pvc_name, dc_name[0])

        # Get the paths info from the node
        device = list(
            get_iscsi_block_devices_by_path(pod_hostname, iqn).keys())[0]

        # Get mpath name
        mpath = get_mpath_name_from_device_name(pod_hostname, device)

        # rescan the devices on pod_hostname
        cmd = "iscsiadm -m node -R -T {}".format(iqn)
        self.cmd_run(cmd, pod_hostname)

        # refresh multipath device size
        cmd = "multipathd -k'resize map {}'".format(mpath)
        self.cmd_run(cmd, pod_hostname)

        # get mount point
        cmd = "lsblk /dev/{} --output MOUNTPOINT --noheadings".format(device)
        mount_point = self.cmd_run(cmd, pod_hostname)

        cmd = "xfs_growfs {}".format(mount_point)
        self.cmd_run(cmd, pod_hostname)

        cmd = ("df -h {} | sed '/Filesystem/d' | awk '{{print $2}}' |"
               " sed 's/G//'")
        size = self.cmd_run(cmd.format(mount_point), pod_hostname)
        self.assertEqual(
            int(float(size)), bvol_info["size"], "new size is not "
            "reflected at host mount point after block volume expand")

        # verify expand size
        pod_name = get_pod_name_from_dc(node, dc_name[0])
        ret, size, _ = oc_rsh(node, pod_name, cmd.format("/mnt"))
        self.assertFalse(ret, "Failed to get size from client side")
        self.assertEqual(
            int(float(size)), bvol_info["size"], "new size is not "
            "reflected at mount point after block volume expand")
Example #32
    def get_io_time(self):
        """Gets last io time of io pod by listing log file directory
           /mnt on pod
        """
        ret, stdout, stderr = oc_rsh(self.oc_node, self.pod_name,
                                     "ls -l /mnt/ | awk '{print $8}'")
        if ret != 0:
            err_msg = "failed to get io time for pod %s" % self.pod_name
            g.log.error(err_msg)
            raise AssertionError(err_msg)

        get_time = None
        try:
            get_time = datetime.strptime(stdout.strip(), "%H:%M")
        except Exception:
            g.log.error("invalid time format ret %s, stout: %s, "
                        "stderr: %s" % (ret, stdout, stderr))
            raise

        return get_time
    def get_io_time(self):
        """Gets last io time of io pod by listing log file directory
           /mnt on pod
        """
        ret, stdout, stderr = oc_rsh(
            self.oc_node, self.pod_name, "ls -l /mnt/ | awk '{print $8}'"
        )
        if ret != 0:
            err_msg = "failed to get io time for pod %s" % self.pod_name
            g.log.error(err_msg)
            raise AssertionError(err_msg)

        get_time = None
        try:
            get_time = datetime.strptime(stdout.strip(), "%H:%M")
        except Exception:
            g.log.error("invalid time format ret %s, stout: %s, "
                        "stderr: %s" % (ret, stdout, stderr))
            raise

        return get_time
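A possible (assumed) pattern for the helper above: record the last IO timestamp, perform the disruptive action under test, then poll with the waiter idiom until a newer timestamp shows up, proving the IO pod kept writing.

        # Sketch only; the disruptive step is a placeholder
        io_time_before = self.get_io_time()
        # ... restart a service / reboot a node here ...
        for w in waiter.Waiter(120, 10):
            if self.get_io_time() > io_time_before:
                break
        if w.expired:
            raise AssertionError("IO did not resume after the disruption")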
    def dynamic_provisioning_glusterblock(
            self, set_hacount, create_vol_name_prefix=False):
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class(
            set_hacount=set_hacount,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Check that we can write data
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % datafile_path
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on '%s'." % (cmd, self.node))
    def dynamic_provisioning_glusterblock(self,
                                          set_hacount,
                                          create_vol_name_prefix=False):
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class(
            set_hacount=set_hacount,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Check that we can write data
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100", "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % datafile_path
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on '%s'." % (cmd, self.node))
    def test_pvc_resize_size_greater_than_available_space(self):
        """Re-size PVC to greater value than available volume size and then
        expand volume to support maximum size.
        """
        sc_name = self.create_storage_class(create_vol_name_prefix=True,
                                            allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Verify the size of volume mounted
        cmd_on_pod = "df -Ph /mnt | tail -1 | awk '{print $2}'"
        _, stdout, _ = oc_rsh(self.node, pod_name, cmd_on_pod)
        self.assertGreaterEqual(int(stdout.strip('.0M\n')), 1000,
                                "Size of %s not equal to 1G" % pvc_name)
        self.assertLessEqual(int(stdout.strip('.0M\n')), 1024,
                             "Size of %s not equal to 1G" % pvc_name)
        available_size_gb = self._available_disk_free_space()
        with self.assertRaises(AssertionError):
            resize_pvc(self.ocp_master_node[0], pvc_name,
                       available_size_gb + 1)
        resize_pvc(self.ocp_master_node[0], pvc_name, available_size_gb)
        verify_pvc_size(self.ocp_master_node[0], pvc_name, available_size_gb)
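
_available_disk_free_space is defined elsewhere in this class and is not shown here. As an assumption, a sketch consistent with the heketi topology queries used by _pv_resize further down could compute the smallest per-node free space in GB:

    def _available_disk_free_space(self):
        # Illustrative sketch only: pick the smallest online-node free space
        # reported by heketi and convert it from KiB to whole GiB.
        heketi_url = self.heketi_server_url
        node_free_spaces = []
        for node_id in heketi_ops.heketi_node_list(
                self.heketi_client_node, heketi_url):
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, heketi_url, node_id, json=True)
            if node_info['state'].lower() != 'online':
                continue
            free = sum(device['storage']['free']
                       for device in node_info['devices']
                       if device['state'].lower() == 'online')
            if free:
                node_free_spaces.append(free)
        self.assertTrue(node_free_spaces, "No online nodes with free space")
        return int(min(node_free_spaces) / (1024 ** 2))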
    def test_pv_resize_when_heketi_down(self):
        """Create a PVC and try to expand it when heketi is down, It should
        fail. After heketi is up, expand PVC should work.
        """
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        custom = (r':metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi-volume-id"')
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]

        h_vol_info = heketi_ops.heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)

        # Bring the heketi POD down
        scale_dc_pod_amount_and_wait(
            self.node, self.heketi_dc_name, pod_amount=0)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.node,
            self.heketi_dc_name, pod_amount=1)

        cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)

        resize_pvc(self.node, pvc_name, 2)
        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='VolumeResizeFailed')

        # Verify volume was not expanded
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
        self.assertEqual(
            len(vol_info['bricks']['brick']), len(h_vol_info['bricks']))

        # Bring the heketi POD up
        scale_dc_pod_amount_and_wait(
            self.node, self.heketi_dc_name, pod_amount=1)

        # Verify volume expansion
        verify_pvc_size(self.node, pvc_name, 2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)
        self.assertLess(
            len(h_vol_info['bricks']), len(vol_info['bricks']['brick']))

        # Wait for remount after expansion
        for w in waiter.Waiter(timeout=30, interval=5):
            ret, out, err = oc_rsh(
                self.node, pod_name,
                "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(ret, 'Failed with err: %s and Output: %s' % (
                err, out))
            if out.strip() == '2.0G':
                break

        # Write data, making sure we now have more space than before
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)

        # Verify pod is running
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)
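
Assuming the Waiter simply stops iterating once its timeout elapses (an assumption about its behaviour, not shown in this file), the remount loop above falls through silently if /mnt never reports 2.0G. An illustrative helper (hypothetical name) that fails loudly instead:

    def _wait_for_remounted_size(self, pod_name, expected='2.0G', timeout=30):
        # Illustrative helper: poll `df` inside the pod until the mount
        # reports the expected size, and fail loudly if it never does.
        for w in waiter.Waiter(timeout=timeout, interval=5):
            ret, out, err = oc_rsh(
                self.node, pod_name,
                "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(
                ret, 'Failed with err: %s and Output: %s' % (err, out))
            if out.strip() == expected:
                return
        raise AssertionError(
            "/mnt did not report %s within %ss after PVC expansion"
            % (expected, timeout))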
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name
        )

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for the app POD to be up and running
        wait_for_pod_be_ready(
            self.node, app_1_pod_name, timeout=60, wait_step=2)

        # Write data to the app POD
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix="autotest-file2", sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name, raise_on_absence=False
        )

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
            mount_path=mount_path)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=60, wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
Example #39
0
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            self.skipTest("Could not find 3 online nodes with at least "
                          "1 online device having more than %dGb of free "
                          "space." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attach the PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
Example #41
0
    def test_pv_resize_when_heketi_down(self):
        """Create a PVC and try to expand it when heketi is down, It should
        fail. After heketi is up, expand PVC should work.
        """
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        custom = (r':metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi-volume-id"')
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]

        h_vol_info = heketi_ops.heketi_volume_info(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   vol_id,
                                                   json=True)

        # Bring the heketi POD down
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.node,
                        self.heketi_dc_name,
                        pod_amount=1)

        cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)

        resize_pvc(self.node, pvc_name, 2)
        wait_for_events(self.node,
                        pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_type='Warning',
                        event_reason='VolumeResizeFailed')

        # Verify volume was not expanded
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
        self.assertEqual(len(vol_info['bricks']['brick']),
                         len(h_vol_info['bricks']))

        # Bring the heketi POD up
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=1)

        # Verify volume expansion
        verify_pvc_size(self.node, pvc_name, 2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)
        self.assertLess(len(h_vol_info['bricks']),
                        len(vol_info['bricks']['brick']))

        # Wait for remount after expansion
        for w in waiter.Waiter(timeout=30, interval=5):
            ret, out, err = oc_rsh(self.node, pod_name,
                                   "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(ret,
                             'Failed with err: %s and Output: %s' % (err, out))
            if out.strip() == '2.0G':
                break

        # Write data, making sure we now have more space than before
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)

        # Verify pod is running
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)
    def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
        """Validate PVC with glusterblock creation when heketi pod is down"""
        datafile_path = '/mnt/fake_file_for_%s' % self.id()

        # Create DC with attached PVC
        sc_name = self.create_storage_class()
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix='autotest-block', sc_name=sc_name)
        app_1_dc_name, app_1_pod_name = self.create_dc_with_pvc(app_1_pvc_name)

        # Write test data
        write_data_cmd = (
            "dd if=/dev/urandom of=%s bs=1K count=100" % datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        # Create second PVC
        app_2_pvc_name = oc_create_pvc(
            self.node, pvc_name_prefix='autotest-block2', sc_name=sc_name
        )
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', app_2_pvc_name)
        self.addCleanup(
            oc_delete, self.node, 'pvc', app_2_pvc_name
        )

        # Create second app POD
        app_2_dc_name = oc_create_app_dc_with_io(self.node, app_2_pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', app_2_dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.node, app_2_dc_name, 0)
        app_2_pod_name = get_pod_name_from_dc(self.node, app_2_dc_name)

        # Bring Heketi pod back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(
            self.node, self.heketi_dc_name, timeout=10, wait_step=2)
        wait_for_pod_be_ready(
            self.node, new_heketi_pod_name, wait_step=5, timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(
            self.node, app_2_pod_name, timeout=150, wait_step=3)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))
    def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
        """Validate dynamic provisioning for gluster file when heketi pod down
        """
        mount_path = "/mnt"
        datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())

        # Create secret and storage class
        sc_name = self.create_storage_class()

        # Create PVC
        app_1_pvc_name = self.create_and_wait_for_pvc(
            pvc_name_prefix="autotest-file", sc_name=sc_name)

        # Create app POD with attached volume
        app_1_pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            app_1_pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod',
                        app_1_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)

        # Wait for the app POD to be up and running
        wait_for_pod_be_ready(self.node,
                              app_1_pod_name,
                              timeout=60,
                              wait_step=2)

        # Write data to the app POD
        write_data_cmd = ("dd if=/dev/urandom of=%s bs=1K count=100" %
                          datafile_path)
        ret, out, err = oc_rsh(self.node, app_1_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))

        # Remove Heketi pod
        heketi_down_cmd = "oc scale --replicas=0 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        heketi_up_cmd = "oc scale --replicas=1 dc/%s --namespace %s" % (
            self.heketi_dc_name, self.storage_project_name)
        self.addCleanup(self.cmd_run, heketi_up_cmd)
        heketi_pod_name = get_pod_name_from_dc(self.node,
                                               self.heketi_dc_name,
                                               timeout=10,
                                               wait_step=3)
        self.cmd_run(heketi_down_cmd)
        wait_for_resource_absence(self.node, 'pod', heketi_pod_name)

        app_2_pvc_name = oc_create_pvc(self.node,
                                       pvc_name_prefix="autotest-file2",
                                       sc_name=sc_name)
        self.addCleanup(wait_for_resource_absence, self.node, 'pvc',
                        app_2_pvc_name)
        self.addCleanup(oc_delete,
                        self.node,
                        'pvc',
                        app_2_pvc_name,
                        raise_on_absence=False)

        # Create second app POD
        app_2_pod_name = oc_create_tiny_pod_with_volume(
            self.node,
            app_2_pvc_name,
            "test-pvc-mount-on-app-pod",
            mount_path=mount_path,
            image=self.io_container_image_cirros)
        self.addCleanup(wait_for_resource_absence, self.node, 'pod',
                        app_2_pod_name)
        self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)

        # Bring Heketi POD back
        self.cmd_run(heketi_up_cmd)

        # Wait for the Heketi POD to be up and running
        new_heketi_pod_name = get_pod_name_from_dc(self.node,
                                                   self.heketi_dc_name,
                                                   timeout=10,
                                                   wait_step=2)
        wait_for_pod_be_ready(self.node,
                              new_heketi_pod_name,
                              wait_step=5,
                              timeout=120)

        # Wait for the second PVC and app POD to be ready
        verify_pvc_status_is_bound(self.node, app_2_pvc_name)
        wait_for_pod_be_ready(self.node,
                              app_2_pod_name,
                              timeout=60,
                              wait_step=2)

        # Verify that we are able to write data
        ret, out, err = oc_rsh(self.node, app_2_pod_name, write_data_cmd)
        self.assertEqual(
            ret, 0,
            "Failed to execute command %s on %s" % (write_data_cmd, self.node))