def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "failed to execute command %s on %s" % (cmd, node))
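        # Try to shrink the PVC; the resize must fail and both the PVC and
        # the PV must keep their original size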
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
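        # Write more data to confirm the volume is still writable after the
        # failed shrink attempt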
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "failed to execute command %s on %s" % (cmd, node))
Example #2
    def test_expand_arbiter_volume_according_to_avg_file_size(
            self, avg_file_size, expected_brick_size, vol_expand=True):
        """Validate expansion of arbiter volume with diff avg file size"""
        data_hosts = []
        arbiter_hosts = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            (data_hosts.append(node_info['hostnames']['storage'][0]) if i < 2
             else arbiter_hosts.append(node_info['hostnames']['storage'][0]))
            self.assertEqual(node_info['tags']['arbiter'],
                             'disabled' if i < 2 else 'required')

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True,
                                  arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_expanded = False

        for i in range(2):
            vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            bricks = (
                self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                    vol_info,
                    arbiter_bricks=(2 if vol_expanded else 1),
                    data_bricks=(4 if vol_expanded else 2)))

            # verify that arbiter bricks lie on arbiter hosts
            for brick in bricks['arbiter_list']:
                ip, brick_name = brick['name'].split(':')
                self.assertIn(ip, arbiter_hosts)
                # verify the size of arbiter brick
                cmd = "df -h %s --output=size | tail -1" % brick_name
                out = openshift_ops.cmd_run_on_gluster_pod_or_node(
                    self.node, cmd, ip)
                self.assertEqual(out, expected_brick_size)
            # verify that data bricks lie on data hosts
            for brick in bricks['data_list']:
                self.assertIn(brick['name'].split(':')[0], data_hosts)

            if vol_expanded or not vol_expand:
                break
            # Expand PVC and verify the size
            pvc_size = 2
            openshift_ops.resize_pvc(self.node, self.pvc_name, pvc_size)
            openshift_ops.verify_pvc_size(self.node, self.pvc_name, pvc_size)
            vol_expanded = True
Example #3
    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
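        # Write more data to confirm the volume is still writable after the
        # failed shrink attempt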
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
    def test_pvc_resize_while_ios_are_running(self):
        """Re-size PVC  while IO's are running"""

        # Create an SC, PVC and app pod
        sc_name = self.create_storage_class(create_vol_name_prefix=True,
                                            allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Run io on the pod for 5 minutes in background
        cmd_io = ('timeout 5m bash -c -- "while true; do oc exec  {} dd '
                  'if=/dev/urandom of=/mnt/f1 bs=100K count=2000; '
                  'done"'.format(pod_name))
        proc = g.run_async(host=self.node, command=cmd_io)

        # Resize PVC while io's are running and validate resize operation
        resize_pvc(self.node, pvc_name, 2)
        verify_pvc_size(self.node, pvc_name, 2)
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        verify_pv_size(self.node, pv_name, 2)

        # Check if timeout command and ios are successful
        ret, _, err = proc.async_communicate()
        msg = "command terminated with exit code"
        if ret != 124 or msg in str(err):
            raise ExecutionError("Failed to run io, error {}".format(str(err)))
Example #5
    def test_pv_resize_with_prefix_for_name_and_size(
            self, create_vol_name_prefix=False, valid_size=True):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # If resize size is invalid then size should not change
        if valid_size:
            cmd = ("dd if=/dev/urandom of=%sfile2 "
                   "bs=100K count=10000") % dir_path
            with self.assertRaises(AssertionError):
                ret, out, err = oc_rsh(node, pod_name, cmd)
                msg = ("Command '%s' was expected to fail on '%s' node. "
                       "But it returned following: ret is '%s', err is '%s' "
                       "and out is '%s'" % (cmd, node, ret, err, out))
                raise ExecutionError(msg)
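            # Expand the PVC and verify that both the PVC and the PV report
            # the new size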
            pvc_size = 2
            resize_pvc(node, pvc_name, pvc_size)
            verify_pvc_size(node, pvc_name, pvc_size)
            verify_pv_size(node, pv_name, pvc_size)
        else:
            invalid_pvc_size = 'ten'
            with self.assertRaises(AssertionError):
                resize_pvc(node, pvc_name, invalid_pvc_size)
            verify_pvc_size(node, pvc_name, 1)
            verify_pv_size(node, pv_name, 1)

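        # Recreate the app POD and make sure data can still be written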
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
Example #6
    def test_pvc_placement_and_expansion_with_zone_check_set_in_dc(
            self, zone_count, is_arbiter):
        heketi_zone_checking, expand_size = "strict", 2

        # Create storage class setting expansion and arbiter option up
        sc_name = self.create_storage_class(sc_name_prefix=self.prefix,
                                            vol_name_prefix=self.prefix,
                                            allow_volume_expansion=True,
                                            is_arbiter_vol=True)

        # Create PVC using above storage class
        pvc_name = self.create_and_wait_for_pvc(pvc_name_prefix=self.prefix,
                                                sc_name=sc_name)

        # Check amount of available online heketi zones
        self._check_for_available_zones(zone_count)

        # Set "user.heketi.zone-checking" to strict inside heketi dc
        self._set_zone_check_env_in_heketi_dc(heketi_zone_checking)

        # Expand PVC
        openshift_storage_libs.enable_pvc_resize(self.node)
        openshift_ops.resize_pvc(self.node, pvc_name, expand_size)
        openshift_ops.verify_pvc_size(self.node, pvc_name, expand_size)

        # Make sure that gluster vol has appropriate option set
        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)
        if is_arbiter:
            self.assertIn('user.heketi.arbiter', vol_info['options'])
            self.assertEqual(vol_info['options']['user.heketi.arbiter'],
                             'true')

        # Create app DC with the above PVC
        self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb = 1

        available_size_gb = self._available_disk_free_space()

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with app POD and attach the PVC to it
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            exceed_size = available_size_gb + 10

            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, exceed_size)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'],
                self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "failed to execute command %s on %s" % (cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        with self.assertRaises(AssertionError):
            ret, out, err = oc_rsh(node, pod_name, cmd)
            msg = ("Command '%s' was expected to fail on '%s' node. "
                   "But it returned following: ret is '%s', err is '%s' "
                   "and out is '%s'" % (cmd, node, ret, err, out))
            raise ExecutionError(msg)

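        # Expand the PVC and verify the new size on both the PVC and the PV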
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
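        # Recreate the app POD and write data on the expanded volume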
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "failed to execute command %s on %s" % (cmd, node))
Example #9
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with app POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertNotEqual(
            ret, 0, "IO was expected to fail but did not: "
            "command %s on %s" % (cmd, node))
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
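        # Recreate the app POD and write data on the expanded volume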
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
Example #10
    def test_arbiter_volume_expand_using_pvc(self):
        """Validate arbiter volume expansion by PVC creation"""
        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        # Get vol info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)

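        # Expand the PVC and verify the new size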
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

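        # After expansion the volume should have 2 arbiter and 4 data bricks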
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)
    def test_pvc_resize_size_greater_than_available_space(self):
        """Re-size PVC to greater value than available volume size and then
        expand volume to support maximum size.
        """
        sc_name = self.create_storage_class(create_vol_name_prefix=True,
                                            allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Verify the size of volume mounted
        cmd_on_pod = "df -Ph /mnt | tail -1 | awk '{print $2}'"
        _, stdout, _ = oc_rsh(self.node, pod_name, cmd_on_pod)
        self.assertGreaterEqual(int(stdout.strip('.0M\n')), 1000,
                                "Size of %s not equal to 1G" % pvc_name)
        self.assertLessEqual(int(stdout.strip('.0M\n')), 1024,
                             "Size of %s not equal to 1G" % pvc_name)
        available_size_gb = self._available_disk_free_space()
        with self.assertRaises(AssertionError):
            resize_pvc(self.ocp_master_node[0], pvc_name,
                       available_size_gb + 1)
        resize_pvc(self.ocp_master_node[0], pvc_name, available_size_gb)
        verify_pvc_size(self.ocp_master_node[0], pvc_name, available_size_gb)
    def _validate_brick_placement_in_correct_zone_or_with_expand_pvc(
            self, heketi_zone_checking, pvc_name, zone_count, expand=False):
        online_nodes = self._get_online_nodes()

        for i in range(2):
            # Validate brick placement if heketi zone checking is 'strict'
            if heketi_zone_checking == 'strict':
                brick_hosts_ips = (
                    openshift_ops.get_gluster_host_ips_by_pvc_name(
                        self.node, pvc_name))
                placement_zones = {}
                for brick_host_ip in brick_hosts_ips:
                    for node_zone, node_ips in online_nodes:
                        if brick_host_ip not in node_ips:
                            continue
                        placement_zones[node_zone] = placement_zones.get(
                            node_zone, 0) + 1
                        break
                actual_zone_count = len(placement_zones)
                # NOTE(vponomar): '3' is default amount of volume replicas.
                # And it is just impossible to find more actual zones than
                # amount of replicas/bricks.
                brick_number = len(brick_hosts_ips)
                expected_zone_count = (
                    brick_number if brick_number < zone_count else zone_count)
                self.assertEqual(
                    expected_zone_count, actual_zone_count,
                    "PVC '%s' is incorrectly placed on the Heketi nodes "
                    "according to their zones. Expected '%s' unique zones, "
                    "got '%s'." % (pvc_name, zone_count, actual_zone_count))

            # Expand PVC if needed
            if expand:
                expand_size, expand = 2, False
                openshift_storage_libs.enable_pvc_resize(self.node)
                openshift_ops.resize_pvc(self.node, pvc_name, expand_size)
                openshift_ops.verify_pvc_size(self.node, pvc_name, expand_size)
            else:
                break
    def test_arbiter_volume_expand_using_pvc(self):
        """Validate arbiter volume expansion by PVC creation"""
        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        # Get vol info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)

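        # Expand the PVC and verify the new size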
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

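        # After expansion the volume should have 2 arbiter and 4 data bricks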
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)
    def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
            self, node_tags):
        """Validate exapnsion of arbiter volume with defferent tags

           This test case is going to run two tests:
                1. If value is True it is going to set tags
                   on nodes and run test
                2. If value is False it is going to set tags
                   on devices and run test
        """

        data_nodes = []
        arbiter_nodes = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            if node_tags:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'node',
                    node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)

            if not node_tags:
                for device in node_info['devices']:
                    self._set_arbiter_tag_with_further_revert(
                        self.heketi_client_node, self.heketi_server_url,
                        'device', device['id'],
                        'disabled' if i < 2 else 'required')
                    device_info = heketi_ops.heketi_device_info(
                        self.heketi_client_node, self.heketi_server_url,
                        device['id'], json=True)
                    self.assertEqual(
                        device_info['tags']['arbiter'],
                        'disabled' if i < 2 else 'required')

            node = {
                'id': node_id, 'host': node_info['hostnames']['storage'][0]}
            if node_tags:
                self.assertEqual(
                    node_info['tags']['arbiter'],
                    'disabled' if i < 2 else 'required')
            data_nodes.append(node) if i < 2 else arbiter_nodes.append(
                node)

        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info)

        arbiter_hosts = [obj['host'] for obj in arbiter_nodes]
        data_hosts = [obj['host'] for obj in data_nodes]

        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)

        # Expand PVC and verify the size
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)

        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)
Example #15
    def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
            self, node_tags):
        """Validate exapnsion of arbiter volume with defferent tags

           This test case is going to run two tests:
                1. If value is True it is going to set tags
                   on nodes and run test
                2. If value is False it is going to set tags
                   on devices and run test
        """

        data_nodes = []
        arbiter_nodes = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            if node_tags:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'node',
                    node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)

            if not node_tags:
                for device in node_info['devices']:
                    self._set_arbiter_tag_with_further_revert(
                        self.heketi_client_node, self.heketi_server_url,
                        'device', device['id'],
                        'disabled' if i < 2 else 'required')
                    device_info = heketi_ops.heketi_device_info(
                        self.heketi_client_node,
                        self.heketi_server_url,
                        device['id'],
                        json=True)
                    self.assertEqual(device_info['tags']['arbiter'],
                                     'disabled' if i < 2 else 'required')

            node = {
                'id': node_id,
                'host': node_info['hostnames']['storage'][0]
            }
            if node_tags:
                self.assertEqual(node_info['tags']['arbiter'],
                                 'disabled' if i < 2 else 'required')
            data_nodes.append(node) if i < 2 else arbiter_nodes.append(node)

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info)

        arbiter_hosts = [obj['host'] for obj in arbiter_nodes]
        data_hosts = [obj['host'] for obj in data_nodes]

        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)

        # Expand PVC and verify the size
        pvc_size = 2
        openshift_ops.resize_pvc(self.node, self.pvc_name, pvc_size)
        openshift_ops.verify_pvc_size(self.node, self.pvc_name, pvc_size)

        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)

        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)
    def test_pv_resize_device_disabled(self):
        """Validate resize after disabling all devices except one"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        # Volume expansion size and the path where the volume is mounted
        expand_size, dir_path = 7, "/mnt"

        # Get nodes info
        heketi_node_id_list = heketi_ops.heketi_node_list(h_node, h_url)
        if len(heketi_node_id_list) < 3:
            self.skipTest(
                "At-least 3 gluster nodes are required to execute test case")

        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        self._write_file(pod_name, "file1", "1G", dir_path)

        with self.assertRaises(AssertionError):
            self._write_file(pod_name, "file2", "3G", dir_path)

        # Prepare first 3 nodes and then disable other devices.
        for node_id in heketi_node_id_list[:3]:
            node_info = heketi_ops.heketi_node_info(h_node,
                                                    h_url,
                                                    node_id,
                                                    json=True)
            self.assertTrue(node_info, "Failed to get node info")
            devices = node_info.get("devices", None)
            self.assertTrue(devices,
                            "Node {} does not have devices".format(node_id))
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Skipping test as it expects to first device to"
                              " be enabled")
            for device in devices[1:]:
                heketi_ops.heketi_device_disable(h_node, h_url, device["id"])
                self.addCleanup(heketi_ops.heketi_device_enable, h_node, h_url,
                                device["id"])

        usedsize_before_resize = self._get_mount_size(pod_name, dir_path)

        # Resize pvc
        resize_pvc(self.node, pvc_name, expand_size)
        verify_pvc_size(self.node, pvc_name, expand_size)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)

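        # Enable back the devices which were disabled above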
        for node_id in heketi_node_id_list[:3]:
            for device in devices[1:]:
                heketi_ops.heketi_device_enable(h_node, h_url, device["id"])

        self._write_file(pod_name, "file3", "3G", dir_path)

        usedsize_after_resize = self._get_mount_size(pod_name, dir_path)
        self.assertGreater(
            int(usedsize_before_resize.strip('%')),
            int(usedsize_after_resize.strip('%')),
            "Mount size {} should be greater than {}".format(
                usedsize_before_resize, usedsize_after_resize))

        self._write_file(pod_name, "file4", "1024", dir_path)

        # Validate dist-rep volume with 6 bricks after pv resize
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(
            6, len(vol_info['bricks']['brick']),
            "Expected bricks count is 6, but actual brick count is {}".format(
                len(vol_info['bricks']['brick'])))
    def test_check_pvc_placement_based_on_the_heketi_zones(
            self,
            zone_count,
            heketi_zone_checking,
            is_arbiter_vol,
            expand=False):
        # TODO(vponomar): implement setting env vars for the Heketi dc.

        # Check amount of available online heketi nodes
        online_nodes = self._get_online_nodes()
        node_count = len(online_nodes)

        # Check current amount of the Heketi zones
        actual_heketi_zones_amount = len(set([n[0] for n in online_nodes]))
        if zone_count != actual_heketi_zones_amount:
            if self.allow_heketi_zones_update:
                if zone_count > node_count:
                    self.skipTest("Not enough online nodes '%s' to test '%s' "
                                  "unique Heketi zones." %
                                  (node_count, zone_count))
                heketi_db_data = self._set_heketi_zones(zone_count)
                online_nodes = [
                    (n['Info']['zone'], n['Info']['hostnames']['storage'])
                    for n in heketi_db_data['nodeentries'].values()
                ]
            else:
                self.skipTest(
                    "Required amount of the Heketi zones (%s < %s) is not "
                    "satisfied and 'common.allow_heketi_zones_update' config "
                    "option is set to 'False'." %
                    (zone_count, actual_heketi_zones_amount))

        # Create storage class setting "user.heketi.zone-checking" option up
        prefix = "autotests-heketi-zones"
        sc_name = self.create_storage_class(
            sc_name_prefix=prefix,
            vol_name_prefix=prefix,
            allow_volume_expansion=expand,
            is_arbiter_vol=is_arbiter_vol,
            heketi_zone_checking=heketi_zone_checking)

        # Create PVC using above storage class
        pvc_name = self.create_and_wait_for_pvc(pvc_name_prefix=prefix,
                                                sc_name=sc_name)

        for i in range(2):
            # Validate brick placement if heketi zone checking is 'strict'
            if heketi_zone_checking == 'strict':
                brick_hosts_ips = (
                    openshift_ops.get_gluster_host_ips_by_pvc_name(
                        self.node, pvc_name))
                placement_zones = {}
                for brick_host_ip in brick_hosts_ips:
                    for node_zone, node_ips in online_nodes:
                        if brick_host_ip not in node_ips:
                            continue
                        placement_zones[node_zone] = placement_zones.get(
                            node_zone, 0) + 1
                        break
                actual_zone_count = len(placement_zones)
                # NOTE(vponomar): '3' is default amount of volume replicas.
                # And it is just impossible to find more actual zones than
                # amount of replicas/bricks.
                brick_number = len(brick_hosts_ips)
                expected_zone_count = (
                    brick_number if brick_number < zone_count else zone_count)
                self.assertEqual(
                    expected_zone_count, actual_zone_count,
                    "PVC '%s' is incorrectly placed on the Heketi nodes "
                    "according to their zones. Expected '%s' unique zones, "
                    "got '%s'." % (pvc_name, zone_count, actual_zone_count))

            # Expand PVC if needed
            if expand:
                expand_size, expand = 2, False
                openshift_storage_libs.enable_pvc_resize(self.node)
                openshift_ops.resize_pvc(self.node, pvc_name, expand_size)
                openshift_ops.verify_pvc_size(self.node, pvc_name, expand_size)
            else:
                break

        # Make sure that gluster vol has appropriate option set
        vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
            self.node, pvc_name)
        self.assertIn('user.heketi.zone-checking', vol_info['options'])
        self.assertEqual(vol_info['options']['user.heketi.zone-checking'],
                         heketi_zone_checking)
        if is_arbiter_vol:
            self.assertIn('user.heketi.arbiter', vol_info['options'])
            self.assertEqual(vol_info['options']['user.heketi.arbiter'],
                             'true')

        # Create app DC with the above PVC
        self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
    def test_expand_arbiter_volume_according_to_avg_file_size(
            self, avg_file_size, expected_brick_size, vol_expand=True):
        """Validate expansion of arbiter volume with diff avg file size"""
        data_hosts = []
        arbiter_hosts = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, self.heketi_server_url,
                node_id, json=True)
            (data_hosts.append(node_info['hostnames']['storage'][0])
                if i < 2 else
                arbiter_hosts.append(node_info['hostnames']['storage'][0]))
            self.assertEqual(
                node_info['tags']['arbiter'],
                'disabled' if i < 2 else 'required')

        # Create sc with gluster arbiter info
        self.create_storage_class(
            is_arbiter_vol=True, allow_volume_expansion=True,
            arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_expanded = False

        for i in range(2):
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            bricks = (
                self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                    vol_info,
                    arbiter_bricks=(2 if vol_expanded else 1),
                    data_bricks=(4 if vol_expanded else 2)
                )
            )

            # verify that arbiter bricks lie on arbiter hosts
            for brick in bricks['arbiter_list']:
                ip, brick_name = brick['name'].split(':')
                self.assertIn(ip, arbiter_hosts)
                # verify the size of arbiter brick
                cmd = "df -h %s --output=size | tail -1" % brick_name
                out = cmd_run_on_gluster_pod_or_node(self.node, cmd, ip)
                self.assertEqual(out, expected_brick_size)
            # verify that data bricks lie on data hosts
            for brick in bricks['data_list']:
                self.assertIn(brick['name'].split(':')[0], data_hosts)

            if vol_expanded or not vol_expand:
                break
            # Expand PVC and verify the size
            pvc_size = 2
            resize_pvc(self.node, self.pvc_name, pvc_size)
            verify_pvc_size(self.node, self.pvc_name, pvc_size)
            vol_expanded = True
Example #19
    def test_prometheus_pv_resize(self):
        """ Validate prometheus metrics with pv resize"""

        # Fetch the metrics and store them as the initial_metrics dictionary
        pvc_name, pod_name, initial_metrics = self._fetch_initial_metrics(
            vol_name_prefix="for-pv-resize", volume_expansion=True)

        # Write data on the pvc and confirm it is reflected in Prometheus
        self._perform_io_and_fetch_metrics(
            pod_name=pod_name, pvc_name=pvc_name,
            filename="filename1", dirname="dirname1",
            metric_data=initial_metrics, operation="create")

        # Resize the pvc to 2GiB
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)
        pvc_size = 2
        openshift_ops.resize_pvc(self._master, pvc_name, pvc_size)
        openshift_ops.wait_for_events(self._master, obj_name=pvc_name,
                                      event_reason='VolumeResizeSuccessful')
        openshift_ops.verify_pvc_size(self._master, pvc_name, pvc_size)
        pv_name = openshift_ops.get_pv_name_from_pvc(
            self._master, pvc_name)
        openshift_ops.verify_pv_size(self._master, pv_name, pvc_size)

        heketi_volume_name = heketi_ops.heketi_volume_list_by_name_prefix(
            self.heketi_client_node, self.heketi_server_url,
            "for-pv-resize", json=True)[0][2]
        self.assertIsNotNone(
            heketi_volume_name, "Failed to fetch volume with prefix {}".
            format("for-pv-resize"))

        openshift_ops.oc_delete(self._master, 'pod', pod_name)
        openshift_ops.wait_for_resource_absence(self._master, 'pod', pod_name)
        pod_name = openshift_ops.get_pod_name_from_dc(
            self._master, self.dc_name)
        openshift_ops.wait_for_pod_be_ready(self._master, pod_name)

        # Check whether the metrics are updated or not
        for w in waiter.Waiter(120, 10):
            resize_metrics = self._get_and_manipulate_metric_data(
                self.metrics, pvc_name)
            if bool(resize_metrics) and int(resize_metrics[
                'kubelet_volume_stats_capacity_bytes']) > int(
                    initial_metrics['kubelet_volume_stats_capacity_bytes']):
                break
        if w.expired:
            raise AssertionError("Failed to reflect PVC Size after resizing")
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)
        time.sleep(240)

        # Look up and trigger rebalance, then wait for its completion
        for _ in range(100):
            self.cmd_run("oc rsh {} ls /mnt/".format(pod_name))
        self._rebalance_completion(heketi_volume_name)

        # Write data on the resized pvc and compare it with resize_metrics
        self._perform_io_and_fetch_metrics(
            pod_name=pod_name, pvc_name=pvc_name,
            filename="secondfilename", dirname="seconddirname",
            metric_data=resize_metrics, operation="create")
Example #20
    def test_pv_resize_when_heketi_down(self):
        """Create a PVC and try to expand it when heketi is down, It should
        fail. After heketi is up, expand PVC should work.
        """
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

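        # Get the heketi volume id from the PV annotation and fetch its info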
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        custom = (r':metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi-volume-id"')
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]

        h_vol_info = heketi_ops.heketi_volume_info(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   vol_id,
                                                   json=True)

        # Bring the heketi POD down
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.node,
                        self.heketi_dc_name,
                        pod_amount=1)

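        # Write data while heketi is down to make sure the volume is usable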
        cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)

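        # Try to expand the PVC while heketi is down, a 'VolumeResizeFailed'
        # event is expected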
        resize_pvc(self.node, pvc_name, 2)
        wait_for_events(self.node,
                        pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_type='Warning',
                        event_reason='VolumeResizeFailed')

        # Verify volume was not expanded
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
        self.assertEqual(len(vol_info['bricks']['brick']),
                         len(h_vol_info['bricks']))

        # Bring the heketi POD up
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=1)

        # Verify volume expansion
        verify_pvc_size(self.node, pvc_name, 2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)
        self.assertLess(len(h_vol_info['bricks']),
                        len(vol_info['bricks']['brick']))

        # Wait for remount after expansion
        for w in waiter.Waiter(timeout=30, interval=5):
            ret, out, err = oc_rsh(self.node, pod_name,
                                   "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(ret,
                             'Failed with err: %s and Output: %s' % (err, out))
            if out.strip() == '2.0G':
                break

        # Write data, making sure we now have more space than before
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)

        # Verify pod is running
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)
    def test_pv_resize_when_heketi_down(self):
        """Create a PVC and try to expand it when heketi is down, It should
        fail. After heketi is up, expand PVC should work.
        """
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

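        # Get the heketi volume id from the PV annotation and fetch its info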
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        custom = (r':metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi-volume-id"')
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]

        h_vol_info = heketi_ops.heketi_volume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)

        # Bring the heketi POD down
        scale_dc_pod_amount_and_wait(
            self.node, self.heketi_dc_name, pod_amount=0)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.node,
            self.heketi_dc_name, pod_amount=1)

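        # Write data while heketi is down to make sure the volume is usable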
        cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)

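        # Try to expand the PVC while heketi is down, a 'VolumeResizeFailed'
        # event is expected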
        resize_pvc(self.node, pvc_name, 2)
        wait_for_events(
            self.node, pvc_name, obj_type='PersistentVolumeClaim',
            event_type='Warning', event_reason='VolumeResizeFailed')

        # Verify volume was not expanded
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
        self.assertEqual(
            len(vol_info['bricks']['brick']), len(h_vol_info['bricks']))

        # Bring the heketi POD up
        scale_dc_pod_amount_and_wait(
            self.node, self.heketi_dc_name, pod_amount=1)

        # Verify volume expansion
        verify_pvc_size(self.node, pvc_name, 2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)
        self.assertLess(
            len(h_vol_info['bricks']), len(vol_info['bricks']['brick']))

        # Wait for remount after expansion
        for w in waiter.Waiter(timeout=30, interval=5):
            ret, out, err = oc_rsh(
                self.node, pod_name,
                "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(ret, 'Failed with err: %s and Output: %s' % (
                err, out))
            if out.strip() == '2.0G':
                break

        # Write data, making sure we now have more space than before
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)

        # Verify pod is running
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)
Example #22
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            raise self.skipTest("Could not find 3 online nodes with, "
                                "at least, 1 online device having free space "
                                "bigger than %dGb." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with app POD and attach the PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(
                self.heketi_client_node, heketi_url, node_id, json=True)
            if (node_info['state'].lower() != 'online' or
                    not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(
                    self.heketi_client_node, heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(
                    heketi_ops.heketi_node_enable,
                    self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(
                        heketi_ops.heketi_device_enable,
                        self.heketi_client_node, heketi_url, device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            raise self.skipTest(
                "Could not find 3 online nodes with at least 1 online "
                "device having free space bigger than %d GB."
                % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with app POD and attach the PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node, obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = (
                "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(
                self.node, obj_name=pvc_name,
                event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0, "Failed to write data on the expanded PVC")