    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

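        # Write ~300MB of data to the volume before attempting to shrink it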
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
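        # Verify that the PVC and PV sizes remain unchanged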
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
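        # Confirm the volume is still writable after the failed shrink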
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))

    def test_expand_arbiter_volume_according_to_avg_file_size(
            self, avg_file_size, expected_brick_size, vol_expand=True):
        """Validate expansion of arbiter volume with diff avg file size"""
        data_hosts = []
        arbiter_hosts = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            self._set_arbiter_tag_with_further_revert(
                self.heketi_client_node, self.heketi_server_url, 'node',
                node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if i < 2:
                data_hosts.append(node_info['hostnames']['storage'][0])
            else:
                arbiter_hosts.append(node_info['hostnames']['storage'][0])
            self.assertEqual(node_info['tags']['arbiter'],
                             'disabled' if i < 2 else 'required')

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True,
                                  arbiter_avg_file_size=avg_file_size)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_expanded = False

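        # Check brick placement and size before and, if requested, after
        # the volume expansion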
        for i in range(2):
            vol_info = get_gluster_vol_info_by_pvc_name(
                self.node, self.pvc_name)
            bricks = (
                self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
                    vol_info,
                    arbiter_bricks=(2 if vol_expanded else 1),
                    data_bricks=(4 if vol_expanded else 2)))

            # verify arbiter bricks lie on arbiter hosts
            for brick in bricks['arbiter_list']:
                ip, brick_name = brick['name'].split(':')
                self.assertIn(ip, arbiter_hosts)
                # verify the size of arbiter brick
                cmd = "df -h %s --output=size | tail -1" % brick_name
                out = cmd_run_on_gluster_pod_or_node(self.node, cmd, ip)
                self.assertEqual(out, expected_brick_size)
            # verify that data bricks lie on data hosts
            for brick in bricks['data_list']:
                self.assertIn(brick['name'].split(':')[0], data_hosts)

            if vol_expanded or not vol_expand:
                break
            # Expand PVC and verify the size
            pvc_size = 2
            resize_pvc(self.node, self.pvc_name, pvc_size)
            verify_pvc_size(self.node, self.pvc_name, pvc_size)
            vol_expanded = True

    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertNotEqual(
            ret, 0, "Expected IO to fail, but command %s succeeded on %s"
            % (cmd, node))
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
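        # Recreate the app POD to pick up the expanded volume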
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
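        # Verify that IO succeeds on the expanded volume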
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))

    def test_arbiter_volume_expand_using_pvc(self):
        """Validate arbiter volume expansion by PVC creation"""
        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        # Get vol info
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)

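        # Expand PVC and verify the size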
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

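        # After expansion the volume should have twice as many bricks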
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)

    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
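            # Disable extra nodes once 3 suitable nodes have been found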
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
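            # Keep one large-enough device per node and disable the rest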
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if node_id in nodes or free_space < min_free_space:
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            raise self.skipTest("Could not find 3 online nodes with, "
                                "at least, 1 online device having free space "
                                "bigger than %dGb." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with a POD and attach the PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")

    def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
            self, node_tags):
        """Validate expansion of arbiter volume with different tags

           This test case is going to run two scenarios:
                1. If node_tags is True, it sets tags on nodes
                   and runs the test
                2. If node_tags is False, it sets tags on devices
                   and runs the test
        """

        data_nodes = []
        arbiter_nodes = []

        # set tags arbiter:disabled, arbiter:required
        for i, node_id in enumerate(self.node_id_list):
            if node_tags:
                self._set_arbiter_tag_with_further_revert(
                    self.heketi_client_node, self.heketi_server_url, 'node',
                    node_id, 'disabled' if i < 2 else 'required')

            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)

            if not node_tags:
                for device in node_info['devices']:
                    self._set_arbiter_tag_with_further_revert(
                        self.heketi_client_node, self.heketi_server_url,
                        'device', device['id'],
                        'disabled' if i < 2 else 'required')
                    device_info = heketi_ops.heketi_device_info(
                        self.heketi_client_node,
                        self.heketi_server_url,
                        device['id'],
                        json=True)
                    self.assertEqual(device_info['tags']['arbiter'],
                                     'disabled' if i < 2 else 'required')

            node = {
                'id': node_id,
                'host': node_info['hostnames']['storage'][0]
            }
            if node_tags:
                self.assertEqual(node_info['tags']['arbiter'],
                                 'disabled' if i < 2 else 'required')
            if i < 2:
                data_nodes.append(node)
            else:
                arbiter_nodes.append(node)

        # Create sc with gluster arbiter info
        self.create_storage_class(is_arbiter_vol=True,
                                  allow_volume_expansion=True)

        # Create PVC and wait for it to be in 'Bound' state
        self.create_and_wait_for_pvc()

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info)

        arbiter_hosts = [obj['host'] for obj in arbiter_nodes]
        data_hosts = [obj['host'] for obj in data_nodes]

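        # Verify that bricks land on the correspondingly tagged hosts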
        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)

        # Expand PVC and verify the size
        pvc_size = 2
        resize_pvc(self.node, self.pvc_name, pvc_size)
        verify_pvc_size(self.node, self.pvc_name, pvc_size)

        vol_info = get_gluster_vol_info_by_pvc_name(self.node, self.pvc_name)

        bricks = self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
            vol_info, arbiter_bricks=2, data_bricks=4)

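        # Verify brick placement again after the expansion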
        for brick in bricks['arbiter_list']:
            self.assertIn(brick['name'].split(':')[0], arbiter_hosts)

        for brick in bricks['data_list']:
            self.assertIn(brick['name'].split(':')[0], data_hosts)