def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
    def test_sc_create_with_clusterid(self):
        """Create storage class with 'cluster id'"""
        h_cluster_list = heketi_cluster_list(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertTrue(h_cluster_list, "Failed to list heketi cluster")
        cluster_id = h_cluster_list["clusters"][0]
        sc = self.create_storage_class(clusterid=cluster_id)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc)

        # Validate if cluster id is correct in the heketi volume info
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], pvc_name)
        volume_id = oc_get_custom_resource(
            self.ocp_master_node[0],
            'pv', r':metadata.annotations."gluster\.kubernetes\.io'
            r'\/heketi-volume-id"',
            name=pv_name)[0]
        volume_info = heketi_volume_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         volume_id,
                                         json=True)

        self.assertEqual(
            cluster_id, volume_info["cluster"],
            "Cluster ID %s has NOT been used to"
            "create the PVC %s. Found %s" %
            (cluster_id, pvc_name, volume_info["cluster"]))
    def test_pv_resize_with_prefix_for_name_and_size(
            self, create_vol_name_prefix=False, valid_size=True):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # Expand with a valid size; an invalid size must not change the PVC
        if valid_size:
            cmd = ("dd if=/dev/urandom of=%sfile2 "
                   "bs=100K count=10000") % dir_path
            with self.assertRaises(AssertionError):
                ret, out, err = oc_rsh(node, pod_name, cmd)
                msg = ("Command '%s' was expected to fail on '%s' node. "
                       "But it returned following: ret is '%s', err is '%s' "
                       "and out is '%s'" % (cmd, node, ret, err, out))
                raise ExecutionError(msg)
            pvc_size = 2
            resize_pvc(node, pvc_name, pvc_size)
            verify_pvc_size(node, pvc_name, pvc_size)
            verify_pv_size(node, pv_name, pvc_size)
        else:
            invalid_pvc_size = 'ten'
            with self.assertRaises(AssertionError):
                resize_pvc(node, pvc_name, invalid_pvc_size)
            verify_pvc_size(node, pvc_name, 1)
            verify_pv_size(node, pv_name, 1)

        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "Failed to execute command %s on %s" % (cmd, node))
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                    self.ocp_client[0], 'pv', custom, pv_name)

            # check if the pv_volume_name is present in heketi
            # Check if volume name is "vol_"+volumeid or not
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketi db' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
    def get_block_hosting_volume_by_pvc_name(
            self, pvc_name, heketi_server_url=None, gluster_node=None,
            ocp_client_node=None):
        """Get block hosting volume of pvc name given

        Args:
            pvc_name (str): pvc name for which the BHV name needs
                            to be returned
        Kwargs:
            heketi_server_url (str): heketi server url to run heketi commands
            gluster_node (str): gluster node where to run gluster commands
            ocp_client_node (str): OCP client node on which to run oc commands
        """
        if not heketi_server_url:
            heketi_server_url = self.heketi_server_url
        pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
        block_volume = oc_get_custom_resource(
            self.ocp_client[0], 'pv',
            r':.metadata.annotations."gluster\.org\/volume\-id"',
            name=pv_name
        )[0]

        # get block hosting volume from block volume
        block_hosting_vol = get_block_hosting_volume_name(
            self.heketi_client_node, heketi_server_url, block_volume,
            gluster_node=gluster_node, ocp_client_node=ocp_client_node)

        return block_hosting_vol
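    # Hedged usage sketch (not part of the original example): a hypothetical
    # caller would resolve the block hosting volume (BHV) backing a freshly
    # provisioned block PVC; 'create_and_wait_for_pvc' and the heketi/OCP
    # attributes are assumed to come from the same base class as above.
    def _sketch_bhv_for_new_pvc(self):
        pvc_name = self.create_and_wait_for_pvc()
        bhv_name = self.get_block_hosting_volume_by_pvc_name(
            pvc_name, heketi_server_url=self.heketi_server_url,
            ocp_client_node=self.ocp_client[0])
        return bhv_name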
    def test_pv_resize_try_shrink_pv_size(self):
        """Validate whether reducing the PV size is allowed"""
        dir_path = "/mnt/"
        node = self.ocp_master_node[0]

        # Create PVC
        pv_size = 5
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)

        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=3000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        pvc_resize = 2
        with self.assertRaises(ExecutionError):
            resize_pvc(node, pvc_name, pvc_resize)
        verify_pvc_size(node, pvc_name, pv_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pv_size)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=100K count=2000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
    def dynamic_provisioning_glusterfile(self, create_vol_name_prefix):
        # Create secret and storage class
        self.create_storage_class(
            create_vol_name_prefix=create_vol_name_prefix)

        # Create PVC
        pvc_name = self.create_and_wait_for_pvc()

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)

        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Verify Heketi volume name for prefix presence if provided
        if create_vol_name_prefix:
            ret = verify_volume_name_prefix(self.node,
                                            self.sc['volumenameprefix'],
                                            self.sc['secretnamespace'],
                                            pvc_name, self.sc['resturl'])
            self.assertTrue(ret, "verify volnameprefix failed")
        else:
            # Get the volume name and volume id from PV
            pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
            custom = [
                r':spec.glusterfs.path',
                r':metadata.annotations.'
                r'"gluster\.kubernetes\.io\/heketi-volume-id"'
            ]
            pv_vol_name, vol_id = oc_get_custom_resource(
                self.ocp_client[0], 'pv', custom, pv_name)

            # check if the pv_volume_name is present in heketi
            # Check if volume name is "vol_"+volumeid or not
            heketi_vol_name = heketi_volume_info(
                self.ocp_client[0], self.heketi_server_url, vol_id,
                json=True)['name']
            self.assertEqual(pv_vol_name, heketi_vol_name,
                             'Volume with vol_id = %s not found '
                             'in heketi db' % vol_id)
            self.assertEqual(heketi_vol_name, 'vol_' + vol_id,
                             'Volume with vol_id = %s has a '
                             'custom prefix' % vol_id)
            out = cmd_run_on_gluster_pod_or_node(self.ocp_master_node[0],
                                                 "gluster volume list")
            self.assertIn(pv_vol_name, out,
                          "Volume with id %s does not exist" % vol_id)

        # Make sure we are able to work with files on the mounted volume
        filepath = "/mnt/file_for_testing_io.log"
        for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
                    "ls -lrt %s",
                    "rm -rf %s"):
            cmd = cmd % filepath
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to execute '%s' command on %s" % (cmd, self.node))
    def test_pvc_resize_while_ios_are_running(self):
        """Re-size PVC  while IO's are running"""

        # Create an SC, PVC and app pod
        sc_name = self.create_storage_class(create_vol_name_prefix=True,
                                            allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Run io on the pod for 5 minutes in background
        cmd_io = ('timeout 5m bash -c -- "while true; do oc exec  {} dd '
                  'if=/dev/urandom of=/mnt/f1 bs=100K count=2000; '
                  'done"'.format(pod_name))
        proc = g.run_async(host=self.node, command=cmd_io)

        # Resize PVC while io's are running and validate resize operation
        resize_pvc(self.node, pvc_name, 2)
        verify_pvc_size(self.node, pvc_name, 2)
        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        verify_pv_size(self.node, pv_name, 2)

        # Check if timeout command and ios are successful
        ret, _, err = proc.async_communicate()
        msg = "command terminated with exit code"
        if ret != 124 or msg in str(err):
            raise ExecutionError("Failed to run io, error {}".format(str(err)))
    def _pv_resize(self, exceed_free_space):
        dir_path = "/mnt"
        pvc_size_gb = 1

        available_size_gb = self._available_disk_free_space()

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create a DC with a POD and attach the PVC to it
        dc_name = oc_create_app_dc_with_io(
            self.node, pvc_name, image=self.io_container_image_cirros)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            exceed_size = available_size_gb + 10

            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, exceed_size)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")
    def create_and_wait_for_pvcs(self, pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1, sc_name=None,
                                 timeout=120, wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(
                wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)

        return pvc_names
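    # Hedged usage sketch (not part of the original example): the helper above
    # creates several PVCs against a single storage class and registers
    # cleanup for each of them; the method name below is hypothetical.
    def _sketch_create_three_small_pvcs(self):
        return self.create_and_wait_for_pvcs(
            pvc_size=1, pvc_name_prefix="autotests-pvc", pvc_amount=3,
            timeout=300)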
    def get_vol_ids_by_pvc_names(self, pvc_names):
        """Return heketi volume IDs for the given PVC names"""
        vol_ids = []
        custom = (r':.metadata.annotations."gluster\.kubernetes\.io\/'
                  r'heketi-volume-id"')
        for pvc in pvc_names:
            pv = openshift_ops.get_pv_name_from_pvc(self.node, pvc)
            vol_id = openshift_ops.oc_get_custom_resource(
                self.node, 'pv', custom, pv)
            vol_ids.append(vol_id[0])
        return vol_ids
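    # Hedged usage sketch (not part of the original example): mapping PVCs to
    # heketi volume ids allows cross-checking them against 'heketi volume list'
    # output; the 'heketi_ops' module and the heketi connection attributes are
    # assumed to be available as in the neighbouring snippets.
    def _sketch_assert_vols_known_to_heketi(self, pvc_names):
        vol_ids = self.get_vol_ids_by_pvc_names(pvc_names)
        vol_list = heketi_ops.heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url)
        for vol_id in vol_ids:
            self.assertIn(vol_id, vol_list)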
    def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
        """Validate retain policy for gluster-block after PVC deletion"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)

        try:
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
            oc_delete(self.node, 'dc', dc_name)

        # get the name of volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)

        custom = [
            r':.metadata.annotations."gluster\.org\/volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]
        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        # Check the reclaim policy of the PV
        self.assertEqual(reclaim_policy, 'Retain')

        # delete the pvc
        oc_delete(self.node, 'pvc', self.pvc_name)

        # Check that the PV is not deleted along with the PVC ('Retain' policy)
        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        # getting the blockvol list
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertIn(vol_id, blocklist)

        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_id)
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertNotIn(vol_id, blocklist)
        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'],
                self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile "
               "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        with self.assertRaises(AssertionError):
            ret, out, err = oc_rsh(node, pod_name, cmd)
            msg = ("Command '%s' was expected to fail on '%s' node. "
                   "But it returned following: ret is '%s', err is '%s' "
                   "and out is '%s'" % (cmd, node, ret, err, out))
            raise ExecutionError(msg)

        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
                             cmd, node))
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [
            r':.metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a POD and attach the PVC to it.
        try:
            dc_name = oc_create_app_dc_with_io(
                self.node, self.pvc_name, image=self.io_container_image_cirros)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
        """Validate retain policy for gluster-block after PVC deletion"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)

        try:
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
            oc_delete(self.node, 'dc', dc_name)

        # get the name of volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)

        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']
        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        # Check the reclaim policy of the PV
        self.assertEqual(reclaim_policy, 'Retain')

        # delete the pvc
        oc_delete(self.node, 'pvc', self.pvc_name)

        # Check that the PV is not deleted along with the PVC ('Retain' policy)
        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        # getting the blockvol list
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertIn(vol_id, blocklist)

        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_id)
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertNotIn(vol_id, blocklist)
        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def test_pv_resize_with_prefix_for_name(self,
                                            create_vol_name_prefix=False):
        """Validate PV resize with and without name prefix"""
        dir_path = "/mnt/"
        node = self.ocp_client[0]

        # Create PVC
        self.create_storage_class(
            allow_volume_expansion=True,
            create_vol_name_prefix=create_vol_name_prefix)
        pvc_name = self.create_and_wait_for_pvc()

        # Create a DC with a POD and attach the PVC to it.
        dc_name = oc_create_app_dc_with_io(node, pvc_name)
        self.addCleanup(oc_delete, node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0)

        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        if create_vol_name_prefix:
            ret = heketi_ops.verify_volume_name_prefix(
                node, self.sc['volumenameprefix'], self.sc['secretnamespace'],
                pvc_name, self.heketi_server_url)
            self.assertTrue(ret, "verify volnameprefix failed")
        cmd = ("dd if=/dev/urandom of=%sfile " "bs=100K count=1000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
        cmd = ("dd if=/dev/urandom of=%sfile2 "
               "bs=100K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertNotEqual(
            ret, 0, "Expected IO command '%s' to fail on %s, "
            "but it succeeded" % (cmd, node))
        pvc_size = 2
        resize_pvc(node, pvc_name, pvc_size)
        verify_pvc_size(node, pvc_name, pvc_size)
        pv_name = get_pv_name_from_pvc(node, pvc_name)
        verify_pv_size(node, pv_name, pvc_size)
        oc_delete(node, 'pod', pod_name)
        wait_for_resource_absence(node, 'pod', pod_name)
        pod_name = get_pod_name_from_dc(node, dc_name)
        wait_for_pod_be_ready(node, pod_name)
        cmd = ("dd if=/dev/urandom of=%sfile_new "
               "bs=50K count=10000") % dir_path
        ret, out, err = oc_rsh(node, pod_name, cmd)
        self.assertEqual(ret, 0,
                         "failed to execute command %s on %s" % (cmd, node))
    def validate_multipath_info(self, hacount):
        """validates multipath command on the pod node

        Args:
            hacount (int): hacount for which multipath to be checked
        """
        # create pod using pvc created
        dc_name = oc_create_app_dc_with_io(
            self.ocp_master_node[0],
            self.pvc_name,
            image=self.io_container_image_cirros)
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.ocp_master_node[0],
                        dc_name, 0)

        wait_for_pod_be_ready(self.ocp_master_node[0],
                              pod_name,
                              timeout=120,
                              wait_step=3)

        # Get pod info
        pod_info = oc_get_pods(self.ocp_master_node[0],
                               selector='deploymentconfig=%s' % dc_name)
        node = pod_info[pod_name]['node']

        # Find iqn from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.ocp_master_node[0], 'pv', custom,
                                        pv_name)[0]
        vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                           self.heketi_server_url,
                                           vol_id,
                                           json=True)
        iqn = vol_info['blockvolume']['iqn']

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn).keys()
        self.assertEqual(hacount, len(devices))

        # Validate mpath
        mpaths = set()
        for device in devices:
            mpaths.add(get_mpath_name_from_device_name(node, device))
        self.assertEqual(1, len(mpaths))
        validate_multipath_pod(self.ocp_master_node[0], pod_name, hacount,
                               list(mpaths)[0])
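    # Hedged usage sketch (not part of the original example): a hypothetical
    # test would provision a gluster-block PVC and delegate the path checks to
    # the 'validate_multipath_info' helper above, taking the expected hacount
    # from the storage class parameters as done elsewhere in these snippets.
    def _sketch_check_multipath_for_block_pvc(self):
        self.create_storage_class()
        self.create_and_wait_for_pvc()
        self.validate_multipath_info(hacount=int(self.sc['hacount']))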
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
                  r':.spec.persistentVolumeReclaimPolicy']

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a POD and attach the PVC to it.
        try:
            dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(
                self.node, 'pvc', self.pvc_name, interval=3, timeout=30)

        heketi_volume_delete(self.heketi_client_node,
                             self.heketi_server_url, vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
    def _block_vol_expand_common_offline_vs_online(self, is_online_expand):
        node = self.ocp_master_node[0]
        h_node, h_server = self.heketi_client_node, self.heketi_server_url

        version = heketi_version.get_heketi_version(h_node)
        if version < '9.0.0-14':
            self.skipTest("heketi-client package {} does not support "
                          "blockvolume expand".format(version.v_str))

        pvc_name = self.create_and_wait_for_pvc()
        dc_name = self.create_dc_with_pvc(pvc_name)
        pv_name = get_pv_name_from_pvc(node, pvc_name)

        # get block volume id
        custom = r":.metadata.annotations.'gluster\.org\/volume-id'"
        bvol_id = oc_get_custom_resource(node, 'pv', custom, pv_name)
        self.assertNotEqual(
            bvol_id[0], "<none>",
            "Block volume ID not found for PV {}".format(pv_name))
        bvol_info = heketi_blockvolume_info(h_node,
                                            h_server,
                                            bvol_id[0],
                                            json=True)

        # verify required blockhostingvolume free size
        bhv_id = bvol_info["blockhostingvolume"]
        bhv_info = heketi_volume_info(h_node, h_server, bhv_id, json=True)
        if bhv_info["blockinfo"]["freesize"] < 1:
            self.skipTest("blockhostingvolume doesn't have required freespace")

        if not is_online_expand:
            scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=0)

        # expand block volume and verify usable size
        bvol_info = heketi_blockvolume_expand(h_node,
                                              h_server,
                                              bvol_id[0],
                                              2,
                                              json=True)
        self.assertEqual(bvol_info["size"], 2,
                         "Block volume expand does not works")
        self.assertEqual(
            bvol_info["size"], bvol_info["usablesize"],
            "block volume size is not equal to the usablesize: {}".format(
                bvol_info))

        return pvc_name, dc_name, bvol_info
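    # Hedged sketch (not part of the original example): hypothetical wrappers
    # showing the two invocations of the common expand routine above, with the
    # app pod either scaled down (offline) or left running (online).
    def test_block_vol_offline_expand_sketch(self):
        """Sketch: expand a block volume while its app pod is scaled down"""
        self._block_vol_expand_common_offline_vs_online(False)

    def test_block_vol_online_expand_sketch(self):
        """Sketch: expand a block volume while its app pod keeps running"""
        self._block_vol_expand_common_offline_vs_online(True)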
    def validate_multipath_info(self, hacount):
        """validates multipath command on the pod node

        Args:
            hacount (int): hacount for which multipath to be checked
        """
        # create pod using pvc created
        dc_name = oc_create_app_dc_with_io(
            self.ocp_master_node[0], self.pvc_name
        )
        pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
        self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
        self.addCleanup(
            scale_dc_pod_amount_and_wait, self.ocp_master_node[0], dc_name, 0
        )

        wait_for_pod_be_ready(
            self.ocp_master_node[0], pod_name, timeout=120, wait_step=3
        )

        # Get pod info
        pod_info = oc_get_pods(
            self.ocp_master_node[0], selector='deploymentconfig=%s' % dc_name)
        node = pod_info[pod_name]['node']

        # Find iqn from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(
            self.ocp_master_node[0], 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn).keys()
        self.assertEqual(hacount, len(devices))

        # Validate mpath
        mpaths = set()
        for device in devices:
            mpaths.add(get_mpath_name_from_device_name(node, device))
        self.assertEqual(1, len(mpaths))
        validate_multipath_pod(
            self.ocp_master_node[0], pod_name, hacount, list(mpaths)[0])
    def test_volname_prefix_glusterblock(self):
        """Validate custom volname prefix blockvol"""

        self.dynamic_provisioning_glusterblock(set_hacount=False,
                                               create_vol_name_prefix=True)

        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        vol_name = oc_get_custom_resource(
            self.node, 'pv', ':.metadata.annotations.glusterBlockShare',
            pv_name)[0]

        block_vol_list = heketi_blockvolume_list(self.heketi_client_node,
                                                 self.heketi_server_url)

        self.assertIn(vol_name, block_vol_list)

        self.assertTrue(
            vol_name.startswith(self.sc.get('volumenameprefix', 'autotest')))
    def test_volname_prefix_glusterblock(self):
        """Validate custom volname prefix blockvol"""

        self.dynamic_provisioning_glusterblock(
            set_hacount=False, create_vol_name_prefix=True)

        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        vol_name = oc_get_custom_resource(
                self.node, 'pv',
                ':.metadata.annotations.glusterBlockShare', pv_name)[0]

        block_vol_list = heketi_blockvolume_list(
                self.heketi_client_node, self.heketi_server_url)

        self.assertIn(vol_name, block_vol_list)

        self.assertTrue(vol_name.startswith(
            self.sc.get('volumenameprefix', 'autotest')))
    def get_block_hosting_volume_by_pvc_name(self, pvc_name):
        """Get block hosting volume of pvc name given

        Args:
            pvc_name (str): pvc name for which the BHV name needs
                            to be returned
        """
        pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
        block_volume = oc_get_custom_resource(
            self.ocp_client[0], 'pv',
            r':.metadata.annotations."gluster\.org\/volume\-id"',
            name=pv_name
        )[0]

        # get block hosting volume from block volume
        block_hosting_vol = get_block_hosting_volume_name(
            self.heketi_client_node, self.heketi_server_url, block_volume)

        return block_hosting_vol
    def _dynamic_provisioning_block_with_bhv_cleanup(
            self, sc_name, pvc_size, bhv_list):
        """Dynamic provisioning for glusterblock with BHV Cleanup"""
        h_node, h_url = self.heketi_client_node, self.heketi_server_url
        pvc_name = oc_create_pvc(self.node, sc_name, pvc_size=pvc_size)
        try:
            verify_pvc_status_is_bound(self.node, pvc_name)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
            bvol_id = oc_get_custom_resource(
                self.node, 'pv', custom, pv_name)[0]
            bhv_id = heketi_blockvolume_info(
                h_node, h_url, bvol_id, json=True)['blockhostingvolume']
            if bhv_id not in bhv_list:
                self.addCleanup(
                    heketi_volume_delete, h_node, h_url, bhv_id)
        finally:
            self.addCleanup(
                wait_for_resource_absence, self.node, 'pvc', pvc_name)
            self.addCleanup(
                oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=True)
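    # Hedged usage sketch (not part of the original example): a hypothetical
    # caller snapshots the existing block hosting volumes first, so that only
    # BHVs created for the new PVC are scheduled for deletion;
    # 'get_block_hosting_volume_list' is assumed available as used above.
    def _sketch_provision_block_pvc_with_bhv_cleanup(self, sc_name):
        bhv_list = list(get_block_hosting_volume_list(
            self.heketi_client_node, self.heketi_server_url).keys())
        self._dynamic_provisioning_block_with_bhv_cleanup(
            sc_name, pvc_size=1, bhv_list=bhv_list)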
    def test_create_and_verify_pvc_with_volume_name_prefix(self):
        """create and verify pvc with volname prefix on an app pod"""
        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        sc_name = self.create_storage_class(create_vol_name_prefix=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
        namespace = (self.sc.get('secretnamespace',
                                 self.sc.get('restsecretnamespace',
                                             'default')))
        verify_volume_name_prefix(self.heketi_client_node,
                                  self.sc.get("volumenameprefix", "autotest"),
                                  namespace, pvc_name, self.heketi_server_url)
        self.create_dc_with_pvc(pvc_name)
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], pvc_name)
        endpoint = oc_get_custom_resource(self.ocp_master_node[0],
                                          "pv",
                                          ":spec.glusterfs.endpoints",
                                          name=pv_name)
        self.assertTrue(
            endpoint, "Failed to read Endpoints of %s on  %s " %
            (pv_name, self.ocp_master_node[0]))
    def create_and_wait_for_pvcs(self,
                                 pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1,
                                 sc_name=None,
                                 timeout=120,
                                 wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(node,
                                     sc_name,
                                     pvc_name_prefix=pvc_name_prefix,
                                     pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete,
                                    node,
                                    'pv',
                                    pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(node, 'pv', custom,
                                                    pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete,
                                node,
                                'pvc',
                                pvc_name,
                                raise_on_absence=False)

        return pvc_names
    def initiator_side_failures(self):

        # get storage ips of glusterfs pods
        keys = self.gluster_servers
        gluster_ips = []
        for key in keys:
            gluster_ips.append(self.gluster_servers_info[key]['storage'])
        gluster_ips.sort()

        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # find iqn and hacount from volume info
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, self.heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']
        hacount = int(self.sc['hacount'])

        # create app pod
        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)

        # Verify iscsi login devices & mpaths twice: before and after the pod
        # is rescheduled to another node
        for i in range(2):

            # get node hostname from pod info
            pod_info = oc_get_pods(
                self.node, selector='deploymentconfig=%s' % dc_name)
            node = pod_info[pod_name]['node']

            # get the iscsi sessions info from the node
            iscsi = get_iscsi_session(node, iqn)
            self.assertEqual(hacount, len(iscsi))
            iscsi.sort()
            self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))

            # get the paths info from the node
            devices = get_iscsi_block_devices_by_path(node, iqn).keys()
            self.assertEqual(hacount, len(devices))

            # get mpath names and verify that only one mpath is there
            mpaths = set()
            for device in devices:
                mpaths.add(get_mpath_name_from_device_name(node, device))
            self.assertEqual(1, len(mpaths))

            validate_multipath_pod(
                self.node, pod_name, hacount, mpath=list(mpaths)[0])

            # The failover steps below run only once; after the second round
            # of verification we are done
            if i == 1:
                break

            # Make the node where the pod is running unschedulable
            oc_adm_manage_node(
                self.node, '--schedulable=false', nodes=[node])

            # Restore the node to schedulable state during cleanup
            self.addCleanup(
                oc_adm_manage_node, self.node, '--schedulable=true',
                nodes=[node])

            # Delete the pod so it gets rescheduled on another node
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

            # wait for pod to come up
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # get the iscsi session from the previous node to verify logout
            iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
            self.assertFalse(iscsi)
    def test_expansion_of_block_hosting_volume_using_heketi(self):
        """Verify that after expanding block hosting volume we are able to
        consume the expanded space"""

        h_node = self.heketi_client_node
        h_url = self.heketi_server_url
        bvols_in_bhv = set([])
        bvols_pv = set([])

        BHVS = get_block_hosting_volume_list(h_node, h_url)

        free_BHVS_count = 0
        for vol in BHVS.keys():
            info = heketi_volume_info(h_node, h_url, vol, json=True)
            if info['blockinfo']['freesize'] > 0:
                free_BHVS_count += 1
            if free_BHVS_count > 1:
                self.skipTest("Skip test case because there is more than one"
                              " Block Hosting Volume with free space")

        # create block volume of 1gb
        bvol_info = heketi_blockvolume_create(h_node, h_url, 1, json=True)

        expand_size = 20
        try:
            self.verify_free_space(expand_size)
            bhv = bvol_info['blockhostingvolume']
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols_in_bhv.update(vol_info['blockinfo']['blockvolume'])
        finally:
            # cleanup BHV if there is only one block volume inside it
            if len(bvols_in_bhv) == 1:
                self.addCleanup(
                    heketi_volume_delete, h_node, h_url, bhv, json=True)
            self.addCleanup(
                heketi_blockvolume_delete, h_node, h_url, bvol_info['id'])

        size = vol_info['size']
        free_size = vol_info['blockinfo']['freesize']
        bvol_count = int(free_size / expand_size)
        bricks = vol_info['bricks']

        # Create PVCs to fill the BHV
        pvcs = self.create_and_wait_for_pvcs(
            pvc_size=(expand_size if bvol_count else free_size),
            pvc_amount=(bvol_count or 1), timeout=300)

        vol_expand = True

        for i in range(2):
            # get the vol ids from pvcs
            for pvc in pvcs:
                pv = get_pv_name_from_pvc(self.node, pvc)
                custom = r':.metadata.annotations."gluster\.org\/volume-id"'
                bvol_id = oc_get_custom_resource(self.node, 'pv', custom, pv)
                bvols_pv.add(bvol_id[0])

            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols = vol_info['blockinfo']['blockvolume']
            bvols_in_bhv.update(bvols)
            self.assertEqual(bvols_pv, (bvols_in_bhv & bvols_pv))

            # Expand BHV and verify bricks and size of BHV
            if vol_expand:
                vol_expand = False
                heketi_volume_expand(
                    h_node, h_url, bhv, expand_size, json=True)
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)

                self.assertEqual(size + expand_size, vol_info['size'])
                self.assertFalse(len(vol_info['bricks']) % 3)
                self.assertLess(len(bricks), len(vol_info['bricks']))

                # create more PVCs in expanded BHV
                pvcs = self.create_and_wait_for_pvcs(
                    pvc_size=(expand_size - 1), pvc_amount=1)
    def test_dev_path_block_volume_delete(self):
        """Validate device path name changes the deletion of
           already existing file volumes
        """

        pvc_size, pvc_amount = 2, 5
        pvc_names, gluster_block_list, vol_details = [], [], []

        # Fetch BHV list
        h_bhv_list_before = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()

        # Create storage class
        sc_name = self.create_storage_class()

        # Delete created BHV and BV as cleanup during failures
        self.addCleanup(self._cleanup_heketi_volumes, h_bhv_list_before)

        # Create PVC's
        for i in range(0, pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(self.node,
                                                   sc_name,
                                                   pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(openshift_ops.wait_for_resource_absence, self.node,
                            'pvc', pvc_name)
            self.addCleanup(openshift_ops.oc_delete,
                            self.node,
                            'pvc',
                            pvc_name,
                            raise_on_absence=False)

        # Wait for PVC's to be bound
        openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)

        # Get volume name list
        for pvc_name in pvc_names:
            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
            volume_name = openshift_ops.get_vol_names_from_pv(self.node,
                                                              pv_name,
                                                              vol_type='block')
            vol_details.append(volume_name)

        # Get BHV list after BV creation
        h_bhv_list_after = heketi_ops.get_block_hosting_volume_list(
            self.h_node, self.h_server).keys()
        self.assertTrue(h_bhv_list_after, "Failed to get the BHV list")

        # Validate BV's count
        self.validate_block_volumes_count(self.h_node, self.h_server,
                                          self.node_ip)

        # Collect pvs info, detach and re-attach the disks, then collect
        # pvs info again
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(self.vm_name, self.node_hostname,
                                    self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Compare pvs info before and after
        for (path, uuid, vg_name), (_path, _uuid,
                                    _vg_name) in zip(pvs_info_before[:-1],
                                                     pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))

        # Delete created PVCs
        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)

        # Wait for pvc to get deleted
        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)

        # Collect gluster block list from each existing BHV
        for bhv_name in h_bhv_list_after:
            b_list = block_libs.get_block_list(self.node_ip, volname=bhv_name)
            self.assertIsNotNone(
                b_list, "Failed to get gluster block list for %s" % bhv_name)
            gluster_block_list.extend(b_list)

        # Get list of block volumes using heketi
        h_blockvol_list = heketi_ops.heketi_blockvolume_list(self.h_node,
                                                             self.h_server,
                                                             json=True)

        # Validate volumes created are not present
        for vol in vol_details:
            self.assertNotIn(vol, gluster_block_list,
                             "Failed to delete volume {}".format(vol))
            self.assertNotIn(vol, h_blockvol_list['blockvolumes'],
                             "Failed to delete blockvolume '{}'".format(vol))
Example 30
    def test_dev_path_file_volume_delete(self):
        """Validate device path name changes the deletion of
           already existing file volumes
        """

        pvc_size, pvc_amount = 2, 5
        vol_details, pvc_names = [], []

        # Create PVCs
        sc_name = self.create_storage_class()
        for i in range(0, pvc_amount):
            pvc_name = openshift_ops.oc_create_pvc(self.node,
                                                   sc_name,
                                                   pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(openshift_ops.wait_for_resource_absence, self.node,
                            'pvc', pvc_name)
            self.addCleanup(openshift_ops.oc_delete,
                            self.node,
                            'pvc',
                            pvc_name,
                            raise_on_absence=False)

        # Wait for PVCs to be bound
        openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)

        # Get volume name from each PV
        for pvc_name in pvc_names:
            pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
            volume_name = openshift_ops.get_vol_names_from_pv(
                self.node, pv_name)
            vol_details.append(volume_name)

        # Verify file volumes count
        self.validate_file_volumes_count(self.h_node, self.h_server,
                                         self.node_ip)

        # Collect pvs info, detach and reattach disks, then collect pvs info
        # again
        pvs_info_before = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)
        self.detach_and_attach_vmdk(self.vm_name, self.node_hostname,
                                    self.devices_list)
        pvs_info_after = openshift_storage_libs.get_pvs_info(
            self.node, self.node_ip, self.devices_list, raise_on_error=False)

        # Compare pvs info before and after
        for (path, uuid, vg_name), (_path, _uuid,
                                    _vg_name) in zip(pvs_info_before[:-1],
                                                     pvs_info_after[1:]):
            self.assertEqual(
                uuid, _uuid, "pv_uuid check failed. Expected:{},"
                "Actual: {}".format(uuid, _uuid))
            self.assertEqual(
                vg_name, _vg_name, "vg_name check failed. Expected:"
                "{}, Actual:{}".format(vg_name, _vg_name))

        # Delete created PVCs
        for pvc_name in pvc_names:
            openshift_ops.oc_delete(self.node, 'pvc', pvc_name)

        # Wait for resource absence and get volume list
        openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)
        vol_list = volume_ops.get_volume_list(self.node_ip)
        self.assertIsNotNone(vol_list, "Failed to get volumes list")

        # Validate volumes created are not present
        for vol in vol_details:
            self.assertNotIn(vol, vol_list,
                             "Failed to delete volume {}".format(vol))
    def verify_iscsi_sessions_and_multipath(
            self, pvc_name, rname, rtype='dc', heketi_server_url=None,
            is_registry_gluster=False):
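        """Verify iscsi sessions and multipath for a block volume PVC.

        Reads the block volume id from the PV annotation, fetches its iqn
        and hacount from heketi, locates the node running the pod and
        asserts that the node has 'hacount' iscsi sessions towards gluster
        storage IPs, 'hacount' block devices for that iqn and a single
        mpath device, which is then validated inside the pod.

        Returns:
            tuple: (iqn, hacount, node)
        """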
        if not heketi_server_url:
            heketi_server_url = self.heketi_server_url

        # Get storage ips of glusterfs pods
        keys = (list(g.config['gluster_registry_servers'].keys()) if
                is_registry_gluster else self.gluster_servers)
        servers_info = (g.config['gluster_registry_servers'] if
                        is_registry_gluster else self.gluster_servers_info)
        gluster_ips = []
        for key in keys:
            gluster_ips.append(servers_info[key]['storage'])
        gluster_ips.sort()

        # Find iqn and hacount from volume info
        pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(
            self.ocp_client[0], 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(
            self.heketi_client_node, heketi_server_url, vol_id, json=True)
        iqn = vol_info['blockvolume']['iqn']
        hacount = int(vol_info['hacount'])

        # Find node on which pod is running
        if rtype == 'dc':
            pod_name = get_pod_name_from_dc(self.ocp_client[0], rname)
            pod_info = oc_get_pods(
                self.ocp_client[0], selector='deploymentconfig=%s' % rname)
        elif rtype == 'pod':
            pod_info = oc_get_pods(self.ocp_client[0], name=rname)
            pod_name = rname
        elif rtype == 'rc':
            pod_name = get_pod_name_from_rc(self.ocp_client[0], rname)
            pod_info = oc_get_pods(
                self.ocp_client[0], selector='name=%s' % rname)
        else:
            raise NameError(
                "Value of rtype should be one of 'dc', 'rc' or 'pod'")

        node = pod_info[pod_name]['node']

        # Get the iscsi sessions info from the node
        iscsi = get_iscsi_session(node, iqn)
        msg = ('Only %s iscsi sessions are present on node %s, expected %s.'
               % (iscsi, node, hacount))
        self.assertEqual(hacount, len(iscsi), msg)
        iscsi.sort()
        msg = ("Only gluster Nodes %s were expected in iscsi sessions, "
               "but got other Nodes %s on Node %s" % (
                   gluster_ips, iscsi, node))
        self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)), msg)

        # Get the paths info from the node
        devices = get_iscsi_block_devices_by_path(node, iqn)
        msg = ("Only %s devices are present on Node %s, expected %s" % (
            devices, node, hacount,))
        self.assertEqual(hacount, len(devices), msg)

        # Get mpath names and verify that only one mpath is there
        mpaths = set()
        for device in devices.keys():
            mpaths.add(get_mpath_name_from_device_name(node, device))
        msg = ("Only one mpath was expected on Node %s, but got %s" % (
            node, mpaths))
        self.assertEqual(1, len(mpaths), msg)

        validate_multipath_pod(
            self.ocp_client[0], pod_name, hacount, mpath=list(mpaths)[0])

        return iqn, hacount, node
    def create_and_wait_for_pvcs(
            self, pvc_size=1, pvc_name_prefix="autotests-pvc", pvc_amount=1,
            sc_name=None, timeout=600, wait_step=10, skip_waiting=False,
            skip_cleanup=False):
        """Create multiple PVC's not waiting for it

        Args:
            pvc_size (int): size of PVC, default value is 1
            pvc_name_prefix (str): volume prefix for each PVC, default value is
                                   'autotests-pvc'
            pvc_amount (int): number of PVC's, default value is 1
            sc_name (str): storage class to create PVC, default value is None,
                           which will cause automatic creation of sc.
            timeout (int): timeout time for waiting for PVC's to get bound
            wait_step (int): waiting time between each try of PVC status check
            skip_waiting (bool): boolean value which defines whether
                                 we need to wait for PVC creation or not.
        Returns:
            List: list of PVC names
        """
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class(skip_cleanup=skip_cleanup)

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
        if not skip_cleanup:
            self.addCleanup(
                wait_for_resources_absence, node, 'pvc', pvc_names)

        # Wait for PVCs to be in bound state
        try:
            if not skip_waiting:
                wait_for_pvcs_be_bound(node, pvc_names, timeout, wait_step)
        finally:
            if skip_cleanup:
                return pvc_names

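            # 'reclaimPolicy' is read from the sc on OCP 3.9+; for older
            # versions it is assumed to be 'Delete' (presumably because the
            # field is not exposed there)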
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

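            # Register cleanup for every PVC; with the 'Retain' reclaim
            # policy the PV and the backing heketi (block)volume have to be
            # removed explicitly as well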
            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    if not pv_name and skip_waiting:
                        continue
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)
        return pvc_names
    def initiator_side_failures(self):
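        """Verify iscsi sessions and mpath before and after pod relocation.

        Creates a block PVC with an app pod, verifies iscsi sessions, block
        devices and the single mpath on the node hosting the pod, cordons
        that node, respins the pod elsewhere, repeats the checks on the new
        node and confirms the old node logged out of the iscsi session.
        """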

        # get storage ips of glusterfs pods
        keys = self.gluster_servers
        gluster_ips = []
        for key in keys:
            gluster_ips.append(self.gluster_servers_info[key]['storage'])
        gluster_ips.sort()

        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # find iqn and hacount from volume info
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [r':.metadata.annotations."gluster\.org\/volume\-id"']
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]
        vol_info = heketi_blockvolume_info(self.heketi_client_node,
                                           self.heketi_server_url,
                                           vol_id,
                                           json=True)
        iqn = vol_info['blockvolume']['iqn']
        hacount = int(self.sc['hacount'])

        # create app pod
        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)

        # Verify iscsi login, devices and mpaths twice: before and after
        # the pod is respun on another node
        for i in range(2):

            # get node hostname from pod info
            pod_info = oc_get_pods(self.node,
                                   selector='deploymentconfig=%s' % dc_name)
            node = pod_info[pod_name]['node']

            # get the iscsi sessions info from the node
            iscsi = get_iscsi_session(node, iqn)
            self.assertEqual(hacount, len(iscsi))
            iscsi.sort()
            self.assertEqual(set(iscsi), (set(gluster_ips) & set(iscsi)))

            # get the paths info from the node
            devices = get_iscsi_block_devices_by_path(node, iqn).keys()
            self.assertEqual(hacount, len(devices))

            # get mpath names and verify that only one mpath is there
            mpaths = set()
            for device in devices:
                mpaths.add(get_mpath_name_from_device_name(node, device))
            self.assertEqual(1, len(mpaths))

            validate_multipath_pod(self.node,
                                   pod_name,
                                   hacount,
                                   mpath=list(mpaths)[0])

            # Relocate the pod only after the first pass; the second pass
            # only re-verifies the sessions on the new node
            if i == 1:
                break

            # Make the node where the pod is running unschedulable
            oc_adm_manage_node(self.node, '--schedulable=false', nodes=[node])

            # Re-enable scheduling on that node during cleanup
            self.addCleanup(oc_adm_manage_node,
                            self.node,
                            '--schedulable=true',
                            nodes=[node])

            # Delete the pod so it gets respun on another node
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

            # wait for pod to come up
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # get the iscsi session from the previous node to verify logout
            iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
            self.assertFalse(iscsi)
Example 34
    def test_prometheus_pv_resize(self):
        """ Validate prometheus metrics with pv resize"""

        # Fetch the metrics and storing initial_metrics as dictionary
        pvc_name, pod_name, initial_metrics = self._fetch_initial_metrics(
            vol_name_prefix="for-pv-resize", volume_expansion=True)

        # Write data on the pvc and confirm it is reflected in the prometheus
        self._perform_io_and_fetch_metrics(
            pod_name=pod_name, pvc_name=pvc_name,
            filename="filename1", dirname="dirname1",
            metric_data=initial_metrics, operation="create")

        # Resize the pvc to 2GiB
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)
        pvc_size = 2
        openshift_ops.resize_pvc(self._master, pvc_name, pvc_size)
        openshift_ops.wait_for_events(self._master, obj_name=pvc_name,
                                      event_reason='VolumeResizeSuccessful')
        openshift_ops.verify_pvc_size(self._master, pvc_name, pvc_size)
        pv_name = openshift_ops.get_pv_name_from_pvc(
            self._master, pvc_name)
        openshift_ops.verify_pv_size(self._master, pv_name, pvc_size)

        heketi_volume_name = heketi_ops.heketi_volume_list_by_name_prefix(
            self.heketi_client_node, self.heketi_server_url,
            "for-pv-resize", json=True)[0][2]
        self.assertIsNotNone(
            heketi_volume_name, "Failed to fetch volume with prefix {}".
            format("for-pv-resize"))

        openshift_ops.oc_delete(self._master, 'pod', pod_name)
        openshift_ops.wait_for_resource_absence(self._master, 'pod', pod_name)
        pod_name = openshift_ops.get_pod_name_from_dc(
            self._master, self.dc_name)
        openshift_ops.wait_for_pod_be_ready(self._master, pod_name)

        # Check whether the metrics are updated or not
        for w in waiter.Waiter(120, 10):
            resize_metrics = self._get_and_manipulate_metric_data(
                self.metrics, pvc_name)
            if bool(resize_metrics) and int(resize_metrics[
                'kubelet_volume_stats_capacity_bytes']) > int(
                    initial_metrics['kubelet_volume_stats_capacity_bytes']):
                break
        if w.expired:
            raise AssertionError("Failed to reflect PVC Size after resizing")
        openshift_ops.switch_oc_project(
            self._master, self.storage_project_name)
        time.sleep(240)

        # Trigger lookups on the volume, then wait for rebalance completion
        for _ in range(100):
            self.cmd_run("oc rsh {} ls /mnt/".format(pod_name))
        self._rebalance_completion(heketi_volume_name)

        # Write data on the resized pvc and compare with the resize metrics
        self._perform_io_and_fetch_metrics(
            pod_name=pod_name, pvc_name=pvc_name,
            filename="secondfilename", dirname="seconddirname",
            metric_data=resize_metrics, operation="create")
    def test_expansion_of_block_hosting_volume_using_heketi(self):
        """Verify that after expanding block hosting volume we are able to
        consume the expanded space"""

        h_node = self.heketi_client_node
        h_url = self.heketi_server_url
        bvols_in_bhv = set([])
        bvols_pv = set([])

        BHVS = get_block_hosting_volume_list(h_node, h_url)

        free_BHVS_count = 0
        for vol in BHVS.keys():
            info = heketi_volume_info(h_node, h_url, vol, json=True)
            if info['blockinfo']['freesize'] > 0:
                free_BHVS_count += 1
            if free_BHVS_count > 1:
                self.skipTest("Skip test case because there is more than one"
                              " Block Hosting Volume with free space")

        # Create a block volume of 1 GB
        bvol_info = heketi_blockvolume_create(h_node, h_url, 1, json=True)

        expand_size = 20
        try:
            self.verify_free_space(expand_size)
            bhv = bvol_info['blockhostingvolume']
            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols_in_bhv.update(vol_info['blockinfo']['blockvolume'])
        finally:
            # cleanup BHV if there is only one block volume inside it
            if len(bvols_in_bhv) == 1:
                self.addCleanup(heketi_volume_delete,
                                h_node,
                                h_url,
                                bhv,
                                json=True)
            self.addCleanup(heketi_blockvolume_delete, h_node, h_url,
                            bvol_info['id'])

        size = vol_info['size']
        free_size = vol_info['blockinfo']['freesize']
        bvol_count = int(free_size / expand_size)
        bricks = vol_info['bricks']

        # Create PVCs to fill the BHV
        pvcs = self.create_and_wait_for_pvcs(
            pvc_size=(expand_size if bvol_count else free_size),
            pvc_amount=(bvol_count or 1),
            timeout=300)

        vol_expand = True

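        # Two passes: the first verifies the block volumes of the initial
        # PVCs and expands the BHV, the second verifies that the PVCs
        # created after expansion land in the same BHV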
        for i in range(2):
            # Get the block volume ids from the PVCs
            for pvc in pvcs:
                pv = get_pv_name_from_pvc(self.node, pvc)
                custom = r':.metadata.annotations."gluster\.org\/volume-id"'
                bvol_id = oc_get_custom_resource(self.node, 'pv', custom, pv)
                bvols_pv.add(bvol_id[0])

            vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
            bvols = vol_info['blockinfo']['blockvolume']
            bvols_in_bhv.update(bvols)
            self.assertEqual(bvols_pv, (bvols_in_bhv & bvols_pv))

            # Expand BHV and verify bricks and size of BHV
            if vol_expand:
                vol_expand = False
                heketi_volume_expand(h_node,
                                     h_url,
                                     bhv,
                                     expand_size,
                                     json=True)
                vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)

                self.assertEqual(size + expand_size, vol_info['size'])
                self.assertFalse(len(vol_info['bricks']) % 3)
                self.assertLess(len(bricks), len(vol_info['bricks']))

                # create more PVCs in expanded BHV
                pvcs = self.create_and_wait_for_pvcs(pvc_size=(expand_size -
                                                               1),
                                                     pvc_amount=1)
Example 36
    def test_pv_resize_when_heketi_down(self):
        """Create a PVC and try to expand it when heketi is down, It should
        fail. After heketi is up, expand PVC should work.
        """
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc()
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        pv_name = get_pv_name_from_pvc(self.node, pvc_name)
        custom = (r':metadata.annotations.'
                  r'"gluster\.kubernetes\.io\/heketi-volume-id"')
        vol_id = oc_get_custom_resource(self.node, 'pv', custom, pv_name)[0]

        h_vol_info = heketi_ops.heketi_volume_info(self.heketi_client_node,
                                                   self.heketi_server_url,
                                                   vol_id,
                                                   json=True)

        # Bring the heketi POD down
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=0)
        self.addCleanup(scale_dc_pod_amount_and_wait,
                        self.node,
                        self.heketi_dc_name,
                        pod_amount=1)

        cmd = 'dd if=/dev/urandom of=/mnt/%s bs=614400k count=1'
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file1')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)

        resize_pvc(self.node, pvc_name, 2)
        wait_for_events(self.node,
                        pvc_name,
                        obj_type='PersistentVolumeClaim',
                        event_type='Warning',
                        event_reason='VolumeResizeFailed')

        # Verify volume was not expanded
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertEqual(vol_info['gluster_vol_id'], h_vol_info['name'])
        self.assertEqual(len(vol_info['bricks']['brick']),
                         len(h_vol_info['bricks']))

        # Bring the heketi POD up
        scale_dc_pod_amount_and_wait(self.node,
                                     self.heketi_dc_name,
                                     pod_amount=1)

        # Verify volume expansion
        verify_pvc_size(self.node, pvc_name, 2)
        vol_info = get_gluster_vol_info_by_pvc_name(self.node, pvc_name)
        self.assertFalse(len(vol_info['bricks']['brick']) % 3)
        self.assertLess(len(h_vol_info['bricks']),
                        len(vol_info['bricks']['brick']))

        # Wait for remount after expansion
        for w in waiter.Waiter(timeout=30, interval=5):
            ret, out, err = oc_rsh(self.node, pod_name,
                                   "df -Ph /mnt | awk '{print $2}' | tail -1")
            self.assertFalse(ret,
                             'Failed with err: %s and Output: %s' % (err, out))
            if out.strip() == '2.0G':
                break

        # Write data to confirm there is more space than before
        ret, out, err = oc_rsh(self.node, pod_name, cmd % 'file2')
        self.assertFalse(ret, 'Not able to write file with err: %s' % err)

        # Verify pod is running
        wait_for_pod_be_ready(self.node, pod_name, 10, 5)
Example 38
    def _pv_resize(self, exceed_free_space):
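        """Expand a PVC either within or beyond the available free space.

        With exceed_free_space=True the resize is expected to fail with a
        'VolumeResizeFailed' event while the app pod keeps serving IO; with
        False the PVC is expanded to nearly all of the available free space,
        the pod is respun and data is written on the expanded volume.
        """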
        dir_path = "/mnt"
        pvc_size_gb, min_free_space_gb = 1, 3

        # Get available free space disabling redundant devices and nodes
        heketi_url = self.heketi_server_url
        node_id_list = heketi_ops.heketi_node_list(self.heketi_client_node,
                                                   heketi_url)
        self.assertTrue(node_id_list)
        nodes = {}
        min_free_space = min_free_space_gb * 1024**2
        for node_id in node_id_list:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    heketi_url,
                                                    node_id,
                                                    json=True)
            if (node_info['state'].lower() != 'online'
                    or not node_info['devices']):
                continue
            if len(nodes) > 2:
                out = heketi_ops.heketi_node_disable(self.heketi_client_node,
                                                     heketi_url, node_id)
                self.assertTrue(out)
                self.addCleanup(heketi_ops.heketi_node_enable,
                                self.heketi_client_node, heketi_url, node_id)
            for device in node_info['devices']:
                if device['state'].lower() != 'online':
                    continue
                free_space = device['storage']['free']
                if (node_id in nodes.keys() or free_space < min_free_space):
                    out = heketi_ops.heketi_device_disable(
                        self.heketi_client_node, heketi_url, device['id'])
                    self.assertTrue(out)
                    self.addCleanup(heketi_ops.heketi_device_enable,
                                    self.heketi_client_node, heketi_url,
                                    device['id'])
                    continue
                nodes[node_id] = free_space
        if len(nodes) < 3:
            raise self.skipTest("Could not find 3 online nodes with, "
                                "at least, 1 online device having free space "
                                "bigger than %dGb." % min_free_space_gb)

        # Calculate maximum available size for PVC
        available_size_gb = int(min(nodes.values()) / (1024**2))

        # Create PVC
        self.create_storage_class(allow_volume_expansion=True)
        pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)

        # Create DC with POD and attached PVC to it
        dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
        self.addCleanup(oc_delete, self.node, 'dc', dc_name)
        self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        if exceed_free_space:
            # Try to expand existing PVC exceeding free space
            resize_pvc(self.node, pvc_name, available_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeFailed')

            # Check that the app POD is up and running, then try to write data
            wait_for_pod_be_ready(self.node, pod_name)
            cmd = ("dd if=/dev/urandom of=%s/autotest bs=100K count=1" %
                   dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(
                ret, 0,
                "Failed to write data after failed attempt to expand PVC.")
        else:
            # Expand existing PVC using all the available free space
            expand_size_gb = available_size_gb - pvc_size_gb
            resize_pvc(self.node, pvc_name, expand_size_gb)
            verify_pvc_size(self.node, pvc_name, expand_size_gb)
            pv_name = get_pv_name_from_pvc(self.node, pvc_name)
            verify_pv_size(self.node, pv_name, expand_size_gb)
            wait_for_events(self.node,
                            obj_name=pvc_name,
                            event_reason='VolumeResizeSuccessful')

            # Recreate app POD
            oc_delete(self.node, 'pod', pod_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)

            # Write data on the expanded PVC
            cmd = ("dd if=/dev/urandom of=%s/autotest "
                   "bs=1M count=1025" % dir_path)
            ret, out, err = oc_rsh(self.node, pod_name, cmd)
            self.assertEqual(ret, 0,
                             "Failed to write data on the expanded PVC")