Example #1
    @classmethod
    def setUpClass(cls):
        super(TestPvResizeClass, cls).setUpClass()
        cls.node = cls.ocp_master_node[0]
        if get_openshift_version() < "3.9":
            cls.skip_me = True
            return
        enable_pvc_resize(cls.node)
    def test_pvc_deletion_while_pod_is_running(self):
        """Validate PVC deletion while pod is running"""
        if get_openshift_version() <= "3.9":
            self.skipTest("PVC deletion while pod is running is not supported"
                          " in OCP 3.9 and older")

        # Create a DC with a pod and attach the PVC to it
        sc_name = self.create_storage_class()
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
        dc_name, pod_name = self.create_dc_with_pvc(pvc_name)

        # Delete PVC
        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        # Make sure we are able to work with files on the mounted volume
        # after deleting pvc.
        filepath = "/mnt/file_for_testing_volume.log"
        cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
        ret, out, err = oc_rsh(self.node, pod_name, cmd)
        self.assertEqual(
            ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
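The assertRaises(ExecutionError) wrapped around wait_for_resource_absence is the idiom these tests use to assert that a resource is still present: waiting for its absence is expected to time out while the pod keeps the PVC alive. A minimal self-contained sketch of that polling pattern (poll_absent and ResourceStillPresent are illustrative names, not part of the glusto-tests API):

import time


class ResourceStillPresent(Exception):
    pass


def poll_absent(check_gone, interval=3, timeout=30):
    """Poll check_gone() until it returns True or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check_gone():
            return
        time.sleep(interval)
    raise ResourceStillPresent("resource still present after %ss" % timeout)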
    def test_usage_of_default_storage_class(self):
        """Validate PVs creation for SC with default custom volname prefix"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        # Unset 'default' option from all the existing Storage Classes
        unset_sc_annotation_cmd = (
            r"""oc annotate sc %s """
            r""""storageclass%s.kubernetes.io/is-default-class"-""")
        set_sc_annotation_cmd = (
            r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
            r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")
        get_sc_cmd = (
            r'oc get sc --no-headers '
            r'-o=custom-columns=:.metadata.name,'
            r':".metadata.annotations.storageclass\.'
            r'kubernetes\.io\/is-default-class",:".metadata.annotations.'
            r'storageclass\.beta\.kubernetes\.io\/is-default-class"')
        sc_list = self.cmd_run(get_sc_cmd)
        for sc in sc_list.split("\n"):
            sc = sc.split()
            if len(sc) != 3:
                self.skipTest(
                    "Unexpected output for list of storage classes. "
                    "The following is expected to contain 3 fields: %s" % sc)
            for value, api_type in ((sc[1], ''), (sc[2], '.beta')):
                if value == '<none>':
                    continue
                self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type))
                self.addCleanup(
                    self.cmd_run,
                    set_sc_annotation_cmd % (sc[0], api_type, value))

        # Create new SC
        prefix = "autotests-default-sc"
        self.create_storage_class(sc_name_prefix=prefix)

        # Make the new SC the default one and sleep for 1 sec to avoid races
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true'))
        self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true'))
        time.sleep(1)

        # Create PVC without specification of SC
        pvc_name = oc_create_pvc(
            self.node, sc_name=None, pvc_name_prefix=prefix)
        self.addCleanup(
            wait_for_resource_absence, self.node, 'pvc', pvc_name)
        self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)

        # Wait for successful creation of PVC and check its SC
        verify_pvc_status_is_bound(self.node, pvc_name)
        get_sc_of_pvc_cmd = (
            "oc get pvc %s --no-headers "
            "-o=custom-columns=:.spec.storageClassName" % pvc_name)
        out = self.cmd_run(get_sc_of_pvc_cmd)
        self.assertEqual(out, self.sc_name)
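For clarity, here is what the two command templates above render to for a hypothetical storage class named "slow", using the stable (non-beta) annotation; this is plain string formatting of the templates shown in the test:

unset_sc_annotation_cmd = (
    r"""oc annotate sc %s """
    r""""storageclass%s.kubernetes.io/is-default-class"-""")
set_sc_annotation_cmd = (
    r"""oc patch storageclass %s -p'{"metadata": {"annotations": """
    r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""")

print(unset_sc_annotation_cmd % ("slow", ""))
# oc annotate sc slow "storageclass.kubernetes.io/is-default-class"-
print(set_sc_annotation_cmd % ("slow", "", "true"))
# oc patch storageclass slow -p'{"metadata": {"annotations":
#   {"storageclass.kubernetes.io/is-default-class": "true"}}}'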
    def create_and_wait_for_pvcs(self, pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1, sc_name=None,
                                 timeout=120, wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(
                wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)

        return pvc_names
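A hypothetical call site for the helper above, creating three 2 Gi PVCs against an already existing storage class (all names are illustrative):

pvc_names = self.create_and_wait_for_pvcs(
    pvc_size=2, pvc_name_prefix="autotests-pvc", pvc_amount=3,
    sc_name="storage-class-1", timeout=120, wait_step=3)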
    def test_dynamic_provisioning_glusterfile_volname_prefix(self):
        """Validate dynamic provisioning for gluster file with vol name prefix
        """
        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        g.log.info("test_dynamic_provisioning_glusterfile volname prefix")
        self.dynamic_provisioning_glusterfile(True)
    def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
        """Validate retain policy for gluster-block after PVC deletion"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)

        try:
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
            oc_delete(self.node, 'dc', dc_name)

        # Get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)

        custom = [
            r':.metadata.annotations."gluster\.org\/volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]
        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        # Check the reclaim policy of the PV
        self.assertEqual(reclaim_policy, 'Retain')

        # delete the pvc
        oc_delete(self.node, 'pvc', self.pvc_name)

        # Check that the PV has not been deleted as well
        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        # Get the block volume list
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertIn(vol_id, blocklist)

        heketi_blockvolume_delete(self.heketi_client_node,
                                  self.heketi_server_url, vol_id)
        blocklist = heketi_blockvolume_list(self.heketi_client_node,
                                            self.heketi_server_url)
        self.assertNotIn(vol_id, blocklist)
        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
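The oc_get_custom_resource() call above presumably wraps an "oc get ... -o=custom-columns=..." invocation of the same shape as the get_sc_of_pvc_cmd shown earlier; roughly (pv_name is illustrative):

pv_name = "pvc-0f3e1b2a"  # illustrative
cmd = (
    "oc get pv %s --no-headers -o=custom-columns="
    r':.metadata.annotations."gluster\.org\/volume\-id",'
    ":.spec.persistentVolumeReclaimPolicy" % pv_name)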
    def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
        """Validate retain policy for glusterfs after deletion of pvc"""

        if get_openshift_version() < "3.9":
            self.skipTest(
                "'Reclaim' feature is not supported in OCP older than 3.9")

        self.create_storage_class(reclaim_policy='Retain')
        self.create_and_wait_for_pvc()

        # get the name of the volume
        pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
        custom = [
            r':.metadata.annotations.'
            r'"gluster\.kubernetes\.io\/heketi\-volume\-id"',
            r':.spec.persistentVolumeReclaimPolicy'
        ]

        vol_id, reclaim_policy = oc_get_custom_resource(
            self.node, 'pv', custom, pv_name)

        self.assertEqual(reclaim_policy, 'Retain')

        # Create a DC with a pod and attach the PVC to it.
        try:
            dc_name = oc_create_app_dc_with_io(
                self.node, self.pvc_name, image=self.io_container_image_cirros)
            pod_name = get_pod_name_from_dc(self.node, dc_name)
            wait_for_pod_be_ready(self.node, pod_name)
        finally:
            scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
            oc_delete(self.node, 'dc', dc_name)
            wait_for_resource_absence(self.node, 'pod', pod_name)

        oc_delete(self.node, 'pvc', self.pvc_name)

        with self.assertRaises(ExecutionError):
            wait_for_resource_absence(self.node,
                                      'pvc',
                                      self.pvc_name,
                                      interval=3,
                                      timeout=30)

        heketi_volume_delete(self.heketi_client_node, self.heketi_server_url,
                             vol_id)

        vol_list = heketi_volume_list(self.heketi_client_node,
                                      self.heketi_server_url)

        self.assertNotIn(vol_id, vol_list)

        oc_delete(self.node, 'pv', pv_name)
        wait_for_resource_absence(self.node, 'pv', pv_name)
Example #14
    def setUp(self):
        super(TestHeketiLvmWrapper, self).setUp()

        self.oc_node = self.ocp_master_node[0]
        self.pod_name = openshift_ops.get_ocp_gluster_pod_details(self.oc_node)
        self.h_pod_name = openshift_ops.get_pod_name_from_dc(
            self.oc_node, self.heketi_dc_name)
        self.volume_size = 2

        ocp_version = openshift_version.get_openshift_version()
        if ocp_version < "3.11.170":
            self.skipTest("Heketi LVM Wrapper functionality does not "
                          "support on OCP {}".format(ocp_version.v_str))
        h_version = heketi_version.get_heketi_version(self.heketi_client_node)
        if h_version < '9.0.0-9':
            self.skipTest("heketi-client package {} does not support Heketi "
                          "LVM Wrapper functionality".format(h_version.v_str))
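Both helpers return version objects that compare directly against plain strings (note ocp_version.v_str and h_version.v_str above). A toy sketch of that comparison pattern, assuming simple dotted-numeric versions; this is not the library's actual implementation:

import functools


@functools.total_ordering
class Version(object):
    """Toy comparable version; v_str keeps the original string."""

    def __init__(self, v_str):
        self.v_str = v_str
        self._key = tuple(
            int(p) for p in v_str.replace('-', '.').split('.'))

    def _coerce(self, other):
        return other if isinstance(other, Version) else Version(other)

    def __eq__(self, other):
        return self._key == self._coerce(other)._key

    def __lt__(self, other):
        return self._key < self._coerce(other)._key


assert Version("3.11.170") < "3.11.200"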
Example #15
    def test_create_and_verify_pvc_with_volume_name_prefix(self):
        """create and verify pvc with volname prefix on an app pod"""
        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        sc_name = self.create_storage_class(create_vol_name_prefix=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
        namespace = (self.sc.get(
            'secretnamespace',
            self.sc.get('restsecretnamespace', 'default')))
        verify_volume_name_prefix(
            self.heketi_client_node,
            self.sc.get("volumenameprefix", "autotest"),
            namespace, pvc_name, self.heketi_server_url)
        self.create_dc_with_pvc(pvc_name)
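According to the external GlusterFS provisioner documentation, the 'volumenameprefix' option makes Heketi name volumes as <prefix>_<namespace>_<claimname>_<uuid>, which is presumably what verify_volume_name_prefix checks for. An illustrative expectation (all values hypothetical; the exact layout is defined by the provisioner, not this test code):

volumenameprefix = "autotest"    # from the storage class definition
namespace = "default"            # secretnamespace / restsecretnamespace
pvc_name = "autotests-pvc-abc1"  # illustrative
expected_prefix = "%s_%s_%s" % (volumenameprefix, namespace, pvc_name)
# Heketi then appends "_<uuid>" to this prefix when creating the volume.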
Example #17
    def setUp(self):
        super(TestArbiterVolumeCreateExpandDelete, self).setUp()
        self.node = self.ocp_master_node[0]
        if openshift_version.get_openshift_version() < "3.9":
            self.skipTest("Arbiter feature cannot be used on OCP older "
                          "than 3.9, because 'volumeoptions' for Heketi "
                          "is not supported there.")
        version = heketi_version.get_heketi_version(self.heketi_client_node)
        if version < '6.0.0-11':
            self.skipTest("heketi-client package %s does not support arbiter "
                          "functionality" % version.v_str)

        # Mark one of the Heketi nodes as arbiter-supported if none of the
        # existing nodes or devices is already enabled to support it.
        self.heketi_server_url = self.sc.get('resturl')
        arbiter_tags = ('required', 'supported')
        arbiter_already_supported = False

        self.node_id_list = heketi_ops.heketi_node_list(
            self.heketi_client_node, self.heketi_server_url)

        for node_id in self.node_id_list[::-1]:
            node_info = heketi_ops.heketi_node_info(self.heketi_client_node,
                                                    self.heketi_server_url,
                                                    node_id,
                                                    json=True)
            if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                arbiter_already_supported = True
                break
            for device in node_info['devices'][::-1]:
                if device.get('tags', {}).get('arbiter') in arbiter_tags:
                    arbiter_already_supported = True
                    break
            else:
                continue
            break
        if not arbiter_already_supported:
            self._set_arbiter_tag_with_further_revert(self.heketi_client_node,
                                                      self.heketi_server_url,
                                                      'node',
                                                      self.node_id_list[0],
                                                      'supported')
    def test_create_and_verify_pvc_with_volume_name_prefix(self):
        """create and verify pvc with volname prefix on an app pod"""
        if get_openshift_version() < "3.9":
            self.skipTest(
                "'volumenameprefix' option for Heketi is not supported"
                " in OCP older than 3.9")

        sc_name = self.create_storage_class(create_vol_name_prefix=True)
        pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
        namespace = (self.sc.get('secretnamespace',
                                 self.sc.get('restsecretnamespace',
                                             'default')))
        verify_volume_name_prefix(self.heketi_client_node,
                                  self.sc.get("volumenameprefix", "autotest"),
                                  namespace, pvc_name, self.heketi_server_url)
        self.create_dc_with_pvc(pvc_name)
        pv_name = get_pv_name_from_pvc(self.ocp_master_node[0], pvc_name)
        endpoint = oc_get_custom_resource(self.ocp_master_node[0],
                                          "pv",
                                          ":spec.glusterfs.endpoints",
                                          name=pv_name)
        self.assertTrue(
            endpoint, "Failed to read endpoints of %s on %s" %
            (pv_name, self.ocp_master_node[0]))
def enable_pvc_resize(master_node):
    '''Edit /etc/origin/master/master-config.yaml to enable the
    pv_resize feature and restart the atomic-openshift services on the
    master node.

    Args:
        master_node (str): hostname of the master node on which the
                           master-config.yaml file should be edited
    Returns:
        bool: True if successful, otherwise an exception is raised
    '''
    version = get_openshift_version()
    if version < "3.9":
        msg = ("pv resize is not available in openshift "
               "version %s " % version)
        g.log.error(msg)
        raise NotSupportedException(msg)

    try:
        conn = g.rpyc_get_connection(master_node, user="******")
        if conn is None:
            err_msg = ("Failed to get rpyc connection of node %s"
                       % master_node)
            g.log.error(err_msg)
            raise ExecutionError(err_msg)

        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
            dict_add = data['admissionConfig']['pluginConfig']
            if "PersistentVolumeClaimResize" in dict_add:
                g.log.info("master-config.yaml file is already edited")
                return True
            dict_add['PersistentVolumeClaimResize'] = {
                'configuration': {
                    'apiVersion': 'v1',
                    'disable': 'false',
                    'kind': 'DefaultAdmissionConfig'}}
            data['admissionConfig']['pluginConfig'] = dict_add
            kube_config = data['kubernetesMasterConfig']
            for key in ('apiServerArguments', 'controllerArguments'):
                kube_config[key] = (
                    kube_config.get(key)
                    if isinstance(kube_config.get(key), dict) else {})
                value = ['ExpandPersistentVolumes=true']
                kube_config[key]['feature-gates'] = value
        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
            yaml.dump(data, f, default_flow_style=False)
    except Exception as err:
        raise ExecutionError("failed to edit master-config.yaml file "
                             "%s on %s" % (err, master_node))
    finally:
        g.rpyc_close_connection(master_node, user="******")

    g.log.info("successfully edited master-config.yaml file "
               "%s" % master_node)
    if version == "3.9":
        cmd = ("systemctl restart atomic-openshift-master-api "
               "atomic-openshift-master-controllers")
    else:
        cmd = ("/usr/local/bin/master-restart api && "
               "/usr/local/bin/master-restart controllers")
    ret, out, err = g.run(master_node, cmd, "root")
    if ret != 0:
        err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
            cmd, master_node, out, err)
        g.log.error(err_msg)
        raise ExecutionError(err_msg)

    # Wait for API service to be ready after the restart
    for w in waiter.Waiter(timeout=120, interval=1):
        try:
            cmd_run("oc get nodes", master_node)
            return True
        except AssertionError:
            continue
    err_msg = "Exceeded 120s timeout waiting for OCP API to start responding."
    g.log.error(err_msg)
    raise ExecutionError(err_msg)
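Merged into master-config.yaml, the PersistentVolumeClaimResize plugin config and feature-gates written by this function amount to the following fragment; the sketch below simply rebuilds and dumps the same dictionaries (requires PyYAML):

import yaml

fragment = {
    'admissionConfig': {'pluginConfig': {
        'PersistentVolumeClaimResize': {'configuration': {
            'apiVersion': 'v1',
            'disable': 'false',
            'kind': 'DefaultAdmissionConfig'}}}},
    'kubernetesMasterConfig': {
        'apiServerArguments': {
            'feature-gates': ['ExpandPersistentVolumes=true']},
        'controllerArguments': {
            'feature-gates': ['ExpandPersistentVolumes=true']}},
}
print(yaml.dump(fragment, default_flow_style=False))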
Example #21
def enable_pvc_resize(master_node):
    '''Edit /etc/origin/master/master-config.yaml to enable the
    pv_resize feature and restart the atomic-openshift services on the
    master node.

    Args:
        master_node (str): hostname of the master node on which the
                           master-config.yaml file should be edited
    Returns:
        bool: True if successful, otherwise an exception is raised
    '''
    version = get_openshift_version()
    if version < "3.9":
        msg = ("pv resize is not available in openshift "
               "version %s " % version)
        g.log.error(msg)
        raise NotSupportedException(msg)

    try:
        conn = g.rpyc_get_connection(master_node, user="******")
        if conn is None:
            err_msg = ("Failed to get rpyc connection of node %s" %
                       master_node)
            g.log.error(err_msg)
            raise ExecutionError(err_msg)

        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
            dict_add = data['admissionConfig']['pluginConfig']
            if "PersistentVolumeClaimResize" in dict_add:
                g.log.info("master-config.yaml file is already edited")
                return True
            dict_add['PersistentVolumeClaimResize'] = {
                'configuration': {
                    'apiVersion': 'v1',
                    'disable': 'false',
                    'kind': 'DefaultAdmissionConfig'
                }
            }
            data['admissionConfig']['pluginConfig'] = dict_add
            kube_config = data['kubernetesMasterConfig']
            for key in ('apiServerArguments', 'controllerArguments'):
                kube_config[key] = (kube_config.get(key) if isinstance(
                    kube_config.get(key), dict) else {})
                value = ['ExpandPersistentVolumes=true']
                kube_config[key]['feature-gates'] = value
        with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
            yaml.dump(data, f, default_flow_style=False)
    except Exception as err:
        raise ExecutionError("failed to edit master-config.yaml file "
                             "%s on %s" % (err, master_node))
    finally:
        g.rpyc_close_connection(master_node, user="******")

    g.log.info("successfully edited master-config.yaml file "
               "%s" % master_node)
    if version == "3.9":
        cmd = ("systemctl restart atomic-openshift-master-api "
               "atomic-openshift-master-controllers")
    else:
        cmd = ("/usr/local/bin/master-restart api && "
               "/usr/local/bin/master-restart controllers")
    ret, out, err = g.run(master_node, cmd, "root")
    if ret != 0:
        err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
            cmd, master_node, out, err)
        g.log.error(err_msg)
        raise ExecutionError(err_msg)

    return True
    def create_and_wait_for_pvcs(
            self, pvc_size=1, pvc_name_prefix="autotests-pvc", pvc_amount=1,
            sc_name=None, timeout=600, wait_step=10, skip_waiting=False,
            skip_cleanup=False):
        """Create multiple PVC's not waiting for it

        Args:
            pvc_size (int): size of PVC, default value is 1
            pvc_name_prefix (str): volume prefix for each PVC, default value is
                                   'autotests-pvc'
            pvc_amount (int): number of PVC's, default value is 1
            sc_name (str): storage class to create PVC, default value is None,
                           which will cause automatic creation of sc.
            timeout (int): timeout time for waiting for PVC's to get bound
            wait_step (int): waiting time between each try of PVC status check
            skip_waiting (bool): boolean value which defines whether
                                 we need to wait for PVC creation or not.
        Returns:
            List: list of PVC names
        """
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class(skip_cleanup=skip_cleanup)

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(
                node, sc_name, pvc_name_prefix=pvc_name_prefix,
                pvc_size=pvc_size)
            pvc_names.append(pvc_name)
        if not skip_cleanup:
            self.addCleanup(
                wait_for_resources_absence, node, 'pvc', pvc_names)

        # Wait for PVCs to be in bound state
        try:
            if not skip_waiting:
                wait_for_pvcs_be_bound(node, pvc_names, timeout, wait_step)
        finally:
            if skip_cleanup:
                return pvc_names

            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    if not pv_name and skip_waiting:
                        continue
                    self.addCleanup(oc_delete, node, 'pv', pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(
                        node, 'pv', custom, pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url, vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete, node, 'pvc', pvc_name,
                                raise_on_absence=False)
        return pvc_names
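A hypothetical call site for the extended helper above: create ten PVCs without blocking, do other setup work, then check the bindings explicitly (wait_for_pvcs_be_bound is the same helper the method itself uses):

pvc_names = self.create_and_wait_for_pvcs(
    pvc_amount=10, sc_name=self.sc_name, skip_waiting=True)
# ... perform other setup while the PVCs bind in the background ...
wait_for_pvcs_be_bound(self.ocp_client[0], pvc_names, timeout=600,
                       wait_step=10)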
def enable_pvc_resize(master_node):
    '''Edit /etc/origin/master/master-config.yaml to enable the
    pv_resize feature and restart the atomic-openshift services on the
    master node.

    Args:
        master_node (str): hostname of the master node on which the
                           master-config.yaml file should be edited
    Returns:
        bool: True if successful, otherwise an exception is raised
    '''
    version = get_openshift_version()
    if version < "3.9":
        msg = ("pv resize is not available in openshift "
               "version %s " % version)
        g.log.error(msg)
        raise NotSupportedException(msg)

    with tempfile.NamedTemporaryFile(delete=False) as temp:
        temp_filename = temp.name

    try:
        g.download(master_node, MASTER_CONFIG_FILEPATH, temp_filename)
    except Exception as e:
        err_msg = (
            "Failed to download '{}' from master node '{}' due to"
            "exception\n{}".format(
                MASTER_CONFIG_FILEPATH, master_node, six.text_type(e)))
        raise ExecutionError(err_msg)

    with open(temp_filename, 'r') as f:
        data = yaml.load(f, Loader=yaml.FullLoader)
        dict_add = data['admissionConfig']['pluginConfig']
        if "PersistentVolumeClaimResize" in dict_add:
            g.log.info("master-config.yaml file is already edited")
            return True
        dict_add['PersistentVolumeClaimResize'] = {
            'configuration': {
                'apiVersion': 'v1',
                'disable': 'false',
                'kind': 'DefaultAdmissionConfig'}}
        data['admissionConfig']['pluginConfig'] = dict_add
        kube_config = data['kubernetesMasterConfig']
        for key in ('apiServerArguments', 'controllerArguments'):
            kube_config[key] = (
                kube_config.get(key)
                if isinstance(kube_config.get(key), dict) else {})
            value = ['ExpandPersistentVolumes=true']
            kube_config[key]['feature-gates'] = value

    with open(temp_filename, 'w+') as f:
        yaml.dump(data, f, default_flow_style=False)

    try:
        g.upload(master_node, temp_filename, MASTER_CONFIG_FILEPATH)
    except Exception as e:
        err_msg = (
            "Failed to upload '{}' to master node '{}' due to"
            "exception\n{}".format(
                master_node, MASTER_CONFIG_FILEPATH, six.text_type(e)))
        raise ExecutionError(err_msg)
    os.unlink(temp_filename)

    if version == "3.9":
        cmd = ("systemctl restart atomic-openshift-master-api "
               "atomic-openshift-master-controllers")
    else:
        cmd = ("/usr/local/bin/master-restart api && "
               "/usr/local/bin/master-restart controllers")
    ret, out, err = g.run(master_node, cmd, "root")
    if ret != 0:
        err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
            cmd, master_node, out, err)
        g.log.error(err_msg)
        raise ExecutionError(err_msg)

    # Wait for API service to be ready after the restart
    for w in waiter.Waiter(timeout=120, interval=1):
        try:
            cmd_run("oc get nodes", master_node)
            return True
        except AssertionError:
            continue
    err_msg = "Exceeded 120s timeout waiting for OCP API to start responding."
    g.log.error(err_msg)
    raise ExecutionError(err_msg)
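A hypothetical caller, mirroring the setUpClass in Example #1: enable the feature and fall back to skipping when the cluster is too old (NotSupportedException is raised by the version check at the top of the function):

try:
    enable_pvc_resize(cls.ocp_master_node[0])
except NotSupportedException:
    # OCP older than 3.9: tests that need pv_resize must be skipped
    cls.skip_me = True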
Example #24
    def create_and_wait_for_pvcs(self,
                                 pvc_size=1,
                                 pvc_name_prefix="autotests-pvc",
                                 pvc_amount=1,
                                 sc_name=None,
                                 timeout=120,
                                 wait_step=3):
        node = self.ocp_client[0]

        # Create storage class if not specified
        if not sc_name:
            if getattr(self, "sc_name", ""):
                sc_name = self.sc_name
            else:
                sc_name = self.create_storage_class()

        # Create PVCs
        pvc_names = []
        for i in range(pvc_amount):
            pvc_name = oc_create_pvc(node,
                                     sc_name,
                                     pvc_name_prefix=pvc_name_prefix,
                                     pvc_size=pvc_size)
            pvc_names.append(pvc_name)
            self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name)

        # Wait for PVCs to be in bound state
        try:
            for pvc_name in pvc_names:
                verify_pvc_status_is_bound(node, pvc_name, timeout, wait_step)
        finally:
            if get_openshift_version() < "3.9":
                reclaim_policy = "Delete"
            else:
                reclaim_policy = oc_get_custom_resource(
                    node, 'sc', ':.reclaimPolicy', sc_name)[0]

            for pvc_name in pvc_names:
                if reclaim_policy == 'Retain':
                    pv_name = get_pv_name_from_pvc(node, pvc_name)
                    self.addCleanup(oc_delete,
                                    node,
                                    'pv',
                                    pv_name,
                                    raise_on_absence=False)
                    custom = (r':.metadata.annotations."gluster\.kubernetes'
                              r'\.io\/heketi\-volume\-id"')
                    vol_id = oc_get_custom_resource(node, 'pv', custom,
                                                    pv_name)[0]
                    if self.sc.get('provisioner') == "kubernetes.io/glusterfs":
                        self.addCleanup(heketi_volume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                    else:
                        self.addCleanup(heketi_blockvolume_delete,
                                        self.heketi_client_node,
                                        self.heketi_server_url,
                                        vol_id,
                                        raise_on_error=False)
                self.addCleanup(oc_delete,
                                node,
                                'pvc',
                                pvc_name,
                                raise_on_absence=False)

        return pvc_names